update kubernetes dependencies 1.17.2

parent e9a1aad6e3
commit b01c6f4f23

README.md (15)

@ -60,14 +60,13 @@ All additional compatibility is only best effort, or happens to still/already be

#### Compatibility matrix

At most, 5 kube-state-metrics and 5 [kubernetes releases](https://github.com/kubernetes/kubernetes/releases) will be recorded below.

| kube-state-metrics | **Kubernetes 1.12** | **Kubernetes 1.13** | **Kubernetes 1.14** | **Kubernetes 1.15** | **Kubernetes 1.16** |
|--------------------|---------------------|---------------------|---------------------|----------------------|----------------------|
| **v1.5.0** | ✓ | - | - | - | - |
| **v1.6.0** | ✓ | ✓ | - | - | - |
| **v1.7.2** | ✓ | ✓ | ✓ | - | - |
| **v1.8.0** | ✓ | ✓ | ✓ | ✓ | - |
| **v1.9.3** | ✓ | ✓ | ✓ | ✓ | ✓ |
| **master** | ✓ | ✓ | ✓ | ✓ | ✓ |

| kube-state-metrics | **Kubernetes 1.13** | **Kubernetes 1.14** | **Kubernetes 1.15** | **Kubernetes 1.16** | **Kubernetes 1.17** |
|--------------------|---------------------|---------------------|----------------------|----------------------|----------------------|
| **v1.6.0** | ✓ | - | - | - | - |
| **v1.7.2** | ✓ | ✓ | - | - | - |
| **v1.8.0** | ✓ | ✓ | ✓ | - | - |
| **v1.9.3** | ✓ | ✓ | ✓ | ✓ | - |
| **master** | ✓ | ✓ | ✓ | ✓ | ✓ |

- `✓` Fully supported version range.
- `-` The Kubernetes cluster has features the client-go library can't use (additional API objects, etc).
go.mod (8)

@ -12,10 +12,10 @@ require (
github.com/robfig/cron/v3 v3.0.0
github.com/spf13/pflag v1.0.5
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72
k8s.io/api v0.0.0-20191112020540-7f9008e52f64
k8s.io/apimachinery v0.0.0-20191111054156-6eb29fdf75dc
k8s.io/autoscaler/vertical-pod-autoscaler v0.0.0-20191115143342-4cf961056038
k8s.io/client-go v0.0.0-20191109102209-3c0d1af94be5
k8s.io/api v0.17.2
k8s.io/apimachinery v0.17.2
k8s.io/autoscaler/vertical-pod-autoscaler v0.0.0-20200123122250-fa95810cfc1e
k8s.io/client-go v0.17.2
k8s.io/klog v1.0.0
)
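
The bumped k8s.io/* modules line up with Kubernetes 1.17.2 and pull in the new discovery.k8s.io/v1beta1 EndpointSlice API vendored later in this diff. As a rough illustration only (not part of this commit), a consumer of client-go v0.17.2 could list EndpointSlices as sketched below; the in-cluster config and the "default" namespace are assumptions.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Assumes the program runs inside a cluster; use clientcmd for out-of-cluster setups.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// client-go v0.17.x List calls do not take a context argument yet.
	slices, err := client.DiscoveryV1beta1().EndpointSlices("default").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, s := range slices.Items {
		fmt.Println(s.Name, s.AddressType)
	}
}
```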
go.sum (15)

@ -105,6 +105,7 @@ github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k=

@ -352,16 +353,22 @@ k8s.io/api v0.0.0-20191016110408-35e52d86657a/go.mod h1:/L5qH+AD540e7Cetbui1tuJe
k8s.io/api v0.0.0-20191109101512-6d4d1612ba53/go.mod h1:VJq7+38rpM4TSUbRiZX4P5UVAKK2UQpNQLZClkFQkpE=
k8s.io/api v0.0.0-20191112020540-7f9008e52f64 h1:aAGj+7E+gRYpt4qjByqnh9miBg8VeV4Rzc4pjwa0gmY=
k8s.io/api v0.0.0-20191112020540-7f9008e52f64/go.mod h1:8svLRMiLwQReMTycutfjsaQ0ackWIf8HCT4UcixYLjI=
k8s.io/api v0.17.2 h1:NF1UFXcKN7/OOv1uxdRz3qfra8AHsPav5M93hlV9+Dc=
k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4=
k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8 h1:Iieh/ZEgT3BWwbLD5qEKcY06jKuPEl6zC7gPSehoLw4=
k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8/go.mod h1:llRdnznGEAqC3DcNm6yEj472xaFVfLM7hnYofMb12tQ=
k8s.io/apimachinery v0.0.0-20191109100837-dffb012825f2/go.mod h1:+6CX7hP4aLfX2sb91JYDMIp0VqDSog2kZu0BHe+lP+s=
k8s.io/apimachinery v0.0.0-20191111054156-6eb29fdf75dc h1:hC0UI7qlplCVlRexiPMHwcOCT3IPk9Pgo599vKGOOS4=
k8s.io/apimachinery v0.0.0-20191111054156-6eb29fdf75dc/go.mod h1:+6CX7hP4aLfX2sb91JYDMIp0VqDSog2kZu0BHe+lP+s=
k8s.io/autoscaler/vertical-pod-autoscaler v0.0.0-20191115143342-4cf961056038 h1:2AUVR4J8OIUM1UM1PshlfoXvXD4/XRZat9c/LIfFLOM=
k8s.io/autoscaler/vertical-pod-autoscaler v0.0.0-20191115143342-4cf961056038/go.mod h1:bo2qh32Y1lvDnTWVSlYibXIVVtwZMb0fYAqeyEWNEuI=
k8s.io/apimachinery v0.17.2 h1:hwDQQFbdRlpnnsR64Asdi55GyCaIP/3WQpMmbNBeWr4=
k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
k8s.io/autoscaler/vertical-pod-autoscaler v0.0.0-20200123122250-fa95810cfc1e h1:DNsUsm4mAMM2CfScAsnw6IhldW4zMhByNT/foRlPF8M=
k8s.io/autoscaler/vertical-pod-autoscaler v0.0.0-20200123122250-fa95810cfc1e/go.mod h1:OTEQD8eINHjDYmn6lmYKG8/ETSTCzulpPc13agsXQC0=
k8s.io/client-go v0.0.0-20191016111102-bec269661e48/go.mod h1:hrwktSwYGI4JK+TJA3dMaFyyvHVi/aLarVHpbs8bgCU=
k8s.io/client-go v0.0.0-20191109102209-3c0d1af94be5 h1:HfNU1HaDlojoVyQkUeSvDPvbe+pHysuTSjSVAzXH3xw=
k8s.io/client-go v0.0.0-20191109102209-3c0d1af94be5/go.mod h1:T9KDMwZhkD0ygfa5hs6lPRymsuj92WN84SowSq6gOEw=
k8s.io/client-go v0.17.2 h1:ndIfkfXEGrNhLIgkr0+qhRguSD3u6DCmonepn1O6NYc=
k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI=
k8s.io/code-generator v0.0.0-20191109100332-a9a0d9c0b3aa/go.mod h1:fRFrKVixH946mn5PeglV2fvxbE86JesGi16bsWZ1xz4=
k8s.io/component-base v0.0.0-20191016111319-039242c015a9/go.mod h1:SuWowIgd/dtU/m/iv8OD9eOxp3QZBBhTIiWMsBQvKjI=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=

@ -379,6 +386,10 @@ k8s.io/metrics v0.0.0-20191109111301-80b462294217/go.mod h1:GSVMuBbi34fc7MSnJ1Q/
k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20191030222137-2b95a09bc58d h1:1P0iBJsBzxRmR+dIFnM+Iu4aLxnoa7lBqozW/0uHbT8=
k8s.io/utils v0.0.0-20191030222137-2b95a09bc58d/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20200109141947-94aeca20bf09 h1:sz6xjn8QP74104YNmJpzLbJ+a3ZtHt0tkD0g8vpdWNw=
k8s.io/utils v0.0.0-20200109141947-94aeca20bf09/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=

File diff suppressed because it is too large
@ -1142,7 +1142,7 @@ message EnvVar {
|
|||
// EnvVarSource represents a source for the value of an EnvVar.
|
||||
message EnvVarSource {
|
||||
// Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
|
||||
// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
|
||||
// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
|
||||
// +optional
|
||||
optional ObjectFieldSelector fieldRef = 1;
|
||||
|
||||
|
|
@ -3384,7 +3384,6 @@ message PodSpec {
|
|||
// in the same pod, and the first process in each container will not be assigned PID 1.
|
||||
// HostPID and ShareProcessNamespace cannot both be set.
|
||||
// Optional: Default to false.
|
||||
// This field is beta-level and may be disabled with the PodShareProcessNamespace feature.
|
||||
// +k8s:conversion-gen=false
|
||||
// +optional
|
||||
optional bool shareProcessNamespace = 27;
|
||||
|
|
@ -4742,6 +4741,21 @@ message ServiceSpec {
|
|||
// cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment.
|
||||
// +optional
|
||||
optional string ipFamily = 15;
|
||||
|
||||
// topologyKeys is a preference-order list of topology keys which
|
||||
// implementations of services should use to preferentially sort endpoints
|
||||
// when accessing this Service, it can not be used at the same time as
|
||||
// externalTrafficPolicy=Local.
|
||||
// Topology keys must be valid label keys and at most 16 keys may be specified.
|
||||
// Endpoints are chosen based on the first topology key with available backends.
|
||||
// If this field is specified and all entries have no backends that match
|
||||
// the topology of the client, the service has no backends for that client
|
||||
// and connections should fail.
|
||||
// The special value "*" may be used to mean "any topology". This catch-all
|
||||
// value, if used, only makes sense as the last value in the list.
|
||||
// If this is not specified or empty, no topology constraints will be applied.
|
||||
// +optional
|
||||
repeated string topologyKeys = 16;
|
||||
}
|
||||
|
||||
// ServiceStatus represents the current status of a service.
|
||||
|
|
@ -5257,7 +5271,7 @@ message WindowsSecurityContextOptions {
|
|||
// Defaults to the user specified in image metadata if unspecified.
|
||||
// May also be set in PodSecurityContext. If set in both SecurityContext and
|
||||
// PodSecurityContext, the value specified in SecurityContext takes precedence.
|
||||
// This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag.
|
||||
// This field is beta-level and may be disabled with the WindowsRunAsUserName feature flag.
|
||||
// +optional
|
||||
optional string runAsUserName = 3;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -30,6 +30,8 @@ const (
NamespaceAll string = ""
// NamespaceNodeLease is the namespace where we place node lease objects (used for node heartbeats)
NamespaceNodeLease string = "kube-node-lease"
// TopologyKeyAny is the service topology key that matches any node
TopologyKeyAny string = "*"
)

// Volume represents a named volume in a pod that may be accessed by any container in the pod.
@ -1846,7 +1848,7 @@ type EnvVar struct {
// EnvVarSource represents a source for the value of an EnvVar.
type EnvVarSource struct {
// Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
// +optional
FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"`
// Selects a resource of the container: only resources limits and requests
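
The comment change above documents status.podIPs as a supported downward-API field path. A hedged sketch (not part of this diff) of wiring it into a container environment variable with these vendored types:

```go
package example

import corev1 "k8s.io/api/core/v1"

// PodIPsEnv exposes the pod's IP list through the downward API field
// (status.podIPs) that the updated comment above now mentions.
var PodIPsEnv = corev1.EnvVar{
	Name: "POD_IPS",
	ValueFrom: &corev1.EnvVarSource{
		FieldRef: &corev1.ObjectFieldSelector{FieldPath: "status.podIPs"},
	},
}
```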
@ -2940,7 +2942,6 @@ type PodSpec struct {
// in the same pod, and the first process in each container will not be assigned PID 1.
// HostPID and ShareProcessNamespace cannot both be set.
// Optional: Default to false.
// This field is beta-level and may be disabled with the PodShareProcessNamespace feature.
// +k8s:conversion-gen=false
// +optional
ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" protobuf:"varint,27,opt,name=shareProcessNamespace"`
@ -3827,6 +3828,8 @@ const (
IPv4Protocol IPFamily = "IPv4"
// IPv6Protocol indicates that this IP is IPv6 protocol
IPv6Protocol IPFamily = "IPv6"
// MaxServiceTopologyKeys is the largest number of topology keys allowed on a service
MaxServiceTopologyKeys = 16
)

// ServiceSpec describes the attributes that a user creates on a service.
@ -3941,6 +3944,7 @@ type ServiceSpec struct {
// of peer discovery.
// +optional
PublishNotReadyAddresses bool `json:"publishNotReadyAddresses,omitempty" protobuf:"varint,13,opt,name=publishNotReadyAddresses"`

// sessionAffinityConfig contains the configurations of session affinity.
// +optional
SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"`
@ -3954,6 +3958,21 @@ type ServiceSpec struct {
// cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment.
// +optional
IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"`

// topologyKeys is a preference-order list of topology keys which
// implementations of services should use to preferentially sort endpoints
// when accessing this Service, it can not be used at the same time as
// externalTrafficPolicy=Local.
// Topology keys must be valid label keys and at most 16 keys may be specified.
// Endpoints are chosen based on the first topology key with available backends.
// If this field is specified and all entries have no backends that match
// the topology of the client, the service has no backends for that client
// and connections should fail.
// The special value "*" may be used to mean "any topology". This catch-all
// value, if used, only makes sense as the last value in the list.
// If this is not specified or empty, no topology constraints will be applied.
// +optional
TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"`
}

// ServicePort contains information on service's port.
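
For orientation, a sketch (not part of this commit) of a ServiceSpec setting the two fields added above; the selector, port, and topology key values are illustrative:

```go
package example

import corev1 "k8s.io/api/core/v1"

var ipv4 = corev1.IPv4Protocol

// ExampleSpec prefers node-local endpoints, then same-zone endpoints, then any
// endpoint, and asks for an IPv4 cluster IP.
var ExampleSpec = corev1.ServiceSpec{
	Selector: map[string]string{"app": "example"},
	Ports:    []corev1.ServicePort{{Port: 80}},
	IPFamily: &ipv4,
	TopologyKeys: []string{
		"kubernetes.io/hostname",
		"topology.kubernetes.io/zone",
		corev1.TopologyKeyAny, // "*" catch-all, only sensible as the last entry
	},
}
```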
@ -5789,7 +5808,7 @@ type WindowsSecurityContextOptions struct {
// Defaults to the user specified in image metadata if unspecified.
// May also be set in PodSecurityContext. If set in both SecurityContext and
// PodSecurityContext, the value specified in SecurityContext takes precedence.
// This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag.
// This field is beta-level and may be disabled with the WindowsRunAsUserName feature flag.
// +optional
RunAsUserName *string `json:"runAsUserName,omitempty" protobuf:"bytes,3,opt,name=runAsUserName"`
}
@ -566,7 +566,7 @@ func (EnvVar) SwaggerDoc() map[string]string {
|
|||
|
||||
var map_EnvVarSource = map[string]string{
|
||||
"": "EnvVarSource represents a source for the value of an EnvVar.",
|
||||
"fieldRef": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.",
|
||||
"fieldRef": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.",
|
||||
"resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.",
|
||||
"configMapKeyRef": "Selects a key of a ConfigMap.",
|
||||
"secretKeyRef": "Selects a key of a secret in the pod's namespace",
|
||||
|
|
@ -1614,7 +1614,7 @@ var map_PodSpec = map[string]string{
|
|||
"hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
|
||||
"hostPID": "Use the host's pid namespace. Optional: Default to false.",
|
||||
"hostIPC": "Use the host's ipc namespace. Optional: Default to false.",
|
||||
"shareProcessNamespace": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. This field is beta-level and may be disabled with the PodShareProcessNamespace feature.",
|
||||
"shareProcessNamespace": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.",
|
||||
"securityContext": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.",
|
||||
"imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod",
|
||||
"hostname": "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.",
|
||||
|
|
@ -2204,6 +2204,7 @@ var map_ServiceSpec = map[string]string{
|
|||
"publishNotReadyAddresses": "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery.",
|
||||
"sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.",
|
||||
"ipFamily": "ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which allocate external load-balancers should use the same IP family. Endpoints for this Service will be of this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment.",
|
||||
"topologyKeys": "topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \"*\" may be used to mean \"any topology\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied.",
|
||||
}
|
||||
|
||||
func (ServiceSpec) SwaggerDoc() map[string]string {
|
||||
|
|
@ -2457,7 +2458,7 @@ var map_WindowsSecurityContextOptions = map[string]string{
|
|||
"": "WindowsSecurityContextOptions contain Windows-specific options and credentials.",
|
||||
"gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.",
|
||||
"gmsaCredentialSpec": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.",
|
||||
"runAsUserName": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag.",
|
||||
"runAsUserName": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. This field is beta-level and may be disabled with the WindowsRunAsUserName feature flag.",
|
||||
}
|
||||
|
||||
func (WindowsSecurityContextOptions) SwaggerDoc() map[string]string {
|
||||
|
|
|
|||
|
|
@ -30,6 +30,10 @@ const (
LabelOSStable = "kubernetes.io/os"
LabelArchStable = "kubernetes.io/arch"

// LabelWindowsBuild is used on Windows nodes to specify the Windows build number starting with v1.17.0.
// It's in the format MajorVersion.MinorVersion.BuildNumber (for ex: 10.0.17763)
LabelWindowsBuild = "node.kubernetes.io/windows-build"

// LabelNamespaceSuffixKubelet is an allowed label namespace suffix kubelets can self-set ([*.]kubelet.kubernetes.io/*)
LabelNamespaceSuffixKubelet = "kubelet.kubernetes.io"
// LabelNamespaceSuffixNode is an allowed label namespace suffix kubelets can self-set ([*.]node.kubernetes.io/*)
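
A small illustration (an assumption, not taken from this diff) of how the new LabelWindowsBuild constant might appear in a node selector; the build number is the example value from the comment above:

```go
package example

import corev1 "k8s.io/api/core/v1"

// WindowsNodeSelector pins a pod to Windows nodes running a specific build.
var WindowsNodeSelector = map[string]string{
	corev1.LabelOSStable:     "windows",
	corev1.LabelWindowsBuild: "10.0.17763",
}
```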
@ -5186,6 +5186,11 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
*out = new(IPFamily)
**out = **in
}
if in.TopologyKeys != nil {
in, out := &in.TopologyKeys, &out.TopologyKeys
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
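
The generated deep-copy above now duplicates the IPFamily pointer and the TopologyKeys slice. A brief sketch (illustrative only) of why that matters to callers:

```go
package example

import corev1 "k8s.io/api/core/v1"

// mutateCopy shows that a DeepCopy'd spec owns its own TopologyKeys slice,
// so appending here does not modify the original spec.
func mutateCopy(spec *corev1.ServiceSpec) *corev1.ServiceSpec {
	out := spec.DeepCopy()
	out.TopologyKeys = append(out.TopologyKeys, corev1.TopologyKeyAny)
	return out
}
```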
@ -200,54 +200,54 @@ func init() {
|
|||
}
|
||||
|
||||
var fileDescriptor_772f83c5b34e07a5 = []byte{
|
||||
// 744 bytes of a gzipped FileDescriptorProto
|
||||
// 746 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4b, 0x6f, 0xd3, 0x4a,
|
||||
0x14, 0x8e, 0x9b, 0x5a, 0xb2, 0x27, 0x8d, 0x6e, 0x3b, 0xba, 0x8b, 0x28, 0xf7, 0x5e, 0x3b, 0xca,
|
||||
0x5d, 0x10, 0xa9, 0x30, 0x26, 0x15, 0x45, 0x15, 0x6c, 0xa8, 0x51, 0x79, 0x48, 0x3c, 0xc2, 0xd0,
|
||||
0x05, 0x42, 0x2c, 0x98, 0xd8, 0x53, 0xc7, 0x24, 0xf1, 0x58, 0xf6, 0x24, 0x52, 0x76, 0xfc, 0x04,
|
||||
0x7e, 0x10, 0x4b, 0x84, 0xba, 0xec, 0xb2, 0x2b, 0x43, 0xcd, 0xbf, 0xe8, 0x0a, 0xcd, 0xf8, 0x95,
|
||||
0x12, 0x1e, 0xd9, 0xcd, 0x7c, 0x73, 0xbe, 0xef, 0x9c, 0xf3, 0xcd, 0x39, 0xe0, 0xc1, 0xf8, 0x20,
|
||||
0x46, 0x3e, 0xb3, 0xc6, 0xb3, 0x21, 0x8d, 0x02, 0xca, 0x69, 0x6c, 0xcd, 0x69, 0xe0, 0xb2, 0xc8,
|
||||
0xca, 0x1f, 0x48, 0xe8, 0x5b, 0xae, 0x1f, 0x3b, 0x6c, 0x4e, 0xa3, 0x85, 0x35, 0xef, 0x93, 0x49,
|
||||
0x38, 0x22, 0x7d, 0xcb, 0xa3, 0x01, 0x8d, 0x08, 0xa7, 0x2e, 0x0a, 0x23, 0xc6, 0x19, 0xfc, 0x2f,
|
||||
0x0b, 0x47, 0x24, 0xf4, 0x51, 0x19, 0x8e, 0x8a, 0xf0, 0xf6, 0x0d, 0xcf, 0xe7, 0xa3, 0xd9, 0x10,
|
||||
0x39, 0x6c, 0x6a, 0x79, 0xcc, 0x63, 0x96, 0x64, 0x0d, 0x67, 0x27, 0xf2, 0x26, 0x2f, 0xf2, 0x94,
|
||||
0xa9, 0xb5, 0xbb, 0x4b, 0xc9, 0x1d, 0x16, 0x51, 0x6b, 0xbe, 0x92, 0xb1, 0x7d, 0xab, 0x8a, 0x99,
|
||||
0x12, 0x67, 0xe4, 0x07, 0xa2, 0xbe, 0x70, 0xec, 0x09, 0x20, 0xb6, 0xa6, 0x94, 0x93, 0x9f, 0xb1,
|
||||
0xac, 0x5f, 0xb1, 0xa2, 0x59, 0xc0, 0xfd, 0x29, 0x5d, 0x21, 0xdc, 0xfe, 0x13, 0x21, 0x76, 0x46,
|
||||
0x74, 0x4a, 0x7e, 0xe4, 0x75, 0x3f, 0xd6, 0x81, 0x76, 0x14, 0xb8, 0x21, 0xf3, 0x03, 0x0e, 0x77,
|
||||
0x81, 0x4e, 0x5c, 0x37, 0xa2, 0x71, 0x4c, 0xe3, 0x96, 0xd2, 0xa9, 0xf7, 0x74, 0xbb, 0x99, 0x26,
|
||||
0xa6, 0x7e, 0x58, 0x80, 0xb8, 0x7a, 0x87, 0x14, 0x00, 0x87, 0x05, 0xae, 0xcf, 0x7d, 0x16, 0xc4,
|
||||
0xad, 0x8d, 0x8e, 0xd2, 0x6b, 0xec, 0xf5, 0xd1, 0x6f, 0xfd, 0x45, 0x45, 0xa6, 0xfb, 0x25, 0xd1,
|
||||
0x86, 0xa7, 0x89, 0x59, 0x4b, 0x13, 0x13, 0x54, 0x18, 0x5e, 0x12, 0x86, 0x3d, 0xa0, 0x8d, 0x58,
|
||||
0xcc, 0x03, 0x32, 0xa5, 0xad, 0x7a, 0x47, 0xe9, 0xe9, 0xf6, 0x56, 0x9a, 0x98, 0xda, 0xa3, 0x1c,
|
||||
0xc3, 0xe5, 0x2b, 0x1c, 0x00, 0x9d, 0x93, 0xc8, 0xa3, 0x1c, 0xd3, 0x93, 0xd6, 0xa6, 0xac, 0xe7,
|
||||
0xff, 0xe5, 0x7a, 0xc4, 0x0f, 0xa1, 0x79, 0x1f, 0x3d, 0x1f, 0xbe, 0xa3, 0x8e, 0x08, 0xa2, 0x11,
|
||||
0x0d, 0x1c, 0x9a, 0xb5, 0x78, 0x5c, 0x30, 0x71, 0x25, 0x02, 0x1d, 0xa0, 0x71, 0x16, 0xb2, 0x09,
|
||||
0xf3, 0x16, 0x2d, 0xb5, 0x53, 0xef, 0x35, 0xf6, 0xf6, 0xd7, 0x6c, 0x10, 0x1d, 0xe7, 0xbc, 0xa3,
|
||||
0x80, 0x47, 0x0b, 0x7b, 0x3b, 0x6f, 0x52, 0x2b, 0x60, 0x5c, 0x0a, 0xb7, 0xef, 0x82, 0xe6, 0x95,
|
||||
0x60, 0xb8, 0x0d, 0xea, 0x63, 0xba, 0x68, 0x29, 0xa2, 0x59, 0x2c, 0x8e, 0xf0, 0x6f, 0xa0, 0xce,
|
||||
0xc9, 0x64, 0x46, 0xa5, 0xcb, 0x3a, 0xce, 0x2e, 0x77, 0x36, 0x0e, 0x94, 0xee, 0x3e, 0x80, 0xab,
|
||||
0x9e, 0x42, 0x13, 0xa8, 0x11, 0x25, 0x6e, 0xa6, 0xa1, 0xd9, 0x7a, 0x9a, 0x98, 0x2a, 0x16, 0x00,
|
||||
0xce, 0xf0, 0xee, 0x67, 0x05, 0x6c, 0x15, 0xbc, 0x01, 0x8b, 0x38, 0xfc, 0x17, 0x6c, 0x4a, 0x87,
|
||||
0x65, 0x52, 0x5b, 0x4b, 0x13, 0x73, 0xf3, 0x99, 0x70, 0x57, 0xa2, 0xf0, 0x21, 0xd0, 0xe4, 0xb4,
|
||||
0x38, 0x6c, 0x92, 0x95, 0x60, 0xef, 0x8a, 0x66, 0x06, 0x39, 0x76, 0x99, 0x98, 0xff, 0xac, 0x6e,
|
||||
0x02, 0x2a, 0x9e, 0x71, 0x49, 0x16, 0x69, 0x42, 0x16, 0x71, 0xf9, 0x91, 0x6a, 0x96, 0x46, 0xa4,
|
||||
0xc7, 0x12, 0x85, 0x7d, 0xd0, 0x20, 0x61, 0x58, 0xd0, 0xe4, 0x17, 0xea, 0xf6, 0x5f, 0x69, 0x62,
|
||||
0x36, 0x0e, 0x2b, 0x18, 0x2f, 0xc7, 0x74, 0xbf, 0x6c, 0x80, 0x66, 0xd1, 0xc8, 0xcb, 0x89, 0xef,
|
||||
0x50, 0xf8, 0x16, 0x68, 0x62, 0xa9, 0x5c, 0xc2, 0x89, 0xec, 0xa6, 0xb1, 0x77, 0x73, 0xe9, 0xcf,
|
||||
0xca, 0xdd, 0x40, 0xe1, 0xd8, 0x13, 0x40, 0x8c, 0x44, 0x74, 0x35, 0x16, 0x4f, 0x29, 0x27, 0xd5,
|
||||
0x4c, 0x56, 0x18, 0x2e, 0x55, 0xe1, 0x3d, 0xd0, 0xc8, 0xb7, 0xe0, 0x78, 0x11, 0xd2, 0xbc, 0x4c,
|
||||
0x43, 0x96, 0x59, 0xc1, 0x97, 0x57, 0xaf, 0x78, 0x99, 0x02, 0x5f, 0x01, 0x9d, 0xe6, 0x45, 0x8b,
|
||||
0xcd, 0x11, 0x83, 0x75, 0x6d, 0xcd, 0xc1, 0xb2, 0x77, 0xf2, 0xda, 0xf4, 0x02, 0x89, 0x71, 0x25,
|
||||
0x06, 0x07, 0x40, 0x15, 0x56, 0xc6, 0xad, 0xba, 0x54, 0xdd, 0x5d, 0x53, 0x55, 0x7c, 0x82, 0xdd,
|
||||
0xcc, 0x95, 0x55, 0x71, 0x8b, 0x71, 0x26, 0xd4, 0xfd, 0xa4, 0x80, 0x9d, 0x2b, 0x0e, 0x3f, 0xf1,
|
||||
0x63, 0x0e, 0xdf, 0xac, 0xb8, 0x8c, 0xd6, 0x73, 0x59, 0xb0, 0xa5, 0xc7, 0xe5, 0x4a, 0x14, 0xc8,
|
||||
0x92, 0xc3, 0x2f, 0x80, 0xea, 0x73, 0x3a, 0x2d, 0xbc, 0xb9, 0xbe, 0x66, 0x17, 0xb2, 0xbc, 0xaa,
|
||||
0x8d, 0xc7, 0x42, 0x02, 0x67, 0x4a, 0x36, 0x3a, 0xbd, 0x30, 0x6a, 0x67, 0x17, 0x46, 0xed, 0xfc,
|
||||
0xc2, 0xa8, 0xbd, 0x4f, 0x0d, 0xe5, 0x34, 0x35, 0x94, 0xb3, 0xd4, 0x50, 0xce, 0x53, 0x43, 0xf9,
|
||||
0x9a, 0x1a, 0xca, 0x87, 0x6f, 0x46, 0xed, 0xb5, 0x56, 0x68, 0x7e, 0x0f, 0x00, 0x00, 0xff, 0xff,
|
||||
0x99, 0xbb, 0x72, 0xd7, 0x71, 0x06, 0x00, 0x00,
|
||||
0x5d, 0x10, 0xa9, 0x30, 0x26, 0x15, 0x45, 0x15, 0xac, 0x6a, 0x28, 0x0f, 0x89, 0x47, 0x18, 0xba,
|
||||
0x40, 0x88, 0x05, 0x13, 0x7b, 0xea, 0x98, 0x24, 0x1e, 0xcb, 0x9e, 0x44, 0xca, 0x8e, 0x9f, 0xc0,
|
||||
0x0f, 0x62, 0x89, 0x50, 0x97, 0x5d, 0x76, 0x65, 0x51, 0xf7, 0x5f, 0x74, 0x85, 0x66, 0xfc, 0x4a,
|
||||
0x09, 0x8f, 0xec, 0x66, 0xbe, 0x39, 0xdf, 0x77, 0xce, 0xf9, 0xe6, 0x1c, 0xf0, 0x68, 0x7c, 0x10,
|
||||
0x23, 0x9f, 0x59, 0xe3, 0xd9, 0x90, 0x46, 0x01, 0xe5, 0x34, 0xb6, 0xe6, 0x34, 0x70, 0x59, 0x64,
|
||||
0xe5, 0x0f, 0x24, 0xf4, 0x2d, 0xd7, 0x8f, 0x1d, 0x36, 0xa7, 0xd1, 0xc2, 0x9a, 0xf7, 0xc9, 0x24,
|
||||
0x1c, 0x91, 0xbe, 0xe5, 0xd1, 0x80, 0x46, 0x84, 0x53, 0x17, 0x85, 0x11, 0xe3, 0x0c, 0xfe, 0x97,
|
||||
0x85, 0x23, 0x12, 0xfa, 0xa8, 0x0c, 0x47, 0x45, 0x78, 0xfb, 0x96, 0xe7, 0xf3, 0xd1, 0x6c, 0x88,
|
||||
0x1c, 0x36, 0xb5, 0x3c, 0xe6, 0x31, 0x4b, 0xb2, 0x86, 0xb3, 0x13, 0x79, 0x93, 0x17, 0x79, 0xca,
|
||||
0xd4, 0xda, 0xdd, 0xa5, 0xe4, 0x0e, 0x8b, 0xa8, 0x35, 0x5f, 0xc9, 0xd8, 0xbe, 0x53, 0xc5, 0x4c,
|
||||
0x89, 0x33, 0xf2, 0x03, 0x51, 0x5f, 0x38, 0xf6, 0x04, 0x10, 0x5b, 0x53, 0xca, 0xc9, 0xcf, 0x58,
|
||||
0xd6, 0xaf, 0x58, 0xd1, 0x2c, 0xe0, 0xfe, 0x94, 0xae, 0x10, 0xee, 0xfe, 0x89, 0x10, 0x3b, 0x23,
|
||||
0x3a, 0x25, 0x3f, 0xf2, 0xba, 0x9f, 0xeb, 0x40, 0x3b, 0x0a, 0xdc, 0x90, 0xf9, 0x01, 0x87, 0xbb,
|
||||
0x40, 0x27, 0xae, 0x1b, 0xd1, 0x38, 0xa6, 0x71, 0x4b, 0xe9, 0xd4, 0x7b, 0xba, 0xdd, 0x4c, 0x13,
|
||||
0x53, 0x3f, 0x2c, 0x40, 0x5c, 0xbd, 0x43, 0x0a, 0x80, 0xc3, 0x02, 0xd7, 0xe7, 0x3e, 0x0b, 0xe2,
|
||||
0xd6, 0x46, 0x47, 0xe9, 0x35, 0xf6, 0xfa, 0xe8, 0xb7, 0xfe, 0xa2, 0x22, 0xd3, 0x83, 0x92, 0x68,
|
||||
0xc3, 0xd3, 0xc4, 0xac, 0xa5, 0x89, 0x09, 0x2a, 0x0c, 0x2f, 0x09, 0xc3, 0x1e, 0xd0, 0x46, 0x2c,
|
||||
0xe6, 0x01, 0x99, 0xd2, 0x56, 0xbd, 0xa3, 0xf4, 0x74, 0x7b, 0x2b, 0x4d, 0x4c, 0xed, 0x49, 0x8e,
|
||||
0xe1, 0xf2, 0x15, 0x0e, 0x80, 0xce, 0x49, 0xe4, 0x51, 0x8e, 0xe9, 0x49, 0x6b, 0x53, 0xd6, 0xf3,
|
||||
0xff, 0x72, 0x3d, 0xe2, 0x87, 0xd0, 0xbc, 0x8f, 0x5e, 0x0e, 0x3f, 0x50, 0x47, 0x04, 0xd1, 0x88,
|
||||
0x06, 0x0e, 0xcd, 0x5a, 0x3c, 0x2e, 0x98, 0xb8, 0x12, 0x81, 0x0e, 0xd0, 0x38, 0x0b, 0xd9, 0x84,
|
||||
0x79, 0x8b, 0x96, 0xda, 0xa9, 0xf7, 0x1a, 0x7b, 0xfb, 0x6b, 0x36, 0x88, 0x8e, 0x73, 0xde, 0x51,
|
||||
0xc0, 0xa3, 0x85, 0xbd, 0x9d, 0x37, 0xa9, 0x15, 0x30, 0x2e, 0x85, 0xdb, 0xf7, 0x41, 0xf3, 0x5a,
|
||||
0x30, 0xdc, 0x06, 0xf5, 0x31, 0x5d, 0xb4, 0x14, 0xd1, 0x2c, 0x16, 0x47, 0xf8, 0x37, 0x50, 0xe7,
|
||||
0x64, 0x32, 0xa3, 0xd2, 0x65, 0x1d, 0x67, 0x97, 0x7b, 0x1b, 0x07, 0x4a, 0x77, 0x1f, 0xc0, 0x55,
|
||||
0x4f, 0xa1, 0x09, 0xd4, 0x88, 0x12, 0x37, 0xd3, 0xd0, 0x6c, 0x3d, 0x4d, 0x4c, 0x15, 0x0b, 0x00,
|
||||
0x67, 0x78, 0xf7, 0xab, 0x02, 0xb6, 0x0a, 0xde, 0x80, 0x45, 0x1c, 0xfe, 0x0b, 0x36, 0xa5, 0xc3,
|
||||
0x32, 0xa9, 0xad, 0xa5, 0x89, 0xb9, 0xf9, 0x42, 0xb8, 0x2b, 0x51, 0xf8, 0x18, 0x68, 0x72, 0x5a,
|
||||
0x1c, 0x36, 0xc9, 0x4a, 0xb0, 0x77, 0x45, 0x33, 0x83, 0x1c, 0xbb, 0x4a, 0xcc, 0x7f, 0x56, 0x37,
|
||||
0x01, 0x15, 0xcf, 0xb8, 0x24, 0x8b, 0x34, 0x21, 0x8b, 0xb8, 0xfc, 0x48, 0x35, 0x4b, 0x23, 0xd2,
|
||||
0x63, 0x89, 0xc2, 0x3e, 0x68, 0x90, 0x30, 0x2c, 0x68, 0xf2, 0x0b, 0x75, 0xfb, 0xaf, 0x34, 0x31,
|
||||
0x1b, 0x87, 0x15, 0x8c, 0x97, 0x63, 0xba, 0x97, 0x1b, 0xa0, 0x59, 0x34, 0xf2, 0x7a, 0xe2, 0x3b,
|
||||
0x14, 0xbe, 0x07, 0x9a, 0x58, 0x2a, 0x97, 0x70, 0x22, 0xbb, 0x69, 0xec, 0xdd, 0x5e, 0xfa, 0xb3,
|
||||
0x72, 0x37, 0x50, 0x38, 0xf6, 0x04, 0x10, 0x23, 0x11, 0x5d, 0x8d, 0xc5, 0x73, 0xca, 0x49, 0x35,
|
||||
0x93, 0x15, 0x86, 0x4b, 0x55, 0xf8, 0x10, 0x34, 0xf2, 0x2d, 0x38, 0x5e, 0x84, 0x34, 0x2f, 0xb3,
|
||||
0x9b, 0x53, 0x1a, 0x87, 0xd5, 0xd3, 0xd5, 0xf5, 0x2b, 0x5e, 0xa6, 0xc1, 0x37, 0x40, 0xa7, 0x79,
|
||||
0xe1, 0x62, 0x7b, 0xc4, 0x70, 0xdd, 0x58, 0x73, 0xb8, 0xec, 0x9d, 0x3c, 0x99, 0x5e, 0x20, 0x31,
|
||||
0xae, 0xc4, 0xe0, 0x00, 0xa8, 0xc2, 0xce, 0xb8, 0x55, 0x97, 0xaa, 0xbb, 0x6b, 0xaa, 0x8a, 0x8f,
|
||||
0xb0, 0x9b, 0xb9, 0xb2, 0x2a, 0x6e, 0x31, 0xce, 0x84, 0xba, 0x5f, 0x14, 0xb0, 0x73, 0xcd, 0xe5,
|
||||
0x67, 0x7e, 0xcc, 0xe1, 0xbb, 0x15, 0xa7, 0xd1, 0x7a, 0x4e, 0x0b, 0xb6, 0xf4, 0xb9, 0x5c, 0x8b,
|
||||
0x02, 0x59, 0x72, 0xf9, 0x15, 0x50, 0x7d, 0x4e, 0xa7, 0x85, 0x37, 0x37, 0xd7, 0xec, 0x42, 0x96,
|
||||
0x57, 0xb5, 0xf1, 0x54, 0x48, 0xe0, 0x4c, 0xc9, 0x46, 0xa7, 0x17, 0x46, 0xed, 0xec, 0xc2, 0xa8,
|
||||
0x9d, 0x5f, 0x18, 0xb5, 0x8f, 0xa9, 0xa1, 0x9c, 0xa6, 0x86, 0x72, 0x96, 0x1a, 0xca, 0x79, 0x6a,
|
||||
0x28, 0xdf, 0x52, 0x43, 0xf9, 0x74, 0x69, 0xd4, 0xde, 0x6a, 0x85, 0xe6, 0xf7, 0x00, 0x00, 0x00,
|
||||
0xff, 0xff, 0x65, 0x85, 0x5a, 0x9b, 0x75, 0x06, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *Endpoint) Marshal() (dAtA []byte, err error) {
|
||||
|
|
@ -437,13 +437,11 @@ func (m *EndpointSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
|||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.AddressType != nil {
|
||||
i -= len(*m.AddressType)
|
||||
copy(dAtA[i:], *m.AddressType)
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AddressType)))
|
||||
i--
|
||||
dAtA[i] = 0x22
|
||||
}
|
||||
i -= len(m.AddressType)
|
||||
copy(dAtA[i:], m.AddressType)
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(len(m.AddressType)))
|
||||
i--
|
||||
dAtA[i] = 0x22
|
||||
if len(m.Ports) > 0 {
|
||||
for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
|
|
@ -632,10 +630,8 @@ func (m *EndpointSlice) Size() (n int) {
|
|||
n += 1 + l + sovGenerated(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.AddressType != nil {
|
||||
l = len(*m.AddressType)
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
}
|
||||
l = len(m.AddressType)
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
return n
|
||||
}
|
||||
|
||||
|
|
@ -727,7 +723,7 @@ func (this *EndpointSlice) String() string {
|
|||
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
|
||||
`Endpoints:` + repeatedStringForEndpoints + `,`,
|
||||
`Ports:` + repeatedStringForPorts + `,`,
|
||||
`AddressType:` + valueToStringGenerated(this.AddressType) + `,`,
|
||||
`AddressType:` + fmt.Sprintf("%v", this.AddressType) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
|
|
@ -1476,8 +1472,7 @@ func (m *EndpointSlice) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
s := AddressType(dAtA[iNdEx:postIndex])
|
||||
m.AddressType = &s
|
||||
m.AddressType = AddressType(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
|
|
|
|||
|
|
@ -32,12 +32,10 @@ option go_package = "v1alpha1";
|
|||
// Endpoint represents a single logical "backend" implementing a service.
|
||||
message Endpoint {
|
||||
// addresses of this endpoint. The contents of this field are interpreted
|
||||
// according to the corresponding EndpointSlice addressType field. This
|
||||
// allows for cases like dual-stack networking where both IPv4 and IPv6
|
||||
// addresses would be included with the IP addressType. Consumers (e.g.
|
||||
// kube-proxy) must handle different types of addresses in the context of
|
||||
// their own capabilities. This must contain at least one address but no
|
||||
// more than 100.
|
||||
// according to the corresponding EndpointSlice addressType field. Consumers
|
||||
// must handle different types of addresses in the context of their own
|
||||
// capabilities. This must contain at least one address but no more than
|
||||
// 100.
|
||||
// +listType=set
|
||||
repeated string addresses = 1;
|
||||
|
||||
|
|
@ -123,13 +121,12 @@ message EndpointSlice {
|
|||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||
|
||||
// addressType specifies the type of address carried by this EndpointSlice.
|
||||
// All addresses in this slice must be the same type. The following address
|
||||
// types are currently supported:
|
||||
// * IP: Represents an IP Address. This can include both IPv4 and IPv6
|
||||
// addresses.
|
||||
// All addresses in this slice must be the same type. This field is
|
||||
// immutable after creation. The following address types are currently
|
||||
// supported:
|
||||
// * IPv4: Represents an IPv4 Address.
|
||||
// * IPv6: Represents an IPv6 Address.
|
||||
// * FQDN: Represents a Fully Qualified Domain Name.
|
||||
// Default is IP
|
||||
// +optional
|
||||
optional string addressType = 4;
|
||||
|
||||
// endpoints is a list of unique endpoints in this slice. Each slice may
|
||||
|
|
|
|||
|
|
@ -33,14 +33,13 @@ type EndpointSlice struct {
|
|||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
// addressType specifies the type of address carried by this EndpointSlice.
|
||||
// All addresses in this slice must be the same type. The following address
|
||||
// types are currently supported:
|
||||
// * IP: Represents an IP Address. This can include both IPv4 and IPv6
|
||||
// addresses.
|
||||
// All addresses in this slice must be the same type. This field is
|
||||
// immutable after creation. The following address types are currently
|
||||
// supported:
|
||||
// * IPv4: Represents an IPv4 Address.
|
||||
// * IPv6: Represents an IPv6 Address.
|
||||
// * FQDN: Represents a Fully Qualified Domain Name.
|
||||
// Default is IP
|
||||
// +optional
|
||||
AddressType *AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"`
|
||||
AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"`
|
||||
// endpoints is a list of unique endpoints in this slice. Each slice may
|
||||
// include a maximum of 1000 endpoints.
|
||||
// +listType=atomic
|
||||
|
|
@ -59,22 +58,27 @@ type EndpointSlice struct {
|
|||
type AddressType string
|
||||
|
||||
const (
|
||||
// AddressTypeIP represents an IP Address. Inclusive of IPv4 and IPv6
|
||||
// addresses.
|
||||
// AddressTypeIP represents an IP Address.
|
||||
// This address type has been deprecated and has been replaced by the IPv4
|
||||
// and IPv6 address types. New resources with this address type will be
|
||||
// considered invalid. This will be fully removed in 1.18.
|
||||
// +deprecated
|
||||
AddressTypeIP = AddressType("IP")
|
||||
// AddressTypeFQDN represents a Fully Qualified Domain Name.
|
||||
// AddressTypeIPv4 represents an IPv4 Address.
|
||||
AddressTypeIPv4 = AddressType(v1.IPv4Protocol)
|
||||
// AddressTypeIPv6 represents an IPv6 Address.
|
||||
AddressTypeIPv6 = AddressType(v1.IPv6Protocol)
|
||||
// AddressTypeFQDN represents a FQDN.
|
||||
AddressTypeFQDN = AddressType("FQDN")
|
||||
)
|
||||
|
||||
// Endpoint represents a single logical "backend" implementing a service.
|
||||
type Endpoint struct {
|
||||
// addresses of this endpoint. The contents of this field are interpreted
|
||||
// according to the corresponding EndpointSlice addressType field. This
|
||||
// allows for cases like dual-stack networking where both IPv4 and IPv6
|
||||
// addresses would be included with the IP addressType. Consumers (e.g.
|
||||
// kube-proxy) must handle different types of addresses in the context of
|
||||
// their own capabilities. This must contain at least one address but no
|
||||
// more than 100.
|
||||
// according to the corresponding EndpointSlice addressType field. Consumers
|
||||
// must handle different types of addresses in the context of their own
|
||||
// capabilities. This must contain at least one address but no more than
|
||||
// 100.
|
||||
// +listType=set
|
||||
Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"`
|
||||
// conditions contains information about the current status of the endpoint.
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ package v1alpha1
|
|||
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
|
||||
var map_Endpoint = map[string]string{
|
||||
"": "Endpoint represents a single logical \"backend\" implementing a service.",
|
||||
"addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. This allows for cases like dual-stack networking where both IPv4 and IPv6 addresses would be included with the IP addressType. Consumers (e.g. kube-proxy) must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.",
|
||||
"addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.",
|
||||
"conditions": "conditions contains information about the current status of the endpoint.",
|
||||
"hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) validation.",
|
||||
"targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.",
|
||||
|
|
@ -64,7 +64,7 @@ func (EndpointPort) SwaggerDoc() map[string]string {
|
|||
var map_EndpointSlice = map[string]string{
|
||||
"": "EndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints.",
|
||||
"metadata": "Standard object's metadata.",
|
||||
"addressType": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. The following address types are currently supported: * IP: Represents an IP Address. This can include both IPv4 and IPv6\n addresses.\n* FQDN: Represents a Fully Qualified Domain Name. Default is IP",
|
||||
"addressType": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name.",
|
||||
"endpoints": "endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints.",
|
||||
"ports": "ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \"all ports\". Each slice may include a maximum of 100 ports.",
|
||||
}
|
||||
|
|
|
|||
|
|
@ -19,4 +19,10 @@ package v1alpha1
|
|||
const (
|
||||
// LabelServiceName is used to indicate the name of a Kubernetes service.
|
||||
LabelServiceName = "kubernetes.io/service-name"
|
||||
// LabelManagedBy is used to indicate the controller or entity that manages
|
||||
// an EndpointSlice. This label aims to enable different EndpointSlice
|
||||
// objects to be managed by different controllers or entities within the
|
||||
// same cluster. It is highly recommended to configure this label for all
|
||||
// EndpointSlices.
|
||||
LabelManagedBy = "endpointslice.kubernetes.io/managed-by"
|
||||
)
|
||||
|
|
|
|||
|
|
@ -126,11 +126,6 @@ func (in *EndpointSlice) DeepCopyInto(out *EndpointSlice) {
|
|||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
if in.AddressType != nil {
|
||||
in, out := &in.AddressType, &out.AddressType
|
||||
*out = new(AddressType)
|
||||
**out = **in
|
||||
}
|
||||
if in.Endpoints != nil {
|
||||
in, out := &in.Endpoints, &out.Endpoints
|
||||
*out = make([]Endpoint, len(*in))
|
||||
|
|
|
|||
|
|
@ -0,0 +1,22 @@
|
|||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// +k8s:deepcopy-gen=package
|
||||
// +k8s:protobuf-gen=package
|
||||
// +k8s:openapi-gen=true
|
||||
// +groupName=discovery.k8s.io
|
||||
|
||||
package v1beta1 // import "k8s.io/api/discovery/v1beta1"
|
||||
File diff suppressed because it is too large
@ -0,0 +1,157 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
|
||||
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
|
||||
|
||||
syntax = 'proto2';
|
||||
|
||||
package k8s.io.api.discovery.v1beta1;
|
||||
|
||||
import "k8s.io/api/core/v1/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
|
||||
|
||||
// Package-wide variables from generator "generated".
|
||||
option go_package = "v1beta1";
|
||||
|
||||
// Endpoint represents a single logical "backend" implementing a service.
|
||||
message Endpoint {
|
||||
// addresses of this endpoint. The contents of this field are interpreted
|
||||
// according to the corresponding EndpointSlice addressType field. Consumers
|
||||
// must handle different types of addresses in the context of their own
|
||||
// capabilities. This must contain at least one address but no more than
|
||||
// 100.
|
||||
// +listType=set
|
||||
repeated string addresses = 1;
|
||||
|
||||
// conditions contains information about the current status of the endpoint.
|
||||
optional EndpointConditions conditions = 2;
|
||||
|
||||
// hostname of this endpoint. This field may be used by consumers of
|
||||
// endpoints to distinguish endpoints from each other (e.g. in DNS names).
|
||||
// Multiple endpoints which use the same hostname should be considered
|
||||
// fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123)
|
||||
// validation.
|
||||
// +optional
|
||||
optional string hostname = 3;
|
||||
|
||||
// targetRef is a reference to a Kubernetes object that represents this
|
||||
// endpoint.
|
||||
// +optional
|
||||
optional k8s.io.api.core.v1.ObjectReference targetRef = 4;
|
||||
|
||||
// topology contains arbitrary topology information associated with the
|
||||
// endpoint. These key/value pairs must conform with the label format.
|
||||
// https://kubernetes.io/docs/concepts/overview/working-with-objects/labels
|
||||
// Topology may include a maximum of 16 key/value pairs. This includes, but
|
||||
// is not limited to the following well known keys:
|
||||
// * kubernetes.io/hostname: the value indicates the hostname of the node
|
||||
// where the endpoint is located. This should match the corresponding
|
||||
// node label.
|
||||
// * topology.kubernetes.io/zone: the value indicates the zone where the
|
||||
// endpoint is located. This should match the corresponding node label.
|
||||
// * topology.kubernetes.io/region: the value indicates the region where the
|
||||
// endpoint is located. This should match the corresponding node label.
|
||||
// +optional
|
||||
map<string, string> topology = 5;
|
||||
}
|
||||
|
||||
// EndpointConditions represents the current condition of an endpoint.
|
||||
message EndpointConditions {
|
||||
// ready indicates that this endpoint is prepared to receive traffic,
|
||||
// according to whatever system is managing the endpoint. A nil value
|
||||
// indicates an unknown state. In most cases consumers should interpret this
|
||||
// unknown state as ready.
|
||||
// +optional
|
||||
optional bool ready = 1;
|
||||
}
|
||||
|
||||
// EndpointPort represents a Port used by an EndpointSlice
|
||||
message EndpointPort {
|
||||
// The name of this port. All ports in an EndpointSlice must have a unique
|
||||
// name. If the EndpointSlice is derived from a Kubernetes service, this
|
||||
// corresponds to the Service.ports[].name.
|
||||
// Name must either be an empty string or pass DNS_LABEL validation:
|
||||
// * must be no more than 63 characters long.
|
||||
// * must consist of lower case alphanumeric characters or '-'.
|
||||
// * must start and end with an alphanumeric character.
|
||||
// Default is empty string.
|
||||
optional string name = 1;
|
||||
|
||||
// The IP protocol for this port.
|
||||
// Must be UDP, TCP, or SCTP.
|
||||
// Default is TCP.
|
||||
optional string protocol = 2;
|
||||
|
||||
// The port number of the endpoint.
|
||||
// If this is not specified, ports are not restricted and must be
|
||||
// interpreted in the context of the specific consumer.
|
||||
optional int32 port = 3;
|
||||
|
||||
// The application protocol for this port.
|
||||
// This field follows standard Kubernetes label syntax.
|
||||
// Un-prefixed names are reserved for IANA standard service names (as per
|
||||
// RFC-6335 and http://www.iana.org/assignments/service-names).
|
||||
// Non-standard protocols should use prefixed names.
|
||||
// Default is empty string.
|
||||
optional string appProtocol = 4;
|
||||
}
|
||||
|
||||
// EndpointSlice represents a subset of the endpoints that implement a service.
|
||||
// For a given service there may be multiple EndpointSlice objects, selected by
|
||||
// labels, which must be joined to produce the full set of endpoints.
|
||||
message EndpointSlice {
|
||||
// Standard object's metadata.
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||
|
||||
// addressType specifies the type of address carried by this EndpointSlice.
|
||||
// All addresses in this slice must be the same type. This field is
|
||||
// immutable after creation. The following address types are currently
|
||||
// supported:
|
||||
// * IPv4: Represents an IPv4 Address.
|
||||
// * IPv6: Represents an IPv6 Address.
|
||||
// * FQDN: Represents a Fully Qualified Domain Name.
|
||||
optional string addressType = 4;
|
||||
|
||||
// endpoints is a list of unique endpoints in this slice. Each slice may
|
||||
// include a maximum of 1000 endpoints.
|
||||
// +listType=atomic
|
||||
repeated Endpoint endpoints = 2;
|
||||
|
||||
// ports specifies the list of network ports exposed by each endpoint in
|
||||
// this slice. Each port must have a unique name. When ports is empty, it
|
||||
// indicates that there are no defined ports. When a port is defined with a
|
||||
// nil port value, it indicates "all ports". Each slice may include a
|
||||
// maximum of 100 ports.
|
||||
// +optional
|
||||
// +listType=atomic
|
||||
repeated EndpointPort ports = 3;
|
||||
}
|
||||
|
||||
// EndpointSliceList represents a list of endpoint slices
|
||||
message EndpointSliceList {
|
||||
// Standard list metadata.
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||
|
||||
// List of endpoint slices
|
||||
// +listType=set
|
||||
repeated EndpointSlice items = 2;
|
||||
}
|
||||
|
||||
|
|
@ -0,0 +1,56 @@
|
|||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
// GroupName is the group name used in this package
|
||||
const GroupName = "discovery.k8s.io"
|
||||
|
||||
// SchemeGroupVersion is group version used to register these objects
|
||||
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
|
||||
|
||||
// Kind takes an unqualified kind and returns a Group qualified GroupKind
|
||||
func Kind(kind string) schema.GroupKind {
|
||||
return SchemeGroupVersion.WithKind(kind).GroupKind()
|
||||
}
|
||||
|
||||
// Resource takes an unqualified resource and returns a Group qualified GroupResource
|
||||
func Resource(resource string) schema.GroupResource {
|
||||
return SchemeGroupVersion.WithResource(resource).GroupResource()
|
||||
}
|
||||
|
||||
var (
|
||||
// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
|
||||
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
|
||||
// AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme
|
||||
AddToScheme = SchemeBuilder.AddToScheme
|
||||
)
|
||||
|
||||
// Adds the list of known types to the given scheme.
|
||||
func addKnownTypes(scheme *runtime.Scheme) error {
|
||||
scheme.AddKnownTypes(SchemeGroupVersion,
|
||||
&EndpointSlice{},
|
||||
&EndpointSliceList{},
|
||||
)
|
||||
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
|
||||
return nil
|
||||
}
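
As a usage note (not part of the vendored file), a consumer would typically register these v1beta1 discovery types into a runtime scheme via the AddToScheme helper defined above:

```go
package example

import (
	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

// NewScheme builds a scheme that knows about EndpointSlice and EndpointSliceList.
func NewScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := discoveryv1beta1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}
```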
@ -0,0 +1,162 @@
|
|||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// EndpointSlice represents a subset of the endpoints that implement a service.
|
||||
// For a given service there may be multiple EndpointSlice objects, selected by
|
||||
// labels, which must be joined to produce the full set of endpoints.
|
||||
type EndpointSlice struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
// addressType specifies the type of address carried by this EndpointSlice.
|
||||
// All addresses in this slice must be the same type. This field is
|
||||
// immutable after creation. The following address types are currently
|
||||
// supported:
|
||||
// * IPv4: Represents an IPv4 Address.
|
||||
// * IPv6: Represents an IPv6 Address.
|
||||
// * FQDN: Represents a Fully Qualified Domain Name.
|
||||
AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"`
|
||||
// endpoints is a list of unique endpoints in this slice. Each slice may
|
||||
// include a maximum of 1000 endpoints.
|
||||
// +listType=atomic
|
||||
Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"`
|
||||
// ports specifies the list of network ports exposed by each endpoint in
|
||||
// this slice. Each port must have a unique name. When ports is empty, it
|
||||
// indicates that there are no defined ports. When a port is defined with a
|
||||
// nil port value, it indicates "all ports". Each slice may include a
|
||||
// maximum of 100 ports.
|
||||
// +optional
|
||||
// +listType=atomic
|
||||
Ports []EndpointPort `json:"ports" protobuf:"bytes,3,rep,name=ports"`
|
||||
}
|
||||
|
||||
// AddressType represents the type of address referred to by an endpoint.
|
||||
type AddressType string
|
||||
|
||||
const (
|
||||
// AddressTypeIP represents an IP Address.
|
||||
// This address type has been deprecated and has been replaced by the IPv4
|
||||
// and IPv6 address types. New resources with this address type will be
|
||||
// considered invalid. This will be fully removed in 1.18.
|
||||
// +deprecated
|
||||
AddressTypeIP = AddressType("IP")
|
||||
// AddressTypeIPv4 represents an IPv4 Address.
|
||||
AddressTypeIPv4 = AddressType(v1.IPv4Protocol)
|
||||
// AddressTypeIPv6 represents an IPv6 Address.
|
||||
AddressTypeIPv6 = AddressType(v1.IPv6Protocol)
|
||||
// AddressTypeFQDN represents a FQDN.
|
||||
AddressTypeFQDN = AddressType("FQDN")
|
||||
)
|
||||
|
||||
// Endpoint represents a single logical "backend" implementing a service.
|
||||
type Endpoint struct {
|
||||
// addresses of this endpoint. The contents of this field are interpreted
|
||||
// according to the corresponding EndpointSlice addressType field. Consumers
|
||||
// must handle different types of addresses in the context of their own
|
||||
// capabilities. This must contain at least one address but no more than
|
||||
// 100.
|
||||
// +listType=set
|
||||
Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"`
|
||||
// conditions contains information about the current status of the endpoint.
|
||||
Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"`
|
||||
// hostname of this endpoint. This field may be used by consumers of
|
||||
// endpoints to distinguish endpoints from each other (e.g. in DNS names).
|
||||
// Multiple endpoints which use the same hostname should be considered
|
||||
// fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123)
|
||||
// validation.
|
||||
// +optional
|
||||
Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"`
|
||||
// targetRef is a reference to a Kubernetes object that represents this
|
||||
// endpoint.
|
||||
// +optional
|
||||
TargetRef *v1.ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,4,opt,name=targetRef"`
|
||||
// topology contains arbitrary topology information associated with the
|
||||
// endpoint. These key/value pairs must conform with the label format.
|
||||
// https://kubernetes.io/docs/concepts/overview/working-with-objects/labels
|
||||
// Topology may include a maximum of 16 key/value pairs. This includes, but
|
||||
// is not limited to the following well known keys:
|
||||
// * kubernetes.io/hostname: the value indicates the hostname of the node
|
||||
// where the endpoint is located. This should match the corresponding
|
||||
// node label.
|
||||
// * topology.kubernetes.io/zone: the value indicates the zone where the
|
||||
// endpoint is located. This should match the corresponding node label.
|
||||
// * topology.kubernetes.io/region: the value indicates the region where the
|
||||
// endpoint is located. This should match the corresponding node label.
|
||||
// +optional
|
||||
Topology map[string]string `json:"topology,omitempty" protobuf:"bytes,5,opt,name=topology"`
|
||||
}
|
||||
|
||||
// EndpointConditions represents the current condition of an endpoint.
|
||||
type EndpointConditions struct {
|
||||
// ready indicates that this endpoint is prepared to receive traffic,
|
||||
// according to whatever system is managing the endpoint. A nil value
|
||||
// indicates an unknown state. In most cases consumers should interpret this
|
||||
// unknown state as ready.
|
||||
// +optional
|
||||
Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"`
|
||||
}
|
||||
|
||||
// EndpointPort represents a Port used by an EndpointSlice
|
||||
type EndpointPort struct {
|
||||
// The name of this port. All ports in an EndpointSlice must have a unique
|
||||
// name. If the EndpointSlice is derived from a Kubernetes service, this
|
||||
// corresponds to the Service.ports[].name.
|
||||
// Name must either be an empty string or pass DNS_LABEL validation:
|
||||
// * must be no more than 63 characters long.
|
||||
// * must consist of lower case alphanumeric characters or '-'.
|
||||
// * must start and end with an alphanumeric character.
|
||||
// Default is empty string.
|
||||
Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"`
|
||||
// The IP protocol for this port.
|
||||
// Must be UDP, TCP, or SCTP.
|
||||
// Default is TCP.
|
||||
Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"`
|
||||
// The port number of the endpoint.
|
||||
// If this is not specified, ports are not restricted and must be
|
||||
// interpreted in the context of the specific consumer.
|
||||
Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"`
|
||||
// The application protocol for this port.
|
||||
// This field follows standard Kubernetes label syntax.
|
||||
// Un-prefixed names are reserved for IANA standard service names (as per
|
||||
// RFC-6335 and http://www.iana.org/assignments/service-names).
|
||||
// Non-standard protocols should use prefixed names.
|
||||
// Default is empty string.
|
||||
AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,name=appProtocol"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// EndpointSliceList represents a list of endpoint slices
|
||||
type EndpointSliceList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard list metadata.
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
// List of endpoint slices
|
||||
// +listType=set
|
||||
Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
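To illustrate how the fields above fit together, here is a hedged sketch of constructing an EndpointSlice value; the object name, label value, address, and port are invented for the example:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	name := "http"
	protocol := v1.ProtocolTCP
	port := int32(80)
	ready := true

	slice := discoveryv1beta1.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name: "example-abc12",
			Labels: map[string]string{
				// Ties the slice back to its Service via the well-known label.
				"kubernetes.io/service-name": "example",
			},
		},
		// Every address in Endpoints below must match this address type.
		AddressType: discoveryv1beta1.AddressTypeIPv4,
		Endpoints: []discoveryv1beta1.Endpoint{{
			Addresses:  []string{"10.0.0.1"},
			Conditions: discoveryv1beta1.EndpointConditions{Ready: &ready},
		}},
		Ports: []discoveryv1beta1.EndpointPort{{
			Name:     &name,
			Protocol: &protocol,
			Port:     &port,
		}},
	}
	fmt.Println(slice.AddressType, len(slice.Endpoints), len(slice.Ports))
}
```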
86
vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go
generated
vendored
Normal file
|
|
@@ -0,0 +1,86 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1beta1
|
||||
|
||||
// This file contains a collection of methods that can be used from go-restful to
|
||||
// generate Swagger API documentation for its models. Please read this PR for more
|
||||
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
|
||||
//
|
||||
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
|
||||
// they are on one line! For multiple line or blocks that you want to ignore use ---.
|
||||
// Any context after a --- is ignored.
|
||||
//
|
||||
// Those methods can be generated by using hack/update-generated-swagger-docs.sh
|
||||
|
||||
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
|
||||
var map_Endpoint = map[string]string{
|
||||
"": "Endpoint represents a single logical \"backend\" implementing a service.",
|
||||
"addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.",
|
||||
"conditions": "conditions contains information about the current status of the endpoint.",
|
||||
"hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) validation.",
|
||||
"targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.",
|
||||
"topology": "topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\n where the endpoint is located. This should match the corresponding\n node label.\n* topology.kubernetes.io/zone: the value indicates the zone where the\n endpoint is located. This should match the corresponding node label.\n* topology.kubernetes.io/region: the value indicates the region where the\n endpoint is located. This should match the corresponding node label.",
|
||||
}
|
||||
|
||||
func (Endpoint) SwaggerDoc() map[string]string {
|
||||
return map_Endpoint
|
||||
}
|
||||
|
||||
var map_EndpointConditions = map[string]string{
|
||||
"": "EndpointConditions represents the current condition of an endpoint.",
|
||||
"ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready.",
|
||||
}
|
||||
|
||||
func (EndpointConditions) SwaggerDoc() map[string]string {
|
||||
return map_EndpointConditions
|
||||
}
|
||||
|
||||
var map_EndpointPort = map[string]string{
|
||||
"": "EndpointPort represents a Port used by an EndpointSlice",
|
||||
"name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.",
|
||||
"protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
|
||||
"port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.",
|
||||
"appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names. Default is empty string.",
|
||||
}
|
||||
|
||||
func (EndpointPort) SwaggerDoc() map[string]string {
|
||||
return map_EndpointPort
|
||||
}
|
||||
|
||||
var map_EndpointSlice = map[string]string{
|
||||
"": "EndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints.",
|
||||
"metadata": "Standard object's metadata.",
|
||||
"addressType": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name.",
|
||||
"endpoints": "endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints.",
|
||||
"ports": "ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \"all ports\". Each slice may include a maximum of 100 ports.",
|
||||
}
|
||||
|
||||
func (EndpointSlice) SwaggerDoc() map[string]string {
|
||||
return map_EndpointSlice
|
||||
}
|
||||
|
||||
var map_EndpointSliceList = map[string]string{
|
||||
"": "EndpointSliceList represents a list of endpoint slices",
|
||||
"metadata": "Standard list metadata.",
|
||||
"items": "List of endpoint slices",
|
||||
}
|
||||
|
||||
func (EndpointSliceList) SwaggerDoc() map[string]string {
|
||||
return map_EndpointSliceList
|
||||
}
|
||||
|
||||
// AUTO-GENERATED FUNCTIONS END HERE
|
||||
|
|
@@ -0,0 +1,28 @@
|
|||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1beta1
|
||||
|
||||
const (
|
||||
// LabelServiceName is used to indicate the name of a Kubernetes service.
|
||||
LabelServiceName = "kubernetes.io/service-name"
|
||||
// LabelManagedBy is used to indicate the controller or entity that manages
|
||||
// an EndpointSlice. This label aims to enable different EndpointSlice
|
||||
// objects to be managed by different controllers or entities within the
|
||||
// same cluster. It is highly recommended to configure this label for all
|
||||
// EndpointSlices.
|
||||
LabelManagedBy = "endpointslice.kubernetes.io/managed-by"
|
||||
)
|
||||
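As a usage sketch for the well-known labels above, a consumer would typically select the slices of one Service by LabelServiceName. This assumes the client-go version pinned in this change (v0.17.x, where List takes only ListOptions) and invented names ("default", "example"):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load whatever kubeconfig the environment provides; purely illustrative.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Select the EndpointSlices that belong to the Service "example" using the
	// kubernetes.io/service-name label defined above.
	slices, err := clientset.DiscoveryV1beta1().EndpointSlices("default").List(metav1.ListOptions{
		LabelSelector: "kubernetes.io/service-name=example",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("slices:", len(slices.Items))
}
```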
|
|
@@ -0,0 +1,195 @@
|
|||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Endpoint) DeepCopyInto(out *Endpoint) {
|
||||
*out = *in
|
||||
if in.Addresses != nil {
|
||||
in, out := &in.Addresses, &out.Addresses
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
in.Conditions.DeepCopyInto(&out.Conditions)
|
||||
if in.Hostname != nil {
|
||||
in, out := &in.Hostname, &out.Hostname
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.TargetRef != nil {
|
||||
in, out := &in.TargetRef, &out.TargetRef
|
||||
*out = new(v1.ObjectReference)
|
||||
**out = **in
|
||||
}
|
||||
if in.Topology != nil {
|
||||
in, out := &in.Topology, &out.Topology
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint.
|
||||
func (in *Endpoint) DeepCopy() *Endpoint {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Endpoint)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EndpointConditions) DeepCopyInto(out *EndpointConditions) {
|
||||
*out = *in
|
||||
if in.Ready != nil {
|
||||
in, out := &in.Ready, &out.Ready
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConditions.
|
||||
func (in *EndpointConditions) DeepCopy() *EndpointConditions {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EndpointConditions)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EndpointPort) DeepCopyInto(out *EndpointPort) {
|
||||
*out = *in
|
||||
if in.Name != nil {
|
||||
in, out := &in.Name, &out.Name
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.Protocol != nil {
|
||||
in, out := &in.Protocol, &out.Protocol
|
||||
*out = new(v1.Protocol)
|
||||
**out = **in
|
||||
}
|
||||
if in.Port != nil {
|
||||
in, out := &in.Port, &out.Port
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
if in.AppProtocol != nil {
|
||||
in, out := &in.AppProtocol, &out.AppProtocol
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort.
|
||||
func (in *EndpointPort) DeepCopy() *EndpointPort {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EndpointPort)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EndpointSlice) DeepCopyInto(out *EndpointSlice) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
if in.Endpoints != nil {
|
||||
in, out := &in.Endpoints, &out.Endpoints
|
||||
*out = make([]Endpoint, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.Ports != nil {
|
||||
in, out := &in.Ports, &out.Ports
|
||||
*out = make([]EndpointPort, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSlice.
|
||||
func (in *EndpointSlice) DeepCopy() *EndpointSlice {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EndpointSlice)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *EndpointSlice) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EndpointSliceList) DeepCopyInto(out *EndpointSliceList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]EndpointSlice, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSliceList.
|
||||
func (in *EndpointSliceList) DeepCopy() *EndpointSliceList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EndpointSliceList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *EndpointSliceList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
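The generated DeepCopy helpers above matter mainly because objects served from shared informer caches must not be mutated in place. A small sketch of the usual copy-then-edit pattern; the controller name written into the label is illustrative:

```go
package main

import (
	"fmt"

	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
)

// mutateSlice shows the typical pattern around the generated DeepCopy methods:
// take a deep copy of the shared object and edit only the copy.
func mutateSlice(shared *discoveryv1beta1.EndpointSlice) *discoveryv1beta1.EndpointSlice {
	copied := shared.DeepCopy()
	if copied.Labels == nil {
		copied.Labels = map[string]string{}
	}
	copied.Labels["endpointslice.kubernetes.io/managed-by"] = "example-controller"
	return copied
}

func main() {
	orig := &discoveryv1beta1.EndpointSlice{}
	out := mutateSlice(orig)
	// The original is untouched; only the copy carries the new label.
	fmt.Println(len(orig.Labels), len(out.Labels))
}
```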
|
|
@@ -239,10 +239,66 @@ func (m *GroupSubject) XXX_DiscardUnknown() {
|
|||
|
||||
var xxx_messageInfo_GroupSubject proto.InternalMessageInfo
|
||||
|
||||
func (m *LimitResponse) Reset() { *m = LimitResponse{} }
|
||||
func (*LimitResponse) ProtoMessage() {}
|
||||
func (*LimitResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{7}
|
||||
}
|
||||
func (m *LimitResponse) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *LimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
func (m *LimitResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_LimitResponse.Merge(m, src)
|
||||
}
|
||||
func (m *LimitResponse) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *LimitResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_LimitResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_LimitResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} }
|
||||
func (*LimitedPriorityLevelConfiguration) ProtoMessage() {}
|
||||
func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{8}
|
||||
}
|
||||
func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *LimitedPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
func (m *LimitedPriorityLevelConfiguration) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_LimitedPriorityLevelConfiguration.Merge(m, src)
|
||||
}
|
||||
func (m *LimitedPriorityLevelConfiguration) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *LimitedPriorityLevelConfiguration) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_LimitedPriorityLevelConfiguration.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo
|
||||
|
||||
func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} }
|
||||
func (*NonResourcePolicyRule) ProtoMessage() {}
|
||||
func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{7}
|
||||
return fileDescriptor_45ba024d525b289b, []int{9}
|
||||
}
|
||||
func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
|
@@ -270,7 +326,7 @@ var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo
|
|||
func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} }
|
||||
func (*PolicyRulesWithSubjects) ProtoMessage() {}
|
||||
func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{8}
|
||||
return fileDescriptor_45ba024d525b289b, []int{10}
|
||||
}
|
||||
func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
|
@@ -298,7 +354,7 @@ var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo
|
|||
func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} }
|
||||
func (*PriorityLevelConfiguration) ProtoMessage() {}
|
||||
func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{9}
|
||||
return fileDescriptor_45ba024d525b289b, []int{11}
|
||||
}
|
||||
func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
|
@@ -326,7 +382,7 @@ var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo
|
|||
func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} }
|
||||
func (*PriorityLevelConfigurationCondition) ProtoMessage() {}
|
||||
func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{10}
|
||||
return fileDescriptor_45ba024d525b289b, []int{12}
|
||||
}
|
||||
func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
|
@@ -354,7 +410,7 @@ var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInf
|
|||
func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} }
|
||||
func (*PriorityLevelConfigurationList) ProtoMessage() {}
|
||||
func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{11}
|
||||
return fileDescriptor_45ba024d525b289b, []int{13}
|
||||
}
|
||||
func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
|
@@ -382,7 +438,7 @@ var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo
|
|||
func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} }
|
||||
func (*PriorityLevelConfigurationReference) ProtoMessage() {}
|
||||
func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{12}
|
||||
return fileDescriptor_45ba024d525b289b, []int{14}
|
||||
}
|
||||
func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
|
@@ -410,7 +466,7 @@ var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInf
|
|||
func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} }
|
||||
func (*PriorityLevelConfigurationSpec) ProtoMessage() {}
|
||||
func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{13}
|
||||
return fileDescriptor_45ba024d525b289b, []int{15}
|
||||
}
|
||||
func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
|
@@ -438,7 +494,7 @@ var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo
|
|||
func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} }
|
||||
func (*PriorityLevelConfigurationStatus) ProtoMessage() {}
|
||||
func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{14}
|
||||
return fileDescriptor_45ba024d525b289b, []int{16}
|
||||
}
|
||||
func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
|
@@ -466,7 +522,7 @@ var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo
|
|||
func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} }
|
||||
func (*QueuingConfiguration) ProtoMessage() {}
|
||||
func (*QueuingConfiguration) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{15}
|
||||
return fileDescriptor_45ba024d525b289b, []int{17}
|
||||
}
|
||||
func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
|
@@ -494,7 +550,7 @@ var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo
|
|||
func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} }
|
||||
func (*ResourcePolicyRule) ProtoMessage() {}
|
||||
func (*ResourcePolicyRule) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{16}
|
||||
return fileDescriptor_45ba024d525b289b, []int{18}
|
||||
}
|
||||
func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
|
@@ -522,7 +578,7 @@ var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo
|
|||
func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} }
|
||||
func (*ServiceAccountSubject) ProtoMessage() {}
|
||||
func (*ServiceAccountSubject) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{17}
|
||||
return fileDescriptor_45ba024d525b289b, []int{19}
|
||||
}
|
||||
func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
|
@@ -550,7 +606,7 @@ var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo
|
|||
func (m *Subject) Reset() { *m = Subject{} }
|
||||
func (*Subject) ProtoMessage() {}
|
||||
func (*Subject) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{18}
|
||||
return fileDescriptor_45ba024d525b289b, []int{20}
|
||||
}
|
||||
func (m *Subject) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
|
@@ -578,7 +634,7 @@ var xxx_messageInfo_Subject proto.InternalMessageInfo
|
|||
func (m *UserSubject) Reset() { *m = UserSubject{} }
|
||||
func (*UserSubject) ProtoMessage() {}
|
||||
func (*UserSubject) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{19}
|
||||
return fileDescriptor_45ba024d525b289b, []int{21}
|
||||
}
|
||||
func (m *UserSubject) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
|
@@ -611,6 +667,8 @@ func init() {
|
|||
proto.RegisterType((*FlowSchemaSpec)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaSpec")
|
||||
proto.RegisterType((*FlowSchemaStatus)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaStatus")
|
||||
proto.RegisterType((*GroupSubject)(nil), "k8s.io.api.flowcontrol.v1alpha1.GroupSubject")
|
||||
proto.RegisterType((*LimitResponse)(nil), "k8s.io.api.flowcontrol.v1alpha1.LimitResponse")
|
||||
proto.RegisterType((*LimitedPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration")
|
||||
proto.RegisterType((*NonResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1alpha1.NonResourcePolicyRule")
|
||||
proto.RegisterType((*PolicyRulesWithSubjects)(nil), "k8s.io.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects")
|
||||
proto.RegisterType((*PriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfiguration")
|
||||
|
|
@@ -631,94 +689,101 @@
|
|||
}
|
||||
|
||||
var fileDescriptor_45ba024d525b289b = []byte{
|
||||
// 1386 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xdd, 0x6e, 0xd4, 0xc6,
|
||||
0x17, 0x8f, 0x37, 0xbb, 0x49, 0x76, 0x42, 0x3e, 0xfe, 0x93, 0x3f, 0xca, 0x36, 0x48, 0xbb, 0xa9,
|
||||
0x2b, 0x15, 0x28, 0x60, 0x13, 0x4a, 0x29, 0x15, 0x42, 0x28, 0x06, 0x95, 0xaf, 0x24, 0x4d, 0x26,
|
||||
0x40, 0x55, 0x44, 0x25, 0x26, 0xde, 0x89, 0x77, 0xc8, 0xae, 0xed, 0xce, 0xd8, 0x4b, 0x53, 0x71,
|
||||
0x51, 0xa9, 0x0f, 0x50, 0x1e, 0x80, 0x07, 0xe8, 0x4b, 0x54, 0xea, 0x25, 0xaa, 0x7a, 0xc1, 0x25,
|
||||
0x57, 0x2b, 0xb2, 0xbd, 0xed, 0x03, 0x54, 0x5c, 0x55, 0x33, 0x1e, 0xdb, 0xeb, 0xfd, 0xc8, 0x2e,
|
||||
0x8d, 0xc4, 0x55, 0xef, 0xec, 0x73, 0xce, 0xef, 0x77, 0xce, 0x9c, 0x39, 0x67, 0xce, 0x01, 0xb7,
|
||||
0xf6, 0x2e, 0x73, 0x83, 0x7a, 0xe6, 0x5e, 0xb8, 0x43, 0x98, 0x4b, 0x02, 0xc2, 0xcd, 0x26, 0x71,
|
||||
0xab, 0x1e, 0x33, 0x95, 0x02, 0xfb, 0xd4, 0xdc, 0xad, 0x7b, 0x4f, 0x6d, 0xcf, 0x0d, 0x98, 0x57,
|
||||
0x37, 0x9b, 0x2b, 0xb8, 0xee, 0xd7, 0xf0, 0x8a, 0xe9, 0x10, 0x97, 0x30, 0x1c, 0x90, 0xaa, 0xe1,
|
||||
0x33, 0x2f, 0xf0, 0x60, 0x25, 0x02, 0x18, 0xd8, 0xa7, 0x46, 0x07, 0xc0, 0x88, 0x01, 0x4b, 0xe7,
|
||||
0x1c, 0x1a, 0xd4, 0xc2, 0x1d, 0xc3, 0xf6, 0x1a, 0xa6, 0xe3, 0x39, 0x9e, 0x29, 0x71, 0x3b, 0xe1,
|
||||
0xae, 0xfc, 0x93, 0x3f, 0xf2, 0x2b, 0xe2, 0x5b, 0xba, 0x98, 0x06, 0xd0, 0xc0, 0x76, 0x8d, 0xba,
|
||||
0x84, 0xed, 0x9b, 0xfe, 0x9e, 0x23, 0x04, 0xdc, 0x6c, 0x90, 0x00, 0x9b, 0xcd, 0x9e, 0x28, 0x96,
|
||||
0xcc, 0x41, 0x28, 0x16, 0xba, 0x01, 0x6d, 0x90, 0x1e, 0xc0, 0xa5, 0x61, 0x00, 0x6e, 0xd7, 0x48,
|
||||
0x03, 0x77, 0xe3, 0xf4, 0x87, 0x60, 0xf1, 0xcb, 0xba, 0xf7, 0xf4, 0x06, 0xe5, 0x01, 0x75, 0x9d,
|
||||
0x90, 0xf2, 0x1a, 0x61, 0xeb, 0x24, 0xa8, 0x79, 0x55, 0x78, 0x0d, 0xe4, 0x83, 0x7d, 0x9f, 0x94,
|
||||
0xb4, 0x65, 0xed, 0x54, 0xd1, 0x3a, 0xf3, 0xb2, 0x55, 0x19, 0x6b, 0xb7, 0x2a, 0xf9, 0x7b, 0xfb,
|
||||
0x3e, 0x79, 0xdb, 0xaa, 0x9c, 0x18, 0x00, 0x13, 0x6a, 0x24, 0x81, 0xfa, 0x8b, 0x1c, 0x00, 0xc2,
|
||||
0x6a, 0x5b, 0xba, 0x86, 0x8f, 0xc1, 0x94, 0x38, 0x6e, 0x15, 0x07, 0x58, 0x72, 0x4e, 0x5f, 0x38,
|
||||
0x6f, 0xa4, 0xc9, 0x4e, 0xa2, 0x36, 0xfc, 0x3d, 0x47, 0x08, 0xb8, 0x21, 0xac, 0x8d, 0xe6, 0x8a,
|
||||
0xf1, 0xd5, 0xce, 0x13, 0x62, 0x07, 0xeb, 0x24, 0xc0, 0x16, 0x54, 0x51, 0x80, 0x54, 0x86, 0x12,
|
||||
0x56, 0xb8, 0x05, 0xf2, 0xdc, 0x27, 0x76, 0x29, 0x27, 0xd9, 0x4d, 0x63, 0xc8, 0x55, 0x1a, 0x69,
|
||||
0x70, 0xdb, 0x3e, 0xb1, 0xad, 0x63, 0xf1, 0x11, 0xc5, 0x1f, 0x92, 0x54, 0xf0, 0x1b, 0x30, 0xc1,
|
||||
0x03, 0x1c, 0x84, 0xbc, 0x34, 0x2e, 0x49, 0x57, 0xde, 0x85, 0x54, 0x02, 0xad, 0x59, 0x45, 0x3b,
|
||||
0x11, 0xfd, 0x23, 0x45, 0xa8, 0xbf, 0xce, 0x81, 0x85, 0xd4, 0xf8, 0xba, 0xe7, 0x56, 0x69, 0x40,
|
||||
0x3d, 0x17, 0x5e, 0xc9, 0xe4, 0xfd, 0x64, 0x57, 0xde, 0x17, 0xfb, 0x40, 0xd2, 0x9c, 0xc3, 0x2f,
|
||||
0x92, 0x78, 0x73, 0x12, 0xfe, 0x61, 0xd6, 0xf9, 0xdb, 0x56, 0x65, 0x2e, 0x81, 0x65, 0xe3, 0x81,
|
||||
0x4d, 0x00, 0xeb, 0x98, 0x07, 0xf7, 0x18, 0x76, 0x79, 0x44, 0x4b, 0x1b, 0x44, 0x1d, 0xfb, 0x93,
|
||||
0xd1, 0x6e, 0x4a, 0x20, 0xac, 0x25, 0xe5, 0x12, 0xae, 0xf5, 0xb0, 0xa1, 0x3e, 0x1e, 0xe0, 0xc7,
|
||||
0x60, 0x82, 0x11, 0xcc, 0x3d, 0xb7, 0x94, 0x97, 0x21, 0x27, 0xf9, 0x42, 0x52, 0x8a, 0x94, 0x16,
|
||||
0x9e, 0x06, 0x93, 0x0d, 0xc2, 0x39, 0x76, 0x48, 0xa9, 0x20, 0x0d, 0xe7, 0x94, 0xe1, 0xe4, 0x7a,
|
||||
0x24, 0x46, 0xb1, 0x5e, 0xff, 0x4d, 0x03, 0xb3, 0x69, 0x9e, 0xd6, 0x28, 0x0f, 0xe0, 0xa3, 0x9e,
|
||||
0xea, 0x33, 0x46, 0x3b, 0x93, 0x40, 0xcb, 0xda, 0x9b, 0x57, 0xee, 0xa6, 0x62, 0x49, 0x47, 0xe5,
|
||||
0x6d, 0x82, 0x02, 0x0d, 0x48, 0x43, 0x64, 0x7d, 0xfc, 0xd4, 0xf4, 0x85, 0x33, 0xef, 0x50, 0x25,
|
||||
0xd6, 0x8c, 0xe2, 0x2d, 0xdc, 0x16, 0x0c, 0x28, 0x22, 0xd2, 0xff, 0x1a, 0xef, 0x3c, 0x82, 0xa8,
|
||||
0x48, 0xf8, 0x8b, 0x06, 0x96, 0x7c, 0x46, 0x3d, 0x46, 0x83, 0xfd, 0x35, 0xd2, 0x24, 0xf5, 0xeb,
|
||||
0x9e, 0xbb, 0x4b, 0x9d, 0x90, 0x61, 0x91, 0x4b, 0x75, 0xaa, 0x1b, 0x43, 0x5d, 0x6f, 0x0e, 0xa4,
|
||||
0x40, 0x64, 0x97, 0x30, 0xe2, 0xda, 0xc4, 0xd2, 0x55, 0x4c, 0x4b, 0x87, 0x18, 0x1f, 0x12, 0x0b,
|
||||
0xbc, 0x03, 0x60, 0x03, 0x07, 0x22, 0xa7, 0xce, 0x26, 0x23, 0x36, 0xa9, 0x0a, 0x56, 0x59, 0x92,
|
||||
0x85, 0xb4, 0x3e, 0xd6, 0x7b, 0x2c, 0x50, 0x1f, 0x14, 0xfc, 0x49, 0x03, 0x0b, 0xd5, 0xde, 0x87,
|
||||
0x46, 0x55, 0xe6, 0xe5, 0x91, 0x52, 0xdd, 0xe7, 0xa1, 0xb2, 0x16, 0xdb, 0xad, 0xca, 0x42, 0x1f,
|
||||
0x05, 0xea, 0xe7, 0x0d, 0x7e, 0x0b, 0x0a, 0x2c, 0xac, 0x13, 0x5e, 0xca, 0xcb, 0x1b, 0x1e, 0xee,
|
||||
0x76, 0xd3, 0xab, 0x53, 0x7b, 0x1f, 0x09, 0xcc, 0xd7, 0x34, 0xa8, 0x6d, 0x87, 0xf2, 0xc5, 0xe2,
|
||||
0xe9, 0x75, 0x4b, 0x15, 0x8a, 0x58, 0xf5, 0x67, 0x60, 0xbe, 0xfb, 0xe1, 0x80, 0x35, 0x00, 0xec,
|
||||
0xb8, 0x57, 0x79, 0x49, 0x93, 0x7e, 0x2f, 0xbe, 0x43, 0x65, 0x25, 0x8d, 0x9e, 0x3e, 0x9b, 0x89,
|
||||
0x88, 0xa3, 0x0e, 0x6e, 0xfd, 0x3c, 0x38, 0x76, 0x93, 0x79, 0xa1, 0xaf, 0x82, 0x84, 0xcb, 0x20,
|
||||
0xef, 0xe2, 0x46, 0xfc, 0x04, 0x25, 0xef, 0xe2, 0x06, 0x6e, 0x10, 0x24, 0x35, 0xfa, 0x53, 0x70,
|
||||
0x7c, 0x43, 0x14, 0x0c, 0xf7, 0x42, 0x66, 0x93, 0xf4, 0xac, 0xb0, 0x02, 0x0a, 0x4d, 0xc2, 0x76,
|
||||
0xa2, 0x78, 0x8b, 0x56, 0x51, 0x9c, 0xf4, 0x81, 0x10, 0xa0, 0x48, 0x0e, 0xaf, 0x82, 0x39, 0x37,
|
||||
0x45, 0xde, 0x47, 0x6b, 0xbc, 0x34, 0x21, 0x4d, 0x17, 0xda, 0xad, 0xca, 0xdc, 0x46, 0x56, 0x85,
|
||||
0xba, 0x6d, 0xf5, 0x83, 0x1c, 0x58, 0x1c, 0x90, 0x5a, 0xf8, 0x00, 0x4c, 0x71, 0xf5, 0xad, 0xd2,
|
||||
0x75, 0x6a, 0x68, 0xba, 0x14, 0x38, 0xed, 0xee, 0x98, 0x0d, 0x25, 0x5c, 0xd0, 0x07, 0x33, 0x4c,
|
||||
0xc5, 0x20, 0x9d, 0xaa, 0x2e, 0xff, 0x74, 0x28, 0x79, 0x6f, 0x7e, 0xac, 0xe3, 0xca, 0xcf, 0x0c,
|
||||
0xea, 0x64, 0x44, 0x59, 0x07, 0xf0, 0x19, 0x98, 0xef, 0x38, 0x78, 0xe4, 0x74, 0x5c, 0x3a, 0xbd,
|
||||
0x34, 0xd4, 0x69, 0xdf, 0x7b, 0xb1, 0x4a, 0xca, 0xef, 0xfc, 0x46, 0x17, 0x2f, 0xea, 0xf1, 0xa4,
|
||||
0xff, 0x91, 0x03, 0x87, 0x34, 0xfe, 0x7b, 0x18, 0xe4, 0x38, 0x33, 0xc8, 0xaf, 0x1d, 0xe1, 0x49,
|
||||
0x1b, 0x38, 0xd8, 0x69, 0xd7, 0x60, 0x5f, 0x3d, 0x8a, 0x93, 0xc3, 0x07, 0xfd, 0xdf, 0x39, 0xf0,
|
||||
0xd1, 0x60, 0x70, 0x3a, 0xf8, 0xef, 0x66, 0x06, 0xff, 0xe7, 0x5d, 0x83, 0xff, 0xe4, 0x08, 0x14,
|
||||
0xff, 0x2d, 0x02, 0x5d, 0x8b, 0xc0, 0x1b, 0x0d, 0x94, 0x07, 0xe7, 0xed, 0x3d, 0x2c, 0x06, 0x8f,
|
||||
0xb3, 0x8b, 0xc1, 0x95, 0x23, 0x54, 0xd9, 0x80, 0x45, 0xe1, 0xe6, 0x61, 0xc5, 0x95, 0x4c, 0xf4,
|
||||
0x11, 0x9e, 0xf4, 0x5f, 0x0f, 0xcd, 0x95, 0xdc, 0x40, 0xae, 0x66, 0x2a, 0xf4, 0x74, 0x57, 0x85,
|
||||
0x7e, 0x90, 0x41, 0x6f, 0x85, 0x24, 0x24, 0xd4, 0x75, 0x3a, 0x6a, 0xf2, 0x11, 0x98, 0xfc, 0x2e,
|
||||
0x24, 0x21, 0x75, 0x1d, 0xd5, 0xd9, 0x9f, 0x0d, 0x4d, 0xc7, 0x56, 0x64, 0x9f, 0x4d, 0xc4, 0xb4,
|
||||
0xb8, 0x6b, 0xa5, 0x41, 0x31, 0xa5, 0xfe, 0x42, 0x03, 0xcb, 0xc3, 0x7a, 0x14, 0x7e, 0xdf, 0x67,
|
||||
0xa6, 0x1e, 0x65, 0x65, 0x1a, 0x7d, 0xc6, 0x3e, 0xcf, 0x81, 0xff, 0xf7, 0x3b, 0x0d, 0x7c, 0x04,
|
||||
0x4a, 0x98, 0xf3, 0x90, 0x91, 0xea, 0x75, 0xcf, 0xb5, 0x43, 0x26, 0xee, 0x6b, 0x7f, 0xbb, 0x86,
|
||||
0x19, 0xe1, 0x32, 0xd1, 0x05, 0x6b, 0x59, 0x51, 0x97, 0x56, 0x07, 0xd8, 0xa1, 0x81, 0x0c, 0xa2,
|
||||
0xa9, 0x44, 0x82, 0x08, 0x57, 0xdb, 0x57, 0xd2, 0x54, 0xf2, 0x7e, 0x38, 0x52, 0x5a, 0x78, 0x16,
|
||||
0x4c, 0xd5, 0xb0, 0x5b, 0xdd, 0xa6, 0x3f, 0x44, 0xad, 0x5e, 0x48, 0xcb, 0xfa, 0x96, 0x92, 0xa3,
|
||||
0xc4, 0x02, 0xde, 0x00, 0xf3, 0x12, 0xb7, 0x46, 0x5c, 0x27, 0xa8, 0xad, 0xd1, 0x06, 0x0d, 0x64,
|
||||
0xd3, 0x16, 0xd2, 0x39, 0xb3, 0xd5, 0xa5, 0x47, 0x3d, 0x08, 0xfd, 0x67, 0x0d, 0xc0, 0x7f, 0xb3,
|
||||
0x42, 0x9c, 0x01, 0x45, 0xec, 0x53, 0xb9, 0xb1, 0x44, 0x8d, 0x55, 0xb4, 0x66, 0xda, 0xad, 0x4a,
|
||||
0x71, 0x75, 0xf3, 0x76, 0x24, 0x44, 0xa9, 0x5e, 0x18, 0xc7, 0xb3, 0x35, 0x9a, 0xa1, 0xca, 0x38,
|
||||
0x76, 0xcc, 0x51, 0xaa, 0xd7, 0x9f, 0x80, 0xe3, 0xdb, 0x84, 0x35, 0xa9, 0x4d, 0x56, 0x6d, 0xdb,
|
||||
0x0b, 0xdd, 0x20, 0xde, 0x88, 0x4c, 0x50, 0x14, 0x4d, 0xc2, 0x7d, 0x6c, 0xc7, 0xe5, 0xff, 0x3f,
|
||||
0x75, 0xd2, 0xe2, 0x46, 0xac, 0x40, 0xa9, 0x4d, 0xd2, 0x6f, 0xb9, 0x81, 0xfd, 0xf6, 0x7b, 0x0e,
|
||||
0x4c, 0xa6, 0xf4, 0xf9, 0x3d, 0xea, 0x56, 0x15, 0xf3, 0x89, 0xd8, 0xfa, 0x2e, 0x75, 0xab, 0x6f,
|
||||
0x5b, 0x95, 0x69, 0x65, 0x26, 0x7e, 0x91, 0x34, 0x84, 0x77, 0x40, 0x3e, 0xe4, 0x84, 0xa9, 0x3e,
|
||||
0x3a, 0x3b, 0xb4, 0x82, 0xef, 0x73, 0xc2, 0xe2, 0x55, 0x67, 0x4a, 0x50, 0x0b, 0x01, 0x92, 0x1c,
|
||||
0x70, 0x03, 0x14, 0x1c, 0x91, 0x2b, 0xf5, 0xc4, 0x9f, 0x1b, 0x4a, 0xd6, 0xb9, 0x2b, 0x46, 0xd7,
|
||||
0x23, 0x25, 0x28, 0xa2, 0x81, 0x0c, 0xcc, 0xf2, 0x4c, 0x12, 0x65, 0x69, 0x8c, 0xb2, 0xba, 0xf4,
|
||||
0xcd, 0xbd, 0x05, 0xdb, 0xad, 0xca, 0x6c, 0x56, 0x85, 0xba, 0x3c, 0xe8, 0x26, 0x98, 0xee, 0x38,
|
||||
0xe2, 0xf0, 0xd7, 0xce, 0x32, 0x5e, 0x1e, 0x94, 0xc7, 0x5e, 0x1d, 0x94, 0xc7, 0x5e, 0x1f, 0x94,
|
||||
0xc7, 0x7e, 0x6c, 0x97, 0xb5, 0x97, 0xed, 0xb2, 0xf6, 0xaa, 0x5d, 0xd6, 0x5e, 0xb7, 0xcb, 0xda,
|
||||
0x9b, 0x76, 0x59, 0x7b, 0xfe, 0x67, 0x79, 0xec, 0xe1, 0x54, 0x1c, 0xda, 0x3f, 0x01, 0x00, 0x00,
|
||||
0xff, 0xff, 0x14, 0x4f, 0x2e, 0xc5, 0x62, 0x12, 0x00, 0x00,
|
||||
// 1502 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x4d, 0x6f, 0xdb, 0x46,
|
||||
0x13, 0x36, 0x65, 0xc9, 0xb6, 0xc6, 0x9f, 0x59, 0x27, 0xb0, 0xe0, 0x00, 0x92, 0xc3, 0x17, 0x78,
|
||||
0x93, 0xf7, 0x4d, 0x42, 0xc6, 0x69, 0x92, 0xa6, 0x08, 0x8a, 0xc0, 0x74, 0xda, 0x7c, 0xd9, 0xae,
|
||||
0xbd, 0x4e, 0x52, 0x34, 0x48, 0x81, 0xd0, 0xd4, 0x5a, 0xda, 0x58, 0x22, 0x59, 0x2e, 0xa9, 0xd4,
|
||||
0x45, 0x0e, 0x05, 0xfa, 0x07, 0xfa, 0x03, 0x72, 0xec, 0xa1, 0xe7, 0xfe, 0x82, 0x1e, 0x8d, 0xa2,
|
||||
0x87, 0x1c, 0x73, 0x12, 0x62, 0xf5, 0x5a, 0xf4, 0xdc, 0xe6, 0x54, 0xec, 0x72, 0x49, 0x8a, 0xfa,
|
||||
0xb0, 0x94, 0x1a, 0xc8, 0xa9, 0x37, 0x71, 0x3e, 0x9e, 0xd9, 0x99, 0x9d, 0x99, 0x7d, 0x04, 0x77,
|
||||
0xf6, 0xae, 0x33, 0x8d, 0x3a, 0xfa, 0x5e, 0xb0, 0x43, 0x3c, 0x9b, 0xf8, 0x84, 0xe9, 0x0d, 0x62,
|
||||
0x97, 0x1d, 0x4f, 0x97, 0x0a, 0xd3, 0xa5, 0xfa, 0x6e, 0xcd, 0x79, 0x6e, 0x39, 0xb6, 0xef, 0x39,
|
||||
0x35, 0xbd, 0xb1, 0x6c, 0xd6, 0xdc, 0xaa, 0xb9, 0xac, 0x57, 0x88, 0x4d, 0x3c, 0xd3, 0x27, 0x65,
|
||||
0xcd, 0xf5, 0x1c, 0xdf, 0x41, 0xa5, 0xd0, 0x41, 0x33, 0x5d, 0xaa, 0xb5, 0x39, 0x68, 0x91, 0xc3,
|
||||
0xe2, 0xc5, 0x0a, 0xf5, 0xab, 0xc1, 0x8e, 0x66, 0x39, 0x75, 0xbd, 0xe2, 0x54, 0x1c, 0x5d, 0xf8,
|
||||
0xed, 0x04, 0xbb, 0xe2, 0x4b, 0x7c, 0x88, 0x5f, 0x21, 0xde, 0xe2, 0x95, 0xe4, 0x00, 0x75, 0xd3,
|
||||
0xaa, 0x52, 0x9b, 0x78, 0xfb, 0xba, 0xbb, 0x57, 0xe1, 0x02, 0xa6, 0xd7, 0x89, 0x6f, 0xea, 0x8d,
|
||||
0xae, 0x53, 0x2c, 0xea, 0xfd, 0xbc, 0xbc, 0xc0, 0xf6, 0x69, 0x9d, 0x74, 0x39, 0x5c, 0x1b, 0xe4,
|
||||
0xc0, 0xac, 0x2a, 0xa9, 0x9b, 0x9d, 0x7e, 0xea, 0x63, 0x58, 0xf8, 0xb4, 0xe6, 0x3c, 0xbf, 0x45,
|
||||
0x99, 0x4f, 0xed, 0x4a, 0x40, 0x59, 0x95, 0x78, 0xeb, 0xc4, 0xaf, 0x3a, 0x65, 0x74, 0x13, 0xb2,
|
||||
0xfe, 0xbe, 0x4b, 0x0a, 0xca, 0x92, 0x72, 0x2e, 0x6f, 0x9c, 0x3f, 0x68, 0x96, 0x46, 0x5a, 0xcd,
|
||||
0x52, 0xf6, 0xc1, 0xbe, 0x4b, 0xde, 0x36, 0x4b, 0xa7, 0xfb, 0xb8, 0x71, 0x35, 0x16, 0x8e, 0xea,
|
||||
0xcb, 0x0c, 0x00, 0xb7, 0xda, 0x16, 0xa1, 0xd1, 0x53, 0x98, 0xe0, 0xe9, 0x96, 0x4d, 0xdf, 0x14,
|
||||
0x98, 0x93, 0x97, 0x2f, 0x69, 0x49, 0xb1, 0xe3, 0x53, 0x6b, 0xee, 0x5e, 0x85, 0x0b, 0x98, 0xc6,
|
||||
0xad, 0xb5, 0xc6, 0xb2, 0xf6, 0xd9, 0xce, 0x33, 0x62, 0xf9, 0xeb, 0xc4, 0x37, 0x0d, 0x24, 0x4f,
|
||||
0x01, 0x89, 0x0c, 0xc7, 0xa8, 0x68, 0x0b, 0xb2, 0xcc, 0x25, 0x56, 0x21, 0x23, 0xd0, 0x75, 0x6d,
|
||||
0xc0, 0x55, 0x6a, 0xc9, 0xe1, 0xb6, 0x5d, 0x62, 0x19, 0x53, 0x51, 0x8a, 0xfc, 0x0b, 0x0b, 0x28,
|
||||
0xf4, 0x05, 0x8c, 0x31, 0xdf, 0xf4, 0x03, 0x56, 0x18, 0x15, 0xa0, 0xcb, 0xef, 0x02, 0x2a, 0x1c,
|
||||
0x8d, 0x19, 0x09, 0x3b, 0x16, 0x7e, 0x63, 0x09, 0xa8, 0xbe, 0xce, 0xc0, 0x7c, 0x62, 0xbc, 0xea,
|
||||
0xd8, 0x65, 0xea, 0x53, 0xc7, 0x46, 0x37, 0x52, 0x75, 0x3f, 0xdb, 0x51, 0xf7, 0x85, 0x1e, 0x2e,
|
||||
0x49, 0xcd, 0xd1, 0x47, 0xf1, 0x79, 0x33, 0xc2, 0xfd, 0x4c, 0x3a, 0xf8, 0xdb, 0x66, 0x69, 0x36,
|
||||
0x76, 0x4b, 0x9f, 0x07, 0x35, 0x00, 0xd5, 0x4c, 0xe6, 0x3f, 0xf0, 0x4c, 0x9b, 0x85, 0xb0, 0xb4,
|
||||
0x4e, 0x64, 0xda, 0xff, 0x1f, 0xee, 0xa6, 0xb8, 0x87, 0xb1, 0x28, 0x43, 0xa2, 0xb5, 0x2e, 0x34,
|
||||
0xdc, 0x23, 0x02, 0xfa, 0x2f, 0x8c, 0x79, 0xc4, 0x64, 0x8e, 0x5d, 0xc8, 0x8a, 0x23, 0xc7, 0xf5,
|
||||
0xc2, 0x42, 0x8a, 0xa5, 0x16, 0xfd, 0x0f, 0xc6, 0xeb, 0x84, 0x31, 0xb3, 0x42, 0x0a, 0x39, 0x61,
|
||||
0x38, 0x2b, 0x0d, 0xc7, 0xd7, 0x43, 0x31, 0x8e, 0xf4, 0xea, 0xcf, 0x0a, 0xcc, 0x24, 0x75, 0x5a,
|
||||
0xa3, 0xcc, 0x47, 0x4f, 0xba, 0xba, 0x4f, 0x1b, 0x2e, 0x27, 0xee, 0x2d, 0x7a, 0x6f, 0x4e, 0x86,
|
||||
0x9b, 0x88, 0x24, 0x6d, 0x9d, 0xb7, 0x09, 0x39, 0xea, 0x93, 0x3a, 0xaf, 0xfa, 0xe8, 0xb9, 0xc9,
|
||||
0xcb, 0xe7, 0xdf, 0xa1, 0x4b, 0x8c, 0x69, 0x89, 0x9b, 0xbb, 0xcb, 0x11, 0x70, 0x08, 0xa4, 0xfe,
|
||||
0x3e, 0xda, 0x9e, 0x02, 0xef, 0x48, 0xf4, 0xa3, 0x02, 0x8b, 0xae, 0x47, 0x1d, 0x8f, 0xfa, 0xfb,
|
||||
0x6b, 0xa4, 0x41, 0x6a, 0xab, 0x8e, 0xbd, 0x4b, 0x2b, 0x81, 0x67, 0xf2, 0x5a, 0xca, 0xac, 0x6e,
|
||||
0x0d, 0x0c, 0xbd, 0xd9, 0x17, 0x02, 0x93, 0x5d, 0xe2, 0x11, 0xdb, 0x22, 0x86, 0x2a, 0xcf, 0xb4,
|
||||
0x78, 0x84, 0xf1, 0x11, 0x67, 0x41, 0xf7, 0x00, 0xd5, 0x4d, 0x9f, 0xd7, 0xb4, 0xb2, 0xe9, 0x11,
|
||||
0x8b, 0x94, 0x39, 0xaa, 0x68, 0xc9, 0x5c, 0xd2, 0x1f, 0xeb, 0x5d, 0x16, 0xb8, 0x87, 0x17, 0xfa,
|
||||
0x4e, 0x81, 0xf9, 0x72, 0xf7, 0xa2, 0x91, 0x9d, 0x79, 0x7d, 0xa8, 0x52, 0xf7, 0x58, 0x54, 0xc6,
|
||||
0x42, 0xab, 0x59, 0x9a, 0xef, 0xa1, 0xc0, 0xbd, 0xa2, 0xa1, 0x2f, 0x21, 0xe7, 0x05, 0x35, 0xc2,
|
||||
0x0a, 0x59, 0x71, 0xc3, 0x83, 0xc3, 0x6e, 0x3a, 0x35, 0x6a, 0xed, 0x63, 0xee, 0xf3, 0x39, 0xf5,
|
||||
0xab, 0xdb, 0x81, 0xd8, 0x58, 0x2c, 0xb9, 0x6e, 0xa1, 0xc2, 0x21, 0xaa, 0xfa, 0x02, 0xe6, 0x3a,
|
||||
0x17, 0x07, 0xaa, 0x02, 0x58, 0xd1, 0xac, 0xb2, 0x82, 0x22, 0xe2, 0x5e, 0x79, 0x87, 0xce, 0x8a,
|
||||
0x07, 0x3d, 0x59, 0x9b, 0xb1, 0x88, 0xe1, 0x36, 0x6c, 0xf5, 0x12, 0x4c, 0xdd, 0xf6, 0x9c, 0xc0,
|
||||
0x95, 0x87, 0x44, 0x4b, 0x90, 0xb5, 0xcd, 0x7a, 0xb4, 0x82, 0xe2, 0xbd, 0xb8, 0x61, 0xd6, 0x09,
|
||||
0x16, 0x1a, 0xf5, 0x07, 0x05, 0xa6, 0xd7, 0x68, 0x9d, 0xfa, 0x98, 0x30, 0xd7, 0xb1, 0x19, 0x41,
|
||||
0x57, 0x53, 0x6b, 0xeb, 0x4c, 0xc7, 0xda, 0x3a, 0x91, 0x32, 0x6e, 0x5b, 0x58, 0x4f, 0x60, 0xfc,
|
||||
0xab, 0x80, 0x04, 0xd4, 0xae, 0xc8, 0xb5, 0x7d, 0x75, 0x60, 0x86, 0x5b, 0xa1, 0x7d, 0xaa, 0xe3,
|
||||
0x8c, 0x49, 0xbe, 0x08, 0xa4, 0x06, 0x47, 0x90, 0xea, 0x1f, 0x0a, 0x9c, 0x11, 0x91, 0x49, 0xb9,
|
||||
0x7f, 0x27, 0xa3, 0x27, 0x50, 0x30, 0x19, 0x0b, 0x3c, 0x52, 0x5e, 0x75, 0x6c, 0x2b, 0xf0, 0xf8,
|
||||
0x0c, 0xec, 0x6f, 0x57, 0x4d, 0x8f, 0x30, 0x91, 0x4e, 0xce, 0x58, 0x92, 0xe9, 0x14, 0x56, 0xfa,
|
||||
0xd8, 0xe1, 0xbe, 0x08, 0x68, 0x0f, 0xa6, 0x6b, 0xed, 0xc9, 0xcb, 0x3c, 0xb5, 0x81, 0x79, 0xa6,
|
||||
0x4a, 0x66, 0x9c, 0x92, 0x47, 0x48, 0x97, 0x1d, 0xa7, 0xb1, 0xd5, 0xe7, 0x70, 0x6a, 0x83, 0x0f,
|
||||
0x32, 0x73, 0x02, 0xcf, 0x22, 0x49, 0x0f, 0xa2, 0x12, 0xe4, 0x1a, 0xc4, 0xdb, 0x09, 0xfb, 0x28,
|
||||
0x6f, 0xe4, 0x79, 0x07, 0x3e, 0xe2, 0x02, 0x1c, 0xca, 0xd1, 0xc7, 0x30, 0x6b, 0x27, 0x9e, 0x0f,
|
||||
0xf1, 0x1a, 0x2b, 0x8c, 0x09, 0xd3, 0xf9, 0x56, 0xb3, 0x34, 0xbb, 0x91, 0x56, 0xe1, 0x4e, 0x5b,
|
||||
0xf5, 0x30, 0x03, 0x0b, 0x7d, 0x5a, 0x1e, 0x3d, 0x82, 0x09, 0x26, 0x7f, 0xcb, 0x36, 0x3e, 0x37,
|
||||
0x30, 0x79, 0xe9, 0x9c, 0x6c, 0xdd, 0x08, 0x0d, 0xc7, 0x58, 0xc8, 0x85, 0x69, 0x4f, 0x9e, 0x41,
|
||||
0x04, 0x95, 0xdb, 0xf7, 0x83, 0x81, 0xe0, 0xdd, 0xf5, 0x49, 0xca, 0x8b, 0xdb, 0x11, 0x71, 0x3a,
|
||||
0x00, 0x7a, 0x01, 0x73, 0x6d, 0x89, 0x87, 0x41, 0x47, 0x45, 0xd0, 0x6b, 0x03, 0x83, 0xf6, 0xbc,
|
||||
0x17, 0xa3, 0x20, 0xe3, 0xce, 0x6d, 0x74, 0xe0, 0xe2, 0xae, 0x48, 0xea, 0xaf, 0x19, 0x38, 0x62,
|
||||
0x21, 0xbf, 0x07, 0x82, 0x65, 0xa6, 0x08, 0xd6, 0xcd, 0x63, 0x3c, 0x35, 0x7d, 0x09, 0x17, 0xed,
|
||||
0x20, 0x5c, 0x2b, 0xc7, 0x09, 0x72, 0x34, 0x01, 0xfb, 0x33, 0x03, 0xff, 0xe9, 0xef, 0x9c, 0x10,
|
||||
0xb2, 0xfb, 0xa9, 0xcd, 0xf6, 0x61, 0xc7, 0x66, 0x3b, 0x3b, 0x04, 0xc4, 0xbf, 0x04, 0xad, 0x83,
|
||||
0xa0, 0xbd, 0x51, 0xa0, 0xd8, 0xbf, 0x6e, 0xef, 0x81, 0xb0, 0x3d, 0x4d, 0x13, 0xb6, 0x1b, 0xc7,
|
||||
0xe8, 0xb2, 0x3e, 0x04, 0xee, 0xf6, 0x51, 0xcd, 0x15, 0x33, 0xad, 0x21, 0x9e, 0xda, 0x83, 0x23,
|
||||
0x6b, 0x25, 0x98, 0xe1, 0x80, 0xbf, 0x0c, 0x29, 0xef, 0x4f, 0x6c, 0x73, 0xa7, 0x46, 0xea, 0xc4,
|
||||
0xf6, 0x65, 0x47, 0x52, 0x18, 0xaf, 0x85, 0x4f, 0xa4, 0x9c, 0x6b, 0x63, 0xb8, 0x97, 0xe9, 0xa8,
|
||||
0x27, 0x35, 0x7c, 0x8e, 0xa5, 0x19, 0x8e, 0xf0, 0xd5, 0x97, 0x0a, 0x2c, 0x0d, 0x1a, 0x57, 0xf4,
|
||||
0x75, 0x0f, 0xda, 0x73, 0x1c, 0x56, 0x3b, 0x3c, 0x0d, 0xfa, 0x49, 0x81, 0x93, 0xbd, 0xc8, 0x05,
|
||||
0x9f, 0x00, 0xce, 0x28, 0x62, 0x3a, 0x10, 0x4f, 0xc0, 0x96, 0x90, 0x62, 0xa9, 0x45, 0x17, 0x60,
|
||||
0xa2, 0x6a, 0xda, 0xe5, 0x6d, 0xfa, 0x4d, 0x44, 0x76, 0xe3, 0x1e, 0xbc, 0x23, 0xe5, 0x38, 0xb6,
|
||||
0x40, 0xb7, 0x60, 0x4e, 0xf8, 0xad, 0x11, 0xbb, 0xe2, 0x57, 0x45, 0xb1, 0xc4, 0x34, 0xe7, 0x92,
|
||||
0x47, 0x61, 0xab, 0x43, 0x8f, 0xbb, 0x3c, 0xd4, 0xbf, 0x14, 0x40, 0xff, 0xe4, 0xbd, 0x3f, 0x0f,
|
||||
0x79, 0xd3, 0xa5, 0x82, 0xf6, 0x85, 0x53, 0x90, 0x37, 0xa6, 0x5b, 0xcd, 0x52, 0x7e, 0x65, 0xf3,
|
||||
0x6e, 0x28, 0xc4, 0x89, 0x9e, 0x1b, 0x47, 0x0f, 0x61, 0xf8, 0xe0, 0x49, 0xe3, 0x28, 0x30, 0xc3,
|
||||
0x89, 0x1e, 0x5d, 0x87, 0x29, 0xab, 0x16, 0x30, 0x9f, 0x78, 0xdb, 0x96, 0xe3, 0x12, 0xb1, 0x35,
|
||||
0x26, 0x8c, 0x93, 0x32, 0xa7, 0xa9, 0xd5, 0x36, 0x1d, 0x4e, 0x59, 0x22, 0x0d, 0x80, 0xb7, 0x3c,
|
||||
0x73, 0x4d, 0x1e, 0x27, 0x27, 0xe2, 0xcc, 0xf0, 0x0b, 0xdb, 0x88, 0xa5, 0xb8, 0xcd, 0x42, 0x7d,
|
||||
0x06, 0xa7, 0xb6, 0x89, 0xd7, 0xa0, 0x16, 0x59, 0xb1, 0x2c, 0x27, 0xb0, 0xfd, 0x88, 0xc0, 0xea,
|
||||
0x90, 0x8f, 0xcd, 0xe4, 0x54, 0x9c, 0x90, 0xf1, 0xf3, 0x31, 0x16, 0x4e, 0x6c, 0xe2, 0x31, 0xcc,
|
||||
0xf4, 0x1d, 0xc3, 0x5f, 0x32, 0x30, 0x9e, 0xc0, 0x67, 0xf7, 0xa8, 0x5d, 0x96, 0xc8, 0xa7, 0x23,
|
||||
0xeb, 0xfb, 0xd4, 0x2e, 0xbf, 0x6d, 0x96, 0x26, 0xa5, 0x19, 0xff, 0xc4, 0xc2, 0x10, 0xdd, 0x83,
|
||||
0x6c, 0xc0, 0x88, 0x27, 0x07, 0xec, 0xc2, 0xc0, 0x6e, 0x7e, 0xc8, 0x88, 0x17, 0x31, 0xa0, 0x09,
|
||||
0x0e, 0xcd, 0x05, 0x58, 0x60, 0xa0, 0x0d, 0xc8, 0x55, 0xf8, 0xad, 0xc8, 0xcd, 0x7f, 0x71, 0x20,
|
||||
0x58, 0x3b, 0xb5, 0x0f, 0x1b, 0x41, 0x48, 0x70, 0x08, 0x83, 0x3c, 0x98, 0x61, 0xa9, 0x22, 0x8a,
|
||||
0x0b, 0x1b, 0x86, 0xd1, 0xf4, 0xac, 0xbd, 0x81, 0x5a, 0xcd, 0xd2, 0x4c, 0x5a, 0x85, 0x3b, 0x22,
|
||||
0xa8, 0x3a, 0x4c, 0xb6, 0xa5, 0x38, 0x78, 0x09, 0x1a, 0xda, 0xc1, 0x61, 0x71, 0xe4, 0xd5, 0x61,
|
||||
0x71, 0xe4, 0xf5, 0x61, 0x71, 0xe4, 0xdb, 0x56, 0x51, 0x39, 0x68, 0x15, 0x95, 0x57, 0xad, 0xa2,
|
||||
0xf2, 0xba, 0x55, 0x54, 0xde, 0xb4, 0x8a, 0xca, 0xf7, 0xbf, 0x15, 0x47, 0x1e, 0x4f, 0x44, 0x47,
|
||||
0xfb, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xb3, 0x17, 0x48, 0x11, 0x14, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *FlowDistinguisherMethod) Marshal() (dAtA []byte, err error) {
|
||||
|
|
@@ -1029,6 +1094,82 @@ func (m *GroupSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
|||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *LimitResponse) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *LimitResponse) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *LimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.Queuing != nil {
|
||||
{
|
||||
size, err := m.Queuing.MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
}
|
||||
i -= len(m.Type)
|
||||
copy(dAtA[i:], m.Type)
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *LimitedPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *LimitedPriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *LimitedPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
{
|
||||
size, err := m.LimitResponse.MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(m.AssuredConcurrencyShares))
|
||||
i--
|
||||
dAtA[i] = 0x8
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *NonResourcePolicyRule) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
|
|
@@ -1336,9 +1477,9 @@ func (m *PriorityLevelConfigurationSpec) MarshalToSizedBuffer(dAtA []byte) (int,
|
|||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.Queuing != nil {
|
||||
if m.Limited != nil {
|
||||
{
|
||||
size, err := m.Queuing.MarshalToSizedBuffer(dAtA[:i])
|
||||
size, err := m.Limited.MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
|
@@ -1415,14 +1556,11 @@ func (m *QueuingConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
|||
_ = l
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(m.QueueLengthLimit))
|
||||
i--
|
||||
dAtA[i] = 0x20
|
||||
dAtA[i] = 0x18
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(m.HandSize))
|
||||
i--
|
||||
dAtA[i] = 0x18
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(m.Queues))
|
||||
i--
|
||||
dAtA[i] = 0x10
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(m.AssuredConcurrencyShares))
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(m.Queues))
|
||||
i--
|
||||
dAtA[i] = 0x8
|
||||
return len(dAtA) - i, nil
|
||||
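The renumbered constants in the hunk above are protobuf field keys: each is (field number << 3) | wire type, so dropping AssuredConcurrencyShares from QueuingConfiguration shifts Queues, HandSize, and QueueLengthLimit down one field number. A tiny sketch of the arithmetic, purely illustrative:

```go
package main

import "fmt"

// tagByte reproduces the constant the generated marshaler writes before each
// field: the protobuf key is (field number << 3) | wire type, where wire
// type 0 is varint and 2 is length-delimited.
func tagByte(fieldNumber, wireType int) byte {
	return byte(fieldNumber<<3 | wireType)
}

func main() {
	// QueuingConfiguration after the renumbering above:
	fmt.Printf("queues=%#x handSize=%#x queueLengthLimit=%#x\n",
		tagByte(1, 0), // 0x8
		tagByte(2, 0), // 0x10
		tagByte(3, 0), // 0x18
	)
}
```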
|
|
@@ -1448,6 +1586,23 @@ func (m *ResourcePolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
|||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Namespaces) > 0 {
|
||||
for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- {
|
||||
i -= len(m.Namespaces[iNdEx])
|
||||
copy(dAtA[i:], m.Namespaces[iNdEx])
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx])))
|
||||
i--
|
||||
dAtA[i] = 0x2a
|
||||
}
|
||||
}
|
||||
i--
|
||||
if m.ClusterScope {
|
||||
dAtA[i] = 1
|
||||
} else {
|
||||
dAtA[i] = 0
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x20
|
||||
if len(m.Resources) > 0 {
|
||||
for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
|
||||
i -= len(m.Resources[iNdEx])
|
||||
|
|
@@ -1724,6 +1879,33 @@ func (m *GroupSubject) Size() (n int) {
|
|||
return n
|
||||
}
|
||||
|
||||
func (m *LimitResponse) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Type)
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
if m.Queuing != nil {
|
||||
l = m.Queuing.Size()
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *LimitedPriorityLevelConfiguration) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
n += 1 + sovGenerated(uint64(m.AssuredConcurrencyShares))
|
||||
l = m.LimitResponse.Size()
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *NonResourcePolicyRule) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
|
|
@@ -1842,8 +2024,8 @@ func (m *PriorityLevelConfigurationSpec) Size() (n int) {
|
|||
_ = l
|
||||
l = len(m.Type)
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
if m.Queuing != nil {
|
||||
l = m.Queuing.Size()
|
||||
if m.Limited != nil {
|
||||
l = m.Limited.Size()
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
}
|
||||
return n
|
||||
|
|
@@ -1870,7 +2052,6 @@ func (m *QueuingConfiguration) Size() (n int) {
|
|||
}
|
||||
var l int
|
||||
_ = l
|
||||
n += 1 + sovGenerated(uint64(m.AssuredConcurrencyShares))
|
||||
n += 1 + sovGenerated(uint64(m.Queues))
|
||||
n += 1 + sovGenerated(uint64(m.HandSize))
|
||||
n += 1 + sovGenerated(uint64(m.QueueLengthLimit))
|
||||
|
|
@@ -1901,6 +2082,13 @@ func (m *ResourcePolicyRule) Size() (n int) {
|
|||
n += 1 + l + sovGenerated(uint64(l))
|
||||
}
|
||||
}
|
||||
n += 2
|
||||
if len(m.Namespaces) > 0 {
|
||||
for _, s := range m.Namespaces {
|
||||
l = len(s)
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
|
|
@@ -2052,6 +2240,28 @@ func (this *GroupSubject) String() string {
|
|||
}, "")
|
||||
return s
|
||||
}
|
||||
func (this *LimitResponse) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&LimitResponse{`,
|
||||
`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
|
||||
`Queuing:` + strings.Replace(this.Queuing.String(), "QueuingConfiguration", "QueuingConfiguration", 1) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func (this *LimitedPriorityLevelConfiguration) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&LimitedPriorityLevelConfiguration{`,
|
||||
`AssuredConcurrencyShares:` + fmt.Sprintf("%v", this.AssuredConcurrencyShares) + `,`,
|
||||
`LimitResponse:` + strings.Replace(strings.Replace(this.LimitResponse.String(), "LimitResponse", "LimitResponse", 1), `&`, ``, 1) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func (this *NonResourcePolicyRule) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
|
|
@@ -2148,7 +2358,7 @@ func (this *PriorityLevelConfigurationSpec) String() string {
|
|||
}
|
||||
s := strings.Join([]string{`&PriorityLevelConfigurationSpec{`,
|
||||
`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
|
||||
`Queuing:` + strings.Replace(this.Queuing.String(), "QueuingConfiguration", "QueuingConfiguration", 1) + `,`,
|
||||
`Limited:` + strings.Replace(this.Limited.String(), "LimitedPriorityLevelConfiguration", "LimitedPriorityLevelConfiguration", 1) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
|
|
@ -2173,7 +2383,6 @@ func (this *QueuingConfiguration) String() string {
|
|||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&QueuingConfiguration{`,
|
||||
`AssuredConcurrencyShares:` + fmt.Sprintf("%v", this.AssuredConcurrencyShares) + `,`,
|
||||
`Queues:` + fmt.Sprintf("%v", this.Queues) + `,`,
|
||||
`HandSize:` + fmt.Sprintf("%v", this.HandSize) + `,`,
|
||||
`QueueLengthLimit:` + fmt.Sprintf("%v", this.QueueLengthLimit) + `,`,
|
||||
|
|
@ -2189,6 +2398,8 @@ func (this *ResourcePolicyRule) String() string {
|
|||
`Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`,
|
||||
`APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`,
|
||||
`Resources:` + fmt.Sprintf("%v", this.Resources) + `,`,
|
||||
`ClusterScope:` + fmt.Sprintf("%v", this.ClusterScope) + `,`,
|
||||
`Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
|
|
@ -3153,6 +3364,232 @@ func (m *GroupSubject) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
func (m *LimitResponse) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: LimitResponse: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: LimitResponse: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Type = LimitResponseType(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Queuing", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Queuing == nil {
|
||||
m.Queuing = &QueuingConfiguration{}
|
||||
}
|
||||
if err := m.Queuing.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: LimitedPriorityLevelConfiguration: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: LimitedPriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field AssuredConcurrencyShares", wireType)
|
||||
}
|
||||
m.AssuredConcurrencyShares = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.AssuredConcurrencyShares |= int32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field LimitResponse", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if err := m.LimitResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *NonResourcePolicyRule) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
|
|
@ -4055,11 +4492,11 @@ func (m *PriorityLevelConfigurationSpec) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Type = PriorityLevelQueueingType(dAtA[iNdEx:postIndex])
|
||||
m.Type = PriorityLevelEnablement(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Queuing", wireType)
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Limited", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
|
|
@ -4086,10 +4523,10 @@ func (m *PriorityLevelConfigurationSpec) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Queuing == nil {
|
||||
m.Queuing = &QueuingConfiguration{}
|
||||
if m.Limited == nil {
|
||||
m.Limited = &LimitedPriorityLevelConfiguration{}
|
||||
}
|
||||
if err := m.Queuing.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
if err := m.Limited.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
|
|
@ -4234,25 +4671,6 @@ func (m *QueuingConfiguration) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field AssuredConcurrencyShares", wireType)
|
||||
}
|
||||
m.AssuredConcurrencyShares = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.AssuredConcurrencyShares |= int32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Queues", wireType)
|
||||
}
|
||||
|
|
@ -4271,7 +4689,7 @@ func (m *QueuingConfiguration) Unmarshal(dAtA []byte) error {
|
|||
break
|
||||
}
|
||||
}
|
||||
case 3:
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field HandSize", wireType)
|
||||
}
|
||||
|
|
@ -4290,7 +4708,7 @@ func (m *QueuingConfiguration) Unmarshal(dAtA []byte) error {
|
|||
break
|
||||
}
|
||||
}
|
||||
case 4:
|
||||
case 3:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field QueueLengthLimit", wireType)
|
||||
}
|
||||
|
|
@ -4458,6 +4876,58 @@ func (m *ResourcePolicyRule) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex]))
|
||||
iNdEx = postIndex
|
||||
case 4:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field ClusterScope", wireType)
|
||||
}
|
||||
var v int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
m.ClusterScope = bool(v != 0)
|
||||
case 5:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex]))
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||
|
|
|
|||
|
|
@ -133,6 +133,52 @@ message GroupSubject {
|
|||
optional string name = 1;
|
||||
}
|
||||
|
||||
// LimitResponse defines how to handle requests that can not be executed right now.
|
||||
// +union
|
||||
message LimitResponse {
|
||||
// `type` is "Queue" or "Reject".
|
||||
// "Queue" means that requests that can not be executed upon arrival
|
||||
// are held in a queue until they can be executed or a queuing limit
|
||||
// is reached.
|
||||
// "Reject" means that requests that can not be executed upon arrival
|
||||
// are rejected.
|
||||
// Required.
|
||||
// +unionDiscriminator
|
||||
optional string type = 1;
|
||||
|
||||
// `queuing` holds the configuration parameters for queuing.
|
||||
// This field may be non-empty only if `type` is `"Queue"`.
|
||||
// +optional
|
||||
optional QueuingConfiguration queuing = 2;
|
||||
}
|
||||
|
||||
// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits.
|
||||
// It addresses two issues:
|
||||
// * How are requests for this priority level limited?
|
||||
// * What should be done with requests that exceed the limit?
|
||||
message LimitedPriorityLevelConfiguration {
|
||||
// `assuredConcurrencyShares` (ACS) configures the execution
|
||||
// limit, which is a limit on the number of requests of this
|
||||
// priority level that may be executing at a given time. ACS must
|
||||
// be a positive number. The server's concurrency limit (SCL) is
|
||||
// divided among the concurrency-controlled priority levels in
|
||||
// proportion to their assured concurrency shares. This produces
|
||||
// the assured concurrency value (ACV) --- the number of requests
|
||||
// that may be executing at a time --- for each such priority
|
||||
// level:
|
||||
//
|
||||
// ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )
|
||||
//
|
||||
// bigger numbers of ACS mean more reserved concurrent requests (at the
|
||||
// expense of every other PL).
|
||||
// This field has a default value of 30.
|
||||
// +optional
|
||||
optional int32 assuredConcurrencyShares = 1;
|
||||
|
||||
// `limitResponse` indicates what to do with requests that can not be executed right now
|
||||
optional LimitResponse limitResponse = 2;
|
||||
}
|
||||
|
||||
// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the
|
||||
// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member
|
||||
// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.
|
||||
|
|
@ -240,23 +286,25 @@ message PriorityLevelConfigurationReference {
|
|||
optional string name = 1;
|
||||
}
|
||||
|
||||
// PriorityLevelConfigurationSpec is specification of a priority level
|
||||
// PriorityLevelConfigurationSpec specifies the configuration of a priority level.
|
||||
// +union
|
||||
message PriorityLevelConfigurationSpec {
|
||||
// `type` indicates whether this priority level does
|
||||
// queuing or is exempt. Valid values are "Queuing" and "Exempt".
|
||||
// "Exempt" means that requests of this priority level are not subject
|
||||
// to concurrency limits (and thus are never queued) and do not detract
|
||||
// from the concurrency available for non-exempt requests. The "Exempt"
|
||||
// type is useful for apiserver self-requests and system administrator use.
|
||||
// `type` indicates whether this priority level is subject to
|
||||
// limitation on request execution. A value of `"Exempt"` means
|
||||
// that requests of this priority level are not subject to a limit
|
||||
// (and thus are never queued) and do not detract from the
|
||||
// capacity made available to other priority levels. A value of
|
||||
// `"Limited"` means that (a) requests of this priority level
|
||||
// _are_ subject to limits and (b) some of the server's limited
|
||||
// capacity is made available exclusively to this priority level.
|
||||
// Required.
|
||||
// +unionDiscriminator
|
||||
optional string type = 1;
|
||||
|
||||
// `queuing` holds the configuration parameters that are
|
||||
// only meaningful for a priority level that does queuing (i.e.,
|
||||
// is not exempt). This field must be non-empty if and only if
|
||||
// `queuingType` is `"Queuing"`.
|
||||
// `limited` specifies how requests are handled for a Limited priority level.
|
||||
// This field must be non-empty if and only if `type` is `"Limited"`.
|
||||
// +optional
|
||||
optional QueuingConfiguration queuing = 2;
|
||||
optional LimitedPriorityLevelConfiguration limited = 2;
|
||||
}
|
||||
|
||||
// PriorityLevelConfigurationStatus represents the current state of a "request-priority".
|
||||
|
|
@ -268,22 +316,8 @@ message PriorityLevelConfigurationStatus {
|
|||
repeated PriorityLevelConfigurationCondition conditions = 1;
|
||||
}
|
||||
|
||||
// QueuingConfiguration holds the configuration parameters that are specific to a priority level that is subject to concurrency controls
|
||||
// QueuingConfiguration holds the configuration parameters for queuing
|
||||
message QueuingConfiguration {
|
||||
// `assuredConcurrencyShares` (ACS) must be a positive number. The
|
||||
// server's concurrency limit (SCL) is divided among the
|
||||
// concurrency-controlled priority levels in proportion to their
|
||||
// assured concurrency shares. This produces the assured
|
||||
// concurrency value (ACV) for each such priority level:
|
||||
//
|
||||
// ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )
|
||||
//
|
||||
// bigger numbers of ACS mean more reserved concurrent requests (at the
|
||||
// expense of every other PL).
|
||||
// This field has a default value of 30.
|
||||
// +optional
|
||||
optional int32 assuredConcurrencyShares = 1;
|
||||
|
||||
// `queues` is the number of queues for this priority level. The
|
||||
// queues exist independently at each apiserver. The value must be
|
||||
// positive. Setting it to 1 effectively precludes
|
||||
|
|
@ -291,7 +325,7 @@ message QueuingConfiguration {
|
|||
// associated flow schemas irrelevant. This field has a default
|
||||
// value of 64.
|
||||
// +optional
|
||||
optional int32 queues = 2;
|
||||
optional int32 queues = 1;
|
||||
|
||||
// `handSize` is a small positive number that configures the
|
||||
// shuffle sharding of requests into queues. When enqueuing a request
|
||||
|
|
@ -305,40 +339,63 @@ message QueuingConfiguration {
|
|||
// documentation for more extensive guidance on setting this
|
||||
// field. This field has a default value of 8.
|
||||
// +optional
|
||||
optional int32 handSize = 3;
|
||||
optional int32 handSize = 2;
|
||||
|
||||
// `queueLengthLimit` is the maximum number of requests allowed to
|
||||
// be waiting in a given queue of this priority level at a time;
|
||||
// excess requests are rejected. This value must be positive. If
|
||||
// not specified, it will be defaulted to 50.
|
||||
// +optional
|
||||
optional int32 queueLengthLimit = 4;
|
||||
optional int32 queueLengthLimit = 3;
|
||||
}
|
||||
|
||||
// ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target
|
||||
// resource. A ResourcePolicyRule matches a request if and only if: (a) at least one member
|
||||
// of verbs matches the request, (b) at least one member of apiGroups matches the request, and (c) at least one member
|
||||
// of resources matches the request.
|
||||
// ResourcePolicyRule is a predicate that matches some resource
|
||||
// requests, testing the request's verb and the target resource. A
|
||||
// ResourcePolicyRule matches a resource request if and only if: (a)
|
||||
// at least one member of verbs matches the request, (b) at least one
|
||||
// member of apiGroups matches the request, (c) at least one member of
|
||||
// resources matches the request, and (d) at least one member of
|
||||
// namespaces matches the request.
|
||||
message ResourcePolicyRule {
|
||||
// `verbs` is a list of matching verbs and may not be empty.
|
||||
// "*" matches all verbs. if it is present, it must be the only entry.
|
||||
// "*" matches all verbs and, if present, must be the only entry.
|
||||
// +listType=set
|
||||
// Required.
|
||||
repeated string verbs = 1;
|
||||
|
||||
// `apiGroups` is a list of matching API groups and may not be empty.
|
||||
// "*" matches all api-groups. if it is present, it must be the only entry.
|
||||
// "*" matches all API groups and, if present, must be the only entry.
|
||||
// +listType=set
|
||||
// Required.
|
||||
repeated string apiGroups = 2;
|
||||
|
||||
// `resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource.
|
||||
// For example, [ "services", "nodes/status" ].
|
||||
// This list may not be empty.
|
||||
// "*" matches all resources. if it is present, it must be the only entry.
|
||||
// +listType=set
|
||||
// `resources` is a list of matching resources (i.e., lowercase
|
||||
// and plural) with, if desired, subresource. For example, [
|
||||
// "services", "nodes/status" ]. This list may not be empty.
|
||||
// "*" matches all resources and, if present, must be the only entry.
|
||||
// Required.
|
||||
// +listType=set
|
||||
repeated string resources = 3;
|
||||
|
||||
// `clusterScope` indicates whether to match requests that do not
|
||||
// specify a namespace (which happens either because the resource
|
||||
// is not namespaced or the request targets all namespaces).
|
||||
// If this field is omitted or false then the `namespaces` field
|
||||
// must contain a non-empty list.
|
||||
// +optional
|
||||
optional bool clusterScope = 4;
|
||||
|
||||
// `namespaces` is a list of target namespaces that restricts
|
||||
// matches. A request that specifies a target namespace matches
|
||||
// only if either (a) this list contains that target namespace or
|
||||
// (b) this list contains "*". Note that "*" matches any
|
||||
// specified namespace but does not match a request that _does
|
||||
// not specify_ a namespace (see the `clusterScope` field for
|
||||
// that).
|
||||
// This list may be empty, but only if `clusterScope` is true.
|
||||
// +optional
|
||||
// +listType=set
|
||||
repeated string namespaces = 5;
|
||||
}
|
||||
|
||||
// ServiceAccountSubject holds detailed information for service-account-kind subject.
|
||||
|
|
|
|||
|
|
@ -26,8 +26,9 @@ const (
|
|||
ResourceAll = "*"
|
||||
VerbAll = "*"
|
||||
NonResourceAll = "*"
|
||||
NameAll = "*"
|
||||
|
||||
NameAll = "*"
|
||||
NamespaceEvery = "*" // matches every particular namespace
|
||||
)
|
||||
|
||||
// System preset priority level names
|
||||
|
|
@ -210,28 +211,53 @@ type ServiceAccountSubject struct {
|
|||
Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
|
||||
}
|
||||
|
||||
// ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target
|
||||
// resource. A ResourcePolicyRule matches a request if and only if: (a) at least one member
|
||||
// of verbs matches the request, (b) at least one member of apiGroups matches the request, and (c) at least one member
|
||||
// of resources matches the request.
|
||||
// ResourcePolicyRule is a predicate that matches some resource
|
||||
// requests, testing the request's verb and the target resource. A
|
||||
// ResourcePolicyRule matches a resource request if and only if: (a)
|
||||
// at least one member of verbs matches the request, (b) at least one
|
||||
// member of apiGroups matches the request, (c) at least one member of
|
||||
// resources matches the request, and (d) at least one member of
|
||||
// namespaces matches the request.
|
||||
type ResourcePolicyRule struct {
|
||||
// `verbs` is a list of matching verbs and may not be empty.
|
||||
// "*" matches all verbs. if it is present, it must be the only entry.
|
||||
// "*" matches all verbs and, if present, must be the only entry.
|
||||
// +listType=set
|
||||
// Required.
|
||||
Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"`
|
||||
|
||||
// `apiGroups` is a list of matching API groups and may not be empty.
|
||||
// "*" matches all api-groups. if it is present, it must be the only entry.
|
||||
// "*" matches all API groups and, if present, must be the only entry.
|
||||
// +listType=set
|
||||
// Required.
|
||||
APIGroups []string `json:"apiGroups" protobuf:"bytes,2,rep,name=apiGroups"`
|
||||
// `resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource.
|
||||
// For example, [ "services", "nodes/status" ].
|
||||
// This list may not be empty.
|
||||
// "*" matches all resources. if it is present, it must be the only entry.
|
||||
// +listType=set
|
||||
|
||||
// `resources` is a list of matching resources (i.e., lowercase
|
||||
// and plural) with, if desired, subresource. For example, [
|
||||
// "services", "nodes/status" ]. This list may not be empty.
|
||||
// "*" matches all resources and, if present, must be the only entry.
|
||||
// Required.
|
||||
// +listType=set
|
||||
Resources []string `json:"resources" protobuf:"bytes,3,rep,name=resources"`
|
||||
|
||||
// `clusterScope` indicates whether to match requests that do not
|
||||
// specify a namespace (which happens either because the resource
|
||||
// is not namespaced or the request targets all namespaces).
|
||||
// If this field is omitted or false then the `namespaces` field
|
||||
// must contain a non-empty list.
|
||||
// +optional
|
||||
ClusterScope bool `json:"clusterScope,omitempty" protobuf:"varint,4,opt,name=clusterScope"`
|
||||
|
||||
// `namespaces` is a list of target namespaces that restricts
|
||||
// matches. A request that specifies a target namespace matches
|
||||
// only if either (a) this list contains that target namespace or
|
||||
// (b) this list contains "*". Note that "*" matches any
|
||||
// specified namespace but does not match a request that _does
|
||||
// not specify_ a namespace (see the `clusterScope` field for
|
||||
// that).
|
||||
// This list may be empty, but only if `clusterScope` is true.
|
||||
// +optional
|
||||
// +listType=set
|
||||
Namespaces []string `json:"namespaces" protobuf:"bytes,5,rep,name=namespaces"`
|
||||
}
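
// Illustrative sketch (not part of this package) of the matching rules
// documented on the fields above; the request attributes passed in are
// hypothetical stand-ins, and only the "*" / clusterScope / namespaces
// semantics are taken from the field comments.
func matchesWildcard(list []string, v string) bool {
	for _, x := range list {
		if x == "*" || x == v {
			return true
		}
	}
	return false
}

func resourceRuleMatches(r ResourcePolicyRule, verb, apiGroup, resource, namespace string) bool {
	if !matchesWildcard(r.Verbs, verb) ||
		!matchesWildcard(r.APIGroups, apiGroup) ||
		!matchesWildcard(r.Resources, resource) {
		return false
	}
	if namespace == "" { // the request does not specify a namespace
		return r.ClusterScope
	}
	return matchesWildcard(r.Namespaces, namespace)
}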
|
||||
|
||||
// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the
|
||||
|
|
@ -320,44 +346,53 @@ type PriorityLevelConfigurationList struct {
|
|||
Items []PriorityLevelConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// PriorityLevelConfigurationSpec is specification of a priority level
|
||||
// PriorityLevelConfigurationSpec specifies the configuration of a priority level.
|
||||
// +union
|
||||
type PriorityLevelConfigurationSpec struct {
|
||||
// `type` indicates whether this priority level does
|
||||
// queuing or is exempt. Valid values are "Queuing" and "Exempt".
|
||||
// "Exempt" means that requests of this priority level are not subject
|
||||
// to concurrency limits (and thus are never queued) and do not detract
|
||||
// from the concurrency available for non-exempt requests. The "Exempt"
|
||||
// type is useful for apiserver self-requests and system administrator use.
|
||||
// `type` indicates whether this priority level is subject to
|
||||
// limitation on request execution. A value of `"Exempt"` means
|
||||
// that requests of this priority level are not subject to a limit
|
||||
// (and thus are never queued) and do not detract from the
|
||||
// capacity made available to other priority levels. A value of
|
||||
// `"Limited"` means that (a) requests of this priority level
|
||||
// _are_ subject to limits and (b) some of the server's limited
|
||||
// capacity is made available exclusively to this priority level.
|
||||
// Required.
|
||||
Type PriorityLevelQueueingType `json:"type" protobuf:"varint,1,opt,name=type"`
|
||||
// +unionDiscriminator
|
||||
Type PriorityLevelEnablement `json:"type" protobuf:"bytes,1,opt,name=type"`
|
||||
|
||||
// `queuing` holds the configuration parameters that are
|
||||
// only meaningful for a priority level that does queuing (i.e.,
|
||||
// is not exempt). This field must be non-empty if and only if
|
||||
// `queuingType` is `"Queuing"`.
|
||||
// `limited` specifies how requests are handled for a Limited priority level.
|
||||
// This field must be non-empty if and only if `type` is `"Limited"`.
|
||||
// +optional
|
||||
Queuing *QueuingConfiguration `json:"queuing,omitempty" protobuf:"bytes,2,opt,name=queuing"`
|
||||
Limited *LimitedPriorityLevelConfiguration `json:"limited,omitempty" protobuf:"bytes,2,opt,name=limited"`
|
||||
}
|
||||
|
||||
// PriorityLevelQueueingType identifies the queuing nature of a priority level
|
||||
type PriorityLevelQueueingType string
|
||||
// PriorityLevelEnablement indicates whether limits on execution are enabled for the priority level
|
||||
type PriorityLevelEnablement string
|
||||
|
||||
// Supported queuing types.
|
||||
// Supported priority level enablement values.
|
||||
const (
|
||||
// PriorityLevelQueuingTypeQueueing is the PriorityLevelQueueingType for priority levels that queue
|
||||
PriorityLevelQueuingTypeQueueing PriorityLevelQueueingType = "Queuing"
|
||||
// PriorityLevelEnablementExempt means that requests are not subject to limits
|
||||
PriorityLevelEnablementExempt PriorityLevelEnablement = "Exempt"
|
||||
|
||||
// PriorityLevelQueuingTypeExempt is the PriorityLevelQueueingType for priority levels that are exempt from concurrency controls
|
||||
PriorityLevelQueuingTypeExempt PriorityLevelQueueingType = "Exempt"
|
||||
// PriorityLevelEnablementLimited means that requests are subject to limits
|
||||
PriorityLevelEnablementLimited PriorityLevelEnablement = "Limited"
|
||||
)
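
// Hedged example of populating the union: set the `type` discriminator and
// exactly the matching member. The numeric values below simply echo the
// documented defaults and are illustrative only.
var examplePriorityLevelSpecs = []PriorityLevelConfigurationSpec{
	{Type: PriorityLevelEnablementExempt},
	{
		Type: PriorityLevelEnablementLimited,
		Limited: &LimitedPriorityLevelConfiguration{
			AssuredConcurrencyShares: 30,
			LimitResponse: LimitResponse{
				Type:    LimitResponseTypeQueue,
				Queuing: &QueuingConfiguration{Queues: 64, HandSize: 8, QueueLengthLimit: 50},
			},
		},
	},
}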
|
||||
|
||||
// QueuingConfiguration holds the configuration parameters that are specific to a priority level that is subject to concurrency controls
|
||||
type QueuingConfiguration struct {
|
||||
// `assuredConcurrencyShares` (ACS) must be a positive number. The
|
||||
// server's concurrency limit (SCL) is divided among the
|
||||
// concurrency-controlled priority levels in proportion to their
|
||||
// assured concurrency shares. This produces the assured
|
||||
// concurrency value (ACV) for each such priority level:
|
||||
// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits.
|
||||
// It addresses two issues:
|
||||
// * How are requests for this priority level limited?
|
||||
// * What should be done with requests that exceed the limit?
|
||||
type LimitedPriorityLevelConfiguration struct {
|
||||
// `assuredConcurrencyShares` (ACS) configures the execution
|
||||
// limit, which is a limit on the number of requests of this
|
||||
// priority level that may be executing at a given time. ACS must
|
||||
// be a positive number. The server's concurrency limit (SCL) is
|
||||
// divided among the concurrency-controlled priority levels in
|
||||
// proportion to their assured concurrency shares. This produces
|
||||
// the assured concurrency value (ACV) --- the number of requests
|
||||
// that may be executing at a time --- for each such priority
|
||||
// level:
|
||||
//
|
||||
// ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )
|
||||
//
|
||||
|
|
@ -367,6 +402,43 @@ type QueuingConfiguration struct {
|
|||
// +optional
|
||||
AssuredConcurrencyShares int32 `json:"assuredConcurrencyShares" protobuf:"varint,1,opt,name=assuredConcurrencyShares"`
|
||||
|
||||
// `limitResponse` indicates what to do with requests that can not be executed right now
|
||||
LimitResponse LimitResponse `json:"limitResponse,omitempty" protobuf:"bytes,2,opt,name=limitResponse"`
|
||||
}
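
// Rough sketch of the ACV formula from the comment above, using made-up
// numbers; assumes "math" is imported, which this file normally does not do.
func assuredConcurrencyValues(serverConcurrencyLimit int32, shares map[string]int32) map[string]int32 {
	var sum int32
	for _, s := range shares {
		sum += s
	}
	out := make(map[string]int32, len(shares))
	for name, s := range shares {
		// ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )
		out[name] = int32(math.Ceil(float64(serverConcurrencyLimit) * float64(s) / float64(sum)))
	}
	return out
}

// For example, SCL = 600 with shares {"workload": 30, "system": 10} gives
// workload: 450 and system: 150 concurrent requests.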
|
||||
|
||||
// LimitResponse defines how to handle requests that can not be executed right now.
|
||||
// +union
|
||||
type LimitResponse struct {
|
||||
// `type` is "Queue" or "Reject".
|
||||
// "Queue" means that requests that can not be executed upon arrival
|
||||
// are held in a queue until they can be executed or a queuing limit
|
||||
// is reached.
|
||||
// "Reject" means that requests that can not be executed upon arrival
|
||||
// are rejected.
|
||||
// Required.
|
||||
// +unionDiscriminator
|
||||
Type LimitResponseType `json:"type" protobuf:"bytes,1,opt,name=type"`
|
||||
|
||||
// `queuing` holds the configuration parameters for queuing.
|
||||
// This field may be non-empty only if `type` is `"Queue"`.
|
||||
// +optional
|
||||
Queuing *QueuingConfiguration `json:"queuing,omitempty" protobuf:"bytes,2,opt,name=queuing"`
|
||||
}
|
||||
|
||||
// LimitResponseType identifies how a Limited priority level handles a request that can not be executed right now
|
||||
type LimitResponseType string
|
||||
|
||||
// Supported limit responses.
|
||||
const (
|
||||
// LimitResponseTypeQueue means that requests that can not be executed right now are queued until they can be executed or a queuing limit is hit
|
||||
LimitResponseTypeQueue LimitResponseType = "Queue"
|
||||
|
||||
// LimitResponseTypeReject means that requests that can not be executed right now are rejected
|
||||
LimitResponseTypeReject LimitResponseType = "Reject"
|
||||
)
|
||||
|
||||
// QueuingConfiguration holds the configuration parameters for queuing
|
||||
type QueuingConfiguration struct {
|
||||
// `queues` is the number of queues for this priority level. The
|
||||
// queues exist independently at each apiserver. The value must be
|
||||
// positive. Setting it to 1 effectively precludes
|
||||
|
|
@ -374,7 +446,7 @@ type QueuingConfiguration struct {
|
|||
// associated flow schemas irrelevant. This field has a default
|
||||
// value of 64.
|
||||
// +optional
|
||||
Queues int32 `json:"queues" protobuf:"varint,2,opt,name=queues"`
|
||||
Queues int32 `json:"queues" protobuf:"varint,1,opt,name=queues"`
|
||||
|
||||
// `handSize` is a small positive number that configures the
|
||||
// shuffle sharding of requests into queues. When enqueuing a request
|
||||
|
|
@ -388,14 +460,14 @@ type QueuingConfiguration struct {
|
|||
// documentation for more extensive guidance on setting this
|
||||
// field. This field has a default value of 8.
|
||||
// +optional
|
||||
HandSize int32 `json:"handSize" protobuf:"varint,3,opt,name=handSize"`
|
||||
HandSize int32 `json:"handSize" protobuf:"varint,2,opt,name=handSize"`
|
||||
|
||||
// `queueLengthLimit` is the maximum number of requests allowed to
|
||||
// be waiting in a given queue of this priority level at a time;
|
||||
// excess requests are rejected. This value must be positive. If
|
||||
// not specified, it will be defaulted to 50.
|
||||
// +optional
|
||||
QueueLengthLimit int32 `json:"queueLengthLimit" protobuf:"varint,4,opt,name=queueLengthLimit"`
|
||||
QueueLengthLimit int32 `json:"queueLengthLimit" protobuf:"varint,3,opt,name=queueLengthLimit"`
|
||||
}
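
// Illustrative sketch (not the apiserver implementation) of how `queues`
// and `handSize` interact: the request's flow hash seeds a deterministic
// shuffle of the queue indices, the first handSize indices form the hand,
// and the request goes to the shortest queue in that hand. Assumes
// "math/rand" is available.
func pickQueue(flowHash int64, queueLengths []int, handSize int) int {
	rng := rand.New(rand.NewSource(flowHash))
	order := rng.Perm(len(queueLengths)) // per-flow deterministic ordering
	best := order[0]
	for _, q := range order[:handSize] {
		if queueLengths[q] < queueLengths[best] {
			best = q
		}
	}
	return best
}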
|
||||
|
||||
// PriorityLevelConfigurationConditionType is a valid value for PriorityLevelConfigurationStatusCondition.Type
|
||||
|
|
|
|||
|
|
@ -100,6 +100,26 @@ func (GroupSubject) SwaggerDoc() map[string]string {
|
|||
return map_GroupSubject
|
||||
}
|
||||
|
||||
var map_LimitResponse = map[string]string{
|
||||
"": "LimitResponse defines how to handle requests that can not be executed right now.",
|
||||
"type": "`type` is \"Queue\" or \"Reject\". \"Queue\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \"Reject\" means that requests that can not be executed upon arrival are rejected. Required.",
|
||||
"queuing": "`queuing` holds the configuration parameters for queuing. This field may be non-empty only if `type` is `\"Queue\"`.",
|
||||
}
|
||||
|
||||
func (LimitResponse) SwaggerDoc() map[string]string {
|
||||
return map_LimitResponse
|
||||
}
|
||||
|
||||
var map_LimitedPriorityLevelConfiguration = map[string]string{
|
||||
"": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?",
|
||||
"assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ",
|
||||
"limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now",
|
||||
}
|
||||
|
||||
func (LimitedPriorityLevelConfiguration) SwaggerDoc() map[string]string {
|
||||
return map_LimitedPriorityLevelConfiguration
|
||||
}
|
||||
|
||||
var map_NonResourcePolicyRule = map[string]string{
|
||||
"": "NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.",
|
||||
"verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs. If it is present, it must be the only entry. Required.",
|
||||
|
|
@ -165,9 +185,9 @@ func (PriorityLevelConfigurationReference) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_PriorityLevelConfigurationSpec = map[string]string{
|
||||
"": "PriorityLevelConfigurationSpec is specification of a priority level",
|
||||
"type": "`type` indicates whether this priority level does queuing or is exempt. Valid values are \"Queuing\" and \"Exempt\". \"Exempt\" means that requests of this priority level are not subject to concurrency limits (and thus are never queued) and do not detract from the concurrency available for non-exempt requests. The \"Exempt\" type is useful for apiserver self-requests and system administrator use. Required.",
|
||||
"queuing": "`queuing` holds the configuration parameters that are only meaningful for a priority level that does queuing (i.e., is not exempt). This field must be non-empty if and only if `queuingType` is `\"Queuing\"`.",
|
||||
"": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.",
|
||||
"type": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.",
|
||||
"limited": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.",
|
||||
}
|
||||
|
||||
func (PriorityLevelConfigurationSpec) SwaggerDoc() map[string]string {
|
||||
|
|
@ -184,11 +204,10 @@ func (PriorityLevelConfigurationStatus) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_QueuingConfiguration = map[string]string{
|
||||
"": "QueuingConfiguration holds the configuration parameters that are specific to a priority level that is subject to concurrency controls",
|
||||
"assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.",
|
||||
"queues": "`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64.",
|
||||
"handSize": "`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.",
|
||||
"queueLengthLimit": "`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50.",
|
||||
"": "QueuingConfiguration holds the configuration parameters for queuing",
|
||||
"queues": "`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64.",
|
||||
"handSize": "`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.",
|
||||
"queueLengthLimit": "`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50.",
|
||||
}
|
||||
|
||||
func (QueuingConfiguration) SwaggerDoc() map[string]string {
|
||||
|
|
@ -196,10 +215,12 @@ func (QueuingConfiguration) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_ResourcePolicyRule = map[string]string{
|
||||
"": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, and (c) at least one member of resources matches the request.",
|
||||
"verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs. if it is present, it must be the only entry. Required.",
|
||||
"apiGroups": "`apiGroups` is a list of matching API groups and may not be empty. \"*\" matches all api-groups. if it is present, it must be the only entry. Required.",
|
||||
"resources": "`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources. if it is present, it must be the only entry. Required.",
|
||||
"": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) least one member of namespaces matches the request.",
|
||||
"verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs and, if present, must be the only entry. Required.",
|
||||
"apiGroups": "`apiGroups` is a list of matching API groups and may not be empty. \"*\" matches all API groups and, if present, must be the only entry. Required.",
|
||||
"resources": "`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources and, if present, must be the only entry. Required.",
|
||||
"clusterScope": "`clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). If this field is omitted or false then the `namespaces` field must contain a non-empty list.",
|
||||
"namespaces": "`namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \"*\". Note that \"*\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true.",
|
||||
}
|
||||
|
||||
func (ResourcePolicyRule) SwaggerDoc() map[string]string {
|
||||
|
|
|
|||
|
|
@ -186,6 +186,44 @@ func (in *GroupSubject) DeepCopy() *GroupSubject {
|
|||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *LimitResponse) DeepCopyInto(out *LimitResponse) {
|
||||
*out = *in
|
||||
if in.Queuing != nil {
|
||||
in, out := &in.Queuing, &out.Queuing
|
||||
*out = new(QueuingConfiguration)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitResponse.
|
||||
func (in *LimitResponse) DeepCopy() *LimitResponse {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(LimitResponse)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *LimitedPriorityLevelConfiguration) DeepCopyInto(out *LimitedPriorityLevelConfiguration) {
|
||||
*out = *in
|
||||
in.LimitResponse.DeepCopyInto(&out.LimitResponse)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitedPriorityLevelConfiguration.
|
||||
func (in *LimitedPriorityLevelConfiguration) DeepCopy() *LimitedPriorityLevelConfiguration {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(LimitedPriorityLevelConfiguration)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NonResourcePolicyRule) DeepCopyInto(out *NonResourcePolicyRule) {
|
||||
*out = *in
|
||||
|
|
@ -346,10 +384,10 @@ func (in *PriorityLevelConfigurationReference) DeepCopy() *PriorityLevelConfigur
|
|||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PriorityLevelConfigurationSpec) DeepCopyInto(out *PriorityLevelConfigurationSpec) {
|
||||
*out = *in
|
||||
if in.Queuing != nil {
|
||||
in, out := &in.Queuing, &out.Queuing
|
||||
*out = new(QueuingConfiguration)
|
||||
**out = **in
|
||||
if in.Limited != nil {
|
||||
in, out := &in.Limited, &out.Limited
|
||||
*out = new(LimitedPriorityLevelConfiguration)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
|
@ -421,6 +459,11 @@ func (in *ResourcePolicyRule) DeepCopyInto(out *ResourcePolicyRule) {
|
|||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Namespaces != nil {
|
||||
in, out := &in.Namespaces, &out.Namespaces
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -18,6 +18,7 @@ package v1
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
|
|
@ -26,6 +27,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
)
|
||||
|
||||
|
|
@ -81,7 +83,7 @@ func AddConversionFuncs(scheme *runtime.Scheme) error {
|
|||
|
||||
Convert_Slice_string_To_Slice_int32,
|
||||
|
||||
Convert_Slice_string_To_v1_DeletionPropagation,
|
||||
Convert_Slice_string_To_Pointer_v1_DeletionPropagation,
|
||||
|
||||
Convert_Slice_string_To_v1_IncludeObjectPolicy,
|
||||
)
|
||||
|
|
@ -352,13 +354,16 @@ func Convert_Slice_string_To_Slice_int32(in *[]string, out *[]int32, s conversio
|
|||
return nil
|
||||
}
|
||||
|
||||
// Convert_Slice_string_To_v1_DeletionPropagation allows converting a URL query parameter propagationPolicy
|
||||
func Convert_Slice_string_To_v1_DeletionPropagation(in *[]string, out *DeletionPropagation, s conversion.Scope) error {
|
||||
// Convert_Slice_string_To_Pointer_v1_DeletionPropagation allows converting a URL query parameter propagationPolicy
|
||||
func Convert_Slice_string_To_Pointer_v1_DeletionPropagation(in *[]string, out **DeletionPropagation, s conversion.Scope) error {
|
||||
var str string
|
||||
if len(*in) > 0 {
|
||||
*out = DeletionPropagation((*in)[0])
|
||||
str = (*in)[0]
|
||||
} else {
|
||||
*out = ""
|
||||
str = ""
|
||||
}
|
||||
temp := DeletionPropagation(str)
|
||||
*out = &temp
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -369,3 +374,33 @@ func Convert_Slice_string_To_v1_IncludeObjectPolicy(in *[]string, out *IncludeOb
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_url_Values_To_v1_DeleteOptions allows converting a URL to DeleteOptions.
|
||||
func Convert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptions, s conversion.Scope) error {
|
||||
if err := autoConvert_url_Values_To_v1_DeleteOptions(in, out, s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
uid := types.UID("")
|
||||
if values, ok := (*in)["uid"]; ok && len(values) > 0 {
|
||||
uid = types.UID(values[0])
|
||||
}
|
||||
|
||||
resourceVersion := ""
|
||||
if values, ok := (*in)["resourceVersion"]; ok && len(values) > 0 {
|
||||
resourceVersion = values[0]
|
||||
}
|
||||
|
||||
if len(uid) > 0 || len(resourceVersion) > 0 {
|
||||
if out.Preconditions == nil {
|
||||
out.Preconditions = &Preconditions{}
|
||||
}
|
||||
if len(uid) > 0 {
|
||||
out.Preconditions.UID = &uid
|
||||
}
|
||||
if len(resourceVersion) > 0 {
|
||||
out.Preconditions.ResourceVersion = &resourceVersion
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
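
// Hedged usage sketch of the conversion above: delete query parameters are
// turned into a DeleteOptions, with uid folded into Preconditions. The key
// names come from the code above; passing a nil conversion.Scope is an
// assumption that holds only because these generated paths never use it.
func exampleDeleteOptionsFromQuery() (*DeleteOptions, error) {
	values := url.Values{
		"gracePeriodSeconds": {"30"},
		"propagationPolicy":  {"Foreground"},
		"uid":                {"example-uid"},
	}
	out := &DeleteOptions{}
	if err := Convert_url_Values_To_v1_DeleteOptions(&values, out, nil); err != nil {
		return nil, err
	}
	// Now *out.GracePeriodSeconds == 30, *out.PropagationPolicy == "Foreground",
	// and *out.Preconditions.UID == "example-uid".
	return out, nil
}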
|
||||
|
|
|
|||
|
|
@ -163,6 +163,7 @@ message DeleteOptions {
|
|||
|
||||
// Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be
|
||||
// returned.
|
||||
// +k8s:conversion-gen=false
|
||||
// +optional
|
||||
optional Preconditions preconditions = 2;
|
||||
|
||||
|
|
|
|||
|
|
@ -455,6 +455,7 @@ const (
|
|||
DryRunAll = "All"
|
||||
)
|
||||
|
||||
// +k8s:conversion-gen:explicit-from=net/url.Values
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// DeleteOptions may be provided when deleting an API object.
|
||||
|
|
@ -470,6 +471,7 @@ type DeleteOptions struct {
|
|||
|
||||
// Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be
|
||||
// returned.
|
||||
// +k8s:conversion-gen=false
|
||||
// +optional
|
||||
Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"`
|
||||
|
||||
|
|
|
|||
|
|
@ -45,6 +45,11 @@ func RegisterConversions(s *runtime.Scheme) error {
|
|||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_url_Values_To_v1_DeleteOptions(a.(*url.Values), b.(*DeleteOptions), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*ExportOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_url_Values_To_v1_ExportOptions(a.(*url.Values), b.(*ExportOptions), scope)
|
||||
}); err != nil {
|
||||
|
|
@ -120,6 +125,11 @@ func RegisterConversions(s *runtime.Scheme) error {
|
|||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*[]string)(nil), (**DeletionPropagation)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_Slice_string_To_Pointer_v1_DeletionPropagation(a.(*[]string), b.(**DeletionPropagation), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*[]string)(nil), (**Time)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_Slice_string_To_Pointer_v1_Time(a.(*[]string), b.(**Time), scope)
|
||||
}); err != nil {
|
||||
|
|
@ -130,11 +140,6 @@ func RegisterConversions(s *runtime.Scheme) error {
|
|||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*[]string)(nil), (*DeletionPropagation)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_Slice_string_To_v1_DeletionPropagation(a.(*[]string), b.(*DeletionPropagation), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*[]string)(nil), (*IncludeObjectPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_Slice_string_To_v1_IncludeObjectPolicy(a.(*[]string), b.(*IncludeObjectPolicy), scope)
|
||||
}); err != nil {
|
||||
|
|
@ -210,6 +215,11 @@ func RegisterConversions(s *runtime.Scheme) error {
|
|||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*url.Values)(nil), (*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_url_Values_To_v1_DeleteOptions(a.(*url.Values), b.(*DeleteOptions), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*DeleteOptions)(nil), (*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1_DeleteOptions_To_v1_DeleteOptions(a.(*DeleteOptions), b.(*DeleteOptions), scope)
|
||||
}); err != nil {
|
||||
|
|
@ -291,6 +301,39 @@ func Convert_url_Values_To_v1_CreateOptions(in *url.Values, out *CreateOptions,
|
|||
return autoConvert_url_Values_To_v1_CreateOptions(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptions, s conversion.Scope) error {
|
||||
// WARNING: Field TypeMeta does not have json tag, skipping.
|
||||
|
||||
if values, ok := map[string][]string(*in)["gracePeriodSeconds"]; ok && len(values) > 0 {
|
||||
if err := runtime.Convert_Slice_string_To_Pointer_int64(&values, &out.GracePeriodSeconds, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.GracePeriodSeconds = nil
|
||||
}
|
||||
// INFO: in.Preconditions opted out of conversion generation
|
||||
if values, ok := map[string][]string(*in)["orphanDependents"]; ok && len(values) > 0 {
|
||||
if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.OrphanDependents, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.OrphanDependents = nil
|
||||
}
|
||||
if values, ok := map[string][]string(*in)["propagationPolicy"]; ok && len(values) > 0 {
|
||||
if err := Convert_Slice_string_To_Pointer_v1_DeletionPropagation(&values, &out.PropagationPolicy, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.PropagationPolicy = nil
|
||||
}
|
||||
if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 {
|
||||
out.DryRun = *(*[]string)(unsafe.Pointer(&values))
|
||||
} else {
|
||||
out.DryRun = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func autoConvert_url_Values_To_v1_ExportOptions(in *url.Values, out *ExportOptions, s conversion.Scope) error {
|
||||
// WARNING: Field TypeMeta does not have json tag, skipping.
|
||||
|
||||
|
|
|
|||
|
|
@ -54,6 +54,11 @@ type Selector interface {
|
|||
|
||||
// Make a deep copy of the selector.
|
||||
DeepCopySelector() Selector
|
||||
|
||||
// RequiresExactMatch allows a caller to introspect whether a given selector
|
||||
// requires a single specific label to be set, and if so returns the value it
|
||||
// requires.
|
||||
RequiresExactMatch(label string) (value string, found bool)
|
||||
}
|
||||
|
||||
// Everything returns a selector that matches all labels.
|
||||
|
|
@ -63,12 +68,13 @@ func Everything() Selector {
|
|||
|
||||
type nothingSelector struct{}
|
||||
|
||||
func (n nothingSelector) Matches(_ Labels) bool { return false }
|
||||
func (n nothingSelector) Empty() bool { return false }
|
||||
func (n nothingSelector) String() string { return "" }
|
||||
func (n nothingSelector) Add(_ ...Requirement) Selector { return n }
|
||||
func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false }
|
||||
func (n nothingSelector) DeepCopySelector() Selector { return n }
|
||||
func (n nothingSelector) Matches(_ Labels) bool { return false }
|
||||
func (n nothingSelector) Empty() bool { return false }
|
||||
func (n nothingSelector) String() string { return "" }
|
||||
func (n nothingSelector) Add(_ ...Requirement) Selector { return n }
|
||||
func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false }
|
||||
func (n nothingSelector) DeepCopySelector() Selector { return n }
|
||||
func (n nothingSelector) RequiresExactMatch(label string) (value string, found bool) { return "", false }
|
||||
|
||||
// Nothing returns a selector that matches no labels
|
||||
func Nothing() Selector {
|
||||
|
|
@ -358,6 +364,23 @@ func (lsel internalSelector) String() string {
|
|||
return strings.Join(reqs, ",")
|
||||
}
|
||||
|
||||
// RequiresExactMatch introspects whether a given selector requires a single specific label
// to be set, and if so returns the value it requires.
|
||||
func (lsel internalSelector) RequiresExactMatch(label string) (value string, found bool) {
|
||||
for ix := range lsel {
|
||||
if lsel[ix].key == label {
|
||||
switch lsel[ix].operator {
|
||||
case selection.Equals, selection.DoubleEquals, selection.In:
|
||||
if len(lsel[ix].strValues) == 1 {
|
||||
return lsel[ix].strValues[0], true
|
||||
}
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
}
|
||||
return "", false
|
||||
}
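To make the new RequiresExactMatch method concrete, a minimal sketch outside this vendored diff; the selector string is an illustrative assumption.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// "app" is pinned to one value, so the selector requires an exact match for it.
	sel, err := labels.Parse("app=nginx,tier in (frontend)")
	if err != nil {
		panic(err)
	}
	if v, ok := sel.RequiresExactMatch("app"); ok {
		fmt.Println("app must equal:", v) // app must equal: nginx
	}
	// A key the selector does not constrain reports found == false.
	if _, ok := sel.RequiresExactMatch("env"); !ok {
		fmt.Println("env is unconstrained")
	}
}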
|
||||
|
||||
// Token represents constant definition for lexer token
|
||||
type Token int
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,192 @@
|
|||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
utilclock "k8s.io/apimachinery/pkg/util/clock"
|
||||
)
|
||||
|
||||
// NewExpiring returns an initialized expiring cache.
|
||||
func NewExpiring() *Expiring {
|
||||
return NewExpiringWithClock(utilclock.RealClock{})
|
||||
}
|
||||
|
||||
// NewExpiringWithClock is like NewExpiring but allows passing in a custom
|
||||
// clock for testing.
|
||||
func NewExpiringWithClock(clock utilclock.Clock) *Expiring {
|
||||
return &Expiring{
|
||||
clock: clock,
|
||||
cache: make(map[interface{}]entry),
|
||||
}
|
||||
}
|
||||
|
||||
// Expiring is a map whose entries expire after a per-entry timeout.
|
||||
type Expiring struct {
|
||||
clock utilclock.Clock
|
||||
|
||||
// mu protects the below fields
|
||||
mu sync.RWMutex
|
||||
// cache is the internal map that backs the cache.
|
||||
cache map[interface{}]entry
|
||||
// generation is used as a cheap resource version for cache entries. Cleanups
|
||||
// are scheduled with a key and generation. When the cleanup runs, it first
|
||||
// compares its generation with the current generation of the entry. It
|
||||
// deletes the entry iff the generation matches. This prevents cleanups
|
||||
// scheduled for earlier versions of an entry from deleting later versions of
|
||||
// an entry when Set() is called multiple times with the same key.
|
||||
//
|
||||
// The integer value of the generation of an entry is meaningless.
|
||||
generation uint64
|
||||
|
||||
heap expiringHeap
|
||||
}
|
||||
|
||||
type entry struct {
|
||||
val interface{}
|
||||
expiry time.Time
|
||||
generation uint64
|
||||
}
|
||||
|
||||
// Get looks up an entry in the cache.
|
||||
func (c *Expiring) Get(key interface{}) (val interface{}, ok bool) {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
e, ok := c.cache[key]
|
||||
if !ok || !c.clock.Now().Before(e.expiry) {
|
||||
return nil, false
|
||||
}
|
||||
return e.val, true
|
||||
}
|
||||
|
||||
// Set sets a key/value/expiry entry in the map, overwriting any previous entry
|
||||
// with the same key. The entry expires at the given expiry time, but its TTL
|
||||
// may be lengthened or shortened by additional calls to Set(). Garbage
|
||||
// collection of expired entries occurs during calls to Set(), however calls to
|
||||
// Get() will not return expired entries that have not yet been garbage
|
||||
// collected.
|
||||
func (c *Expiring) Set(key interface{}, val interface{}, ttl time.Duration) {
|
||||
now := c.clock.Now()
|
||||
expiry := now.Add(ttl)
|
||||
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
c.generation++
|
||||
|
||||
c.cache[key] = entry{
|
||||
val: val,
|
||||
expiry: expiry,
|
||||
generation: c.generation,
|
||||
}
|
||||
|
||||
// Run GC inline before pushing the new entry.
|
||||
c.gc(now)
|
||||
|
||||
heap.Push(&c.heap, &expiringHeapEntry{
|
||||
key: key,
|
||||
expiry: expiry,
|
||||
generation: c.generation,
|
||||
})
|
||||
}
|
||||
|
||||
// Delete deletes an entry in the map.
|
||||
func (c *Expiring) Delete(key interface{}) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
c.del(key, 0)
|
||||
}
|
||||
|
||||
// del deletes the entry for the given key. The generation argument is the
|
||||
// generation of the entry that should be deleted. If the generation has been
|
||||
// changed (e.g. if a set has occurred on an existing element but the old
|
||||
// cleanup still runs), this is a noop. If the generation argument is 0, the
|
||||
// entry's generation is ignored and the entry is deleted.
|
||||
//
|
||||
// del must be called under the write lock.
|
||||
func (c *Expiring) del(key interface{}, generation uint64) {
|
||||
e, ok := c.cache[key]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if generation != 0 && generation != e.generation {
|
||||
return
|
||||
}
|
||||
delete(c.cache, key)
|
||||
}
|
||||
|
||||
// Len returns the number of items in the cache.
|
||||
func (c *Expiring) Len() int {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
return len(c.cache)
|
||||
}
|
||||
|
||||
func (c *Expiring) gc(now time.Time) {
|
||||
for {
|
||||
// Return from gc if the heap is empty or the next element is not yet
|
||||
// expired.
|
||||
//
|
||||
// heap[0] is a peek at the next element in the heap, which is not obvious
|
||||
// from looking at the (*expiringHeap).Pop() implementation below.
|
||||
// heap.Pop() swaps the first entry with the last entry of the heap, then
|
||||
// calls (*expiringHeap).Pop() which returns the last element.
|
||||
if len(c.heap) == 0 || now.Before(c.heap[0].expiry) {
|
||||
return
|
||||
}
|
||||
cleanup := heap.Pop(&c.heap).(*expiringHeapEntry)
|
||||
c.del(cleanup.key, cleanup.generation)
|
||||
}
|
||||
}
|
||||
|
||||
type expiringHeapEntry struct {
|
||||
key interface{}
|
||||
expiry time.Time
|
||||
generation uint64
|
||||
}
|
||||
|
||||
// expiringHeap is a min-heap ordered by expiration time of its entries. The
|
||||
// expiring cache uses this as a priority queue to efficiently organize entries
|
||||
// which will be garbage collected once they expire.
|
||||
type expiringHeap []*expiringHeapEntry
|
||||
|
||||
var _ heap.Interface = &expiringHeap{}
|
||||
|
||||
func (cq expiringHeap) Len() int {
|
||||
return len(cq)
|
||||
}
|
||||
|
||||
func (cq expiringHeap) Less(i, j int) bool {
|
||||
return cq[i].expiry.Before(cq[j].expiry)
|
||||
}
|
||||
|
||||
func (cq expiringHeap) Swap(i, j int) {
|
||||
cq[i], cq[j] = cq[j], cq[i]
|
||||
}
|
||||
|
||||
func (cq *expiringHeap) Push(c interface{}) {
|
||||
*cq = append(*cq, c.(*expiringHeapEntry))
|
||||
}
|
||||
|
||||
func (cq *expiringHeap) Pop() interface{} {
|
||||
c := (*cq)[cq.Len()-1]
|
||||
*cq = (*cq)[:cq.Len()-1]
|
||||
return c
|
||||
}
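As a quick illustration of the expiring cache added above, a hedged usage sketch; it assumes the file lives in the apimachinery util/cache package, and the key, value, and TTL are illustrative.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/cache"
	utilclock "k8s.io/apimachinery/pkg/util/clock"
)

func main() {
	// A fake clock lets the sketch advance time without sleeping.
	clock := utilclock.NewFakeClock(time.Now())
	c := cache.NewExpiringWithClock(clock)

	// The entry lives for the TTL passed to Set; a later Set on the same key
	// replaces the value and resets the expiry.
	c.Set("token", "abc123", time.Minute)
	if v, ok := c.Get("token"); ok {
		fmt.Println("cached:", v)
	}

	// Step past the TTL; Get reports the entry as gone even if the expired
	// heap entry has not been garbage collected yet.
	clock.Step(2 * time.Minute)
	if _, ok := c.Get("token"); !ok {
		fmt.Println("entry expired")
	}
}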
|
||||
|
|
@ -82,7 +82,7 @@ var stackCreator = regexp.MustCompile(`(?m)^created by (.*)\n\s+(.*):(\d+) \+0x[
|
|||
func extractStackCreator() (string, int, bool) {
|
||||
stack := debug.Stack()
|
||||
matches := stackCreator.FindStringSubmatch(string(stack))
|
||||
if matches == nil || len(matches) != 4 {
|
||||
if len(matches) != 4 {
|
||||
return "", 0, false
|
||||
}
|
||||
line, err := strconv.Atoi(matches[3])
|
||||
|
|
|
|||
|
|
@ -309,6 +309,26 @@ func IsValidIP(value string) []string {
|
|||
return nil
|
||||
}
|
||||
|
||||
// IsValidIPv4Address tests that the argument is a valid IPv4 address.
|
||||
func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList {
|
||||
var allErrors field.ErrorList
|
||||
ip := net.ParseIP(value)
|
||||
if ip == nil || ip.To4() == nil {
|
||||
allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv4 address"))
|
||||
}
|
||||
return allErrors
|
||||
}
|
||||
|
||||
// IsValidIPv6Address tests that the argument is a valid IPv6 address.
|
||||
func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList {
|
||||
var allErrors field.ErrorList
|
||||
ip := net.ParseIP(value)
|
||||
if ip == nil || ip.To4() != nil {
|
||||
allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv6 address"))
|
||||
}
|
||||
return allErrors
|
||||
}
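A small sketch of calling the new IPv4/IPv6 validators; the field path and addresses are illustrative assumptions.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	path := field.NewPath("spec", "clusterIP")

	// A well-formed IPv4 address yields an empty ErrorList.
	if errs := validation.IsValidIPv4Address(path, "10.0.0.1"); len(errs) == 0 {
		fmt.Println("valid IPv4")
	}

	// The same value fails IPv6 validation; each error carries the field path.
	for _, e := range validation.IsValidIPv6Address(path, "10.0.0.1") {
		fmt.Println(e.Error())
	}
}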
|
||||
|
||||
const percentFmt string = "[0-9]+%"
|
||||
const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'"
|
||||
|
||||
|
|
|
|||
|
|
@ -463,6 +463,13 @@ func setDiscoveryDefaults(config *restclient.Config) error {
|
|||
if config.Timeout == 0 {
|
||||
config.Timeout = defaultTimeout
|
||||
}
|
||||
if config.Burst == 0 && config.QPS < 100 {
|
||||
// discovery is expected to be bursty, increase the default burst
|
||||
// to accommodate looking up resource info for many API groups.
|
||||
// matches burst set by ConfigFlags#ToDiscoveryClient().
|
||||
// see https://issue.k8s.io/86149
|
||||
config.Burst = 100
|
||||
}
|
||||
codec := runtime.NoopEncoder{Decoder: scheme.Codecs.UniversalDecoder()}
|
||||
config.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec})
|
||||
if len(config.UserAgent) == 0 {
|
||||
|
|
|
|||
|
|
@ -43,6 +43,7 @@ import (
|
|||
coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1"
|
||||
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
discoveryv1alpha1 "k8s.io/client-go/kubernetes/typed/discovery/v1alpha1"
|
||||
discoveryv1beta1 "k8s.io/client-go/kubernetes/typed/discovery/v1beta1"
|
||||
eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1"
|
||||
extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
|
||||
flowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1"
|
||||
|
|
@ -88,6 +89,7 @@ type Interface interface {
|
|||
CoordinationV1() coordinationv1.CoordinationV1Interface
|
||||
CoreV1() corev1.CoreV1Interface
|
||||
DiscoveryV1alpha1() discoveryv1alpha1.DiscoveryV1alpha1Interface
|
||||
DiscoveryV1beta1() discoveryv1beta1.DiscoveryV1beta1Interface
|
||||
EventsV1beta1() eventsv1beta1.EventsV1beta1Interface
|
||||
ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface
|
||||
FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha1Interface
|
||||
|
|
@ -133,6 +135,7 @@ type Clientset struct {
|
|||
coordinationV1 *coordinationv1.CoordinationV1Client
|
||||
coreV1 *corev1.CoreV1Client
|
||||
discoveryV1alpha1 *discoveryv1alpha1.DiscoveryV1alpha1Client
|
||||
discoveryV1beta1 *discoveryv1beta1.DiscoveryV1beta1Client
|
||||
eventsV1beta1 *eventsv1beta1.EventsV1beta1Client
|
||||
extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client
|
||||
flowcontrolV1alpha1 *flowcontrolv1alpha1.FlowcontrolV1alpha1Client
|
||||
|
|
@ -258,6 +261,11 @@ func (c *Clientset) DiscoveryV1alpha1() discoveryv1alpha1.DiscoveryV1alpha1Inter
|
|||
return c.discoveryV1alpha1
|
||||
}
|
||||
|
||||
// DiscoveryV1beta1 retrieves the DiscoveryV1beta1Client
|
||||
func (c *Clientset) DiscoveryV1beta1() discoveryv1beta1.DiscoveryV1beta1Interface {
|
||||
return c.discoveryV1beta1
|
||||
}
|
||||
|
||||
// EventsV1beta1 retrieves the EventsV1beta1Client
|
||||
func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface {
|
||||
return c.eventsV1beta1
|
||||
|
|
@ -453,6 +461,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cs.discoveryV1beta1, err = discoveryv1beta1.NewForConfig(&configShallowCopy)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cs.eventsV1beta1, err = eventsv1beta1.NewForConfig(&configShallowCopy)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -558,6 +570,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset {
|
|||
cs.coordinationV1 = coordinationv1.NewForConfigOrDie(c)
|
||||
cs.coreV1 = corev1.NewForConfigOrDie(c)
|
||||
cs.discoveryV1alpha1 = discoveryv1alpha1.NewForConfigOrDie(c)
|
||||
cs.discoveryV1beta1 = discoveryv1beta1.NewForConfigOrDie(c)
|
||||
cs.eventsV1beta1 = eventsv1beta1.NewForConfigOrDie(c)
|
||||
cs.extensionsV1beta1 = extensionsv1beta1.NewForConfigOrDie(c)
|
||||
cs.flowcontrolV1alpha1 = flowcontrolv1alpha1.NewForConfigOrDie(c)
|
||||
|
|
@ -605,6 +618,7 @@ func New(c rest.Interface) *Clientset {
|
|||
cs.coordinationV1 = coordinationv1.New(c)
|
||||
cs.coreV1 = corev1.New(c)
|
||||
cs.discoveryV1alpha1 = discoveryv1alpha1.New(c)
|
||||
cs.discoveryV1beta1 = discoveryv1beta1.New(c)
|
||||
cs.eventsV1beta1 = eventsv1beta1.New(c)
|
||||
cs.extensionsV1beta1 = extensionsv1beta1.New(c)
|
||||
cs.flowcontrolV1alpha1 = flowcontrolv1alpha1.New(c)
|
||||
|
|
|
|||
|
|
@ -66,6 +66,8 @@ import (
|
|||
fakecorev1 "k8s.io/client-go/kubernetes/typed/core/v1/fake"
|
||||
discoveryv1alpha1 "k8s.io/client-go/kubernetes/typed/discovery/v1alpha1"
|
||||
fakediscoveryv1alpha1 "k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake"
|
||||
discoveryv1beta1 "k8s.io/client-go/kubernetes/typed/discovery/v1beta1"
|
||||
fakediscoveryv1beta1 "k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake"
|
||||
eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1"
|
||||
fakeeventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1/fake"
|
||||
extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
|
||||
|
|
@ -257,6 +259,11 @@ func (c *Clientset) DiscoveryV1alpha1() discoveryv1alpha1.DiscoveryV1alpha1Inter
|
|||
return &fakediscoveryv1alpha1.FakeDiscoveryV1alpha1{Fake: &c.Fake}
|
||||
}
|
||||
|
||||
// DiscoveryV1beta1 retrieves the DiscoveryV1beta1Client
|
||||
func (c *Clientset) DiscoveryV1beta1() discoveryv1beta1.DiscoveryV1beta1Interface {
|
||||
return &fakediscoveryv1beta1.FakeDiscoveryV1beta1{Fake: &c.Fake}
|
||||
}
|
||||
|
||||
// EventsV1beta1 retrieves the EventsV1beta1Client
|
||||
func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface {
|
||||
return &fakeeventsv1beta1.FakeEventsV1beta1{Fake: &c.Fake}
|
||||
|
|
|
|||
|
|
@ -40,6 +40,7 @@ import (
|
|||
coordinationv1beta1 "k8s.io/api/coordination/v1beta1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
discoveryv1alpha1 "k8s.io/api/discovery/v1alpha1"
|
||||
discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
|
||||
eventsv1beta1 "k8s.io/api/events/v1beta1"
|
||||
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
|
||||
flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1"
|
||||
|
|
@ -90,6 +91,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{
|
|||
coordinationv1.AddToScheme,
|
||||
corev1.AddToScheme,
|
||||
discoveryv1alpha1.AddToScheme,
|
||||
discoveryv1beta1.AddToScheme,
|
||||
eventsv1beta1.AddToScheme,
|
||||
extensionsv1beta1.AddToScheme,
|
||||
flowcontrolv1alpha1.AddToScheme,
|
||||
|
|
|
|||
|
|
@ -40,6 +40,7 @@ import (
|
|||
coordinationv1beta1 "k8s.io/api/coordination/v1beta1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
discoveryv1alpha1 "k8s.io/api/discovery/v1alpha1"
|
||||
discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
|
||||
eventsv1beta1 "k8s.io/api/events/v1beta1"
|
||||
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
|
||||
flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1"
|
||||
|
|
@ -90,6 +91,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{
|
|||
coordinationv1.AddToScheme,
|
||||
corev1.AddToScheme,
|
||||
discoveryv1alpha1.AddToScheme,
|
||||
discoveryv1beta1.AddToScheme,
|
||||
eventsv1beta1.AddToScheme,
|
||||
extensionsv1beta1.AddToScheme,
|
||||
flowcontrolv1alpha1.AddToScheme,
|
||||
|
|
|
|||
89 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go (generated, vendored, new file)
|
|
@ -0,0 +1,89 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
v1beta1 "k8s.io/api/discovery/v1beta1"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
rest "k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
type DiscoveryV1beta1Interface interface {
|
||||
RESTClient() rest.Interface
|
||||
EndpointSlicesGetter
|
||||
}
|
||||
|
||||
// DiscoveryV1beta1Client is used to interact with features provided by the discovery.k8s.io group.
|
||||
type DiscoveryV1beta1Client struct {
|
||||
restClient rest.Interface
|
||||
}
|
||||
|
||||
func (c *DiscoveryV1beta1Client) EndpointSlices(namespace string) EndpointSliceInterface {
|
||||
return newEndpointSlices(c, namespace)
|
||||
}
|
||||
|
||||
// NewForConfig creates a new DiscoveryV1beta1Client for the given config.
|
||||
func NewForConfig(c *rest.Config) (*DiscoveryV1beta1Client, error) {
|
||||
config := *c
|
||||
if err := setConfigDefaults(&config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client, err := rest.RESTClientFor(&config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &DiscoveryV1beta1Client{client}, nil
|
||||
}
|
||||
|
||||
// NewForConfigOrDie creates a new DiscoveryV1beta1Client for the given config and
|
||||
// panics if there is an error in the config.
|
||||
func NewForConfigOrDie(c *rest.Config) *DiscoveryV1beta1Client {
|
||||
client, err := NewForConfig(c)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return client
|
||||
}
|
||||
|
||||
// New creates a new DiscoveryV1beta1Client for the given RESTClient.
|
||||
func New(c rest.Interface) *DiscoveryV1beta1Client {
|
||||
return &DiscoveryV1beta1Client{c}
|
||||
}
|
||||
|
||||
func setConfigDefaults(config *rest.Config) error {
|
||||
gv := v1beta1.SchemeGroupVersion
|
||||
config.GroupVersion = &gv
|
||||
config.APIPath = "/apis"
|
||||
config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
|
||||
|
||||
if config.UserAgent == "" {
|
||||
config.UserAgent = rest.DefaultKubernetesUserAgent()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RESTClient returns a RESTClient that is used to communicate
|
||||
// with API server by this client implementation.
|
||||
func (c *DiscoveryV1beta1Client) RESTClient() rest.Interface {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
return c.restClient
|
||||
}
|
||||
|
|
@ -0,0 +1,20 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
// This package has the automatically generated typed clients.
|
||||
package v1beta1
|
||||
174 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go (generated, vendored, new file)
|
|
@ -0,0 +1,174 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
v1beta1 "k8s.io/api/discovery/v1beta1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
scheme "k8s.io/client-go/kubernetes/scheme"
|
||||
rest "k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
// EndpointSlicesGetter has a method to return a EndpointSliceInterface.
|
||||
// A group's client should implement this interface.
|
||||
type EndpointSlicesGetter interface {
|
||||
EndpointSlices(namespace string) EndpointSliceInterface
|
||||
}
|
||||
|
||||
// EndpointSliceInterface has methods to work with EndpointSlice resources.
|
||||
type EndpointSliceInterface interface {
|
||||
Create(*v1beta1.EndpointSlice) (*v1beta1.EndpointSlice, error)
|
||||
Update(*v1beta1.EndpointSlice) (*v1beta1.EndpointSlice, error)
|
||||
Delete(name string, options *v1.DeleteOptions) error
|
||||
DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
|
||||
Get(name string, options v1.GetOptions) (*v1beta1.EndpointSlice, error)
|
||||
List(opts v1.ListOptions) (*v1beta1.EndpointSliceList, error)
|
||||
Watch(opts v1.ListOptions) (watch.Interface, error)
|
||||
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.EndpointSlice, err error)
|
||||
EndpointSliceExpansion
|
||||
}
|
||||
|
||||
// endpointSlices implements EndpointSliceInterface
|
||||
type endpointSlices struct {
|
||||
client rest.Interface
|
||||
ns string
|
||||
}
|
||||
|
||||
// newEndpointSlices returns a EndpointSlices
|
||||
func newEndpointSlices(c *DiscoveryV1beta1Client, namespace string) *endpointSlices {
|
||||
return &endpointSlices{
|
||||
client: c.RESTClient(),
|
||||
ns: namespace,
|
||||
}
|
||||
}
|
||||
|
||||
// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any.
|
||||
func (c *endpointSlices) Get(name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) {
|
||||
result = &v1beta1.EndpointSlice{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("endpointslices").
|
||||
Name(name).
|
||||
VersionedParams(&options, scheme.ParameterCodec).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors.
|
||||
func (c *endpointSlices) List(opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
result = &v1beta1.EndpointSliceList{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("endpointslices").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested endpointSlices.
|
||||
func (c *endpointSlices) Watch(opts v1.ListOptions) (watch.Interface, error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
opts.Watch = true
|
||||
return c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("endpointslices").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Watch()
|
||||
}
|
||||
|
||||
// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any.
|
||||
func (c *endpointSlices) Create(endpointSlice *v1beta1.EndpointSlice) (result *v1beta1.EndpointSlice, err error) {
|
||||
result = &v1beta1.EndpointSlice{}
|
||||
err = c.client.Post().
|
||||
Namespace(c.ns).
|
||||
Resource("endpointslices").
|
||||
Body(endpointSlice).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any.
|
||||
func (c *endpointSlices) Update(endpointSlice *v1beta1.EndpointSlice) (result *v1beta1.EndpointSlice, err error) {
|
||||
result = &v1beta1.EndpointSlice{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
Resource("endpointslices").
|
||||
Name(endpointSlice.Name).
|
||||
Body(endpointSlice).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs.
|
||||
func (c *endpointSlices) Delete(name string, options *v1.DeleteOptions) error {
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("endpointslices").
|
||||
Name(name).
|
||||
Body(options).
|
||||
Do().
|
||||
Error()
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *endpointSlices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
|
||||
var timeout time.Duration
|
||||
if listOptions.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
|
||||
}
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("endpointslices").
|
||||
VersionedParams(&listOptions, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Body(options).
|
||||
Do().
|
||||
Error()
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched endpointSlice.
|
||||
func (c *endpointSlices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.EndpointSlice, err error) {
|
||||
result = &v1beta1.EndpointSlice{}
|
||||
err = c.client.Patch(pt).
|
||||
Namespace(c.ns).
|
||||
Resource("endpointslices").
|
||||
SubResource(subresources...).
|
||||
Name(name).
|
||||
Body(data).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
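To show where the new discovery/v1beta1 typed client surfaces for callers, a hedged sketch that lists EndpointSlices through the standard clientset; the kubeconfig location and namespace are assumptions.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes a reachable cluster and a kubeconfig at the default location.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// EndpointSlices are served by the DiscoveryV1beta1 client added in this update.
	slices, err := cs.DiscoveryV1beta1().EndpointSlices("default").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, s := range slices.Items {
		fmt.Println(s.Name, "endpoints:", len(s.Endpoints))
	}
}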
|
||||
20 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/doc.go (generated, vendored, new file)
|
|
@ -0,0 +1,20 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
// Package fake has the automatically generated clients.
|
||||
package fake
|
||||
40 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_discovery_client.go (generated, vendored, new file)
|
|
@ -0,0 +1,40 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package fake
|
||||
|
||||
import (
|
||||
v1beta1 "k8s.io/client-go/kubernetes/typed/discovery/v1beta1"
|
||||
rest "k8s.io/client-go/rest"
|
||||
testing "k8s.io/client-go/testing"
|
||||
)
|
||||
|
||||
type FakeDiscoveryV1beta1 struct {
|
||||
*testing.Fake
|
||||
}
|
||||
|
||||
func (c *FakeDiscoveryV1beta1) EndpointSlices(namespace string) v1beta1.EndpointSliceInterface {
|
||||
return &FakeEndpointSlices{c, namespace}
|
||||
}
|
||||
|
||||
// RESTClient returns a RESTClient that is used to communicate
|
||||
// with API server by this client implementation.
|
||||
func (c *FakeDiscoveryV1beta1) RESTClient() rest.Interface {
|
||||
var ret *rest.RESTClient
|
||||
return ret
|
||||
}
|
||||
128 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go (generated, vendored, new file)
|
|
@ -0,0 +1,128 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package fake
|
||||
|
||||
import (
|
||||
v1beta1 "k8s.io/api/discovery/v1beta1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
schema "k8s.io/apimachinery/pkg/runtime/schema"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
testing "k8s.io/client-go/testing"
|
||||
)
|
||||
|
||||
// FakeEndpointSlices implements EndpointSliceInterface
|
||||
type FakeEndpointSlices struct {
|
||||
Fake *FakeDiscoveryV1beta1
|
||||
ns string
|
||||
}
|
||||
|
||||
var endpointslicesResource = schema.GroupVersionResource{Group: "discovery.k8s.io", Version: "v1beta1", Resource: "endpointslices"}
|
||||
|
||||
var endpointslicesKind = schema.GroupVersionKind{Group: "discovery.k8s.io", Version: "v1beta1", Kind: "EndpointSlice"}
|
||||
|
||||
// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any.
|
||||
func (c *FakeEndpointSlices) Get(name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewGetAction(endpointslicesResource, c.ns, name), &v1beta1.EndpointSlice{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1beta1.EndpointSlice), err
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors.
|
||||
func (c *FakeEndpointSlices) List(opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewListAction(endpointslicesResource, endpointslicesKind, c.ns, opts), &v1beta1.EndpointSliceList{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
label, _, _ := testing.ExtractFromListOptions(opts)
|
||||
if label == nil {
|
||||
label = labels.Everything()
|
||||
}
|
||||
list := &v1beta1.EndpointSliceList{ListMeta: obj.(*v1beta1.EndpointSliceList).ListMeta}
|
||||
for _, item := range obj.(*v1beta1.EndpointSliceList).Items {
|
||||
if label.Matches(labels.Set(item.Labels)) {
|
||||
list.Items = append(list.Items, item)
|
||||
}
|
||||
}
|
||||
return list, err
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested endpointSlices.
|
||||
func (c *FakeEndpointSlices) Watch(opts v1.ListOptions) (watch.Interface, error) {
|
||||
return c.Fake.
|
||||
InvokesWatch(testing.NewWatchAction(endpointslicesResource, c.ns, opts))
|
||||
|
||||
}
|
||||
|
||||
// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any.
|
||||
func (c *FakeEndpointSlices) Create(endpointSlice *v1beta1.EndpointSlice) (result *v1beta1.EndpointSlice, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewCreateAction(endpointslicesResource, c.ns, endpointSlice), &v1beta1.EndpointSlice{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1beta1.EndpointSlice), err
|
||||
}
|
||||
|
||||
// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any.
|
||||
func (c *FakeEndpointSlices) Update(endpointSlice *v1beta1.EndpointSlice) (result *v1beta1.EndpointSlice, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateAction(endpointslicesResource, c.ns, endpointSlice), &v1beta1.EndpointSlice{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1beta1.EndpointSlice), err
|
||||
}
|
||||
|
||||
// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs.
|
||||
func (c *FakeEndpointSlices) Delete(name string, options *v1.DeleteOptions) error {
|
||||
_, err := c.Fake.
|
||||
Invokes(testing.NewDeleteAction(endpointslicesResource, c.ns, name), &v1beta1.EndpointSlice{})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *FakeEndpointSlices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
|
||||
action := testing.NewDeleteCollectionAction(endpointslicesResource, c.ns, listOptions)
|
||||
|
||||
_, err := c.Fake.Invokes(action, &v1beta1.EndpointSliceList{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched endpointSlice.
|
||||
func (c *FakeEndpointSlices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.EndpointSlice, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, name, pt, data, subresources...), &v1beta1.EndpointSlice{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1beta1.EndpointSlice), err
|
||||
}
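And a hedged sketch of exercising the generated fakes above in a unit test; the object name and namespace are illustrative assumptions.

package fake_test

import (
	"testing"

	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestFakeEndpointSlices(t *testing.T) {
	// NewSimpleClientset seeds the fake object tracker with the given objects.
	cs := fake.NewSimpleClientset(&discoveryv1beta1.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{Name: "web-abc", Namespace: "default"},
	})

	// The fake DiscoveryV1beta1 client serves reads from that tracker.
	got, err := cs.DiscoveryV1beta1().EndpointSlices("default").Get("web-abc", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name != "web-abc" {
		t.Fatalf("unexpected slice: %+v", got)
	}
}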
|
||||
21 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/generated_expansion.go (generated, vendored, new file)
|
|
@ -0,0 +1,21 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package v1beta1
|
||||
|
||||
type EndpointSliceExpansion interface{}
|
||||
|
|
@ -287,7 +287,7 @@ func (ts *azureTokenSource) refreshToken(token *azureToken) (*azureToken, error)
|
|||
return nil, err
|
||||
}
|
||||
|
||||
oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, token.tenantID)
|
||||
oauthConfig, err := adal.NewOAuthConfigWithAPIVersion(env.ActiveDirectoryEndpoint, token.tenantID, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("building the OAuth configuration for token refresh: %v", err)
|
||||
}
|
||||
|
|
@ -344,7 +344,7 @@ func newAzureTokenSourceDeviceCode(environment azure.Environment, clientID strin
|
|||
}
|
||||
|
||||
func (ts *azureTokenSourceDeviceCode) Token() (*azureToken, error) {
|
||||
oauthConfig, err := adal.NewOAuthConfig(ts.environment.ActiveDirectoryEndpoint, ts.tenantID)
|
||||
oauthConfig, err := adal.NewOAuthConfigWithAPIVersion(ts.environment.ActiveDirectoryEndpoint, ts.tenantID, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("building the OAuth configuration for device code authentication: %v", err)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -48,6 +48,7 @@ import (
|
|||
)
|
||||
|
||||
const execInfoEnv = "KUBERNETES_EXEC_INFO"
|
||||
const onRotateListWarningLength = 1000
|
||||
|
||||
var scheme = runtime.NewScheme()
|
||||
var codecs = serializer.NewCodecFactory(scheme)
|
||||
|
|
@ -164,7 +165,7 @@ type Authenticator struct {
|
|||
cachedCreds *credentials
|
||||
exp time.Time
|
||||
|
||||
onRotate func()
|
||||
onRotateList []func()
|
||||
}
|
||||
|
||||
type credentials struct {
|
||||
|
|
@ -191,7 +192,15 @@ func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error {
|
|||
dial = (&net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}).DialContext
|
||||
}
|
||||
d := connrotation.NewDialer(dial)
|
||||
a.onRotate = d.CloseAll
|
||||
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
a.onRotateList = append(a.onRotateList, d.CloseAll)
|
||||
onRotateListLength := len(a.onRotateList)
|
||||
if onRotateListLength > onRotateListWarningLength {
|
||||
klog.Warningf("constructing many client instances from the same exec auth config can cause performance problems during cert rotation and can exhaust available network connections; %d clients constructed calling %q", onRotateListLength, a.cmd)
|
||||
}
|
||||
|
||||
c.Dial = d.DialContext
|
||||
|
||||
return nil
|
||||
|
|
@ -353,8 +362,10 @@ func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) err
|
|||
a.cachedCreds = newCreds
|
||||
// Only close all connections when TLS cert rotates. Token rotation doesn't
|
||||
// need the extra noise.
|
||||
if a.onRotate != nil && oldCreds != nil && !reflect.DeepEqual(oldCreds.cert, a.cachedCreds.cert) {
|
||||
a.onRotate()
|
||||
if len(a.onRotateList) > 0 && oldCreds != nil && !reflect.DeepEqual(oldCreds.cert, a.cachedCreds.cert) {
|
||||
for _, onRotate := range a.onRotateList {
|
||||
onRotate()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,8 +17,6 @@ limitations under the License.
|
|||
package rest
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"mime"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
|
|
@ -51,6 +49,28 @@ type Interface interface {
|
|||
APIVersion() schema.GroupVersion
|
||||
}
|
||||
|
||||
// ClientContentConfig controls how RESTClient communicates with the server.
|
||||
//
|
||||
// TODO: ContentConfig will be updated to accept a Negotiator instead of a
|
||||
// NegotiatedSerializer and NegotiatedSerializer will be removed.
|
||||
type ClientContentConfig struct {
|
||||
// AcceptContentTypes specifies the types the client will accept and is optional.
|
||||
// If not set, ContentType will be used to define the Accept header
|
||||
AcceptContentTypes string
|
||||
// ContentType specifies the wire format used to communicate with the server.
|
||||
// This value will be set as the Accept header on requests made to the server if
|
||||
// AcceptContentTypes is not set, and as the default content type on any object
|
||||
// sent to the server. If not set, "application/json" is used.
|
||||
ContentType string
|
||||
// GroupVersion is the API version to talk to. Must be provided when initializing
|
||||
// a RESTClient directly. When initializing a Client, will be set with the default
|
||||
// code version. This is used as the default group version for VersionedParams.
|
||||
GroupVersion schema.GroupVersion
|
||||
// Negotiator is used for obtaining encoders and decoders for multiple
|
||||
// supported media types.
|
||||
Negotiator runtime.ClientNegotiator
|
||||
}
|
||||
|
||||
// RESTClient imposes common Kubernetes API conventions on a set of resource paths.
|
||||
// The baseURL is expected to point to an HTTP or HTTPS path that is the parent
|
||||
// of one or more resources. The server should return a decodable API resource
|
||||
|
|
@ -64,34 +84,27 @@ type RESTClient struct {
|
|||
// versionedAPIPath is a path segment connecting the base URL to the resource root
|
||||
versionedAPIPath string
|
||||
|
||||
// contentConfig is the information used to communicate with the server.
|
||||
contentConfig ContentConfig
|
||||
|
||||
// serializers contain all serializers for underlying content type.
|
||||
serializers Serializers
|
||||
// content describes how a RESTClient encodes and decodes responses.
|
||||
content ClientContentConfig
|
||||
|
||||
// creates BackoffManager that is passed to requests.
|
||||
createBackoffMgr func() BackoffManager
|
||||
|
||||
// TODO extract this into a wrapper interface via the RESTClient interface in kubectl.
|
||||
Throttle flowcontrol.RateLimiter
|
||||
// rateLimiter is shared among all requests created by this client unless specifically
|
||||
// overridden.
|
||||
rateLimiter flowcontrol.RateLimiter
|
||||
|
||||
// Set specific behavior of the client. If not set http.DefaultClient will be used.
|
||||
Client *http.Client
|
||||
}
|
||||
|
||||
type Serializers struct {
|
||||
Encoder runtime.Encoder
|
||||
Decoder runtime.Decoder
|
||||
StreamingSerializer runtime.Serializer
|
||||
Framer runtime.Framer
|
||||
RenegotiatedDecoder func(contentType string, params map[string]string) (runtime.Decoder, error)
|
||||
}
|
||||
|
||||
// NewRESTClient creates a new RESTClient. This client performs generic REST functions
|
||||
// such as Get, Put, Post, and Delete on specified paths. Codec controls encoding and
|
||||
// decoding of responses from the server.
|
||||
func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ContentConfig, maxQPS float32, maxBurst int, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) {
|
||||
// such as Get, Put, Post, and Delete on specified paths.
|
||||
func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ClientContentConfig, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) {
|
||||
if len(config.ContentType) == 0 {
|
||||
config.ContentType = "application/json"
|
||||
}
|
||||
|
||||
base := *baseURL
|
||||
if !strings.HasSuffix(base.Path, "/") {
|
||||
base.Path += "/"
|
||||
|
|
@ -99,31 +112,14 @@ func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ContentConf
|
|||
base.RawQuery = ""
|
||||
base.Fragment = ""
|
||||
|
||||
if config.GroupVersion == nil {
|
||||
config.GroupVersion = &schema.GroupVersion{}
|
||||
}
|
||||
if len(config.ContentType) == 0 {
|
||||
config.ContentType = "application/json"
|
||||
}
|
||||
serializers, err := createSerializers(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var throttle flowcontrol.RateLimiter
|
||||
if maxQPS > 0 && rateLimiter == nil {
|
||||
throttle = flowcontrol.NewTokenBucketRateLimiter(maxQPS, maxBurst)
|
||||
} else if rateLimiter != nil {
|
||||
throttle = rateLimiter
|
||||
}
|
||||
return &RESTClient{
|
||||
base: &base,
|
||||
versionedAPIPath: versionedAPIPath,
|
||||
contentConfig: config,
|
||||
serializers: *serializers,
|
||||
content: config,
|
||||
createBackoffMgr: readExpBackoffConfig,
|
||||
Throttle: throttle,
|
||||
Client: client,
|
||||
rateLimiter: rateLimiter,
|
||||
|
||||
Client: client,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
@ -132,7 +128,7 @@ func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter {
|
|||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
return c.Throttle
|
||||
return c.rateLimiter
|
||||
}
|
||||
|
||||
// readExpBackoffConfig handles the internal logic of determining what the
|
||||
|
|
@ -153,58 +149,6 @@ func readExpBackoffConfig() BackoffManager {
|
|||
time.Duration(backoffDurationInt)*time.Second)}
|
||||
}
|
||||
|
||||
// createSerializers creates all necessary serializers for given contentType.
|
||||
// TODO: the negotiated serializer passed to this method should probably return
|
||||
// serializers that control decoding and versioning without this package
|
||||
// being aware of the types. Depends on whether RESTClient must deal with
|
||||
// generic infrastructure.
|
||||
func createSerializers(config ContentConfig) (*Serializers, error) {
|
||||
mediaTypes := config.NegotiatedSerializer.SupportedMediaTypes()
|
||||
contentType := config.ContentType
|
||||
mediaType, _, err := mime.ParseMediaType(contentType)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("the content type specified in the client configuration is not recognized: %v", err)
|
||||
}
|
||||
info, ok := runtime.SerializerInfoForMediaType(mediaTypes, mediaType)
|
||||
if !ok {
|
||||
if len(contentType) != 0 || len(mediaTypes) == 0 {
|
||||
return nil, fmt.Errorf("no serializers registered for %s", contentType)
|
||||
}
|
||||
info = mediaTypes[0]
|
||||
}
|
||||
|
||||
internalGV := schema.GroupVersions{
|
||||
{
|
||||
Group: config.GroupVersion.Group,
|
||||
Version: runtime.APIVersionInternal,
|
||||
},
|
||||
// always include the legacy group as a decoding target to handle non-error `Status` return types
|
||||
{
|
||||
Group: "",
|
||||
Version: runtime.APIVersionInternal,
|
||||
},
|
||||
}
|
||||
|
||||
s := &Serializers{
|
||||
Encoder: config.NegotiatedSerializer.EncoderForVersion(info.Serializer, *config.GroupVersion),
|
||||
Decoder: config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV),
|
||||
|
||||
RenegotiatedDecoder: func(contentType string, params map[string]string) (runtime.Decoder, error) {
|
||||
info, ok := runtime.SerializerInfoForMediaType(mediaTypes, contentType)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("serializer for %s not registered", contentType)
|
||||
}
|
||||
return config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV), nil
|
||||
},
|
||||
}
|
||||
if info.StreamSerializer != nil {
|
||||
s.StreamingSerializer = info.StreamSerializer.Serializer
|
||||
s.Framer = info.StreamSerializer.Framer
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Verb begins a request with a verb (GET, POST, PUT, DELETE).
|
||||
//
|
||||
// Example usage of RESTClient's request building interface:
|
||||
|
|
@ -219,12 +163,7 @@ func createSerializers(config ContentConfig) (*Serializers, error) {
|
|||
// list, ok := resp.(*api.PodList)
|
||||
//
|
||||
func (c *RESTClient) Verb(verb string) *Request {
|
||||
backoff := c.createBackoffMgr()
|
||||
|
||||
if c.Client == nil {
|
||||
return NewRequest(nil, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle, 0)
|
||||
}
|
||||
return NewRequest(c.Client, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle, c.Client.Timeout)
|
||||
return NewRequest(c).Verb(verb)
|
||||
}
|
||||
|
||||
// Post begins a POST request. Short for c.Verb("POST").
|
||||
|
|
@ -254,5 +193,5 @@ func (c *RESTClient) Delete() *Request {
|
|||
|
||||
// APIVersion returns the APIVersion this RESTClient is expected to use.
|
||||
func (c *RESTClient) APIVersion() schema.GroupVersion {
|
||||
return *c.contentConfig.GroupVersion
|
||||
return c.content.GroupVersion
|
||||
}
|
||||
|
|
|
|||
|
|
@ -269,6 +269,9 @@ type ContentConfig struct {
|
|||
GroupVersion *schema.GroupVersion
|
||||
// NegotiatedSerializer is used for obtaining encoders and decoders for multiple
|
||||
// supported media types.
|
||||
//
|
||||
// TODO: NegotiatedSerializer will be phased out as internal clients are removed
|
||||
// from Kubernetes.
|
||||
NegotiatedSerializer runtime.NegotiatedSerializer
|
||||
}
|
||||
|
||||
|
|
@ -283,14 +286,6 @@ func RESTClientFor(config *Config) (*RESTClient, error) {
|
|||
if config.NegotiatedSerializer == nil {
|
||||
return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient")
|
||||
}
|
||||
qps := config.QPS
|
||||
if config.QPS == 0.0 {
|
||||
qps = DefaultQPS
|
||||
}
|
||||
burst := config.Burst
|
||||
if config.Burst == 0 {
|
||||
burst = DefaultBurst
|
||||
}
|
||||
|
||||
baseURL, versionedAPIPath, err := defaultServerUrlFor(config)
|
||||
if err != nil {
|
||||
|
|
@ -310,7 +305,33 @@ func RESTClientFor(config *Config) (*RESTClient, error) {
|
|||
}
|
||||
}
|
||||
|
||||
return NewRESTClient(baseURL, versionedAPIPath, config.ContentConfig, qps, burst, config.RateLimiter, httpClient)
|
||||
rateLimiter := config.RateLimiter
|
||||
if rateLimiter == nil {
|
||||
qps := config.QPS
|
||||
if config.QPS == 0.0 {
|
||||
qps = DefaultQPS
|
||||
}
|
||||
burst := config.Burst
|
||||
if config.Burst == 0 {
|
||||
burst = DefaultBurst
|
||||
}
|
||||
if qps > 0 {
|
||||
rateLimiter = flowcontrol.NewTokenBucketRateLimiter(qps, burst)
|
||||
}
|
||||
}
|
||||
|
||||
var gv schema.GroupVersion
|
||||
if config.GroupVersion != nil {
|
||||
gv = *config.GroupVersion
|
||||
}
|
||||
clientContent := ClientContentConfig{
|
||||
AcceptContentTypes: config.AcceptContentTypes,
|
||||
ContentType: config.ContentType,
|
||||
GroupVersion: gv,
|
||||
Negotiator: runtime.NewClientNegotiator(config.NegotiatedSerializer, gv),
|
||||
}
|
||||
|
||||
return NewRESTClient(baseURL, versionedAPIPath, clientContent, rateLimiter, httpClient)
|
||||
}
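A rough sketch of what the refactored construction path amounts to when calling NewRESTClient directly with the new ClientContentConfig; the base URL, group version, and rate-limiter values are assumptions.

package main

import (
	"net/http"
	"net/url"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/kubernetes/scheme"
	rest "k8s.io/client-go/rest"
	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	base, _ := url.Parse("https://example.invalid") // placeholder API server URL
	gv := schema.GroupVersion{Version: "v1"}

	// NewRESTClient now takes a ClientContentConfig plus an optional rate limiter
	// instead of raw QPS/burst numbers and a Serializers struct.
	content := rest.ClientContentConfig{
		ContentType:  "application/json",
		GroupVersion: gv,
		Negotiator:   runtime.NewClientNegotiator(scheme.Codecs.WithoutConversion(), gv),
	}
	limiter := flowcontrol.NewTokenBucketRateLimiter(5, 10)

	client, err := rest.NewRESTClient(base, "/api/v1", content, limiter, http.DefaultClient)
	if err != nil {
		panic(err)
	}
	_ = client // ready for client.Get().Resource("pods")... style calls
}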
|
||||
|
||||
// UnversionedRESTClientFor is the same as RESTClientFor, except that it allows
|
||||
|
|
@ -338,13 +359,33 @@ func UnversionedRESTClientFor(config *Config) (*RESTClient, error) {
|
|||
}
|
||||
}
|
||||
|
||||
versionConfig := config.ContentConfig
|
||||
if versionConfig.GroupVersion == nil {
|
||||
v := metav1.SchemeGroupVersion
|
||||
versionConfig.GroupVersion = &v
|
||||
rateLimiter := config.RateLimiter
|
||||
if rateLimiter == nil {
|
||||
qps := config.QPS
|
||||
if config.QPS == 0.0 {
|
||||
qps = DefaultQPS
|
||||
}
|
||||
burst := config.Burst
|
||||
if config.Burst == 0 {
|
||||
burst = DefaultBurst
|
||||
}
|
||||
if qps > 0 {
|
||||
rateLimiter = flowcontrol.NewTokenBucketRateLimiter(qps, burst)
|
||||
}
|
||||
}
|
||||
|
||||
return NewRESTClient(baseURL, versionedAPIPath, versionConfig, config.QPS, config.Burst, config.RateLimiter, httpClient)
|
||||
gv := metav1.SchemeGroupVersion
|
||||
if config.GroupVersion != nil {
|
||||
gv = *config.GroupVersion
|
||||
}
|
||||
clientContent := ClientContentConfig{
|
||||
AcceptContentTypes: config.AcceptContentTypes,
|
||||
ContentType: config.ContentType,
|
||||
GroupVersion: gv,
|
||||
Negotiator: runtime.NewClientNegotiator(config.NegotiatedSerializer, gv),
|
||||
}
|
||||
|
||||
return NewRESTClient(baseURL, versionedAPIPath, clientContent, rateLimiter, httpClient)
|
||||
}
|
||||
|
||||
// SetKubernetesDefaults sets default values on the provided client config for accessing the
|
||||
|
|
|
|||
|
|
@ -48,7 +48,8 @@ import (
|
|||
|
||||
var (
|
||||
// longThrottleLatency defines threshold for logging requests. All requests being
// throttle for more than longThrottleLatency will be logged.
// throttled (via the provided rateLimiter) for more than longThrottleLatency will
// be logged.
longThrottleLatency = 50 * time.Millisecond
)

@ -74,19 +75,20 @@ func (r *RequestConstructionError) Error() string {
return fmt.Sprintf("request construction error: '%v'", r.Err)
}

var noBackoff = &NoBackoff{}

// Request allows for building up a request to a server in a chained fashion.
// Any errors are stored until the end of your call, so you only have to
// check once.
type Request struct {
// required
client HTTPClient
verb string
c *RESTClient

baseURL *url.URL
content ContentConfig
serializers Serializers
rateLimiter flowcontrol.RateLimiter
backoff BackoffManager
timeout time.Duration

// generic components accessible via method setters
verb string
pathPrefix string
subpath string
params url.Values

@ -98,7 +100,6 @@ type Request struct {
resource string
resourceName string
subresource string
timeout time.Duration

// output
err error

@ -106,42 +107,63 @@ type Request struct {

// This is only used for per-request timeouts, deadlines, and cancellations.
ctx context.Context

backoffMgr BackoffManager
throttle flowcontrol.RateLimiter
}

// NewRequest creates a new request helper object for accessing runtime.Objects on a server.
func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, serializers Serializers, backoff BackoffManager, throttle flowcontrol.RateLimiter, timeout time.Duration) *Request {
func NewRequest(c *RESTClient) *Request {
var backoff BackoffManager
if c.createBackoffMgr != nil {
backoff = c.createBackoffMgr()
}
if backoff == nil {
klog.V(2).Infof("Not implementing request backoff strategy.")
backoff = &NoBackoff{}
backoff = noBackoff
}

pathPrefix := "/"
if baseURL != nil {
pathPrefix = path.Join(pathPrefix, baseURL.Path)
var pathPrefix string
if c.base != nil {
pathPrefix = path.Join("/", c.base.Path, c.versionedAPIPath)
} else {
pathPrefix = path.Join("/", c.versionedAPIPath)
}

var timeout time.Duration
if c.Client != nil {
timeout = c.Client.Timeout
}

r := &Request{
client: client,
verb: verb,
baseURL: baseURL,
pathPrefix: path.Join(pathPrefix, versionedAPIPath),
content: content,
serializers: serializers,
backoffMgr: backoff,
throttle: throttle,
c: c,
rateLimiter: c.rateLimiter,
backoff: backoff,
timeout: timeout,
pathPrefix: pathPrefix,
}

switch {
case len(content.AcceptContentTypes) > 0:
r.SetHeader("Accept", content.AcceptContentTypes)
case len(content.ContentType) > 0:
r.SetHeader("Accept", content.ContentType+", */*")
case len(c.content.AcceptContentTypes) > 0:
r.SetHeader("Accept", c.content.AcceptContentTypes)
case len(c.content.ContentType) > 0:
r.SetHeader("Accept", c.content.ContentType+", */*")
}
return r
}

// NewRequestWithClient creates a Request with an embedded RESTClient for use in test scenarios.
func NewRequestWithClient(base *url.URL, versionedAPIPath string, content ClientContentConfig, client *http.Client) *Request {
return NewRequest(&RESTClient{
base: base,
versionedAPIPath: versionedAPIPath,
content: content,
Client: client,
})
}

// Verb sets the verb this request will use.
func (r *Request) Verb(verb string) *Request {
r.verb = verb
return r
}

// Prefix adds segments to the relative beginning to the request path. These
// items will be placed before the optional Namespace, Resource, or Name sections.
// Setting AbsPath will clear any previously set Prefix segments
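The hunks above replace the long positional NewRequest signature with one that derives its defaults (path prefix, timeout, backoff, rate limiter, content negotiation) from a *RESTClient, and add NewRequestWithClient for wiring a bare *http.Client in tests. A minimal sketch of how a test inside the rest package might exercise the new helper; the endpoint URL, API path, and content values are illustrative assumptions, not taken from this commit:

package rest

import (
	"net/http"
	"net/url"
	"testing"
	"time"
)

// TestNewRequestWithClientSketch is a sketch only: it builds a Request from a plain
// *http.Client via the NewRequestWithClient helper added in the hunk above.
func TestNewRequestWithClientSketch(t *testing.T) {
	base, _ := url.Parse("https://localhost:6443") // assumed apiserver endpoint
	req := NewRequestWithClient(
		base,
		"/apis/apps/v1", // assumed versioned API path
		ClientContentConfig{ContentType: "application/json"},
		&http.Client{Timeout: 10 * time.Second},
	).Verb("GET").Namespace("default").Resource("deployments")

	// Expected to yield .../apis/apps/v1/namespaces/default/deployments
	t.Logf("built URL: %s", req.URL())
}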
@ -184,17 +206,17 @@ func (r *Request) Resource(resource string) *Request {
// or defaults to the stub implementation if nil is provided
func (r *Request) BackOff(manager BackoffManager) *Request {
if manager == nil {
r.backoffMgr = &NoBackoff{}
r.backoff = &NoBackoff{}
return r
}

r.backoffMgr = manager
r.backoff = manager
return r
}

// Throttle receives a rate-limiter and sets or replaces an existing request limiter
func (r *Request) Throttle(limiter flowcontrol.RateLimiter) *Request {
r.throttle = limiter
r.rateLimiter = limiter
return r
}

@ -272,8 +294,8 @@ func (r *Request) AbsPath(segments ...string) *Request {
if r.err != nil {
return r
}
r.pathPrefix = path.Join(r.baseURL.Path, path.Join(segments...))
if len(segments) == 1 && (len(r.baseURL.Path) > 1 || len(segments[0]) > 1) && strings.HasSuffix(segments[0], "/") {
r.pathPrefix = path.Join(r.c.base.Path, path.Join(segments...))
if len(segments) == 1 && (len(r.c.base.Path) > 1 || len(segments[0]) > 1) && strings.HasSuffix(segments[0], "/") {
// preserve any trailing slashes for legacy behavior
r.pathPrefix += "/"
}

@ -317,7 +339,7 @@ func (r *Request) Param(paramName, s string) *Request {
// VersionedParams will not write query parameters that have omitempty set and are empty. If a
// parameter has already been set it is appended to (Params and VersionedParams are additive).
func (r *Request) VersionedParams(obj runtime.Object, codec runtime.ParameterCodec) *Request {
return r.SpecificallyVersionedParams(obj, codec, *r.content.GroupVersion)
return r.SpecificallyVersionedParams(obj, codec, r.c.content.GroupVersion)
}

func (r *Request) SpecificallyVersionedParams(obj runtime.Object, codec runtime.ParameterCodec, version schema.GroupVersion) *Request {

@ -397,14 +419,19 @@ func (r *Request) Body(obj interface{}) *Request {
if reflect.ValueOf(t).IsNil() {
return r
}
data, err := runtime.Encode(r.serializers.Encoder, t)
encoder, err := r.c.content.Negotiator.Encoder(r.c.content.ContentType, nil)
if err != nil {
r.err = err
return r
}
data, err := runtime.Encode(encoder, t)
if err != nil {
r.err = err
return r
}
glogBody("Request Body", data)
r.body = bytes.NewReader(data)
r.SetHeader("Content-Type", r.content.ContentType)
r.SetHeader("Content-Type", r.c.content.ContentType)
default:
r.err = fmt.Errorf("unknown type used for body: %+v", obj)
}

@ -433,8 +460,8 @@ func (r *Request) URL() *url.URL {
}

finalURL := &url.URL{}
if r.baseURL != nil {
*finalURL = *r.baseURL
if r.c.base != nil {
*finalURL = *r.c.base
}
finalURL.Path = p

@ -468,8 +495,8 @@ func (r Request) finalURLTemplate() url.URL {
segments := strings.Split(r.URL().Path, "/")
groupIndex := 0
index := 0
if r.URL() != nil && r.baseURL != nil && strings.Contains(r.URL().Path, r.baseURL.Path) {
groupIndex += len(strings.Split(r.baseURL.Path, "/"))
if r.URL() != nil && r.c.base != nil && strings.Contains(r.URL().Path, r.c.base.Path) {
groupIndex += len(strings.Split(r.c.base.Path, "/"))
}
if groupIndex >= len(segments) {
return *url

@ -522,16 +549,16 @@ func (r Request) finalURLTemplate() url.URL {
}

func (r *Request) tryThrottle() error {
if r.throttle == nil {
if r.rateLimiter == nil {
return nil
}

now := time.Now()
var err error
if r.ctx != nil {
err = r.throttle.Wait(r.ctx)
err = r.rateLimiter.Wait(r.ctx)
} else {
r.throttle.Accept()
r.rateLimiter.Accept()
}

if latency := time.Since(now); latency > longThrottleLatency {

@ -544,27 +571,11 @@ func (r *Request) tryThrottle() error {
// Watch attempts to begin watching the requested location.
// Returns a watch.Interface, or an error.
func (r *Request) Watch() (watch.Interface, error) {
return r.WatchWithSpecificDecoders(
func(body io.ReadCloser) streaming.Decoder {
framer := r.serializers.Framer.NewFrameReader(body)
return streaming.NewDecoder(framer, r.serializers.StreamingSerializer)
},
r.serializers.Decoder,
)
}

// WatchWithSpecificDecoders attempts to begin watching the requested location with a *different* decoder.
// Turns out that you want one "standard" decoder for the watch event and one "personal" decoder for the content
// Returns a watch.Interface, or an error.
func (r *Request) WatchWithSpecificDecoders(wrapperDecoderFn func(io.ReadCloser) streaming.Decoder, embeddedDecoder runtime.Decoder) (watch.Interface, error) {
// We specifically don't want to rate limit watches, so we
// don't use r.throttle here.
// don't use r.rateLimiter here.
if r.err != nil {
return nil, r.err
}
if r.serializers.Framer == nil {
return nil, fmt.Errorf("watching resources is not possible with this client (content-type: %s)", r.content.ContentType)
}

url := r.URL().String()
req, err := http.NewRequest(r.verb, url, r.body)

@ -575,18 +586,18 @@ func (r *Request) WatchWithSpecificDecoders(wrapperDecoderFn func(io.ReadCloser)
req = req.WithContext(r.ctx)
}
req.Header = r.headers
client := r.client
client := r.c.Client
if client == nil {
client = http.DefaultClient
}
r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL()))
r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL()))
resp, err := client.Do(req)
updateURLMetrics(r, resp, err)
if r.baseURL != nil {
if r.c.base != nil {
if err != nil {
r.backoffMgr.UpdateBackoff(r.baseURL, err, 0)
r.backoff.UpdateBackoff(r.c.base, err, 0)
} else {
r.backoffMgr.UpdateBackoff(r.baseURL, err, resp.StatusCode)
r.backoff.UpdateBackoff(r.c.base, err, resp.StatusCode)
}
}
if err != nil {

@ -604,9 +615,22 @@ func (r *Request) WatchWithSpecificDecoders(wrapperDecoderFn func(io.ReadCloser)
}
return nil, fmt.Errorf("for request %s, got status: %v", url, resp.StatusCode)
}
wrapperDecoder := wrapperDecoderFn(resp.Body)

contentType := resp.Header.Get("Content-Type")
mediaType, params, err := mime.ParseMediaType(contentType)
if err != nil {
klog.V(4).Infof("Unexpected content type from the server: %q: %v", contentType, err)
}
objectDecoder, streamingSerializer, framer, err := r.c.content.Negotiator.StreamDecoder(mediaType, params)
if err != nil {
return nil, err
}

frameReader := framer.NewFrameReader(resp.Body)
watchEventDecoder := streaming.NewDecoder(frameReader, streamingSerializer)

return watch.NewStreamWatcher(
restclientwatch.NewDecoder(wrapperDecoder, embeddedDecoder),
restclientwatch.NewDecoder(watchEventDecoder, objectDecoder),
// use 500 to indicate that the cause of the error is unknown - other error codes
// are more specific to HTTP interactions, and set a reason
errors.NewClientErrorReporter(http.StatusInternalServerError, r.verb, "ClientWatchDecoding"),

@ -617,8 +641,8 @@ func (r *Request) WatchWithSpecificDecoders(wrapperDecoderFn func(io.ReadCloser)
// It also handles corner cases for incomplete/invalid request data.
func updateURLMetrics(req *Request, resp *http.Response, err error) {
url := "none"
if req.baseURL != nil {
url = req.baseURL.Host
if req.c.base != nil {
url = req.c.base.Host
}

// Errors can be arbitrary strings. Unbound label cardinality is not suitable for a metric

@ -656,18 +680,18 @@ func (r *Request) Stream() (io.ReadCloser, error) {
req = req.WithContext(r.ctx)
}
req.Header = r.headers
client := r.client
client := r.c.Client
if client == nil {
client = http.DefaultClient
}
r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL()))
r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL()))
resp, err := client.Do(req)
updateURLMetrics(r, resp, err)
if r.baseURL != nil {
if r.c.base != nil {
if err != nil {
r.backoffMgr.UpdateBackoff(r.URL(), err, 0)
r.backoff.UpdateBackoff(r.URL(), err, 0)
} else {
r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode)
r.backoff.UpdateBackoff(r.URL(), err, resp.StatusCode)
}
}
if err != nil {

@ -738,7 +762,7 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error {
return err
}

client := r.client
client := r.c.Client
if client == nil {
client = http.DefaultClient
}

@ -765,11 +789,11 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error {
}
req.Header = r.headers

r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL()))
r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL()))
if retries > 0 {
// We are retrying the request that we already send to apiserver
// at least once before.
// This request should also be throttled with the client-internal throttler.
// This request should also be throttled with the client-internal rate limiter.
if err := r.tryThrottle(); err != nil {
return err
}

@ -777,9 +801,9 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error {
resp, err := client.Do(req)
updateURLMetrics(r, resp, err)
if err != nil {
r.backoffMgr.UpdateBackoff(r.URL(), err, 0)
r.backoff.UpdateBackoff(r.URL(), err, 0)
} else {
r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode)
r.backoff.UpdateBackoff(r.URL(), err, resp.StatusCode)
}
if err != nil {
// "Connection reset by peer" is usually a transient error.

@ -822,7 +846,7 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error {
}

klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url)
r.backoffMgr.Sleep(time.Duration(seconds) * time.Second)
r.backoff.Sleep(time.Duration(seconds) * time.Second)
return false
}
fn(req, resp)

@ -908,14 +932,18 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu
glogBody("Response Body", body)

// verify the content type is accurate
var decoder runtime.Decoder
contentType := resp.Header.Get("Content-Type")
decoder := r.serializers.Decoder
if len(contentType) > 0 && (decoder == nil || (len(r.content.ContentType) > 0 && contentType != r.content.ContentType)) {
if len(contentType) == 0 {
contentType = r.c.content.ContentType
}
if len(contentType) > 0 {
var err error
mediaType, params, err := mime.ParseMediaType(contentType)
if err != nil {
return Result{err: errors.NewInternalError(err)}
}
decoder, err = r.serializers.RenegotiatedDecoder(mediaType, params)
decoder, err = r.c.content.Negotiator.Decoder(mediaType, params)
if err != nil {
// if we fail to negotiate a decoder, treat this as an unstructured error
switch {

@ -1035,7 +1063,7 @@ func (r *Request) newUnstructuredResponseError(body []byte, isTextResponse bool,
}
var groupResource schema.GroupResource
if len(r.resource) > 0 {
groupResource.Group = r.content.GroupVersion.Group
groupResource.Group = r.c.content.GroupVersion.Group
groupResource.Resource = r.resource
}
return errors.NewGenericServerResponse(
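Throughout the hunks above, the request's throttle/backoffMgr fields become rateLimiter/backoff, and tryThrottle prefers the context-aware Wait over the blocking Accept. A small standalone sketch of that pattern with client-go's token-bucket limiter; the QPS and burst numbers are illustrative assumptions:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	limiter := flowcontrol.NewTokenBucketRateLimiter(5, 10) // 5 qps, burst of 10 (assumed values)

	// Mirrors tryThrottle: when a request context exists, use the cancellable Wait...
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	if err := limiter.Wait(ctx); err != nil {
		fmt.Println("throttled request aborted:", err)
		return
	}

	// ...without a context, the code falls back to the blocking Accept.
	limiter.Accept()

	fmt.Println("request may proceed")
}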
@ -48,7 +48,7 @@ func NewCacheMutationDetector(name string) MutationDetector {
return dummyMutationDetector{}
}
klog.Warningln("Mutation detector is enabled, this will result in memory leakage.")
return &defaultCacheMutationDetector{name: name, period: 1 * time.Second}
return &defaultCacheMutationDetector{name: name, period: 1 * time.Second, retainDuration: 2 * time.Minute}
}

type dummyMutationDetector struct{}

@ -68,6 +68,10 @@ type defaultCacheMutationDetector struct {
lock sync.Mutex
cachedObjs []cacheObj

retainDuration time.Duration
lastRotated time.Time
retainedCachedObjs []cacheObj

// failureFunc is injectable for unit testing. If you don't have it, the process will panic.
// This panic is intentional, since turning on this detection indicates you want a strong
// failure signal. This failure is effectively a p0 bug and you can't trust process results

@ -84,6 +88,14 @@ type cacheObj struct {
func (d *defaultCacheMutationDetector) Run(stopCh <-chan struct{}) {
// we DON'T want protection from panics. If we're running this code, we want to die
for {
if d.lastRotated.IsZero() {
d.lastRotated = time.Now()
} else if time.Now().Sub(d.lastRotated) > d.retainDuration {
d.retainedCachedObjs = d.cachedObjs
d.cachedObjs = nil
d.lastRotated = time.Now()
}

d.CompareObjects()

select {

@ -120,6 +132,12 @@ func (d *defaultCacheMutationDetector) CompareObjects() {
altered = true
}
}
for i, obj := range d.retainedCachedObjs {
if !reflect.DeepEqual(obj.cached, obj.copied) {
fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectGoPrintSideBySide(obj.cached, obj.copied))
altered = true
}
}

if altered {
msg := fmt.Sprintf("cache %s modified", d.name)
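The detector change above keeps a second generation of cached objects: every retainDuration (two minutes by default), cachedObjs is rotated into retainedCachedObjs, so mutations can still be flagged for a while after an object stops being re-cached. A standalone sketch of that rotation rule; the type and field names here are illustrative, not the detector's own:

package main

import (
	"fmt"
	"time"
)

// rotator mimics the retain/rotate bookkeeping added to the mutation detector above.
type rotator struct {
	retainDuration time.Duration
	lastRotated    time.Time
	current        []string
	retained       []string
}

func (r *rotator) maybeRotate(now time.Time) {
	if r.lastRotated.IsZero() {
		r.lastRotated = now
		return
	}
	if now.Sub(r.lastRotated) > r.retainDuration {
		// Keep the previous generation around for comparison instead of dropping it.
		r.retained = r.current
		r.current = nil
		r.lastRotated = now
	}
}

func main() {
	r := &rotator{retainDuration: 2 * time.Minute, current: []string{"pod/a", "pod/b"}}
	start := time.Now()
	r.maybeRotate(start)                      // first call only records the timestamp
	r.maybeRotate(start.Add(3 * time.Minute)) // past retainDuration: rotate generations
	fmt.Println(r.current, r.retained)        // [] [pod/a pod/b]
}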
@ -74,9 +74,6 @@ type Reflector struct {
// observed when doing a sync with the underlying store
// it is thread safe, but not synchronized with the underlying store
lastSyncResourceVersion string
// isLastSyncResourceVersionGone is true if the previous list or watch request with lastSyncResourceVersion
// failed with an HTTP 410 (Gone) status code.
isLastSyncResourceVersionGone bool
// lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion
lastSyncResourceVersionMutex sync.RWMutex
// WatchListPageSize is the requested chunk size of initial and resync watch lists.

@ -188,7 +185,10 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
klog.V(3).Infof("Listing and watching %v from %s", r.expectedTypeName, r.name)
var resourceVersion string

options := metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}
// Explicitly set "0" as resource version - it's fine for the List()
// to be served from cache and potentially be delayed relative to
// etcd contents. Reflector framework will catch up via Watch() eventually.
options := metav1.ListOptions{ResourceVersion: "0"}

if err := func() error {
initTrace := trace.New("Reflector ListAndWatch", trace.Field{"name", r.name})

@ -211,17 +211,8 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
if r.WatchListPageSize != 0 {
pager.PageSize = r.WatchListPageSize
}

// Pager falls back to full list if paginated list calls fail due to an "Expired" error.
list, err = pager.List(context.Background(), options)
if isExpiredError(err) {
r.setIsLastSyncResourceVersionExpired(true)
// Retry immediately if the resource version used to list is expired.
// The pager already falls back to full list if paginated list calls fail due to an "Expired" error on
// continuation pages, but the pager might not be enabled, or the full list might fail because the
// resource version it is listing at is expired, so we need to fallback to resourceVersion="" in all
// to recover and ensure the reflector makes forward progress.
list, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()})
}
close(listCh)
}()
select {

@ -234,7 +225,6 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
if err != nil {
return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedTypeName, err)
}
r.setIsLastSyncResourceVersionExpired(false) // list was successful
initTrace.Step("Objects listed")
listMetaInterface, err := meta.ListAccessor(list)
if err != nil {

@ -308,13 +298,10 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {

w, err := r.listerWatcher.Watch(options)
if err != nil {
switch {
case isExpiredError(err):
r.setIsLastSyncResourceVersionExpired(true)
klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err)
case err == io.EOF:
switch err {
case io.EOF:
// watch closed normally
case err == io.ErrUnexpectedEOF:
case io.ErrUnexpectedEOF:
klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedTypeName, err)
default:
utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedTypeName, err))

@ -333,8 +320,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil {
if err != errorStopRequested {
switch {
case isExpiredError(err):
r.setIsLastSyncResourceVersionExpired(true)
case apierrs.IsResourceExpired(err):
klog.V(4).Infof("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err)
default:
klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err)

@ -446,42 +432,3 @@ func (r *Reflector) setLastSyncResourceVersion(v string) {
defer r.lastSyncResourceVersionMutex.Unlock()
r.lastSyncResourceVersion = v
}

// relistResourceVersion determines the resource version the reflector should list or relist from.
// Returns either the lastSyncResourceVersion so that this reflector will relist with a resource
// versions no older than has already been observed in relist results or watch events, or, if the last relist resulted
// in an HTTP 410 (Gone) status code, returns "" so that the relist will use the latest resource version available in
// etcd via a quorum read.
func (r *Reflector) relistResourceVersion() string {
r.lastSyncResourceVersionMutex.RLock()
defer r.lastSyncResourceVersionMutex.RUnlock()

if r.isLastSyncResourceVersionGone {
// Since this reflector makes paginated list requests, and all paginated list requests skip the watch cache
// if the lastSyncResourceVersion is expired, we set ResourceVersion="" and list again to re-establish reflector
// to the latest available ResourceVersion, using a consistent read from etcd.
return ""
}
if r.lastSyncResourceVersion == "" {
// For performance reasons, initial list performed by reflector uses "0" as resource version to allow it to
// be served from the watch cache if it is enabled.
return "0"
}
return r.lastSyncResourceVersion
}

// setIsLastSyncResourceVersionExpired sets if the last list or watch request with lastSyncResourceVersion returned a
// expired error: HTTP 410 (Gone) Status Code.
func (r *Reflector) setIsLastSyncResourceVersionExpired(isExpired bool) {
r.lastSyncResourceVersionMutex.Lock()
defer r.lastSyncResourceVersionMutex.Unlock()
r.isLastSyncResourceVersionGone = isExpired
}

func isExpiredError(err error) bool {
// In Kubernetes 1.17 and earlier, the api server returns both apierrs.StatusReasonExpired and
// apierrs.StatusReasonGone for HTTP 410 (Gone) status code responses. In 1.18 the kube server is more consistent
// and always returns apierrs.StatusReasonExpired. For backward compatibility we can only remove the apierrs.IsGone
// check when we fully drop support for Kubernetes 1.17 servers from reflectors.
return apierrs.IsResourceExpired(err) || apierrs.IsGone(err)
}
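These hunks swap the previously vendored client-go snapshot of the reflector for the v0.17.2 version: the initial list and relist go back to ResourceVersion "0", and the HTTP 410 (Gone) bookkeeping (relistResourceVersion, setIsLastSyncResourceVersionExpired, isExpiredError) is removed. For reference, a standalone sketch of the decision rule the removed relistResourceVersion implemented; the function below is illustrative, not part of client-go:

package main

import "fmt"

// relistRV mirrors the three cases of the removed Reflector.relistResourceVersion shown above.
func relistRV(lastSyncRV string, lastSyncGone bool) string {
	if lastSyncGone {
		// Last resource version expired (HTTP 410): force a consistent read from etcd.
		return ""
	}
	if lastSyncRV == "" {
		// First list: "0" allows the apiserver to serve the list from its watch cache.
		return "0"
	}
	return lastSyncRV
}

func main() {
	fmt.Println(relistRV("", false))      // "0"     -> initial list, watch cache acceptable
	fmt.Println(relistRV("12345", false)) // "12345" -> continue from what was already observed
	fmt.Println(relistRV("12345", true))  // ""      -> 410 Gone, fall back to a quorum read
}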
@ -292,6 +292,13 @@ func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) {
set := index[indexValue]
if set != nil {
set.Delete(key)

// If we don't delete the set when zero, indices with high cardinality
// short lived resources can cause memory to increase over time from
// unused empty sets. See `kubernetes/kubernetes/issues/84959`.
if len(set) == 0 {
delete(index, indexValue)
}
}
}
}
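The indexer fix above deletes an index bucket once its last key is removed, so high-cardinality index values for short-lived objects no longer leave empty sets behind. The same pattern in isolation, with plain Go maps standing in for the indexer's types:

package main

import "fmt"

func main() {
	// index maps an index value (for example a node name) to the set of object keys under it.
	index := map[string]map[string]struct{}{
		"node-a": {"default/pod-1": {}},
	}

	// Remove the only key filed under "node-a".
	delete(index["node-a"], "default/pod-1")

	// Without this cleanup, short-lived objects with high-cardinality index values
	// would leave empty sets behind forever (kubernetes/kubernetes#84959).
	if len(index["node-a"]) == 0 {
		delete(index, "node-a")
	}

	fmt.Println(len(index)) // 0
}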
@ -31,6 +31,9 @@ func Convert_Slice_v1_NamedCluster_To_Map_string_To_Pointer_api_Cluster(in *[]Na
if err := Convert_v1_Cluster_To_api_Cluster(&curr.Cluster, newCluster, s); err != nil {
return err
}
if *out == nil {
*out = make(map[string]*api.Cluster)
}
if (*out)[curr.Name] == nil {
(*out)[curr.Name] = newCluster
} else {

@ -65,6 +68,9 @@ func Convert_Slice_v1_NamedAuthInfo_To_Map_string_To_Pointer_api_AuthInfo(in *[]
if err := Convert_v1_AuthInfo_To_api_AuthInfo(&curr.AuthInfo, newAuthInfo, s); err != nil {
return err
}
if *out == nil {
*out = make(map[string]*api.AuthInfo)
}
if (*out)[curr.Name] == nil {
(*out)[curr.Name] = newAuthInfo
} else {

@ -99,6 +105,9 @@ func Convert_Slice_v1_NamedContext_To_Map_string_To_Pointer_api_Context(in *[]Na
if err := Convert_v1_Context_To_api_Context(&curr.Context, newContext, s); err != nil {
return err
}
if *out == nil {
*out = make(map[string]*api.Context)
}
if (*out)[curr.Name] == nil {
(*out)[curr.Name] = newContext
} else {

@ -133,6 +142,9 @@ func Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(in *[]Named
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&curr.Extension, &newExtension, s); err != nil {
return err
}
if *out == nil {
*out = make(map[string]runtime.Object)
}
if (*out)[curr.Name] == nil {
(*out)[curr.Name] = newExtension
} else {
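Each kubeconfig conversion hunk above adds the same guard: the destination map is allocated lazily before the first insert, because assigning into a nil Go map panics at runtime. A tiny sketch of the guard using a hypothetical cluster type (not the real api.Cluster):

package main

import "fmt"

type cluster struct{ server string }

// insert allocates the destination map on first use, mirroring the `if *out == nil` guards above.
func insert(out *map[string]*cluster, name string, c *cluster) {
	if *out == nil {
		*out = make(map[string]*cluster) // writing into a nil map would panic
	}
	if (*out)[name] == nil {
		(*out)[name] = c
	}
}

func main() {
	var clusters map[string]*cluster // nil until the first entry arrives
	insert(&clusters, "prod", &cluster{server: "https://example.invalid"})
	fmt.Println(len(clusters)) // 1
}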
@ -77,6 +77,7 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti
if options.Limit == 0 {
options.Limit = p.PageSize
}
requestedResourceVersion := options.ResourceVersion
var list *metainternalversion.List
for {
select {

@ -94,9 +95,11 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti
if !errors.IsResourceExpired(err) || !p.FullListIfExpired || options.Continue == "" {
return nil, err
}
// the list expired while we were processing, fall back to a full list
// the list expired while we were processing, fall back to a full list at
// the requested ResourceVersion.
options.Limit = 0
options.Continue = ""
options.ResourceVersion = requestedResourceVersion
return p.PageFn(ctx, options)
}
m, err := meta.ListAccessor(obj)

@ -129,6 +132,10 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti

// set the next loop up
options.Continue = m.GetContinue()
// Clear the ResourceVersion on the subsequent List calls to avoid the
// `specifying resource version is not allowed when using continue` error.
// See https://github.com/kubernetes/kubernetes/issues/85221#issuecomment-553748143.
options.ResourceVersion = ""
}
}
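With the pager change above, a paginated list whose continue token expires is retried as a single full list at the originally requested ResourceVersion, and the ResourceVersion is cleared on follow-up pages so the apiserver accepts the continue token. A hedged usage sketch against client-go v0.17; the clientset wiring and page size are assumptions:

package podpager

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/pager"
)

// listPodsPaged lists pods in pages, falling back to a full list if the continue
// token expires mid-iteration (the behavior patched above). Sketch only.
func listPodsPaged(ctx context.Context, cs kubernetes.Interface) (runtime.Object, error) {
	p := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
		return cs.CoreV1().Pods(metav1.NamespaceAll).List(opts)
	}))
	p.PageSize = 500           // assumed page size
	p.FullListIfExpired = true // retry as one full list on an "Expired" error

	return p.List(ctx, metav1.ListOptions{ResourceVersion: "0"})
}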
@ -0,0 +1,102 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cert

import (
"crypto/tls"
"crypto/x509"
"fmt"
"net/url"
"strings"
)

// GetClientCANames gets the CA names for client certs that a server accepts. This is useful when inspecting the
// state of particular servers. apiHost is "host:port"
func GetClientCANames(apiHost string) ([]string, error) {
// when we run this the second time, we know which one we are expecting
acceptableCAs := []string{}
tlsConfig := &tls.Config{
InsecureSkipVerify: true, // this is insecure to always get to the GetClientCertificate
GetClientCertificate: func(hello *tls.CertificateRequestInfo) (*tls.Certificate, error) {
acceptableCAs = []string{}
for _, curr := range hello.AcceptableCAs {
acceptableCAs = append(acceptableCAs, string(curr))
}
return &tls.Certificate{}, nil
},
}

conn, err := tls.Dial("tcp", apiHost, tlsConfig)
if err != nil {
return nil, err
}
if err := conn.Close(); err != nil {
return nil, err
}

return acceptableCAs, nil
}

// GetClientCANamesForURL is GetClientCANames against a URL string like we use in kubeconfigs
func GetClientCANamesForURL(kubeConfigURL string) ([]string, error) {
apiserverURL, err := url.Parse(kubeConfigURL)
if err != nil {
return nil, err
}
return GetClientCANames(apiserverURL.Host)
}

// GetServingCertificates returns the x509 certs used by a server as certificates and pem encoded bytes.
// The serverName is optional for specifying a different name to get SNI certificates. apiHost is "host:port"
func GetServingCertificates(apiHost, serverName string) ([]*x509.Certificate, [][]byte, error) {
tlsConfig := &tls.Config{
InsecureSkipVerify: true, // this is insecure so that we always get connected
}
// if a name is specified for SNI, set it.
if len(serverName) > 0 {
tlsConfig.ServerName = serverName
}

conn, err := tls.Dial("tcp", apiHost, tlsConfig)
if err != nil {
return nil, nil, err
}
if err = conn.Close(); err != nil {
return nil, nil, fmt.Errorf("failed to close connection : %v", err)
}

peerCerts := conn.ConnectionState().PeerCertificates
peerCertBytes := [][]byte{}
for _, a := range peerCerts {
actualCert, err := EncodeCertificates(a)
if err != nil {
return nil, nil, err
}
peerCertBytes = append(peerCertBytes, []byte(strings.TrimSpace(string(actualCert))))
}

return peerCerts, peerCertBytes, err
}

// GetServingCertificatesForURL is GetServingCertificates against a URL string like we use in kubeconfigs
func GetServingCertificatesForURL(kubeConfigURL, serverName string) ([]*x509.Certificate, [][]byte, error) {
apiserverURL, err := url.Parse(kubeConfigURL)
if err != nil {
return nil, nil, err
}
return GetServingCertificates(apiserverURL.Host, serverName)
}
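The new helpers above deliberately dial with InsecureSkipVerify just to read handshake metadata: which client-certificate CAs the server requests and which serving certificates it presents. A usage sketch, assuming the file lands in k8s.io/client-go/util/cert; the endpoint URL is illustrative:

package main

import (
	"fmt"
	"log"

	"k8s.io/client-go/util/cert"
)

func main() {
	// Illustrative endpoint; point this at a real apiserver URL from a kubeconfig.
	const server = "https://localhost:6443"

	caNames, err := cert.GetClientCANamesForURL(server)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("client CA names advertised by the server:", caNames)

	certs, pems, err := cert.GetServingCertificatesForURL(server, "")
	if err != nil {
		log.Fatal(err)
	}
	for i, c := range certs {
		fmt.Printf("serving cert %d: CN=%s (%d PEM bytes)\n", i, c.Subject.CommonName, len(pems[i]))
	}
}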
@ -163,7 +163,7 @@ gopkg.in/alecthomas/kingpin.v2
gopkg.in/inf.v0
# gopkg.in/yaml.v2 v2.2.4
gopkg.in/yaml.v2
# k8s.io/api v0.0.0-20191112020540-7f9008e52f64
# k8s.io/api v0.17.2
k8s.io/api/admissionregistration/v1
k8s.io/api/admissionregistration/v1beta1
k8s.io/api/apps/v1

@ -185,6 +185,7 @@ k8s.io/api/coordination/v1
k8s.io/api/coordination/v1beta1
k8s.io/api/core/v1
k8s.io/api/discovery/v1alpha1
k8s.io/api/discovery/v1beta1
k8s.io/api/events/v1beta1
k8s.io/api/extensions/v1beta1
k8s.io/api/flowcontrol/v1alpha1

@ -203,7 +204,7 @@ k8s.io/api/settings/v1alpha1
k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1
# k8s.io/apimachinery v0.0.0-20191111054156-6eb29fdf75dc
# k8s.io/apimachinery v0.17.2
k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/meta
k8s.io/apimachinery/pkg/api/resource

@ -246,7 +247,7 @@ k8s.io/apimachinery/pkg/version
k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/autoscaler/vertical-pod-autoscaler v0.0.0-20191115143342-4cf961056038
# k8s.io/autoscaler/vertical-pod-autoscaler v0.0.0-20200123122250-fa95810cfc1e
k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1
k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1
k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2

@ -257,7 +258,7 @@ k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/a
k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1
k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta2
k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1
# k8s.io/client-go v0.0.0-20191109102209-3c0d1af94be5
# k8s.io/client-go v0.17.2
k8s.io/client-go/discovery
k8s.io/client-go/discovery/fake
k8s.io/client-go/kubernetes

@ -305,6 +306,8 @@ k8s.io/client-go/kubernetes/typed/core/v1
k8s.io/client-go/kubernetes/typed/core/v1/fake
k8s.io/client-go/kubernetes/typed/discovery/v1alpha1
k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake
k8s.io/client-go/kubernetes/typed/discovery/v1beta1
k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake
k8s.io/client-go/kubernetes/typed/events/v1beta1
k8s.io/client-go/kubernetes/typed/events/v1beta1/fake
k8s.io/client-go/kubernetes/typed/extensions/v1beta1

@ -376,7 +379,7 @@ k8s.io/client-go/util/retry
k8s.io/klog
# k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a
k8s.io/kube-openapi/pkg/util/proto
# k8s.io/utils v0.0.0-20191030222137-2b95a09bc58d
# k8s.io/utils v0.0.0-20200109141947-94aeca20bf09
k8s.io/utils/buffer
k8s.io/utils/integer
k8s.io/utils/trace