Merge pull request #14236 from dcfranca/feat/cordon-node-before-terminating

Add support for the --cordon-node-before-terminating autoscaler flag
This commit is contained in:
Kubernetes Prow Robot 2022-09-06 10:25:52 -07:00 committed by GitHub
commit 61cae77100
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
25 changed files with 51 additions and 7 deletions

View File

@ -11,6 +11,7 @@ This is a document to gather the release notes prior to the release.
* Karpenter support can be considered stable on Kubernetes versions 1.22 up until 1.24. Karpenter does not yet support Kubernetes above 1.25.
* Cert Manager may now solve dns-01 challenges. See [the cert manager documentation](/addons/#enabling-dns-01-challenges).
* Add support for --cordon-node-before-terminating on the cluster autoscaler addon (CordonNodeBeforeTerminating)
* EBS CSI driver can now be self-managed. See the [addon docs](/addons/#self-managed-aws-ebs-csi-driver).

View File

@ -666,6 +666,10 @@ spec:
description: 'BalanceSimilarNodeGroups makes cluster autoscaler
treat similar node groups as one. Default: false'
type: boolean
cordonNodeBeforeTerminating:
description: 'CordonNodeBeforeTerminating should CA cordon nodes
before terminating during downscale process Default: false'
type: boolean
cpuRequest:
anyOf:
- type: integer

View File

@ -1013,6 +1013,9 @@ type ClusterAutoscalerConfig struct {
// ScaleDownDelayAfterAdd determines the time after scale up that scale down evaluation resumes
// Default: 10m0s
ScaleDownDelayAfterAdd *string `json:"scaleDownDelayAfterAdd,omitempty"`
// CordonNodeBeforeTerminating should CA cordon nodes before terminating during downscale process
// Default: false
CordonNodeBeforeTerminating *bool `json:"cordonNodeBeforeTerminating,omitempty"`
// Image is the docker container used.
// Default: the latest supported image for the specified kubernetes version.
Image *string `json:"image,omitempty"`

View File

@ -1039,6 +1039,9 @@ type ClusterAutoscalerConfig struct {
// ScaleDownDelayAfterAdd determines the time after scale up that scale down evaluation resumes
// Default: 10m0s
ScaleDownDelayAfterAdd *string `json:"scaleDownDelayAfterAdd,omitempty"`
// CordonNodeBeforeTerminating should CA cordon nodes before terminating during downscale process
// Default: false
CordonNodeBeforeTerminating *bool `json:"cordonNodeBeforeTerminating,omitempty"`
// Image is the docker container used.
// Default: the latest supported image for the specified kubernetes version.
Image *string `json:"image,omitempty"`

View File

@ -2332,6 +2332,7 @@ func autoConvert_v1alpha2_ClusterAutoscalerConfig_To_kops_ClusterAutoscalerConfi
out.SkipNodesWithLocalStorage = in.SkipNodesWithLocalStorage
out.NewPodScaleUpDelay = in.NewPodScaleUpDelay
out.ScaleDownDelayAfterAdd = in.ScaleDownDelayAfterAdd
out.CordonNodeBeforeTerminating = in.CordonNodeBeforeTerminating
out.Image = in.Image
out.MemoryRequest = in.MemoryRequest
out.CPURequest = in.CPURequest
@ -2355,6 +2356,7 @@ func autoConvert_kops_ClusterAutoscalerConfig_To_v1alpha2_ClusterAutoscalerConfi
out.SkipNodesWithLocalStorage = in.SkipNodesWithLocalStorage
out.NewPodScaleUpDelay = in.NewPodScaleUpDelay
out.ScaleDownDelayAfterAdd = in.ScaleDownDelayAfterAdd
out.CordonNodeBeforeTerminating = in.CordonNodeBeforeTerminating
out.Image = in.Image
out.MemoryRequest = in.MemoryRequest
out.CPURequest = in.CPURequest

View File

@ -943,6 +943,11 @@ func (in *ClusterAutoscalerConfig) DeepCopyInto(out *ClusterAutoscalerConfig) {
*out = new(string)
**out = **in
}
if in.CordonNodeBeforeTerminating != nil {
in, out := &in.CordonNodeBeforeTerminating, &out.CordonNodeBeforeTerminating
*out = new(bool)
**out = **in
}
if in.Image != nil {
in, out := &in.Image, &out.Image
*out = new(string)

View File

@ -1010,6 +1010,9 @@ type ClusterAutoscalerConfig struct {
// ScaleDownDelayAfterAdd determines the time after scale up that scale down evaluation resumes
// Default: 10m0s
ScaleDownDelayAfterAdd *string `json:"scaleDownDelayAfterAdd,omitempty"`
// CordonNodeBeforeTerminating should CA cordon nodes before terminating during downscale process
// Default: false
CordonNodeBeforeTerminating *bool `json:"cordonNodeBeforeTerminating,omitempty"`
// Image is the docker container used.
// Default: the latest supported image for the specified kubernetes version.
Image *string `json:"image,omitempty"`

View File

@ -2415,6 +2415,7 @@ func autoConvert_v1alpha3_ClusterAutoscalerConfig_To_kops_ClusterAutoscalerConfi
out.SkipNodesWithLocalStorage = in.SkipNodesWithLocalStorage
out.NewPodScaleUpDelay = in.NewPodScaleUpDelay
out.ScaleDownDelayAfterAdd = in.ScaleDownDelayAfterAdd
out.CordonNodeBeforeTerminating = in.CordonNodeBeforeTerminating
out.Image = in.Image
out.MemoryRequest = in.MemoryRequest
out.CPURequest = in.CPURequest
@ -2438,6 +2439,7 @@ func autoConvert_kops_ClusterAutoscalerConfig_To_v1alpha3_ClusterAutoscalerConfi
out.SkipNodesWithLocalStorage = in.SkipNodesWithLocalStorage
out.NewPodScaleUpDelay = in.NewPodScaleUpDelay
out.ScaleDownDelayAfterAdd = in.ScaleDownDelayAfterAdd
out.CordonNodeBeforeTerminating = in.CordonNodeBeforeTerminating
out.Image = in.Image
out.MemoryRequest = in.MemoryRequest
out.CPURequest = in.CPURequest

View File

@ -906,6 +906,11 @@ func (in *ClusterAutoscalerConfig) DeepCopyInto(out *ClusterAutoscalerConfig) {
*out = new(string)
**out = **in
}
if in.CordonNodeBeforeTerminating != nil {
in, out := &in.CordonNodeBeforeTerminating, &out.CordonNodeBeforeTerminating
*out = new(bool)
**out = **in
}
if in.Image != nil {
in, out := &in.Image, &out.Image
*out = new(string)

View File

@ -1003,6 +1003,11 @@ func (in *ClusterAutoscalerConfig) DeepCopyInto(out *ClusterAutoscalerConfig) {
*out = new(string)
**out = **in
}
if in.CordonNodeBeforeTerminating != nil {
in, out := &in.CordonNodeBeforeTerminating, &out.CordonNodeBeforeTerminating
*out = new(bool)
**out = **in
}
if in.Image != nil {
in, out := &in.Image, &out.Image
*out = new(string)

View File

@ -41,7 +41,7 @@ spec:
version: 9.99.0
- id: k8s-1.15
manifest: cluster-autoscaler.addons.k8s.io/k8s-1.15.yaml
manifestHash: 356aa2af7e37cb715c74cba3dc013b8e3982d2459b78cbf333bdad379d5d0055
manifestHash: 729dcadedb99acbf9a9b72bdb73bbc18525352865e12c7e3c2eb67cf83544dbb
name: cluster-autoscaler.addons.k8s.io
selector:
k8s-addon: cluster-autoscaler.addons.k8s.io

View File

@ -330,6 +330,7 @@ spec:
- --scale-down-delay-after-add=10m0s
- --new-pod-scale-up-delay=0s
- --max-node-provision-time=15m0s
- --cordon-node-before-terminating="true"
- --logtostderr=true
- --stderrthreshold=info
- --v=4

View File

@ -48,7 +48,7 @@ spec:
version: 9.99.0
- id: k8s-1.15
manifest: cluster-autoscaler.addons.k8s.io/k8s-1.15.yaml
manifestHash: cd7dcf9f8dcee6c325f114c3581af77161cb94cd8ac2ee6f70c5e7b1dea0f48d
manifestHash: e14369e0688f93dc6578f464f8da46a9b0001410e5510713513b4aa06a0cdbce
name: cluster-autoscaler.addons.k8s.io
selector:
k8s-addon: cluster-autoscaler.addons.k8s.io

View File

@ -330,6 +330,7 @@ spec:
- --scale-down-delay-after-add=10m0s
- --new-pod-scale-up-delay=0s
- --max-node-provision-time=15m0s
- --cordon-node-before-terminating="true"
- --logtostderr=true
- --stderrthreshold=info
- --v=4

View File

@ -48,7 +48,7 @@ spec:
version: 9.99.0
- id: k8s-1.15
manifest: cluster-autoscaler.addons.k8s.io/k8s-1.15.yaml
manifestHash: 32c2d0d535f2a11e9034cf857011b214402f08c16b6bc1cf22cfbd54f2a62a5a
manifestHash: faa9596e64df6bbf071647f3a74acb0230cccb801a330bccb67821251eb73140
name: cluster-autoscaler.addons.k8s.io
selector:
k8s-addon: cluster-autoscaler.addons.k8s.io

View File

@ -330,6 +330,7 @@ spec:
- --scale-down-delay-after-add=10m0s
- --new-pod-scale-up-delay=0s
- --max-node-provision-time=15m0s
- --cordon-node-before-terminating="true"
- --logtostderr=true
- --stderrthreshold=info
- --v=4

View File

@ -48,7 +48,7 @@ spec:
version: 9.99.0
- id: k8s-1.15
manifest: cluster-autoscaler.addons.k8s.io/k8s-1.15.yaml
manifestHash: e7077b9af47fa0146ed707c801cb96b86cc9b9fb318b40c3da386a43a956129c
manifestHash: 995129efbafc17f912c327c8fcce41ab5389b7b9e487ca936d0d47fcc51dc789
name: cluster-autoscaler.addons.k8s.io
selector:
k8s-addon: cluster-autoscaler.addons.k8s.io

View File

@ -330,6 +330,7 @@ spec:
- --scale-down-delay-after-add=10m0s
- --new-pod-scale-up-delay=0s
- --max-node-provision-time=15m0s
- --cordon-node-before-terminating="true"
- --logtostderr=true
- --stderrthreshold=info
- --v=4

View File

@ -41,7 +41,7 @@ spec:
version: 9.99.0
- id: k8s-1.15
manifest: cluster-autoscaler.addons.k8s.io/k8s-1.15.yaml
manifestHash: e7077b9af47fa0146ed707c801cb96b86cc9b9fb318b40c3da386a43a956129c
manifestHash: 995129efbafc17f912c327c8fcce41ab5389b7b9e487ca936d0d47fcc51dc789
name: cluster-autoscaler.addons.k8s.io
selector:
k8s-addon: cluster-autoscaler.addons.k8s.io

View File

@ -330,6 +330,7 @@ spec:
- --scale-down-delay-after-add=10m0s
- --new-pod-scale-up-delay=0s
- --max-node-provision-time=15m0s
- --cordon-node-before-terminating="true"
- --logtostderr=true
- --stderrthreshold=info
- --v=4

View File

@ -41,7 +41,7 @@ spec:
version: 9.99.0
- id: k8s-1.15
manifest: cluster-autoscaler.addons.k8s.io/k8s-1.15.yaml
manifestHash: 6de82c3f846c3b3f7fb7cd92ab62635a6174e4c92c343374964a51e44a0691ca
manifestHash: cd6497b9d972c3a828e8a2057ed7f907729674192b10377f8378d413f80126fe
name: cluster-autoscaler.addons.k8s.io
selector:
k8s-addon: cluster-autoscaler.addons.k8s.io

View File

@ -333,6 +333,7 @@ spec:
- --scale-down-delay-after-add=10m0s
- --new-pod-scale-up-delay=0s
- --max-node-provision-time=15m0s
- --cordon-node-before-terminating="true"
- --logtostderr=true
- --stderrthreshold=info
- --v=4

View File

@ -41,7 +41,7 @@ spec:
version: 9.99.0
- id: k8s-1.15
manifest: cluster-autoscaler.addons.k8s.io/k8s-1.15.yaml
manifestHash: 8234f4b6bdccbbeb8e3df94093a0ed737e8c3a333c5193d7fd7ffbe7640cf1ec
manifestHash: 6393b5636a0aa88b21e94c52a6209a03a6257be6e6344ab1c5887d5d9f2634c5
name: cluster-autoscaler.addons.k8s.io
selector:
k8s-addon: cluster-autoscaler.addons.k8s.io

View File

@ -334,6 +334,7 @@ spec:
- --scale-down-delay-after-add=10m0s
- --new-pod-scale-up-delay=0s
- --max-node-provision-time=15m0s
- --cordon-node-before-terminating="true"
- --logtostderr=true
- --stderrthreshold=info
- --v=4

View File

@ -329,6 +329,10 @@ spec:
- --scale-down-delay-after-add={{ .ScaleDownDelayAfterAdd }}
- --new-pod-scale-up-delay={{ .NewPodScaleUpDelay }}
- --max-node-provision-time={{ .MaxNodeProvisionTime }}
# This flag does not exist before CAS 1.21
{{ if IsKubernetesGTE "1.21" }}
- --cordon-node-before-terminating="{{ WithDefaultBool .CordonNodeBeforeTerminating true }}"
{{ end }}
- --logtostderr=true
- --stderrthreshold=info
- --v=4