Add --chunk-size flag to kubectl drain

Kubernetes-commit: 5200ff86d0983e42c3d2d5c0d830e96495516347
Katrina Verey 2021-03-19 08:32:37 -07:00 committed by Kubernetes Publisher
parent 086cadbf87
commit ea308b60b6
3 changed files with 22 additions and 2 deletions
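In short: kubectl drain gains a --chunk-size flag, so the pod listing used to find pods for eviction is fetched from the API server in pages rather than in a single unbounded List call (for example, kubectl drain <node> --chunk-size=250). The default comes from cmdutil.DefaultChunkSize; the updated test below expects the request to carry limit=500.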


@@ -148,6 +148,7 @@ func NewDrainCmdOptions(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *DrainCmdOptions {
 			GracePeriodSeconds: -1,
 			Out:                ioStreams.Out,
 			ErrOut:             ioStreams.ErrOut,
+			ChunkSize:          cmdutil.DefaultChunkSize,
 		},
 	}
 	o.drainer.OnPodDeletedOrEvicted = o.onPodDeletedOrEvicted
@@ -198,6 +199,7 @@ func NewCmdDrain(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command {
 	cmd.Flags().BoolVar(&o.drainer.DisableEviction, "disable-eviction", o.drainer.DisableEviction, "Force drain to use delete, even if eviction is supported. This will bypass checking PodDisruptionBudgets, use with caution.")
 	cmd.Flags().IntVar(&o.drainer.SkipWaitForDeleteTimeoutSeconds, "skip-wait-for-delete-timeout", o.drainer.SkipWaitForDeleteTimeoutSeconds, "If pod DeletionTimestamp older than N seconds, skip waiting for the pod. Seconds must be greater than 0 to skip.")
+	cmdutil.AddChunkSizeFlag(cmd, &o.drainer.ChunkSize)
 	cmdutil.AddDryRunFlag(cmd)
 	return cmd
 }
@@ -256,6 +258,7 @@ func (o *DrainCmdOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error {
 	builder := f.NewBuilder().
 		WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).
 		NamespaceParam(o.Namespace).DefaultNamespace().
+		RequestChunksOf(o.drainer.ChunkSize).
 		ResourceNames("nodes", args...).
 		SingleResourceType().
 		Flatten()


@@ -810,6 +810,7 @@ func TestDrain(t *testing.T) {
 			}
 			getParams := make(url.Values)
 			getParams["fieldSelector"] = []string{"spec.nodeName=node"}
+			getParams["limit"] = []string{"500"}
 			if !reflect.DeepEqual(getParams, values) {
 				t.Fatalf("%s: expected:\n%v\nsaw:\n%v\n", test.description, getParams, values)
 			}
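The new expectation reflects that drain's pod list request now carries limit=500, i.e. cmdutil.DefaultChunkSize. For illustration only, here is a rough sketch of the wire-level pattern behind chunked listing using plain client-go; listNodePodsInChunks and its arguments are invented for the example and are not part of this commit.

package drainexample

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listNodePodsInChunks pages through the pods bound to nodeName: each request
// carries Limit, and the server returns a Continue token until the collection
// is exhausted. The updated test above asserts that drain sends limit=500.
func listNodePodsInChunks(clientset kubernetes.Interface, nodeName string) (int, error) {
	opts := metav1.ListOptions{
		Limit:         500, // cmdutil.DefaultChunkSize
		FieldSelector: "spec.nodeName=" + nodeName,
	}
	total := 0
	for {
		page, err := clientset.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), opts)
		if err != nil {
			return total, err
		}
		total += len(page.Items)
		if page.Continue == "" {
			return total, nil
		}
		opts.Continue = page.Continue
	}
}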


@@ -29,6 +29,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/cli-runtime/pkg/resource"
@@ -61,6 +62,7 @@ type Helper struct {
 	DeleteEmptyDirData bool
 	Selector           string
 	PodSelector        string
+	ChunkSize          int64
 	// DisableEviction forces drain to use delete rather than evict
 	DisableEviction bool
@@ -189,9 +191,23 @@ func (d *Helper) GetPodsForDeletion(nodeName string) (*PodDeleteList, []error) {
 		return nil, []error{err}
 	}
-	podList, err := d.Client.CoreV1().Pods(metav1.NamespaceAll).List(d.getContext(), metav1.ListOptions{
+	podList := &corev1.PodList{}
+	initialOpts := &metav1.ListOptions{
 		LabelSelector: labelSelector.String(),
-		FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}).String()})
+		FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}).String(),
+		Limit:         d.ChunkSize,
+	}
+	err = resource.FollowContinue(initialOpts, func(options metav1.ListOptions) (runtime.Object, error) {
+		newPods, err := d.Client.CoreV1().Pods(metav1.NamespaceAll).List(d.getContext(), options)
+		if err != nil {
+			podR := corev1.SchemeGroupVersion.WithResource(corev1.ResourcePods.String())
+			return nil, resource.EnhanceListError(err, options, podR.String())
+		}
+		podList.Items = append(podList.Items, newPods.Items...)
+		return newPods, nil
+	})
 	if err != nil {
 		return nil, []error{err}
 	}
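For context, a minimal sketch of how a caller might drive the updated Helper so that GetPodsForDeletion pages through the node's pods via resource.FollowContinue. The kubeconfig wiring and the node name "node-1" are placeholders, not part of this commit.

package main

import (
	"context"
	"fmt"
	"os"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/kubectl/pkg/drain"
)

func main() {
	// Placeholder wiring: build a clientset from the local kubeconfig.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}

	helper := &drain.Helper{
		Ctx:       context.TODO(),
		Client:    kubernetes.NewForConfigOrDie(config),
		ErrOut:    os.Stderr,
		ChunkSize: 500, // mirrors cmdutil.DefaultChunkSize; with 0 the server returns the full list in one response
	}

	// With ChunkSize set, GetPodsForDeletion lists the node's pods in pages
	// via resource.FollowContinue instead of a single unbounded List call.
	list, errs := helper.GetPodsForDeletion("node-1")
	if len(errs) > 0 {
		panic(errs[0])
	}
	fmt.Printf("%d pods would be considered for eviction\n", len(list.Pods()))
}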