mirror of https://github.com/openkruise/kruise.git

add flag --daemonset-extra-allowed-pod-number to avoid DaemonSet pod scheduling failures

parent 4bb20ac117
commit fc284a3704
@@ -70,11 +70,14 @@ import (
 func init() {
 	flag.BoolVar(&scheduleDaemonSetPods, "assign-pods-by-scheduler", true, "Use scheduler to assign pod to node.")
 	flag.IntVar(&concurrentReconciles, "daemonset-workers", concurrentReconciles, "Max concurrent workers for DaemonSet controller.")
+	flag.Int64Var(&extraAllowedPodNumber, "daemonset-extra-allowed-pod-number", extraAllowedPodNumber,
+		"Extra allowed number of Pods that can run on one node, ensure daemonset pod to be assigned")
 }

 var (
 	concurrentReconciles = 3
 	scheduleDaemonSetPods bool
+	extraAllowedPodNumber = int64(0)

 	// controllerKind contains the schema.GroupVersionKind for this controller type.
 	controllerKind = appsv1alpha1.SchemeGroupVersion.WithKind("DaemonSet")
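For context, here is a minimal standalone sketch (not part of the commit) of how this flag wiring behaves; only the flag name, default, and description mirror the diff above. The third argument to flag.Int64Var is the default value, so leaving the flag unset preserves the existing behavior:

// flagdemo is a hypothetical, self-contained illustration of the flag wiring above.
package main

import (
	"flag"
	"fmt"
)

var extraAllowedPodNumber = int64(0) // default: no extra pod headroom

func main() {
	flag.Int64Var(&extraAllowedPodNumber, "daemonset-extra-allowed-pod-number", extraAllowedPodNumber,
		"Extra allowed number of Pods that can run on one node, ensure daemonset pod to be assigned")
	flag.Parse()
	fmt.Println("extra allowed pod number:", extraAllowedPodNumber)
}

Running this with --daemonset-extra-allowed-pod-number=9 prints 9; with no arguments it prints 0.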
@@ -24,6 +24,7 @@ import (
 	apps "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	apiequality "k8s.io/apimachinery/pkg/api/equality"
+	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/klog"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -163,6 +164,22 @@ func NodeShouldRunDaemonPod(client client.Client, node *corev1.Node, ds *appsv1a
 	return
 }

+func newSchedulerNodeInfo(node *corev1.Node) *schedulernodeinfo.NodeInfo {
+	nodeInfo := schedulernodeinfo.NewNodeInfo()
+	if extraAllowedPodNumber > 0 {
+		rQuant, ok := node.Status.Allocatable[corev1.ResourcePods]
+		if ok {
+			rQuant.Add(*resource.NewQuantity(extraAllowedPodNumber, resource.DecimalSI))
+			nodeCopy := node.DeepCopy()
+			nodeCopy.Status.Allocatable[corev1.ResourcePods] = rQuant
+			nodeInfo.SetNode(nodeCopy)
+			return nodeInfo
+		}
+	}
+	nodeInfo.SetNode(node)
+	return nodeInfo
+}
+
 func Simulate(kubeclient client.Client, newPod *corev1.Pod, node *corev1.Node, ds *appsv1alpha1.DaemonSet) ([]predicates.PredicateFailureReason, *schedulernodeinfo.NodeInfo, error) {
 	podList := corev1.PodList{}
 	err := kubeclient.List(context.TODO(), &podList, client.MatchingFields{"spec.nodeName": node.Name})
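The DeepCopy in newSchedulerNodeInfo is the interesting detail: indexing a corev1.ResourceList yields a copy of the resource.Quantity, so Add never touches the original node, and the bumped value is written back onto a copy rather than the (possibly cache-shared) Node. A small sketch of that behavior, assuming only the k8s.io/api and apimachinery types already imported above:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	node := &corev1.Node{}
	node.Status.Allocatable = corev1.ResourceList{
		corev1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
	}

	// Indexing the map returns a copy of the Quantity, so Add does not
	// mutate the node itself.
	q := node.Status.Allocatable[corev1.ResourcePods]
	q.Add(*resource.NewQuantity(9, resource.DecimalSI))

	orig := node.Status.Allocatable[corev1.ResourcePods]
	fmt.Println(orig.Value(), q.Value()) // 110 119

	// Writing back onto a deep copy leaves the shared object intact,
	// mirroring the pattern in the commit.
	nodeCopy := node.DeepCopy()
	nodeCopy.Status.Allocatable[corev1.ResourcePods] = q
	fmt.Println(nodeCopy.Status.Allocatable.Pods().Value()) // 119
}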
@@ -170,8 +187,7 @@ func Simulate(kubeclient client.Client, newPod *corev1.Pod, node *corev1.Node, d
 		return nil, nil, err
 	}

-	nodeInfo := schedulernodeinfo.NewNodeInfo()
-	nodeInfo.SetNode(node)
+	nodeInfo := newSchedulerNodeInfo(node)

 	for index := range podList.Items {
 		if isControlledByDaemonSet(&podList.Items[index], ds.GetUID()) {
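One hedged aside on the List call above: client.MatchingFields{"spec.nodeName": ...} only returns results if a field index for spec.nodeName has been registered with the manager at startup. A sketch of that registration, using a hypothetical helper and recent controller-runtime signatures (the exact IndexField signature varies across controller-runtime versions):

package daemonset

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// registerNodeNameIndex is a hypothetical helper, not from this commit.
func registerNodeNameIndex(mgr ctrl.Manager) error {
	return mgr.GetFieldIndexer().IndexField(context.TODO(), &corev1.Pod{}, "spec.nodeName",
		func(obj client.Object) []string {
			pod := obj.(*corev1.Pod)
			return []string{pod.Spec.NodeName}
		})
}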
@@ -22,6 +22,7 @@ import (
 	appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1"
 	apps "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
 )
@@ -338,3 +339,62 @@ func TestNodeShouldUpdateBySelector(t *testing.T) {
 		}
 	}
 }
+
+func TestGetSchedulerNodeInfo(t *testing.T) {
+	for _, tt := range []struct {
+		Title                    string
+		Node                     *corev1.Node
+		SetExtraAllowedPodNumber int64
+		ExpectedAllowedPodNumber int
+	}{
+		{
+			"node with no pod resource, without set extra allowed pod number",
+			func() *corev1.Node {
+				n := newNode("node1", nil)
+				return n
+			}(),
+			0,
+			0,
+		},
+		{
+			"node with no pod resource, with set extra allowed pod number",
+			func() *corev1.Node {
+				n := newNode("node1", nil)
+				return n
+			}(),
+			9,
+			0,
+		},
+		{
+			"node with pod resource, without set extra allowed pod number",
+			func() *corev1.Node {
+				n := newNode("node1", nil)
+				n.Status.Allocatable = corev1.ResourceList{
+					corev1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
+				}
+				return n
+			}(),
+			0,
+			110,
+		},
+		{
+			"node with pod resource, with set extra allowed pod number",
+			func() *corev1.Node {
+				n := newNode("node1", nil)
+				n.Status.Allocatable = corev1.ResourceList{
+					corev1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
+				}
+				return n
+			}(),
+			9,
+			119,
+		},
+	} {
+		t.Logf("\t%s", tt.Title)
+		extraAllowedPodNumber = tt.SetExtraAllowedPodNumber
+		nodeInfo := newSchedulerNodeInfo(tt.Node)
+		if nodeInfo.AllowedPodNumber() != tt.ExpectedAllowedPodNumber {
+			t.Errorf("actual allowed pod number = %v, want %v", nodeInfo.AllowedPodNumber(), tt.ExpectedAllowedPodNumber)
+		}
+	}
+}
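A small caveat on this test: it mutates the package-level extraAllowedPodNumber, which is shared global state across tests in the package. A minimal guard, a hypothetical addition not in the commit, would snapshot and restore the value at the top of the test function:

	// Hypothetical: restore the global so later tests stay deterministic.
	defer func(old int64) { extraAllowedPodNumber = old }(extraAllowedPodNumber)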