Remove use of PredicateMetadata

Łukasz Osipiuk 2019-12-19 14:28:26 +01:00
parent a0fcc36401
commit 4a2b8c7dfc
6 changed files with 22 additions and 54 deletions
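
At a glance: PredicateChecker.CheckPredicates drops its predicateMetadata parameter, GetPredicateMetadata and the predicateMetadataProducer field are deleted, and every predicate now runs with nil metadata. The caller-facing change, reduced to the two signatures (taken from the diff below):

// Before: callers passed precomputed metadata, or nil.
func (p *PredicateChecker) CheckPredicates(pod *apiv1.Pod, predicateMetadata predicates.Metadata, nodeInfo *schedulernodeinfo.NodeInfo) *PredicateError

// After: only the pod and the candidate node.
func (p *PredicateChecker) CheckPredicates(pod *apiv1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *PredicateError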

View File

@@ -258,7 +258,7 @@ func computeExpansionOption(context *context.AutoscalingContext, podEquivalenceG
 	for _, eg := range podEquivalenceGroups {
 		samplePod := eg.pods[0]
-		if err := context.PredicateChecker.CheckPredicates(samplePod, nil, nodeInfo); err == nil {
+		if err := context.PredicateChecker.CheckPredicates(samplePod, nodeInfo); err == nil {
 			// add pods to option
 			option.Pods = append(option.Pods, eg.pods...)
 			// mark pod group as (theoretically) schedulable

View File

@@ -61,7 +61,7 @@ func (estimator *BinpackingNodeEstimator) Estimate(pods []*apiv1.Pod, nodeTempla
 	for _, podInfo := range podInfos {
 		found := false
 		for i, nodeInfo := range newNodes {
-			if err := estimator.predicateChecker.CheckPredicates(podInfo.pod, nil, nodeInfo); err == nil {
+			if err := estimator.predicateChecker.CheckPredicates(podInfo.pod, nodeInfo); err == nil {
 				found = true
 				newNodes[i] = schedulerUtils.NodeWithPod(nodeInfo, podInfo.pod)
 				break
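
The loop above is a first-fit bin-packing pass: each pod is tried against every node opened so far, and a new template node is added only when none fits. A minimal, self-contained sketch of the same pattern (all types and numbers are illustrative, not the autoscaler's real ones):

package main

import "fmt"

// Illustrative stand-ins for the estimator's pod and node types.
type pod struct {
	name string
	cpu  int64
}

type node struct {
	free int64
	pods []string
}

// fits plays the role of predicateChecker.CheckPredicates returning nil.
func fits(p pod, n node) bool { return p.cpu <= n.free }

func main() {
	const nodeCPU = 4000 // hypothetical template-node capacity, in millicores
	pods := []pod{{"a", 2500}, {"b", 2500}, {"c", 1000}}

	var nodes []node
	for _, p := range pods {
		found := false
		// First fit: reuse the first already-opened node that passes the check.
		for i := range nodes {
			if fits(p, nodes[i]) {
				nodes[i].free -= p.cpu
				nodes[i].pods = append(nodes[i].pods, p.name)
				found = true
				break
			}
		}
		// Nothing fits: open a fresh node from the template, as Estimate does.
		if !found {
			nodes = append(nodes, node{free: nodeCPU - p.cpu, pods: []string{p.name}})
		}
	}
	fmt.Printf("estimate: %d node(s)\n", len(nodes)) // prints: estimate: 2 node(s)
}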

View File

@@ -33,7 +33,6 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	policyv1 "k8s.io/api/policy/v1beta1"
 	"k8s.io/apimachinery/pkg/api/resource"
-	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	"k8s.io/klog"
@@ -233,7 +232,7 @@ func findPlaceFor(removedNode string, pods []*apiv1.Pod, nodes []*apiv1.Node, no
 	loggingQuota := glogx.PodsLoggingQuota()
-	tryNodeForPod := func(nodename string, pod *apiv1.Pod, predicateMeta predicates.Metadata) bool {
+	tryNodeForPod := func(nodename string, pod *apiv1.Pod) bool {
 		nodeInfo, found := newNodeInfos[nodename]
 		if found {
 			if nodeInfo.Node() == nil {
@@ -243,7 +242,7 @@ func findPlaceFor(removedNode string, pods []*apiv1.Pod, nodes []*apiv1.Node, no
 				klog.Warningf("No node in nodeInfo %s -> %v", nodename, nodeInfo)
 				return false
 			}
-			err := predicateChecker.CheckPredicates(pod, predicateMeta, nodeInfo)
+			err := predicateChecker.CheckPredicates(pod, nodeInfo)
 			if err != nil {
 				glogx.V(4).UpTo(loggingQuota).Infof("Evaluation %s for %s/%s -> %v", nodename, pod.Namespace, pod.Name, err.VerboseError())
 			} else {
@@ -273,14 +272,13 @@ func findPlaceFor(removedNode string, pods []*apiv1.Pod, nodes []*apiv1.Node, no
 		foundPlace := false
 		targetNode := ""
-		predicateMeta := predicateChecker.GetPredicateMetadata(pod, newNodeInfos)
 		loggingQuota.Reset()
 		klog.V(5).Infof("Looking for place for %s/%s", pod.Namespace, pod.Name)
 		hintedNode, hasHint := oldHints[podKey(pod)]
 		if hasHint {
-			if hintedNode != removedNode && tryNodeForPod(hintedNode, pod, predicateMeta) {
+			if hintedNode != removedNode && tryNodeForPod(hintedNode, pod) {
 				foundPlace = true
 				targetNode = hintedNode
 			}
@@ -290,7 +288,7 @@ func findPlaceFor(removedNode string, pods []*apiv1.Pod, nodes []*apiv1.Node, no
 			if node.Name == removedNode {
 				continue
 			}
-			if tryNodeForPod(node.Name, pod, predicateMeta) {
+			if tryNodeForPod(node.Name, pod) {
 				foundPlace = true
 				targetNode = node.Name
 				break
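
For orientation, findPlaceFor's search order is: first try the node remembered in oldHints from a previous simulation, then scan the remaining nodes, always skipping the node being removed. A compact sketch of that control flow (names simplified; tryNode stands in for the tryNodeForPod closure above):

// findPlace mirrors the hint-first search: the hinted node is tried
// before a full scan, and the drained node is never a candidate.
func findPlace(podKey, removedNode string, nodes []string,
	oldHints map[string]string, tryNode func(nodeName string) bool) (string, bool) {
	if hinted, ok := oldHints[podKey]; ok && hinted != removedNode && tryNode(hinted) {
		return hinted, true
	}
	for _, name := range nodes {
		if name == removedNode {
			continue // the pod must move off this node, not back onto it
		}
		if tryNode(name) {
			return name, true
		}
	}
	return "", false
}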

View File

@@ -32,10 +32,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
-	schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-	schedulersnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
 	// We need to import provider to initialize default scheduler.
 	"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
 )
@@ -61,10 +58,9 @@ type PredicateInfo struct {
 // PredicateChecker checks whether all required predicates pass for given Pod and Node.
 type PredicateChecker struct {
-	predicates                []PredicateInfo
-	predicateMetadataProducer predicates.MetadataProducer
-	enableAffinityPredicate   bool
-	scheduler                 *scheduler.Scheduler
+	predicates              []PredicateInfo
+	enableAffinityPredicate bool
+	scheduler               *scheduler.Scheduler
 }
 
 // We run some predicates first as they are cheap to check and they should be enough
@@ -161,10 +157,9 @@ func NewPredicateChecker(kubeClient kube_client.Interface, stop <-chan struct{})
 	informerFactory.Start(stop)
 
 	return &PredicateChecker{
-		predicates:                predicateList,
-		predicateMetadataProducer: sched.Algorithm.PredicateMetadataProducer(),
-		enableAffinityPredicate:   true,
-		scheduler:                 sched,
+		predicates:              predicateList,
+		enableAffinityPredicate: true,
+		scheduler:               sched,
 	}, nil
 }
@@ -185,9 +180,6 @@ func NewTestPredicateChecker() *PredicateChecker {
 			{Name: "default", Predicate: predicates.GeneralPredicates},
 			{Name: "ready", Predicate: IsNodeReadyAndSchedulablePredicate},
 		},
-		predicateMetadataProducer: func(_ *apiv1.Pod, _ schedulerlisters.SharedLister) predicates.Metadata {
-			return nil
-		},
 	}
 }
@@ -196,9 +188,6 @@ func NewTestPredicateChecker() *PredicateChecker {
 func NewCustomTestPredicateChecker(predicateInfos []PredicateInfo) *PredicateChecker {
 	return &PredicateChecker{
 		predicates: predicateInfos,
-		predicateMetadataProducer: func(_ *apiv1.Pod, _ schedulerlisters.SharedLister) predicates.Metadata {
-			return nil
-		},
 	}
 }
@@ -224,19 +213,6 @@ func (p *PredicateChecker) IsAffinityPredicateEnabled() bool {
 	return p.enableAffinityPredicate
 }
 
-// GetPredicateMetadata precomputes some information useful for running predicates on a given pod in a given state
-// of the cluster (represented by nodeInfos map). Passing the result of this function to CheckPredicates can significantly
-// improve the performance of running predicates, especially MatchInterPodAffinity predicate. However, calculating
-// predicateMetadata is also quite expensive, so it's not always the best option to run this method.
-// Please refer to https://github.com/kubernetes/autoscaler/issues/257 for more details.
-func (p *PredicateChecker) GetPredicateMetadata(pod *apiv1.Pod, nodeInfos map[string]*schedulernodeinfo.NodeInfo) predicates.Metadata {
-	// Skip precomputation if affinity predicate is disabled - it's not worth it performance-wise.
-	if !p.enableAffinityPredicate {
-		return nil
-	}
-	return p.predicateMetadataProducer(pod, schedulersnapshot.NewSnapshot(nodeInfos))
-}
-
 // FitsAny checks if the given pod can be place on any of the given nodes.
 func (p *PredicateChecker) FitsAny(pod *apiv1.Pod, nodeInfos map[string]*schedulernodeinfo.NodeInfo) (string, error) {
 	for name, nodeInfo := range nodeInfos {
@@ -244,7 +220,7 @@ func (p *PredicateChecker) FitsAny(pod *apiv1.Pod, nodeInfos map[string]*schedul
 		if nodeInfo.Node().Spec.Unschedulable {
 			continue
 		}
-		if err := p.CheckPredicates(pod, nil, nodeInfo); err == nil {
+		if err := p.CheckPredicates(pod, nodeInfo); err == nil {
 			return name, nil
 		}
 	}
@@ -323,20 +299,14 @@ func (pe *PredicateError) PredicateName() string {
 }
 
 // CheckPredicates checks if the given pod can be placed on the given node.
-// To improve performance predicateMetadata can be calculated using GetPredicateMetadata
-// method and passed to CheckPredicates, however, this may lead to incorrect results if
-// it was calculated using NodeInfo map representing different cluster state and the
-// performance gains of CheckPredicates won't always offset the cost of GetPredicateMetadata.
-// Alternatively you can pass nil as predicateMetadata.
-func (p *PredicateChecker) CheckPredicates(pod *apiv1.Pod, predicateMetadata predicates.Metadata, nodeInfo *schedulernodeinfo.NodeInfo) *PredicateError {
+func (p *PredicateChecker) CheckPredicates(pod *apiv1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *PredicateError {
 	for _, predInfo := range p.predicates {
 		// Skip affinity predicate if it has been disabled.
 		if !p.enableAffinityPredicate && predInfo.Name == affinityPredicateName {
 			continue
 		}
-		match, failureReasons, err := predInfo.Predicate(pod, predicateMetadata, nodeInfo)
+		match, failureReasons, err := predInfo.Predicate(pod, nil, nodeInfo)
 		if err != nil || !match {
 			return &PredicateError{
 				predicateName: predInfo.Name,
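
After this hunk the caller-facing pattern is uniformly two-argument, and the returned PredicateError still carries both a short and a verbose message. A usage sketch based on the calls visible elsewhere in this diff (checker, pod and nodeInfo are assumed to come from the surrounding code):

// Hypothetical call site; mirrors how cluster.go logs failed evaluations.
if err := checker.CheckPredicates(pod, nodeInfo); err != nil {
	klog.V(4).Infof("pod does not fit: %s", err.VerboseError())
} else {
	// Schedulable: proceed as the call sites above do.
}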

View File

@@ -58,16 +58,16 @@ func TestPredicates(t *testing.T) {
 	_, err = predicateChecker.FitsAny(p3, nodeInfos)
 	assert.Error(t, err)
 
-	predicateErr := predicateChecker.CheckPredicates(p2, nil, ni1)
+	predicateErr := predicateChecker.CheckPredicates(p2, ni1)
 	assert.NotNil(t, predicateErr)
 	assert.True(t, strings.Contains(predicateErr.Error(), "Predicates failed"))
 	assert.True(t, strings.Contains(predicateErr.VerboseError(), "Insufficient cpu"))
 
-	assert.NotNil(t, predicateChecker.CheckPredicates(p2, nil, ni1))
-	assert.Nil(t, predicateChecker.CheckPredicates(p4, nil, ni1))
-	assert.Nil(t, predicateChecker.CheckPredicates(p2, nil, ni2))
-	assert.Nil(t, predicateChecker.CheckPredicates(p4, nil, ni2))
-	assert.NotNil(t, predicateChecker.CheckPredicates(p3, nil, ni2))
+	assert.NotNil(t, predicateChecker.CheckPredicates(p2, ni1))
+	assert.Nil(t, predicateChecker.CheckPredicates(p4, ni1))
+	assert.Nil(t, predicateChecker.CheckPredicates(p2, ni2))
+	assert.Nil(t, predicateChecker.CheckPredicates(p4, ni2))
+	assert.NotNil(t, predicateChecker.CheckPredicates(p3, ni2))
 }
 
 func TestDebugInfo(t *testing.T) {

View File

@@ -32,7 +32,7 @@ func GetDaemonSetPodsForNode(nodeInfo *schedulernodeinfo.NodeInfo, daemonsets []
 	result := make([]*apiv1.Pod, 0)
 	for _, ds := range daemonsets {
 		pod := newPod(ds, nodeInfo.Node().Name)
-		if err := predicateChecker.CheckPredicates(pod, nil, nodeInfo); err == nil {
+		if err := predicateChecker.CheckPredicates(pod, nodeInfo); err == nil {
 			result = append(result, pod)
 		}
 	}
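
The daemonset helper is a plain filter: materialize one pod per DaemonSet for the node, keep those that pass the (now two-argument) predicate check. The shape of that loop, with illustrative types in place of the real ones:

// keepSchedulable mirrors GetDaemonSetPodsForNode's filter loop.
func keepSchedulable(candidates []string, check func(string) error) []string {
	result := make([]string, 0)
	for _, c := range candidates {
		if err := check(c); err == nil { // nil error means the pod fits
			result = append(result, c)
		}
	}
	return result
}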