refactor some code

Signed-off-by: Poor12 <shentiecheng@huawei.com>
This commit is contained in:
Poor12 2023-02-16 20:36:46 +08:00
parent e523977cb0
commit a616758eeb
11 changed files with 54 additions and 81 deletions

View File

@ -987,17 +987,11 @@ func (d *ResourceDetector) HandlePropagationPolicyCreationOrUpdate(policy *polic
return err
}
for _, rb := range resourceBindings.Items {
gv, err := schema.ParseGroupVersion(rb.Spec.Resource.APIVersion)
resourceKey, err := helper.ConstructClusterWideKey(rb.Spec.Resource)
if err != nil {
return err
}
d.Processor.Add(keys.ClusterWideKey{
Name: rb.Spec.Resource.Name,
Namespace: rb.Spec.Resource.Namespace,
Kind: rb.Spec.Resource.Kind,
Group: gv.Group,
Version: gv.Version,
})
d.Processor.Add(resourceKey)
}
matchedKeys := d.GetMatching(policy.Spec.ResourceSelectors)
@ -1048,30 +1042,18 @@ func (d *ResourceDetector) HandleClusterPropagationPolicyCreationOrUpdate(policy
return err
}
for _, rb := range resourceBindings.Items {
gv, err := schema.ParseGroupVersion(rb.Spec.Resource.APIVersion)
resourceKey, err := helper.ConstructClusterWideKey(rb.Spec.Resource)
if err != nil {
return err
}
d.Processor.Add(keys.ClusterWideKey{
Name: rb.Spec.Resource.Name,
Namespace: rb.Spec.Resource.Namespace,
Kind: rb.Spec.Resource.Kind,
Group: gv.Group,
Version: gv.Version,
})
d.Processor.Add(resourceKey)
}
for _, crb := range clusterResourceBindings.Items {
gv, err := schema.ParseGroupVersion(crb.Spec.Resource.APIVersion)
resourceKey, err := helper.ConstructClusterWideKey(crb.Spec.Resource)
if err != nil {
return err
}
d.Processor.Add(keys.ClusterWideKey{
Name: crb.Spec.Resource.Name,
Namespace: crb.Spec.Resource.Namespace,
Kind: crb.Spec.Resource.Kind,
Group: gv.Group,
Version: gv.Version,
})
d.Processor.Add(resourceKey)
}
matchedKeys := d.GetMatching(policy.Spec.ResourceSelectors)

View File

@ -58,7 +58,7 @@ func (g *genericScheduler) Schedule(ctx context.Context, spec *workv1alpha2.Reso
return result, fmt.Errorf("no clusters available to schedule")
}
feasibleClusters, diagnosis, err := g.findClustersThatFit(ctx, g.scheduleFramework, spec.Placement, spec, &clusterInfoSnapshot)
feasibleClusters, diagnosis, err := g.findClustersThatFit(ctx, g.scheduleFramework, spec, &clusterInfoSnapshot)
if err != nil {
return result, fmt.Errorf("failed to findClustersThatFit: %v", err)
}
@ -72,7 +72,7 @@ func (g *genericScheduler) Schedule(ctx context.Context, spec *workv1alpha2.Reso
}
klog.V(4).Infof("Feasible clusters found: %v", feasibleClusters)
clustersScore, err := g.prioritizeClusters(ctx, g.scheduleFramework, spec.Placement, spec, feasibleClusters)
clustersScore, err := g.prioritizeClusters(ctx, g.scheduleFramework, spec, feasibleClusters)
if err != nil {
return result, fmt.Errorf("failed to prioritizeClusters: %v", err)
}
@ -100,7 +100,6 @@ func (g *genericScheduler) Schedule(ctx context.Context, spec *workv1alpha2.Reso
func (g *genericScheduler) findClustersThatFit(
ctx context.Context,
fwk framework.Framework,
placement *policyv1alpha1.Placement,
bindingSpec *workv1alpha2.ResourceBindingSpec,
clusterInfo *cache.Snapshot,
) ([]*clusterv1alpha1.Cluster, framework.Diagnosis, error) {
@ -115,7 +114,7 @@ func (g *genericScheduler) findClustersThatFit(
// DO NOT filter unhealthy cluster, let users make decisions by using ClusterTolerations of Placement.
clusters := clusterInfo.GetClusters()
for _, c := range clusters {
if result := fwk.RunFilterPlugins(ctx, placement, bindingSpec, c.Cluster()); !result.IsSuccess() {
if result := fwk.RunFilterPlugins(ctx, bindingSpec, c.Cluster()); !result.IsSuccess() {
klog.V(4).Infof("Cluster %q is not fit, reason: %v", c.Cluster().Name, result.AsError())
diagnosis.ClusterToResultMap[c.Cluster().Name] = result
} else {
@ -130,13 +129,12 @@ func (g *genericScheduler) findClustersThatFit(
func (g *genericScheduler) prioritizeClusters(
ctx context.Context,
fwk framework.Framework,
placement *policyv1alpha1.Placement,
spec *workv1alpha2.ResourceBindingSpec,
clusters []*clusterv1alpha1.Cluster) (result framework.ClusterScoreList, err error) {
startTime := time.Now()
defer metrics.ScheduleStep(metrics.ScheduleStepScore, startTime)
scoresMap, runScorePluginsResult := fwk.RunScorePlugins(ctx, placement, spec, clusters)
scoresMap, runScorePluginsResult := fwk.RunScorePlugins(ctx, spec, clusters)
if runScorePluginsResult != nil {
return result, runScorePluginsResult.AsError()
}

View File

@ -8,7 +8,6 @@ import (
"strings"
clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
)
@ -26,10 +25,10 @@ type Framework interface {
// RunFilterPlugins runs the set of configured Filter plugins for resources on
// the given cluster.
RunFilterPlugins(ctx context.Context, placement *policyv1alpha1.Placement, bindingSpec *workv1alpha2.ResourceBindingSpec, clusterv1alpha1 *clusterv1alpha1.Cluster) *Result
RunFilterPlugins(ctx context.Context, bindingSpec *workv1alpha2.ResourceBindingSpec, clusterv1alpha1 *clusterv1alpha1.Cluster) *Result
// RunScorePlugins runs the set of configured Score plugins, it returns a map of plugin name to scores
RunScorePlugins(ctx context.Context, placement *policyv1alpha1.Placement, spec *workv1alpha2.ResourceBindingSpec, clusters []*clusterv1alpha1.Cluster) (PluginToClusterScores, *Result)
RunScorePlugins(ctx context.Context, spec *workv1alpha2.ResourceBindingSpec, clusters []*clusterv1alpha1.Cluster) (PluginToClusterScores, *Result)
}
// Plugin is the parent type for all the scheduling framework plugins.
@ -42,7 +41,7 @@ type Plugin interface {
type FilterPlugin interface {
Plugin
// Filter is called by the scheduling framework.
Filter(ctx context.Context, placement *policyv1alpha1.Placement, bindingSpec *workv1alpha2.ResourceBindingSpec, clusterv1alpha1 *clusterv1alpha1.Cluster) *Result
Filter(ctx context.Context, bindingSpec *workv1alpha2.ResourceBindingSpec, clusterv1alpha1 *clusterv1alpha1.Cluster) *Result
}
// Result indicates the result of running a plugin. It consists of a code, a
@ -164,7 +163,7 @@ type ScorePlugin interface {
// Score is called on each filtered cluster. It must return success and an integer
// indicating the rank of the cluster. All scoring plugins must return success or
// the resource will be rejected.
Score(ctx context.Context, placement *policyv1alpha1.Placement, spec *workv1alpha2.ResourceBindingSpec, cluster *clusterv1alpha1.Cluster) (int64, *Result)
Score(ctx context.Context, spec *workv1alpha2.ResourceBindingSpec, cluster *clusterv1alpha1.Cluster) (int64, *Result)
// ScoreExtensions returns a ScoreExtensions interface
// if it implements one, or nil if does not.

View File

@ -6,7 +6,6 @@ import (
"k8s.io/klog/v2"
clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/scheduler/framework"
"github.com/karmada-io/karmada/pkg/util/helper"
@ -33,7 +32,7 @@ func (p *APIEnablement) Name() string {
}
// Filter checks if the API(CRD) of the resource is enabled or installed in the target cluster.
func (p *APIEnablement) Filter(ctx context.Context, placement *policyv1alpha1.Placement,
func (p *APIEnablement) Filter(ctx context.Context,
bindingSpec *workv1alpha2.ResourceBindingSpec, cluster *clusterv1alpha1.Cluster) *framework.Result {
if !helper.IsAPIEnabled(cluster.Status.APIEnablements, bindingSpec.Resource.APIVersion, bindingSpec.Resource.Kind) {
klog.V(2).Infof("Cluster(%s) not fit as missing API(%s, kind=%s)", cluster.Name, bindingSpec.Resource.APIVersion, bindingSpec.Resource.Kind)

View File

@ -4,7 +4,6 @@ import (
"context"
clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/scheduler/framework"
"github.com/karmada-io/karmada/pkg/util"
@ -32,9 +31,9 @@ func (p *ClusterAffinity) Name() string {
}
// Filter checks if the cluster matched the placement cluster affinity constraint.
func (p *ClusterAffinity) Filter(ctx context.Context, placement *policyv1alpha1.Placement,
func (p *ClusterAffinity) Filter(ctx context.Context,
bindingSpec *workv1alpha2.ResourceBindingSpec, cluster *clusterv1alpha1.Cluster) *framework.Result {
affinity := placement.ClusterAffinity
affinity := bindingSpec.Placement.ClusterAffinity
if affinity != nil {
if util.ClusterMatches(cluster, *affinity) {
return framework.NewResult(framework.Success)
@ -47,7 +46,7 @@ func (p *ClusterAffinity) Filter(ctx context.Context, placement *policyv1alpha1.
}
// Score calculates the score on the candidate cluster.
func (p *ClusterAffinity) Score(ctx context.Context, placement *policyv1alpha1.Placement,
func (p *ClusterAffinity) Score(ctx context.Context,
spec *workv1alpha2.ResourceBindingSpec, cluster *clusterv1alpha1.Cluster) (int64, *framework.Result) {
return framework.MinClusterScore, framework.NewResult(framework.Success)
}

View File

@ -4,7 +4,6 @@ import (
"context"
clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/scheduler/framework"
"github.com/karmada-io/karmada/pkg/util"
@ -33,7 +32,7 @@ func (p *ClusterLocality) Name() string {
// Score calculates the score on the candidate cluster.
// If the cluster already have the resource(exists in .spec.Clusters of ResourceBinding or ClusterResourceBinding),
// then score is 100, otherwise 0.
func (p *ClusterLocality) Score(ctx context.Context, placement *policyv1alpha1.Placement,
func (p *ClusterLocality) Score(ctx context.Context,
spec *workv1alpha2.ResourceBindingSpec, cluster *clusterv1alpha1.Cluster) (int64, *framework.Result) {
if len(spec.Clusters) == 0 {
return framework.MinClusterScore, framework.NewResult(framework.Success)

View File

@ -30,9 +30,9 @@ func (p *SpreadConstraint) Name() string {
}
// Filter checks if the cluster Provider/Zone/Region spread is null.
func (p *SpreadConstraint) Filter(ctx context.Context, placement *policyv1alpha1.Placement,
func (p *SpreadConstraint) Filter(ctx context.Context,
bindingSpec *workv1alpha2.ResourceBindingSpec, cluster *clusterv1alpha1.Cluster) *framework.Result {
for _, spreadConstraint := range placement.SpreadConstraints {
for _, spreadConstraint := range bindingSpec.Placement.SpreadConstraints {
if spreadConstraint.SpreadByField == policyv1alpha1.SpreadByFieldProvider && cluster.Spec.Provider == "" {
return framework.NewResult(framework.Unschedulable, "cluster(s) didn't have provider property")
} else if spreadConstraint.SpreadByField == policyv1alpha1.SpreadByFieldRegion && cluster.Spec.Region == "" {

View File

@ -8,7 +8,6 @@ import (
v1helper "k8s.io/component-helpers/scheduling/corev1"
clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/scheduler/framework"
)
@ -34,7 +33,7 @@ func (p *TaintToleration) Name() string {
}
// Filter checks if the given tolerations in placement tolerate cluster's taints.
func (p *TaintToleration) Filter(ctx context.Context, placement *policyv1alpha1.Placement,
func (p *TaintToleration) Filter(ctx context.Context,
bindingSpec *workv1alpha2.ResourceBindingSpec, cluster *clusterv1alpha1.Cluster) *framework.Result {
// skip the filter if the cluster is already in the list of scheduling results,
// if the workload referencing by the binding can't tolerate the taint,
@ -47,7 +46,7 @@ func (p *TaintToleration) Filter(ctx context.Context, placement *policyv1alpha1.
return t.Effect == corev1.TaintEffectNoSchedule || t.Effect == corev1.TaintEffectNoExecute
}
taint, isUntolerated := v1helper.FindMatchingUntoleratedTaint(cluster.Spec.Taints, placement.ClusterTolerations, filterPredicate)
taint, isUntolerated := v1helper.FindMatchingUntoleratedTaint(cluster.Spec.Taints, bindingSpec.Placement.ClusterTolerations, filterPredicate)
if !isUntolerated {
return framework.NewResult(framework.Success)
}

View File

@ -7,7 +7,6 @@ import (
"time"
clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/scheduler/framework"
"github.com/karmada-io/karmada/pkg/scheduler/metrics"
@ -75,29 +74,29 @@ func NewFramework(r Registry, opts ...Option) (framework.Framework, error) {
// RunFilterPlugins runs the set of configured Filter plugins for resources on the cluster.
// If any of the result is not success, the cluster is not suited for the resource.
func (frw *frameworkImpl) RunFilterPlugins(ctx context.Context, placement *policyv1alpha1.Placement, bindingSpec *workv1alpha2.ResourceBindingSpec, cluster *clusterv1alpha1.Cluster) (result *framework.Result) {
func (frw *frameworkImpl) RunFilterPlugins(ctx context.Context, bindingSpec *workv1alpha2.ResourceBindingSpec, cluster *clusterv1alpha1.Cluster) (result *framework.Result) {
startTime := time.Now()
defer func() {
metrics.FrameworkExtensionPointDuration.WithLabelValues(filter, result.Code().String()).Observe(utilmetrics.DurationInSeconds(startTime))
}()
for _, p := range frw.filterPlugins {
if result := frw.runFilterPlugin(ctx, p, placement, bindingSpec, cluster); !result.IsSuccess() {
if result := frw.runFilterPlugin(ctx, p, bindingSpec, cluster); !result.IsSuccess() {
return result
}
}
return framework.NewResult(framework.Success)
}
func (frw *frameworkImpl) runFilterPlugin(ctx context.Context, pl framework.FilterPlugin, placement *policyv1alpha1.Placement, bindingSpec *workv1alpha2.ResourceBindingSpec, cluster *clusterv1alpha1.Cluster) *framework.Result {
func (frw *frameworkImpl) runFilterPlugin(ctx context.Context, pl framework.FilterPlugin, bindingSpec *workv1alpha2.ResourceBindingSpec, cluster *clusterv1alpha1.Cluster) *framework.Result {
startTime := time.Now()
result := pl.Filter(ctx, placement, bindingSpec, cluster)
result := pl.Filter(ctx, bindingSpec, cluster)
frw.metricsRecorder.observePluginDurationAsync(filter, pl.Name(), result, utilmetrics.DurationInSeconds(startTime))
return result
}
// RunScorePlugins runs the set of configured Score plugins for resources on the cluster.
// If any of the result is not success, the cluster is not suited for the resource.
func (frw *frameworkImpl) RunScorePlugins(ctx context.Context, placement *policyv1alpha1.Placement, spec *workv1alpha2.ResourceBindingSpec, clusters []*clusterv1alpha1.Cluster) (ps framework.PluginToClusterScores, result *framework.Result) {
func (frw *frameworkImpl) RunScorePlugins(ctx context.Context, spec *workv1alpha2.ResourceBindingSpec, clusters []*clusterv1alpha1.Cluster) (ps framework.PluginToClusterScores, result *framework.Result) {
startTime := time.Now()
defer func() {
metrics.FrameworkExtensionPointDuration.WithLabelValues(score, result.Code().String()).Observe(utilmetrics.DurationInSeconds(startTime))
@ -106,7 +105,7 @@ func (frw *frameworkImpl) RunScorePlugins(ctx context.Context, placement *policy
for _, p := range frw.scorePlugins {
var scoreList framework.ClusterScoreList
for _, cluster := range clusters {
s, res := frw.runScorePlugin(ctx, p, placement, spec, cluster)
s, res := frw.runScorePlugin(ctx, p, spec, cluster)
if !res.IsSuccess() {
return nil, framework.AsResult(fmt.Errorf("plugin %q failed with: %w", p.Name(), res.AsError()))
}
@ -139,9 +138,9 @@ func (frw *frameworkImpl) RunScorePlugins(ctx context.Context, placement *policy
return pluginToClusterScores, nil
}
func (frw *frameworkImpl) runScorePlugin(ctx context.Context, pl framework.ScorePlugin, placement *policyv1alpha1.Placement, spec *workv1alpha2.ResourceBindingSpec, cluster *clusterv1alpha1.Cluster) (int64, *framework.Result) {
func (frw *frameworkImpl) runScorePlugin(ctx context.Context, pl framework.ScorePlugin, spec *workv1alpha2.ResourceBindingSpec, cluster *clusterv1alpha1.Cluster) (int64, *framework.Result) {
startTime := time.Now()
s, result := pl.Score(ctx, placement, spec, cluster)
s, result := pl.Score(ctx, spec, cluster)
frw.metricsRecorder.observePluginDurationAsync(score, pl.Name(), result, utilmetrics.DurationInSeconds(startTime))
return s, result
}

View File

@ -19,12 +19,12 @@ func Test_frameworkImpl_RunFilterPlugins(t *testing.T) {
defer mockCtrl.Finish()
alwaysError := frameworktesting.NewMockFilterPlugin(mockCtrl)
alwaysError.EXPECT().Filter(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().
alwaysError.EXPECT().Filter(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().
Return(framework.NewResult(framework.Error, "foo"))
alwaysError.EXPECT().Name().AnyTimes().Return("foo")
alwaysSuccess := frameworktesting.NewMockFilterPlugin(mockCtrl)
alwaysSuccess.EXPECT().Filter(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().
alwaysSuccess.EXPECT().Filter(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().
Return(framework.NewResult(framework.Success))
alwaysSuccess.EXPECT().Name().AnyTimes().Return("foo")
@ -72,7 +72,7 @@ func Test_frameworkImpl_RunFilterPlugins(t *testing.T) {
t.Errorf("create frame work error:%v", err)
}
result := frameWork.RunFilterPlugins(ctx, nil, nil, nil)
result := frameWork.RunFilterPlugins(ctx, nil, nil)
if result.IsSuccess() != tt.isSuccess {
t.Errorf("want %v, but get:%v", tt.isSuccess, result.IsSuccess())
}
@ -111,7 +111,7 @@ func Test_frameworkImpl_RunScorePlugins(t *testing.T) {
{
name: "Test score ok",
mockFunc: func(mockScorePlugin *frameworktesting.MockScorePlugin, mockScoreExtension *frameworktesting.MockScoreExtensions) {
mockScorePlugin.EXPECT().Score(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
mockScorePlugin.EXPECT().Score(gomock.Any(), gomock.Any(), gomock.Any()).
Return(int64(60), framework.NewResult(framework.Success))
mockScorePlugin.EXPECT().ScoreExtensions().Times(2).Return(mockScoreExtension)
mockScorePlugin.EXPECT().Name().AnyTimes().Return("foo")
@ -124,7 +124,7 @@ func Test_frameworkImpl_RunScorePlugins(t *testing.T) {
{
name: "Test score func error",
mockFunc: func(mockScorePlugin *frameworktesting.MockScorePlugin, mockScoreExtension *frameworktesting.MockScoreExtensions) {
mockScorePlugin.EXPECT().Score(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
mockScorePlugin.EXPECT().Score(gomock.Any(), gomock.Any(), gomock.Any()).
Return(int64(-1), framework.NewResult(framework.Error, "foo"))
mockScorePlugin.EXPECT().Name().AnyTimes().Return("foo")
},
@ -133,7 +133,7 @@ func Test_frameworkImpl_RunScorePlugins(t *testing.T) {
{
name: "Test normalize score error",
mockFunc: func(mockScorePlugin *frameworktesting.MockScorePlugin, mockScoreExtension *frameworktesting.MockScoreExtensions) {
mockScorePlugin.EXPECT().Score(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
mockScorePlugin.EXPECT().Score(gomock.Any(), gomock.Any(), gomock.Any()).
Return(int64(60), framework.NewResult(framework.Success))
mockScorePlugin.EXPECT().ScoreExtensions().Times(2).Return(mockScoreExtension)
mockScorePlugin.EXPECT().Name().AnyTimes().Return("foo")
@ -160,7 +160,7 @@ func Test_frameworkImpl_RunScorePlugins(t *testing.T) {
}
tt.mockFunc(mockScorePlugin, mockScoreExtension)
_, result := frameWork.RunScorePlugins(ctx, nil, nil, clusters)
_, result := frameWork.RunScorePlugins(ctx, nil, clusters)
if result.IsSuccess() != tt.isSuccess {
t.Errorf("want %v, but get:%v", tt.isSuccess, result.IsSuccess())
}

View File

@ -10,7 +10,6 @@ import (
gomock "github.com/golang/mock/gomock"
v1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
v1alpha10 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
v1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
framework "github.com/karmada-io/karmada/pkg/scheduler/framework"
)
@ -39,32 +38,32 @@ func (m *MockFramework) EXPECT() *MockFrameworkMockRecorder {
}
// RunFilterPlugins mocks base method.
func (m *MockFramework) RunFilterPlugins(ctx context.Context, placement *v1alpha10.Placement, bindingSpec *v1alpha2.ResourceBindingSpec, clusterv1alpha1 *v1alpha1.Cluster) *framework.Result {
func (m *MockFramework) RunFilterPlugins(ctx context.Context, bindingSpec *v1alpha2.ResourceBindingSpec, clusterv1alpha1 *v1alpha1.Cluster) *framework.Result {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RunFilterPlugins", ctx, placement, bindingSpec, clusterv1alpha1)
ret := m.ctrl.Call(m, "RunFilterPlugins", ctx, bindingSpec, clusterv1alpha1)
ret0, _ := ret[0].(*framework.Result)
return ret0
}
// RunFilterPlugins indicates an expected call of RunFilterPlugins.
func (mr *MockFrameworkMockRecorder) RunFilterPlugins(ctx, placement, bindingSpec, clusterv1alpha1 interface{}) *gomock.Call {
func (mr *MockFrameworkMockRecorder) RunFilterPlugins(ctx, bindingSpec, clusterv1alpha1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunFilterPlugins", reflect.TypeOf((*MockFramework)(nil).RunFilterPlugins), ctx, placement, bindingSpec, clusterv1alpha1)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunFilterPlugins", reflect.TypeOf((*MockFramework)(nil).RunFilterPlugins), ctx, bindingSpec, clusterv1alpha1)
}
// RunScorePlugins mocks base method.
func (m *MockFramework) RunScorePlugins(ctx context.Context, placement *v1alpha10.Placement, spec *v1alpha2.ResourceBindingSpec, clusters []*v1alpha1.Cluster) (framework.PluginToClusterScores, *framework.Result) {
func (m *MockFramework) RunScorePlugins(ctx context.Context, spec *v1alpha2.ResourceBindingSpec, clusters []*v1alpha1.Cluster) (framework.PluginToClusterScores, *framework.Result) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RunScorePlugins", ctx, placement, spec, clusters)
ret := m.ctrl.Call(m, "RunScorePlugins", ctx, spec, clusters)
ret0, _ := ret[0].(framework.PluginToClusterScores)
ret1, _ := ret[1].(*framework.Result)
return ret0, ret1
}
// RunScorePlugins indicates an expected call of RunScorePlugins.
func (mr *MockFrameworkMockRecorder) RunScorePlugins(ctx, placement, spec, clusters interface{}) *gomock.Call {
func (mr *MockFrameworkMockRecorder) RunScorePlugins(ctx, spec, clusters interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunScorePlugins", reflect.TypeOf((*MockFramework)(nil).RunScorePlugins), ctx, placement, spec, clusters)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunScorePlugins", reflect.TypeOf((*MockFramework)(nil).RunScorePlugins), ctx, spec, clusters)
}
// MockPlugin is a mock of Plugin interface.
@ -128,17 +127,17 @@ func (m *MockFilterPlugin) EXPECT() *MockFilterPluginMockRecorder {
}
// Filter mocks base method.
func (m *MockFilterPlugin) Filter(ctx context.Context, placement *v1alpha10.Placement, bindingSpec *v1alpha2.ResourceBindingSpec, clusterv1alpha1 *v1alpha1.Cluster) *framework.Result {
func (m *MockFilterPlugin) Filter(ctx context.Context, bindingSpec *v1alpha2.ResourceBindingSpec, clusterv1alpha1 *v1alpha1.Cluster) *framework.Result {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Filter", ctx, placement, bindingSpec, clusterv1alpha1)
ret := m.ctrl.Call(m, "Filter", ctx, bindingSpec, clusterv1alpha1)
ret0, _ := ret[0].(*framework.Result)
return ret0
}
// Filter indicates an expected call of Filter.
func (mr *MockFilterPluginMockRecorder) Filter(ctx, placement, bindingSpec, clusterv1alpha1 interface{}) *gomock.Call {
func (mr *MockFilterPluginMockRecorder) Filter(ctx, bindingSpec, clusterv1alpha1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Filter", reflect.TypeOf((*MockFilterPlugin)(nil).Filter), ctx, placement, bindingSpec, clusterv1alpha1)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Filter", reflect.TypeOf((*MockFilterPlugin)(nil).Filter), ctx, bindingSpec, clusterv1alpha1)
}
// Name mocks base method.
@ -193,18 +192,18 @@ func (mr *MockScorePluginMockRecorder) Name() *gomock.Call {
}
// Score mocks base method.
func (m *MockScorePlugin) Score(ctx context.Context, placement *v1alpha10.Placement, spec *v1alpha2.ResourceBindingSpec, cluster *v1alpha1.Cluster) (int64, *framework.Result) {
func (m *MockScorePlugin) Score(ctx context.Context, spec *v1alpha2.ResourceBindingSpec, cluster *v1alpha1.Cluster) (int64, *framework.Result) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Score", ctx, placement, spec, cluster)
ret := m.ctrl.Call(m, "Score", ctx, spec, cluster)
ret0, _ := ret[0].(int64)
ret1, _ := ret[1].(*framework.Result)
return ret0, ret1
}
// Score indicates an expected call of Score.
func (mr *MockScorePluginMockRecorder) Score(ctx, placement, spec, cluster interface{}) *gomock.Call {
func (mr *MockScorePluginMockRecorder) Score(ctx, spec, cluster interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Score", reflect.TypeOf((*MockScorePlugin)(nil).Score), ctx, placement, spec, cluster)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Score", reflect.TypeOf((*MockScorePlugin)(nil).Score), ctx, spec, cluster)
}
// ScoreExtensions mocks base method.