adopt binding v1alpha2

Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>

commit 56fbbe1054 (parent a362e07125)
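The diff below switches every binding-related type and label from the work/v1alpha1 API group to work/v1alpha2; the Work type itself stays on v1alpha1, so that import is kept wherever Work objects are still referenced. A minimal Go sketch of the pattern (illustrative only, not part of the diff; the aliases follow the repository's existing import convention):

package example

import (
	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" // Work objects stay on v1alpha1
	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" // binding types and labels move to v1alpha2
)

// newObjects shows the split this commit applies: ResourceBinding (and its
// spec, labels, and TargetCluster) now come from v1alpha2, while Work is
// still the v1alpha1 type.
func newObjects() (*workv1alpha2.ResourceBinding, *workv1alpha1.Work) {
	binding := &workv1alpha2.ResourceBinding{} // was: &workv1alpha1.ResourceBinding{}
	work := &workv1alpha1.Work{}
	return binding, work
}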
@@ -19,6 +19,7 @@ import (
 policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
 workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
+workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
 "github.com/karmada-io/karmada/pkg/util"
 "github.com/karmada-io/karmada/pkg/util/helper"
 "github.com/karmada-io/karmada/pkg/util/overridemanager"
@@ -42,13 +43,13 @@ type ResourceBindingController struct {
 func (c *ResourceBindingController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
 klog.V(4).Infof("Reconciling ResourceBinding %s.", req.NamespacedName.String())
-binding := &workv1alpha1.ResourceBinding{}
+binding := &workv1alpha2.ResourceBinding{}
 if err := c.Client.Get(context.TODO(), req.NamespacedName, binding); err != nil {
 // The resource no longer exist, clean up derived Work objects.
 if apierrors.IsNotFound(err) {
 return helper.DeleteWorks(c.Client, labels.Set{
-workv1alpha1.ResourceBindingNamespaceLabel: req.Namespace,
-workv1alpha1.ResourceBindingNameLabel: req.Name,
+workv1alpha2.ResourceBindingNamespaceLabel: req.Namespace,
+workv1alpha2.ResourceBindingNameLabel: req.Name,
 })
 }
@@ -69,7 +70,7 @@ func (c *ResourceBindingController) Reconcile(ctx context.Context, req controlle
 }
 // syncBinding will sync resourceBinding to Works.
-func (c *ResourceBindingController) syncBinding(binding *workv1alpha1.ResourceBinding) (controllerruntime.Result, error) {
+func (c *ResourceBindingController) syncBinding(binding *workv1alpha2.ResourceBinding) (controllerruntime.Result, error) {
 clusterNames := helper.GetBindingClusterNames(binding.Spec.Clusters)
 works, err := helper.FindOrphanWorks(c.Client, binding.Namespace, binding.Name, clusterNames, apiextensionsv1.NamespaceScoped)
 if err != nil {
@@ -117,8 +118,8 @@ func (c *ResourceBindingController) SetupWithManager(mgr controllerruntime.Manag
 var requests []reconcile.Request
 labels := a.GetLabels()
-resourcebindingNamespace, namespaceExist := labels[workv1alpha1.ResourceBindingNamespaceLabel]
-resourcebindingName, nameExist := labels[workv1alpha1.ResourceBindingNameLabel]
+resourcebindingNamespace, namespaceExist := labels[workv1alpha2.ResourceBindingNamespaceLabel]
+resourcebindingName, nameExist := labels[workv1alpha2.ResourceBindingNameLabel]
 if !namespaceExist || !nameExist {
 return nil
 }
@@ -131,7 +132,7 @@ func (c *ResourceBindingController) SetupWithManager(mgr controllerruntime.Manag
 return requests
 })
-return controllerruntime.NewControllerManagedBy(mgr).For(&workv1alpha1.ResourceBinding{}).
+return controllerruntime.NewControllerManagedBy(mgr).For(&workv1alpha2.ResourceBinding{}).
 Watches(&source.Kind{Type: &workv1alpha1.Work{}}, handler.EnqueueRequestsFromMapFunc(workFn), workPredicateFn).
 Watches(&source.Kind{Type: &policyv1alpha1.OverridePolicy{}}, handler.EnqueueRequestsFromMapFunc(c.newOverridePolicyFunc())).
 Watches(&source.Kind{Type: &policyv1alpha1.ClusterOverridePolicy{}}, handler.EnqueueRequestsFromMapFunc(c.newOverridePolicyFunc())).
@@ -151,7 +152,7 @@ func (c *ResourceBindingController) newOverridePolicyFunc() handler.MapFunc {
 return nil
 }
-bindingList := &workv1alpha1.ResourceBindingList{}
+bindingList := &workv1alpha2.ResourceBindingList{}
 if err := c.Client.List(context.TODO(), bindingList); err != nil {
 klog.Errorf("Failed to list resourceBindings, error: %v", err)
 return nil
@@ -180,7 +181,7 @@ func (c *ResourceBindingController) newOverridePolicyFunc() handler.MapFunc {
 func (c *ResourceBindingController) newReplicaSchedulingPolicyFunc() handler.MapFunc {
 return func(a client.Object) []reconcile.Request {
 rspResourceSelectors := a.(*policyv1alpha1.ReplicaSchedulingPolicy).Spec.ResourceSelectors
-bindingList := &workv1alpha1.ResourceBindingList{}
+bindingList := &workv1alpha2.ResourceBindingList{}
 if err := c.Client.List(context.TODO(), bindingList); err != nil {
 klog.Errorf("Failed to list resourceBindings, error: %v", err)
 return nil

@@ -19,6 +19,7 @@ import (
 policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
 workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
+workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
 "github.com/karmada-io/karmada/pkg/util"
 "github.com/karmada-io/karmada/pkg/util/helper"
 "github.com/karmada-io/karmada/pkg/util/overridemanager"
@@ -42,12 +43,12 @@ type ClusterResourceBindingController struct {
 func (c *ClusterResourceBindingController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
 klog.V(4).Infof("Reconciling ClusterResourceBinding %s.", req.NamespacedName.String())
-clusterResourceBinding := &workv1alpha1.ClusterResourceBinding{}
+clusterResourceBinding := &workv1alpha2.ClusterResourceBinding{}
 if err := c.Client.Get(context.TODO(), req.NamespacedName, clusterResourceBinding); err != nil {
 // The resource no longer exist, clean up derived Work objects.
 if apierrors.IsNotFound(err) {
 return helper.DeleteWorks(c.Client, labels.Set{
-workv1alpha1.ClusterResourceBindingLabel: req.Name,
+workv1alpha2.ClusterResourceBindingLabel: req.Name,
 })
 }
@@ -68,7 +69,7 @@ func (c *ClusterResourceBindingController) Reconcile(ctx context.Context, req co
 }
 // syncBinding will sync clusterResourceBinding to Works.
-func (c *ClusterResourceBindingController) syncBinding(binding *workv1alpha1.ClusterResourceBinding) (controllerruntime.Result, error) {
+func (c *ClusterResourceBindingController) syncBinding(binding *workv1alpha2.ClusterResourceBinding) (controllerruntime.Result, error) {
 clusterNames := helper.GetBindingClusterNames(binding.Spec.Clusters)
 works, err := helper.FindOrphanWorks(c.Client, "", binding.Name, clusterNames, apiextensionsv1.ClusterScoped)
 if err != nil {
@@ -111,7 +112,7 @@ func (c *ClusterResourceBindingController) SetupWithManager(mgr controllerruntim
 var requests []reconcile.Request
 labels := a.GetLabels()
-clusterResourcebindingName, nameExist := labels[workv1alpha1.ClusterResourceBindingLabel]
+clusterResourcebindingName, nameExist := labels[workv1alpha2.ClusterResourceBindingLabel]
 if !nameExist {
 return nil
 }
@@ -123,7 +124,7 @@ func (c *ClusterResourceBindingController) SetupWithManager(mgr controllerruntim
 return requests
 })
-return controllerruntime.NewControllerManagedBy(mgr).For(&workv1alpha1.ClusterResourceBinding{}).
+return controllerruntime.NewControllerManagedBy(mgr).For(&workv1alpha2.ClusterResourceBinding{}).
 Watches(&source.Kind{Type: &workv1alpha1.Work{}}, handler.EnqueueRequestsFromMapFunc(workFn), workPredicateFn).
 Watches(&source.Kind{Type: &policyv1alpha1.OverridePolicy{}}, handler.EnqueueRequestsFromMapFunc(c.newOverridePolicyFunc())).
 Watches(&source.Kind{Type: &policyv1alpha1.ClusterOverridePolicy{}}, handler.EnqueueRequestsFromMapFunc(c.newOverridePolicyFunc())).
@@ -143,7 +144,7 @@ func (c *ClusterResourceBindingController) newOverridePolicyFunc() handler.MapFu
 return nil
 }
-bindingList := &workv1alpha1.ClusterResourceBindingList{}
+bindingList := &workv1alpha2.ClusterResourceBindingList{}
 if err := c.Client.List(context.TODO(), bindingList); err != nil {
 klog.Errorf("Failed to list clusterResourceBindings, error: %v", err)
 return nil
@@ -172,7 +173,7 @@ func (c *ClusterResourceBindingController) newOverridePolicyFunc() handler.MapFu
 func (c *ClusterResourceBindingController) newReplicaSchedulingPolicyFunc() handler.MapFunc {
 return func(a client.Object) []reconcile.Request {
 rspResourceSelectors := a.(*policyv1alpha1.ReplicaSchedulingPolicy).Spec.ResourceSelectors
-bindingList := &workv1alpha1.ClusterResourceBindingList{}
+bindingList := &workv1alpha2.ClusterResourceBindingList{}
 if err := c.Client.List(context.TODO(), bindingList); err != nil {
 klog.Errorf("Failed to list clusterResourceBindings, error: %v", err)
 return nil

@@ -17,6 +17,7 @@ import (
 clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
 policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
 workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
+workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
 "github.com/karmada-io/karmada/pkg/util"
 "github.com/karmada-io/karmada/pkg/util/helper"
 "github.com/karmada-io/karmada/pkg/util/names"
@@ -56,13 +57,13 @@ var workPredicateFn = builder.WithPredicates(predicate.Funcs{
 // ensureWork ensure Work to be created or updated.
 func ensureWork(c client.Client, workload *unstructured.Unstructured, overrideManager overridemanager.OverrideManager, binding metav1.Object, scope apiextensionsv1.ResourceScope) error {
-var targetClusters []workv1alpha1.TargetCluster
+var targetClusters []workv1alpha2.TargetCluster
 switch scope {
 case apiextensionsv1.NamespaceScoped:
-bindingObj := binding.(*workv1alpha1.ResourceBinding)
+bindingObj := binding.(*workv1alpha2.ResourceBinding)
 targetClusters = bindingObj.Spec.Clusters
 case apiextensionsv1.ClusterScoped:
-bindingObj := binding.(*workv1alpha1.ClusterResourceBinding)
+bindingObj := binding.(*workv1alpha2.ClusterResourceBinding)
 targetClusters = bindingObj.Spec.Clusters
 }
@@ -117,7 +118,7 @@ func ensureWork(c client.Client, workload *unstructured.Unstructured, overrideMa
 return nil
 }
-func getRSPAndReplicaInfos(c client.Client, workload *unstructured.Unstructured, targetClusters []workv1alpha1.TargetCluster) (bool, *policyv1alpha1.ReplicaSchedulingPolicy, map[string]int64, error) {
+func getRSPAndReplicaInfos(c client.Client, workload *unstructured.Unstructured, targetClusters []workv1alpha2.TargetCluster) (bool, *policyv1alpha1.ReplicaSchedulingPolicy, map[string]int64, error) {
 if helper.HasScheduledReplica(targetClusters) {
 return true, nil, transScheduleResultToMap(targetClusters), nil
 }
@@ -147,17 +148,17 @@ func applyReplicaSchedulingPolicy(workload *unstructured.Unstructured, desireRep
 func mergeLabel(workload *unstructured.Unstructured, workNamespace string, binding metav1.Object, scope apiextensionsv1.ResourceScope) map[string]string {
 var workLabel = make(map[string]string)
-util.MergeLabel(workload, workv1alpha1.WorkNamespaceLabel, workNamespace)
-util.MergeLabel(workload, workv1alpha1.WorkNameLabel, names.GenerateWorkName(workload.GetKind(), workload.GetName(), workload.GetNamespace()))
+util.MergeLabel(workload, workv1alpha2.WorkNamespaceLabel, workNamespace)
+util.MergeLabel(workload, workv1alpha2.WorkNameLabel, names.GenerateWorkName(workload.GetKind(), workload.GetName(), workload.GetNamespace()))
 if scope == apiextensionsv1.NamespaceScoped {
-util.MergeLabel(workload, workv1alpha1.ResourceBindingNamespaceLabel, binding.GetNamespace())
-util.MergeLabel(workload, workv1alpha1.ResourceBindingNameLabel, binding.GetName())
-workLabel[workv1alpha1.ResourceBindingNamespaceLabel] = binding.GetNamespace()
-workLabel[workv1alpha1.ResourceBindingNameLabel] = binding.GetName()
+util.MergeLabel(workload, workv1alpha2.ResourceBindingNamespaceLabel, binding.GetNamespace())
+util.MergeLabel(workload, workv1alpha2.ResourceBindingNameLabel, binding.GetName())
+workLabel[workv1alpha2.ResourceBindingNamespaceLabel] = binding.GetNamespace()
+workLabel[workv1alpha2.ResourceBindingNameLabel] = binding.GetName()
 } else {
-util.MergeLabel(workload, workv1alpha1.ClusterResourceBindingLabel, binding.GetName())
-workLabel[workv1alpha1.ClusterResourceBindingLabel] = binding.GetName()
+util.MergeLabel(workload, workv1alpha2.ClusterResourceBindingLabel, binding.GetName())
+workLabel[workv1alpha2.ClusterResourceBindingLabel] = binding.GetName()
 }
 return workLabel
@@ -189,7 +190,7 @@ func recordAppliedOverrides(cops *overridemanager.AppliedOverrides, ops *overrid
 return annotations, nil
 }
-func transScheduleResultToMap(scheduleResult []workv1alpha1.TargetCluster) map[string]int64 {
+func transScheduleResultToMap(scheduleResult []workv1alpha2.TargetCluster) map[string]int64 {
 var desireReplicaInfos = make(map[string]int64, len(scheduleResult))
 for _, clusterInfo := range scheduleResult {
 desireReplicaInfos[clusterInfo.Name] = int64(clusterInfo.Replicas)

@@ -17,7 +17,7 @@ import (
 controllerruntime "sigs.k8s.io/controller-runtime"
 "sigs.k8s.io/controller-runtime/pkg/client"
-workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
+workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
 "github.com/karmada-io/karmada/pkg/util"
 "github.com/karmada-io/karmada/pkg/util/helper"
 "github.com/karmada-io/karmada/pkg/util/names"
@@ -98,8 +98,8 @@ func (c *HorizontalPodAutoscalerController) buildWorks(hpa *autoscalingv1.Horizo
 },
 }
-util.MergeLabel(hpaObj, workv1alpha1.WorkNamespaceLabel, workNamespace)
-util.MergeLabel(hpaObj, workv1alpha1.WorkNameLabel, workName)
+util.MergeLabel(hpaObj, workv1alpha2.WorkNamespaceLabel, workNamespace)
+util.MergeLabel(hpaObj, workv1alpha2.WorkNameLabel, workName)
 if err = helper.CreateOrUpdateWork(c.Client, objectMeta, hpaObj); err != nil {
 return err
@@ -125,7 +125,7 @@ func (c *HorizontalPodAutoscalerController) getTargetPlacement(objRef autoscalin
 return nil, err
 }
 bindingName := names.GenerateBindingName(unstructuredWorkLoad.GetKind(), unstructuredWorkLoad.GetName())
-binding := &workv1alpha1.ResourceBinding{}
+binding := &workv1alpha2.ResourceBinding{}
 namespacedName := types.NamespacedName{
 Namespace: namespace,
 Name: bindingName,

@@ -7,7 +7,7 @@ import (
 "time"
 clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
-workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
+workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
 "github.com/karmada-io/karmada/pkg/estimator/pb"
 "github.com/karmada-io/karmada/pkg/util"
 )
@@ -34,13 +34,13 @@ func NewSchedulerEstimator(cache *SchedulerEstimatorCache, timeout time.Duration
 }
 // MaxAvailableReplicas estimates the maximum replicas that can be applied to the target cluster by calling karmada-scheduler-estimator.
-func (se *SchedulerEstimator) MaxAvailableReplicas(clusters []*clusterv1alpha1.Cluster, replicaRequirements *workv1alpha1.ReplicaRequirements) ([]workv1alpha1.TargetCluster, error) {
+func (se *SchedulerEstimator) MaxAvailableReplicas(clusters []*clusterv1alpha1.Cluster, replicaRequirements *workv1alpha2.ReplicaRequirements) ([]workv1alpha2.TargetCluster, error) {
 return getClusterReplicasConcurrently(clusters, se.timeout, func(ctx context.Context, cluster string) (int32, error) {
 return se.maxAvailableReplicas(ctx, cluster, replicaRequirements.DeepCopy())
 })
 }
-func (se *SchedulerEstimator) maxAvailableReplicas(ctx context.Context, cluster string, replicaRequirements *workv1alpha1.ReplicaRequirements) (int32, error) {
+func (se *SchedulerEstimator) maxAvailableReplicas(ctx context.Context, cluster string, replicaRequirements *workv1alpha2.ReplicaRequirements) (int32, error) {
 client, err := se.cache.GetClient(cluster)
 if err != nil {
 return UnauthenticReplica, err
@@ -66,8 +66,8 @@ func (se *SchedulerEstimator) maxAvailableReplicas(ctx context.Context, cluster
 return res.MaxReplicas, nil
 }
-func getClusterReplicasConcurrently(clusters []*clusterv1alpha1.Cluster, timeout time.Duration, getClusterReplicas getClusterReplicasFunc) ([]workv1alpha1.TargetCluster, error) {
-availableTargetClusters := make([]workv1alpha1.TargetCluster, len(clusters))
+func getClusterReplicasConcurrently(clusters []*clusterv1alpha1.Cluster, timeout time.Duration, getClusterReplicas getClusterReplicasFunc) ([]workv1alpha2.TargetCluster, error) {
+availableTargetClusters := make([]workv1alpha2.TargetCluster, len(clusters))
 ctx, cancel := context.WithTimeout(context.Background(), timeout)
 defer cancel()
@@ -81,7 +81,7 @@ func getClusterReplicasConcurrently(clusters []*clusterv1alpha1.Cluster, timeout
 if err != nil {
 errChan <- err
 }
-availableTargetClusters[idx] = workv1alpha1.TargetCluster{Name: cluster, Replicas: replicas}
+availableTargetClusters[idx] = workv1alpha2.TargetCluster{Name: cluster, Replicas: replicas}
 }(i, clusters[i].Name)
 }
 wg.Wait()

@@ -6,7 +6,7 @@ import (
 corev1 "k8s.io/api/core/v1"
 clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
-workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
+workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
 )
 // GeneralEstimator is the default replica estimator.
@@ -23,16 +23,16 @@ func NewGeneralEstimator() *GeneralEstimator {
 }
 // MaxAvailableReplicas estimates the maximum replicas that can be applied to the target cluster by cluster ResourceSummary.
-func (ge *GeneralEstimator) MaxAvailableReplicas(clusters []*clusterv1alpha1.Cluster, replicaRequirements *workv1alpha1.ReplicaRequirements) ([]workv1alpha1.TargetCluster, error) {
-availableTargetClusters := make([]workv1alpha1.TargetCluster, len(clusters))
+func (ge *GeneralEstimator) MaxAvailableReplicas(clusters []*clusterv1alpha1.Cluster, replicaRequirements *workv1alpha2.ReplicaRequirements) ([]workv1alpha2.TargetCluster, error) {
+availableTargetClusters := make([]workv1alpha2.TargetCluster, len(clusters))
 for i, cluster := range clusters {
 maxReplicas := ge.maxAvailableReplicas(cluster, replicaRequirements)
-availableTargetClusters[i] = workv1alpha1.TargetCluster{Name: cluster.Name, Replicas: maxReplicas}
+availableTargetClusters[i] = workv1alpha2.TargetCluster{Name: cluster.Name, Replicas: maxReplicas}
 }
 return availableTargetClusters, nil
 }
-func (ge *GeneralEstimator) maxAvailableReplicas(cluster *clusterv1alpha1.Cluster, replicaRequirements *workv1alpha1.ReplicaRequirements) int32 {
+func (ge *GeneralEstimator) maxAvailableReplicas(cluster *clusterv1alpha1.Cluster, replicaRequirements *workv1alpha2.ReplicaRequirements) int32 {
 var maximumReplicas int64 = math.MaxInt32
 resourceSummary := cluster.Status.ResourceSummary

@@ -2,7 +2,7 @@ package client
 import (
 clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
-workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
+workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
 )
 // UnauthenticReplica is special replica number returned by estimator in case of estimator can't calculate the available
@@ -16,7 +16,7 @@ var (
 // ReplicaEstimator is an estimator which estimates the maximum replicas that can be applied to the target cluster.
 type ReplicaEstimator interface {
-MaxAvailableReplicas(clusters []*clusterv1alpha1.Cluster, replicaRequirements *workv1alpha1.ReplicaRequirements) ([]workv1alpha1.TargetCluster, error)
+MaxAvailableReplicas(clusters []*clusterv1alpha1.Cluster, replicaRequirements *workv1alpha2.ReplicaRequirements) ([]workv1alpha2.TargetCluster, error)
 }
 // GetReplicaEstimators returns all replica estimators.

@@ -11,7 +11,7 @@ import (
 clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
 policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
-workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
+workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
 estimatorclient "github.com/karmada-io/karmada/pkg/estimator/client"
 lister "github.com/karmada-io/karmada/pkg/generated/listers/policy/v1alpha1"
 "github.com/karmada-io/karmada/pkg/scheduler/cache"
@@ -23,14 +23,14 @@ import (
 // ScheduleAlgorithm is the interface that should be implemented to schedule a resource to the target clusters.
 type ScheduleAlgorithm interface {
-Schedule(context.Context, *policyv1alpha1.Placement, *workv1alpha1.ResourceBindingSpec) (scheduleResult ScheduleResult, err error)
-ScaleSchedule(context.Context, *policyv1alpha1.Placement, *workv1alpha1.ResourceBindingSpec) (scheduleResult ScheduleResult, err error)
-FailoverSchedule(context.Context, *policyv1alpha1.Placement, *workv1alpha1.ResourceBindingSpec) (scheduleResult ScheduleResult, err error)
+Schedule(context.Context, *policyv1alpha1.Placement, *workv1alpha2.ResourceBindingSpec) (scheduleResult ScheduleResult, err error)
+ScaleSchedule(context.Context, *policyv1alpha1.Placement, *workv1alpha2.ResourceBindingSpec) (scheduleResult ScheduleResult, err error)
+FailoverSchedule(context.Context, *policyv1alpha1.Placement, *workv1alpha2.ResourceBindingSpec) (scheduleResult ScheduleResult, err error)
 }
 // ScheduleResult includes the clusters selected.
 type ScheduleResult struct {
-SuggestedClusters []workv1alpha1.TargetCluster
+SuggestedClusters []workv1alpha2.TargetCluster
 }
 type genericScheduler struct {
@@ -53,7 +53,7 @@ func NewGenericScheduler(
 }
 }
-func (g *genericScheduler) Schedule(ctx context.Context, placement *policyv1alpha1.Placement, spec *workv1alpha1.ResourceBindingSpec) (result ScheduleResult, err error) {
+func (g *genericScheduler) Schedule(ctx context.Context, placement *policyv1alpha1.Placement, spec *workv1alpha2.ResourceBindingSpec) (result ScheduleResult, err error) {
 clusterInfoSnapshot := g.schedulerCache.Snapshot()
 if clusterInfoSnapshot.NumOfClusters() == 0 {
 return result, fmt.Errorf("no clusters available to schedule")
@@ -90,7 +90,7 @@ func (g *genericScheduler) findClustersThatFit(
 ctx context.Context,
 fwk framework.Framework,
 placement *policyv1alpha1.Placement,
-resource *workv1alpha1.ObjectReference,
+resource *workv1alpha2.ObjectReference,
 clusterInfo *cache.Snapshot) ([]*clusterv1alpha1.Cluster, error) {
 var out []*clusterv1alpha1.Cluster
 clusters := clusterInfo.GetReadyClusters()
@@ -199,16 +199,16 @@ func (g *genericScheduler) chooseSpreadGroup(spreadGroup *util.SpreadGroup) []*c
 return feasibleClusters
 }
-func (g *genericScheduler) assignReplicas(clusters []*clusterv1alpha1.Cluster, replicaSchedulingStrategy *policyv1alpha1.ReplicaSchedulingStrategy, object *workv1alpha1.ResourceBindingSpec) ([]workv1alpha1.TargetCluster, error) {
+func (g *genericScheduler) assignReplicas(clusters []*clusterv1alpha1.Cluster, replicaSchedulingStrategy *policyv1alpha1.ReplicaSchedulingStrategy, object *workv1alpha2.ResourceBindingSpec) ([]workv1alpha2.TargetCluster, error) {
 if len(clusters) == 0 {
 return nil, fmt.Errorf("no clusters available to schedule")
 }
-targetClusters := make([]workv1alpha1.TargetCluster, len(clusters))
+targetClusters := make([]workv1alpha2.TargetCluster, len(clusters))
 if object.Replicas > 0 && replicaSchedulingStrategy != nil {
 if replicaSchedulingStrategy.ReplicaSchedulingType == policyv1alpha1.ReplicaSchedulingTypeDuplicated {
 for i, cluster := range clusters {
-targetClusters[i] = workv1alpha1.TargetCluster{Name: cluster.Name, Replicas: object.Replicas}
+targetClusters[i] = workv1alpha2.TargetCluster{Name: cluster.Name, Replicas: object.Replicas}
 }
 return targetClusters, nil
 }
@@ -230,7 +230,7 @@ func (g *genericScheduler) assignReplicas(clusters []*clusterv1alpha1.Cluster, r
 }
 for i, cluster := range clusters {
-targetClusters[i] = workv1alpha1.TargetCluster{Name: cluster.Name}
+targetClusters[i] = workv1alpha2.TargetCluster{Name: cluster.Name}
 }
 return targetClusters, nil
 }
@@ -253,7 +253,7 @@ func getDefaultWeightPreference(clusters []*clusterv1alpha1.Cluster) *policyv1al
 }
 // divideReplicasByStaticWeight assigns a total number of replicas to the selected clusters by the weight list.
-func (g *genericScheduler) divideReplicasByStaticWeight(clusters []*clusterv1alpha1.Cluster, staticWeightList []policyv1alpha1.StaticClusterWeight, replicas int32) ([]workv1alpha1.TargetCluster, error) {
+func (g *genericScheduler) divideReplicasByStaticWeight(clusters []*clusterv1alpha1.Cluster, staticWeightList []policyv1alpha1.StaticClusterWeight, replicas int32) ([]workv1alpha2.TargetCluster, error) {
 weightSum := int64(0)
 matchClusters := make(map[string]int64)
 desireReplicaInfos := make(map[string]int64)
@@ -298,24 +298,24 @@ func (g *genericScheduler) divideReplicasByStaticWeight(clusters []*clusterv1alp
 }
 }
-targetClusters := make([]workv1alpha1.TargetCluster, len(desireReplicaInfos))
+targetClusters := make([]workv1alpha2.TargetCluster, len(desireReplicaInfos))
 i := 0
 for key, value := range desireReplicaInfos {
-targetClusters[i] = workv1alpha1.TargetCluster{Name: key, Replicas: int32(value)}
+targetClusters[i] = workv1alpha2.TargetCluster{Name: key, Replicas: int32(value)}
 i++
 }
 return targetClusters, nil
 }
 // TargetClustersList is a slice of TargetCluster that implements sort.Interface to sort by Value.
-type TargetClustersList []workv1alpha1.TargetCluster
+type TargetClustersList []workv1alpha2.TargetCluster
 func (a TargetClustersList) Len() int { return len(a) }
 func (a TargetClustersList) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
 func (a TargetClustersList) Less(i, j int) bool { return a[i].Replicas > a[j].Replicas }
 func (g *genericScheduler) divideReplicasAggregatedWithResource(clusters []*clusterv1alpha1.Cluster,
-spec *workv1alpha1.ResourceBindingSpec, preUsedClustersName ...string) ([]workv1alpha1.TargetCluster, error) {
+spec *workv1alpha2.ResourceBindingSpec, preUsedClustersName ...string) ([]workv1alpha2.TargetCluster, error) {
 // make sure preUsedClusters are in front of the unUsedClusters in the list of clusterAvailableReplicas
 // so that we can assign new replicas to them preferentially when scale up.
 // preUsedClusters have none items during first scheduler
@@ -326,8 +326,8 @@ func (g *genericScheduler) divideReplicasAggregatedWithResource(clusters []*clus
 return g.divideReplicasAggregatedWithClusterReplicas(clusterAvailableReplicas, spec.Replicas)
 }
-func (g *genericScheduler) calAvailableReplicas(clusters []*clusterv1alpha1.Cluster, spec *workv1alpha1.ResourceBindingSpec) []workv1alpha1.TargetCluster {
-availableTargetClusters := make([]workv1alpha1.TargetCluster, len(clusters))
+func (g *genericScheduler) calAvailableReplicas(clusters []*clusterv1alpha1.Cluster, spec *workv1alpha2.ResourceBindingSpec) []workv1alpha2.TargetCluster {
+availableTargetClusters := make([]workv1alpha2.TargetCluster, len(clusters))
 // Set the boundary.
 for i := range availableTargetClusters {
@@ -366,7 +366,7 @@ func (g *genericScheduler) calAvailableReplicas(clusters []*clusterv1alpha1.Clus
 return availableTargetClusters
 }
-func (g *genericScheduler) divideReplicasAggregatedWithClusterReplicas(clusterAvailableReplicas []workv1alpha1.TargetCluster, replicas int32) ([]workv1alpha1.TargetCluster, error) {
+func (g *genericScheduler) divideReplicasAggregatedWithClusterReplicas(clusterAvailableReplicas []workv1alpha2.TargetCluster, replicas int32) ([]workv1alpha2.TargetCluster, error) {
 clustersNum := 0
 clustersMaxReplicas := int32(0)
 for _, clusterInfo := range clusterAvailableReplicas {
@@ -401,23 +401,23 @@ func (g *genericScheduler) divideReplicasAggregatedWithClusterReplicas(clusterAv
 }
 }
-targetClusters := make([]workv1alpha1.TargetCluster, len(clusterAvailableReplicas))
+targetClusters := make([]workv1alpha2.TargetCluster, len(clusterAvailableReplicas))
 i := 0
 for key, value := range desireReplicaInfos {
-targetClusters[i] = workv1alpha1.TargetCluster{Name: key, Replicas: value}
+targetClusters[i] = workv1alpha2.TargetCluster{Name: key, Replicas: value}
 i++
 }
 return targetClusters, nil
 }
 func (g *genericScheduler) ScaleSchedule(ctx context.Context, placement *policyv1alpha1.Placement,
-spec *workv1alpha1.ResourceBindingSpec) (result ScheduleResult, err error) {
-newTargetClusters := make([]workv1alpha1.TargetCluster, len(spec.Clusters))
+spec *workv1alpha2.ResourceBindingSpec) (result ScheduleResult, err error) {
+newTargetClusters := make([]workv1alpha2.TargetCluster, len(spec.Clusters))
 if spec.Replicas > 0 {
 if placement.ReplicaScheduling.ReplicaSchedulingType == policyv1alpha1.ReplicaSchedulingTypeDuplicated {
 for i, cluster := range spec.Clusters {
-newTargetClusters[i] = workv1alpha1.TargetCluster{Name: cluster.Name, Replicas: spec.Replicas}
+newTargetClusters[i] = workv1alpha2.TargetCluster{Name: cluster.Name, Replicas: spec.Replicas}
 }
 result.SuggestedClusters = newTargetClusters
 return result, nil
@@ -446,13 +446,13 @@ func (g *genericScheduler) ScaleSchedule(ctx context.Context, placement *policyv
 }
 for i, cluster := range spec.Clusters {
-newTargetClusters[i] = workv1alpha1.TargetCluster{Name: cluster.Name}
+newTargetClusters[i] = workv1alpha2.TargetCluster{Name: cluster.Name}
 }
 result.SuggestedClusters = newTargetClusters
 return result, nil
 }
-func (g *genericScheduler) scaleScheduleWithReplicaDivisionPreferenceAggregated(spec *workv1alpha1.ResourceBindingSpec) (result ScheduleResult, err error) {
+func (g *genericScheduler) scaleScheduleWithReplicaDivisionPreferenceAggregated(spec *workv1alpha2.ResourceBindingSpec) (result ScheduleResult, err error) {
 assignedReplicas := util.GetSumOfReplicas(spec.Clusters)
 if assignedReplicas > spec.Replicas {
 newTargetClusters, err := g.scaleDownScheduleWithReplicaDivisionPreferenceAggregated(spec)
@@ -472,11 +472,11 @@ func (g *genericScheduler) scaleScheduleWithReplicaDivisionPreferenceAggregated(
 return result, nil
 }
-func (g *genericScheduler) scaleDownScheduleWithReplicaDivisionPreferenceAggregated(spec *workv1alpha1.ResourceBindingSpec) ([]workv1alpha1.TargetCluster, error) {
+func (g *genericScheduler) scaleDownScheduleWithReplicaDivisionPreferenceAggregated(spec *workv1alpha2.ResourceBindingSpec) ([]workv1alpha2.TargetCluster, error) {
 return g.divideReplicasAggregatedWithClusterReplicas(spec.Clusters, spec.Replicas)
 }
-func (g *genericScheduler) scaleUpScheduleWithReplicaDivisionPreferenceAggregated(spec *workv1alpha1.ResourceBindingSpec) ([]workv1alpha1.TargetCluster, error) {
+func (g *genericScheduler) scaleUpScheduleWithReplicaDivisionPreferenceAggregated(spec *workv1alpha2.ResourceBindingSpec) ([]workv1alpha2.TargetCluster, error) {
 // find the clusters that have old replicas so we can assign new replicas to them preferentially
 // targetMap map of the result for the old replicas so that it can be merged with the new result easily
 targetMap := make(map[string]int32)
@@ -508,12 +508,12 @@ func (g *genericScheduler) scaleUpScheduleWithReplicaDivisionPreferenceAggregate
 }
 }
 for key, value := range targetMap {
-result = append(result, workv1alpha1.TargetCluster{Name: key, Replicas: value})
+result = append(result, workv1alpha2.TargetCluster{Name: key, Replicas: value})
 }
 return result, nil
 }
-func (g *genericScheduler) getPreSelected(targetClusters []workv1alpha1.TargetCluster) []*clusterv1alpha1.Cluster {
+func (g *genericScheduler) getPreSelected(targetClusters []workv1alpha2.TargetCluster) []*clusterv1alpha1.Cluster {
 var preSelectedClusters []*clusterv1alpha1.Cluster
 clusterInfoSnapshot := g.schedulerCache.Snapshot()
 for _, targetCluster := range targetClusters {
@@ -545,7 +545,7 @@ func (g *genericScheduler) getPreUsed(clusters []*clusterv1alpha1.Cluster, preUs
 }
 func (g *genericScheduler) FailoverSchedule(ctx context.Context, placement *policyv1alpha1.Placement,
-spec *workv1alpha1.ResourceBindingSpec) (result ScheduleResult, err error) {
+spec *workv1alpha2.ResourceBindingSpec) (result ScheduleResult, err error) {
 readyClusters := g.schedulerCache.Snapshot().GetReadyClusterNames()
 totalClusters := util.ConvertToClusterNames(spec.Clusters)
@@ -588,9 +588,9 @@ func (g *genericScheduler) FailoverSchedule(ctx context.Context, placement *poli
 targetClusters.Insert(clusterList[i])
 }
-var reScheduleResult []workv1alpha1.TargetCluster
+var reScheduleResult []workv1alpha2.TargetCluster
 for cluster := range targetClusters {
-reScheduleResult = append(reScheduleResult, workv1alpha1.TargetCluster{Name: cluster})
+reScheduleResult = append(reScheduleResult, workv1alpha2.TargetCluster{Name: cluster})
 }
 return ScheduleResult{reScheduleResult}, nil

@@ -7,7 +7,7 @@ import (
 clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
 policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
-workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
+workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
 )
 // Framework manages the set of plugins in use by the scheduling framework.
@@ -16,7 +16,7 @@ type Framework interface {
 // RunFilterPlugins runs the set of configured Filter plugins for resources on
 // the given cluster.
-RunFilterPlugins(ctx context.Context, placement *policyv1alpha1.Placement, resource *workv1alpha1.ObjectReference, clusterv1alpha1 *clusterv1alpha1.Cluster) PluginToResult
+RunFilterPlugins(ctx context.Context, placement *policyv1alpha1.Placement, resource *workv1alpha2.ObjectReference, clusterv1alpha1 *clusterv1alpha1.Cluster) PluginToResult
 // RunScorePlugins runs the set of configured Score plugins, it returns a map of plugin name to cores
 RunScorePlugins(ctx context.Context, placement *policyv1alpha1.Placement, clusters []*clusterv1alpha1.Cluster) (PluginToClusterScores, error)
@@ -32,7 +32,7 @@ type Plugin interface {
 type FilterPlugin interface {
 Plugin
 // Filter is called by the scheduling framework.
-Filter(ctx context.Context, placement *policyv1alpha1.Placement, resource *workv1alpha1.ObjectReference, clusterv1alpha1 *clusterv1alpha1.Cluster) *Result
+Filter(ctx context.Context, placement *policyv1alpha1.Placement, resource *workv1alpha2.ObjectReference, clusterv1alpha1 *clusterv1alpha1.Cluster) *Result
 }
 // Result indicates the result of running a plugin. It consists of a code, a

@@ -7,7 +7,7 @@ import (
 clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
 policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
-workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
+workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
 "github.com/karmada-io/karmada/pkg/scheduler/framework"
 "github.com/karmada-io/karmada/pkg/util/helper"
 )
@@ -34,7 +34,7 @@ func (p *APIInstalled) Name() string {
 }
 // Filter checks if the API(CRD) of the resource is installed in the target cluster.
-func (p *APIInstalled) Filter(ctx context.Context, placement *policyv1alpha1.Placement, resource *workv1alpha1.ObjectReference, cluster *clusterv1alpha1.Cluster) *framework.Result {
+func (p *APIInstalled) Filter(ctx context.Context, placement *policyv1alpha1.Placement, resource *workv1alpha2.ObjectReference, cluster *clusterv1alpha1.Cluster) *framework.Result {
 if !helper.IsAPIEnabled(cluster.Status.APIEnablements, resource.APIVersion, resource.Kind) {
 klog.V(2).Infof("cluster(%s) not fit as missing API(%s, kind=%s)", cluster.Name, resource.APIVersion, resource.Kind)
 return framework.NewResult(framework.Unschedulable, "no such API resource")

@@ -5,7 +5,7 @@ import (
 clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
 policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
-workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
+workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
 "github.com/karmada-io/karmada/pkg/scheduler/framework"
 "github.com/karmada-io/karmada/pkg/util"
 )
@@ -32,7 +32,7 @@ func (p *ClusterAffinity) Name() string {
 }
 // Filter checks if the cluster matched the placement cluster affinity constraint.
-func (p *ClusterAffinity) Filter(ctx context.Context, placement *policyv1alpha1.Placement, resource *workv1alpha1.ObjectReference, cluster *clusterv1alpha1.Cluster) *framework.Result {
+func (p *ClusterAffinity) Filter(ctx context.Context, placement *policyv1alpha1.Placement, resource *workv1alpha2.ObjectReference, cluster *clusterv1alpha1.Cluster) *framework.Result {
 affinity := placement.ClusterAffinity
 if affinity != nil {
 if util.ClusterMatches(cluster, *affinity) {

@@ -9,7 +9,7 @@ import (
 clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
 policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
-workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
+workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
 "github.com/karmada-io/karmada/pkg/scheduler/framework"
 )
@@ -35,7 +35,7 @@ func (p *TaintToleration) Name() string {
 }
 // Filter checks if the given tolerations in placement tolerate cluster's taints.
-func (p *TaintToleration) Filter(ctx context.Context, placement *policyv1alpha1.Placement, resource *workv1alpha1.ObjectReference, cluster *clusterv1alpha1.Cluster) *framework.Result {
+func (p *TaintToleration) Filter(ctx context.Context, placement *policyv1alpha1.Placement, resource *workv1alpha2.ObjectReference, cluster *clusterv1alpha1.Cluster) *framework.Result {
 filterPredicate := func(t *corev1.Taint) bool {
 // now only interested in NoSchedule taint which means do not allow new resource to schedule onto the cluster unless they tolerate the taint
 // todo: supprot NoExecute taint

@@ -9,7 +9,7 @@ import (
 clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
 policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
-workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
+workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
 "github.com/karmada-io/karmada/pkg/scheduler/framework"
 plugins2 "github.com/karmada-io/karmada/pkg/scheduler/framework/plugins"
 )
@@ -48,7 +48,7 @@ func NewFramework(plugins []string) framework.Framework {
 // RunFilterPlugins runs the set of configured Filter plugins for resources on the cluster.
 // If any of the result is not success, the cluster is not suited for the resource.
-func (frw *frameworkImpl) RunFilterPlugins(ctx context.Context, placement *policyv1alpha1.Placement, resource *workv1alpha1.ObjectReference, cluster *clusterv1alpha1.Cluster) framework.PluginToResult {
+func (frw *frameworkImpl) RunFilterPlugins(ctx context.Context, placement *policyv1alpha1.Placement, resource *workv1alpha2.ObjectReference, cluster *clusterv1alpha1.Cluster) framework.PluginToResult {
 result := make(framework.PluginToResult, len(frw.filterPlugins))
 for _, p := range frw.filterPlugins {
 pluginResult := p.Filter(ctx, placement, resource, cluster)

@@ -22,13 +22,13 @@ import (
 "github.com/karmada-io/karmada/cmd/scheduler/app/options"
 clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
 policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
-workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
+workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
 estimatorclient "github.com/karmada-io/karmada/pkg/estimator/client"
 karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
 informerfactory "github.com/karmada-io/karmada/pkg/generated/informers/externalversions"
 clusterlister "github.com/karmada-io/karmada/pkg/generated/listers/cluster/v1alpha1"
 policylister "github.com/karmada-io/karmada/pkg/generated/listers/policy/v1alpha1"
-worklister "github.com/karmada-io/karmada/pkg/generated/listers/work/v1alpha1"
+worklister "github.com/karmada-io/karmada/pkg/generated/listers/work/v1alpha2"
 schedulercache "github.com/karmada-io/karmada/pkg/scheduler/cache"
 "github.com/karmada-io/karmada/pkg/scheduler/core"
 "github.com/karmada-io/karmada/pkg/scheduler/framework/plugins/apiinstalled"
@@ -95,12 +95,12 @@ type Scheduler struct {
 // NewScheduler instantiates a scheduler
 func NewScheduler(dynamicClient dynamic.Interface, karmadaClient karmadaclientset.Interface, kubeClient kubernetes.Interface, opts *options.Options) *Scheduler {
 factory := informerfactory.NewSharedInformerFactory(karmadaClient, 0)
-bindingInformer := factory.Work().V1alpha1().ResourceBindings().Informer()
-bindingLister := factory.Work().V1alpha1().ResourceBindings().Lister()
+bindingInformer := factory.Work().V1alpha2().ResourceBindings().Informer()
+bindingLister := factory.Work().V1alpha2().ResourceBindings().Lister()
 policyInformer := factory.Policy().V1alpha1().PropagationPolicies().Informer()
 policyLister := factory.Policy().V1alpha1().PropagationPolicies().Lister()
-clusterBindingInformer := factory.Work().V1alpha1().ClusterResourceBindings().Informer()
-clusterBindingLister := factory.Work().V1alpha1().ClusterResourceBindings().Lister()
+clusterBindingInformer := factory.Work().V1alpha2().ClusterResourceBindings().Informer()
+clusterBindingLister := factory.Work().V1alpha2().ClusterResourceBindings().Lister()
 clusterPolicyInformer := factory.Policy().V1alpha1().ClusterPropagationPolicies().Informer()
 clusterPolicyLister := factory.Policy().V1alpha1().ClusterPropagationPolicies().Lister()
 clusterLister := factory.Cluster().V1alpha1().Clusters().Lister()
@@ -289,7 +289,7 @@ func (s *Scheduler) requeueClusterResourceBindings(selector labels.Selector) err
 return nil
 }
-func (s *Scheduler) getPlacement(resourceBinding *workv1alpha1.ResourceBinding) (policyv1alpha1.Placement, string, error) {
+func (s *Scheduler) getPlacement(resourceBinding *workv1alpha2.ResourceBinding) (policyv1alpha1.Placement, string, error) {
 var placement policyv1alpha1.Placement
 var clusterPolicyName string
 var policyName string
@@ -335,7 +335,7 @@ func (s *Scheduler) getPlacement(resourceBinding *workv1alpha1.ResourceBinding)
 return placement, string(placementBytes), nil
 }
-func (s *Scheduler) getClusterPlacement(crb *workv1alpha1.ClusterResourceBinding) (policyv1alpha1.Placement, string, error) {
+func (s *Scheduler) getClusterPlacement(crb *workv1alpha2.ClusterResourceBinding) (policyv1alpha1.Placement, string, error) {
 var placement policyv1alpha1.Placement
 policyName := util.GetLabelValue(crb.Labels, policyv1alpha1.ClusterPropagationPolicyLabel)
@@ -432,7 +432,7 @@ func (s *Scheduler) scheduleOne(key string) (err error) {
 return s.scheduleResourceBinding(resourceBinding)
 }
-func (s *Scheduler) scheduleResourceBinding(resourceBinding *workv1alpha1.ResourceBinding) (err error) {
+func (s *Scheduler) scheduleResourceBinding(resourceBinding *workv1alpha2.ResourceBinding) (err error) {
 placement, placementStr, err := s.getPlacement(resourceBinding)
 if err != nil {
 return err
@@ -453,14 +453,14 @@ func (s *Scheduler) scheduleResourceBinding(resourceBinding *workv1alpha1.Resour
 }
 binding.Annotations[util.PolicyPlacementAnnotation] = placementStr
-_, err = s.KarmadaClient.WorkV1alpha1().ResourceBindings(binding.Namespace).Update(context.TODO(), binding, metav1.UpdateOptions{})
+_, err = s.KarmadaClient.WorkV1alpha2().ResourceBindings(binding.Namespace).Update(context.TODO(), binding, metav1.UpdateOptions{})
 if err != nil {
 return err
 }
 return nil
 }
-func (s *Scheduler) scheduleClusterResourceBinding(clusterResourceBinding *workv1alpha1.ClusterResourceBinding, policy *policyv1alpha1.ClusterPropagationPolicy) (err error) {
+func (s *Scheduler) scheduleClusterResourceBinding(clusterResourceBinding *workv1alpha2.ClusterResourceBinding, policy *policyv1alpha1.ClusterPropagationPolicy) (err error) {
 scheduleResult, err := s.Algorithm.Schedule(context.TODO(), &policy.Spec.Placement, &clusterResourceBinding.Spec)
 if err != nil {
 klog.V(2).Infof("failed scheduling ClusterResourceBinding %s: %v", clusterResourceBinding.Name, err)
@@ -482,7 +482,7 @@ func (s *Scheduler) scheduleClusterResourceBinding(clusterResourceBinding *workv
 }
 binding.Annotations[util.PolicyPlacementAnnotation] = string(placement)
-_, err = s.KarmadaClient.WorkV1alpha1().ClusterResourceBindings().Update(context.TODO(), binding, metav1.UpdateOptions{})
+_, err = s.KarmadaClient.WorkV1alpha2().ClusterResourceBindings().Update(context.TODO(), binding, metav1.UpdateOptions{})
 if err != nil {
 return err
 }
@@ -633,7 +633,7 @@ func (s *Scheduler) rescheduleOne(key string) (err error) {
 return nil
 }
-func (s *Scheduler) rescheduleClusterResourceBinding(clusterResourceBinding *workv1alpha1.ClusterResourceBinding) error {
+func (s *Scheduler) rescheduleClusterResourceBinding(clusterResourceBinding *workv1alpha2.ClusterResourceBinding) error {
 policyName := util.GetLabelValue(clusterResourceBinding.Labels, policyv1alpha1.ClusterPropagationPolicyLabel)
 policy, err := s.clusterPolicyLister.Get(policyName)
 if err != nil {
@@ -652,14 +652,14 @@ func (s *Scheduler) rescheduleClusterResourceBinding(clusterResourceBinding *wor
 clusterResourceBinding.Spec.Clusters = reScheduleResult.SuggestedClusters
 klog.Infof("The final binding.Spec.Cluster values are: %v\n", clusterResourceBinding.Spec.Clusters)
-_, err = s.KarmadaClient.WorkV1alpha1().ClusterResourceBindings().Update(context.TODO(), clusterResourceBinding, metav1.UpdateOptions{})
+_, err = s.KarmadaClient.WorkV1alpha2().ClusterResourceBindings().Update(context.TODO(), clusterResourceBinding, metav1.UpdateOptions{})
 if err != nil {
 return err
 }
 return nil
 }
-func (s *Scheduler) rescheduleResourceBinding(resourceBinding *workv1alpha1.ResourceBinding) error {
+func (s *Scheduler) rescheduleResourceBinding(resourceBinding *workv1alpha2.ResourceBinding) error {
 placement, _, err := s.getPlacement(resourceBinding)
 if err != nil {
 klog.Errorf("Failed to get placement by resourceBinding(%s/%s): Error: %v", resourceBinding.Namespace, resourceBinding.Name, err)
@@ -677,7 +677,7 @@ func (s *Scheduler) rescheduleResourceBinding(resourceBinding *workv1alpha1.Reso
 resourceBinding.Spec.Clusters = reScheduleResult.SuggestedClusters
 klog.Infof("The final binding.Spec.Cluster values are: %v\n", resourceBinding.Spec.Clusters)
-_, err = s.KarmadaClient.WorkV1alpha1().ResourceBindings(resourceBinding.Namespace).Update(context.TODO(), resourceBinding, metav1.UpdateOptions{})
+_, err = s.KarmadaClient.WorkV1alpha2().ResourceBindings(resourceBinding.Namespace).Update(context.TODO(), resourceBinding, metav1.UpdateOptions{})
 if err != nil {
 return err
 }
@@ -717,7 +717,7 @@ func (s *Scheduler) scaleScheduleOne(key string) (err error) {
 return s.scaleScheduleResourceBinding(resourceBinding)
 }
-func (s *Scheduler) scaleScheduleResourceBinding(resourceBinding *workv1alpha1.ResourceBinding) (err error) {
+func (s *Scheduler) scaleScheduleResourceBinding(resourceBinding *workv1alpha2.ResourceBinding) (err error) {
 placement, placementStr, err := s.getPlacement(resourceBinding)
 if err != nil {
 return err
@@ -739,14 +739,14 @@ func (s *Scheduler) scaleScheduleResourceBinding(resourceBinding *workv1alpha1.R
 }
 binding.Annotations[util.PolicyPlacementAnnotation] = placementStr
-_, err = s.KarmadaClient.WorkV1alpha1().ResourceBindings(binding.Namespace).Update(context.TODO(), binding, metav1.UpdateOptions{})
+_, err = s.KarmadaClient.WorkV1alpha2().ResourceBindings(binding.Namespace).Update(context.TODO(), binding, metav1.UpdateOptions{})
 if err != nil {
 return err
 }
 return nil
 }
-func (s *Scheduler) scaleScheduleClusterResourceBinding(clusterResourceBinding *workv1alpha1.ClusterResourceBinding,
+func (s *Scheduler) scaleScheduleClusterResourceBinding(clusterResourceBinding *workv1alpha2.ClusterResourceBinding,
 policy *policyv1alpha1.ClusterPropagationPolicy) (err error) {
 scheduleResult, err := s.Algorithm.ScaleSchedule(context.TODO(), &policy.Spec.Placement, &clusterResourceBinding.Spec)
 if err != nil {
@@ -770,7 +770,7 @@ func (s *Scheduler) scaleScheduleClusterResourceBinding(clusterResourceBinding *
 }
 binding.Annotations[util.PolicyPlacementAnnotation] = string(placement)
-_, err = s.KarmadaClient.WorkV1alpha1().ClusterResourceBindings().Update(context.TODO(), binding, metav1.UpdateOptions{})
+_, err = s.KarmadaClient.WorkV1alpha2().ClusterResourceBindings().Update(context.TODO(), binding, metav1.UpdateOptions{})
 if err != nil {
 return err
 }
@@ -838,7 +838,7 @@ func (s *Scheduler) getTypeFromClusterResourceBindings(name string) ScheduleType
 return FailoverSchedule
 }
-func (s *Scheduler) allClustersInReadyState(tcs []workv1alpha1.TargetCluster) bool {
+func (s *Scheduler) allClustersInReadyState(tcs []workv1alpha2.TargetCluster) bool {
 clusters := s.schedulerCache.Snapshot().GetClusters()
 for i := range tcs {
 for _, c := range clusters {

@@ -4,11 +4,11 @@ import (
 "k8s.io/apimachinery/pkg/util/sets"
 policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
-workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
+workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
 )
 // GetBindingClusterNames will get clusterName list from bind clusters field
-func GetBindingClusterNames(binding *workv1alpha1.ResourceBinding) []string {
+func GetBindingClusterNames(binding *workv1alpha2.ResourceBinding) []string {
 var clusterNames []string
 for _, targetCluster := range binding.Spec.Clusters {
 clusterNames = append(clusterNames, targetCluster.Name)
@@ -17,7 +17,7 @@ func GetBindingClusterNames(binding *workv1alpha1.ResourceBinding) []string {
 }
 // IsBindingReplicasChanged will check if the sum of replicas is different from the replicas of object
-func IsBindingReplicasChanged(bindingSpec *workv1alpha1.ResourceBindingSpec, strategy *policyv1alpha1.ReplicaSchedulingStrategy) bool {
+func IsBindingReplicasChanged(bindingSpec *workv1alpha2.ResourceBindingSpec, strategy *policyv1alpha1.ReplicaSchedulingStrategy) bool {
 if strategy == nil {
 return false
 }
@@ -40,7 +40,7 @@ func IsBindingReplicasChanged(bindingSpec *workv1alpha1.ResourceBindingSpec, str
 }
 // GetSumOfReplicas will get the sum of replicas in target clusters
-func GetSumOfReplicas(clusters []workv1alpha1.TargetCluster) int32 {
+func GetSumOfReplicas(clusters []workv1alpha2.TargetCluster) int32 {
 replicasSum := int32(0)
 for i := range clusters {
 replicasSum += clusters[i].Replicas
@@ -49,7 +49,7 @@ func GetSumOfReplicas(clusters []workv1alpha1.TargetCluster) int32 {
 }
 // ConvertToClusterNames will convert a cluster slice to clusterName's sets.String
-func ConvertToClusterNames(clusters []workv1alpha1.TargetCluster) sets.String {
+func ConvertToClusterNames(clusters []workv1alpha2.TargetCluster) sets.String {
 clusterNames := sets.NewString()
 for _, cluster := range clusters {
 clusterNames.Insert(cluster.Name)

@@ -16,11 +16,11 @@ import (
	"k8s.io/klog/v2"
	"sigs.k8s.io/controller-runtime/pkg/client"

	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
)

// AggregateDeploymentStatus summarize deployment status and update to original objects.
func (d *ResourceDetector) AggregateDeploymentStatus(objRef workv1alpha1.ObjectReference, status []workv1alpha1.AggregatedStatusItem) error {
func (d *ResourceDetector) AggregateDeploymentStatus(objRef workv1alpha2.ObjectReference, status []workv1alpha2.AggregatedStatusItem) error {
	if objRef.APIVersion != "apps/v1" {
		return nil
	}
@@ -81,7 +81,7 @@ func (d *ResourceDetector) AggregateDeploymentStatus(objRef workv1alpha1.ObjectR
}

// AggregateServiceStatus summarize service status and update to original objects.
func (d *ResourceDetector) AggregateServiceStatus(objRef workv1alpha1.ObjectReference, status []workv1alpha1.AggregatedStatusItem) error {
func (d *ResourceDetector) AggregateServiceStatus(objRef workv1alpha2.ObjectReference, status []workv1alpha2.AggregatedStatusItem) error {
	if objRef.APIVersion != "v1" {
		return nil
	}
@@ -138,7 +138,7 @@ func (d *ResourceDetector) AggregateServiceStatus(objRef workv1alpha1.ObjectRefe
}

// AggregateIngressStatus summarize ingress status and update to original objects.
func (d *ResourceDetector) AggregateIngressStatus(objRef workv1alpha1.ObjectReference, status []workv1alpha1.AggregatedStatusItem) error {
func (d *ResourceDetector) AggregateIngressStatus(objRef workv1alpha2.ObjectReference, status []workv1alpha2.AggregatedStatusItem) error {
	if objRef.APIVersion != "extensions/v1beta1" {
		return nil
	}
@@ -190,7 +190,7 @@ func (d *ResourceDetector) AggregateIngressStatus(objRef workv1alpha1.ObjectRefe
}

// AggregateJobStatus summarize job status and update to original objects.
func (d *ResourceDetector) AggregateJobStatus(objRef workv1alpha1.ObjectReference, status []workv1alpha1.AggregatedStatusItem, clusters []workv1alpha1.TargetCluster) error {
func (d *ResourceDetector) AggregateJobStatus(objRef workv1alpha2.ObjectReference, status []workv1alpha2.AggregatedStatusItem, clusters []workv1alpha2.TargetCluster) error {
	if objRef.APIVersion != "batch/v1" {
		return nil
	}
@@ -235,7 +235,7 @@ func (d *ResourceDetector) getJobFinishedStatus(jobStatus *batchv1.JobStatus) (b
}

// parsingJobStatus generates new status of given 'AggregatedStatusItem'.
func (d *ResourceDetector) parsingJobStatus(obj *batchv1.Job, status []workv1alpha1.AggregatedStatusItem, clusters []workv1alpha1.TargetCluster) (*batchv1.JobStatus, error) {
func (d *ResourceDetector) parsingJobStatus(obj *batchv1.Job, status []workv1alpha2.AggregatedStatusItem, clusters []workv1alpha2.TargetCluster) (*batchv1.JobStatus, error) {
	var jobFailed []string
	successfulJobs := 0
	newStatus := &batchv1.JobStatus{}
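The hunk above also touches getJobFinishedStatus, whose body is not visible in this diff. A common way to decide whether a Job has finished is to look for a Complete or Failed condition with status True; the sketch below is an assumption about that logic, not the function's actual body:

package detectorsketch

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

// jobFinished reports whether the Job reached a terminal condition and which one.
func jobFinished(status *batchv1.JobStatus) (bool, batchv1.JobConditionType) {
	for _, c := range status.Conditions {
		if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == corev1.ConditionTrue {
			return true, c.Type
		}
	}
	return false, ""
}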
@@ -27,7 +27,7 @@ import (
	"sigs.k8s.io/controller-runtime/pkg/manager"

	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
	"github.com/karmada-io/karmada/pkg/util"
	"github.com/karmada-io/karmada/pkg/util/helper"
	"github.com/karmada-io/karmada/pkg/util/informermanager"
@@ -112,8 +112,8 @@ func (d *ResourceDetector) Start(ctx context.Context) error {

	// watch and enqueue ResourceBinding changes.
	resourceBindingGVR := schema.GroupVersionResource{
		Group:    workv1alpha1.GroupVersion.Group,
		Version:  workv1alpha1.GroupVersion.Version,
		Group:    workv1alpha2.GroupVersion.Group,
		Version:  workv1alpha2.GroupVersion.Version,
		Resource: "resourcebindings",
	}
	bindingHandler := informermanager.NewHandlerOnEvents(d.OnResourceBindingAdd, d.OnResourceBindingUpdate, nil)
@@ -122,8 +122,8 @@ func (d *ResourceDetector) Start(ctx context.Context) error {

	// watch and enqueue ClusterResourceBinding changes.
	clusterResourceBindingGVR := schema.GroupVersionResource{
		Group:    workv1alpha1.GroupVersion.Group,
		Version:  workv1alpha1.GroupVersion.Version,
		Group:    workv1alpha2.GroupVersion.Group,
		Version:  workv1alpha2.GroupVersion.Version,
		Resource: "clusterresourcebindings",
	}
	clusterBindingHandler := informermanager.NewHandlerOnEvents(d.OnClusterResourceBindingAdd, d.OnClusterResourceBindingUpdate, nil)
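Because GroupVersion is now taken from the v1alpha2 package, the informer manager watches the bindings under the new version of the work API group. Assuming that group is named work.karmada.io (the group string itself is not shown in this hunk), the resolved GVRs look like this:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Assumption: the work API group is "work.karmada.io"; only the version changes in this commit.
	resourceBindingGVR := schema.GroupVersionResource{
		Group:    "work.karmada.io",
		Version:  "v1alpha2",
		Resource: "resourcebindings",
	}
	clusterResourceBindingGVR := resourceBindingGVR
	clusterResourceBindingGVR.Resource = "clusterresourcebindings"
	fmt.Println(resourceBindingGVR, clusterResourceBindingGVR)
}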
@@ -612,13 +612,13 @@ func (d *ResourceDetector) ClaimClusterPolicyForObject(object *unstructured.Unst
}

// BuildResourceBinding builds a desired ResourceBinding for object.
func (d *ResourceDetector) BuildResourceBinding(object *unstructured.Unstructured, objectKey keys.ClusterWideKey, labels map[string]string) (*workv1alpha1.ResourceBinding, error) {
func (d *ResourceDetector) BuildResourceBinding(object *unstructured.Unstructured, objectKey keys.ClusterWideKey, labels map[string]string) (*workv1alpha2.ResourceBinding, error) {
	bindingName := names.GenerateBindingName(object.GetKind(), object.GetName())
	replicaRequirements, replicas, err := d.GetReplicaDeclaration(object)
	if err != nil {
		return nil, err
	}
	propagationBinding := &workv1alpha1.ResourceBinding{
	propagationBinding := &workv1alpha2.ResourceBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name:      bindingName,
			Namespace: object.GetNamespace(),
@@ -627,8 +627,8 @@ func (d *ResourceDetector) BuildResourceBinding(object *unstructured.Unstructure
			},
			Labels: labels,
		},
		Spec: workv1alpha1.ResourceBindingSpec{
			Resource: workv1alpha1.ObjectReference{
		Spec: workv1alpha2.ResourceBindingSpec{
			Resource: workv1alpha2.ObjectReference{
				APIVersion: object.GetAPIVersion(),
				Kind:       object.GetKind(),
				Namespace:  object.GetNamespace(),
@@ -644,13 +644,13 @@ func (d *ResourceDetector) BuildResourceBinding(object *unstructured.Unstructure
}

// BuildClusterResourceBinding builds a desired ClusterResourceBinding for object.
func (d *ResourceDetector) BuildClusterResourceBinding(object *unstructured.Unstructured, objectKey keys.ClusterWideKey, labels map[string]string) (*workv1alpha1.ClusterResourceBinding, error) {
func (d *ResourceDetector) BuildClusterResourceBinding(object *unstructured.Unstructured, objectKey keys.ClusterWideKey, labels map[string]string) (*workv1alpha2.ClusterResourceBinding, error) {
	bindingName := names.GenerateBindingName(object.GetKind(), object.GetName())
	replicaRequirements, replicas, err := d.GetReplicaDeclaration(object)
	if err != nil {
		return nil, err
	}
	binding := &workv1alpha1.ClusterResourceBinding{
	binding := &workv1alpha2.ClusterResourceBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: bindingName,
			OwnerReferences: []metav1.OwnerReference{
@@ -658,8 +658,8 @@ func (d *ResourceDetector) BuildClusterResourceBinding(object *unstructured.Unst
			},
			Labels: labels,
		},
		Spec: workv1alpha1.ResourceBindingSpec{
			Resource: workv1alpha1.ObjectReference{
		Spec: workv1alpha2.ResourceBindingSpec{
			Resource: workv1alpha2.ObjectReference{
				APIVersion: object.GetAPIVersion(),
				Kind:       object.GetKind(),
				Name:       object.GetName(),
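Pieced together from the fragments above, the namespace-scoped binding the detector builds now has roughly the following v1alpha2 shape. Owner references, replica requirements, and the replica count are omitted, and the field set is inferred only from the lines visible in this diff:

package detectorsketch

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
)

// buildBindingSketch shows the v1alpha2 ResourceBinding skeleton produced for a workload.
func buildBindingSketch(name, namespace, apiVersion, kind, resourceName string, labels map[string]string) *workv1alpha2.ResourceBinding {
	return &workv1alpha2.ResourceBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			Labels:    labels,
		},
		Spec: workv1alpha2.ResourceBindingSpec{
			Resource: workv1alpha2.ObjectReference{
				APIVersion: apiVersion,
				Kind:       kind,
				Namespace:  namespace,
				Name:       resourceName,
			},
		},
	}
}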
@@ -674,7 +674,7 @@ func (d *ResourceDetector) BuildClusterResourceBinding(object *unstructured.Unst
}

// GetReplicaDeclaration get the replicas and resource requirements of a Deployment object
func (d *ResourceDetector) GetReplicaDeclaration(object *unstructured.Unstructured) (*workv1alpha1.ReplicaRequirements, int32, error) {
func (d *ResourceDetector) GetReplicaDeclaration(object *unstructured.Unstructured) (*workv1alpha2.ReplicaRequirements, int32, error) {
	if object.GetKind() == util.DeploymentKind {
		replicas, ok, err := unstructured.NestedInt64(object.Object, util.SpecField, util.ReplicasField)
		if !ok || err != nil {
@@ -693,14 +693,14 @@ func (d *ResourceDetector) GetReplicaDeclaration(object *unstructured.Unstructur
	return nil, 0, nil
}

func (d *ResourceDetector) getReplicaRequirements(object map[string]interface{}) (*workv1alpha1.ReplicaRequirements, error) {
func (d *ResourceDetector) getReplicaRequirements(object map[string]interface{}) (*workv1alpha2.ReplicaRequirements, error) {
	var podTemplateSpec *corev1.PodTemplateSpec
	err := runtime.DefaultUnstructuredConverter.FromUnstructured(object, &podTemplateSpec)
	if err != nil {
		return nil, err
	}
	res := util.EmptyResource().AddPodRequest(&podTemplateSpec.Spec)
	replicaRequirements := &workv1alpha1.ReplicaRequirements{
	replicaRequirements := &workv1alpha2.ReplicaRequirements{
		NodeClaim:       helper.GenerateNodeClaimByPodSpec(&podTemplateSpec.Spec),
		ResourceRequest: res.ResourceList(),
	}
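For a Deployment, GetReplicaDeclaration reduces to reading spec.replicas and spec.template from the unstructured object and converting the template into a typed PodTemplateSpec. A trimmed sketch of that extraction using the standard unstructured helpers (the literal field paths stand in for util.SpecField and friends, and the error branches are simplified):

package detectorsketch

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

// replicaDeclaration extracts the declared replicas and pod template from a Deployment-shaped object.
func replicaDeclaration(object *unstructured.Unstructured) (int64, *corev1.PodTemplateSpec, error) {
	replicas, _, err := unstructured.NestedInt64(object.Object, "spec", "replicas")
	if err != nil {
		return 0, nil, err
	}
	templateMap, _, err := unstructured.NestedMap(object.Object, "spec", "template")
	if err != nil {
		return 0, nil, err
	}
	podTemplate := &corev1.PodTemplateSpec{}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(templateMap, podTemplate); err != nil {
		return 0, nil, err
	}
	return replicas, podTemplate, nil
}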
@@ -1061,7 +1061,7 @@ func (d *ResourceDetector) OnClusterResourceBindingUpdate(oldObj, newObj interfa
}

// CleanupLabels removes labels from object referencing by objRef.
func (d *ResourceDetector) CleanupLabels(objRef workv1alpha1.ObjectReference, labels ...string) error {
func (d *ResourceDetector) CleanupLabels(objRef workv1alpha2.ObjectReference, labels ...string) error {
	workload, err := helper.FetchWorkload(d.DynamicClient, d.RESTMapper, objRef)
	if err != nil {
		// do nothing if resource template not exist, it might has been removed.
@@ -19,6 +19,7 @@ import (
	"sigs.k8s.io/controller-runtime/pkg/client"

	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
	"github.com/karmada-io/karmada/pkg/util/names"
	"github.com/karmada-io/karmada/pkg/util/restmapper"
)
@@ -54,12 +55,12 @@ func SortClusterByWeight(m map[string]int64) ClusterWeightInfoList {
}

// IsBindingReady will check if resourceBinding/clusterResourceBinding is ready to build Work.
func IsBindingReady(targetClusters []workv1alpha1.TargetCluster) bool {
func IsBindingReady(targetClusters []workv1alpha2.TargetCluster) bool {
	return len(targetClusters) != 0
}

// HasScheduledReplica checks if the scheduler has assigned replicas for each cluster.
func HasScheduledReplica(scheduleResult []workv1alpha1.TargetCluster) bool {
func HasScheduledReplica(scheduleResult []workv1alpha2.TargetCluster) bool {
	for _, clusterResult := range scheduleResult {
		if clusterResult.Replicas > 0 {
			return true
@@ -69,7 +70,7 @@ func HasScheduledReplica(scheduleResult []workv1alpha1.TargetCluster) bool {
}

// GetBindingClusterNames will get clusterName list from bind clusters field
func GetBindingClusterNames(targetClusters []workv1alpha1.TargetCluster) []string {
func GetBindingClusterNames(targetClusters []workv1alpha2.TargetCluster) []string {
	var clusterNames []string
	for _, targetCluster := range targetClusters {
		clusterNames = append(clusterNames, targetCluster.Name)
@@ -83,8 +84,8 @@ func FindOrphanWorks(c client.Client, bindingNamespace, bindingName string, clus
	workList := &workv1alpha1.WorkList{}
	if scope == apiextensionsv1.NamespaceScoped {
		selector := labels.SelectorFromSet(labels.Set{
			workv1alpha1.ResourceBindingNamespaceLabel: bindingNamespace,
			workv1alpha1.ResourceBindingNameLabel:      bindingName,
			workv1alpha2.ResourceBindingNamespaceLabel: bindingNamespace,
			workv1alpha2.ResourceBindingNameLabel:      bindingName,
		})

		if err := c.List(context.TODO(), workList, &client.ListOptions{LabelSelector: selector}); err != nil {
@@ -92,7 +93,7 @@ func FindOrphanWorks(c client.Client, bindingNamespace, bindingName string, clus
		}
	} else {
		selector := labels.SelectorFromSet(labels.Set{
			workv1alpha1.ClusterResourceBindingLabel: bindingName,
			workv1alpha2.ClusterResourceBindingLabel: bindingName,
		})

		if err := c.List(context.TODO(), workList, &client.ListOptions{LabelSelector: selector}); err != nil {
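The same label-based lookup can be used by any caller that needs the Work objects derived from a binding. A usage sketch with hypothetical namespace and name values; note that the Work type itself stays in workv1alpha1, only the binding labels move to v1alpha2 in this commit:

package bindingsketch

import (
	"context"

	"k8s.io/apimachinery/pkg/labels"
	"sigs.k8s.io/controller-runtime/pkg/client"

	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
)

// listWorksForBinding lists the Work objects derived from a namespace-scoped ResourceBinding.
func listWorksForBinding(c client.Client, bindingNamespace, bindingName string) (*workv1alpha1.WorkList, error) {
	selector := labels.SelectorFromSet(labels.Set{
		workv1alpha2.ResourceBindingNamespaceLabel: bindingNamespace,
		workv1alpha2.ResourceBindingNameLabel:      bindingName,
	})
	workList := &workv1alpha1.WorkList{}
	if err := c.List(context.TODO(), workList, &client.ListOptions{LabelSelector: selector}); err != nil {
		return nil, err
	}
	return workList, nil
}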
@@ -129,7 +130,7 @@ func RemoveOrphanWorks(c client.Client, works []workv1alpha1.Work) error {
}

// FetchWorkload fetches the kubernetes resource to be propagated.
func FetchWorkload(dynamicClient dynamic.Interface, restMapper meta.RESTMapper, resource workv1alpha1.ObjectReference) (*unstructured.Unstructured, error) {
func FetchWorkload(dynamicClient dynamic.Interface, restMapper meta.RESTMapper, resource workv1alpha2.ObjectReference) (*unstructured.Unstructured, error) {
	dynamicResource, err := restmapper.GetGroupVersionResource(restMapper,
		schema.FromAPIVersionAndKind(resource.APIVersion, resource.Kind))
	if err != nil {
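The rest of FetchWorkload is not shown here. Once the GVR is known, the fetch is a plain dynamic-client GET; the sketch below uses the standard RESTMapper directly instead of the project's restmapper helper, so treat it as an approximation rather than the function's actual body:

package bindingsketch

import (
	"context"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"

	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
)

// fetchWorkload resolves the referenced resource and gets it through the dynamic client.
func fetchWorkload(dynamicClient dynamic.Interface, restMapper meta.RESTMapper, resource workv1alpha2.ObjectReference) (*unstructured.Unstructured, error) {
	gvk := schema.FromAPIVersionAndKind(resource.APIVersion, resource.Kind)
	mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
	if err != nil {
		return nil, err
	}
	return dynamicClient.Resource(mapping.Resource).
		Namespace(resource.Namespace).
		Get(context.TODO(), resource.Name, metav1.GetOptions{})
}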
@@ -150,16 +151,16 @@ func FetchWorkload(dynamicClient dynamic.Interface, restMapper meta.RESTMapper,
}

// GetClusterResourceBindings returns a ClusterResourceBindingList by labels.
func GetClusterResourceBindings(c client.Client, ls labels.Set) (*workv1alpha1.ClusterResourceBindingList, error) {
	bindings := &workv1alpha1.ClusterResourceBindingList{}
func GetClusterResourceBindings(c client.Client, ls labels.Set) (*workv1alpha2.ClusterResourceBindingList, error) {
	bindings := &workv1alpha2.ClusterResourceBindingList{}
	listOpt := &client.ListOptions{LabelSelector: labels.SelectorFromSet(ls)}

	return bindings, c.List(context.TODO(), bindings, listOpt)
}

// GetResourceBindings returns a ResourceBindingList by labels
func GetResourceBindings(c client.Client, ls labels.Set) (*workv1alpha1.ResourceBindingList, error) {
	bindings := &workv1alpha1.ResourceBindingList{}
func GetResourceBindings(c client.Client, ls labels.Set) (*workv1alpha2.ResourceBindingList, error) {
	bindings := &workv1alpha2.ResourceBindingList{}
	listOpt := &client.ListOptions{LabelSelector: labels.SelectorFromSet(ls)}

	return bindings, c.List(context.TODO(), bindings, listOpt)
@@ -197,8 +198,8 @@ func DeleteWorks(c client.Client, selector labels.Set) (controllerruntime.Result
}

// GenerateNodeClaimByPodSpec will return a NodeClaim from PodSpec.
func GenerateNodeClaimByPodSpec(podSpec *corev1.PodSpec) *workv1alpha1.NodeClaim {
	nodeClaim := &workv1alpha1.NodeClaim{
func GenerateNodeClaimByPodSpec(podSpec *corev1.PodSpec) *workv1alpha2.NodeClaim {
	nodeClaim := &workv1alpha2.NodeClaim{
		NodeSelector: podSpec.NodeSelector,
		Tolerations:  podSpec.Tolerations,
	}
@@ -8,7 +8,7 @@ import (
	"k8s.io/apimachinery/pkg/runtime"

	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
)

// ConvertToPropagationPolicy converts a PropagationPolicy object from unstructured to typed.
@@ -32,8 +32,8 @@ func ConvertToClusterPropagationPolicy(obj *unstructured.Unstructured) (*policyv
}

// ConvertToResourceBinding converts a ResourceBinding object from unstructured to typed.
func ConvertToResourceBinding(obj *unstructured.Unstructured) (*workv1alpha1.ResourceBinding, error) {
	typedObj := &workv1alpha1.ResourceBinding{}
func ConvertToResourceBinding(obj *unstructured.Unstructured) (*workv1alpha2.ResourceBinding, error) {
	typedObj := &workv1alpha2.ResourceBinding{}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), typedObj); err != nil {
		return nil, err
	}
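A usage sketch of the converter from an informer-style callback, where event objects arrive as *unstructured.Unstructured. The helper package path is assumed to match the helper package imported elsewhere in this commit:

package detectorsketch

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	"github.com/karmada-io/karmada/pkg/util/helper"
)

// onBindingAdd is a hypothetical add handler that converts the event object to a typed binding.
func onBindingAdd(obj interface{}) {
	unstructuredObj, ok := obj.(*unstructured.Unstructured)
	if !ok {
		return
	}
	binding, err := helper.ConvertToResourceBinding(unstructuredObj)
	if err != nil {
		return
	}
	fmt.Printf("observed ResourceBinding %s/%s\n", binding.Namespace, binding.Name)
}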
@@ -15,15 +15,16 @@ import (
	"sigs.k8s.io/controller-runtime/pkg/client"

	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
	"github.com/karmada-io/karmada/pkg/util/names"
)

// AggregateResourceBindingWorkStatus will collect all work statuses with current ResourceBinding objects,
// then aggregate status info to current ResourceBinding status.
func AggregateResourceBindingWorkStatus(c client.Client, binding *workv1alpha1.ResourceBinding, workload *unstructured.Unstructured) error {
func AggregateResourceBindingWorkStatus(c client.Client, binding *workv1alpha2.ResourceBinding, workload *unstructured.Unstructured) error {
	aggregatedStatuses, err := assembleWorkStatus(c, labels.SelectorFromSet(labels.Set{
		workv1alpha1.ResourceBindingNamespaceLabel: binding.Namespace,
		workv1alpha1.ResourceBindingNameLabel:      binding.Name,
		workv1alpha2.ResourceBindingNamespaceLabel: binding.Namespace,
		workv1alpha2.ResourceBindingNameLabel:      binding.Name,
	}), workload)
	if err != nil {
		return err
@@ -45,9 +46,9 @@ func AggregateResourceBindingWorkStatus(c client.Client, binding *workv1alpha1.R

// AggregateClusterResourceBindingWorkStatus will collect all work statuses with current ClusterResourceBinding objects,
// then aggregate status info to current ClusterResourceBinding status.
func AggregateClusterResourceBindingWorkStatus(c client.Client, binding *workv1alpha1.ClusterResourceBinding, workload *unstructured.Unstructured) error {
func AggregateClusterResourceBindingWorkStatus(c client.Client, binding *workv1alpha2.ClusterResourceBinding, workload *unstructured.Unstructured) error {
	aggregatedStatuses, err := assembleWorkStatus(c, labels.SelectorFromSet(labels.Set{
		workv1alpha1.ClusterResourceBindingLabel: binding.Name,
		workv1alpha2.ClusterResourceBindingLabel: binding.Name,
	}), workload)
	if err != nil {
		return err
@@ -68,13 +69,13 @@ func AggregateClusterResourceBindingWorkStatus(c client.Client, binding *workv1a
}

// assemble workStatuses from workList which list by selector and match with workload.
func assembleWorkStatus(c client.Client, selector labels.Selector, workload *unstructured.Unstructured) ([]workv1alpha1.AggregatedStatusItem, error) {
func assembleWorkStatus(c client.Client, selector labels.Selector, workload *unstructured.Unstructured) ([]workv1alpha2.AggregatedStatusItem, error) {
	workList := &workv1alpha1.WorkList{}
	if err := c.List(context.TODO(), workList, &client.ListOptions{LabelSelector: selector}); err != nil {
		return nil, err
	}

	statuses := make([]workv1alpha1.AggregatedStatusItem, 0)
	statuses := make([]workv1alpha2.AggregatedStatusItem, 0)
	for _, work := range workList.Items {
		identifierIndex, err := GetManifestIndex(work.Spec.Workload.Manifests, workload)
		if err != nil {
@@ -104,7 +105,7 @@ func assembleWorkStatus(c client.Client, selector labels.Selector, workload *uns
			}
		}
		if !applied {
			aggregatedStatus := workv1alpha1.AggregatedStatusItem{
			aggregatedStatus := workv1alpha2.AggregatedStatusItem{
				ClusterName:    clusterName,
				Applied:        applied,
				AppliedMessage: appliedMsg,
@@ -119,7 +120,7 @@ func assembleWorkStatus(c client.Client, selector labels.Selector, workload *uns
			return nil, err
		}
		if equal {
			aggregatedStatus := workv1alpha1.AggregatedStatusItem{
			aggregatedStatus := workv1alpha2.AggregatedStatusItem{
				ClusterName: clusterName,
				Status:      manifestStatus.Status,
				Applied:     applied,
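Consumers read the aggregated per-cluster items back from the binding status. The item fields used below appear in this diff; the AggregatedStatus field name on the binding status is an assumption:

package bindingsketch

import (
	"fmt"

	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
)

// printAppliedClusters lists which member clusters report the workload as applied.
func printAppliedClusters(binding *workv1alpha2.ResourceBinding) {
	for _, item := range binding.Status.AggregatedStatus {
		fmt.Printf("cluster=%s applied=%t message=%q\n", item.ClusterName, item.Applied, item.AppliedMessage)
	}
}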
@@ -18,7 +18,7 @@ import (

	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
	"github.com/karmada-io/karmada/pkg/util"
	"github.com/karmada-io/karmada/pkg/util/names"
	"github.com/karmada-io/karmada/test/helper"
@@ -209,7 +209,7 @@ func recoverCluster(c client.Client, clusterName string, originalAPIEndpoint str
// get the target cluster names from binding information
func getTargetClusterNames(deployment *appsv1.Deployment) (targetClusterNames []string, err error) {
	bindingName := names.GenerateBindingName(deployment.Kind, deployment.Name)
	binding := &workv1alpha1.ResourceBinding{}
	binding := &workv1alpha2.ResourceBinding{}

	err = wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
		err = controlPlaneClient.Get(context.TODO(), client.ObjectKey{Namespace: deployment.Namespace, Name: bindingName}, binding)
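The body of that poll loop is cut off above. A plausible completion under the same client-go helpers (the not-found handling and the success condition are assumptions):

package e2esketch

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/util/wait"
	"sigs.k8s.io/controller-runtime/pkg/client"

	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
)

// waitForScheduledBinding polls until the ResourceBinding exists and has scheduled clusters,
// then returns the target cluster names.
func waitForScheduledBinding(c client.Client, namespace, bindingName string, interval, timeout time.Duration) ([]string, error) {
	binding := &workv1alpha2.ResourceBinding{}
	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		if err := c.Get(context.TODO(), client.ObjectKey{Namespace: namespace, Name: bindingName}, binding); err != nil {
			if apierrors.IsNotFound(err) {
				return false, nil
			}
			return false, err
		}
		return len(binding.Spec.Clusters) > 0, nil
	})
	if err != nil {
		return nil, err
	}
	var clusterNames []string
	for _, cluster := range binding.Spec.Clusters {
		clusterNames = append(clusterNames, cluster.Name)
	}
	return clusterNames, nil
}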
@@ -22,7 +22,7 @@ import (

	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
	"github.com/karmada-io/karmada/pkg/util"
	"github.com/karmada-io/karmada/pkg/util/names"
	"github.com/karmada-io/karmada/test/helper"
@@ -248,7 +248,7 @@ var _ = ginkgo.Describe("propagation with label and group constraints testing",
		ginkgo.By("collect the target clusters in cluster resource binding", func() {
			bindingName := names.GenerateBindingName(crd.Kind, crd.Name)
			fmt.Printf("crd kind is %s, name is %s\n", crd.Kind, crd.Name)
			binding := &workv1alpha1.ClusterResourceBinding{}
			binding := &workv1alpha2.ClusterResourceBinding{}

			fmt.Printf("MaxGroups= %v, MinGroups= %v\n", maxGroups, minGroups)
			gomega.Eventually(func() int {
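The Eventually body is likewise cut off. A plausible shape for the assertion, written as a standalone helper; the matcher and the use of the scheduled cluster count are assumptions:

package e2esketch

import (
	"context"
	"time"

	"github.com/onsi/gomega"
	"sigs.k8s.io/controller-runtime/pkg/client"

	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
)

// expectScheduledGroups asserts that the ClusterResourceBinding is eventually scheduled
// to at least minGroups member clusters.
func expectScheduledGroups(c client.Client, bindingName string, minGroups int, timeout, interval time.Duration) {
	binding := &workv1alpha2.ClusterResourceBinding{}
	gomega.Eventually(func() int {
		if err := c.Get(context.TODO(), client.ObjectKey{Name: bindingName}, binding); err != nil {
			return -1
		}
		return len(binding.Spec.Clusters)
	}, timeout, interval).Should(gomega.BeNumerically(">=", minGroups))
}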