Moving ResourceBinding to work.karmada.io group

Signed-off-by: RainbowMango <renhongcai@huawei.com>
Authored by RainbowMango on 2021-03-06 14:43:55 +08:00; committed by Hongcai Ren
parent 0b22238888
commit 30bda2b55b
8 changed files with 57 additions and 54 deletions
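This change moves the ResourceBinding API types (ResourceBinding, ResourceBindingList, ResourceBindingSpec, ObjectReference, TargetCluster) from pkg/apis/policy/v1alpha1 to pkg/apis/work/v1alpha1, i.e. into the work.karmada.io API group, and switches every call site to a workv1alpha1 import alias, as the diff below shows. A minimal, illustrative sketch of the resulting usage follows; the newBinding helper and the example package name are hypothetical and only meant to show the relocated import path:

package example

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
)

// newBinding is an illustrative helper (not part of this commit): after the move,
// ResourceBinding and its spec types come from work/v1alpha1 (group work.karmada.io)
// rather than policy/v1alpha1.
func newBinding(name, namespace string) *workv1alpha1.ResourceBinding {
    return &workv1alpha1.ResourceBinding{
        ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
        Spec:       workv1alpha1.ResourceBindingSpec{},
    }
}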

View File

@@ -46,7 +46,7 @@ type ResourceBindingController struct {
 func (c *ResourceBindingController) Reconcile(req controllerruntime.Request) (controllerruntime.Result, error) {
     klog.V(4).Infof("Reconciling ResourceBinding %s.", req.NamespacedName.String())
-    binding := &v1alpha1.ResourceBinding{}
+    binding := &workv1alpha1.ResourceBinding{}
     if err := c.Client.Get(context.TODO(), req.NamespacedName, binding); err != nil {
         // The resource may no longer exist, in which case we stop processing.
         if errors.IsNotFound(err) {
@@ -72,12 +72,12 @@ func (c *ResourceBindingController) Reconcile(req controllerruntime.Request) (co
 }
 // isBindingReady will check if propagationBinding is ready to build Work.
-func (c *ResourceBindingController) isBindingReady(binding *v1alpha1.ResourceBinding) bool {
+func (c *ResourceBindingController) isBindingReady(binding *workv1alpha1.ResourceBinding) bool {
     return len(binding.Spec.Clusters) != 0
 }
 // syncBinding will sync propagationBinding to Works.
-func (c *ResourceBindingController) syncBinding(binding *v1alpha1.ResourceBinding) (controllerruntime.Result, error) {
+func (c *ResourceBindingController) syncBinding(binding *workv1alpha1.ResourceBinding) (controllerruntime.Result, error) {
     clusterNames := c.getBindingClusterNames(binding)
     works, err := c.findOrphanWorks(binding.Namespace, binding.Name, clusterNames)
     if err != nil {
@@ -143,11 +143,11 @@ func (c *ResourceBindingController) findOrphanWorks(bindingNamespace string, bin
 // SetupWithManager creates a controller and register to controller manager.
 func (c *ResourceBindingController) SetupWithManager(mgr controllerruntime.Manager) error {
-    return controllerruntime.NewControllerManagedBy(mgr).For(&v1alpha1.ResourceBinding{}).Complete(c)
+    return controllerruntime.NewControllerManagedBy(mgr).For(&workv1alpha1.ResourceBinding{}).Complete(c)
 }
 // getBindingClusterNames will get clusterName list from bind clusters field
-func (c *ResourceBindingController) getBindingClusterNames(binding *v1alpha1.ResourceBinding) []string {
+func (c *ResourceBindingController) getBindingClusterNames(binding *workv1alpha1.ResourceBinding) []string {
     var clusterNames []string
     for _, targetCluster := range binding.Spec.Clusters {
         clusterNames = append(clusterNames, targetCluster.Name)
@@ -167,7 +167,7 @@ func (c *ResourceBindingController) removeIrrelevantField(workload *unstructured
 }
 // transformBindingToWorks will transform propagationBinding to Works
-func (c *ResourceBindingController) transformBindingToWorks(binding *v1alpha1.ResourceBinding, clusterNames []string) error {
+func (c *ResourceBindingController) transformBindingToWorks(binding *workv1alpha1.ResourceBinding, clusterNames []string) error {
     dynamicResource, err := restmapper.GetGroupVersionResource(c.RESTMapper,
         schema.FromAPIVersionAndKind(binding.Spec.Resource.APIVersion, binding.Spec.Resource.Kind))
     if err != nil {
@@ -192,7 +192,7 @@ func (c *ResourceBindingController) transformBindingToWorks(binding *v1alpha1.Re
 // ensureWork ensure Work to be created or updated
 func (c *ResourceBindingController) ensureWork(workload *unstructured.Unstructured, clusterNames []string,
-    binding *v1alpha1.ResourceBinding) error {
+    binding *workv1alpha1.ResourceBinding) error {
     c.removeIrrelevantField(workload)
     for _, clusterName := range clusterNames {

View File

@@ -17,7 +17,7 @@ import (
     controllerruntime "sigs.k8s.io/controller-runtime"
     "sigs.k8s.io/controller-runtime/pkg/client"
-    "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+    workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
     "github.com/karmada-io/karmada/pkg/util"
     "github.com/karmada-io/karmada/pkg/util/names"
     "github.com/karmada-io/karmada/pkg/util/restmapper"
@@ -133,7 +133,7 @@ func (c *HorizontalPodAutoscalerController) getTargetPlacement(objRef autoscalin
         return nil, err
     }
     bindingName := names.GenerateBindingName(unstructuredWorkLoad.GetNamespace(), unstructuredWorkLoad.GetKind(), unstructuredWorkLoad.GetName())
-    binding := &v1alpha1.ResourceBinding{}
+    binding := &workv1alpha1.ResourceBinding{}
     namespacedName := types.NamespacedName{
         Namespace: namespace,
         Name: bindingName,

View File

@@ -17,6 +17,7 @@ import (
     "sigs.k8s.io/controller-runtime/pkg/client"
     "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+    workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
     "github.com/karmada-io/karmada/pkg/util"
     "github.com/karmada-io/karmada/pkg/util/names"
     "github.com/karmada-io/karmada/pkg/util/restmapper"
@@ -111,7 +112,7 @@ func (c *Controller) fetchWorkloads(policy *v1alpha1.PropagationPolicy) ([]*unst
 }
 // deleteResourceBinding will delete ResourceBinding.
-func (c *Controller) deleteResourceBinding(binding v1alpha1.ResourceBinding) error {
+func (c *Controller) deleteResourceBinding(binding workv1alpha1.ResourceBinding) error {
     err := c.Client.Delete(context.TODO(), &binding)
     if err != nil && errors.IsNotFound(err) {
         klog.Infof("ResourceBinding %s/%s is already not exist.", binding.GetNamespace(), binding.GetName())
@@ -126,18 +127,18 @@ func (c *Controller) deleteResourceBinding(binding v1alpha1.ResourceBinding) err
 // calculateResourceBindings will get orphanBindings and workloads that need to update or create.
 func (c *Controller) calculateResourceBindings(policy *v1alpha1.PropagationPolicy,
-    workloads []*unstructured.Unstructured) ([]v1alpha1.ResourceBinding, []*unstructured.Unstructured, error) {
+    workloads []*unstructured.Unstructured) ([]workv1alpha1.ResourceBinding, []*unstructured.Unstructured, error) {
     selector := labels.SelectorFromSet(labels.Set{
         util.PropagationPolicyNamespaceLabel: policy.Namespace,
         util.PropagationPolicyNameLabel: policy.Name,
     })
-    bindingList := &v1alpha1.ResourceBindingList{}
+    bindingList := &workv1alpha1.ResourceBindingList{}
     if err := c.Client.List(context.TODO(), bindingList, &client.ListOptions{LabelSelector: selector}); err != nil {
         klog.Errorf("Failed to list ResourceBinding in namespace %s", policy.GetNamespace())
         return nil, nil, err
     }
-    var orphanBindings []v1alpha1.ResourceBinding
+    var orphanBindings []workv1alpha1.ResourceBinding
     for _, binding := range bindingList.Items {
         isFind := false
         for _, workload := range workloads {
@@ -296,7 +297,7 @@ func (c *Controller) fetchWorkload(resourceSelector v1alpha1.ResourceSelector) (
 // ensureResourceBinding will ensure ResourceBindings are created or updated.
 func (c *Controller) ensureResourceBinding(policy *v1alpha1.PropagationPolicy, workload *unstructured.Unstructured) error {
     bindingName := names.GenerateBindingName(workload.GetNamespace(), workload.GetKind(), workload.GetName())
-    binding := &v1alpha1.ResourceBinding{
+    binding := &workv1alpha1.ResourceBinding{
         ObjectMeta: metav1.ObjectMeta{
             Name: bindingName,
             Namespace: policy.GetNamespace(),
@@ -308,8 +309,8 @@ func (c *Controller) ensureResourceBinding(policy *v1alpha1.PropagationPolicy, w
                 util.PropagationPolicyNameLabel: policy.GetName(),
             },
         },
-        Spec: v1alpha1.ResourceBindingSpec{
-            Resource: v1alpha1.ObjectReference{
+        Spec: workv1alpha1.ResourceBindingSpec{
+            Resource: workv1alpha1.ObjectReference{
                APIVersion: workload.GetAPIVersion(),
                Kind: workload.GetKind(),
                Namespace: workload.GetNamespace(),

View File

@@ -6,8 +6,9 @@ import (
     "k8s.io/klog/v2"
-    clusterapi "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
-    "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+    clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
+    policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+    workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
     lister "github.com/karmada-io/karmada/pkg/generated/listers/policy/v1alpha1"
     "github.com/karmada-io/karmada/pkg/scheduler/cache"
     "github.com/karmada-io/karmada/pkg/scheduler/framework"
@@ -17,7 +18,7 @@ import (
 // ScheduleAlgorithm is the interface that should be implemented to schedule a resource to the target clusters.
 type ScheduleAlgorithm interface {
-    Schedule(context.Context, *v1alpha1.ResourceBinding) (scheduleResult ScheduleResult, err error)
+    Schedule(context.Context, *workv1alpha1.ResourceBinding) (scheduleResult ScheduleResult, err error)
 }
 // ScheduleResult includes the clusters selected.
@@ -45,7 +46,7 @@ func NewGenericScheduler(
     }
 }
-func (g *genericScheduler) Schedule(ctx context.Context, binding *v1alpha1.ResourceBinding) (result ScheduleResult, err error) {
+func (g *genericScheduler) Schedule(ctx context.Context, binding *workv1alpha1.ResourceBinding) (result ScheduleResult, err error) {
     klog.V(4).Infof("Scheduling %s/%s", binding.Namespace, binding.Name)
     clusterInfoSnapshot := g.schedulerCache.Snapshot()
@@ -87,9 +88,9 @@ func (g *genericScheduler) Schedule(ctx context.Context, binding *v1alpha1.Resou
 func (g *genericScheduler) findClustersThatFit(
     ctx context.Context,
     fwk framework.Framework,
-    placement *v1alpha1.Placement,
-    clusterInfo *cache.Snapshot) ([]*clusterapi.Cluster, error) {
-    var out []*clusterapi.Cluster
+    placement *policyv1alpha1.Placement,
+    clusterInfo *cache.Snapshot) ([]*clusterv1alpha1.Cluster, error) {
+    var out []*clusterv1alpha1.Cluster
     clusters := clusterInfo.GetReadyClusters()
     for _, c := range clusters {
         resMap := fwk.RunFilterPlugins(ctx, placement, c.Cluster())
@@ -108,8 +109,8 @@ func (g *genericScheduler) findClustersThatFit(
 func (g *genericScheduler) prioritizeClusters(
     ctx context.Context,
     fwk framework.Framework,
-    placement *v1alpha1.Placement,
-    clusters []*clusterapi.Cluster) (result framework.ClusterScoreList, err error) {
+    placement *policyv1alpha1.Placement,
+    clusters []*clusterv1alpha1.Cluster) (result framework.ClusterScoreList, err error) {
     scoresMap, err := fwk.RunScorePlugins(ctx, placement, clusters)
     if err != nil {
         return result, err
@@ -126,7 +127,7 @@ func (g *genericScheduler) prioritizeClusters(
     return result, nil
 }
-func (g *genericScheduler) selectClusters(clustersScore framework.ClusterScoreList, spreadConstraints []v1alpha1.SpreadConstraint, clusters []*clusterapi.Cluster) []string {
+func (g *genericScheduler) selectClusters(clustersScore framework.ClusterScoreList, spreadConstraints []policyv1alpha1.SpreadConstraint, clusters []*clusterv1alpha1.Cluster) []string {
     if len(spreadConstraints) != 0 {
         return g.matchSpreadConstraints(clusters, spreadConstraints)
     }
@@ -138,23 +139,23 @@ func (g *genericScheduler) selectClusters(clustersScore framework.ClusterScoreLi
     return out
 }
-func (g *genericScheduler) matchSpreadConstraints(clusters []*clusterapi.Cluster, spreadConstraints []v1alpha1.SpreadConstraint) []string {
+func (g *genericScheduler) matchSpreadConstraints(clusters []*clusterv1alpha1.Cluster, spreadConstraints []policyv1alpha1.SpreadConstraint) []string {
     state := util.NewSpreadGroup()
     g.runSpreadConstraintsFilter(clusters, spreadConstraints, state)
     return g.calSpreadResult(state)
 }
 // Now support spread by cluster. More rules will be implemented later.
-func (g *genericScheduler) runSpreadConstraintsFilter(clusters []*clusterapi.Cluster, spreadConstraints []v1alpha1.SpreadConstraint, spreadGroup *util.SpreadGroup) {
+func (g *genericScheduler) runSpreadConstraintsFilter(clusters []*clusterv1alpha1.Cluster, spreadConstraints []policyv1alpha1.SpreadConstraint, spreadGroup *util.SpreadGroup) {
     for _, spreadConstraint := range spreadConstraints {
         spreadGroup.InitialGroupRecord(spreadConstraint)
-        if spreadConstraint.SpreadByField == v1alpha1.SpreadByFieldCluster {
+        if spreadConstraint.SpreadByField == policyv1alpha1.SpreadByFieldCluster {
             g.groupByFieldCluster(clusters, spreadConstraint, spreadGroup)
         }
     }
 }
-func (g *genericScheduler) groupByFieldCluster(clusters []*clusterapi.Cluster, spreadConstraint v1alpha1.SpreadConstraint, spreadGroup *util.SpreadGroup) {
+func (g *genericScheduler) groupByFieldCluster(clusters []*clusterv1alpha1.Cluster, spreadConstraint policyv1alpha1.SpreadConstraint, spreadGroup *util.SpreadGroup) {
     for _, cluster := range clusters {
         clusterGroup := cluster.Name
         spreadGroup.GroupRecord[spreadConstraint][clusterGroup] = append(spreadGroup.GroupRecord[spreadConstraint][clusterGroup], cluster.Name)
@@ -173,7 +174,7 @@ func (g *genericScheduler) calSpreadResult(spreadGroup *util.SpreadGroup) []stri
 func (g *genericScheduler) chooseSpreadGroup(spreadGroup *util.SpreadGroup) []string {
     var feasibleClusters []string
     for spreadConstraint, clusterGroups := range spreadGroup.GroupRecord {
-        if spreadConstraint.SpreadByField == v1alpha1.SpreadByFieldCluster {
+        if spreadConstraint.SpreadByField == policyv1alpha1.SpreadByFieldCluster {
             if len(clusterGroups) < spreadConstraint.MinGroups {
                 return nil
             }

View File

@@ -18,8 +18,9 @@ import (
     "k8s.io/client-go/util/workqueue"
     "k8s.io/klog/v2"
-    memclusterapi "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
-    "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+    clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
+    policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+    workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
     karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
     informerfactory "github.com/karmada-io/karmada/pkg/generated/informers/externalversions"
     lister "github.com/karmada-io/karmada/pkg/generated/listers/policy/v1alpha1"
@@ -118,7 +119,7 @@ func (s *Scheduler) Run(ctx context.Context) {
 }
 func (s *Scheduler) onResourceBindingAdd(obj interface{}) {
-    propagationBinding := obj.(*v1alpha1.ResourceBinding)
+    propagationBinding := obj.(*workv1alpha1.ResourceBinding)
     if len(propagationBinding.Spec.Clusters) > 0 {
         return
     }
@@ -136,8 +137,8 @@ func (s *Scheduler) onResourceBindingUpdate(old, cur interface{}) {
 }
 func (s *Scheduler) onPropagationPolicyUpdate(old, cur interface{}) {
-    oldPropagationPolicy := old.(*v1alpha1.PropagationPolicy)
-    curPropagationPolicy := cur.(*v1alpha1.PropagationPolicy)
+    oldPropagationPolicy := old.(*policyv1alpha1.PropagationPolicy)
+    curPropagationPolicy := cur.(*policyv1alpha1.PropagationPolicy)
     if equality.Semantic.DeepEqual(oldPropagationPolicy.Spec.Placement, curPropagationPolicy.Spec.Placement) {
         klog.V(2).Infof("Ignore PropagationPolicy(%s/%s) which placement unchanged.", oldPropagationPolicy.Namespace, oldPropagationPolicy.Name)
         return
@@ -191,22 +192,22 @@ func (s *Scheduler) scheduleOne(key string) (err error) {
     if err != nil {
         return err
     }
-    propagationBinding, err := s.bindingLister.ResourceBindings(ns).Get(name)
+    resourceBinding, err := s.bindingLister.ResourceBindings(ns).Get(name)
     if errors.IsNotFound(err) {
         return nil
     }
-    scheduleResult, err := s.Algorithm.Schedule(context.TODO(), propagationBinding)
+    scheduleResult, err := s.Algorithm.Schedule(context.TODO(), resourceBinding)
     if err != nil {
         klog.V(2).Infof("failed scheduling ResourceBinding %s: %v", key, err)
         return err
     }
     klog.V(4).Infof("ResourceBinding %s scheduled to clusters %v", key, scheduleResult.SuggestedClusters)
-    binding := propagationBinding.DeepCopy()
-    targetClusters := make([]v1alpha1.TargetCluster, len(scheduleResult.SuggestedClusters))
+    binding := resourceBinding.DeepCopy()
+    targetClusters := make([]workv1alpha1.TargetCluster, len(scheduleResult.SuggestedClusters))
     for i, cluster := range scheduleResult.SuggestedClusters {
-        targetClusters[i] = v1alpha1.TargetCluster{Name: cluster}
+        targetClusters[i] = workv1alpha1.TargetCluster{Name: cluster}
     }
     binding.Spec.Clusters = targetClusters
@@ -252,7 +253,7 @@ func (s *Scheduler) handleErr(err error, key interface{}) {
 }
 func (s *Scheduler) addCluster(obj interface{}) {
-    cluster, ok := obj.(*memclusterapi.Cluster)
+    cluster, ok := obj.(*clusterv1alpha1.Cluster)
     if !ok {
         klog.Errorf("cannot convert to Cluster: %v", obj)
         return
@@ -263,7 +264,7 @@ func (s *Scheduler) addCluster(obj interface{}) {
 }
 func (s *Scheduler) updateCluster(_, newObj interface{}) {
-    newCluster, ok := newObj.(*memclusterapi.Cluster)
+    newCluster, ok := newObj.(*clusterv1alpha1.Cluster)
     if !ok {
         klog.Errorf("cannot convert newObj to Cluster: %v", newObj)
         return
@@ -273,19 +274,19 @@ func (s *Scheduler) updateCluster(_, newObj interface{}) {
 }
 func (s *Scheduler) deleteCluster(obj interface{}) {
-    var cluster *memclusterapi.Cluster
+    var cluster *clusterv1alpha1.Cluster
     switch t := obj.(type) {
-    case *memclusterapi.Cluster:
+    case *clusterv1alpha1.Cluster:
         cluster = t
     case cache.DeletedFinalStateUnknown:
         var ok bool
-        cluster, ok = t.Obj.(*memclusterapi.Cluster)
+        cluster, ok = t.Obj.(*clusterv1alpha1.Cluster)
         if !ok {
-            klog.Errorf("cannot convert to memclusterapi.Cluster: %v", t.Obj)
+            klog.Errorf("cannot convert to clusterv1alpha1.Cluster: %v", t.Obj)
             return
         }
     default:
-        klog.Errorf("cannot convert to memclusterapi.Cluster: %v", t)
+        klog.Errorf("cannot convert to clusterv1alpha1.Cluster: %v", t)
         return
     }
     klog.V(3).Infof("delete event for cluster %s", cluster.Name)

View File

@@ -9,12 +9,11 @@ import (
     "sigs.k8s.io/controller-runtime/pkg/client"
     "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
-    policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+    workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
 )
 // GetBindingClusterNames will get clusterName list from bind clusters field
-func GetBindingClusterNames(binding *policyv1alpha1.ResourceBinding) []string {
+func GetBindingClusterNames(binding *workv1alpha1.ResourceBinding) []string {
     var clusterNames []string
     for _, targetCluster := range binding.Spec.Clusters {
         clusterNames = append(clusterNames, targetCluster.Name)

View File

@@ -21,6 +21,7 @@ import (
     clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
     policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+    workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
     "github.com/karmada-io/karmada/pkg/util"
     "github.com/karmada-io/karmada/pkg/util/informermanager"
     "github.com/karmada-io/karmada/pkg/util/names"
@@ -273,9 +274,9 @@ func (d *ResourceDetector) ClaimPolicyForObject(object *unstructured.Unstructure
 }
 // BuildResourceBinding builds a desired ResourceBinding for object.
-func (d *ResourceDetector) BuildResourceBinding(object *unstructured.Unstructured, objectKey ClusterWideKey, policy *policyv1alpha1.PropagationPolicy) *policyv1alpha1.ResourceBinding {
+func (d *ResourceDetector) BuildResourceBinding(object *unstructured.Unstructured, objectKey ClusterWideKey, policy *policyv1alpha1.PropagationPolicy) *workv1alpha1.ResourceBinding {
     bindingName := names.GenerateBindingName(object.GetNamespace(), object.GetKind(), object.GetName())
-    propagationBinding := &policyv1alpha1.ResourceBinding{
+    propagationBinding := &workv1alpha1.ResourceBinding{
         ObjectMeta: metav1.ObjectMeta{
             Name: bindingName,
             Namespace: object.GetNamespace(),
@@ -287,8 +288,8 @@ func (d *ResourceDetector) BuildResourceBinding(object *unstructured.Unstructure
                 util.PropagationPolicyNameLabel: policy.GetName(),
             },
         },
-        Spec: policyv1alpha1.ResourceBindingSpec{
-            Resource: policyv1alpha1.ObjectReference{
+        Spec: workv1alpha1.ResourceBindingSpec{
+            Resource: workv1alpha1.ObjectReference{
                APIVersion: object.GetAPIVersion(),
                Kind: object.GetKind(),
                Namespace: object.GetNamespace(),