diff --git a/pkg/controllers/binding/binding_controller.go b/pkg/controllers/binding/binding_controller.go
index 857df3bbf..c3890ea7a 100644
--- a/pkg/controllers/binding/binding_controller.go
+++ b/pkg/controllers/binding/binding_controller.go
@@ -19,6 +19,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 
 	"github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
 	"github.com/karmada-io/karmada/pkg/util"
 	"github.com/karmada-io/karmada/pkg/util/names"
 	"github.com/karmada-io/karmada/pkg/util/overridemanager"
@@ -102,7 +103,7 @@ func (c *ResourceBindingController) syncBinding(binding *v1alpha1.ResourceBindin
 }
 
 // removeOrphanBindings will remove orphan works.
-func (c *ResourceBindingController) removeOrphanWorks(works []v1alpha1.Work) error {
+func (c *ResourceBindingController) removeOrphanWorks(works []workv1alpha1.Work) error {
 	for _, work := range works {
 		err := c.Client.Delete(context.TODO(), &work)
 		if err != nil {
@@ -114,17 +115,17 @@ func (c *ResourceBindingController) removeOrphanWorks(works []v1alpha1.Work) err
 }
 
 // findOrphanWorks will find orphan works that don't match current propagationBinding clusters.
-func (c *ResourceBindingController) findOrphanWorks(bindingNamespace string, bindingName string, clusterNames []string) ([]v1alpha1.Work, error) {
+func (c *ResourceBindingController) findOrphanWorks(bindingNamespace string, bindingName string, clusterNames []string) ([]workv1alpha1.Work, error) {
 	selector := labels.SelectorFromSet(labels.Set{
 		util.ResourceBindingNamespaceLabel: bindingNamespace,
 		util.ResourceBindingNameLabel:      bindingName,
 	})
-	workList := &v1alpha1.WorkList{}
+	workList := &workv1alpha1.WorkList{}
 	if err := c.Client.List(context.TODO(), workList, &client.ListOptions{LabelSelector: selector}); err != nil {
 		return nil, err
 	}
 
-	var orphanWorks []v1alpha1.Work
+	var orphanWorks []workv1alpha1.Work
 	expectClusters := sets.NewString(clusterNames...)
 	for _, work := range workList.Items {
 		workTargetCluster, err := names.GetClusterName(work.GetNamespace())
@@ -222,7 +223,7 @@ func (c *ResourceBindingController) ensureWork(workload *unstructured.Unstructur
 		return err
 	}
 
-	work := &v1alpha1.Work{
+	work := &workv1alpha1.Work{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      workName,
 			Namespace: workNamespace,
@@ -235,9 +236,9 @@ func (c *ResourceBindingController) ensureWork(workload *unstructured.Unstructur
 				util.ResourceBindingNameLabel:      binding.Name,
 			},
 		},
-		Spec: v1alpha1.WorkSpec{
-			Workload: v1alpha1.WorkloadTemplate{
-				Manifests: []v1alpha1.Manifest{
+		Spec: workv1alpha1.WorkSpec{
+			Workload: workv1alpha1.WorkloadTemplate{
+				Manifests: []workv1alpha1.Manifest{
 					{
 						RawExtension: runtime.RawExtension{
 							Raw: workloadJSON,
diff --git a/pkg/controllers/execution/execution_controller.go b/pkg/controllers/execution/execution_controller.go
index 3fdb83a6e..0505f893b 100644
--- a/pkg/controllers/execution/execution_controller.go
+++ b/pkg/controllers/execution/execution_controller.go
@@ -17,7 +17,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
 
 	"github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
-	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
 	"github.com/karmada-io/karmada/pkg/util"
 	"github.com/karmada-io/karmada/pkg/util/names"
 	"github.com/karmada-io/karmada/pkg/util/objectwatcher"
@@ -44,7 +44,7 @@ type Controller struct {
 func (c *Controller) Reconcile(req controllerruntime.Request) (controllerruntime.Result, error) {
 	klog.V(4).Infof("Reconciling Work %s", req.NamespacedName.String())
 
-	work := &policyv1alpha1.Work{}
+	work := &workv1alpha1.Work{}
 	if err := c.Client.Get(context.TODO(), req.NamespacedName, work); err != nil {
 		// The resource may no longer exist, in which case we stop processing.
 		if errors.IsNotFound(err) {
@@ -72,12 +72,12 @@ func (c *Controller) Reconcile(req controllerruntime.Request) (controllerruntime
 // SetupWithManager creates a controller and register to controller manager.
 func (c *Controller) SetupWithManager(mgr controllerruntime.Manager) error {
 	return controllerruntime.NewControllerManagedBy(mgr).
-		For(&policyv1alpha1.Work{}).
+		For(&workv1alpha1.Work{}).
 		WithEventFilter(predicate.GenerationChangedPredicate{}).
 		Complete(c)
 }
 
-func (c *Controller) syncWork(work *policyv1alpha1.Work) (controllerruntime.Result, error) {
+func (c *Controller) syncWork(work *workv1alpha1.Work) (controllerruntime.Result, error) {
 	err := c.dispatchWork(work)
 	if err != nil {
 		klog.Errorf("Failed to dispatch work %q, namespace is %v, err is %v", work.Name, work.Namespace, err)
@@ -88,9 +88,9 @@ func (c *Controller) syncWork(work *policyv1alpha1.Work) (controllerruntime.Resu
 }
 
 // isResourceApplied checking weather resource has been dispatched to member cluster or not
-func (c *Controller) isResourceApplied(workStatus *policyv1alpha1.WorkStatus) bool {
+func (c *Controller) isResourceApplied(workStatus *workv1alpha1.WorkStatus) bool {
 	for _, condition := range workStatus.Conditions {
-		if condition.Type == policyv1alpha1.WorkApplied {
+		if condition.Type == workv1alpha1.WorkApplied {
 			if condition.Status == metav1.ConditionTrue {
 				return true
 			}
@@ -101,7 +101,7 @@ func (c *Controller) isResourceApplied(workStatus *policyv1alpha1.WorkStatus) bo
 
 // tryDeleteWorkload tries to delete resource in the given member cluster.
 // Abort deleting when the member cluster is unready, otherwise we can't unjoin the member cluster when the member cluster is unready
-func (c *Controller) tryDeleteWorkload(work *policyv1alpha1.Work) error {
+func (c *Controller) tryDeleteWorkload(work *workv1alpha1.Work) error {
 	clusterName, err := names.GetClusterName(work.Namespace)
 	if err != nil {
 		klog.Errorf("Failed to get member cluster name for work %s/%s", work.Namespace, work.Name)
@@ -138,7 +138,7 @@ func (c *Controller) tryDeleteWorkload(work *policyv1alpha1.Work) error {
 	return nil
 }
 
-func (c *Controller) dispatchWork(work *policyv1alpha1.Work) error {
+func (c *Controller) dispatchWork(work *workv1alpha1.Work) error {
 	clusterName, err := names.GetClusterName(work.Namespace)
 	if err != nil {
 		klog.Errorf("Failed to get member cluster name for work %s/%s", work.Namespace, work.Name)
@@ -166,7 +166,7 @@ func (c *Controller) dispatchWork(work *policyv1alpha1.Work) error {
 }
 
 // syncToClusters ensures that the state of the given object is synchronized to member clusters.
-func (c *Controller) syncToClusters(cluster *v1alpha1.Cluster, work *policyv1alpha1.Work) error {
+func (c *Controller) syncToClusters(cluster *v1alpha1.Cluster, work *workv1alpha1.Work) error {
 	clusterDynamicClient, err := util.NewClusterDynamicClientSet(cluster, c.KubeClientSet)
 	if err != nil {
 		return err
@@ -218,7 +218,7 @@ func (c *Controller) syncToClusters(cluster *v1alpha1.Cluster, work *policyv1alp
 }
 
 // removeFinalizer remove finalizer from the given Work
-func (c *Controller) removeFinalizer(work *policyv1alpha1.Work) (controllerruntime.Result, error) {
+func (c *Controller) removeFinalizer(work *workv1alpha1.Work) (controllerruntime.Result, error) {
 	if !controllerutil.ContainsFinalizer(work, util.ExecutionControllerFinalizer) {
 		return controllerruntime.Result{}, nil
 	}
@@ -232,9 +232,9 @@ func (c *Controller) removeFinalizer(work *policyv1alpha1.Work) (controllerrunti
 }
 
 // updateAppliedCondition update the Applied condition for the given Work
-func (c *Controller) updateAppliedCondition(work *policyv1alpha1.Work) error {
+func (c *Controller) updateAppliedCondition(work *workv1alpha1.Work) error {
 	newWorkAppliedCondition := metav1.Condition{
-		Type:    policyv1alpha1.WorkApplied,
+		Type:    workv1alpha1.WorkApplied,
 		Status:  metav1.ConditionTrue,
 		Reason:  "AppliedSuccessful",
 		Message: "Manifest has been successfully applied",
diff --git a/pkg/controllers/status/workstatus_controller.go b/pkg/controllers/status/workstatus_controller.go
index f49b3296d..a52588060 100644
--- a/pkg/controllers/status/workstatus_controller.go
+++ b/pkg/controllers/status/workstatus_controller.go
@@ -20,7 +20,7 @@ import (
 	controllerruntime "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
-	"github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
 	"github.com/karmada-io/karmada/pkg/util"
 	"github.com/karmada-io/karmada/pkg/util/informermanager"
 	"github.com/karmada-io/karmada/pkg/util/names"
@@ -52,7 +52,7 @@ type WorkStatusController struct {
 func (c *WorkStatusController) Reconcile(req controllerruntime.Request) (controllerruntime.Result, error) {
 	klog.V(4).Infof("Reconciling status of Work %s.", req.NamespacedName.String())
 
-	work := &v1alpha1.Work{}
+	work := &workv1alpha1.Work{}
 	if err := c.Client.Get(context.TODO(), req.NamespacedName, work); err != nil {
 		// The resource may no longer exist, in which case we stop processing.
 		if errors.IsNotFound(err) {
@@ -71,7 +71,7 @@ func (c *WorkStatusController) Reconcile(req controllerruntime.Request) (control
 
 // buildResourceInformers builds informer dynamically for managed resources in member cluster.
 // The created informer watches resource change and then sync to the relevant Work object.
-func (c *WorkStatusController) buildResourceInformers(work *v1alpha1.Work) (controllerruntime.Result, error) {
+func (c *WorkStatusController) buildResourceInformers(work *workv1alpha1.Work) (controllerruntime.Result, error) {
 	err := c.registerInformersAndStart(work)
 	if err != nil {
 		klog.Errorf("Failed to register informer for Work %s/%s. Error: %v.", work.GetNamespace(), work.GetName(), err)
@@ -124,7 +124,7 @@ func (c *WorkStatusController) syncWorkStatus(key util.QueueKey) error {
 		return nil
 	}
 
-	workObject := &v1alpha1.Work{}
+	workObject := &workv1alpha1.Work{}
 	if err := c.Client.Get(context.TODO(), client.ObjectKey{Namespace: workNamespace, Name: workName}, workObject); err != nil {
 		// Stop processing if resource no longer exist.
 		if errors.IsNotFound(err) {
@@ -174,7 +174,7 @@ func (c *WorkStatusController) handleDeleteEvent(key string) error {
 	}
 
 	workName := names.GenerateBindingName(clusterWorkload.Namespace, clusterWorkload.GVK.Kind, clusterWorkload.Name)
-	work := &v1alpha1.Work{}
+	work := &workv1alpha1.Work{}
 	if err := c.Client.Get(context.TODO(), client.ObjectKey{Namespace: executionSpace, Name: workName}, work); err != nil {
 		// Stop processing if resource no longer exist.
 		if errors.IsNotFound(err) {
@@ -194,7 +194,7 @@ func (c *WorkStatusController) handleDeleteEvent(key string) error {
 	return c.recreateResourceIfNeeded(work, clusterWorkload)
 }
 
-func (c *WorkStatusController) recreateResourceIfNeeded(work *v1alpha1.Work, clusterWorkload util.ClusterWorkload) error {
+func (c *WorkStatusController) recreateResourceIfNeeded(work *workv1alpha1.Work, clusterWorkload util.ClusterWorkload) error {
 	for _, rawManifest := range work.Spec.Workload.Manifests {
 		manifest := &unstructured.Unstructured{}
 		if err := manifest.UnmarshalJSON(rawManifest.Raw); err != nil {
@@ -213,7 +213,7 @@ func (c *WorkStatusController) recreateResourceIfNeeded(work *v1alpha1.Work, clu
 }
 
 // reflectStatus grabs cluster object's running status then updates to it's owner object(Work).
-func (c *WorkStatusController) reflectStatus(work *v1alpha1.Work, clusterObj *unstructured.Unstructured) error {
+func (c *WorkStatusController) reflectStatus(work *workv1alpha1.Work, clusterObj *unstructured.Unstructured) error {
 	// Stop processing if resource(such as ConfigMap,Secret,ClusterRole, etc.) doesn't contain 'spec.status' fields.
 	statusMap, exist, err := unstructured.NestedMap(clusterObj.Object, "status")
 	if err != nil {
@@ -235,7 +235,7 @@ func (c *WorkStatusController) reflectStatus(work *v1alpha1.Work, clusterObj *un
 		return err
 	}
 
-	manifestStatus := v1alpha1.ManifestStatus{
+	manifestStatus := workv1alpha1.ManifestStatus{
 		Identifier: *identifier,
 		Status:     *rawExtension,
 	}
@@ -245,7 +245,7 @@ func (c *WorkStatusController) reflectStatus(work *v1alpha1.Work, clusterObj *un
 	return c.Client.Status().Update(context.TODO(), work)
 }
 
-func (c *WorkStatusController) buildStatusIdentifier(work *v1alpha1.Work, clusterObj *unstructured.Unstructured) (*v1alpha1.ResourceIdentifier, error) {
+func (c *WorkStatusController) buildStatusIdentifier(work *workv1alpha1.Work, clusterObj *unstructured.Unstructured) (*workv1alpha1.ResourceIdentifier, error) {
 	ordinal, err := c.getManifestIndex(work.Spec.Workload.Manifests, clusterObj)
 	if err != nil {
 		return nil, err
@@ -256,7 +256,7 @@ func (c *WorkStatusController) buildStatusIdentifier(work *v1alpha1.Work, cluste
 		return nil, err
 	}
 
-	identifier := &v1alpha1.ResourceIdentifier{
+	identifier := &workv1alpha1.ResourceIdentifier{
 		Ordinal: ordinal,
 		// TODO(RainbowMango): Consider merge Group and Version to APIVersion from Work API.
 		Group:   groupVersion.Group,
@@ -283,13 +283,13 @@ func (c *WorkStatusController) buildStatusRawExtension(status map[string]interfa
 	}, nil
 }
 
-func (c *WorkStatusController) mergeStatus(statuses []v1alpha1.ManifestStatus, newStatus v1alpha1.ManifestStatus) []v1alpha1.ManifestStatus {
+func (c *WorkStatusController) mergeStatus(statuses []workv1alpha1.ManifestStatus, newStatus workv1alpha1.ManifestStatus) []workv1alpha1.ManifestStatus {
 	// TODO(RainbowMango): update 'statuses' if 'newStatus' already exist.
 	// For now, we only have at most one manifest in Work, so just override current 'statuses'.
-	return []v1alpha1.ManifestStatus{newStatus}
+	return []workv1alpha1.ManifestStatus{newStatus}
 }
 
-func (c *WorkStatusController) getManifestIndex(manifests []v1alpha1.Manifest, clusterObj *unstructured.Unstructured) (int, error) {
+func (c *WorkStatusController) getManifestIndex(manifests []workv1alpha1.Manifest, clusterObj *unstructured.Unstructured) (int, error) {
 	for index, rawManifest := range manifests {
 		manifest := &unstructured.Unstructured{}
 		if err := manifest.UnmarshalJSON(rawManifest.Raw); err != nil {
@@ -307,7 +307,7 @@ func (c *WorkStatusController) getManifestIndex(manifests []v1alpha1.Manifest, c
 	return -1, fmt.Errorf("no such manifest exist")
 }
 
-func (c *WorkStatusController) getRawManifest(manifests []v1alpha1.Manifest, clusterObj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
+func (c *WorkStatusController) getRawManifest(manifests []workv1alpha1.Manifest, clusterObj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
 	for _, rawManifest := range manifests {
 		manifest := &unstructured.Unstructured{}
 		if err := manifest.UnmarshalJSON(rawManifest.Raw); err != nil {
@@ -361,7 +361,7 @@ func (c *WorkStatusController) getObjectFromCache(key string) (*unstructured.Uns
 
 // registerInformersAndStart builds informer manager for cluster if it doesn't exist, then constructs informers for gvr
 // and start it.
-func (c *WorkStatusController) registerInformersAndStart(work *v1alpha1.Work) error {
+func (c *WorkStatusController) registerInformersAndStart(work *workv1alpha1.Work) error {
 	clusterName, err := names.GetClusterName(work.GetNamespace())
 	if err != nil {
 		klog.Errorf("Failed to get member cluster name by %s. Error: %v.", work.GetNamespace(), err)
@@ -398,7 +398,7 @@ func (c *WorkStatusController) registerInformersAndStart(work *v1alpha1.Work) er
 }
 
 // getGVRsFromWork traverses the manifests in work to find groupVersionResource list.
-func (c *WorkStatusController) getGVRsFromWork(work *v1alpha1.Work) (map[schema.GroupVersionResource]bool, error) {
+func (c *WorkStatusController) getGVRsFromWork(work *workv1alpha1.Work) (map[schema.GroupVersionResource]bool, error) {
 	gvrTargets := map[schema.GroupVersionResource]bool{}
 	for _, manifest := range work.Spec.Workload.Manifests {
 		workload := &unstructured.Unstructured{}
@@ -436,5 +436,5 @@ func (c *WorkStatusController) getSingleClusterManager(clusterName string) (info
 
 // SetupWithManager creates a controller and register to controller manager.
 func (c *WorkStatusController) SetupWithManager(mgr controllerruntime.Manager) error {
-	return controllerruntime.NewControllerManagedBy(mgr).For(&v1alpha1.Work{}).Complete(c)
+	return controllerruntime.NewControllerManagedBy(mgr).For(&workv1alpha1.Work{}).Complete(c)
 }
diff --git a/pkg/util/binding.go b/pkg/util/binding.go
index f3443c19d..9c81a9f4a 100644
--- a/pkg/util/binding.go
+++ b/pkg/util/binding.go
@@ -9,11 +9,12 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 
-	"github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
 )
 
 // GetBindingClusterNames will get clusterName list from bind clusters field
-func GetBindingClusterNames(binding *v1alpha1.ResourceBinding) []string {
+func GetBindingClusterNames(binding *policyv1alpha1.ResourceBinding) []string {
 	var clusterNames []string
 	for _, targetCluster := range binding.Spec.Clusters {
 		clusterNames = append(clusterNames, targetCluster.Name)
@@ -23,11 +24,11 @@ func GetBindingClusterNames(binding *v1alpha1.ResourceBinding) []string {
 
 // CreateOrUpdateWork creates a Work object if not exist, or updates if it already exist.
 func CreateOrUpdateWork(client client.Client, objectMeta metav1.ObjectMeta, rawExtension []byte) error {
-	work := &v1alpha1.Work{
+	work := &workv1alpha1.Work{
 		ObjectMeta: objectMeta,
-		Spec: v1alpha1.WorkSpec{
-			Workload: v1alpha1.WorkloadTemplate{
-				Manifests: []v1alpha1.Manifest{
+		Spec: workv1alpha1.WorkSpec{
+			Workload: workv1alpha1.WorkloadTemplate{
+				Manifests: []workv1alpha1.Manifest{
 					{
 						RawExtension: runtime.RawExtension{
 							Raw: rawExtension,
diff --git a/pkg/util/gclient/gclient.go b/pkg/util/gclient/gclient.go
index b4c14d4c6..77047bf2a 100644
--- a/pkg/util/gclient/gclient.go
+++ b/pkg/util/gclient/gclient.go
@@ -7,16 +7,18 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
-	propagationv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
 )
 
 // aggregatedScheme aggregates Kubernetes and extended schemes.
 var aggregatedScheme = runtime.NewScheme()
 
 func init() {
-	var _ = scheme.AddToScheme(aggregatedScheme)              // add Kubernetes schemes
-	var _ = propagationv1alpha1.AddToScheme(aggregatedScheme) // add propagation schemes
-	var _ = clusterv1alpha1.AddToScheme(aggregatedScheme)     // add cluster schemes
+	var _ = scheme.AddToScheme(aggregatedScheme)          // add Kubernetes schemes
+	var _ = clusterv1alpha1.AddToScheme(aggregatedScheme) // add cluster schemes
+	var _ = policyv1alpha1.AddToScheme(aggregatedScheme)  // add propagation schemes
+	var _ = workv1alpha1.AddToScheme(aggregatedScheme)    // add work schemes
 }
 
 // NewSchema returns a singleton schema set which aggregated Kubernetes's schemes and extended schemes.
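
Illustrative usage (not part of the change above): a minimal sketch, assuming a controller-runtime client whose scheme has the relocated work/v1alpha1 group registered, the same way gclient's init() now does. It builds a Work of the shape that ensureWork and CreateOrUpdateWork construct in the diff. The names "demo-work" and "karmada-es-member1", the inline ConfigMap manifest, and the use of ctrl.GetConfigOrDie() are placeholders for this example only.

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
)

func main() {
	// Register the work API group on a scheme, mirroring gclient's init().
	scheme := runtime.NewScheme()
	_ = workv1alpha1.AddToScheme(scheme)

	// Build a client against the current kubeconfig or in-cluster config.
	c, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}

	// A Work wrapping a single raw manifest; names are placeholders.
	work := &workv1alpha1.Work{
		ObjectMeta: metav1.ObjectMeta{Name: "demo-work", Namespace: "karmada-es-member1"},
		Spec: workv1alpha1.WorkSpec{
			Workload: workv1alpha1.WorkloadTemplate{
				Manifests: []workv1alpha1.Manifest{
					{RawExtension: runtime.RawExtension{
						Raw: []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"demo","namespace":"default"}}`),
					}},
				},
			},
		},
	}

	if err := c.Create(context.TODO(), work); err != nil {
		panic(err)
	}
}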