Rename PropagationWork to Work. (#169)

Signed-off-by: RainbowMango <renhongcai@huawei.com>
This commit is contained in:
Hongcai Ren 2021-02-09 14:30:34 +08:00 committed by GitHub
parent eb6265a78a
commit 5f7305f1d7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
29 changed files with 765 additions and 766 deletions

View File

@ -32,8 +32,8 @@ The Karmada Controller Manager runs the various controllers, the controllers wa
1. Cluster Controller: attach kubernetes clusters to Karmada for managing the lifecycle of the clusters by creating cluster object.
2. Policy Controller: the controller watches PropagationPolicy objects. When PropagationPolicy object is added, it selects a group of resources matching the resourceSelector and create PropagationBinding with each single resource object.
3. Binding Controller: the controller watches PropagationBinding object and create PropagationWork object corresponding to each cluster with single resource manifest.
4. Execution Controller: the controller watches PropagationWork objects.When PropagationWork objects are created, it will distribute the resources to member clusters.
3. Binding Controller: the controller watches PropagationBinding objects and creates a Work object corresponding to each cluster with a single resource manifest.
4. Execution Controller: the controller watches Work objects. When Work objects are created, it will distribute the resources to member clusters.
## Concepts

View File

@ -6,21 +6,21 @@ metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.4.1
creationTimestamp: null
name: propagationworks.policy.karmada.io
name: works.policy.karmada.io
spec:
group: policy.karmada.io
names:
kind: PropagationWork
listKind: PropagationWorkList
plural: propagationworks
singular: propagationwork
kind: Work
listKind: WorkList
plural: works
singular: work
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: PropagationWork defines a list of resources to be deployed on
the member cluster.
description: Work defines a list of resources to be deployed on the member
cluster.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
@ -35,7 +35,7 @@ spec:
metadata:
type: object
spec:
description: Spec represents the desired behavior of PropagationWork.
description: Spec represents the desired behavior of Work.
properties:
workload:
description: Workload represents the manifest workload to be deployed
@ -58,12 +58,11 @@ spec:
conditions:
description: 'Conditions contain the different condition statuses
for this work. Valid condition types are: 1. Applied represents
workload in PropagationWork is applied successfully on a managed
cluster. 2. Progressing represents workload in PropagationWork is
being applied on a managed cluster. 3. Available represents workload
in PropagationWork exists on the managed cluster. 4. Degraded represents
the current state of workload does not match the desired state for
a certain period.'
workload in Work is applied successfully on a managed cluster. 2.
Progressing represents workload in Work is being applied on a managed
cluster. 3. Available represents workload in Work exists on the
managed cluster. 4. Degraded represents the current state of workload
does not match the desired state for a certain period.'
items:
description: "Condition contains details for one aspect of the current
state of this API Resource. --- This struct is intended for direct

View File

@ -153,7 +153,7 @@ func setupControllers(mgr controllerruntime.Manager, stopChan <-chan struct{}) {
klog.Fatalf("Failed to setup execution controller: %v", err)
}
workStatusController := &status.PropagationWorkStatusController{
workStatusController := &status.WorkStatusController{
Client: mgr.GetClient(),
DynamicClient: dynamicClientSet,
EventRecorder: mgr.GetEventRecorderFor(status.WorkStatusControllerName),

View File

@ -69,7 +69,7 @@ function installCRDs() {
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/cluster.karmada.io_clusters.yaml"
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/policy.karmada.io_propagationpolicies.yaml"
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/policy.karmada.io_propagationbindings.yaml"
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/policy.karmada.io_propagationworks.yaml"
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/policy.karmada.io_works.yaml"
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/policy.karmada.io_overridepolicies.yaml"
}

View File

@ -76,7 +76,7 @@ function installCRDs() {
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/cluster.karmada.io_clusters.yaml"
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/policy.karmada.io_propagationpolicies.yaml"
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/policy.karmada.io_propagationbindings.yaml"
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/policy.karmada.io_propagationworks.yaml"
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/policy.karmada.io_works.yaml"
kubectl apply -f "${REPO_ROOT}/artifacts/deploy/policy.karmada.io_overridepolicies.yaml"
}

View File

@ -9,21 +9,21 @@ import (
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:subresource:status
// PropagationWork defines a list of resources to be deployed on the member cluster.
type PropagationWork struct {
// Work defines a list of resources to be deployed on the member cluster.
type Work struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// Spec represents the desired behavior of PropagationWork.
Spec PropagationWorkSpec `json:"spec"`
// Spec represents the desired behavior of Work.
Spec WorkSpec `json:"spec"`
// Status represents the status of PropagationStatus.
// +optional
Status PropagationWorkStatus `json:"status,omitempty"`
Status WorkStatus `json:"status,omitempty"`
}
// PropagationWorkSpec defines the desired state of PropagationWork.
type PropagationWorkSpec struct {
// WorkSpec defines the desired state of Work.
type WorkSpec struct {
// Workload represents the manifest workload to be deployed on managed cluster.
Workload WorkloadTemplate `json:"workload,omitempty"`
}
@ -41,13 +41,13 @@ type Manifest struct {
runtime.RawExtension `json:",inline"`
}
// PropagationWorkStatus defines the observed state of PropagationWork.
type PropagationWorkStatus struct {
// WorkStatus defines the observed state of Work.
type WorkStatus struct {
// Conditions contain the different condition statuses for this work.
// Valid condition types are:
// 1. Applied represents workload in PropagationWork is applied successfully on a managed cluster.
// 2. Progressing represents workload in PropagationWork is being applied on a managed cluster.
// 3. Available represents workload in PropagationWork exists on the managed cluster.
// 1. Applied represents workload in Work is applied successfully on a managed cluster.
// 2. Progressing represents workload in Work is being applied on a managed cluster.
// 3. Available represents workload in Work exists on the managed cluster.
// 4. Degraded represents the current state of workload does not match the desired
// state for a certain period.
// +optional
@ -95,27 +95,27 @@ type ResourceIdentifier struct {
}
const (
// WorkApplied represents that the resource defined in PropagationWork is
// WorkApplied represents that the resource defined in Work is
// successfully applied on the managed cluster.
WorkApplied string = "Applied"
// WorkProgressing represents that the resource defined in PropagationWork is
// WorkProgressing represents that the resource defined in Work is
// in the progress to be applied on the managed cluster.
WorkProgressing string = "Progressing"
// WorkAvailable represents that all resources of the PropagationWork exists on
// WorkAvailable represents that all resources of the Work exists on
// the managed cluster.
WorkAvailable string = "Available"
// WorkDegraded represents that the current state of PropagationWork does not match
// WorkDegraded represents that the current state of Work does not match
// the desired state for a certain period.
WorkDegraded string = "Degraded"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PropagationWorkList is a collection of PropagationWork.
type PropagationWorkList struct {
// WorkList is a collection of Work.
type WorkList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
// Items holds a list of PropagationWork.
Items []PropagationWork `json:"items"`
// Items holds a list of Work.
Items []Work `json:"items"`
}

View File

@ -513,114 +513,6 @@ func (in *PropagationSpec) DeepCopy() *PropagationSpec {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PropagationWork) DeepCopyInto(out *PropagationWork) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropagationWork.
func (in *PropagationWork) DeepCopy() *PropagationWork {
if in == nil {
return nil
}
out := new(PropagationWork)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PropagationWork) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PropagationWorkList) DeepCopyInto(out *PropagationWorkList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PropagationWork, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropagationWorkList.
func (in *PropagationWorkList) DeepCopy() *PropagationWorkList {
if in == nil {
return nil
}
out := new(PropagationWorkList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PropagationWorkList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PropagationWorkSpec) DeepCopyInto(out *PropagationWorkSpec) {
*out = *in
in.Workload.DeepCopyInto(&out.Workload)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropagationWorkSpec.
func (in *PropagationWorkSpec) DeepCopy() *PropagationWorkSpec {
if in == nil {
return nil
}
out := new(PropagationWorkSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PropagationWorkStatus) DeepCopyInto(out *PropagationWorkStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ManifestStatuses != nil {
in, out := &in.ManifestStatuses, &out.ManifestStatuses
*out = make([]ManifestStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropagationWorkStatus.
func (in *PropagationWorkStatus) DeepCopy() *PropagationWorkStatus {
if in == nil {
return nil
}
out := new(PropagationWorkStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceIdentifier) DeepCopyInto(out *ResourceIdentifier) {
*out = *in
@ -711,6 +603,114 @@ func (in *TargetCluster) DeepCopy() *TargetCluster {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Work) DeepCopyInto(out *Work) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Work.
func (in *Work) DeepCopy() *Work {
if in == nil {
return nil
}
out := new(Work)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Work) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkList) DeepCopyInto(out *WorkList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Work, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkList.
func (in *WorkList) DeepCopy() *WorkList {
if in == nil {
return nil
}
out := new(WorkList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *WorkList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkSpec) DeepCopyInto(out *WorkSpec) {
*out = *in
in.Workload.DeepCopyInto(&out.Workload)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkSpec.
func (in *WorkSpec) DeepCopy() *WorkSpec {
if in == nil {
return nil
}
out := new(WorkSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkStatus) DeepCopyInto(out *WorkStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ManifestStatuses != nil {
in, out := &in.ManifestStatuses, &out.ManifestStatuses
*out = make([]ManifestStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkStatus.
func (in *WorkStatus) DeepCopy() *WorkStatus {
if in == nil {
return nil
}
out := new(WorkStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadTemplate) DeepCopyInto(out *WorkloadTemplate) {
*out = *in

View File

@ -48,8 +48,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&PropagationBindingList{},
&PropagationPolicy{},
&PropagationPolicyList{},
&PropagationWork{},
&PropagationWorkList{},
&Work{},
&WorkList{},
)
// AddToGroupVersion allows the serialization of client types like ListOptions.
v1.AddToGroupVersion(scheme, SchemeGroupVersion)

View File

@ -57,8 +57,8 @@ func (c *PropagationBindingController) Reconcile(req controllerruntime.Request)
}
if !binding.DeletionTimestamp.IsZero() {
// Do nothing, just return as we have added owner reference to PropagationWork.
// PropagationWork will be removed automatically by garbage collector.
// Do nothing, just return as we have added owner reference to Work.
// Work will be removed automatically by garbage collector.
return controllerruntime.Result{}, nil
}
@ -71,68 +71,68 @@ func (c *PropagationBindingController) Reconcile(req controllerruntime.Request)
return c.syncBinding(binding)
}
// isBindingReady will check if propagationBinding is ready to build propagationWork.
// isBindingReady will check if propagationBinding is ready to build Work.
func (c *PropagationBindingController) isBindingReady(binding *v1alpha1.PropagationBinding) bool {
return len(binding.Spec.Clusters) != 0
}
// syncBinding will sync propagationBinding to propagationWorks.
// syncBinding will sync propagationBinding to Works.
func (c *PropagationBindingController) syncBinding(binding *v1alpha1.PropagationBinding) (controllerruntime.Result, error) {
clusterNames := c.getBindingClusterNames(binding)
ownerLabel := names.GenerateOwnerLabelValue(binding.GetNamespace(), binding.GetName())
works, err := c.findOrphanWorks(ownerLabel, clusterNames)
if err != nil {
klog.Errorf("Failed to find orphan propagationWorks by propagationBinding %s/%s. Error: %v.",
klog.Errorf("Failed to find orphan works by propagationBinding %s/%s. Error: %v.",
binding.GetNamespace(), binding.GetName(), err)
return controllerruntime.Result{Requeue: true}, err
}
err = c.removeOrphanWorks(works)
if err != nil {
klog.Errorf("Failed to remove orphan propagationWorks by propagationBinding %s/%s. Error: %v.",
klog.Errorf("Failed to remove orphan works by propagationBinding %s/%s. Error: %v.",
binding.GetNamespace(), binding.GetName(), err)
return controllerruntime.Result{Requeue: true}, err
}
err = c.transformBindingToWorks(binding, clusterNames)
if err != nil {
klog.Errorf("Failed to transform propagationBinding %s/%s to propagationWorks. Error: %v.",
klog.Errorf("Failed to transform propagationBinding %s/%s to works. Error: %v.",
binding.GetNamespace(), binding.GetName(), err)
return controllerruntime.Result{Requeue: true}, err
}
return controllerruntime.Result{}, nil
}
// removeOrphanBindings will remove orphan propagationWorks.
func (c *PropagationBindingController) removeOrphanWorks(works []v1alpha1.PropagationWork) error {
// removeOrphanWorks will remove orphan works.
func (c *PropagationBindingController) removeOrphanWorks(works []v1alpha1.Work) error {
for _, work := range works {
err := c.Client.Delete(context.TODO(), &work)
if err != nil {
return err
}
klog.Infof("Delete orphan propagationWork %s/%s successfully.", work.GetNamespace(), work.GetName())
klog.Infof("Delete orphan work %s/%s successfully.", work.GetNamespace(), work.GetName())
}
return nil
}
// findOrphanWorks will find orphan propagationWorks that don't match current propagationBinding clusters.
func (c *PropagationBindingController) findOrphanWorks(ownerLabel string, clusterNames []string) ([]v1alpha1.PropagationWork, error) {
// findOrphanWorks will find orphan works that don't match current propagationBinding clusters.
func (c *PropagationBindingController) findOrphanWorks(ownerLabel string, clusterNames []string) ([]v1alpha1.Work, error) {
labelRequirement, err := labels.NewRequirement(util.OwnerLabel, selection.Equals, []string{ownerLabel})
if err != nil {
klog.Errorf("Failed to new a requirement. Error: %v", err)
return nil, err
}
selector := labels.NewSelector().Add(*labelRequirement)
propagationWorkList := &v1alpha1.PropagationWorkList{}
if err := c.Client.List(context.TODO(), propagationWorkList, &client.ListOptions{LabelSelector: selector}); err != nil {
workList := &v1alpha1.WorkList{}
if err := c.Client.List(context.TODO(), workList, &client.ListOptions{LabelSelector: selector}); err != nil {
return nil, err
}
var orphanWorks []v1alpha1.PropagationWork
var orphanWorks []v1alpha1.Work
expectClusters := sets.NewString(clusterNames...)
for _, work := range propagationWorkList.Items {
for _, work := range workList.Items {
workTargetCluster, err := names.GetClusterName(work.GetNamespace())
if err != nil {
klog.Errorf("Failed to get cluster name which PropagationWork %s/%s belongs to. Error: %v.",
klog.Errorf("Failed to get cluster name which Work %s/%s belongs to. Error: %v.",
work.GetNamespace(), work.GetName(), err)
return nil, err
}
@ -168,7 +168,7 @@ func (c *PropagationBindingController) removeIrrelevantField(workload *unstructu
unstructured.RemoveNestedField(workload.Object, "status")
}
// transformBindingToWorks will transform propagationBinding to propagationWorks
// transformBindingToWorks will transform propagationBinding to Works
func (c *PropagationBindingController) transformBindingToWorks(binding *v1alpha1.PropagationBinding, clusterNames []string) error {
dynamicResource, err := restmapper.GetGroupVersionResource(c.RESTMapper,
schema.FromAPIVersionAndKind(binding.Spec.Resource.APIVersion, binding.Spec.Resource.Kind))
@ -185,15 +185,15 @@ func (c *PropagationBindingController) transformBindingToWorks(binding *v1alpha1
return err
}
err = c.ensurePropagationWork(workload, clusterNames, binding)
err = c.ensureWork(workload, clusterNames, binding)
if err != nil {
return err
}
return nil
}
// ensurePropagationWork ensure PropagationWork to be created or updated
func (c *PropagationBindingController) ensurePropagationWork(workload *unstructured.Unstructured, clusterNames []string,
// ensureWork ensures a Work is created or updated.
func (c *PropagationBindingController) ensureWork(workload *unstructured.Unstructured, clusterNames []string,
binding *v1alpha1.PropagationBinding) error {
c.removeIrrelevantField(workload)
@ -215,11 +215,11 @@ func (c *PropagationBindingController) ensurePropagationWork(workload *unstructu
executionSpace, err := names.GenerateExecutionSpaceName(clusterName)
if err != nil {
klog.Errorf("Failed to ensure PropagationWork for cluster: %s. Error: %v.", clusterName, err)
klog.Errorf("Failed to ensure Work for cluster: %s. Error: %v.", clusterName, err)
return err
}
propagationWork := &v1alpha1.PropagationWork{
work := &v1alpha1.Work{
ObjectMeta: metav1.ObjectMeta{
Name: binding.Name,
Namespace: executionSpace,
@ -229,7 +229,7 @@ func (c *PropagationBindingController) ensurePropagationWork(workload *unstructu
},
Labels: map[string]string{util.OwnerLabel: names.GenerateOwnerLabelValue(binding.GetNamespace(), binding.GetName())},
},
Spec: v1alpha1.PropagationWorkSpec{
Spec: v1alpha1.WorkSpec{
Workload: v1alpha1.WorkloadTemplate{
Manifests: []v1alpha1.Manifest{
{
@ -242,22 +242,22 @@ func (c *PropagationBindingController) ensurePropagationWork(workload *unstructu
},
}
runtimeObject := propagationWork.DeepCopy()
runtimeObject := work.DeepCopy()
operationResult, err := controllerutil.CreateOrUpdate(context.TODO(), c.Client, runtimeObject, func() error {
runtimeObject.Spec = propagationWork.Spec
runtimeObject.Spec = work.Spec
return nil
})
if err != nil {
klog.Errorf("Failed to create/update propagationWork %s/%s. Error: %v", propagationWork.GetNamespace(), propagationWork.GetName(), err)
klog.Errorf("Failed to create/update work %s/%s. Error: %v", work.GetNamespace(), work.GetName(), err)
return err
}
if operationResult == controllerutil.OperationResultCreated {
klog.Infof("Create propagationWork %s/%s successfully.", propagationWork.GetNamespace(), propagationWork.GetName())
klog.Infof("Create work %s/%s successfully.", work.GetNamespace(), work.GetName())
} else if operationResult == controllerutil.OperationResultUpdated {
klog.Infof("Update propagationWork %s/%s successfully.", propagationWork.GetNamespace(), propagationWork.GetName())
klog.Infof("Update work %s/%s successfully.", work.GetNamespace(), work.GetName())
} else {
klog.V(2).Infof("PropagationWork %s/%s is up to date.", propagationWork.GetNamespace(), propagationWork.GetName())
klog.V(2).Infof("Work %s/%s is up to date.", work.GetNamespace(), work.GetName())
}
}
return nil

View File

@ -29,9 +29,9 @@ const (
ControllerName = "execution-controller"
)
// Controller is to sync PropagationWork.
// Controller is to sync Work.
type Controller struct {
client.Client // used to operate PropagationWork resources.
client.Client // used to operate Work resources.
KubeClientSet kubernetes.Interface // used to get kubernetes resources.
EventRecorder record.EventRecorder
RESTMapper meta.RESTMapper
@ -42,9 +42,9 @@ type Controller struct {
// The Controller will requeue the Request to be processed again if an error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (c *Controller) Reconcile(req controllerruntime.Request) (controllerruntime.Result, error) {
klog.V(4).Infof("Reconciling PropagationWork %s", req.NamespacedName.String())
klog.V(4).Infof("Reconciling Work %s", req.NamespacedName.String())
work := &policyv1alpha1.PropagationWork{}
work := &policyv1alpha1.Work{}
if err := c.Client.Get(context.TODO(), req.NamespacedName, work); err != nil {
// The resource may no longer exist, in which case we stop processing.
if errors.IsNotFound(err) {
@ -59,7 +59,7 @@ func (c *Controller) Reconcile(req controllerruntime.Request) (controllerruntime
if applied {
err := c.tryDeleteWorkload(work)
if err != nil {
klog.Errorf("Failed to delete propagationWork %v, namespace is %v, err is %v", work.Name, work.Namespace, err)
klog.Errorf("Failed to delete work %v, namespace is %v, err is %v", work.Name, work.Namespace, err)
return controllerruntime.Result{Requeue: true}, err
}
}
@ -72,15 +72,15 @@ func (c *Controller) Reconcile(req controllerruntime.Request) (controllerruntime
// SetupWithManager creates a controller and register to controller manager.
func (c *Controller) SetupWithManager(mgr controllerruntime.Manager) error {
return controllerruntime.NewControllerManagedBy(mgr).
For(&policyv1alpha1.PropagationWork{}).
For(&policyv1alpha1.Work{}).
WithEventFilter(predicate.GenerationChangedPredicate{}).
Complete(c)
}
func (c *Controller) syncWork(propagationWork *policyv1alpha1.PropagationWork) (controllerruntime.Result, error) {
err := c.dispatchPropagationWork(propagationWork)
func (c *Controller) syncWork(work *policyv1alpha1.Work) (controllerruntime.Result, error) {
err := c.dispatchWork(work)
if err != nil {
klog.Errorf("Failed to dispatch propagationWork %q, namespace is %v, err is %v", propagationWork.Name, propagationWork.Namespace, err)
klog.Errorf("Failed to dispatch work %q, namespace is %v, err is %v", work.Name, work.Namespace, err)
return controllerruntime.Result{Requeue: true}, err
}
@ -88,8 +88,8 @@ func (c *Controller) syncWork(propagationWork *policyv1alpha1.PropagationWork) (
}
// isResourceApplied checks whether the resource has been dispatched to the member cluster or not
func (c *Controller) isResourceApplied(propagationWorkStatus *policyv1alpha1.PropagationWorkStatus) bool {
for _, condition := range propagationWorkStatus.Conditions {
func (c *Controller) isResourceApplied(workStatus *policyv1alpha1.WorkStatus) bool {
for _, condition := range workStatus.Conditions {
if condition.Type == policyv1alpha1.WorkApplied {
if condition.Status == metav1.ConditionTrue {
return true
@ -101,10 +101,10 @@ func (c *Controller) isResourceApplied(propagationWorkStatus *policyv1alpha1.Pro
// tryDeleteWorkload tries to delete resource in the given member cluster.
// Abort deleting when the member cluster is unready, otherwise we can't unjoin the member cluster when the member cluster is unready
func (c *Controller) tryDeleteWorkload(propagationWork *policyv1alpha1.PropagationWork) error {
clusterName, err := names.GetClusterName(propagationWork.Namespace)
func (c *Controller) tryDeleteWorkload(work *policyv1alpha1.Work) error {
clusterName, err := names.GetClusterName(work.Namespace)
if err != nil {
klog.Errorf("Failed to get member cluster name for propagationWork %s/%s", propagationWork.Namespace, propagationWork.Name)
klog.Errorf("Failed to get member cluster name for work %s/%s", work.Namespace, work.Name)
return err
}
@ -120,7 +120,7 @@ func (c *Controller) tryDeleteWorkload(propagationWork *policyv1alpha1.Propagati
return nil
}
for _, manifest := range propagationWork.Spec.Workload.Manifests {
for _, manifest := range work.Spec.Workload.Manifests {
workload := &unstructured.Unstructured{}
err := workload.UnmarshalJSON(manifest.Raw)
if err != nil {
@ -138,10 +138,10 @@ func (c *Controller) tryDeleteWorkload(propagationWork *policyv1alpha1.Propagati
return nil
}
func (c *Controller) dispatchPropagationWork(propagationWork *policyv1alpha1.PropagationWork) error {
clusterName, err := names.GetClusterName(propagationWork.Namespace)
func (c *Controller) dispatchWork(work *policyv1alpha1.Work) error {
clusterName, err := names.GetClusterName(work.Namespace)
if err != nil {
klog.Errorf("Failed to get member cluster name for propagationWork %s/%s", propagationWork.Namespace, propagationWork.Name)
klog.Errorf("Failed to get member cluster name for work %s/%s", work.Namespace, work.Name)
return err
}
@ -156,9 +156,9 @@ func (c *Controller) dispatchPropagationWork(propagationWork *policyv1alpha1.Pro
return fmt.Errorf("cluster %s is not ready, requeuing operation until cluster state is ready", cluster.Name)
}
err = c.syncToClusters(cluster, propagationWork)
err = c.syncToClusters(cluster, work)
if err != nil {
klog.Errorf("Failed to dispatch propagationWork %v, namespace is %v, err is %v", propagationWork.Name, propagationWork.Namespace, err)
klog.Errorf("Failed to dispatch work %v, namespace is %v, err is %v", work.Name, work.Namespace, err)
return err
}
@ -166,13 +166,13 @@ func (c *Controller) dispatchPropagationWork(propagationWork *policyv1alpha1.Pro
}
// syncToClusters ensures that the state of the given object is synchronized to member clusters.
func (c *Controller) syncToClusters(cluster *v1alpha1.Cluster, propagationWork *policyv1alpha1.PropagationWork) error {
func (c *Controller) syncToClusters(cluster *v1alpha1.Cluster, work *policyv1alpha1.Work) error {
clusterDynamicClient, err := util.NewClusterDynamicClientSet(cluster, c.KubeClientSet)
if err != nil {
return err
}
for _, manifest := range propagationWork.Spec.Workload.Manifests {
for _, manifest := range work.Spec.Workload.Manifests {
workload := &unstructured.Unstructured{}
err := workload.UnmarshalJSON(manifest.Raw)
if err != nil {
@ -180,9 +180,9 @@ func (c *Controller) syncToClusters(cluster *v1alpha1.Cluster, propagationWork *
return err
}
util.MergeLabel(workload, util.OwnerLabel, names.GenerateOwnerLabelValue(propagationWork.GetNamespace(), propagationWork.GetName()))
util.MergeLabel(workload, util.OwnerLabel, names.GenerateOwnerLabelValue(work.GetNamespace(), work.GetName()))
applied := c.isResourceApplied(&propagationWork.Status)
applied := c.isResourceApplied(&work.Status)
if applied {
// todo: get clusterObj from cache
dynamicResource, err := restmapper.GetGroupVersionResource(c.RESTMapper, workload.GroupVersionKind())
@ -209,9 +209,9 @@ func (c *Controller) syncToClusters(cluster *v1alpha1.Cluster, propagationWork *
return err
}
err := c.updateAppliedCondition(propagationWork)
err := c.updateAppliedCondition(work)
if err != nil {
klog.Errorf("Failed to update applied status for given propagationWork %v, namespace is %v, err is %v", propagationWork.Name, propagationWork.Namespace, err)
klog.Errorf("Failed to update applied status for given work %v, namespace is %v, err is %v", work.Name, work.Namespace, err)
return err
}
}
@ -219,30 +219,30 @@ func (c *Controller) syncToClusters(cluster *v1alpha1.Cluster, propagationWork *
return nil
}
// removeFinalizer remove finalizer from the given propagationWork
func (c *Controller) removeFinalizer(propagationWork *policyv1alpha1.PropagationWork) (controllerruntime.Result, error) {
if !controllerutil.ContainsFinalizer(propagationWork, util.ExecutionControllerFinalizer) {
// removeFinalizer remove finalizer from the given Work
func (c *Controller) removeFinalizer(work *policyv1alpha1.Work) (controllerruntime.Result, error) {
if !controllerutil.ContainsFinalizer(work, util.ExecutionControllerFinalizer) {
return controllerruntime.Result{}, nil
}
controllerutil.RemoveFinalizer(propagationWork, util.ExecutionControllerFinalizer)
err := c.Client.Update(context.TODO(), propagationWork)
controllerutil.RemoveFinalizer(work, util.ExecutionControllerFinalizer)
err := c.Client.Update(context.TODO(), work)
if err != nil {
return controllerruntime.Result{Requeue: true}, err
}
return controllerruntime.Result{}, nil
}
// updateAppliedCondition update the Applied condition for the given PropagationWork
func (c *Controller) updateAppliedCondition(propagationWork *policyv1alpha1.PropagationWork) error {
newPropagationWorkAppliedCondition := metav1.Condition{
// updateAppliedCondition update the Applied condition for the given Work
func (c *Controller) updateAppliedCondition(work *policyv1alpha1.Work) error {
newWorkAppliedCondition := metav1.Condition{
Type: policyv1alpha1.WorkApplied,
Status: metav1.ConditionTrue,
Reason: "AppliedSuccessful",
Message: "Manifest has been successfully applied",
LastTransitionTime: metav1.Now(),
}
propagationWork.Status.Conditions = append(propagationWork.Status.Conditions, newPropagationWorkAppliedCondition)
err := c.Client.Status().Update(context.TODO(), propagationWork)
work.Status.Conditions = append(work.Status.Conditions, newWorkAppliedCondition)
err := c.Client.Status().Update(context.TODO(), work)
return err
}

View File

@ -58,23 +58,23 @@ func (c *HorizontalPodAutoscalerController) Reconcile(req controllerruntime.Requ
return c.syncHPA(hpa)
}
// syncHPA gets placement from propagationBinding according to targetRef in hpa, then builds propagationWorks in target execution namespaces.
// syncHPA gets placement from propagationBinding according to targetRef in hpa, then builds works in target execution namespaces.
func (c *HorizontalPodAutoscalerController) syncHPA(hpa *autoscalingv1.HorizontalPodAutoscaler) (controllerruntime.Result, error) {
clusters, err := c.getTargetPlacement(hpa.Spec.ScaleTargetRef, hpa.GetNamespace())
if err != nil {
klog.Errorf("Failed to get target placement by hpa %s/%s. Error: %v.", hpa.GetNamespace(), hpa.GetName(), err)
return controllerruntime.Result{Requeue: true}, err
}
err = c.buildPropagationWorks(hpa, clusters)
err = c.buildWorks(hpa, clusters)
if err != nil {
klog.Errorf("Failed to build propagationWork for hpa %s/%s. Error: %v.", hpa.GetNamespace(), hpa.GetName(), err)
klog.Errorf("Failed to build work for hpa %s/%s. Error: %v.", hpa.GetNamespace(), hpa.GetName(), err)
return controllerruntime.Result{Requeue: true}, err
}
return controllerruntime.Result{}, nil
}
// buildPropagationWorks transforms hpa obj to unstructured, creates or updates propagationWorks in the target execution namespaces.
func (c *HorizontalPodAutoscalerController) buildPropagationWorks(hpa *autoscalingv1.HorizontalPodAutoscaler, clusters []string) error {
// buildWorks transforms hpa obj to unstructured, creates or updates Works in the target execution namespaces.
func (c *HorizontalPodAutoscalerController) buildWorks(hpa *autoscalingv1.HorizontalPodAutoscaler, clusters []string) error {
uncastObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(hpa)
if err != nil {
klog.Errorf("Failed to transform hpa %s/%s. Error: %v", hpa.GetNamespace(), hpa.GetName(), err)
@ -92,7 +92,7 @@ func (c *HorizontalPodAutoscalerController) buildPropagationWorks(hpa *autoscali
executionSpace, err := names.GenerateExecutionSpaceName(clusterName)
if err != nil {
klog.Errorf("Failed to ensure PropagationWork for cluster: %s. Error: %v.", clusterName, err)
klog.Errorf("Failed to ensure Work for cluster: %s. Error: %v.", clusterName, err)
return err
}
@ -107,7 +107,7 @@ func (c *HorizontalPodAutoscalerController) buildPropagationWorks(hpa *autoscali
Labels: map[string]string{util.OwnerLabel: names.GenerateOwnerLabelValue(hpa.GetNamespace(), hpa.GetName())},
}
err = util.CreateOrUpdatePropagationWork(c.Client, objectMeta, hpaJSON)
err = util.CreateOrUpdateWork(c.Client, objectMeta, hpaJSON)
if err != nil {
return err
}

View File

@ -31,9 +31,9 @@ import (
// WorkStatusControllerName is the controller name that will be used when reporting events.
const WorkStatusControllerName = "work-status-controller"
// PropagationWorkStatusController is to sync status of PropagationWork.
type PropagationWorkStatusController struct {
client.Client // used to operate PropagationWork resources.
// WorkStatusController is to sync status of Work.
type WorkStatusController struct {
client.Client // used to operate Work resources.
DynamicClient dynamic.Interface // used to fetch arbitrary resources.
EventRecorder record.EventRecorder
RESTMapper meta.RESTMapper
@ -49,10 +49,10 @@ type PropagationWorkStatusController struct {
// Reconcile performs a full reconciliation for the object referred to by the Request.
// The Controller will requeue the Request to be processed again if an error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (c *PropagationWorkStatusController) Reconcile(req controllerruntime.Request) (controllerruntime.Result, error) {
klog.V(4).Infof("Reconciling status of PropagationWork %s.", req.NamespacedName.String())
func (c *WorkStatusController) Reconcile(req controllerruntime.Request) (controllerruntime.Result, error) {
klog.V(4).Infof("Reconciling status of Work %s.", req.NamespacedName.String())
work := &v1alpha1.PropagationWork{}
work := &v1alpha1.Work{}
if err := c.Client.Get(context.TODO(), req.NamespacedName, work); err != nil {
// The resource may no longer exist, in which case we stop processing.
if errors.IsNotFound(err) {
@ -70,18 +70,18 @@ func (c *PropagationWorkStatusController) Reconcile(req controllerruntime.Reques
}
// buildResourceInformers builds informer dynamically for managed resources in member cluster.
// The created informer watches resource change and then sync to the relevant PropagationWork object.
func (c *PropagationWorkStatusController) buildResourceInformers(work *v1alpha1.PropagationWork) (controllerruntime.Result, error) {
// The created informer watches resource change and then sync to the relevant Work object.
func (c *WorkStatusController) buildResourceInformers(work *v1alpha1.Work) (controllerruntime.Result, error) {
err := c.registerInformersAndStart(work)
if err != nil {
klog.Errorf("Failed to register informer for propagationWork %s/%s. Error: %v.", work.GetNamespace(), work.GetName(), err)
klog.Errorf("Failed to register informer for Work %s/%s. Error: %v.", work.GetNamespace(), work.GetName(), err)
return controllerruntime.Result{Requeue: true}, err
}
return controllerruntime.Result{}, nil
}
// getEventHandler return callback function that knows how to handle events from the member cluster.
func (c *PropagationWorkStatusController) getEventHandler() cache.ResourceEventHandler {
func (c *WorkStatusController) getEventHandler() cache.ResourceEventHandler {
if c.eventHandler == nil {
c.eventHandler = informermanager.NewHandlerOnAllEvents(c.worker.EnqueueRateLimited)
}
@ -89,14 +89,14 @@ func (c *PropagationWorkStatusController) getEventHandler() cache.ResourceEventH
}
// RunWorkQueue initializes worker and run it, worker will process resource asynchronously.
func (c *PropagationWorkStatusController) RunWorkQueue() {
c.worker = util.NewAsyncWorker(c.syncPropagationWorkStatus, "work-status", time.Second)
func (c *WorkStatusController) RunWorkQueue() {
c.worker = util.NewAsyncWorker(c.syncWorkStatus, "work-status", time.Second)
c.worker.Run(c.WorkerNumber, c.StopChan)
}
// syncPropagationWorkStatus will find propagationWork by label in workload, then update resource status to propagationWork status.
// syncWorkStatus will find work by label in workload, then update resource status to work status.
// label example: "karmada.io/created-by: karmada-es-member-cluster-1.default-deployment-nginx"
func (c *PropagationWorkStatusController) syncPropagationWorkStatus(key string) error {
func (c *WorkStatusController) syncWorkStatus(key string) error {
obj, err := c.getObjectFromCache(key)
if err != nil {
if errors.IsNotFound(err) {
@ -123,14 +123,14 @@ func (c *PropagationWorkStatusController) syncPropagationWorkStatus(key string)
return err
}
workObject := &v1alpha1.PropagationWork{}
workObject := &v1alpha1.Work{}
if err := c.Client.Get(context.TODO(), client.ObjectKey{Namespace: ownerNamespace, Name: ownerName}, workObject); err != nil {
// Stop processing if resource no longer exist.
if errors.IsNotFound(err) {
return nil
}
klog.Errorf("Failed to get PropagationWork(%s/%s) from cache: %v", ownerNamespace, ownerName, err)
klog.Errorf("Failed to get Work(%s/%s) from cache: %v", ownerNamespace, ownerName, err)
return err
}
@ -158,11 +158,11 @@ func (c *PropagationWorkStatusController) syncPropagationWorkStatus(key string)
return c.ObjectWatcher.Update(clusterName, desireObj, obj)
}
klog.Infof("reflecting %s(%s/%s) status of to PropagationWork(%s/%s)", obj.GetKind(), obj.GetNamespace(), obj.GetName(), ownerNamespace, ownerName)
klog.Infof("reflecting %s(%s/%s) status of to Work(%s/%s)", obj.GetKind(), obj.GetNamespace(), obj.GetName(), ownerNamespace, ownerName)
return c.reflectStatus(workObject, obj)
}
func (c *PropagationWorkStatusController) handleDeleteEvent(key string) error {
func (c *WorkStatusController) handleDeleteEvent(key string) error {
clusterWorkload, err := util.SplitMetaKey(key)
if err != nil {
klog.Errorf("Couldn't get key for %s. Error: %v.", key, err)
@ -174,29 +174,29 @@ func (c *PropagationWorkStatusController) handleDeleteEvent(key string) error {
return err
}
propagationWorkName := names.GenerateBindingName(clusterWorkload.Namespace, clusterWorkload.GVK.Kind, clusterWorkload.Name)
propagationWork := &v1alpha1.PropagationWork{}
if err := c.Client.Get(context.TODO(), client.ObjectKey{Namespace: executionSpace, Name: propagationWorkName}, propagationWork); err != nil {
workName := names.GenerateBindingName(clusterWorkload.Namespace, clusterWorkload.GVK.Kind, clusterWorkload.Name)
work := &v1alpha1.Work{}
if err := c.Client.Get(context.TODO(), client.ObjectKey{Namespace: executionSpace, Name: workName}, work); err != nil {
// Stop processing if resource no longer exist.
if errors.IsNotFound(err) {
klog.Infof("workload %v/%v not found", executionSpace, propagationWorkName)
klog.Infof("workload %v/%v not found", executionSpace, workName)
return nil
}
klog.Errorf("Failed to get PropagationWork from cache: %v", err)
klog.Errorf("Failed to get Work from cache: %v", err)
return err
}
if !propagationWork.DeletionTimestamp.IsZero() {
if !work.DeletionTimestamp.IsZero() {
klog.Infof("resource %v/%v/%v in member cluster %v does not need to recreate", clusterWorkload.GVK.Kind, clusterWorkload.Namespace, clusterWorkload.Name, clusterWorkload.Cluster)
return nil
}
return c.recreateResourceIfNeeded(propagationWork, clusterWorkload)
return c.recreateResourceIfNeeded(work, clusterWorkload)
}
func (c *PropagationWorkStatusController) recreateResourceIfNeeded(propagationWork *v1alpha1.PropagationWork, clusterWorkload util.ClusterWorkload) error {
for _, rawManifest := range propagationWork.Spec.Workload.Manifests {
func (c *WorkStatusController) recreateResourceIfNeeded(work *v1alpha1.Work, clusterWorkload util.ClusterWorkload) error {
for _, rawManifest := range work.Spec.Workload.Manifests {
manifest := &unstructured.Unstructured{}
if err := manifest.UnmarshalJSON(rawManifest.Raw); err != nil {
return err
@ -207,7 +207,7 @@ func (c *PropagationWorkStatusController) recreateResourceIfNeeded(propagationWo
manifest.GetNamespace() == clusterWorkload.Namespace &&
manifest.GetName() == clusterWorkload.Name {
util.MergeLabel(manifest, util.OwnerLabel, names.GenerateOwnerLabelValue(propagationWork.GetNamespace(), propagationWork.GetName()))
util.MergeLabel(manifest, util.OwnerLabel, names.GenerateOwnerLabelValue(work.GetNamespace(), work.GetName()))
klog.Infof("recreating %s/%s/%s in member cluster %s", clusterWorkload.GVK.Kind, clusterWorkload.Namespace, clusterWorkload.Name, clusterWorkload.Cluster)
return c.ObjectWatcher.Create(clusterWorkload.Cluster, manifest)
@ -216,8 +216,8 @@ func (c *PropagationWorkStatusController) recreateResourceIfNeeded(propagationWo
return nil
}
// reflectStatus grabs cluster object's running status then updates to it's owner object(PropagationWork).
func (c *PropagationWorkStatusController) reflectStatus(work *v1alpha1.PropagationWork, clusterObj *unstructured.Unstructured) error {
// reflectStatus grabs cluster object's running status then updates to it's owner object(Work).
func (c *WorkStatusController) reflectStatus(work *v1alpha1.Work, clusterObj *unstructured.Unstructured) error {
// Stop processing if resource(such as ConfigMap,Secret,ClusterRole, etc.) doesn't contain 'spec.status' fields.
statusMap, exist, err := unstructured.NestedMap(clusterObj.Object, "status")
if err != nil {
@ -249,7 +249,7 @@ func (c *PropagationWorkStatusController) reflectStatus(work *v1alpha1.Propagati
return c.Client.Status().Update(context.TODO(), work)
}
func (c *PropagationWorkStatusController) buildStatusIdentifier(work *v1alpha1.PropagationWork, clusterObj *unstructured.Unstructured) (*v1alpha1.ResourceIdentifier, error) {
func (c *WorkStatusController) buildStatusIdentifier(work *v1alpha1.Work, clusterObj *unstructured.Unstructured) (*v1alpha1.ResourceIdentifier, error) {
ordinal, err := c.getManifestIndex(work.Spec.Workload.Manifests, clusterObj)
if err != nil {
return nil, err
@ -262,11 +262,11 @@ func (c *PropagationWorkStatusController) buildStatusIdentifier(work *v1alpha1.P
identifier := &v1alpha1.ResourceIdentifier{
Ordinal: ordinal,
// TODO(RainbowMango): Consider merge Group and Version to APIVersion from PropagationWork API.
// TODO(RainbowMango): Consider merge Group and Version to APIVersion from Work API.
Group: groupVersion.Group,
Version: groupVersion.Version,
Kind: clusterObj.GetKind(),
// TODO(RainbowMango): Consider remove Resource from PropagationWork API.
// TODO(RainbowMango): Consider remove Resource from Work API.
Resource: "", // we don't need this fields.
Namespace: clusterObj.GetNamespace(),
Name: clusterObj.GetName(),
@ -275,7 +275,7 @@ func (c *PropagationWorkStatusController) buildStatusIdentifier(work *v1alpha1.P
return identifier, nil
}
func (c *PropagationWorkStatusController) buildStatusRawExtension(status map[string]interface{}) (*runtime.RawExtension, error) {
func (c *WorkStatusController) buildStatusRawExtension(status map[string]interface{}) (*runtime.RawExtension, error) {
statusJSON, err := json.Marshal(status)
if err != nil {
klog.Errorf("Failed to marshal status. Error: %v.", statusJSON)
@ -287,13 +287,13 @@ func (c *PropagationWorkStatusController) buildStatusRawExtension(status map[str
}, nil
}
func (c *PropagationWorkStatusController) mergeStatus(statuses []v1alpha1.ManifestStatus, newStatus v1alpha1.ManifestStatus) []v1alpha1.ManifestStatus {
func (c *WorkStatusController) mergeStatus(statuses []v1alpha1.ManifestStatus, newStatus v1alpha1.ManifestStatus) []v1alpha1.ManifestStatus {
// TODO(RainbowMango): update 'statuses' if 'newStatus' already exist.
// For now, we only have at most one manifest in PropagationWork, so just override current 'statuses'.
// For now, we only have at most one manifest in Work, so just override current 'statuses'.
return []v1alpha1.ManifestStatus{newStatus}
}
func (c *PropagationWorkStatusController) getManifestIndex(manifests []v1alpha1.Manifest, clusterObj *unstructured.Unstructured) (int, error) {
func (c *WorkStatusController) getManifestIndex(manifests []v1alpha1.Manifest, clusterObj *unstructured.Unstructured) (int, error) {
for index, rawManifest := range manifests {
manifest := &unstructured.Unstructured{}
if err := manifest.UnmarshalJSON(rawManifest.Raw); err != nil {
@ -311,7 +311,7 @@ func (c *PropagationWorkStatusController) getManifestIndex(manifests []v1alpha1.
return -1, fmt.Errorf("no such manifest exist")
}
func (c *PropagationWorkStatusController) getRawManifest(manifests []v1alpha1.Manifest, clusterObj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
func (c *WorkStatusController) getRawManifest(manifests []v1alpha1.Manifest, clusterObj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
for _, rawManifest := range manifests {
manifest := &unstructured.Unstructured{}
if err := manifest.UnmarshalJSON(rawManifest.Raw); err != nil {
@ -330,7 +330,7 @@ func (c *PropagationWorkStatusController) getRawManifest(manifests []v1alpha1.Ma
}
// getObjectFromCache gets full object information from cache by key in worker queue.
func (c *PropagationWorkStatusController) getObjectFromCache(key string) (*unstructured.Unstructured, error) {
func (c *WorkStatusController) getObjectFromCache(key string) (*unstructured.Unstructured, error) {
clusterWorkload, err := util.SplitMetaKey(key)
if err != nil {
klog.Errorf("Couldn't get key for %s. Error: %v.", key, err)
@ -361,7 +361,7 @@ func (c *PropagationWorkStatusController) getObjectFromCache(key string) (*unstr
// registerInformersAndStart builds informer manager for cluster if it doesn't exist, then constructs informers for gvr
// and start it.
func (c *PropagationWorkStatusController) registerInformersAndStart(work *v1alpha1.PropagationWork) error {
func (c *WorkStatusController) registerInformersAndStart(work *v1alpha1.Work) error {
clusterName, err := names.GetClusterName(work.GetNamespace())
if err != nil {
klog.Errorf("Failed to get member cluster name by %s. Error: %v.", work.GetNamespace(), err)
@ -373,7 +373,7 @@ func (c *PropagationWorkStatusController) registerInformersAndStart(work *v1alph
return err
}
gvrTargets, err := c.getGVRsFromPropagationWork(work)
gvrTargets, err := c.getGVRsFromWork(work)
if err != nil {
return err
}
@ -397,8 +397,8 @@ func (c *PropagationWorkStatusController) registerInformersAndStart(work *v1alph
return nil
}
// getGVRsFromPropagationWork traverses the manifests in propagationWork to find groupVersionResource list.
func (c *PropagationWorkStatusController) getGVRsFromPropagationWork(work *v1alpha1.PropagationWork) (map[schema.GroupVersionResource]bool, error) {
// getGVRsFromWork traverses the manifests in work to find groupVersionResource list.
func (c *WorkStatusController) getGVRsFromWork(work *v1alpha1.Work) (map[schema.GroupVersionResource]bool, error) {
gvrTargets := map[schema.GroupVersionResource]bool{}
for _, manifest := range work.Spec.Workload.Manifests {
workload := &unstructured.Unstructured{}
@ -419,7 +419,7 @@ func (c *PropagationWorkStatusController) getGVRsFromPropagationWork(work *v1alp
// getSingleClusterManager gets singleClusterInformerManager with clusterName.
// If manager is not exist, create it, otherwise gets it from map.
func (c *PropagationWorkStatusController) getSingleClusterManager(clusterName string) (informermanager.SingleClusterInformerManager, error) {
func (c *WorkStatusController) getSingleClusterManager(clusterName string) (informermanager.SingleClusterInformerManager, error) {
// TODO(chenxianpao): If cluster A is removed, then a new cluster that name also is A joins karmada,
// the cache in informer manager should be updated.
singleClusterInformerManager := c.InformerManager.GetSingleClusterManager(clusterName)
@ -435,6 +435,6 @@ func (c *PropagationWorkStatusController) getSingleClusterManager(clusterName st
}
// SetupWithManager creates a controller and register to controller manager.
func (c *PropagationWorkStatusController) SetupWithManager(mgr controllerruntime.Manager) error {
return controllerruntime.NewControllerManagedBy(mgr).For(&v1alpha1.PropagationWork{}).Complete(c)
func (c *WorkStatusController) SetupWithManager(mgr controllerruntime.Manager) error {
return controllerruntime.NewControllerManagedBy(mgr).For(&v1alpha1.Work{}).Complete(c)
}

View File

@ -24,8 +24,8 @@ func (c *FakePolicyV1alpha1) PropagationPolicies(namespace string) v1alpha1.Prop
return &FakePropagationPolicies{c, namespace}
}
func (c *FakePolicyV1alpha1) PropagationWorks(namespace string) v1alpha1.PropagationWorkInterface {
return &FakePropagationWorks{c, namespace}
func (c *FakePolicyV1alpha1) Works(namespace string) v1alpha1.WorkInterface {
return &FakeWorks{c, namespace}
}
// RESTClient returns a RESTClient that is used to communicate

View File

@ -1,126 +0,0 @@
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
"context"
v1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakePropagationWorks implements PropagationWorkInterface
type FakePropagationWorks struct {
Fake *FakePolicyV1alpha1
ns string
}
var propagationworksResource = schema.GroupVersionResource{Group: "policy.karmada.io", Version: "v1alpha1", Resource: "propagationworks"}
var propagationworksKind = schema.GroupVersionKind{Group: "policy.karmada.io", Version: "v1alpha1", Kind: "PropagationWork"}
// Get takes name of the propagationWork, and returns the corresponding propagationWork object, and an error if there is any.
func (c *FakePropagationWorks) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PropagationWork, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetAction(propagationworksResource, c.ns, name), &v1alpha1.PropagationWork{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.PropagationWork), err
}
// List takes label and field selectors, and returns the list of PropagationWorks that match those selectors.
func (c *FakePropagationWorks) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PropagationWorkList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListAction(propagationworksResource, propagationworksKind, c.ns, opts), &v1alpha1.PropagationWorkList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &v1alpha1.PropagationWorkList{ListMeta: obj.(*v1alpha1.PropagationWorkList).ListMeta}
for _, item := range obj.(*v1alpha1.PropagationWorkList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested propagationWorks.
func (c *FakePropagationWorks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewWatchAction(propagationworksResource, c.ns, opts))
}
// Create takes the representation of a propagationWork and creates it. Returns the server's representation of the propagationWork, and an error, if there is any.
func (c *FakePropagationWorks) Create(ctx context.Context, propagationWork *v1alpha1.PropagationWork, opts v1.CreateOptions) (result *v1alpha1.PropagationWork, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateAction(propagationworksResource, c.ns, propagationWork), &v1alpha1.PropagationWork{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.PropagationWork), err
}
// Update takes the representation of a propagationWork and updates it. Returns the server's representation of the propagationWork, and an error, if there is any.
func (c *FakePropagationWorks) Update(ctx context.Context, propagationWork *v1alpha1.PropagationWork, opts v1.UpdateOptions) (result *v1alpha1.PropagationWork, err error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateAction(propagationworksResource, c.ns, propagationWork), &v1alpha1.PropagationWork{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.PropagationWork), err
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakePropagationWorks) UpdateStatus(ctx context.Context, propagationWork *v1alpha1.PropagationWork, opts v1.UpdateOptions) (*v1alpha1.PropagationWork, error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateSubresourceAction(propagationworksResource, "status", c.ns, propagationWork), &v1alpha1.PropagationWork{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.PropagationWork), err
}
// Delete takes name of the propagationWork and deletes it. Returns an error if one occurs.
func (c *FakePropagationWorks) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewDeleteAction(propagationworksResource, c.ns, name), &v1alpha1.PropagationWork{})
return err
}
// DeleteCollection deletes a collection of objects.
func (c *FakePropagationWorks) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
action := testing.NewDeleteCollectionAction(propagationworksResource, c.ns, listOpts)
_, err := c.Fake.Invokes(action, &v1alpha1.PropagationWorkList{})
return err
}
// Patch applies the patch and returns the patched propagationWork.
func (c *FakePropagationWorks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PropagationWork, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(propagationworksResource, c.ns, name, pt, data, subresources...), &v1alpha1.PropagationWork{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.PropagationWork), err
}

View File

@ -0,0 +1,126 @@
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
"context"
v1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakeWorks implements WorkInterface
type FakeWorks struct {
Fake *FakePolicyV1alpha1
ns string
}
var worksResource = schema.GroupVersionResource{Group: "policy.karmada.io", Version: "v1alpha1", Resource: "works"}
var worksKind = schema.GroupVersionKind{Group: "policy.karmada.io", Version: "v1alpha1", Kind: "Work"}
// Get takes name of the work, and returns the corresponding work object, and an error if there is any.
func (c *FakeWorks) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Work, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetAction(worksResource, c.ns, name), &v1alpha1.Work{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.Work), err
}
// List takes label and field selectors, and returns the list of Works that match those selectors.
func (c *FakeWorks) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.WorkList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListAction(worksResource, worksKind, c.ns, opts), &v1alpha1.WorkList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &v1alpha1.WorkList{ListMeta: obj.(*v1alpha1.WorkList).ListMeta}
for _, item := range obj.(*v1alpha1.WorkList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested works.
func (c *FakeWorks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewWatchAction(worksResource, c.ns, opts))
}
// Create takes the representation of a work and creates it. Returns the server's representation of the work, and an error, if there is any.
func (c *FakeWorks) Create(ctx context.Context, work *v1alpha1.Work, opts v1.CreateOptions) (result *v1alpha1.Work, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateAction(worksResource, c.ns, work), &v1alpha1.Work{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.Work), err
}
// Update takes the representation of a work and updates it. Returns the server's representation of the work, and an error, if there is any.
func (c *FakeWorks) Update(ctx context.Context, work *v1alpha1.Work, opts v1.UpdateOptions) (result *v1alpha1.Work, err error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateAction(worksResource, c.ns, work), &v1alpha1.Work{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.Work), err
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakeWorks) UpdateStatus(ctx context.Context, work *v1alpha1.Work, opts v1.UpdateOptions) (*v1alpha1.Work, error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateSubresourceAction(worksResource, "status", c.ns, work), &v1alpha1.Work{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.Work), err
}
// Delete takes name of the work and deletes it. Returns an error if one occurs.
func (c *FakeWorks) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewDeleteAction(worksResource, c.ns, name), &v1alpha1.Work{})
return err
}
// DeleteCollection deletes a collection of objects.
func (c *FakeWorks) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
action := testing.NewDeleteCollectionAction(worksResource, c.ns, listOpts)
_, err := c.Fake.Invokes(action, &v1alpha1.WorkList{})
return err
}
// Patch applies the patch and returns the patched work.
func (c *FakeWorks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Work, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(worksResource, c.ns, name, pt, data, subresources...), &v1alpha1.Work{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.Work), err
}

View File

@ -8,4 +8,4 @@ type PropagationBindingExpansion interface{}
type PropagationPolicyExpansion interface{}
type PropagationWorkExpansion interface{}
type WorkExpansion interface{}

View File

@ -13,7 +13,7 @@ type PolicyV1alpha1Interface interface {
OverridePoliciesGetter
PropagationBindingsGetter
PropagationPoliciesGetter
PropagationWorksGetter
WorksGetter
}
// PolicyV1alpha1Client is used to interact with features provided by the policy.karmada.io group.
@ -33,8 +33,8 @@ func (c *PolicyV1alpha1Client) PropagationPolicies(namespace string) Propagation
return newPropagationPolicies(c, namespace)
}
func (c *PolicyV1alpha1Client) PropagationWorks(namespace string) PropagationWorkInterface {
return newPropagationWorks(c, namespace)
func (c *PolicyV1alpha1Client) Works(namespace string) WorkInterface {
return newWorks(c, namespace)
}
// NewForConfig creates a new PolicyV1alpha1Client for the given config.

View File

@ -1,179 +0,0 @@
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
"time"
v1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
scheme "github.com/karmada-io/karmada/pkg/generated/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// PropagationWorksGetter has a method to return a PropagationWorkInterface.
// A group's client should implement this interface.
type PropagationWorksGetter interface {
PropagationWorks(namespace string) PropagationWorkInterface
}
// PropagationWorkInterface has methods to work with PropagationWork resources.
type PropagationWorkInterface interface {
Create(ctx context.Context, propagationWork *v1alpha1.PropagationWork, opts v1.CreateOptions) (*v1alpha1.PropagationWork, error)
Update(ctx context.Context, propagationWork *v1alpha1.PropagationWork, opts v1.UpdateOptions) (*v1alpha1.PropagationWork, error)
UpdateStatus(ctx context.Context, propagationWork *v1alpha1.PropagationWork, opts v1.UpdateOptions) (*v1alpha1.PropagationWork, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PropagationWork, error)
List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PropagationWorkList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PropagationWork, err error)
PropagationWorkExpansion
}
// propagationWorks implements PropagationWorkInterface
type propagationWorks struct {
client rest.Interface
ns string
}
// newPropagationWorks returns a PropagationWorks
func newPropagationWorks(c *PolicyV1alpha1Client, namespace string) *propagationWorks {
return &propagationWorks{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the propagationWork, and returns the corresponding propagationWork object, and an error if there is any.
func (c *propagationWorks) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PropagationWork, err error) {
result = &v1alpha1.PropagationWork{}
err = c.client.Get().
Namespace(c.ns).
Resource("propagationworks").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
// List takes label and field selectors, and returns the list of PropagationWorks that match those selectors.
func (c *propagationWorks) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PropagationWorkList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1alpha1.PropagationWorkList{}
err = c.client.Get().
Namespace(c.ns).
Resource("propagationworks").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested propagationWorks.
func (c *propagationWorks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("propagationworks").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a propagationWork and creates it. Returns the server's representation of the propagationWork, and an error, if there is any.
func (c *propagationWorks) Create(ctx context.Context, propagationWork *v1alpha1.PropagationWork, opts v1.CreateOptions) (result *v1alpha1.PropagationWork, err error) {
result = &v1alpha1.PropagationWork{}
err = c.client.Post().
Namespace(c.ns).
Resource("propagationworks").
VersionedParams(&opts, scheme.ParameterCodec).
Body(propagationWork).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a propagationWork and updates it. Returns the server's representation of the propagationWork, and an error, if there is any.
func (c *propagationWorks) Update(ctx context.Context, propagationWork *v1alpha1.PropagationWork, opts v1.UpdateOptions) (result *v1alpha1.PropagationWork, err error) {
result = &v1alpha1.PropagationWork{}
err = c.client.Put().
Namespace(c.ns).
Resource("propagationworks").
Name(propagationWork.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(propagationWork).
Do(ctx).
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *propagationWorks) UpdateStatus(ctx context.Context, propagationWork *v1alpha1.PropagationWork, opts v1.UpdateOptions) (result *v1alpha1.PropagationWork, err error) {
result = &v1alpha1.PropagationWork{}
err = c.client.Put().
Namespace(c.ns).
Resource("propagationworks").
Name(propagationWork.Name).
SubResource("status").
VersionedParams(&opts, scheme.ParameterCodec).
Body(propagationWork).
Do(ctx).
Into(result)
return
}
// Delete takes name of the propagationWork and deletes it. Returns an error if one occurs.
func (c *propagationWorks) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("propagationworks").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *propagationWorks) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("propagationworks").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
}
// Patch applies the patch and returns the patched propagationWork.
func (c *propagationWorks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PropagationWork, err error) {
result = &v1alpha1.PropagationWork{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("propagationworks").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}

View File

@ -0,0 +1,179 @@
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
"time"
v1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
scheme "github.com/karmada-io/karmada/pkg/generated/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// WorksGetter has a method to return a WorkInterface.
// A group's client should implement this interface.
type WorksGetter interface {
	Works(namespace string) WorkInterface
}

// WorkInterface has methods to work with Work resources.
// All operations act within the namespace fixed when the interface
// was obtained from WorksGetter.
type WorkInterface interface {
	Create(ctx context.Context, work *v1alpha1.Work, opts v1.CreateOptions) (*v1alpha1.Work, error)
	Update(ctx context.Context, work *v1alpha1.Work, opts v1.UpdateOptions) (*v1alpha1.Work, error)
	// UpdateStatus updates only the "status" subresource of a Work.
	UpdateStatus(ctx context.Context, work *v1alpha1.Work, opts v1.UpdateOptions) (*v1alpha1.Work, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Work, error)
	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.WorkList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Work, err error)
	WorkExpansion
}
// works implements WorkInterface
type works struct {
	client rest.Interface // REST client shared with the parent PolicyV1alpha1Client
	ns     string         // namespace every request is scoped to
}

// newWorks returns a Works
func newWorks(c *PolicyV1alpha1Client, namespace string) *works {
	return &works{
		client: c.RESTClient(),
		ns:     namespace,
	}
}
// Get takes name of the work, and returns the corresponding work object, and an error if there is any.
func (c *works) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Work, err error) {
	result = &v1alpha1.Work{}
	// Issues GET .../namespaces/{ns}/works/{name}.
	err = c.client.Get().
		Namespace(c.ns).
		Resource("works").
		Name(name).
		VersionedParams(&options, scheme.ParameterCodec).
		Do(ctx).
		Into(result)
	return
}
// List takes label and field selectors, and returns the list of Works that match those selectors.
func (c *works) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.WorkList, err error) {
	// Translate the optional server-side timeout (in seconds) into a
	// client request timeout; zero means no timeout.
	var timeout time.Duration
	if opts.TimeoutSeconds != nil {
		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
	}
	result = &v1alpha1.WorkList{}
	err = c.client.Get().
		Namespace(c.ns).
		Resource("works").
		VersionedParams(&opts, scheme.ParameterCodec).
		Timeout(timeout).
		Do(ctx).
		Into(result)
	return
}
// Watch returns a watch.Interface that watches the requested works.
func (c *works) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
	var timeout time.Duration
	if opts.TimeoutSeconds != nil {
		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
	}
	// Force watch semantics regardless of what the caller passed in opts.
	opts.Watch = true
	return c.client.Get().
		Namespace(c.ns).
		Resource("works").
		VersionedParams(&opts, scheme.ParameterCodec).
		Timeout(timeout).
		Watch(ctx)
}
// Create takes the representation of a work and creates it. Returns the server's representation of the work, and an error, if there is any.
func (c *works) Create(ctx context.Context, work *v1alpha1.Work, opts v1.CreateOptions) (result *v1alpha1.Work, err error) {
	result = &v1alpha1.Work{}
	// Issues POST .../namespaces/{ns}/works with the object as body.
	err = c.client.Post().
		Namespace(c.ns).
		Resource("works").
		VersionedParams(&opts, scheme.ParameterCodec).
		Body(work).
		Do(ctx).
		Into(result)
	return
}
// Update takes the representation of a work and updates it. Returns the server's representation of the work, and an error, if there is any.
func (c *works) Update(ctx context.Context, work *v1alpha1.Work, opts v1.UpdateOptions) (result *v1alpha1.Work, err error) {
	result = &v1alpha1.Work{}
	// Issues PUT .../namespaces/{ns}/works/{name}; the name is taken
	// from the object being updated.
	err = c.client.Put().
		Namespace(c.ns).
		Resource("works").
		Name(work.Name).
		VersionedParams(&opts, scheme.ParameterCodec).
		Body(work).
		Do(ctx).
		Into(result)
	return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
// It issues PUT .../namespaces/{ns}/works/{name}/status, updating only
// the status subresource.
func (c *works) UpdateStatus(ctx context.Context, work *v1alpha1.Work, opts v1.UpdateOptions) (result *v1alpha1.Work, err error) {
	result = &v1alpha1.Work{}
	err = c.client.Put().
		Namespace(c.ns).
		Resource("works").
		Name(work.Name).
		SubResource("status").
		VersionedParams(&opts, scheme.ParameterCodec).
		Body(work).
		Do(ctx).
		Into(result)
	return
}
// Delete takes name of the work and deletes it. Returns an error if one occurs.
func (c *works) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
	// DeleteOptions travel in the request body, not as query parameters.
	return c.client.Delete().
		Namespace(c.ns).
		Resource("works").
		Name(name).
		Body(&opts).
		Do(ctx).
		Error()
}
// DeleteCollection deletes a collection of objects.
// listOpts selects which Works in the namespace are deleted; opts
// controls deletion behavior and is sent as the request body.
func (c *works) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
	var timeout time.Duration
	if listOpts.TimeoutSeconds != nil {
		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
	}
	return c.client.Delete().
		Namespace(c.ns).
		Resource("works").
		VersionedParams(&listOpts, scheme.ParameterCodec).
		Timeout(timeout).
		Body(&opts).
		Do(ctx).
		Error()
}
// Patch applies the patch and returns the patched work.
// pt selects the patch strategy (JSON, merge, strategic merge, apply);
// data is the raw patch payload; subresources, if given, target e.g. "status".
func (c *works) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Work, err error) {
	result = &v1alpha1.Work{}
	err = c.client.Patch(pt).
		Namespace(c.ns).
		Resource("works").
		Name(name).
		SubResource(subresources...).
		VersionedParams(&opts, scheme.ParameterCodec).
		Body(data).
		Do(ctx).
		Into(result)
	return
}

View File

@ -48,8 +48,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
return &genericInformer{resource: resource.GroupResource(), informer: f.Policy().V1alpha1().PropagationBindings().Informer()}, nil
case policyv1alpha1.SchemeGroupVersion.WithResource("propagationpolicies"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Policy().V1alpha1().PropagationPolicies().Informer()}, nil
case policyv1alpha1.SchemeGroupVersion.WithResource("propagationworks"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Policy().V1alpha1().PropagationWorks().Informer()}, nil
case policyv1alpha1.SchemeGroupVersion.WithResource("works"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Policy().V1alpha1().Works().Informer()}, nil
}

View File

@ -14,8 +14,8 @@ type Interface interface {
PropagationBindings() PropagationBindingInformer
// PropagationPolicies returns a PropagationPolicyInformer.
PropagationPolicies() PropagationPolicyInformer
// PropagationWorks returns a PropagationWorkInformer.
PropagationWorks() PropagationWorkInformer
// Works returns a WorkInformer.
Works() WorkInformer
}
type version struct {
@ -44,7 +44,7 @@ func (v *version) PropagationPolicies() PropagationPolicyInformer {
return &propagationPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
// PropagationWorks returns a PropagationWorkInformer.
func (v *version) PropagationWorks() PropagationWorkInformer {
return &propagationWorkInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
// Works returns a WorkInformer.
func (v *version) Works() WorkInformer {
return &workInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}

View File

@ -1,74 +0,0 @@
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
time "time"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
versioned "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
internalinterfaces "github.com/karmada-io/karmada/pkg/generated/informers/externalversions/internalinterfaces"
v1alpha1 "github.com/karmada-io/karmada/pkg/generated/listers/policy/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// PropagationWorkInformer provides access to a shared informer and lister for
// PropagationWorks.
type PropagationWorkInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1alpha1.PropagationWorkLister
}
type propagationWorkInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewPropagationWorkInformer constructs a new informer for PropagationWork type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewPropagationWorkInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredPropagationWorkInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredPropagationWorkInformer constructs a new informer for PropagationWork type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredPropagationWorkInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.PolicyV1alpha1().PropagationWorks(namespace).List(context.TODO(), options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.PolicyV1alpha1().PropagationWorks(namespace).Watch(context.TODO(), options)
},
},
&policyv1alpha1.PropagationWork{},
resyncPeriod,
indexers,
)
}
func (f *propagationWorkInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredPropagationWorkInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
func (f *propagationWorkInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&policyv1alpha1.PropagationWork{}, f.defaultInformer)
}
func (f *propagationWorkInformer) Lister() v1alpha1.PropagationWorkLister {
return v1alpha1.NewPropagationWorkLister(f.Informer().GetIndexer())
}

View File

@ -0,0 +1,74 @@
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
time "time"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
versioned "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
internalinterfaces "github.com/karmada-io/karmada/pkg/generated/informers/externalversions/internalinterfaces"
v1alpha1 "github.com/karmada-io/karmada/pkg/generated/listers/policy/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// WorkInformer provides access to a shared informer and lister for
// Works.
type WorkInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() v1alpha1.WorkLister
}

type workInformer struct {
	factory          internalinterfaces.SharedInformerFactory // shared factory used to dedupe informers per type
	tweakListOptions internalinterfaces.TweakListOptionsFunc   // optional hook to mutate list/watch options
	namespace        string
}
// NewWorkInformer constructs a new informer for Work type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewWorkInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	// Delegates with a nil tweakListOptions, i.e. unfiltered list/watch.
	return NewFilteredWorkInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredWorkInformer constructs a new informer for Work type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
// tweakListOptions, when non-nil, is applied to the options of every
// List and Watch call before it is sent.
func NewFilteredWorkInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.PolicyV1alpha1().Works(namespace).List(context.TODO(), options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.PolicyV1alpha1().Works(namespace).Watch(context.TODO(), options)
			},
		},
		&policyv1alpha1.Work{},
		resyncPeriod,
		indexers,
	)
}
// defaultInformer builds the informer the shared factory caches for the
// Work type, indexed by namespace.
func (f *workInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	return NewFilteredWorkInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}

// Informer returns the shared informer for Work, creating it on first use.
func (f *workInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&policyv1alpha1.Work{}, f.defaultInformer)
}

// Lister returns a WorkLister backed by the shared informer's index.
func (f *workInformer) Lister() v1alpha1.WorkLister {
	return v1alpha1.NewWorkLister(f.Informer().GetIndexer())
}

View File

@ -26,10 +26,10 @@ type PropagationPolicyListerExpansion interface{}
// PropagationPolicyNamespaceLister.
type PropagationPolicyNamespaceListerExpansion interface{}
// PropagationWorkListerExpansion allows custom methods to be added to
// PropagationWorkLister.
type PropagationWorkListerExpansion interface{}
// WorkListerExpansion allows custom methods to be added to
// WorkLister.
type WorkListerExpansion interface{}
// PropagationWorkNamespaceListerExpansion allows custom methods to be added to
// PropagationWorkNamespaceLister.
type PropagationWorkNamespaceListerExpansion interface{}
// WorkNamespaceListerExpansion allows custom methods to be added to
// WorkNamespaceLister.
type WorkNamespaceListerExpansion interface{}

View File

@ -1,83 +0,0 @@
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
import (
v1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// PropagationWorkLister helps list PropagationWorks.
// All objects returned here must be treated as read-only.
type PropagationWorkLister interface {
// List lists all PropagationWorks in the indexer.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v1alpha1.PropagationWork, err error)
// PropagationWorks returns an object that can list and get PropagationWorks.
PropagationWorks(namespace string) PropagationWorkNamespaceLister
PropagationWorkListerExpansion
}
// propagationWorkLister implements the PropagationWorkLister interface.
type propagationWorkLister struct {
indexer cache.Indexer
}
// NewPropagationWorkLister returns a new PropagationWorkLister.
func NewPropagationWorkLister(indexer cache.Indexer) PropagationWorkLister {
return &propagationWorkLister{indexer: indexer}
}
// List lists all PropagationWorks in the indexer.
func (s *propagationWorkLister) List(selector labels.Selector) (ret []*v1alpha1.PropagationWork, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha1.PropagationWork))
})
return ret, err
}
// PropagationWorks returns an object that can list and get PropagationWorks.
func (s *propagationWorkLister) PropagationWorks(namespace string) PropagationWorkNamespaceLister {
return propagationWorkNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// PropagationWorkNamespaceLister helps list and get PropagationWorks.
// All objects returned here must be treated as read-only.
type PropagationWorkNamespaceLister interface {
// List lists all PropagationWorks in the indexer for a given namespace.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v1alpha1.PropagationWork, err error)
// Get retrieves the PropagationWork from the indexer for a given namespace and name.
// Objects returned here must be treated as read-only.
Get(name string) (*v1alpha1.PropagationWork, error)
PropagationWorkNamespaceListerExpansion
}
// propagationWorkNamespaceLister implements the PropagationWorkNamespaceLister
// interface.
type propagationWorkNamespaceLister struct {
indexer cache.Indexer
namespace string
}
// List lists all PropagationWorks in the indexer for a given namespace.
func (s propagationWorkNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.PropagationWork, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha1.PropagationWork))
})
return ret, err
}
// Get retrieves the PropagationWork from the indexer for a given namespace and name.
func (s propagationWorkNamespaceLister) Get(name string) (*v1alpha1.PropagationWork, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1alpha1.Resource("propagationwork"), name)
}
return obj.(*v1alpha1.PropagationWork), nil
}

View File

@ -0,0 +1,83 @@
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
import (
v1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// WorkLister helps list Works.
// All objects returned here must be treated as read-only.
type WorkLister interface {
	// List lists all Works in the indexer.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*v1alpha1.Work, err error)
	// Works returns an object that can list and get Works.
	Works(namespace string) WorkNamespaceLister
	WorkListerExpansion
}

// workLister implements the WorkLister interface.
type workLister struct {
	indexer cache.Indexer // informer-populated local cache of Work objects
}

// NewWorkLister returns a new WorkLister.
func NewWorkLister(indexer cache.Indexer) WorkLister {
	return &workLister{indexer: indexer}
}
// List lists all Works in the indexer.
// The selector filters by labels; results come from the local cache,
// not the API server.
func (s *workLister) List(selector labels.Selector) (ret []*v1alpha1.Work, err error) {
	err = cache.ListAll(s.indexer, selector, func(m interface{}) {
		ret = append(ret, m.(*v1alpha1.Work))
	})
	return ret, err
}

// Works returns an object that can list and get Works.
func (s *workLister) Works(namespace string) WorkNamespaceLister {
	return workNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// WorkNamespaceLister helps list and get Works.
// All objects returned here must be treated as read-only.
type WorkNamespaceLister interface {
	// List lists all Works in the indexer for a given namespace.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*v1alpha1.Work, err error)
	// Get retrieves the Work from the indexer for a given namespace and name.
	// Objects returned here must be treated as read-only.
	Get(name string) (*v1alpha1.Work, error)
	WorkNamespaceListerExpansion
}

// workNamespaceLister implements the WorkNamespaceLister
// interface.
type workNamespaceLister struct {
	indexer   cache.Indexer // shared with the parent workLister
	namespace string        // namespace all lookups are confined to
}
// List lists all Works in the indexer for a given namespace.
// Results come from the local cache and are filtered by selector.
func (s workNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Work, err error) {
	err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
		ret = append(ret, m.(*v1alpha1.Work))
	})
	return ret, err
}
// Get retrieves the Work from the indexer for a given namespace and name.
// Returns a NotFound API error when the object is absent from the cache.
func (s workNamespaceLister) Get(name string) (*v1alpha1.Work, error) {
	// Cache keys for namespaced objects are "<namespace>/<name>".
	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(v1alpha1.Resource("work"), name)
	}
	return obj.(*v1alpha1.Work), nil
}

View File

@ -120,7 +120,7 @@ func RunUnjoin(cmdOut io.Writer, karmadaConfig KarmadaConfig, opts CommandUnjoin
controlPlaneKubeClient := kubeclient.NewForConfigOrDie(controlPlaneRestConfig)
// todo: taint cluster object instead of deleting execution space.
// Once the cluster is tainted, eviction controller will delete all propagationwork in the execution space of the cluster.
// Once the cluster is tainted, the eviction controller will delete all Work objects in the execution space of the cluster.
executionSpaceName, err := names.GenerateExecutionSpaceName(opts.ClusterName)
if err != nil {
return err

View File

@ -21,11 +21,11 @@ func GetBindingClusterNames(binding *v1alpha1.PropagationBinding) []string {
return clusterNames
}
// CreateOrUpdatePropagationWork creates or updates propagationWork by controllerutil.CreateOrUpdate
func CreateOrUpdatePropagationWork(client client.Client, objectMeta metav1.ObjectMeta, rawExtension []byte) error {
propagationWork := &v1alpha1.PropagationWork{
// CreateOrUpdateWork creates a Work object if it does not exist, or updates it if it already exists.
func CreateOrUpdateWork(client client.Client, objectMeta metav1.ObjectMeta, rawExtension []byte) error {
work := &v1alpha1.Work{
ObjectMeta: objectMeta,
Spec: v1alpha1.PropagationWorkSpec{
Spec: v1alpha1.WorkSpec{
Workload: v1alpha1.WorkloadTemplate{
Manifests: []v1alpha1.Manifest{
{
@ -38,22 +38,22 @@ func CreateOrUpdatePropagationWork(client client.Client, objectMeta metav1.Objec
},
}
runtimeObject := propagationWork.DeepCopy()
runtimeObject := work.DeepCopy()
operationResult, err := controllerutil.CreateOrUpdate(context.TODO(), client, runtimeObject, func() error {
runtimeObject.Spec = propagationWork.Spec
runtimeObject.Spec = work.Spec
return nil
})
if err != nil {
klog.Errorf("Failed to create/update propagationWork %s/%s. Error: %v", propagationWork.GetNamespace(), propagationWork.GetName(), err)
klog.Errorf("Failed to create/update work %s/%s. Error: %v", work.GetNamespace(), work.GetName(), err)
return err
}
if operationResult == controllerutil.OperationResultCreated {
klog.Infof("Create propagationWork %s/%s successfully.", propagationWork.GetNamespace(), propagationWork.GetName())
klog.Infof("Create work %s/%s successfully.", work.GetNamespace(), work.GetName())
} else if operationResult == controllerutil.OperationResultUpdated {
klog.Infof("Update propagationWork %s/%s successfully.", propagationWork.GetNamespace(), propagationWork.GetName())
klog.Infof("Update work %s/%s successfully.", work.GetNamespace(), work.GetName())
} else {
klog.V(2).Infof("PropagationWork %s/%s is up to date.", propagationWork.GetNamespace(), propagationWork.GetName())
klog.V(2).Infof("Work %s/%s is up to date.", work.GetNamespace(), work.GetName())
}
return nil
}

View File

@ -7,10 +7,10 @@ const (
// OwnerLabel will be set in karmada CRDs, indicating who created the object.
// We can use a labelSelector to quickly find who created it.
// example1: set it in propagationBinding, the label value is propagationPolicy.
// example2: set it in propagationWork, the label value is propagationBinding.
// example3: set it in propagationWork, the label value is HPA.
// example2: set it in Work, the label value is propagationBinding.
// example3: set it in Work, the label value is HPA.
OwnerLabel = "karmada.io/created-by"
// OverrideClaimKey will set in propagationwork resource, indicates that
// OverrideClaimKey will be set in the Work resource, indicating that
// the resource is overridden by override policies
OverrideClaimKey = "karmada.io/overridden-by"
@ -27,11 +27,11 @@ const (
// Define finalizers used by karmada system.
const (
// ClusterControllerFinalizer is added to Cluster to ensure PropagationWork as well as the
// ClusterControllerFinalizer is added to Cluster to ensure Work as well as the
// execution space (namespace) is deleted before itself is deleted.
ClusterControllerFinalizer = "karmada.io/cluster-controller"
// ExecutionControllerFinalizer is added to PropagationWork to ensure manifests propagated to member cluster
// is deleted before PropagationWork itself is deleted.
// ExecutionControllerFinalizer is added to Work to ensure manifests propagated to member cluster
// is deleted before Work itself is deleted.
ExecutionControllerFinalizer = "karmada.io/execution-controller"
)