Clean up cyclomatic complexity in function EnsureWork
Signed-off-by: changzhen <changzhen5@huawei.com>
parent e29e531802
commit 12e2585e82
@@ -92,7 +92,7 @@ func (c *ResourceBindingController) syncBinding(binding *workv1alpha1.ResourceBi
 		return controllerruntime.Result{Requeue: true}, err
 	}
 
-	err = helper.EnsureWork(c.Client, workload, clusterNames, binding.Spec.Clusters, c.OverrideManager, binding, apiextensionsv1.NamespaceScoped)
+	err = helper.EnsureWork(c.Client, workload, c.OverrideManager, binding, apiextensionsv1.NamespaceScoped)
 	if err != nil {
 		klog.Errorf("Failed to transform resourceBinding(%s/%s) to works. Error: %v.",
 			binding.GetNamespace(), binding.GetName(), err)

@@ -88,7 +88,7 @@ func (c *ClusterResourceBindingController) syncBinding(binding *workv1alpha1.Clu
 		return controllerruntime.Result{Requeue: true}, err
 	}
 
-	err = helper.EnsureWork(c.Client, workload, clusterNames, binding.Spec.Clusters, c.OverrideManager, binding, apiextensionsv1.ClusterScoped)
+	err = helper.EnsureWork(c.Client, workload, c.OverrideManager, binding, apiextensionsv1.ClusterScoped)
 	if err != nil {
 		klog.Errorf("Failed to transform clusterResourceBinding(%s) to works. Error: %v.", binding.GetName(), err)
 		return controllerruntime.Result{Requeue: true}, err

@@ -357,13 +357,7 @@ func (c *ServiceExportController) reportEndpointSliceWithEndpointSliceCreateOrUp
 
 // reportEndpointSlice report EndPointSlice objects to control-plane.
 func reportEndpointSlice(c client.Client, endpointSlice *unstructured.Unstructured, clusterName string) error {
-	var (
-		executionSpace    string
-		endpointSliceWork *workv1alpha1.Work
-		err               error
-	)
-
-	executionSpace, err = names.GenerateExecutionSpaceName(clusterName)
+	executionSpace, err := names.GenerateExecutionSpaceName(clusterName)
 	if err != nil {
 		return err
 	}

@@ -379,11 +373,7 @@ func reportEndpointSlice(c client.Client, endpointSlice *unstructured.Unstructur
 		},
 	}
 
-	endpointSliceWork, err = helper.WarpResourceWithWork(workMeta, endpointSlice)
-	if err != nil {
-		return err
-	}
-	if err = helper.CreateOrUpdateWork(c, endpointSliceWork); err != nil {
+	if err = helper.CreateOrUpdateWork(c, workMeta, endpointSlice); err != nil {
 		return err
 	}
 

@@ -9,7 +9,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/sets"

@@ -17,7 +16,6 @@ import (
 	"k8s.io/klog/v2"
 	controllerruntime "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 
 	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
 	"github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"

@@ -162,144 +160,128 @@ func FetchWorkload(dynamicClient dynamic.Interface, restMapper meta.RESTMapper,
 }
 
 // EnsureWork ensure Work to be created or updated.
-//nolint:gocyclo
-// Note: ignore the cyclomatic complexity issue to get gocyclo on board. Tracked by: https://github.com/karmada-io/karmada/issues/460
-func EnsureWork(c client.Client, workload *unstructured.Unstructured, clusterNames []string, scheduleResult []workv1alpha1.TargetCluster,
-	overrideManager overridemanager.OverrideManager, binding metav1.Object, scope apiextensionsv1.ResourceScope) error {
-	var desireReplicaInfos map[string]int64
-	var referenceRSP *v1alpha1.ReplicaSchedulingPolicy
-	var err error
-	hasScheduledReplica := HasScheduledReplica(scheduleResult)
-	if hasScheduledReplica {
-		desireReplicaInfos = transScheduleResultToMap(scheduleResult)
-	} else {
-		referenceRSP, desireReplicaInfos, err = calculateReplicasIfNeeded(c, workload, clusterNames)
-		if err != nil {
-			klog.Errorf("Failed to get ReplicaSchedulingPolicy for %s/%s/%s, err is: %v", workload.GetKind(), workload.GetNamespace(), workload.GetName(), err)
-			return err
-		}
+func EnsureWork(c client.Client, workload *unstructured.Unstructured, overrideManager overridemanager.OverrideManager, binding metav1.Object, scope apiextensionsv1.ResourceScope) error {
+	var targetClusters []workv1alpha1.TargetCluster
+	switch scope {
+	case apiextensionsv1.NamespaceScoped:
+		bindingObj := binding.(*workv1alpha1.ResourceBinding)
+		targetClusters = bindingObj.Spec.Clusters
+	case apiextensionsv1.ClusterScoped:
+		bindingObj := binding.(*workv1alpha1.ClusterResourceBinding)
+		targetClusters = bindingObj.Spec.Clusters
 	}
 
-	var workLabel = make(map[string]string)
+	hasScheduledReplica, referenceRSP, desireReplicaInfos, err := getRSPAndReplicaInfos(c, workload, targetClusters)
+	if err != nil {
+		return err
+	}
 
-	for _, clusterName := range clusterNames {
-		// apply override policies
+	for _, targetCluster := range targetClusters {
 		clonedWorkload := workload.DeepCopy()
-		cops, ops, err := overrideManager.ApplyOverridePolicies(clonedWorkload, clusterName)
+		cops, ops, err := overrideManager.ApplyOverridePolicies(clonedWorkload, targetCluster.Name)
 		if err != nil {
-			klog.Errorf("Failed to apply overrides for %s/%s/%s, err is: %v", workload.GetKind(), workload.GetNamespace(), workload.GetName(), err)
+			klog.Errorf("Failed to apply overrides for %s/%s/%s, err is: %v", clonedWorkload.GetKind(), clonedWorkload.GetNamespace(), clonedWorkload.GetName(), err)
 			return err
 		}
 
-		workName := names.GenerateWorkName(workload.GetKind(), workload.GetName(), workload.GetNamespace())
-		workNamespace, err := names.GenerateExecutionSpaceName(clusterName)
+		workNamespace, err := names.GenerateExecutionSpaceName(targetCluster.Name)
 		if err != nil {
-			klog.Errorf("Failed to ensure Work for cluster: %s. Error: %v.", clusterName, err)
+			klog.Errorf("Failed to ensure Work for cluster: %s. Error: %v.", targetCluster.Name, err)
 			return err
 		}
 
-		util.MergeLabel(clonedWorkload, util.WorkNamespaceLabel, workNamespace)
-		util.MergeLabel(clonedWorkload, util.WorkNameLabel, workName)
-
-		if scope == apiextensionsv1.NamespaceScoped {
-			util.MergeLabel(clonedWorkload, util.ResourceBindingNamespaceLabel, binding.GetNamespace())
-			util.MergeLabel(clonedWorkload, util.ResourceBindingNameLabel, binding.GetName())
-			workLabel[util.ResourceBindingNamespaceLabel] = binding.GetNamespace()
-			workLabel[util.ResourceBindingNameLabel] = binding.GetName()
-		} else {
-			util.MergeLabel(clonedWorkload, util.ClusterResourceBindingLabel, binding.GetName())
-			workLabel[util.ClusterResourceBindingLabel] = binding.GetName()
-		}
+		workLabel := mergeLabel(clonedWorkload, workNamespace, binding, scope)
 
 		if clonedWorkload.GetKind() == util.DeploymentKind && (referenceRSP != nil || hasScheduledReplica) {
-			err = applyReplicaSchedulingPolicy(clonedWorkload, desireReplicaInfos[clusterName])
+			err = applyReplicaSchedulingPolicy(clonedWorkload, desireReplicaInfos[targetCluster.Name])
 			if err != nil {
 				klog.Errorf("failed to apply ReplicaSchedulingPolicy for %s/%s/%s in cluster %s, err is: %v",
-					clonedWorkload.GetKind(), clonedWorkload.GetNamespace(), clonedWorkload.GetName(), clusterName, err)
+					clonedWorkload.GetKind(), clonedWorkload.GetNamespace(), clonedWorkload.GetName(), targetCluster.Name, err)
 				return err
 			}
 		}
 
-		// TODO(@XiShanYongYe-Chang): refactor util.CreateOrUpdateWork with pkg/util/helper/work.go
-		workloadJSON, err := clonedWorkload.MarshalJSON()
+		annotations, err := recordAppliedOverrides(cops, ops)
 		if err != nil {
-			klog.Errorf("Failed to marshal workload, kind: %s, namespace: %s, name: %s. Error: %v",
-				clonedWorkload.GetKind(), clonedWorkload.GetName(), clonedWorkload.GetNamespace(), err)
+			klog.Errorf("failed to record appliedOverrides, Error: %v", err)
 			return err
 		}
 
-		work := &workv1alpha1.Work{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:       workName,
-				Namespace:  workNamespace,
-				Finalizers: []string{util.ExecutionControllerFinalizer},
-				Labels:     workLabel,
-			},
-			Spec: workv1alpha1.WorkSpec{
-				Workload: workv1alpha1.WorkloadTemplate{
-					Manifests: []workv1alpha1.Manifest{
-						{
-							RawExtension: runtime.RawExtension{
-								Raw: workloadJSON,
-							},
-						},
-					},
-				},
-			},
+		workMeta := metav1.ObjectMeta{
+			Name:        names.GenerateWorkName(clonedWorkload.GetKind(), clonedWorkload.GetName(), clonedWorkload.GetNamespace()),
+			Namespace:   workNamespace,
+			Finalizers:  []string{util.ExecutionControllerFinalizer},
+			Labels:      workLabel,
+			Annotations: annotations,
 		}
 
-		// set applied override policies if needed.
-		var appliedBytes []byte
-		if cops != nil {
-			appliedBytes, err = cops.MarshalJSON()
-			if err != nil {
-				return err
-			}
-			if appliedBytes != nil {
-				if work.Annotations == nil {
-					work.Annotations = make(map[string]string, 1)
-				}
-				work.Annotations[util.AppliedClusterOverrides] = string(appliedBytes)
-			}
-		}
-		if ops != nil {
-			appliedBytes, err = ops.MarshalJSON()
-			if err != nil {
-				return err
-			}
-			if appliedBytes != nil {
-				if work.Annotations == nil {
-					work.Annotations = make(map[string]string, 1)
-				}
-				work.Annotations[util.AppliedOverrides] = string(appliedBytes)
-			}
-		}
-
-		runtimeObject := work.DeepCopy()
-		operationResult, err := controllerutil.CreateOrUpdate(context.TODO(), c, runtimeObject, func() error {
-			runtimeObject.Annotations = work.Annotations
-			runtimeObject.Labels = work.Labels
-			runtimeObject.Spec = work.Spec
-			return nil
-		})
-		if err != nil {
-			klog.Errorf("Failed to create/update work %s/%s. Error: %v", work.GetNamespace(), work.GetName(), err)
+		if err = CreateOrUpdateWork(c, workMeta, clonedWorkload); err != nil {
 			return err
 		}
-
-		if operationResult == controllerutil.OperationResultCreated {
-			klog.Infof("Create work %s/%s successfully.", work.GetNamespace(), work.GetName())
-		} else if operationResult == controllerutil.OperationResultUpdated {
-			klog.Infof("Update work %s/%s successfully.", work.GetNamespace(), work.GetName())
-		} else {
-			klog.V(2).Infof("Work %s/%s is up to date.", work.GetNamespace(), work.GetName())
-		}
 	}
 	return nil
 }
 
+func getRSPAndReplicaInfos(c client.Client, workload *unstructured.Unstructured, targetClusters []workv1alpha1.TargetCluster) (bool, *v1alpha1.ReplicaSchedulingPolicy, map[string]int64, error) {
+	if HasScheduledReplica(targetClusters) {
+		return true, nil, transScheduleResultToMap(targetClusters), nil
+	}
+
+	referenceRSP, desireReplicaInfos, err := calculateReplicasIfNeeded(c, workload, GetBindingClusterNames(targetClusters))
+	if err != nil {
+		klog.Errorf("Failed to get ReplicaSchedulingPolicy for %s/%s/%s, err is: %v", workload.GetKind(), workload.GetNamespace(), workload.GetName(), err)
+		return false, nil, nil, err
+	}
+
+	return false, referenceRSP, desireReplicaInfos, nil
+}
+
+func mergeLabel(workload *unstructured.Unstructured, workNamespace string, binding metav1.Object, scope apiextensionsv1.ResourceScope) map[string]string {
+	var workLabel = make(map[string]string)
+	util.MergeLabel(workload, util.WorkNamespaceLabel, workNamespace)
+	util.MergeLabel(workload, util.WorkNameLabel, names.GenerateWorkName(workload.GetKind(), workload.GetName(), workload.GetNamespace()))
+
+	if scope == apiextensionsv1.NamespaceScoped {
+		util.MergeLabel(workload, util.ResourceBindingNamespaceLabel, binding.GetNamespace())
+		util.MergeLabel(workload, util.ResourceBindingNameLabel, binding.GetName())
+		workLabel[util.ResourceBindingNamespaceLabel] = binding.GetNamespace()
+		workLabel[util.ResourceBindingNameLabel] = binding.GetName()
+	} else {
+		util.MergeLabel(workload, util.ClusterResourceBindingLabel, binding.GetName())
+		workLabel[util.ClusterResourceBindingLabel] = binding.GetName()
+	}
+
+	return workLabel
+}
+
+func recordAppliedOverrides(cops *overridemanager.AppliedOverrides, ops *overridemanager.AppliedOverrides) (map[string]string, error) {
+	annotations := make(map[string]string)
+
+	if cops != nil {
+		appliedBytes, err := cops.MarshalJSON()
+		if err != nil {
+			return nil, err
+		}
+		if appliedBytes != nil {
+			annotations[util.AppliedClusterOverrides] = string(appliedBytes)
+		}
+	}
+
+	if ops != nil {
+		appliedBytes, err := ops.MarshalJSON()
+		if err != nil {
+			return nil, err
+		}
+		if appliedBytes != nil {
+			annotations[util.AppliedOverrides] = string(appliedBytes)
+		}
+	}
+
+	return annotations, nil
+}
+
 func transScheduleResultToMap(scheduleResult []workv1alpha1.TargetCluster) map[string]int64 {
-	var desireReplicaInfos = make(map[string]int64)
+	var desireReplicaInfos = make(map[string]int64, len(scheduleResult))
 	for _, clusterInfo := range scheduleResult {
 		desireReplicaInfos[clusterInfo.Name] = int64(clusterInfo.Replicas)
 	}

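The hunk above is the heart of the cleanup: the formerly //nolint:gocyclo EnsureWork is split into getRSPAndReplicaInfos, mergeLabel and recordAppliedOverrides, and the Work construction is delegated to CreateOrUpdateWork. As a small illustration of one extracted piece, here is a test-style sketch of transScheduleResultToMap (now pre-sized with len(scheduleResult)); the cluster names member1/member2 and the test itself are invented for the example and are not part of the commit:

package helper

import (
	"reflect"
	"testing"

	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
)

// TestTransScheduleResultToMapExample is illustrative only: it exercises
// transScheduleResultToMap from the hunk above with made-up cluster names.
func TestTransScheduleResultToMapExample(t *testing.T) {
	scheduleResult := []workv1alpha1.TargetCluster{
		{Name: "member1", Replicas: 2},
		{Name: "member2", Replicas: 3},
	}

	got := transScheduleResultToMap(scheduleResult)

	want := map[string]int64{"member1": 2, "member2": 3}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("expected %v, got %v", want, got)
	}
}
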
@@ -13,16 +13,16 @@ import (
 	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
 )
 
-// WarpResourceWithWork will warp resource into Work.
-func WarpResourceWithWork(workMeta metav1.ObjectMeta, resource *unstructured.Unstructured) (*workv1alpha1.Work, error) {
+// CreateOrUpdateWork creates a Work object if not exist, or updates if it already exist.
+func CreateOrUpdateWork(client client.Client, workMeta metav1.ObjectMeta, resource *unstructured.Unstructured) error {
 	workload := resource.DeepCopy()
 	workloadJSON, err := workload.MarshalJSON()
 	if err != nil {
 		klog.Errorf("Failed to marshal workload(%s/%s), Error: %v", workload.GetNamespace(), workload.GetName(), err)
-		return nil, err
+		return err
 	}
 
-	return &workv1alpha1.Work{
+	work := &workv1alpha1.Work{
 		ObjectMeta: workMeta,
 		Spec: workv1alpha1.WorkSpec{
 			Workload: workv1alpha1.WorkloadTemplate{

@@ -35,15 +35,13 @@ func WarpResourceWithWork(workMeta metav1.ObjectMeta, resource *unstructured.Uns
 				},
 			},
 		},
-	}, nil
-}
+	}
 
-// CreateOrUpdateWork creates a Work object if not exist, or updates if it already exist.
-func CreateOrUpdateWork(client client.Client, work *workv1alpha1.Work) error {
 	runtimeObject := work.DeepCopy()
 	operationResult, err := controllerutil.CreateOrUpdate(context.TODO(), client, runtimeObject, func() error {
 		runtimeObject.Spec = work.Spec
 		runtimeObject.Labels = work.Labels
+		runtimeObject.Annotations = work.Annotations
 		return nil
 	})
 	if err != nil {

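With WarpResourceWithWork folded into CreateOrUpdateWork, callers now hand over the ObjectMeta and the raw resource in a single call, as reportEndpointSlice does above. A minimal sketch of that caller pattern follows; the function, the Work name, and the execution-space namespace are invented for the example, and only the helper.CreateOrUpdateWork signature comes from this diff:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/karmada-io/karmada/pkg/util/helper"
)

// propagateEndpointSlice is an invented caller used only to illustrate the reworked
// helper: one call now replaces the old WarpResourceWithWork + CreateOrUpdateWork pair.
func propagateEndpointSlice(c client.Client, endpointSlice *unstructured.Unstructured) error {
	workMeta := metav1.ObjectMeta{
		// Real callers derive these with names.GenerateWorkName and
		// names.GenerateExecutionSpaceName, as shown in the hunks above.
		Name:      "endpointslice-example",
		Namespace: "karmada-es-member1",
	}
	return helper.CreateOrUpdateWork(c, workMeta, endpointSlice)
}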