Merge pull request #2935 from jwcesign/code-review-fix-v2
Delete the redundant logic in func GenerateExecutionSpaceName
commit a20dd6e630
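In short, names.GenerateExecutionSpaceName no longer returns an error, so every caller drops its error-handling branch. Below is a minimal, self-contained sketch of the new call pattern; the prefix constant is inferred from the expected test value further down ("karmada-es-member-cluster-normal") and is not copied from the real package.

package main

import "fmt"

// ExecutionSpacePrefix is assumed to be "karmada-es-", inferred from the
// expected test value "karmada-es-member-cluster-normal" in the diff below.
const ExecutionSpacePrefix = "karmada-es-"

// GenerateExecutionSpaceName sketches the simplified helper: a pure string
// concatenation with no failure mode, hence the single return value.
func GenerateExecutionSpaceName(clusterName string) string {
    return ExecutionSpacePrefix + clusterName
}

func main() {
    // Before this change the call returned (string, error) and forced an
    // error-handling branch at every call site; now it is a single assignment.
    executionSpace := GenerateExecutionSpaceName("member-cluster-normal")
    fmt.Println(executionSpace) // karmada-es-member-cluster-normal
}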
@@ -170,10 +170,7 @@ func run(ctx context.Context, opts *options.Options) error {
         return fmt.Errorf("failed to register with karmada control plane: %w", err)
     }

-    executionSpace, err := names.GenerateExecutionSpaceName(opts.ClusterName)
-    if err != nil {
-        return fmt.Errorf("failed to generate execution space name for member cluster %s, err is %v", opts.ClusterName, err)
-    }
+    executionSpace := names.GenerateExecutionSpaceName(opts.ClusterName)

     controllerManager, err := controllerruntime.NewManager(controlPlaneRestConfig, controllerruntime.Options{
         Scheme: gclient.NewSchema(),

@@ -87,11 +87,7 @@ func ensureWork(
         targetCluster := targetClusters[i]
         clonedWorkload := workload.DeepCopy()

-        workNamespace, err := names.GenerateExecutionSpaceName(targetCluster.Name)
-        if err != nil {
-            klog.Errorf("Failed to ensure Work for cluster: %s. Error: %v.", targetCluster.Name, err)
-            return err
-        }
+        workNamespace := names.GenerateExecutionSpaceName(targetCluster.Name)

         if hasScheduledReplica {
             if resourceInterpreter.HookEnabled(clonedWorkload.GroupVersionKind(), configv1alpha1.InterpreterOperationReviseReplica) {

@@ -299,11 +299,7 @@ func (c *Controller) isTargetClusterRemoved(ctx context.Context, cluster *cluste

 // removeExecutionSpace deletes the given execution space
 func (c *Controller) removeExecutionSpace(cluster *clusterv1alpha1.Cluster) error {
-    executionSpaceName, err := names.GenerateExecutionSpaceName(cluster.Name)
-    if err != nil {
-        klog.Errorf("Failed to generate execution space name for member cluster %s, err is %v", cluster.Name, err)
-        return err
-    }
+    executionSpaceName := names.GenerateExecutionSpaceName(cluster.Name)

     executionSpaceObj := &corev1.Namespace{
         ObjectMeta: metav1.ObjectMeta{

@@ -320,14 +316,10 @@ func (c *Controller) removeExecutionSpace(cluster *clusterv1alpha1.Cluster) erro

 // ExecutionSpaceExistForCluster determine whether the execution space exists in the cluster
 func (c *Controller) ExecutionSpaceExistForCluster(clusterName string) (bool, error) {
-    executionSpaceName, err := names.GenerateExecutionSpaceName(clusterName)
-    if err != nil {
-        klog.Errorf("Failed to generate execution space name for member cluster %s, err is %v", clusterName, err)
-        return false, err
-    }
+    executionSpaceName := names.GenerateExecutionSpaceName(clusterName)

     executionSpaceObj := &corev1.Namespace{}
-    err = c.Client.Get(context.TODO(), types.NamespacedName{Name: executionSpaceName}, executionSpaceObj)
+    err := c.Client.Get(context.TODO(), types.NamespacedName{Name: executionSpaceName}, executionSpaceObj)
     if apierrors.IsNotFound(err) {
         klog.V(2).Infof("Execution space(%s) no longer exists", executionSpaceName)
         return false, nil

@@ -369,15 +361,11 @@ func (c *Controller) ensureFinalizer(cluster *clusterv1alpha1.Cluster) (controll

 // createExecutionSpace creates member cluster execution space when member cluster joined
 func (c *Controller) createExecutionSpace(cluster *clusterv1alpha1.Cluster) error {
-    executionSpaceName, err := names.GenerateExecutionSpaceName(cluster.Name)
-    if err != nil {
-        klog.Errorf("Failed to generate execution space name for member cluster %s, err is %v", cluster.Name, err)
-        return err
-    }
+    executionSpaceName := names.GenerateExecutionSpaceName(cluster.Name)

     // create member cluster execution space when member cluster joined
     executionSpaceObj := &corev1.Namespace{}
-    err = c.Client.Get(context.TODO(), types.NamespacedName{Name: executionSpaceName}, executionSpaceObj)
+    err := c.Client.Get(context.TODO(), types.NamespacedName{Name: executionSpaceName}, executionSpaceObj)
     if err != nil {
         if !apierrors.IsNotFound(err) {
             klog.Errorf("Failed to get namespace(%s): %v", executionSpaceName, err)

@@ -390,7 +378,7 @@ func (c *Controller) createExecutionSpace(cluster *clusterv1alpha1.Cluster) erro
                 Name: executionSpaceName,
             },
         }
-        err := c.Client.Create(context.TODO(), executionSpace)
+        err = c.Client.Create(context.TODO(), executionSpace)
         if err != nil {
             klog.Errorf("Failed to create execution space for cluster(%s): %v", cluster.Name, err)
             return err

@@ -219,7 +219,7 @@ func (c *Controller) syncToClusters(clusterName string, work *workv1alpha1.Work)

     if len(errs) > 0 {
         total := len(work.Spec.Workload.Manifests)
-        message := fmt.Sprintf("Failed to apply all manifests (%v/%v): %v", syncSucceedNum, total, errors.NewAggregate(errs).Error())
+        message := fmt.Sprintf("Failed to apply all manifests (%d/%d): %s", syncSucceedNum, total, errors.NewAggregate(errs).Error())
         err := c.updateAppliedCondition(work, metav1.ConditionFalse, "AppliedFailed", message)
         if err != nil {
             klog.Errorf("Failed to update applied status for given work %v, namespace is %v, err is %v", work.Name, work.Namespace, err)

@@ -144,12 +144,7 @@ func (c *SyncController) cleanUpWorks(namespace, name string) error {
 func (c *SyncController) buildWorks(quota *policyv1alpha1.FederatedResourceQuota, clusters []clusterv1alpha1.Cluster) error {
     var errs []error
     for _, cluster := range clusters {
-        workNamespace, err := names.GenerateExecutionSpaceName(cluster.Name)
-        if err != nil {
-            klog.Errorf("Failed to generate execution space name for cluster(%s), error: %v", cluster.Name, err)
-            errs = append(errs, err)
-            continue
-        }
+        workNamespace := names.GenerateExecutionSpaceName(cluster.Name)
         workName := names.GenerateWorkName("ResourceQuota", quota.Name, quota.Namespace)

         resourceQuota := &corev1.ResourceQuota{}

@@ -87,11 +87,7 @@ func (c *HorizontalPodAutoscalerController) buildWorks(hpa *autoscalingv1.Horizo
         return nil
     }
     for _, clusterName := range clusters {
-        workNamespace, err := names.GenerateExecutionSpaceName(clusterName)
-        if err != nil {
-            klog.Errorf("Failed to ensure Work for cluster: %s. Error: %v.", clusterName, err)
-            return err
-        }
+        workNamespace := names.GenerateExecutionSpaceName(clusterName)
         workName := names.GenerateWorkName(hpaObj.GetKind(), hpaObj.GetName(), hpa.GetNamespace())
         objectMeta := metav1.ObjectMeta{
             Name: workName,

@@ -383,10 +383,7 @@ func (c *ServiceExportController) reportEndpointSliceWithEndpointSliceCreateOrUp

 // reportEndpointSlice report EndPointSlice objects to control-plane.
 func reportEndpointSlice(c client.Client, endpointSlice *unstructured.Unstructured, clusterName string) error {
-    executionSpace, err := names.GenerateExecutionSpaceName(clusterName)
-    if err != nil {
-        return err
-    }
+    executionSpace := names.GenerateExecutionSpaceName(clusterName)

     workMeta := metav1.ObjectMeta{
         Name: names.GenerateWorkName(endpointSlice.GetKind(), endpointSlice.GetName(), endpointSlice.GetNamespace()),

@@ -399,7 +396,7 @@ func reportEndpointSlice(c client.Client, endpointSlice *unstructured.Unstructur
         },
     }

-    if err = helper.CreateOrUpdateWork(c, workMeta, endpointSlice); err != nil {
+    if err := helper.CreateOrUpdateWork(c, workMeta, endpointSlice); err != nil {
         return err
     }

@@ -407,13 +404,10 @@ func reportEndpointSlice(c client.Client, endpointSlice *unstructured.Unstructur
 }

 func cleanupWorkWithServiceExportDelete(c client.Client, serviceExportKey keys.FederatedKey) error {
-    executionSpace, err := names.GenerateExecutionSpaceName(serviceExportKey.Cluster)
-    if err != nil {
-        return err
-    }
+    executionSpace := names.GenerateExecutionSpaceName(serviceExportKey.Cluster)

     workList := &workv1alpha1.WorkList{}
-    if err = c.List(context.TODO(), workList, &client.ListOptions{
+    if err := c.List(context.TODO(), workList, &client.ListOptions{
         Namespace:     executionSpace,
         LabelSelector: labels.SelectorFromSet(labels.Set{
             util.ServiceNamespaceLabel: serviceExportKey.Namespace,

@@ -427,7 +421,7 @@ func cleanupWorkWithServiceExportDelete(c client.Client, serviceExportKey keys.F

     var errs []error
     for index, work := range workList.Items {
-        if err = c.Delete(context.TODO(), &workList.Items[index]); err != nil {
+        if err := c.Delete(context.TODO(), &workList.Items[index]); err != nil {
             klog.Errorf("Failed to delete work(%s/%s), Error: %v", work.Namespace, work.Name, err)
             errs = append(errs, err)
         }

@@ -436,17 +430,14 @@ func cleanupWorkWithServiceExportDelete(c client.Client, serviceExportKey keys.F
 }

 func cleanupWorkWithEndpointSliceDelete(c client.Client, endpointSliceKey keys.FederatedKey) error {
-    executionSpace, err := names.GenerateExecutionSpaceName(endpointSliceKey.Cluster)
-    if err != nil {
-        return err
-    }
+    executionSpace := names.GenerateExecutionSpaceName(endpointSliceKey.Cluster)

     workNamespaceKey := types.NamespacedName{
         Namespace: executionSpace,
         Name:      names.GenerateWorkName(endpointSliceKey.Kind, endpointSliceKey.Name, endpointSliceKey.Namespace),
     }
     work := &workv1alpha1.Work{}
-    if err = c.Get(context.TODO(), workNamespaceKey, work); err != nil {
+    if err := c.Get(context.TODO(), workNamespaceKey, work); err != nil {
         if apierrors.IsNotFound(err) {
             return nil
         }

@@ -455,7 +446,7 @@ func cleanupWorkWithEndpointSliceDelete(c client.Client, endpointSliceKey keys.F
         return err
     }

-    if err = c.Delete(context.TODO(), work); err != nil {
+    if err := c.Delete(context.TODO(), work); err != nil {
         klog.Errorf("Failed to delete work(%s), Error: %v", workNamespaceKey, err)
         return err
     }

@@ -129,12 +129,7 @@ func (c *Controller) buildWorks(namespace *corev1.Namespace, clusters []clusterv
                 return
             }

-            workNamespace, err := names.GenerateExecutionSpaceName(cluster.Name)
-            if err != nil {
-                klog.Errorf("Failed to generate execution space name for member cluster %s, err is %v", cluster.Name, err)
-                ch <- fmt.Errorf("sync namespace(%s) to cluster(%s) failed due to: %v", clonedNamespaced.GetName(), cluster.GetName(), err)
-                return
-            }
+            workNamespace := names.GenerateExecutionSpaceName(cluster.Name)

             workName := names.GenerateWorkName(namespaceObj.GetKind(), namespaceObj.GetName(), namespaceObj.GetNamespace())
             objectMeta := metav1.ObjectMeta{

@@ -239,10 +239,7 @@ func (c *WorkStatusController) syncWorkStatus(key util.QueueKey) error {
 }

 func (c *WorkStatusController) handleDeleteEvent(key keys.FederatedKey) error {
-    executionSpace, err := names.GenerateExecutionSpaceName(key.Cluster)
-    if err != nil {
-        return err
-    }
+    executionSpace := names.GenerateExecutionSpaceName(key.Cluster)

     // Given the workload might has been deleted from informer cache, so that we can't get work object by it's label,
     // we have to get work by naming rule as the work's name is generated by the workload's kind, name and namespace.

@@ -190,11 +190,7 @@ func (c *Controller) buildImpersonationClusterRoleBinding(cluster *clusterv1alph
 }

 func (c *Controller) buildWorks(cluster *clusterv1alpha1.Cluster, obj *unstructured.Unstructured) error {
-    workNamespace, err := names.GenerateExecutionSpaceName(cluster.Name)
-    if err != nil {
-        klog.Errorf("Failed to generate execution space name for member cluster %s, err is %v", cluster.Name, err)
-        return err
-    }
+    workNamespace := names.GenerateExecutionSpaceName(cluster.Name)

     clusterRoleBindingWorkName := names.GenerateWorkName(obj.GetKind(), obj.GetName(), obj.GetNamespace())
     objectMeta := metav1.ObjectMeta{

@@ -209,7 +205,7 @@ func (c *Controller) buildWorks(cluster *clusterv1alpha1.Cluster, obj *unstructu
     util.MergeLabel(obj, workv1alpha1.WorkNamespaceLabel, workNamespace)
     util.MergeLabel(obj, workv1alpha1.WorkNameLabel, clusterRoleBindingWorkName)

-    if err = helper.CreateOrUpdateWork(c.Client, objectMeta, obj); err != nil {
+    if err := helper.CreateOrUpdateWork(c.Client, objectMeta, obj); err != nil {
         return err
     }

@@ -37,12 +37,8 @@ const derivedServicePrefix = "derived"
 const estimatorServicePrefix = "karmada-scheduler-estimator"

 // GenerateExecutionSpaceName generates execution space name for the given member cluster
-func GenerateExecutionSpaceName(clusterName string) (string, error) {
-    if clusterName == "" {
-        return "", fmt.Errorf("the member cluster name is empty")
-    }
-    executionSpace := ExecutionSpacePrefix + clusterName
-    return executionSpace, nil
+func GenerateExecutionSpaceName(clusterName string) string {
+    return ExecutionSpacePrefix + clusterName
 }

 // GetClusterName returns member cluster name for the given execution space

@@ -21,23 +21,13 @@ func TestGenerateExecutionSpaceName(t *testing.T) {
         wantErr bool
     }{
         {name: "normal cluster name",
-            args:    args{clusterName: "member-cluster-normal"},
-            want:    "karmada-es-member-cluster-normal",
-            wantErr: false,
-        },
-        {name: "empty member cluster name",
-            args:    args{clusterName: ""},
-            want:    "",
-            wantErr: true,
+            args: args{clusterName: "member-cluster-normal"},
+            want: "karmada-es-member-cluster-normal",
         },
     }
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            got, err := GenerateExecutionSpaceName(tt.args.clusterName)
-            if (err != nil) != tt.wantErr {
-                t.Errorf("GenerateExecutionSpaceName() error = %v, wantErr %v", err, tt.wantErr)
-                return
-            }
+            got := GenerateExecutionSpaceName(tt.args.clusterName)
             if got != tt.want {
                 t.Errorf("GenerateExecutionSpaceName() got = %v, want %v", got, tt.want)
             }

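One behavioral note implied by the test change above: with the empty-name check gone, an empty cluster name is no longer rejected and now simply yields the bare prefix. A small standalone test sketch of that assumption follows (hypothetical package and helper names, not part of this commit):

package names_sketch

import "testing"

const executionSpacePrefix = "karmada-es-" // assumed prefix, matching the expected value in the table test above

// generateExecutionSpaceName is a local stand-in for names.GenerateExecutionSpaceName.
func generateExecutionSpaceName(clusterName string) string {
    return executionSpacePrefix + clusterName
}

// TestEmptyClusterName documents the consequence of dropping the error return:
// an empty cluster name is no longer rejected and simply yields the bare prefix.
func TestEmptyClusterName(t *testing.T) {
    if got := generateExecutionSpaceName(""); got != executionSpacePrefix {
        t.Errorf("generateExecutionSpaceName(%q) = %q, want %q", "", got, executionSpacePrefix)
    }
}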
@@ -242,7 +242,7 @@ var _ = ginkgo.Describe("Karmadactl promote testing", func() {
             serviceName = serviceNamePrefix + rand.String(RandomStrLength)
             service = helper.NewService(serviceNamespace, serviceName, corev1.ServiceTypeNodePort)
             workName = names.GenerateWorkName(util.ServiceKind, serviceName, serviceNamespace)
-            esName, _ = names.GenerateExecutionSpaceName(member1)
+            esName = names.GenerateExecutionSpaceName(member1)
         })

         ginkgo.AfterEach(func() {

@@ -202,8 +202,7 @@ var _ = ginkgo.Describe("Resource interpreter webhook testing", func() {
             framework.UpdateWorkload(clusterDynamicClient, memberWorkload, cluster, "status")

             workName := names.GenerateWorkName(workload.Kind, workload.Name, workload.Namespace)
-            workNamespace, err := names.GenerateExecutionSpaceName(cluster)
-            gomega.Expect(err).Should(gomega.BeNil())
+            workNamespace := names.GenerateExecutionSpaceName(cluster)

             gomega.Eventually(func(g gomega.Gomega) (bool, error) {
                 work, err := karmadaClient.WorkV1alpha1().Works(workNamespace).Get(context.TODO(), workName, metav1.GetOptions{})