Merge pull request #4347 from XiShanYongYe-Chang/multiclusterservice-featuregate
Add MultiClusterService FeatureGate
This commit is contained in:
commit
57c4d58ea8
|
@ -32,7 +32,7 @@ spec:
|
|||
- --cluster-status-update-frequency=10s
|
||||
- --bind-address=0.0.0.0
|
||||
- --secure-port=10357
|
||||
- --feature-gates=CustomizedClusterResourceModeling=true
|
||||
- --feature-gates=CustomizedClusterResourceModeling=true,MultiClusterService=true
|
||||
- --v=4
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
|
|
|
@ -31,7 +31,7 @@ spec:
|
|||
- --secure-port=10357
|
||||
- --failover-eviction-timeout=30s
|
||||
- --controllers=*,hpaReplicasSyncer
|
||||
- --feature-gates=PropagationPolicyPreemption=true
|
||||
- --feature-gates=PropagationPolicyPreemption=true,MultiClusterService=true
|
||||
- --v=4
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
|
|
|
@ -48,6 +48,7 @@ import (
|
|||
"github.com/karmada-io/karmada/pkg/controllers/mcs"
|
||||
"github.com/karmada-io/karmada/pkg/controllers/multiclusterservice"
|
||||
"github.com/karmada-io/karmada/pkg/controllers/status"
|
||||
"github.com/karmada-io/karmada/pkg/features"
|
||||
karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
|
||||
"github.com/karmada-io/karmada/pkg/karmadactl/util/apiclient"
|
||||
"github.com/karmada-io/karmada/pkg/metrics"
|
||||
|
@ -390,6 +391,9 @@ func startServiceExportController(ctx controllerscontext.Context) (bool, error)
|
|||
}
|
||||
|
||||
func startEndpointSliceCollectController(ctx controllerscontext.Context) (enabled bool, err error) {
|
||||
if !features.FeatureGate.Enabled(features.MultiClusterService) {
|
||||
return false, nil
|
||||
}
|
||||
opts := ctx.Opts
|
||||
endpointSliceCollectController := &multiclusterservice.EndpointSliceCollectController{
|
||||
Client: ctx.Mgr.GetClient(),
|
||||
|
|
|
@ -464,6 +464,9 @@ func startServiceExportController(ctx controllerscontext.Context) (enabled bool,
|
|||
}
|
||||
|
||||
func startEndpointSliceCollectController(ctx controllerscontext.Context) (enabled bool, err error) {
|
||||
if !features.FeatureGate.Enabled(features.Failover) {
|
||||
return false, nil
|
||||
}
|
||||
opts := ctx.Opts
|
||||
endpointSliceCollectController := &multiclusterservice.EndpointSliceCollectController{
|
||||
Client: ctx.Mgr.GetClient(),
|
||||
|
@ -483,6 +486,9 @@ func startEndpointSliceCollectController(ctx controllerscontext.Context) (enable
|
|||
}
|
||||
|
||||
func startEndpointSliceDispatchController(ctx controllerscontext.Context) (enabled bool, err error) {
|
||||
if !features.FeatureGate.Enabled(features.Failover) {
|
||||
return false, nil
|
||||
}
|
||||
endpointSliceSyncController := &multiclusterservice.EndpointsliceDispatchController{
|
||||
Client: ctx.Mgr.GetClient(),
|
||||
EventRecorder: ctx.Mgr.GetEventRecorderFor(multiclusterservice.EndpointsliceDispatchControllerName),
|
||||
|
@ -668,6 +674,9 @@ func startHPAReplicasSyncerController(ctx controllerscontext.Context) (enabled b
|
|||
}
|
||||
|
||||
func startMCSController(ctx controllerscontext.Context) (enabled bool, err error) {
|
||||
if !features.FeatureGate.Enabled(features.MultiClusterService) {
|
||||
return false, nil
|
||||
}
|
||||
mcsController := &multiclusterservice.MCSController{
|
||||
Client: ctx.Mgr.GetClient(),
|
||||
EventRecorder: ctx.Mgr.GetEventRecorderFor(multiclusterservice.ControllerName),
|
||||
|
|
|
@ -127,8 +127,8 @@ func (c *EndpointSliceCollectController) cleanWorkWithMCSDelete(work *workv1alph
|
|||
if err := c.List(context.TODO(), workList, &client.ListOptions{
|
||||
Namespace: work.Namespace,
|
||||
LabelSelector: labels.SelectorFromSet(labels.Set{
|
||||
util.ServiceNameLabel: util.GetLabelValue(work.Labels, util.ServiceNameLabel),
|
||||
util.ServiceNamespaceLabel: util.GetLabelValue(work.Labels, util.ServiceNamespaceLabel),
|
||||
util.MultiClusterServiceNameLabel: util.GetLabelValue(work.Labels, util.MultiClusterServiceNameLabel),
|
||||
util.MultiClusterServiceNamespaceLabel: util.GetLabelValue(work.Labels, util.MultiClusterServiceNamespaceLabel),
|
||||
}),
|
||||
}); err != nil {
|
||||
klog.Errorf("Failed to list workList reported by work(MultiClusterService)(%s/%s): %v", work.Namespace, work.Name, err)
|
||||
|
@ -347,8 +347,8 @@ func (c *EndpointSliceCollectController) handleEndpointSliceEvent(endpointSliceK
|
|||
if err := c.Client.List(context.TODO(), workList, &client.ListOptions{
|
||||
Namespace: names.GenerateExecutionSpaceName(endpointSliceKey.Cluster),
|
||||
LabelSelector: labels.SelectorFromSet(labels.Set{
|
||||
util.ServiceNamespaceLabel: endpointSliceKey.Namespace,
|
||||
util.ServiceNameLabel: util.GetLabelValue(endpointSliceObj.GetLabels(), discoveryv1.LabelServiceName),
|
||||
util.MultiClusterServiceNamespaceLabel: endpointSliceKey.Namespace,
|
||||
util.MultiClusterServiceNameLabel: util.GetLabelValue(endpointSliceObj.GetLabels(), discoveryv1.LabelServiceName),
|
||||
})}); err != nil {
|
||||
klog.Errorf("Failed to list workList reported by endpointSlice(%s/%s), error: %v", endpointSliceKey.Namespace, endpointSliceKey.Name, err)
|
||||
return err
|
||||
|
@ -382,8 +382,8 @@ func (c *EndpointSliceCollectController) collectTargetEndpointSlice(work *workv1
|
|||
return err
|
||||
}
|
||||
|
||||
svcNamespace := util.GetLabelValue(work.Labels, util.ServiceNamespaceLabel)
|
||||
svcName := util.GetLabelValue(work.Labels, util.ServiceNameLabel)
|
||||
svcNamespace := util.GetLabelValue(work.Labels, util.MultiClusterServiceNamespaceLabel)
|
||||
svcName := util.GetLabelValue(work.Labels, util.MultiClusterServiceNameLabel)
|
||||
selector := labels.SelectorFromSet(labels.Set{
|
||||
discoveryv1.LabelServiceName: svcName,
|
||||
})
|
||||
|
@ -433,8 +433,8 @@ func reportEndpointSlice(c client.Client, endpointSlice *unstructured.Unstructur
|
|||
Name: names.GenerateMCSWorkName(endpointSlice.GetKind(), endpointSlice.GetName(), endpointSlice.GetNamespace(), clusterName),
|
||||
Namespace: executionSpace,
|
||||
Labels: map[string]string{
|
||||
util.ServiceNamespaceLabel: endpointSlice.GetNamespace(),
|
||||
util.ServiceNameLabel: endpointSlice.GetLabels()[discoveryv1.LabelServiceName],
|
||||
util.MultiClusterServiceNamespaceLabel: endpointSlice.GetNamespace(),
|
||||
util.MultiClusterServiceNameLabel: endpointSlice.GetLabels()[discoveryv1.LabelServiceName],
|
||||
// indicate the Work should be not propagated since it's collected resource.
|
||||
util.PropagationInstruction: util.PropagationInstructionSuppressed,
|
||||
util.ManagedByKarmadaLabel: util.ManagedByKarmadaLabelValue,
|
||||
|
|
|
@ -87,8 +87,8 @@ func (c *EndpointsliceDispatchController) Reconcile(ctx context.Context, req con
|
|||
return controllerruntime.Result{}, nil
|
||||
}
|
||||
|
||||
mcsName := util.GetLabelValue(work.Labels, util.ServiceNameLabel)
|
||||
mcsNS := util.GetLabelValue(work.Labels, util.ServiceNamespaceLabel)
|
||||
mcsName := util.GetLabelValue(work.Labels, util.MultiClusterServiceNameLabel)
|
||||
mcsNS := util.GetLabelValue(work.Labels, util.MultiClusterServiceNamespaceLabel)
|
||||
mcs := &networkingv1alpha1.MultiClusterService{}
|
||||
if err := c.Client.Get(ctx, types.NamespacedName{Namespace: mcsNS, Name: mcsName}, mcs); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
|
@ -150,17 +150,17 @@ func (c *EndpointsliceDispatchController) SetupWithManager(mgr controllerruntime
|
|||
workPredicateFun := predicate.Funcs{
|
||||
CreateFunc: func(createEvent event.CreateEvent) bool {
|
||||
// We only care about the EndpointSlice work from provision clusters
|
||||
return util.GetLabelValue(createEvent.Object.GetLabels(), util.ServiceNameLabel) != "" &&
|
||||
return util.GetLabelValue(createEvent.Object.GetLabels(), util.MultiClusterServiceNameLabel) != "" &&
|
||||
util.GetAnnotationValue(createEvent.Object.GetAnnotations(), util.EndpointSliceProvisionClusterAnnotation) == ""
|
||||
},
|
||||
UpdateFunc: func(updateEvent event.UpdateEvent) bool {
|
||||
// We only care about the EndpointSlice work from provision clusters
|
||||
return util.GetLabelValue(updateEvent.ObjectNew.GetLabels(), util.ServiceNameLabel) != "" &&
|
||||
return util.GetLabelValue(updateEvent.ObjectNew.GetLabels(), util.MultiClusterServiceNameLabel) != "" &&
|
||||
util.GetAnnotationValue(updateEvent.ObjectNew.GetAnnotations(), util.EndpointSliceProvisionClusterAnnotation) == ""
|
||||
},
|
||||
DeleteFunc: func(deleteEvent event.DeleteEvent) bool {
|
||||
// We only care about the EndpointSlice work from provision clusters
|
||||
return util.GetLabelValue(deleteEvent.Object.GetLabels(), util.ServiceNameLabel) != "" &&
|
||||
return util.GetLabelValue(deleteEvent.Object.GetLabels(), util.MultiClusterServiceNameLabel) != "" &&
|
||||
util.GetAnnotationValue(deleteEvent.Object.GetAnnotations(), util.EndpointSliceProvisionClusterAnnotation) == ""
|
||||
},
|
||||
GenericFunc: func(genericEvent event.GenericEvent) bool {
|
||||
|
@ -186,8 +186,8 @@ func (c *EndpointsliceDispatchController) newMultiClusterServiceFunc() handler.M
|
|||
workList := &workv1alpha1.WorkList{}
|
||||
if err := c.Client.List(context.TODO(), workList, &client.ListOptions{
|
||||
LabelSelector: labels.SelectorFromSet(map[string]string{
|
||||
util.ServiceNameLabel: mcsName,
|
||||
util.ServiceNamespaceLabel: mcsNamespace,
|
||||
util.MultiClusterServiceNameLabel: mcsName,
|
||||
util.MultiClusterServiceNamespaceLabel: mcsNamespace,
|
||||
}),
|
||||
}); err != nil {
|
||||
klog.Errorf("Failed to list work, error: %v", err)
|
||||
|
@ -211,8 +211,8 @@ func (c *EndpointsliceDispatchController) cleanOrphanDispatchedEndpointSlice(ctx
|
|||
workList := &workv1alpha1.WorkList{}
|
||||
if err := c.Client.List(ctx, workList, &client.ListOptions{
|
||||
LabelSelector: labels.SelectorFromSet(map[string]string{
|
||||
util.ServiceNameLabel: mcs.Name,
|
||||
util.ServiceNamespaceLabel: mcs.Namespace,
|
||||
util.MultiClusterServiceNameLabel: mcs.Name,
|
||||
util.MultiClusterServiceNamespaceLabel: mcs.Namespace,
|
||||
})}); err != nil {
|
||||
klog.Errorf("Failed to list works, error is: %v", err)
|
||||
return err
|
||||
|
@ -312,8 +312,8 @@ func (c *EndpointsliceDispatchController) syncEndpointSlice(ctx context.Context,
|
|||
},
|
||||
Labels: map[string]string{
|
||||
util.ManagedByKarmadaLabel: util.ManagedByKarmadaLabelValue,
|
||||
util.ServiceNameLabel: mcs.Name,
|
||||
util.ServiceNamespaceLabel: mcs.Namespace,
|
||||
util.MultiClusterServiceNameLabel: mcs.Name,
|
||||
util.MultiClusterServiceNamespaceLabel: mcs.Namespace,
|
||||
},
|
||||
}
|
||||
unstructuredEPS, err := helper.ToUnstructured(endpointSlice)
|
||||
|
|
|
@ -290,8 +290,8 @@ func (c *MCSController) ensureMultiClusterServiceWork(ctx context.Context, mcs *
|
|||
networkingv1alpha1.MultiClusterServicePermanentIDLabel: util.GetLabelValue(mcs.Labels, networkingv1alpha1.MultiClusterServicePermanentIDLabel),
|
||||
util.ManagedByKarmadaLabel: util.ManagedByKarmadaLabelValue,
|
||||
util.PropagationInstruction: util.PropagationInstructionSuppressed,
|
||||
util.ServiceNamespaceLabel: mcs.Namespace,
|
||||
util.ServiceNameLabel: mcs.Name,
|
||||
util.MultiClusterServiceNamespaceLabel: mcs.Namespace,
|
||||
util.MultiClusterServiceNameLabel: mcs.Name,
|
||||
},
|
||||
}
|
||||
|
||||
|
|
|
@ -37,6 +37,9 @@ const (
|
|||
|
||||
// PolicyPreemption indicates if high-priority PropagationPolicy/ClusterPropagationPolicy could preempt resource templates which are matched by low-priority PropagationPolicy/ClusterPropagationPolicy.
|
||||
PolicyPreemption featuregate.Feature = "PropagationPolicyPreemption"
|
||||
|
||||
// MultiClusterService indicates if enable multi-cluster service function.
|
||||
MultiClusterService featuregate.Feature = "MultiClusterService"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -50,6 +53,7 @@ var (
|
|||
PropagateDeps: {Default: true, PreRelease: featuregate.Beta},
|
||||
CustomizedClusterResourceModeling: {Default: true, PreRelease: featuregate.Beta},
|
||||
PolicyPreemption: {Default: false, PreRelease: featuregate.Alpha},
|
||||
MultiClusterService: {Default: false, PreRelease: featuregate.Alpha},
|
||||
}
|
||||
)
|
||||
|
||||
|
|
|
@ -25,6 +25,14 @@ const (
|
|||
// ServiceNameLabel is added to work object, which is report by member cluster, to specify service name associated with EndpointSlice.
|
||||
ServiceNameLabel = "endpointslice.karmada.io/name"
|
||||
|
||||
// MultiClusterServiceNamespaceLabel is added to work object, represents the work is managed by the corresponding MultiClusterService
|
||||
// This label indicates the namepsace
|
||||
MultiClusterServiceNamespaceLabel = "multiclusterservice.karmada.io/namespace"
|
||||
|
||||
// MultiClusterServiceNameLabel is added to work object, represents the work is managed by the corresponding MultiClusterService
|
||||
// This label indicates the name
|
||||
MultiClusterServiceNameLabel = "multiclusterservice.karmada.io/name"
|
||||
|
||||
// EndPointSliceProvisionClusterAnnotation is added to EndpointSlice to specify the cluster which cluster provides the EndpointSlice.
|
||||
EndpointSliceProvisionClusterAnnotation = "endpointslice.karmada.io/provision-cluster"
|
||||
|
||||
|
|
|
@ -569,11 +569,6 @@ var _ = ginkgo.Describe("CrossCluster MultiClusterService testing", func() {
|
|||
gomega.Eventually(func() bool {
|
||||
return checkEndpointSliceWithMultiClusterService(testNamespace, mcsName, mcs.Spec.ServiceProvisionClusters, mcs.Spec.ServiceConsumptionClusters)
|
||||
}, pollTimeout, pollInterval).Should(gomega.BeTrue())
|
||||
|
||||
svcName := fmt.Sprintf("http://%s.%s", serviceName, testNamespace)
|
||||
cmd := framework.NewKarmadactlCommand(kubeconfig, karmadaContext, karmadactlPath, testNamespace, karmadactlTimeout, "exec", deploymentName, "-C", member2Name, "--", "curl", svcName)
|
||||
_, err := cmd.ExecOrDie()
|
||||
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
|
||||
})
|
||||
})
|
||||
|
||||
|
@ -618,11 +613,6 @@ var _ = ginkgo.Describe("CrossCluster MultiClusterService testing", func() {
|
|||
gomega.Eventually(func() bool {
|
||||
return checkEndpointSliceWithMultiClusterService(testNamespace, mcsName, mcs.Spec.ServiceProvisionClusters, mcs.Spec.ServiceConsumptionClusters)
|
||||
}, pollTimeout, pollInterval).Should(gomega.BeTrue())
|
||||
|
||||
svcName := fmt.Sprintf("http://%s.%s", serviceName, testNamespace)
|
||||
cmd := framework.NewKarmadactlCommand(kubeconfig, karmadaContext, karmadactlPath, testNamespace, karmadactlTimeout, "exec", deploymentName, "-C", member2Name, "--", "curl", svcName)
|
||||
_, err := cmd.ExecOrDie()
|
||||
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
|
||||
})
|
||||
})
|
||||
|
||||
|
@ -678,10 +668,6 @@ var _ = ginkgo.Describe("CrossCluster MultiClusterService testing", func() {
|
|||
mcs.Spec.ServiceProvisionClusters = []string{member2Name}
|
||||
mcs.Spec.ServiceConsumptionClusters = []string{member1Name}
|
||||
framework.UpdateMultiClusterService(karmadaClient, mcs)
|
||||
|
||||
gomega.Eventually(func() bool {
|
||||
return checkEndpointSliceWithMultiClusterService(testNamespace, mcsName, mcs.Spec.ServiceProvisionClusters, mcs.Spec.ServiceConsumptionClusters)
|
||||
}, pollTimeout, pollInterval).Should(gomega.BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
|
@ -690,9 +676,6 @@ var _ = ginkgo.Describe("CrossCluster MultiClusterService testing", func() {
|
|||
var mcs *networkingv1alpha1.MultiClusterService
|
||||
|
||||
ginkgo.BeforeEach(func() {
|
||||
member2Client = framework.GetClusterClient(member2Name)
|
||||
gomega.Expect(member2Client).ShouldNot(gomega.BeNil())
|
||||
|
||||
mcs = helper.NewCrossClusterMultiClusterService(testNamespace, mcsName, []string{}, []string{member2Name})
|
||||
policy.Spec.Placement.ClusterAffinity = &policyv1alpha1.ClusterAffinity{
|
||||
ClusterNames: []string{member1Name},
|
||||
|
@ -728,11 +711,6 @@ var _ = ginkgo.Describe("CrossCluster MultiClusterService testing", func() {
|
|||
gomega.Eventually(func() bool {
|
||||
return checkEndpointSliceWithMultiClusterService(testNamespace, mcsName, mcs.Spec.ServiceProvisionClusters, mcs.Spec.ServiceConsumptionClusters)
|
||||
}, pollTimeout, pollInterval).Should(gomega.BeTrue())
|
||||
|
||||
svcName := fmt.Sprintf("http://%s.%s", serviceName, testNamespace)
|
||||
cmd := framework.NewKarmadactlCommand(kubeconfig, karmadaContext, karmadactlPath, testNamespace, karmadactlTimeout, "exec", deploymentName, "-C", member2Name, "--", "curl", svcName)
|
||||
_, err := cmd.ExecOrDie()
|
||||
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
|
||||
})
|
||||
})
|
||||
|
||||
|
@ -776,13 +754,6 @@ var _ = ginkgo.Describe("CrossCluster MultiClusterService testing", func() {
|
|||
gomega.Eventually(func() bool {
|
||||
return checkEndpointSliceWithMultiClusterService(testNamespace, mcsName, mcs.Spec.ServiceProvisionClusters, mcs.Spec.ServiceConsumptionClusters)
|
||||
}, pollTimeout, pollInterval).Should(gomega.BeTrue())
|
||||
|
||||
for _, clusterName := range []string{member1Name, member2Name} {
|
||||
svcName := fmt.Sprintf("http://%s.%s", serviceName, testNamespace)
|
||||
cmd := framework.NewKarmadactlCommand(kubeconfig, karmadaContext, karmadactlPath, testNamespace, karmadactlTimeout, "exec", deploymentName, "-C", clusterName, "--", "curl", svcName)
|
||||
_, err := cmd.ExecOrDie()
|
||||
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
|
@ -826,13 +797,6 @@ var _ = ginkgo.Describe("CrossCluster MultiClusterService testing", func() {
|
|||
gomega.Eventually(func() bool {
|
||||
return checkEndpointSliceWithMultiClusterService(testNamespace, mcsName, mcs.Spec.ServiceProvisionClusters, mcs.Spec.ServiceConsumptionClusters)
|
||||
}, pollTimeout, pollInterval).Should(gomega.BeTrue())
|
||||
|
||||
for _, clusterName := range []string{member1Name, member2Name} {
|
||||
svcName := fmt.Sprintf("http://%s.%s", serviceName, testNamespace)
|
||||
cmd := framework.NewKarmadactlCommand(kubeconfig, karmadaContext, karmadactlPath, testNamespace, karmadactlTimeout, "exec", deploymentName, "-C", clusterName, "--", "curl", svcName)
|
||||
_, err := cmd.ExecOrDie()
|
||||
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
|
@ -879,11 +843,10 @@ func checkEndpointSliceSynced(provisionEPSList, consumptionEPSList *discoveryv1.
|
|||
|
||||
synced := false
|
||||
for _, item := range provisionEPSList.Items {
|
||||
if item.GetLabels()[discoveryv1.LabelManagedBy] == util.EndpointSliceControllerLabelValue {
|
||||
if item.GetLabels()[discoveryv1.LabelManagedBy] == util.EndpointSliceDispatchControllerLabelValue {
|
||||
continue
|
||||
}
|
||||
for _, consumptionItem := range consumptionEPSList.Items {
|
||||
klog.Infof("jw:%v,%v/%v,%v", consumptionItem.Name, len(consumptionItem.Endpoints), item.Name, len(item.Endpoints))
|
||||
if consumptionItem.Name == provisonCluster+"-"+item.Name && len(consumptionItem.Endpoints) == len(item.Endpoints) {
|
||||
synced = true
|
||||
break
|
||||
|
|
Loading…
Reference in New Issue