Merge pull request #2975 from Poor12/fix-region

[bugfix] when ReplicaDivisionPreference is Weighted and WeightPreference is nil
This commit is contained in:
karmada-bot 2022-12-27 18:16:52 +08:00 committed by GitHub
commit 6af94e239d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 168 additions and 11 deletions

View File

@ -235,20 +235,11 @@ func (info *GroupClustersInfo) generateProviderInfo(spreadConstraints []policyv1
}
// isTopologyIgnored reports whether the scheduler can skip topology grouping
// for the given placement's spread constraints.
//
// NOTE(review): this span is a rendered diff hunk (see the "@ -235,20 +235,11"
// header above). The lines down to the first "return false" are the REMOVED
// pre-refactor body; the trailing call to shouldIgnoreSpreadConstraint is the
// ADDED replacement. The two versions are interleaved by the diff rendering,
// which is why a second return statement follows "return false".
func isTopologyIgnored(placement *policyv1alpha1.Placement) bool {
strategy := placement.ReplicaScheduling
spreadConstraints := placement.SpreadConstraints
// Topology is ignored when no spread constraints are declared, or when the
// single constraint spreads by cluster only (no region/zone/provider grouping).
if len(spreadConstraints) == 0 || (len(spreadConstraints) == 1 && spreadConstraints[0].SpreadByField == policyv1alpha1.SpreadByFieldCluster) {
return true
}
// If the replica division preference is 'static weighted', ignore the declaration specified by spread constraints.
if strategy != nil && strategy.ReplicaSchedulingType == policyv1alpha1.ReplicaSchedulingTypeDivided &&
strategy.ReplicaDivisionPreference == policyv1alpha1.ReplicaDivisionPreferenceWeighted &&
(strategy.WeightPreference == nil ||
len(strategy.WeightPreference.StaticWeightList) != 0 && strategy.WeightPreference.DynamicWeight == "") {
return true
}
return false
// Post-refactor body: delegate to the shared helper so the static-weighted
// check lives in one place.
return shouldIgnoreSpreadConstraint(placement)
}

View File

@ -49,7 +49,8 @@ func shouldIgnoreSpreadConstraint(placement *policyv1alpha1.Placement) bool {
// If the replica division preference is 'static weighted', ignore the declaration specified by spread constraints.
if strategy != nil && strategy.ReplicaSchedulingType == policyv1alpha1.ReplicaSchedulingTypeDivided &&
strategy.ReplicaDivisionPreference == policyv1alpha1.ReplicaDivisionPreferenceWeighted &&
(strategy.WeightPreference != nil && len(strategy.WeightPreference.StaticWeightList) != 0 && strategy.WeightPreference.DynamicWeight == "") {
(strategy.WeightPreference == nil ||
len(strategy.WeightPreference.StaticWeightList) != 0 && strategy.WeightPreference.DynamicWeight == "") {
return true
}

View File

@ -314,3 +314,25 @@ func LoadRESTClientConfig(kubeconfig string, context string) (*rest.Config, erro
loader,
).ClientConfig()
}
// SetClusterRegion sets .Spec.Region field for Cluster object.
// It polls every 2 seconds for up to 10 seconds, transparently retrying when
// the update hits an optimistic-concurrency conflict (stale resourceVersion).
func SetClusterRegion(c client.Client, clusterName string, regionName string) error {
	apply := func() (bool, error) {
		cluster := &clusterv1alpha1.Cluster{}
		if err := c.Get(context.TODO(), client.ObjectKey{Name: clusterName}, cluster); err != nil {
			// NOTE(review): reads normally cannot return a conflict; this check
			// mirrors the update path below — confirm it is intentional.
			if apierrors.IsConflict(err) {
				return false, nil
			}
			return false, err
		}
		cluster.Spec.Region = regionName
		err := c.Update(context.TODO(), cluster)
		switch {
		case err == nil:
			// Region persisted; stop polling.
			return true, nil
		case apierrors.IsConflict(err):
			// Someone else updated the Cluster first; re-fetch and retry.
			return false, nil
		default:
			return false, err
		}
	}
	return wait.PollImmediate(2*time.Second, 10*time.Second, apply)
}

View File

@ -0,0 +1,143 @@
package e2e
import (
"context"
"strconv"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/rand"
"sigs.k8s.io/controller-runtime/pkg/client"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/util/names"
"github.com/karmada-io/karmada/test/e2e/framework"
testhelper "github.com/karmada-io/karmada/test/helper"
)
// E2E spec: verifies that a Duplicated-replica deployment spreads across
// multiple regions per the policy's SpreadConstraints, and that shrinking the
// region MaxGroups/MinGroups reschedules it down to fewer regions.
// Runs serially because it mutates Cluster .Spec.Region on the control plane.
var _ = framework.SerialDescribe("spread-by-region testing", func() {
ginkgo.Context("Deployment propagation testing", func() {
var policyNamespace, policyName string
var deploymentNamespace, deploymentName string
var deployment *appsv1.Deployment
var regionGroups, clusterGroups, updatedRegionGroups, numOfRegionClusters int
var policy *policyv1alpha1.PropagationPolicy
// regionClusters collects the clusters that were labeled with a region so
// cleanup can clear them.
// NOTE(review): never reset between specs — fine with a single It, but would
// accumulate stale names if more Its were added to this Context.
var regionClusters []string
// First BeforeEach: build the deployment and a policy that spreads across
// regionGroups (2) regions with clusterGroups (1) cluster per group.
ginkgo.BeforeEach(func() {
policyNamespace = testNamespace
policyName = deploymentNamePrefix + rand.String(RandomStrLength)
deploymentNamespace = testNamespace
deploymentName = policyName
deployment = testhelper.NewDeployment(deploymentNamespace, deploymentName)
clusterGroups = 1
regionGroups = 2
updatedRegionGroups = 1
numOfRegionClusters = 2
policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
{
APIVersion: deployment.APIVersion,
Kind: deployment.Kind,
Name: deployment.Name,
},
}, policyv1alpha1.Placement{
ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDuplicated,
},
SpreadConstraints: []policyv1alpha1.SpreadConstraint{
{
SpreadByField: policyv1alpha1.SpreadByFieldCluster,
MaxGroups: clusterGroups,
MinGroups: clusterGroups,
},
{
SpreadByField: policyv1alpha1.SpreadByFieldRegion,
MaxGroups: regionGroups,
MinGroups: regionGroups,
},
},
})
})
// Second BeforeEach: assign a distinct region ("2", then "1") to the first
// numOfRegionClusters member clusters, and register cleanup that clears the
// region field afterwards.
ginkgo.BeforeEach(func() {
clusters := framework.ClusterNames()
temp := numOfRegionClusters
for _, clusterName := range clusters {
if temp > 0 {
// strconv.Itoa(temp) yields a unique region name per cluster.
err := framework.SetClusterRegion(controlPlaneClient, clusterName, strconv.Itoa(temp))
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
regionClusters = append(regionClusters, clusterName)
temp--
}
}
ginkgo.DeferCleanup(func() {
// Restore the clusters to having no region so later suites are unaffected.
for _, clusterName := range regionClusters {
err := framework.SetClusterRegion(controlPlaneClient, clusterName, "")
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
}
})
})
// Third BeforeEach: create the policy and deployment; remove both on cleanup.
ginkgo.BeforeEach(func() {
framework.CreatePropagationPolicy(karmadaClient, policy)
framework.CreateDeployment(kubeClient, deployment)
ginkgo.DeferCleanup(func() {
framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
})
})
ginkgo.It("multiple region deployment testing", func() {
// The deployment must land on every region-labeled cluster (2 regions x
// 1 cluster each); presence alone is the success criterion here.
ginkgo.By("check whether deployment is scheduled to multiple regions", func() {
framework.WaitDeploymentPresentOnClustersFitWith(regionClusters, deployment.Namespace, deployment.Name,
func(deployment *appsv1.Deployment) bool {
return true
})
})
// Patch the policy down to a single region group and verify the binding's
// target cluster list shrinks to updatedRegionGroups (1) clusters.
ginkgo.By("update propagation policy to propagate to one region", func() {
updateSpreadConstraints := []policyv1alpha1.SpreadConstraint{
{
SpreadByField: policyv1alpha1.SpreadByFieldCluster,
MaxGroups: clusterGroups,
MinGroups: clusterGroups,
},
{
SpreadByField: policyv1alpha1.SpreadByFieldRegion,
MaxGroups: updatedRegionGroups,
MinGroups: updatedRegionGroups,
},
}
patch := []map[string]interface{}{
{
"op": "replace",
"path": "/spec/placement/spreadConstraints",
"value": updateSpreadConstraints,
},
}
framework.PatchPropagationPolicy(karmadaClient, policyNamespace, policyName, patch, types.JSONPatchType)
bindingName := names.GenerateBindingName(deployment.Kind, deployment.Name)
binding := &workv1alpha2.ResourceBinding{}
// Poll the ResourceBinding until the scheduler has rescheduled onto
// exactly updatedRegionGroups target clusters.
gomega.Eventually(func(g gomega.Gomega) (bool, error) {
err := controlPlaneClient.Get(context.TODO(), client.ObjectKey{Namespace: deployment.Namespace, Name: bindingName}, binding)
g.Expect(err).NotTo(gomega.HaveOccurred())
targetClusterNames := make([]string, 0, len(binding.Spec.Clusters))
for _, cluster := range binding.Spec.Clusters {
targetClusterNames = append(targetClusterNames, cluster.Name)
}
if len(targetClusterNames) != updatedRegionGroups {
return false, nil
}
return true, nil
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
})
})
})
})