add e2e coverage detail
Signed-off-by: Vacant <vacant@MasterNode.local>
Signed-off-by: Vacant <vacant@192.168.0.107>
parent 303f2cd24b
commit 5110ce08bd

@@ -436,6 +436,5 @@ var _ = ginkgo.Describe("[ClusterAffinities] propagation testing", func() {
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
})

})
})

@@ -0,0 +1,11 @@
### cluster affinities e2e test coverage analysis

| Test Case | E2E Describe Text | Comments |
|-----------|-------------------|----------|
| [PropagationPolicy] Test ClusterAffinities when scheduling with multiple clusterAffinity groups | propagate deployment and then update the cluster label | [Cluster Affinities](https://karmada.io/docs/next/userguide/scheduling/resource-propagating/#multiple-cluster-affinity-groups) |
| [PropagationPolicy] Test when the schedule changes from clusterAffinity to clusterAffinities | propagate deployment and then update the cluster label | |
| [PropagationPolicy] Test when the schedule changes from clusterAffinities to clusterAffinity | propagate deployment and then update the cluster label | |
| [ClusterPropagationPolicy] Test ClusterAffinities when scheduling with multiple clusterAffinity groups | propagate clusterRole and then update the cluster label | |
| [ClusterPropagationPolicy] Test when the schedule changes from clusterAffinity to clusterAffinities | propagate clusterRole and then update the cluster label | |
| [ClusterPropagationPolicy] Test when the schedule changes from clusterAffinities to clusterAffinity | propagate clusterRole and then update the cluster label | |
| Test ClusterAffinities rescheduling when a member cluster becomes unreachable | deployment failover testing | |
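
The cases above all drive the `ClusterAffinities` field of a policy's `Placement`. As a minimal sketch of such a policy, assuming karmada's `policyv1alpha1` Go types and placeholder cluster names:

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
)

// examplePolicy propagates an nginx Deployment using ordered affinity groups:
// the scheduler tries the "primary" group first and falls back to "backup"
// only when no cluster in the current group fits.
var examplePolicy = &policyv1alpha1.PropagationPolicy{
	ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "nginx-propagation"},
	Spec: policyv1alpha1.PropagationSpec{
		ResourceSelectors: []policyv1alpha1.ResourceSelector{
			{APIVersion: "apps/v1", Kind: "Deployment", Name: "nginx"},
		},
		Placement: policyv1alpha1.Placement{
			ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{
				{AffinityName: "primary", ClusterAffinity: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"member1"}}},
				{AffinityName: "backup", ClusterAffinity: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"member2"}}},
			},
		},
	},
}
```

The failover row relies on the same ordering: when a selected cluster becomes unreachable, rescheduling walks the groups again.
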
@@ -0,0 +1,24 @@
### resource interpreter e2e test coverage analysis

#### Resource interpreter webhook testing
| Test Case | E2E Describe Text | Comments |
|-----------|-------------------|----------|
| Test InterpreterOperation InterpretReplica | InterpretReplica testing | [Resource Interpreter Webhook Configuration](https://karmada.io/docs/next/reference/karmada-api/config-resources/resource-interpreter-webhook-configuration-v1alpha1/) |
| Test InterpreterOperation Retain when constructing two values that need to be changed, and only one value is retained | Retain testing | |
| Test InterpreterOperation ReviseReplica | ReviseReplica testing | |
| Test InterpreterOperation AggregateStatus: whether the workload status can be correctly collected | AggregateStatus testing | |
| Test InterpreterOperation InterpretStatus | InterpretStatus testing | |
| Test InterpreterOperation InterpretHealth: whether the health status can be correctly collected | InterpretHealth testing | |
| Test InterpreterOperation InterpretDependency: whether the workload's dependency can be interpreted | InterpretDependency testing | |
| Test InterpreterOperation InterpretDependency: whether the dependency CR is updated/deleted correctly | Dependency cr propagation testing | |

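These operations are wired up through a `ResourceInterpreterWebhookConfiguration`. The sketch below is illustrative only: the service reference, API group, and `Workload` kind are placeholders, and field names follow my reading of karmada's `configv1alpha1` API:

```go
package example

import (
	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1"
)

// exampleWebhook routes the interpreter operations exercised above to an
// external interpreter service for a custom Workload kind.
var exampleWebhook = &configv1alpha1.ResourceInterpreterWebhookConfiguration{
	ObjectMeta: metav1.ObjectMeta{Name: "examples"},
	Webhooks: []configv1alpha1.ResourceInterpreterWebhook{{
		Name: "workloads.example.com",
		Rules: []configv1alpha1.RuleWithOperations{{
			Operations: []configv1alpha1.InterpreterOperation{
				configv1alpha1.InterpreterOperationInterpretReplica,
				configv1alpha1.InterpreterOperationReviseReplica,
				configv1alpha1.InterpreterOperationRetain,
				configv1alpha1.InterpreterOperationAggregateStatus,
			},
			Rule: configv1alpha1.Rule{
				APIGroups:   []string{"workload.example.io"},
				APIVersions: []string{"v1alpha1"},
				Kinds:       []string{"Workload"},
			},
		}},
		ClientConfig: admissionregistrationv1.WebhookClientConfig{
			Service: &admissionregistrationv1.ServiceReference{
				Namespace: "karmada-system",
				Name:      "interpreter-webhook-example",
			},
		},
		InterpreterContextVersions: []string{"v1alpha1"},
	}},
}
```
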
#### Resource interpreter customization testing
| Test Case | E2E Describe Text | Comments |
|-----------|-------------------|----------|
| Test InterpreterOperation InterpretReplica with Lua scripts | InterpretReplica testing | [Customizing Resource Interpreter](https://karmada.io/docs/next/userguide/globalview/customizing-resource-interpreter/) |
| Test InterpreterOperation ReviseReplica with Lua scripts | ReviseReplica testing | |
| Test InterpreterOperation Retain when Lua scripts construct two values that need to be changed, and only one value is retained | Retain testing | |
| Test InterpreterOperation AggregateStatus: whether the workload status can be correctly collected | AggregateStatus testing | |
| Test InterpreterOperation InterpretStatus with Lua scripts | InterpretStatus testing | |
| Test InterpreterOperation InterpretHealth: whether the health status can be correctly collected | InterpretHealth testing | |
| Test InterpreterOperation InterpretDependency: whether the dependency CR is created correctly | DependencyInterpretation testing | |
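
The customization variants replace the external webhook with declarative Lua. A minimal sketch for the InterpretReplica case, assuming a hypothetical `Workload` CRD and karmada's `configv1alpha1` types:

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1"
)

// exampleCustomization teaches Karmada to read replicas from a custom
// Workload kind via an in-process Lua script instead of a webhook call.
var exampleCustomization = &configv1alpha1.ResourceInterpreterCustomization{
	ObjectMeta: metav1.ObjectMeta{Name: "workload-interpreter"},
	Spec: configv1alpha1.ResourceInterpreterCustomizationSpec{
		Target: configv1alpha1.CustomizationTarget{
			APIVersion: "workload.example.io/v1alpha1",
			Kind:       "Workload",
		},
		Customizations: configv1alpha1.CustomizationRules{
			ReplicaResource: &configv1alpha1.ReplicaResourceRequirement{
				// GetReplicas returns the desired replica count; the second
				// return value (per-replica resource requirements) is
				// omitted here for brevity.
				LuaScript: `
function GetReplicas(obj)
  return obj.spec.replicas
end`,
			},
		},
	},
}
```
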
@@ -0,0 +1,23 @@
### scheduling e2e test coverage analysis

#### Propagation with label and group constraints
| Test Case | E2E Describe Text | Comments |
|-----------|-------------------|----------|
| Test scheduling the Deployment with label and group constraints | Deployment propagation with label and group constraints testing | |
| Test scheduling the CRD with label and group constraints | CRD with specified label and group constraints propagation testing | |
| Test scheduling the Job with label and group constraints | Job propagation with label and group constraints testing | |

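The group constraints come from `SpreadConstraints` on `Placement`; combined with a label selector they bound how many matching clusters the scheduler may pick. A sketch with placeholder label values:

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
)

// examplePlacement restricts candidates to clusters labeled location=CHN and
// asks the scheduler to choose exactly one of them (MinGroups == MaxGroups).
var examplePlacement = policyv1alpha1.Placement{
	ClusterAffinity: &policyv1alpha1.ClusterAffinity{
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"location": "CHN"},
		},
	},
	SpreadConstraints: []policyv1alpha1.SpreadConstraint{{
		SpreadByField: policyv1alpha1.SpreadByFieldCluster,
		MinGroups:     1,
		MaxGroups:     1,
	}},
}
```
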
#### Replica Scheduling Strategy testing
| Test Case | E2E Describe Text | Comments |
|-----------|-------------------|----------|
| Test ReplicaScheduling when ReplicaSchedulingType value is Duplicated | replicas duplicated testing | |
| Test ReplicaScheduling when ReplicaSchedulingType value is Duplicated, triggering rescheduling when replicas have changed | replicas duplicated testing when rescheduling | |
| Test ReplicaScheduling when ReplicaSchedulingType value is Divided, ReplicaDivisionPreference value is Weighted, and WeightPreference is nil | replicas divided and weighted testing | |
| Test ReplicaScheduling when ReplicaSchedulingType value is Divided, ReplicaDivisionPreference value is Weighted, and WeightPreference is nil, triggering rescheduling when replicas have changed | replicas divided and weighted testing when rescheduling | |
| Test ReplicaScheduling when ReplicaSchedulingType value is Divided, ReplicaDivisionPreference value is Weighted, and WeightPreference isn't nil | replicas divided and weighted testing | |
| Test ReplicaScheduling when ReplicaSchedulingType value is Divided, ReplicaDivisionPreference value is Weighted, and WeightPreference isn't nil, triggering rescheduling when replicas have changed | replicas divided and weighted testing when rescheduling | |

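The strategies under test map onto `ReplicaSchedulingStrategy`. Roughly, with placeholder cluster names (the nil-`WeightPreference` rows above exercise karmada's fallback to an even split):

```go
package example

import (
	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
)

// duplicated keeps the full replica count on every selected cluster.
var duplicated = &policyv1alpha1.ReplicaSchedulingStrategy{
	ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDuplicated,
}

// weighted splits replicas 1:2 between member1 and member2.
var weighted = &policyv1alpha1.ReplicaSchedulingStrategy{
	ReplicaSchedulingType:     policyv1alpha1.ReplicaSchedulingTypeDivided,
	ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
	WeightPreference: &policyv1alpha1.ClusterPreferences{
		StaticWeightList: []policyv1alpha1.StaticClusterWeight{
			{TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"member1"}}, Weight: 1},
			{TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"member2"}}, Weight: 2},
		},
	},
}
```
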
#### Job Replica Scheduling Strategy testing
| Test Case | E2E Describe Text | Comments |
|-----------|-------------------|----------|
| Test that a Job's parallelism is divided equally on member clusters | Job replicas divided and weighted testing | |

@@ -0,0 +1,32 @@
### search e2e test coverage analysis

#### Test search
| Test Case | E2E Describe Text | Comments |
|-----------|-------------------|----------|
| Test search Service when there is no ResourceRegistry | [Service] should be not searchable | |
| Test search a searchable deployment | [member1 deployments] should be searchable | |
| Test search a non-searchable deployment | [member2 deployments] should be not searchable | |
| Test search a searchable namespace | [member1 deployments namespace] should be searchable | |
| Test search a non-searchable namespace | [member2 deployments namespace] should be not searchable | |
| Test search node member1 | [member1 nodes] should be searchable | |
| Test search node member2 | [member2 nodes] should be searchable | |
| Test search member1's pods | [member1 pods] should be searchable | |
| Test search member2's pods | [member2 pods] should be searchable | |
| Test search clusterRole | [clusterrole] should be searchable | |
| Test search a non-searchable clusterRoleBinding | [clusterrolebinding] should not be searchable | |
| Test search a clusterRoleBinding | [clusterrolebinding] should be searchable | |
| Test search a daemonSet | [daemonset] should be searchable | |
| Test search a non-searchable daemonSet | [daemonset] should not be searchable | |
| Test search pods/nodes when creating/updating/deleting a resourceRegistry | create, update, delete resourceRegistry | |
| Test get node | could get node | |
| Test list nodes | could list nodes | |
| Test chunked list of nodes | could chunk list nodes | |
| Test list and watch nodes | could list & watch nodes | |
| Test patch nodes | could path nodes | |
| Test update node | could update node | |

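Everything in this table hinges on a `ResourceRegistry` opting resource types into the search cache. A minimal sketch, assuming karmada's `searchv1alpha1` types (names and clusters are placeholders):

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
	searchv1alpha1 "github.com/karmada-io/karmada/pkg/apis/search/v1alpha1"
)

// exampleRegistry caches member1's Deployments so they become searchable
// through the karmada-search aggregated API; resources not matched by any
// registry (e.g. member2's Deployments above) stay unsearchable.
var exampleRegistry = &searchv1alpha1.ResourceRegistry{
	ObjectMeta: metav1.ObjectMeta{Name: "deployment-search"},
	Spec: searchv1alpha1.ResourceRegistrySpec{
		TargetCluster: policyv1alpha1.ClusterAffinity{
			ClusterNames: []string{"member1"},
		},
		ResourceSelectors: []searchv1alpha1.ResourceSelector{
			{APIVersion: "apps/v1", Kind: "Deployment"},
		},
	},
}
```
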
#### Test search after reconciling ResourceRegistry when clusters are joined/updated
| Test Case | E2E Describe Text | Comments |
|-----------|-------------------|----------|
| Test search deployment after joining the cluster | [member clusters joined] could reconcile ResourceRegistry | |
| Test search deployment after managing and unmanaging the clusters | [member clusters updated, deleted label] could reconcile ResourceRegistry | |

@@ -0,0 +1,5 @@
### taint toleration e2e test coverage analysis

| Test Case | E2E Describe Text | Comments |
|-----------|-------------------|----------|
| Test Deployment propagation with taint and toleration | deployment with cluster tolerations testing | [Schedule Based on Taint Toleration](https://karmada.io/docs/next/userguide/scheduling/resource-propagating/#schedule-based-on-taints-and-tolerations) |
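
Cluster tolerations reuse the core Kubernetes `Toleration` type on `Placement`. A sketch with a placeholder taint key/value:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"

	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
)

// toleratedPlacement keeps clusters carrying a workload=tolerated:NoSchedule
// taint eligible as propagation targets.
var toleratedPlacement = policyv1alpha1.Placement{
	ClusterTolerations: []corev1.Toleration{{
		Key:      "workload",
		Operator: corev1.TolerationOpEqual,
		Value:    "tolerated",
		Effect:   corev1.TaintEffectNoSchedule,
	}},
}
```
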
@@ -241,10 +241,7 @@ var _ = ginkgo.Describe("Resource interpreter webhook testing", func() {
// not collect status.conditions in webhook
klog.Infof("work(%s/%s) length of conditions: %v, want: %v", workNamespace, workName, len(observedStatus.Conditions), 0)

-if observedStatus.ReadyReplicas == *workload.Spec.Replicas && len(observedStatus.Conditions) == 0 {
-return true, nil
-}
-return false, nil
+return observedStatus.ReadyReplicas == *workload.Spec.Replicas && len(observedStatus.Conditions) == 0, nil
}, pollTimeout, pollInterval).Should(gomega.BeTrue())
}
})

@@ -459,7 +456,7 @@ end
})
})

-ginkgo.It("dependency cr propagation testing", func() {
+ginkgo.It("Dependency cr propagation testing", func() {
framework.GetCRD(dynamicClient, crd.Name)
framework.WaitCRDPresentOnClusters(karmadaClient, framework.ClusterNames(),
fmt.Sprintf("%s/%s", crd.Spec.Group, "v1alpha1"), crd.Spec.Names.Kind)

@@ -785,7 +782,7 @@ var _ = framework.SerialDescribe("Resource interpreter customization testing", f
end
replicas = 0
for i = 1, #statusItems do
-if statusItems[i].status ~= nil and statusItems[i].status.replicas ~= nil then
+if statusItems[i].status ~= nil and statusItems[i].status.replicas ~= nil then
replicas = replicas + statusItems[i].status.replicas + 1
end
end

@@ -1045,7 +1042,6 @@ end `,
})
})
})

})
})
})

@@ -99,7 +99,7 @@ var _ = ginkgo.Describe("propagation with label and group constraints testing",
})
})

-ginkgo.It("deployment propagation with label and group constraints testing", func() {
+ginkgo.It("Deployment propagation with label and group constraints testing", func() {
ginkgo.By("collect the target clusters in resource binding", func() {
targetClusterNames = framework.ExtractTargetClustersFrom(controlPlaneClient, deployment)
gomega.Expect(len(targetClusterNames) == minGroups).ShouldNot(gomega.BeFalse())

@@ -191,7 +191,7 @@ var _ = ginkgo.Describe("propagation with label and group constraints testing",
})
})

-ginkgo.It("crd with specified label and group constraints propagation testing", func() {
+ginkgo.It("CRD with specified label and group constraints propagation testing", func() {
ginkgo.By("collect the target clusters in cluster resource binding", func() {
bindingName := names.GenerateBindingName(crd.Kind, crd.Name)
fmt.Printf("crd kind is %s, name is %s\n", crd.Kind, crd.Name)

@@ -237,7 +237,7 @@ var _ = ginkgo.Describe("propagation with label and group constraints testing",
}
targetCluster, _ := util.GetCluster(controlPlaneClient, targetClusterName)
groupMatchedClusters = append(groupMatchedClusters, targetCluster)
-fmt.Printf("Crd(%s) is present on cluster(%s).\n", crd.Name, targetClusterName)
+fmt.Printf("CRD(%s) is present on cluster(%s).\n", crd.Name, targetClusterName)
return true, nil
})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

@@ -413,7 +413,7 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
klog.Infof("check if deployment's replicas are duplicate on member clusters")
framework.WaitDeploymentPresentOnClustersFitWith(framework.ClusterNames(), deployment.Namespace, deployment.Name,
func(deploy *appsv1.Deployment) bool {
-klog.Infof("Deployment(%s/%s)'s replcas is %d, expected: %d.",
+klog.Infof("Deployment(%s/%s)'s replicas is %d, expected: %d.",
deploy.Namespace, deploy.Name, *deploy.Spec.Replicas, *deployment.Spec.Replicas)
return *deploy.Spec.Replicas == *deployment.Spec.Replicas
})

@@ -433,7 +433,7 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
klog.Infof("check if deployment's replicas have been updated on member clusters")
framework.WaitDeploymentPresentOnClustersFitWith(framework.ClusterNames(), deployment.Namespace, deployment.Name,
func(deploy *appsv1.Deployment) bool {
-klog.Infof("Deployment(%s/%s)'s replcas is %d, expected: %d.",
+klog.Infof("Deployment(%s/%s)'s replicas is %d, expected: %d.",
deploy.Namespace, deploy.Name, *deploy.Spec.Replicas, updateDeploymentReplicas)
return *deploy.Spec.Replicas == updateDeploymentReplicas
})

@@ -459,7 +459,7 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
klog.Infof("check if deployment's replicas are divided equally on member clusters")
framework.WaitDeploymentPresentOnClustersFitWith(framework.ClusterNames(), deployment.Namespace, deployment.Name,
func(deploy *appsv1.Deployment) bool {
-klog.Infof("Deployment(%s/%s)'s replcas is %d, expected: %d.",
+klog.Infof("Deployment(%s/%s)'s replicas is %d, expected: %d.",
deploy.Namespace, deploy.Name, *deploy.Spec.Replicas, expectedReplicas)
return *deploy.Spec.Replicas == expectedReplicas
})

@@ -490,7 +490,7 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
klog.Infof("check if deployment's replicas are divided equally on member clusters")
framework.WaitDeploymentPresentOnClustersFitWith(framework.ClusterNames(), deployment.Namespace, deployment.Name,
func(deploy *appsv1.Deployment) bool {
-klog.Infof("Deployment(%s/%s)'s replcas is %d, expected: %d.",
+klog.Infof("Deployment(%s/%s)'s replicas is %d, expected: %d.",
deploy.Namespace, deploy.Name, *deploy.Spec.Replicas, expectedReplicas)
return *deploy.Spec.Replicas == expectedReplicas
})

@@ -539,7 +539,7 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
memberDeployment, err := clusterClient.AppsV1().Deployments(deploymentNamespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
g.Expect(err).NotTo(gomega.HaveOccurred())

-klog.Infof("Deployment(%s/%s)'s replcas is %d on cluster(%s), expected: %d.",
+klog.Infof("Deployment(%s/%s)'s replicas is %d on cluster(%s), expected: %d.",
deploymentNamespace, deploymentName, *memberDeployment.Spec.Replicas, cluster.Name, expectedReplicas)
return *memberDeployment.Spec.Replicas, nil
}, pollTimeout, pollInterval).Should(gomega.Equal(expectedReplicas))

@@ -602,7 +602,7 @@ var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing",
memberDeployment, err := clusterClient.AppsV1().Deployments(deploymentNamespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
g.Expect(err).NotTo(gomega.HaveOccurred())

-klog.Infof("Deployment(%s/%s)'s replcas is %d on cluster(%s), expected: %d.",
+klog.Infof("Deployment(%s/%s)'s replicas is %d on cluster(%s), expected: %d.",
deploymentNamespace, deploymentName, *memberDeployment.Spec.Replicas, cluster.Name, expectedReplicas)
return *memberDeployment.Spec.Replicas, nil
}, pollTimeout, pollInterval).Should(gomega.Equal(expectedReplicas))

@@ -652,7 +652,7 @@ var _ = ginkgo.Describe("[JobReplicaScheduling] JobReplicaSchedulingStrategy tes
framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
})

-ginkgo.It("job replicas divided and weighted testing", func() {
+ginkgo.It("Job replicas divided and weighted testing", func() {
sumWeight := 0
staticWeightLists := make([]policyv1alpha1.StaticClusterWeight, 0)
for index, clusterName := range framework.ClusterNames() {