Merge pull request #4095 from JadeFlute0127/dev2
fix spell error in test and docs

commit 3079ed201a

@@ -177,7 +177,7 @@ spec:
 name: my-config
 ```
 
-Creating a propagation policy to propagate the deployment to specific clusters. To enable auto-propagating dependencies, we need to set `propagateDeps` as `ture`.
+Creating a propagation policy to propagate the deployment to specific clusters. To enable auto-propagating dependencies, we need to set `propagateDeps` as `true`.
 ```yaml
 apiVersion: policy.karmada.io/v1alpha1
 kind: PropagationPolicy

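For readers skimming the diff, the corrected sentence refers to Karmada's `propagateDeps` switch; a minimal sketch of such a policy could look like the block below. The policy name, the target Deployment, and the member cluster names are illustrative assumptions, not part of this commit.

```yaml
apiVersion: policy.karmada.io/v1alpha1
kind: PropagationPolicy
metadata:
  name: nginx-propagation        # illustrative name
spec:
  propagateDeps: true            # auto-propagate dependencies such as referenced ConfigMaps
  resourceSelectors:
    - apiVersion: apps/v1
      kind: Deployment
      name: nginx                # placeholder workload
  placement:
    clusterAffinity:
      clusterNames:
        - member1
        - member2
```
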
@@ -348,7 +348,7 @@ For example:
 | Run once an hour at the beginning of the hour | 0 * * * * |
 
 #### karmada-webhook
-In order to make sure the applied configuration is corrent, some validations are necessary for `CronFederatedHPA`, these logic should be implemented in `karmada-webhook`:
+In order to make sure the applied configuration is correct, some validations are necessary for `CronFederatedHPA`, these logic should be implemented in `karmada-webhook`:
 * If `spec.scaleTargetRef.apiVersion` is `autoscaling.karmada.io/v1alpha1`, `spec.scaleTargetRef.kind` can only be `FederatedHPA`, `spec.rules[*].targetMinReplicas` and `spec.rules[*].targetMaxReplicas` cannot be empty at the same time.
 * If `spec.scaleTargetRef.apiVersion` is not `autoscaling.karmada.io/v1alpha1`, `spec.rules[*].targetReplicas` cannot be empty.
 * `spec.rules[*].schedule` should be a valid cron format.

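The validation rules quoted in this hunk are easier to read against a concrete object. The sketch below is consistent with those rules and with the cron example from the table; the metadata, rule name, target name, and replica counts are assumptions added for illustration.

```yaml
apiVersion: autoscaling.karmada.io/v1alpha1
kind: CronFederatedHPA
metadata:
  name: cron-fhpa-sample         # illustrative name
spec:
  scaleTargetRef:
    apiVersion: autoscaling.karmada.io/v1alpha1
    kind: FederatedHPA           # so targetMinReplicas/targetMaxReplicas may be set instead of targetReplicas
    name: nginx-fhpa             # placeholder target
  rules:
    - name: scale-up-hourly
      schedule: "0 * * * *"      # run once an hour at the beginning of the hour
      targetMinReplicas: 2
      targetMaxReplicas: 10
```
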
@@ -83,7 +83,7 @@ The diagram is explained below:
 
 * Here give a service named `foo` in `member1`. We should use the full domain name: `foo.default.svc.cluster.local` to access this service. But we cannot use the same domain name in `member2`.
 
-* `Karmada` exports the service through `ServiceExport` and imports it into `member2` through `ServiceImport`. At this time, the shadow service `derived-foo` will appear in `member2`. User in `memeber2` can access to the `foo` service in `memeber1` by using `derived-foo.default.svc.cluster.local`.
+* `Karmada` exports the service through `ServiceExport` and imports it into `member2` through `ServiceImport`. At this time, the shadow service `derived-foo` will appear in `member2`. User in `member2` can access to the `foo` service in `member1` by using `derived-foo.default.svc.cluster.local`.
 
 * After the `coreDNS` installed with `multicluster` found the `ServiceImport` had been created, it will analyze `name`, `namespace`, and `ips` fields of the `ServiceImport` and generate the rr records. In this example, the `ips` in `ServiceImport` can be the `clusterIP` of `derived-foo`.
 

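For orientation, the `foo` / `derived-foo` flow described above is driven by a ServiceExport/ServiceImport pair roughly like the following. This is a sketch assuming the upstream multi-cluster services API group; the port, protocol, and type shown are illustrative assumptions.

```yaml
# Exported for service `foo` (delivered to member1 by Karmada).
apiVersion: multicluster.x-k8s.io/v1alpha1
kind: ServiceExport
metadata:
  name: foo
  namespace: default
---
# Imported into member2; the shadow service `derived-foo` is materialized from it.
apiVersion: multicluster.x-k8s.io/v1alpha1
kind: ServiceImport
metadata:
  name: foo
  namespace: default
spec:
  type: ClusterSetIP
  ports:
    - port: 80                   # assumed service port
      protocol: TCP
```
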
@@ -169,7 +169,7 @@ Users only need to add `conflict resolution` annotations in the `ResourceTemplat
 
 #### Story 4
 
-Similarly, if multiple `Deployment` is defined in one `PropagationPolicy` , and users hope `Karmada` ignoring takeover the conflict `Deployment` by default, but forcing takeover individual specificed conflict `Deployment` :
+Similarly, if multiple `Deployment` is defined in one `PropagationPolicy` , and users hope `Karmada` ignoring takeover the conflict `Deployment` by default, but forcing takeover individual specified conflict `Deployment` :
 
 A feasible practice is to declare `conflictResolution: Abort` in the `PropagationPolicy` (or leave it blank), and annotate `work.karmada.io/conflict-resolution: overwrite` in the `ResourceTemplate`.
 

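To make Story 4 concrete, the combination described in this hunk would look roughly like the pair below; the policy name and the Deployment singled out for forced takeover are placeholders added for illustration.

```yaml
# PropagationPolicy: conservative default for every selected Deployment.
apiVersion: policy.karmada.io/v1alpha1
kind: PropagationPolicy
metadata:
  name: deployments-propagation  # illustrative name
spec:
  conflictResolution: Abort      # or simply leave the field blank
  resourceSelectors:
    - apiVersion: apps/v1
      kind: Deployment
---
# ResourceTemplate: opt this one Deployment into forced takeover.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx                    # placeholder workload
  annotations:
    work.karmada.io/conflict-resolution: overwrite
# (remainder of the Deployment spec omitted)
```
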
@@ -309,6 +309,6 @@ No such api modify even makes code more clean, but two reasons are under my cons
 Adding this field to CRDs including `ResourceBinding` can more clearly demonstrate this ability to users than adding annotations.
 
 2)Adding annotations is just a **compatible** way for individual exceptions, even if we remove it, it's still justifiable. Assuming it doesn't exist,
-we still need to modify the api of `ResourceBinding`. I mean, the annotation is just a addons, our desgin shouldn't overdependence on it.
+we still need to modify the api of `ResourceBinding`. I mean, the annotation is just a addons, our design shouldn't overdependence on it.
 
 3)More convenient for code implementation

@@ -29,7 +29,7 @@ This proposal aims to provide a solution for users to teach Karmada to learn the
 
 ## Motivation
 
-Nowadays, lots of people or projects extend Kubernetes by `Custom Resource Defination`. In order to propagate the
+Nowadays, lots of people or projects extend Kubernetes by `Custom Resource Definition`. In order to propagate the
 custom resources, Karmada has to learn the structure of the custom resource.
 
 ### Goals

@@ -31,7 +31,7 @@ The Cluster Accurate Scheduler Estimator aims to fix these problems.
 
 ### Goals
 
-- Make the available replica estimation more acurate for scheduler decision reference.
+- Make the available replica estimation more accurate for scheduler decision reference.
 - Allow user to specify node claim such as `NodeAffinity`, `NodeSelector` and `Tolerations` for multi-cluster scheduling.
 
 ### Non-Goals

@@ -137,7 +137,7 @@ type NodeClaim struct {
 
 First, the existing plugins in Karmada Scheduler such as ClusterAffinity, APIInstalled and TaintToleration will select the suitable clusters.
 
-Based on this prefilter result, when assigning replicas, the Karmada Scheduler could try to calculate cluster max available replicas by starting gRPC requests concurrently to the Cluster Accurate Scheduler Estimator. At last, the Cluster Accurate Scheduler Estimator will soon return how many available replicas that the cluster could produce. Then the Karmada Scheduler assgin replicas into different clusters in terms of the estimation result.
+Based on this prefilter result, when assigning replicas, the Karmada Scheduler could try to calculate cluster max available replicas by starting gRPC requests concurrently to the Cluster Accurate Scheduler Estimator. At last, the Cluster Accurate Scheduler Estimator will soon return how many available replicas that the cluster could produce. Then the Karmada Scheduler assign replicas into different clusters in terms of the estimation result.
 
 We could implement this by modifying function calClusterAvailableReplicas to an interface. The previous estimation method, based on `ResourceSummary` in `Cluster.Status`, is able to be a default normal estimation approach. Now we could just add a switch to determine whether Cluster Accurate Scheduler Estimator is applied, while the estimator via `ResourceSummary` could be a default one that does not support disabled. In the future, after the scheduler profile is added, a user could customize the config by using a profile.
 

@@ -65,7 +65,7 @@ Now we add a new cluster member4. We may want to reschedule some replicas toward
 
 ### Architecture
 
-It is noticed that this design only focus on User Story 1, which means that only unscheduable pods are included for descheduling, usually happening when cluster resources are insufficient. Other stragety is not considered in this proposal because it needs more discussion.
+It is noticed that this design only focus on User Story 1, which means that only unscheduable pods are included for descheduling, usually happening when cluster resources are insufficient. Other strategy is not considered in this proposal because it needs more discussion.
 
 Here is the descheduler workflow.
 

@@ -298,7 +298,7 @@ continue to evaluate from this affinity term.
 
 #### karmada-controller-manager
 
-When creating or updating `ResourceBidning`/`ClusterResourceBinding`, the added
+When creating or updating `ResourceBinding`/`ClusterResourceBinding`, the added
 `OrderedClusterAffinities` in `PropagationPolicy`/`ClusterPropagationPolicy` should
 be synced.
 

@@ -77,7 +77,7 @@ var _ = framework.SerialDescribe("Aggregated Kubernetes API Endpoint testing", f
 })
 
 ginkgo.BeforeEach(func() {
-ginkgo.By(fmt.Sprintf("Joinning cluster: %s", clusterName), func() {
+ginkgo.By(fmt.Sprintf("Joining cluster: %s", clusterName), func() {
 opts := join.CommandJoinOption{
 DryRun: false,
 ClusterNamespace: secretStoreNamespace,

@@ -91,7 +91,7 @@ var _ = framework.SerialDescribe("Aggregated Kubernetes API Endpoint testing", f
 })
 
 ginkgo.AfterEach(func() {
-ginkgo.By(fmt.Sprintf("Unjoinning cluster: %s", clusterName), func() {
+ginkgo.By(fmt.Sprintf("Unjoining cluster: %s", clusterName), func() {
 opts := unjoin.CommandUnjoinOption{
 DryRun: false,
 ClusterNamespace: secretStoreNamespace,

@@ -114,7 +114,7 @@ var _ = ginkgo.Describe("FederatedResourceQuota auto-provision testing", func()
 })
 
 ginkgo.It("federatedResourceQuota should be propagated to new joined clusters", func() {
-ginkgo.By(fmt.Sprintf("Joinning cluster: %s", clusterName), func() {
+ginkgo.By(fmt.Sprintf("Joining cluster: %s", clusterName), func() {
 opts := join.CommandJoinOption{
 DryRun: false,
 ClusterNamespace: "karmada-cluster",

@@ -365,7 +365,7 @@ var _ = framework.SerialDescribe("Karmadactl join/unjoin testing", ginkgo.Labels
 })
 
 ginkgo.BeforeEach(func() {
-ginkgo.By(fmt.Sprintf("Joinning cluster: %s", clusterName), func() {
+ginkgo.By(fmt.Sprintf("Joining cluster: %s", clusterName), func() {
 opts := join.CommandJoinOption{
 DryRun: false,
 ClusterNamespace: "karmada-cluster",

@@ -89,7 +89,7 @@ var _ = ginkgo.Describe("[namespace auto-provision] namespace auto-provision tes
 })
 
 ginkgo.BeforeEach(func() {
-ginkgo.By(fmt.Sprintf("Joinning cluster: %s", clusterName), func() {
+ginkgo.By(fmt.Sprintf("Joining cluster: %s", clusterName), func() {
 opts := join.CommandJoinOption{
 DryRun: false,
 ClusterNamespace: "karmada-cluster",

@@ -89,7 +89,7 @@ var _ = ginkgo.Describe("[cluster unjoined] reschedule testing", func() {
 })
 
 ginkgo.It("deployment reschedule testing", func() {
-ginkgo.By(fmt.Sprintf("Joinning cluster: %s", newClusterName), func() {
+ginkgo.By(fmt.Sprintf("Joining cluster: %s", newClusterName), func() {
 opts := join.CommandJoinOption{
 DryRun: false,
 ClusterNamespace: "karmada-cluster",

@@ -224,7 +224,7 @@ var _ = ginkgo.Describe("[cluster joined] reschedule testing", func() {
 framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
 })
 
-ginkgo.By(fmt.Sprintf("Joinning cluster: %s", newClusterName))
+ginkgo.By(fmt.Sprintf("Joining cluster: %s", newClusterName))
 opts := join.CommandJoinOption{
 DryRun: false,
 ClusterNamespace: "karmada-cluster",

@@ -283,7 +283,7 @@ var _ = ginkgo.Describe("[cluster joined] reschedule testing", func() {
 return testhelper.IsExclude(newClusterName, targetClusterNames)
 }, pollTimeout, pollInterval).Should(gomega.BeTrue())
 
-ginkgo.By(fmt.Sprintf("Joinning cluster: %s", newClusterName))
+ginkgo.By(fmt.Sprintf("Joining cluster: %s", newClusterName))
 opts := join.CommandJoinOption{
 DryRun: false,
 ClusterNamespace: "karmada-cluster",

@@ -151,7 +151,7 @@ var _ = ginkgo.Describe("[karmada-search] karmada search testing", ginkgo.Ordere
 searchObject(fmt.Sprintf(pathNSDeploymentsFmt, testNamespace), m1DmName, true)
 })
 
-ginkgo.It("[memeber2 deployments namespace] should be not searchable", func() {
+ginkgo.It("[member2 deployments namespace] should be not searchable", func() {
 searchObject(fmt.Sprintf(pathNSDeploymentsFmt, testNamespace), m2DmName, false)
 })
 

@@ -789,7 +789,7 @@ var _ = ginkgo.Describe("[karmada-search] karmada search testing", ginkgo.Ordere
 // search cache should not have the deployment
 searchObject(pathAllDeployments, existsDeploymentName, false)
 // join the cluster
-ginkgo.By(fmt.Sprintf("Joinning cluster: %s", clusterName), func() {
+ginkgo.By(fmt.Sprintf("Joining cluster: %s", clusterName), func() {
 opts := join.CommandJoinOption{
 DryRun: false,
 ClusterNamespace: "karmada-cluster",