From b4ad8382274a4824c6868aff2e2f769484ce4bda Mon Sep 17 00:00:00 2001
From: whitewindmills
Date: Thu, 29 Aug 2024 15:58:05 +0800
Subject: [PATCH] Fix spelling errors

Signed-off-by: whitewindmills
---
 charts/karmada/README.md | 2 +-
 cmd/scheduler-estimator/app/options/options.go | 4 ++--
 .../proposals/cleanup-propagated-resources/README.md | 2 +-
 .../proposals/scheduling/policy-preemption/README.md | 4 ++--
 .../workload-rebalancer/workload-rebalancer.md | 4 ++--
 docs/proposals/service-discovery/README.md | 8 ++++----
 .../hpa_scale_target_marker_controller.go | 2 +-
 pkg/karmadactl/edit/edit.go | 4 ++--
 pkg/karmadactl/options/global.go | 2 +-
 .../customized/declarative/luavm/lua_convert.go | 4 ++--
 .../default/native/retain_test.go | 12 ++++++------
 test/e2e/framework/resourcebinding.go | 12 ++++++------
 test/e2e/framework/workloadrebalancer.go | 2 +-
 13 files changed, 31 insertions(+), 31 deletions(-)

diff --git a/charts/karmada/README.md b/charts/karmada/README.md
index c8944c02c..8907634e8 100644
--- a/charts/karmada/README.md
+++ b/charts/karmada/README.md
@@ -259,7 +259,7 @@ helm install karmada-scheduler-estimator -n karmada-system ./charts/karmada
 | `agent.affinity` | Affinity of the agent | `{}` |
 | `agent.tolerations` | Tolerations of the agent | `[]` |
 | `agent.strategy` | Strategy of the agent | `{"type": "RollingUpdate", "rollingUpdate": {"maxUnavailable": "0", "maxSurge": "50%"} }` |
-| `scheduler.labels` | Labels of the schedeler deployment | `{"app": "karmada-scheduler"}` |
+| `scheduler.labels` | Labels of the scheduler deployment | `{"app": "karmada-scheduler"}` |
 | `scheduler.replicaCount` | Target replicas of the scheduler | `1` |
 | `scheduler.podLabels` | Labels of the scheduler pods | `{}` |
 | `scheduler.podAnnotations` | Annotations of the scheduler pods | `{}` |
diff --git a/cmd/scheduler-estimator/app/options/options.go b/cmd/scheduler-estimator/app/options/options.go
index 28aae98ad..1293c655c 100644
--- a/cmd/scheduler-estimator/app/options/options.go
+++ b/cmd/scheduler-estimator/app/options/options.go
@@ -38,10 +38,10 @@ type Options struct {
 	Master string
 	ClusterName string
 	// BindAddress is the IP address on which to listen for the --secure-port port.
-	// Deprecated: To specify the TCP addresse for serving health probes, use HealthProbeBindAddress instead. To specify the TCP addresse for serving prometheus metrics, use MetricsBindAddress instead. This will be removed in release 1.12+.
+	// Deprecated: To specify the TCP address for serving health probes, use HealthProbeBindAddress instead. To specify the TCP address for serving prometheus metrics, use MetricsBindAddress instead. This will be removed in release 1.12+.
 	BindAddress string
 	// SecurePort is the port that the server serves at.
-	// Deprecated: To specify the TCP addresse for serving health probes, use HealthProbeBindAddress instead. To specify the TCP addresse for serving prometheus metrics, use MetricsBindAddress instead. This will be removed in release 1.12+.
+	// Deprecated: To specify the TCP address for serving health probes, use HealthProbeBindAddress instead. To specify the TCP address for serving prometheus metrics, use MetricsBindAddress instead. This will be removed in release 1.12+.
 	SecurePort int
 	// ServerPort is the port that the server gRPC serves at.
 	ServerPort int
diff --git a/docs/proposals/cleanup-propagated-resources/README.md b/docs/proposals/cleanup-propagated-resources/README.md
index 36dec6990..cadc0c5fa 100644
--- a/docs/proposals/cleanup-propagated-resources/README.md
+++ b/docs/proposals/cleanup-propagated-resources/README.md
@@ -75,7 +75,7 @@ In kubefed's propose, the author suggest add a `BestEffort` strategy, reviewers
 
 #### Needless Strategy
 
-Not need cleanup propagated resources when unjoining cluster. `Karmada` should use this strategy as default value, condsider the business risk.
+Not need cleanup propagated resources when unjoining cluster. `Karmada` should use this strategy as default value, consider the business risk.
 
 #### Required Strategy
 
diff --git a/docs/proposals/scheduling/policy-preemption/README.md b/docs/proposals/scheduling/policy-preemption/README.md
index 978d541f1..026a79e83 100644
--- a/docs/proposals/scheduling/policy-preemption/README.md
+++ b/docs/proposals/scheduling/policy-preemption/README.md
@@ -49,7 +49,7 @@ Even if workloads have been propagated by a policy, they can be preempted by a h
 Cluster administrators usually cannot foresee future expansion scenarios when configuring policies.
 They will usually start with a broad policy to set the base strategy.
 When an application requires special configuration,
-the administrator wants to provide a persionalized policy to take over the application.
+the administrator wants to provide a personalized policy to take over the application.
 At this time, it hopes that the high-priority policy can preempt the low-priority policy.
 
 ### Goals
@@ -257,7 +257,7 @@ metadata:
   namespace: default
 ```
 
-Assume that there is a high-priority policy which allows preepmtion:
+Assume that there is a high-priority policy which allows preemption:
 
 ```yaml
 apiVersion: policy.karmada.io/v1alpha1
diff --git a/docs/proposals/scheduling/workload-rebalancer/workload-rebalancer.md b/docs/proposals/scheduling/workload-rebalancer/workload-rebalancer.md
index 160d1ce80..2cecb5df4 100644
--- a/docs/proposals/scheduling/workload-rebalancer/workload-rebalancer.md
+++ b/docs/proposals/scheduling/workload-rebalancer/workload-rebalancer.md
@@ -87,7 +87,7 @@ sufficient resource to accommodate all replicas, so that the application better
 
 #### Story 4
 
-In disaster-recovery scenario, replicas migrated from primary cluster to backup cluster when primary cluster failue.
+In disaster-recovery scenario, replicas migrated from primary cluster to backup cluster when primary cluster failure.
 
 As a cluster administrator, I hope that replicas can migrate back when cluster restored, so that:
 
@@ -402,7 +402,7 @@ status:
 > 1. the `observedWorkloads` is sorted in increasing dict order of the combined string of `apiVersion/kind/namespace/name` .
 > 2. if workload referenced binding not found, it will be marked as `failed` without retry.
 > 3. if workload rebalanced failed due to occasional network error, the controller will retry, and its `result` and `reason`
-> field will left empty until it succees.
+> field will left empty until it succeeds.
 
 ### How to update this resource
 
diff --git a/docs/proposals/service-discovery/README.md b/docs/proposals/service-discovery/README.md
index 8a2d6a182..1cb48158a 100644
--- a/docs/proposals/service-discovery/README.md
+++ b/docs/proposals/service-discovery/README.md
@@ -162,7 +162,7 @@ With this API, we will:
 * Use `ServiceProvisionClusters` to specify the member clusters which will provision the service backend, if leave it empty, we will collect the backend endpoints from all clusters and sync them to the `ServiceConsumptionClusters`.
 * Use `ServiceConsumptionClusters` to specify the clusters where the service will be exposed. If leave it empty, the service will be exposed to all clusters.
 
-For example, if we want access `foo`` service which are localted in member2 from member3 , we can use the following yaml:
+For example, if we want access `foo`` service which are located in member2 from member3 , we can use the following yaml:
 ```yaml
 apiVersion: v1
 kind: Service
@@ -223,10 +223,10 @@ The process of synchronizing `EndpointSlice` from `ServiceProvisionClusters` to
 1. `endpointsliceDispatch` controller will list&watch `MultiClusterService`.
 1. `endpointsliceDispatch` controller will list&watch `EndpointSlice` from `MultiClusterService`'s `spec.serviceProvisionClusters`.
-1. `endpointsliceDispatch` controller will creat the corresponding Work for each `EndpointSlice` in the cluster namespace of `MultiClusterService`'s `spec.serviceConsumptionClusters`.
+1. `endpointsliceDispatch` controller will create the corresponding Work for each `EndpointSlice` in the cluster namespace of `MultiClusterService`'s `spec.serviceConsumptionClusters`.
   When creating the Work, in order to facilitate problem investigation, we should add following annotation to record the original `EndpointSlice` information:
   * `endpointslice.karmada.io/work-provision-cluster`: the cluster name of the original `EndpointSlice`.
-  Also, we should add the following annotation to the syned `EndpointSlice` record the original information:
+  Also, we should add the following annotation to the synced `EndpointSlice` record the original information:
   * `endpointslice.karmada.io/endpointslice-generation`: the resource generation of the `EndpointSlice`, it could be used to check whether the `EndpointSlice` is the newest version.
   * `endpointslice.karmada.io/provision-cluster`: the cluster location of the original `EndpointSlice`.
 1. Karmada will sync the `EndpointSlice`'s work to the member clusters.
@@ -326,7 +326,7 @@ For better monitoring, we should have following metrics:
 * For `multiclusterservice` controller, List&watch cluster creation/deletion, reconcile the work in corresponding cluster execution namespace. (10)
 * For `endpointsliceCollect` controller, List&watch mcs, collect the corresponding EndpointSlice from `serviceProvisionClusters`, and `endpointsliceDispatch` controller should sync the corresponding Work. (5d)
 * For `endpointsliceCollect` controller, List&watch cluster creation/deletion, reconcile the EndpointSlice's work in corresponding cluster execution namespace. (10d)
-* If cluster gets unhealth, mcs-eps-controller should delete the EndpointSlice from all the cluster execution namespace. (5d)
+* If cluster gets unhealthy, mcs-eps-controller should delete the EndpointSlice from all the cluster execution namespace. (5d)
 
 ### Test Plan
 
diff --git a/pkg/controllers/hpascaletargetmarker/hpa_scale_target_marker_controller.go b/pkg/controllers/hpascaletargetmarker/hpa_scale_target_marker_controller.go
index df96a7b6e..c066b3321 100644
--- a/pkg/controllers/hpascaletargetmarker/hpa_scale_target_marker_controller.go
+++ b/pkg/controllers/hpascaletargetmarker/hpa_scale_target_marker_controller.go
@@ -35,7 +35,7 @@ const (
 	scaleTargetWorkerNum = 1
 )
 
-// HpaScaleTargetMarker is to automatically add `retain-replicas` label to resource template mananged by HPA.
+// HpaScaleTargetMarker is to automatically add `retain-replicas` label to resource template managed by HPA.
 type HpaScaleTargetMarker struct {
 	DynamicClient dynamic.Interface
 	RESTMapper meta.RESTMapper
diff --git a/pkg/karmadactl/edit/edit.go b/pkg/karmadactl/edit/edit.go
index 1c7388b4d..e90828f1f 100644
--- a/pkg/karmadactl/edit/edit.go
+++ b/pkg/karmadactl/edit/edit.go
@@ -47,8 +47,8 @@ var (
 )
 
 // NewCmdEdit returns new initialized instance of edit sub command
-func NewCmdEdit(f util.Factory, parentCommnd string, ioStreams genericiooptions.IOStreams) *cobra.Command {
+func NewCmdEdit(f util.Factory, parentCommand string, ioStreams genericiooptions.IOStreams) *cobra.Command {
 	cmd := kubectledit.NewCmdEdit(f, ioStreams)
-	cmd.Example = fmt.Sprintf(editExample, parentCommnd)
+	cmd.Example = fmt.Sprintf(editExample, parentCommand)
 	return cmd
 }
diff --git a/pkg/karmadactl/options/global.go b/pkg/karmadactl/options/global.go
index 6cdf5c944..e24c26722 100644
--- a/pkg/karmadactl/options/global.go
+++ b/pkg/karmadactl/options/global.go
@@ -57,7 +57,7 @@ func (o *OperationScope) String() string {
 	return string(*o)
 }
 
-// Set vaule to OperationScope
+// Set value to OperationScope
 func (o *OperationScope) Set(s string) error {
 	switch s {
 	case "":
diff --git a/pkg/resourceinterpreter/customized/declarative/luavm/lua_convert.go b/pkg/resourceinterpreter/customized/declarative/luavm/lua_convert.go
index a24023e60..36b085dca 100644
--- a/pkg/resourceinterpreter/customized/declarative/luavm/lua_convert.go
+++ b/pkg/resourceinterpreter/customized/declarative/luavm/lua_convert.go
@@ -52,7 +52,7 @@ func ConvertLuaResultInto(luaResult *lua.LTable, obj interface{}, references ...
 	// but if a field is empty struct, it will be encoded into empty slice format as '[]' (root cause is empty lua.LTable
 	// can not be distinguished from empty slice or empty struct).
 	//
-	// Supposing an object contains empty fileds, like following one has an empty slice field and an empty struct field.
+	// Supposing an object contains empty fields, like following one has an empty slice field and an empty struct field.
 	// e.g: struct{one-filed: {}, another-field: []}
 	//
 	// When it is converted to lua.LTable, empty slice and empty struct are all converted to lua.LTable{}, which can't be distinguished.
@@ -195,7 +195,7 @@ func traverseToFindEmptyField(root gjson.Result, fieldPath []string) (sets.Set[s
 // 3. when traverse to the field `spec.dd.ee`, we got an empty slice, but it also exists in `fieldOfEmptyStruct`,
 // so, it originally is struct too, we add it into `fieldOfEmptySliceToStruct` variable.
 // 4. when traverse to the field `spec.dd.ff`, we got an empty slice, but it not exists in either map variable,
-// so, it orinally not exist, we can't judge whether it is struct, so we add it into `fieldOfEmptySliceToDelete` variable to remove it.
+// so, it originally not exist, we can't judge whether it is struct, so we add it into `fieldOfEmptySliceToDelete` variable to remove it.
 //
 // So, finally, fieldOfEmptySliceToStruct={"spec.aa", "spec.dd.ee"}, fieldOfEmptySliceToDelete={"spec.dd.ff"}
 func traverseToFindEmptyFieldNeededModify(root gjson.Result, fieldPath, fieldPathWithArrayIndex []string, fieldOfEmptySlice, fieldOfEmptyStruct sets.Set[string]) (sets.Set[string], sets.Set[string]) {
diff --git a/pkg/resourceinterpreter/default/native/retain_test.go b/pkg/resourceinterpreter/default/native/retain_test.go
index a72bbe784..e15b4cf74 100644
--- a/pkg/resourceinterpreter/default/native/retain_test.go
+++ b/pkg/resourceinterpreter/default/native/retain_test.go
@@ -559,13 +559,13 @@ func Test_retainPodFields(t *testing.T) {
 			}},
 			observed: &corev1.Pod{Spec: corev1.PodSpec{
 				NodeName: "node1",
-				ServiceAccountName: "fake-obersved-sa",
+				ServiceAccountName: "fake-observed-sa",
 				Volumes: []corev1.Volume{
 					{
-						Name: "fake-obersved-volume",
+						Name: "fake-observed-volume",
 						VolumeSource: corev1.VolumeSource{
 							Secret: &corev1.SecretVolumeSource{
-								SecretName: "fake-obersved-secret",
+								SecretName: "fake-observed-secret",
 							},
 						},
 					},
@@ -612,13 +612,13 @@ func Test_retainPodFields(t *testing.T) {
 			},
 			want: &corev1.Pod{Spec: corev1.PodSpec{
 				NodeName: "node1",
-				ServiceAccountName: "fake-obersved-sa",
+				ServiceAccountName: "fake-observed-sa",
 				Volumes: []corev1.Volume{
 					{
-						Name: "fake-obersved-volume",
+						Name: "fake-observed-volume",
 						VolumeSource: corev1.VolumeSource{
 							Secret: &corev1.SecretVolumeSource{
-								SecretName: "fake-obersved-secret",
+								SecretName: "fake-observed-secret",
 							},
 						},
 					},
diff --git a/test/e2e/framework/resourcebinding.go b/test/e2e/framework/resourcebinding.go
index a48d5f252..eb12b70cc 100644
--- a/test/e2e/framework/resourcebinding.go
+++ b/test/e2e/framework/resourcebinding.go
@@ -50,17 +50,17 @@ func AssertBindingScheduledClusters(client karmada.Interface, namespace, name st
 			if err != nil {
 				return err
 			}
-			scheduledClutsers := make([]string, 0, len(binding.Spec.Clusters))
+			scheduledClusters := make([]string, 0, len(binding.Spec.Clusters))
 			for _, scheduledCluster := range binding.Spec.Clusters {
-				scheduledClutsers = append(scheduledClutsers, scheduledCluster.Name)
+				scheduledClusters = append(scheduledClusters, scheduledCluster.Name)
 			}
-			sort.Strings(scheduledClutsers)
-			for _, expectedClutsers := range expectedResults {
-				if reflect.DeepEqual(scheduledClutsers, expectedClutsers) {
+			sort.Strings(scheduledClusters)
+			for _, expectedClusters := range expectedResults {
+				if reflect.DeepEqual(scheduledClusters, expectedClusters) {
 					return nil
 				}
 			}
-			return fmt.Errorf("scheduled clusters: %+v, expected possible results: %+v", scheduledClutsers, expectedResults)
+			return fmt.Errorf("scheduled clusters: %+v, expected possible results: %+v", scheduledClusters, expectedResults)
 		}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
 	})
 }
diff --git a/test/e2e/framework/workloadrebalancer.go b/test/e2e/framework/workloadrebalancer.go
index d5f95c75b..074d7caa1 100644
--- a/test/e2e/framework/workloadrebalancer.go
+++ b/test/e2e/framework/workloadrebalancer.go
@@ -47,7 +47,7 @@ func RemoveWorkloadRebalancer(client karmada.Interface, name string) {
 	})
 }
 
-// UpdateWorkloadRebalancer udpate WorkloadRebalancer with karmada client.
+// UpdateWorkloadRebalancer updates WorkloadRebalancer with karmada client.
 // if workloads/ttl is a nil pointer, keep previous value unchanged.
 func UpdateWorkloadRebalancer(client karmada.Interface, name string, workloads *[]appsv1alpha1.ObjectReference, ttl *int32) {
 	ginkgo.By(fmt.Sprintf("Updating WorkloadRebalancer(%s)'s workloads", name), func() {