Merge pull request #4438 from hezhizhen/typo

fix typo
karmada-bot authored 2023-12-18 14:17:14 +08:00, committed by GitHub
commit 7395a8bdf5
26 changed files with 53 additions and 53 deletions

View File

@@ -11,7 +11,7 @@ global:
## - myRegistryKeySecretName
imagePullSecrets: []
## #param global.commonLabels Common labels applied to all ressources
## #param global.commonLabels Common labels applied to all resources
## E.g.
## commonLabels:
## team: infra

View File

@@ -227,7 +227,7 @@ The process of synchronizing `EndpointSlice` from `ServiceProvisionClusters` to
When creating the Work, in order to facilitate problem investigation, we should add the following annotation to record the original `EndpointSlice` information:
* `endpointslice.karmada.io/work-provision-cluster`: the cluster name of the original `EndpointSlice`.
Also, we should add the following annotations to the synced `EndpointSlice` to record the original information:
* `endpointslice.karmada.io/endpointslice-generation`: the resoruce generation of the `EndpointSlice`, it could be used to check whether the `EndpointSlice` is the newest version.
* `endpointslice.karmada.io/endpointslice-generation`: the resource generation of the `EndpointSlice`, it could be used to check whether the `EndpointSlice` is the newest version.
* `endpointslice.karmada.io/provision-cluster`: the cluster location of the original `EndpointSlice`.
1. Karmada will sync the `EndpointSlice`'s work to the member clusters.
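As a rough sketch of the bookkeeping these annotations enable (the annotation keys appear in the proposal above; the helper function and everything around it are illustrative assumptions, not Karmada's actual implementation):
```go
package main

import (
	"fmt"
	"strconv"

	discoveryv1 "k8s.io/api/discovery/v1"
)

// annotateSyncedEndpointSlice records, on a synced EndpointSlice, which
// cluster the original object came from and which generation was synced,
// so a stale copy can be detected later.
func annotateSyncedEndpointSlice(eps *discoveryv1.EndpointSlice, provisionCluster string, generation int64) {
	if eps.Annotations == nil {
		eps.Annotations = map[string]string{}
	}
	eps.Annotations["endpointslice.karmada.io/provision-cluster"] = provisionCluster
	eps.Annotations["endpointslice.karmada.io/endpointslice-generation"] = strconv.FormatInt(generation, 10)
}

func main() {
	eps := &discoveryv1.EndpointSlice{}
	annotateSyncedEndpointSlice(eps, "member1", 3)
	fmt.Println(eps.Annotations)
}
```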
@@ -274,9 +274,9 @@ We should have following Condition in `MultiClusterService`:
```go
MCSServiceAppliedConditionType = "ServiceApplied"
MCSEndpointSliceCollectedCondtionType = "EndpointSliceCollected"
MCSEndpointSliceCollectedConditionType = "EndpointSliceCollected"
MCSEndpointSliceAppliedCondtionType = "EndpointSliceApplied"
MCSEndpointSliceAppliedConditionType = "EndpointSliceApplied"
```
`MCSServiceAppliedConditionType` is used to record the status of `Service` propagation, for example:
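A minimal sketch of how such a condition could be set with apimachinery's condition helpers (only the condition type name comes from the proposal; the Reason string and the status-field layout are assumptions, not Karmada's actual code):
```go
package conditions

import (
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// setServiceApplied records whether the Service has been propagated,
// using the MCSServiceAppliedConditionType shown above.
func setServiceApplied(conditions *[]metav1.Condition, applied bool, message string) {
	status := metav1.ConditionTrue
	if !applied {
		status = metav1.ConditionFalse
	}
	meta.SetStatusCondition(conditions, metav1.Condition{
		Type:    "ServiceApplied", // MCSServiceAppliedConditionType
		Status:  status,
		Reason:  "ServicePropagation", // hypothetical reason
		Message: message,
	})
}
```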
@@ -361,4 +361,4 @@ Another alternative approach could be to enforce a strict naming convention for
<!--
Note: This is a simplified version of the kubernetes enhancement proposal template.
https://github.com/kubernetes/enhancements/tree/3317d4cb548c396a430d1c1ac6625226018adf6a/keps/NNNN-kep-template
-->
-->

View File

@@ -108,7 +108,7 @@ verify_downloader() {
return 0
}
# Create tempory directory and cleanup when done
# Create temporary directory and cleanup when done
setup_tmp() {
TMP_DIR=$(mktemp -d -t "${INSTALL_CLI_TYPE}"-install.XXXXXXXXXX)
TMP_METADATA="${TMP_DIR}/${INSTALL_CLI_TYPE}.json"

View File

@@ -381,7 +381,7 @@ function util::wait_pod_ready() {
}
# util::wait_apiservice_ready waits for the apiservice state to become Available until timeout.
# Parmeters:
# Parameters:
# - $1: k8s context name, such as "karmada-apiserver"
# - $2: apiservice label, such as "app=etcd"
# - $3: time out, such as "200s"
@@ -402,7 +402,7 @@ function util::wait_apiservice_ready() {
}
# util::wait_cluster_ready waits for the cluster state to become ready until timeout.
# Parmeters:
# Parameters:
# - $1: context name, such as "karmada-apiserver"
# - $2: cluster name, such as "member1"
function util::wait_cluster_ready() {
@@ -444,7 +444,7 @@ function util::kubectl_with_retry() {
# util::delete_all_clusters deletes all clusters directly
# util::delete_all_clusters actually does three things: delete the clusters, remove the kubeconfig, and record the delete log
# Parmeters:
# Parameters:
# - $1: KUBECONFIG file of host cluster, such as "~/.kube/karmada.config"
# - $2: KUBECONFIG file of member cluster, such as "~/.kube/members.config"
# - $3: log file path, such as "/tmp/karmada/"
@@ -466,7 +466,7 @@ function util::delete_all_clusters() {
# util::create_cluster creates a kubernetes cluster
# util::create_cluster creates a kind cluster and does not wait for the control plane node to be ready.
# Parmeters:
# Parameters:
# - $1: cluster name, such as "host"
# - $2: KUBECONFIG file, such as "/var/run/host.config"
# - $3: node docker image to use for booting the cluster, such as "kindest/node:v1.19.1"

View File

@@ -90,7 +90,7 @@ type InitOpt func(o *InitOptions)
var _ tasks.InitData = &initData{}
// initData defines all the runtime information used when running the init workflow;
// this data is shared across all the tasks tha are included in the workflow.
// this data is shared across all the tasks that are included in the workflow.
type initData struct {
sync.Once
certs.CertStore

View File

@@ -105,7 +105,7 @@ func runRemoveComponentSubTask(component string, workloadNameFunc util.Namefunc,
constants.KarmadaOperatorLabel,
)
if err != nil {
return fmt.Errorf("failed to cleanup serivce of component %s, err: %w", component, err)
return fmt.Errorf("failed to cleanup service of component %s, err: %w", component, err)
}
}

View File

@@ -95,7 +95,7 @@ func CreateOrUpdateService(client clientset.Interface, service *corev1.Service)
// Ignore if the Service is invalid with this error message:
// Service "apiserver" is invalid: provided Port is already allocated.
if apierrors.IsInvalid(err) && strings.Contains(err.Error(), errAllocated.Error()) {
klog.V(2).ErrorS(err, "failed to create or update serivce", "service", klog.KObj(service))
klog.V(2).ErrorS(err, "failed to create or update service", "service", klog.KObj(service))
return nil
}
return fmt.Errorf("unable to create Service: %v", err)

View File

@@ -128,7 +128,7 @@ func Unpack(file, targetPath string) error {
}
outFile.Close()
default:
fmt.Printf("uknown type: %v in %s\n", header.Typeflag, header.Name)
fmt.Printf("unknown type: %v in %s\n", header.Typeflag, header.Name)
}
}
return nil

View File

@@ -34,7 +34,7 @@ import (
type labelEventKind int
const (
// addLabelEvent refer to addding util.RetainReplicasLabel to resource scaled by HPA
// addLabelEvent refer to adding util.RetainReplicasLabel to resource scaled by HPA
addLabelEvent labelEventKind = iota
// deleteLabelEvent refer to deleting util.RetainReplicasLabel from resource scaled by HPA
deleteLabelEvent

View File

@@ -62,7 +62,7 @@ import (
)
const (
// bindingDependedIdLabelKey is the resoruce id of the independent binding which the attached binding depends on.
// bindingDependedIdLabelKey is the resource id of the independent binding which the attached binding depends on.
bindingDependedIdLabelKey = "resourcebinding.karmada.io/depended-id"
// bindingDependedByLabelKeyPrefix is the prefix of a label key specifying which independent binding refers to an attached binding.
@@ -377,7 +377,7 @@ func (d *DependenciesDistributor) recordDependencies(binding *workv1alpha2.Resou
klog.Errorf("Failed to marshal dependencies of binding(%s/%s): %v", binding.Namespace, binding.Name, err)
return err
}
depenciesStr := string(dependenciesBytes)
dependenciesStr := string(dependenciesBytes)
objectAnnotation := binding.GetAnnotations()
if objectAnnotation == nil {
@@ -385,11 +385,11 @@ func (d *DependenciesDistributor) recordDependencies(binding *workv1alpha2.Resou
}
// dependencies are not updated, no need to update annotation.
if oldDependencies, exist := objectAnnotation[bindingDependenciesAnnotationKey]; exist && oldDependencies == depenciesStr {
if oldDependencies, exist := objectAnnotation[bindingDependenciesAnnotationKey]; exist && oldDependencies == dependenciesStr {
return nil
}
objectAnnotation[bindingDependenciesAnnotationKey] = depenciesStr
objectAnnotation[bindingDependenciesAnnotationKey] = dependenciesStr
return retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
binding.SetAnnotations(objectAnnotation)
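For context, the update above is wrapped in client-go's retry.RetryOnConflict. A generic, self-contained sketch of that pattern (refetch, mutate, update, retry on 409 conflicts), using a ConfigMap purely for illustration rather than Karmada's binding types:
```go
package annotate

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// updateAnnotation retries the whole get-mutate-update cycle whenever the
// apiserver rejects the update with a resource-version conflict.
func updateAnnotation(ctx context.Context, client kubernetes.Interface, ns, name, key, value string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		cm, err := client.CoreV1().ConfigMaps(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if cm.Annotations == nil {
			cm.Annotations = map[string]string{}
		}
		cm.Annotations[key] = value
		_, err = client.CoreV1().ConfigMaps(ns).Update(ctx, cm, metav1.UpdateOptions{})
		return err
	})
}
```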

View File

@@ -24,7 +24,7 @@ import (
"github.com/karmada-io/karmada/pkg/karmadactl/addons/search"
)
// Install intall the karmada addons process in Addons
// Install install the karmada addons process in Addons
func Install() {
addonsinit.Addons["karmada-descheduler"] = descheduler.AddonDescheduler
addonsinit.Addons["karmada-metrics-adapter"] = metricsadapter.AddonMetricsAdapter

View File

@@ -190,7 +190,7 @@ func installComponentsOnKarmadaControlPlane(opts *addoninit.CommandAddonsEnableO
caCertName := fmt.Sprintf("%s.crt", options.CaCertAndKeyName)
karmadaCerts, err := opts.KubeClientSet.CoreV1().Secrets(opts.Namespace).Get(context.TODO(), options.KarmadaCertsName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("error when getting Secret %s/%s, which is used to fetch CaCert for building APISevice: %+v", opts.Namespace, options.KarmadaCertsName, err)
return fmt.Errorf("error when getting Secret %s/%s, which is used to fetch CaCert for building APIService: %+v", opts.Namespace, options.KarmadaCertsName, err)
}
aaService := &corev1.Service{}

View File

@@ -203,7 +203,7 @@ func installComponentsOnKarmadaControlPlane(opts *addoninit.CommandAddonsEnableO
caCertName := fmt.Sprintf("%s.crt", options.CaCertAndKeyName)
karmadaCerts, err := opts.KubeClientSet.CoreV1().Secrets(opts.Namespace).Get(context.TODO(), options.KarmadaCertsName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("error when getting Secret %s/%s, which is used to fetch CaCert for building APISevice: %+v", opts.Namespace, options.KarmadaCertsName, err)
return fmt.Errorf("error when getting Secret %s/%s, which is used to fetch CaCert for building APIService: %+v", opts.Namespace, options.KarmadaCertsName, err)
}
aaService := &corev1.Service{}

View File

@@ -157,7 +157,7 @@ func NewCmdInit(parentCommand string) *cobra.Command {
flags.Int32VarP(&opts.KubeControllerManagerReplicas, "karmada-kube-controller-manager-replicas", "", 1, "Karmada kube controller manager replica set")
flags.StringVarP(&opts.KarmadaControllerManagerImage, "karmada-controller-manager-image", "", kubernetes.DefaultKarmadaControllerManagerImage, "Karmada controller manager image")
flags.Int32VarP(&opts.KarmadaControllerManagerReplicas, "karmada-controller-manager-replicas", "", 1, "Karmada controller manager replica set")
flags.StringVarP(&opts.KarmadaWebhookImage, "karmada-webhook-image", "", kubernetes.DefualtKarmadaWebhookImage, "Karmada webhook image")
flags.StringVarP(&opts.KarmadaWebhookImage, "karmada-webhook-image", "", kubernetes.DefaultKarmadaWebhookImage, "Karmada webhook image")
flags.Int32VarP(&opts.KarmadaWebhookReplicas, "karmada-webhook-replicas", "", 1, "Karmada webhook replica set")
flags.StringVarP(&opts.KarmadaAggregatedAPIServerImage, "karmada-aggregated-apiserver-image", "", kubernetes.DefaultKarmadaAggregatedAPIServerImage, "Karmada aggregated apiserver image")
flags.Int32VarP(&opts.KarmadaAggregatedAPIServerReplicas, "karmada-aggregated-apiserver-replicas", "", 1, "Karmada aggregated apiserver replica set")

View File

@@ -93,8 +93,8 @@ var (
DefaultKarmadaSchedulerImage string
// DefaultKarmadaControllerManagerImage Karmada controller manager image
DefaultKarmadaControllerManagerImage string
// DefualtKarmadaWebhookImage Karmada webhook image
DefualtKarmadaWebhookImage string
// DefaultKarmadaWebhookImage Karmada webhook image
DefaultKarmadaWebhookImage string
// DefaultKarmadaAggregatedAPIServerImage Karmada aggregated apiserver image
DefaultKarmadaAggregatedAPIServerImage string
)
@@ -117,7 +117,7 @@ func init() {
DefaultInitImage = "docker.io/alpine:3.18.5"
DefaultKarmadaSchedulerImage = fmt.Sprintf("docker.io/karmada/karmada-scheduler:%s", releaseVer.ReleaseVersion())
DefaultKarmadaControllerManagerImage = fmt.Sprintf("docker.io/karmada/karmada-controller-manager:%s", releaseVer.ReleaseVersion())
DefualtKarmadaWebhookImage = fmt.Sprintf("docker.io/karmada/karmada-webhook:%s", releaseVer.ReleaseVersion())
DefaultKarmadaWebhookImage = fmt.Sprintf("docker.io/karmada/karmada-webhook:%s", releaseVer.ReleaseVersion())
DefaultKarmadaAggregatedAPIServerImage = fmt.Sprintf("docker.io/karmada/karmada-aggregated-apiserver:%s", releaseVer.ReleaseVersion())
}
@@ -696,7 +696,7 @@ func (i *CommandInitOption) karmadaControllerManagerImage() string {
// get karmada-webhook image
func (i *CommandInitOption) karmadaWebhookImage() string {
if i.ImageRegistry != "" && i.KarmadaWebhookImage == DefualtKarmadaWebhookImage {
if i.ImageRegistry != "" && i.KarmadaWebhookImage == DefaultKarmadaWebhookImage {
return i.ImageRegistry + "/karmada-webhook:" + karmadaRelease
}
return i.KarmadaWebhookImage

View File

@@ -96,7 +96,7 @@ func TestFlagsIP(t *testing.T) {
want []net.IP
}{
{
name: "all ips are vaild",
name: "all ips are valid",
ip: "10.0.0.1,10.0.0.2",
want: []net.IP{
net.ParseIP("10.0.0.1"),
@ -104,7 +104,7 @@ func TestFlagsIP(t *testing.T) {
},
},
{
name: "have invaild ip",
name: "have invalid ip",
ip: "10.0.0,10.0.0.2",
want: []net.IP{
net.ParseIP("127.0.0.1"),

View File

@@ -123,7 +123,7 @@ func DeCompress(file, targetPath string) error {
}
outFile.Close()
default:
fmt.Printf("uknown type: %v in %s\n", header.Typeflag, header.Name)
fmt.Printf("unknown type: %v in %s\n", header.Typeflag, header.Name)
}
}
return nil

View File

@@ -390,7 +390,7 @@ func parseEditedIntoCustomization(file []byte, into *configv1alpha1.ResourceInte
if currRule == nil {
return fmt.Errorf("unexpected line %q", line)
}
script += string(line) + "\n"
script += line + "\n"
}
}

View File

@@ -155,7 +155,7 @@ type CommandPromoteOption struct {
// PolicyName is the name of the PropagationPolicy (or ClusterPropagationPolicy).
// It defaults to the name of the resource being promoted, with a random hash suffix.
// It will be ingnored if AutoCreatePolicy is false.
// It will be ignored if AutoCreatePolicy is false.
PolicyName string
resource.FilenameOptions

View File

@@ -308,7 +308,7 @@ func (o *CommandRegisterOption) Validate() error {
}
if !o.BootstrapToken.UnsafeSkipCAVerification && len(o.BootstrapToken.CACertHashes) == 0 {
return fmt.Errorf("need to varify CACertHashes, or set --discovery-token-unsafe-skip-ca-verification=true")
return fmt.Errorf("need to verify CACertHashes, or set --discovery-token-unsafe-skip-ca-verification=true")
}
if !filepath.IsAbs(o.CACertPath) || !strings.HasSuffix(o.CACertPath, ".crt") {
@@ -592,7 +592,7 @@ func (o *CommandRegisterOption) createSecretAndRBACInMemberCluster(karmadaAgentC
StringData: map[string]string{KarmadaKubeconfigName: string(configBytes)},
}
// cerate karmada-kubeconfig secret to be used by karmada-agent component.
// create karmada-kubeconfig secret to be used by karmada-agent component.
if err := cmdutil.CreateOrUpdateSecret(o.memberClusterClient, kubeConfigSecret); err != nil {
return fmt.Errorf("create secret %s failed: %v", kubeConfigSecret.Name, err)
}

View File

@@ -47,7 +47,7 @@ var _ Factory = &factoryImpl{}
type factoryImpl struct {
cmdutil.Factory
// kubeConfigFlags holds all the flags specificed by user.
// kubeConfigFlags holds all the flags specified by user.
// These flags will be inherited by the member cluster's client.
kubeConfigFlags *genericclioptions.ConfigFlags
}

View File

@@ -68,7 +68,7 @@ func (c *CustomMetricsProvider) GetMetricByName(ctx context.Context, name types.
return nil, err
}
metricValueList := &custom_metrics.MetricValueList{}
metricsChanel := make(chan *custom_metrics.MetricValueList)
metricsChannel := make(chan *custom_metrics.MetricValueList)
wg := sync.WaitGroup{}
for _, cluster := range clusters {
wg.Add(1)
@@ -79,15 +79,15 @@ func (c *CustomMetricsProvider) GetMetricByName(ctx context.Context, name types.
klog.Warningf("query %s's %s metric from cluster %s failed, err: %+v", info.GroupResource.String(), info.Metric, clusterName, err)
return
}
metricsChanel <- metrics
metricsChannel <- metrics
}(cluster.Name)
}
go func() {
wg.Wait()
close(metricsChanel)
close(metricsChannel)
}()
for {
metrics, ok := <-metricsChanel
metrics, ok := <-metricsChannel
if !ok {
break
}
@@ -119,7 +119,7 @@ func (c *CustomMetricsProvider) GetMetricBySelector(ctx context.Context, namespa
}
metricValueList := &custom_metrics.MetricValueList{}
wg := sync.WaitGroup{}
metricsChanel := make(chan *custom_metrics.MetricValueList)
metricsChannel := make(chan *custom_metrics.MetricValueList)
for _, cluster := range clusters {
wg.Add(1)
go func(clusterName string) {
@@ -129,16 +129,16 @@ func (c *CustomMetricsProvider) GetMetricBySelector(ctx context.Context, namespa
klog.Warningf("query %s's %s metric from cluster %s failed", info.GroupResource.String(), info.Metric, clusterName)
return
}
metricsChanel <- metrics
metricsChannel <- metrics
}(cluster.Name)
}
go func() {
wg.Wait()
close(metricsChanel)
close(metricsChannel)
}()
sameMetrics := make(map[string]custom_metrics.MetricValue)
for {
metrics, ok := <-metricsChanel
metrics, ok := <-metricsChannel
if !ok {
break
}
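The renamed metricsChannel above is part of a standard fan-in: one goroutine per cluster sends results onto a shared channel, a closer goroutine waits on the WaitGroup before closing the channel, and the receiver drains until close. A self-contained sketch of the pattern (simplified types; not the provider's actual code):
```go
package main

import (
	"fmt"
	"sync"
)

// fanIn queries each cluster concurrently and collects the results.
// The channel is closed only after every sender has finished, so the
// receiving loop terminates cleanly.
func fanIn(clusters []string, query func(string) (int, error)) []int {
	resultsChannel := make(chan int)
	var wg sync.WaitGroup
	for _, cluster := range clusters {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			v, err := query(name)
			if err != nil {
				return // skip failed clusters, as the provider does
			}
			resultsChannel <- v
		}(cluster)
	}
	go func() {
		wg.Wait()
		close(resultsChannel)
	}()
	var out []int
	for v := range resultsChannel {
		out = append(out, v)
	}
	return out
}

func main() {
	fmt.Println(fanIn([]string{"member1", "member2"}, func(string) (int, error) { return 1, nil }))
}
```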

View File

@@ -114,7 +114,7 @@ func (r *ResourceMetricsProvider) getMetricsParallel(resourceFunc queryResourceF
}
// step 2. Query metrics from the filtered target clusters
metricsChanel := make(chan interface{})
metricsChannel := make(chan interface{})
var wg sync.WaitGroup
for _, clusterName := range targetClusters {
@@ -139,17 +139,17 @@ func (r *ResourceMetricsProvider) getMetricsParallel(resourceFunc queryResourceF
// If there are multiple metrics with the same name, it's ok because it's an array instead of a map.
// The HPA controller will calculate the average utilization with the array.
metricsChanel <- metrics
metricsChannel <- metrics
}(clusterName)
}
go func() {
wg.Wait()
close(metricsChanel)
close(metricsChannel)
}()
for {
data, ok := <-metricsChanel
data, ok := <-metricsChannel
if !ok {
break
}
@@ -426,7 +426,7 @@ func (p *PodLister) convertToPodPartialData(pod *corev1.Pod, selector string, la
ret.Annotations = map[string]string{}
}
//If user sets this annotation, we need to remove it to avoid parsing wrong next.
// If user sets this annotation, we need to remove it to avoid parsing wrong next.
if !labelSelector {
delete(ret.Annotations, namespaceSpecifiedAnnotation)
delete(ret.Annotations, labelSelectorAnnotationInternal)
@@ -539,7 +539,7 @@ func (n *NodeLister) List(selector labels.Selector) (ret []*corev1.Node, err err
nodeTyped.Annotations = map[string]string{}
}
//If user sets this annotation, we need to reset it.
// If user sets this annotation, we need to reset it.
nodeTyped.Annotations[labelSelectorAnnotationInternal] = selector.String()
ret = append(ret, nodeTyped)
}
@@ -587,7 +587,7 @@ func (n *NodeLister) Get(name string) (*corev1.Node, error) {
nodeTyped.Annotations = map[string]string{}
}
//If user sets this annotation, we need to remove it to avoid parsing wrong next.
// If user sets this annotation, we need to remove it to avoid parsing wrong next.
delete(nodeTyped.Annotations, labelSelectorAnnotationInternal)
}

View File

@@ -76,7 +76,7 @@ func VerifyResourceInterpreterContext(uid types.UID, operation configv1alpha1.In
switch r := interpreterContext.(type) {
case *configv1alpha1.ResourceInterpreterContext:
if r.Response == nil {
return nil, fmt.Errorf("webhook resonse was absent")
return nil, fmt.Errorf("webhook response was absent")
}
if r.Response.UID != uid {

View File

@@ -115,7 +115,7 @@ func (c *Cluster) Connect(ctx context.Context, request framework.ProxyRequest) (
return h, nil
}
// Objects get by client via proxy are edited some fields, different from objets in member clusters.
// Objects get by client via proxy are edited some fields, different from objects in member clusters.
// So before update, we shall recover these fields.
return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
if err = modifyRequest(req, clusterName); err != nil {

View File

@@ -421,7 +421,7 @@ var _ = framework.SerialDescribe("Karmadactl cordon/uncordon testing", ginkgo.La
err := createCluster(clusterName, kubeConfigPath, controlPlane, clusterContext)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
ginkgo.By(fmt.Sprintf("Joinning cluster: %s", clusterName), func() {
ginkgo.By(fmt.Sprintf("Joining cluster: %s", clusterName), func() {
cmd := framework.NewKarmadactlCommand(kubeconfig, karmadaContext, karmadactlPath, "", karmadactlTimeout,
"join", "--cluster-kubeconfig", kubeConfigPath, "--cluster-context", clusterContext, "--cluster-namespace", "karmada-cluster", clusterName)
_, err := cmd.ExecOrDie()