fix spelling errors
Signed-off-by: whitewindmills <jayfantasyhjh@gmail.com>
parent dd6a313add
commit c8423cb237
@@ -566,7 +566,7 @@ func startFederatedHorizontalPodAutoscalerController(ctx controllerscontext.Cont
         EventRecorder: ctx.Mgr.GetEventRecorderFor(federatedhpa.ControllerName),
         RESTMapper: ctx.Mgr.GetRESTMapper(),
         DownscaleStabilisationWindow: ctx.Opts.HPAControllerConfiguration.HorizontalPodAutoscalerDownscaleStabilizationWindow.Duration,
-        HorizontalPodAutoscalerSyncPeroid: ctx.Opts.HPAControllerConfiguration.HorizontalPodAutoscalerSyncPeriod.Duration,
+        HorizontalPodAutoscalerSyncPeriod: ctx.Opts.HPAControllerConfiguration.HorizontalPodAutoscalerSyncPeriod.Duration,
         ReplicaCalc: replicaCalculator,
         ClusterScaleClientSetFunc: util.NewClusterScaleClientSet,
         TypedInformerManager: typedmanager.GetInstance(),
@@ -165,7 +165,7 @@ The optimization design for the MultiClusterService API needs to be further iter
 
 Before delving into the specific design details, let's first take a look from the user's perspective at what preparations they need to make.
 
-1. The user creates a foo Deployment and Service on the Karmad control panel, and creates a PropagationPolicy to distribute them into the member cluster member1.
+1. The user creates a foo Deployment and Service on the Karmada control panel, and creates a PropagationPolicy to distribute them into the member cluster member1.
 
 
 
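For context on the proposal step above: a PropagationPolicy matching that scenario could be built with the Karmada policy API types roughly as follows. This is a minimal sketch for illustration only; the names foo and member1 come from the proposal text, while the default namespace and everything else here is assumed rather than taken from the commit.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
)

// newFooPropagationPolicy sketches a PropagationPolicy that selects the foo
// Deployment and Service and places them in the member cluster member1.
// The object name and namespace are assumptions made for this example.
func newFooPropagationPolicy() *policyv1alpha1.PropagationPolicy {
	return &policyv1alpha1.PropagationPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
		Spec: policyv1alpha1.PropagationSpec{
			ResourceSelectors: []policyv1alpha1.ResourceSelector{
				{APIVersion: "apps/v1", Kind: "Deployment", Name: "foo"},
				{APIVersion: "v1", Kind: "Service", Name: "foo"},
			},
			Placement: policyv1alpha1.Placement{
				ClusterAffinity: &policyv1alpha1.ClusterAffinity{
					ClusterNames: []string{"member1"},
				},
			},
		},
	}
}

func main() {
	fmt.Println(newFooPropagationPolicy().Name)
}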
@@ -13,7 +13,7 @@ type CertStore interface {
     AddCert(cert *KarmadaCert)
     GetCert(name string) *KarmadaCert
     CertList() []*KarmadaCert
-    LoadCertFormSercret(sercret *corev1.Secret) error
+    LoadCertFromSecret(secret *corev1.Secret) error
 }
 
 type splitToPairNameFunc func(name string) string

@@ -65,7 +65,7 @@ func (store *KarmadaCertStore) GetCert(name string) *KarmadaCert {
     return nil
 }
 
-// CertList lists all of karmada certs in the cert chache.
+// CertList lists all of karmada certs in the cert cache.
 func (store *KarmadaCertStore) CertList() []*KarmadaCert {
     certs := make([]*KarmadaCert, 0, len(store.certs))
 
@@ -76,15 +76,15 @@ func (store *KarmadaCertStore) CertList() []*KarmadaCert {
     return certs
 }
 
-// LoadCertFormSercret loads a set of certs form k8s secret resource. we get cert
+// LoadCertFromSecret loads a set of certs form k8s secret resource. we get cert
 // cache key by calling the pairNameFunc function. if the secret data key suffix is ".crt",
 // it be considered cert data. if the suffix is ".key", it be considered cert key data.
-func (store *KarmadaCertStore) LoadCertFormSercret(sercret *corev1.Secret) error {
-    if len(sercret.Data) == 0 {
+func (store *KarmadaCertStore) LoadCertFromSecret(secret *corev1.Secret) error {
+    if len(secret.Data) == 0 {
         return fmt.Errorf("cert data is empty")
     }
 
-    for name, data := range sercret.Data {
+    for name, data := range secret.Data {
         pairName := store.pairNameFunc(name)
         kc := store.GetCert(pairName)
         if kc == nil {
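As an aside on the LoadCertFromSecret comment above: the suffix convention it describes (".crt" data is the certificate, ".key" data is the private key, both grouped under one pair name) can be illustrated with a small standalone sketch. This is not the operator's code; the secret key names below are made up for the example.

package main

import (
	"fmt"
	"strings"
)

// pairName trims the ".crt"/".key" suffix so both halves of a pair share one
// cache key, mirroring the naming convention described in the comment above.
func pairName(key string) string {
	key = strings.TrimSuffix(key, ".crt")
	return strings.TrimSuffix(key, ".key")
}

func main() {
	// Hypothetical secret data; real key names depend on how the secret was written.
	secretData := map[string][]byte{
		"karmada-apiserver.crt": []byte("cert bytes"),
		"karmada-apiserver.key": []byte("key bytes"),
	}

	type certPair struct{ cert, key []byte }
	pairs := map[string]*certPair{}

	for name, data := range secretData {
		p, ok := pairs[pairName(name)]
		if !ok {
			p = &certPair{}
			pairs[pairName(name)] = p
		}
		switch {
		case strings.HasSuffix(name, ".crt"):
			p.cert = data // ".crt" suffix is treated as certificate data
		case strings.HasSuffix(name, ".key"):
			p.key = data // ".key" suffix is treated as private key data
		}
	}

	fmt.Println(len(pairs)) // 1: both entries collapse into a single pair
}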
@@ -105,7 +105,7 @@ const (
     // KarmadaOperatorLabelKeyName defines a label key used by all resources created by karmada operator
     KarmadaOperatorLabelKeyName = "app.kubernetes.io/managed-by"
 
-    // APIServiceName defines the karmada aggregated apiserver APISerivce resource name.
+    // APIServiceName defines the karmada aggregated apiserver APIService resource name.
     APIServiceName = "v1alpha1.cluster.karmada.io"
 )
 
@@ -46,7 +46,7 @@ func skipCerts(d workflow.RunData) (bool, error) {
         return false, nil
     }
 
-    if err := data.LoadCertFormSercret(secret); err != nil {
+    if err := data.LoadCertFromSecret(secret); err != nil {
         return false, err
     }
 
@@ -53,7 +53,7 @@ func runDeployEtcd(r workflow.RunData) error {
     }
 
     if cfg.Etcd.Local == nil {
-        return errors.New("unexpect empty etcd local configuration")
+        return errors.New("unexpected empty etcd local configuration")
     }
 
     err := etcd.EnsureKarmadaEtcd(data.RemoteClient(), cfg.Etcd.Local, data.GetName(), data.GetNamespace())
@@ -102,7 +102,7 @@ func runCrds(r workflow.RunData) error {
 
     cert := data.GetCert(constants.CaCertAndKeyName)
     if len(cert.CertData()) == 0 {
-        return errors.New("unexpect empty ca cert data")
+        return errors.New("unexpected empty ca cert data")
     }
 
     caBase64 := base64.StdEncoding.EncodeToString(cert.CertData())

@@ -149,7 +149,7 @@ func patchCrds(crdsClient *crdsclient.Clientset, patchPath string, caBundle stri
     }
 
     crdPath := path.Join(patchPath, file.Name())
-    crdBytes, err := util.RelpaceYamlForReg(crdPath, caBundle, reg)
+    crdBytes, err := util.ReplaceYamlForReg(crdPath, caBundle, reg)
     if err != nil {
         return err
     }

@@ -171,7 +171,7 @@ func runWebhookConfiguration(r workflow.RunData) error {
 
     cert := data.GetCert(constants.CaCertAndKeyName)
     if len(cert.CertData()) == 0 {
-        return errors.New("unexpect empty ca cert data for webhookConfiguration")
+        return errors.New("unexpected empty ca cert data for webhookConfiguration")
     }
 
     caBase64 := base64.StdEncoding.EncodeToString(cert.CertData())
@@ -168,7 +168,7 @@ func runUploadCerts(r workflow.RunData) error {
     klog.V(4).InfoS("[upload-certs] Running upload-certs task", "karmada", klog.KObj(data))
 
     if len(data.CertList()) == 0 {
-        return errors.New("there is no certs in store, please reload crets to store")
+        return errors.New("there is no certs in store, please reload certs to store")
     }
     return nil
 }

@@ -78,14 +78,14 @@ func runWaitControlPlane(r workflow.RunData) error {
     return nil
 }
 
-func newWaitControlPlaneSubTask(component string, lables labels.Set) workflow.Task {
+func newWaitControlPlaneSubTask(component string, ls labels.Set) workflow.Task {
     return workflow.Task{
         Name: component,
-        Run: runWaitControlPlaneSubTask(component, lables),
+        Run: runWaitControlPlaneSubTask(component, ls),
     }
 }
 
-func runWaitControlPlaneSubTask(component string, lables labels.Set) func(r workflow.RunData) error {
+func runWaitControlPlaneSubTask(component string, ls labels.Set) func(r workflow.RunData) error {
     return func(r workflow.RunData) error {
         data, ok := r.(InitData)
         if !ok {

@@ -93,7 +93,7 @@ func runWaitControlPlaneSubTask(component string, lables labels.Set) func(r work
         }
 
         waiter := apiclient.NewKarmadaWaiter(nil, data.RemoteClient(), componentBeReadyTimeout)
-        if err := waiter.WaitForSomePods(lables.String(), data.GetNamespace(), 1); err != nil {
+        if err := waiter.WaitForSomePods(ls.String(), data.GetNamespace(), 1); err != nil {
            return fmt.Errorf("waiting for %s to ready timeout, err: %w", component, err)
         }
 
@@ -196,7 +196,7 @@ func CreateCustomResourceDefinitionIfNeed(client *crdsclient.Clientset, obj *api
     return nil
 }
 
-// PatchCustomResourceDefinition patchs a crd resource.
+// PatchCustomResourceDefinition patches a crd resource.
 func PatchCustomResourceDefinition(client *crdsclient.Clientset, name string, data []byte) error {
     crd := client.ApiextensionsV1().CustomResourceDefinitions()
     if _, err := crd.Patch(context.TODO(), name, types.StrategicMergePatchType, data, metav1.PatchOptions{}); err != nil {

@@ -41,7 +41,7 @@ type KarmadaWaiter struct {
     timeout time.Duration
 }
 
-// NewKarmadaWaiter reurn a karmada waiter, the rest config is to create crd client or aggregate client.
+// NewKarmadaWaiter returns a karmada waiter, the rest config is to create crd client or aggregate client.
 func NewKarmadaWaiter(config *rest.Config, client clientset.Interface, timeout time.Duration) Waiter {
     return &KarmadaWaiter{
         karmadaConfig: config,

@@ -169,8 +169,8 @@ func isPodRunning(pod corev1.Pod) bool {
         return false
     }
 
-    for _, condtion := range pod.Status.Conditions {
-        if condtion.Type == corev1.PodReady && condtion.Status == corev1.ConditionTrue {
+    for _, condition := range pod.Status.Conditions {
+        if condition.Type == corev1.PodReady && condition.Status == corev1.ConditionTrue {
             return true
         }
     }
@@ -76,7 +76,7 @@ func (p *Patcher) ForDeployment(deployment *appsv1.Deployment) {
 
     overrideArgs := map[string]string{}
 
-    // merge featureGates and build to an argurment.
+    // merge featureGates and build to an argument.
     if len(p.featureGates) != 0 {
         baseFeatureGates := map[string]bool{}
 
@@ -147,7 +147,7 @@ func parseFeatrueGatesArgumentToMap(featureGates string) map[string]bool {
 
     featureGatesMap := map[string]bool{}
     for _, featureGate := range featureGateSlice {
-        key, val, err := parseFeatrueGate(featureGate)
+        key, val, err := parseFeatureGate(featureGate)
         if err != nil {
             continue
         }

@@ -221,7 +221,7 @@ func parseArgument(arg string) (string, string, error) {
     return keyvalSlice[0], keyvalSlice[1], nil
 }
 
-func parseFeatrueGate(featureGate string) (string, bool, error) {
+func parseFeatureGate(featureGate string) (string, bool, error) {
     if !strings.Contains(featureGate, "=") {
         return "", false, errors.New("the featureGate should have a '=' between the flag and the value")
     }
@@ -169,8 +169,8 @@ func ReadYamlFile(path string) ([]byte, error) {
     return yaml.YAMLToJSON(data)
 }
 
-// RelpaceYamlForReg replace content of yaml file with a Regexp
-func RelpaceYamlForReg(path, destResource string, reg *regexp.Regexp) ([]byte, error) {
+// ReplaceYamlForReg replace content of yaml file with a Regexp
+func ReplaceYamlForReg(path, destResource string, reg *regexp.Regexp) ([]byte, error) {
     data, err := os.ReadFile(path)
     if err != nil {
         return nil, err

@@ -2,7 +2,7 @@ package v1alpha1
 
 const (
     // TaintClusterUnscheduler will be added when cluster becomes unschedulable
-    // and removed when cluster becomes scheduable.
+    // and removed when cluster becomes schedulable.
     TaintClusterUnscheduler = "cluster.karmada.io/unschedulable"
     // TaintClusterNotReady will be added when cluster is not ready
     // and removed when cluster becomes ready.
@@ -74,7 +74,7 @@ type FederatedHPAController struct {
 
     monitor monitor.Monitor
 
-    HorizontalPodAutoscalerSyncPeroid time.Duration
+    HorizontalPodAutoscalerSyncPeriod time.Duration
     DownscaleStabilisationWindow time.Duration
     // Latest unstabilized recommendations for each autoscaler.
     recommendations map[string][]timestampedRecommendation

@@ -166,7 +166,7 @@ func (c *FederatedHPAController) Reconcile(ctx context.Context, req controllerru
         return controllerruntime.Result{}, err
     }
 
-    return controllerruntime.Result{RequeueAfter: c.HorizontalPodAutoscalerSyncPeroid}, nil
+    return controllerruntime.Result{RequeueAfter: c.HorizontalPodAutoscalerSyncPeriod}, nil
 }
 
 //nolint:gocyclo
@@ -555,7 +555,7 @@ func TestWorkStatusController_syncWorkStatus(t *testing.T) {
         pod *corev1.Pod
         raw []byte
         controllerWithoutInformer bool
-        workWithRigntNS bool
+        workWithRightNS bool
         expectedError bool
         workWithDeletionTimestamp bool
         wrongWorkNS bool

@@ -566,7 +566,7 @@ func TestWorkStatusController_syncWorkStatus(t *testing.T) {
         pod: newPod(workNs, workName),
         raw: []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"pod","namespace":"default"}}`),
         controllerWithoutInformer: true,
-        workWithRigntNS: true,
+        workWithRightNS: true,
         expectedError: true,
     },
     {

@@ -575,7 +575,7 @@ func TestWorkStatusController_syncWorkStatus(t *testing.T) {
         pod: newPod(workNs, workName),
         raw: []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"pod","namespace":"default"}}`),
         controllerWithoutInformer: true,
-        workWithRigntNS: true,
+        workWithRightNS: true,
         expectedError: true,
     },
     {

@@ -584,7 +584,7 @@ func TestWorkStatusController_syncWorkStatus(t *testing.T) {
         pod: newPod(workNs, workName),
         raw: []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"pod","namespace":"default"}}`),
         controllerWithoutInformer: false,
-        workWithRigntNS: true,
+        workWithRightNS: true,
         expectedError: true,
     },
     {

@@ -592,7 +592,7 @@ func TestWorkStatusController_syncWorkStatus(t *testing.T) {
         obj: newPodObj("karmada-es-cluster"),
         raw: []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"pod","namespace":"default"}}`),
         controllerWithoutInformer: true,
-        workWithRigntNS: true,
+        workWithRightNS: true,
         expectedError: false,
     },
     {

@@ -601,7 +601,7 @@ func TestWorkStatusController_syncWorkStatus(t *testing.T) {
         pod: newPod(workNs, workName, true),
         raw: []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"pod","namespace":"default"}}`),
         controllerWithoutInformer: true,
-        workWithRigntNS: true,
+        workWithRightNS: true,
         expectedError: false,
     },
     {

@@ -610,7 +610,7 @@ func TestWorkStatusController_syncWorkStatus(t *testing.T) {
         pod: newPod(workNs, workName),
         raw: []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"pod","namespace":"default"}}`),
         controllerWithoutInformer: true,
-        workWithRigntNS: false,
+        workWithRightNS: false,
         expectedError: false,
     },
     {

@@ -619,7 +619,7 @@ func TestWorkStatusController_syncWorkStatus(t *testing.T) {
         pod: newPod(workNs, workName),
         raw: []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"pod1","namespace":"default"}}`),
         controllerWithoutInformer: true,
-        workWithRigntNS: true,
+        workWithRightNS: true,
         expectedError: true,
     },
     {

@@ -628,7 +628,7 @@ func TestWorkStatusController_syncWorkStatus(t *testing.T) {
         pod: newPod(workNs, workName),
         raw: []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"pod","namespace":"default"}}`),
         controllerWithoutInformer: true,
-        workWithRigntNS: true,
+        workWithRightNS: true,
         expectedError: true,
         wrongWorkNS: true,
     },

@@ -656,7 +656,7 @@ func TestWorkStatusController_syncWorkStatus(t *testing.T) {
     }
 
     var work *workv1alpha1.Work
-    if tt.workWithRigntNS {
+    if tt.workWithRightNS {
         work = testhelper.NewWork(workName, workNs, workUID, tt.raw)
     } else {
         work = testhelper.NewWork(workName, fmt.Sprintf("%v-test", workNs), workUID, tt.raw)
@@ -29,7 +29,7 @@ var (
     # Specify the Karmada control plane kubeconfig
     %[1]s disable karmada-search --karmada-kubeconfig /etc/karmada/karmada-apiserver.config
 
-    # Sepcify the namespace where Karmada components are installed
+    # Specify the namespace where Karmada components are installed
     %[1]s disable karmada-search --namespace karmada-system
 `)
 )

@@ -34,7 +34,7 @@ var (
     # Specify the karmada-search image
     %[1]s enable karmada-search --karmada-search-image docker.io/karmada/karmada-search:latest
 
-    # Sepcify the namespace where Karmada components are installed
+    # Specify the namespace where Karmada components are installed
     %[1]s enable karmada-search --namespace karmada-system
 `)
 )

@@ -19,7 +19,7 @@ type GlobalCommandOptions struct {
     KarmadaConfig string
     KarmadaContext string
 
-    // Namespace holds the namespace where Karmada components intalled
+    // Namespace holds the namespace where Karmada components installed
     Namespace string
 
     // Cluster holds the name of member cluster to enable or disable scheduler estimator

@@ -23,7 +23,7 @@ var (
     # Specify the karmada control plane kubeconfig
     %[1]s list --karmada-kubeconfig /etc/karmada/karmada-apiserver.config
 
-    # Sepcify the namespace where Karmada components are installed
+    # Specify the namespace where Karmada components are installed
     %[1]s list --namespace karmada-system
 `)
 )
@@ -104,7 +104,7 @@ func InitKarmadaResources(dir, caBase64, systemNamespace string) error {
         klog.Exitln(err)
     }
 
-    if err = createExtralResources(clientSet, dir); err != nil {
+    if err = createExtraResources(clientSet, dir); err != nil {
         klog.Exitln(err)
     }
 
@@ -143,7 +143,7 @@ func InitKarmadaBootstrapToken(dir string) (string, error) {
     return registerCommand, nil
 }
 
-func createExtralResources(clientSet *kubernetes.Clientset, dir string) error {
+func createExtraResources(clientSet *kubernetes.Clientset, dir string) error {
     // grant view clusterrole with karamda resource permission
     if err := grantKarmadaPermissionToViewClusterRole(clientSet); err != nil {
         return err
@@ -83,19 +83,19 @@ func TestDownloadFile(t *testing.T) {
 
 func TestListFiles(t *testing.T) {
     tests := []struct {
-        name     string
-        path     string
-        tempfils []string
+        name      string
+        path      string
+        tempfiles []string
     }{
         {
-            name:     "get files from path",
-            path:     "temp-path" + randString(),
-            tempfils: []string{"tempfiles1" + randString(), "tempfiles2" + randString()},
+            name:      "get files from path",
+            path:      "temp-path" + randString(),
+            tempfiles: []string{"tempfiles1" + randString(), "tempfiles2" + randString()},
         },
         {
-            name:     "no files from path",
-            path:     "temp-path" + randString(),
-            tempfils: []string{},
+            name:      "no files from path",
+            path:      "temp-path" + randString(),
+            tempfiles: []string{},
         },
     }
     for _, tt := range tests {

@@ -106,9 +106,9 @@ func TestListFiles(t *testing.T) {
     defer os.RemoveAll(tt.path)
 
     var want []string
-    for i := 0; i < len(tt.tempfils); i++ {
-        want = append(want, tt.path+"/"+tt.tempfils[i])
-        _, err = os.Create(tt.path + "/" + tt.tempfils[i])
+    for i := 0; i < len(tt.tempfiles); i++ {
+        want = append(want, tt.path+"/"+tt.tempfiles[i])
+        _, err = os.Create(tt.path + "/" + tt.tempfiles[i])
         if err != nil {
             t.Fatal(err)
         }
@@ -30,7 +30,7 @@ var (
     %[1]s exec mypod -c ruby-container -C=member1 -- date
 
     # Get output from running the 'date' command in ruby-container from pod mypod in cluster(member1)
-    %[1]sexec mypod -c ruby-container -C=member1 -- date
+    %[1]s exec mypod -c ruby-container -C=member1 -- date
 
     # Switch to raw terminal mode; sends stdin to 'bash' in ruby-container from pod mypod in cluster(member1)
     # and sends stdout/stderr from 'bash' back to the client

@@ -767,7 +767,7 @@ func (g *CommandGetOptions) printGeneric(r *resource.Result) error {
 
     var obj runtime.Object
     if !singleItemImplied || len(infos) != 1 {
-        // we have zero or multple items, so coerce all items into a list.
+        // we have zero or multiple items, so coerce all items into a list.
         // we don't want an *unstructured.Unstructured list yet, as we
         // may be dealing with non-unstructured objects. Compose all items
         // into an corev1.List, and then decode using an unstructured scheme.

@@ -211,7 +211,7 @@ type CommandRegisterOption struct {
     EnableCertRotation bool
 
     // CACertPath is the path to the SSL certificate authority used to
-    // secure comunications between member cluster and karmada-control-plane.
+    // secure communications between member cluster and karmada-control-plane.
     // Defaults to "/etc/karmada/pki/ca.crt".
     CACertPath string
 
@@ -310,7 +310,7 @@ func (o *CommandRegisterOption) Run(parentCommand string) error {
         fmt.Printf("\n[preflight] Please check the above errors\n")
         return nil
     }
-    fmt.Println("[prefligt] All pre-flight checks were passed")
+    fmt.Println("[preflight] All pre-flight checks were passed")
 
     if o.DryRun {
         return nil

@@ -472,7 +472,7 @@ func (o *CommandRegisterOption) discoveryBootstrapConfigAndClusterInfo(bootstrap
 func (o *CommandRegisterOption) constructKarmadaAgentConfig(bootstrapClient *kubeclient.Clientset, karmadaClusterInfo *clientcmdapi.Cluster) (*clientcmdapi.Config, error) {
     var cert []byte
 
-    pk, csr, err := generatKeyAndCSR(o.ClusterName)
+    pk, csr, err := generateKeyAndCSR(o.ClusterName)
     if err != nil {
         return nil, err
     }

@@ -720,8 +720,8 @@ func (o *CommandRegisterOption) makeKarmadaAgentDeployment() *appsv1.Deployment
     return karmadaAgent
 }
 
-// generatKeyAndCSR generate private key and csr
-func generatKeyAndCSR(clusterName string) (*rsa.PrivateKey, []byte, error) {
+// generateKeyAndCSR generate private key and csr
+func generateKeyAndCSR(clusterName string) (*rsa.PrivateKey, []byte, error) {
     pk, err := rsa.GenerateKey(rand.Reader, 2048)
     if err != nil {
         return nil, nil, err

@@ -96,8 +96,8 @@ func (f *factoryImpl) FactoryForMemberCluster(clusterName string) (cmdutil.Facto
         WrapConfigFn: f.kubeConfigFlags.WrapConfigFn,
     }
     // Override the kube-apiserver address.
-    memberAPIserver := karmadaAPIServer + fmt.Sprintf(proxyURL, clusterName)
-    kubeConfigFlags.APIServer = &memberAPIserver
+    memberAPIServer := karmadaAPIServer + fmt.Sprintf(proxyURL, clusterName)
+    kubeConfigFlags.APIServer = &memberAPIServer
     matchVersionKubeConfigFlags := cmdutil.NewMatchVersionFlags(kubeConfigFlags)
     return cmdutil.NewFactory(matchVersionKubeConfigFlags), nil
 }
@@ -52,7 +52,7 @@ type ClusterResourceNode struct {
     quantity int
 
     // resourceList records the resource list of this node.
-    // It maybe contain cpu, mrmory, gpu...
+    // It maybe contain cpu, memory, gpu...
     // User can specify which parameters need to be included before the cluster starts
     // +required
     resourceList ResourceList

@@ -240,9 +240,9 @@ func rbtConvertToLl(rbt *rbt.Tree) *list.List {
 }
 
 // ConvertToResourceList is convert from corev1.ResourceList to ResourceList
-func ConvertToResourceList(rslist corev1.ResourceList) ResourceList {
+func ConvertToResourceList(rsList corev1.ResourceList) ResourceList {
     resourceList := ResourceList{}
-    for name, quantity := range rslist {
+    for name, quantity := range rsList {
         if name == corev1.ResourceCPU {
             resourceList[clusterapis.ResourceCPU] = quantity
         } else if name == corev1.ResourceMemory {

@@ -265,16 +265,16 @@ func (rs *ResourceSummary) GetNodeNumFromModel(model *resourceModels) int {
     } else if model.linkedlist == nil && model.redblackTree == nil {
         return 0
     } else if model.linkedlist != nil && model.redblackTree != nil {
-        klog.Info("GetNodeNum: unknow error")
+        klog.Info("GetNodeNum: unknown error")
     }
     return 0
 }
 
-// DeleteFromResourceSummary dalete resource node into modeling summary
+// DeleteFromResourceSummary deletes resource node into modeling summary
 func (rs *ResourceSummary) DeleteFromResourceSummary(crn ClusterResourceNode) error {
     index := rs.getIndex(crn)
     if index == -1 {
-        return errors.New("ClusterResource can not delet the resource summary: index is invalid.")
+        return errors.New("ClusterResource can not delete the resource summary: index is invalid")
     }
     modeling := &(*rs)[index]
     if rs.GetNodeNumFromModel(modeling) >= 6 {
@@ -352,7 +352,7 @@ func Test_interpretDaemonSetHealth(t *testing.T) {
         wantErr: false,
     },
     {
-        name: "updatedNumberScheduled < desiredNumberSchedulerd",
+        name: "updatedNumberScheduled < desiredNumberScheduled",
         object: &unstructured.Unstructured{
             Object: map[string]interface{}{
                 "status": map[string]interface{}{

@@ -72,7 +72,7 @@ func GroupClustersWithScore(
     calAvailableReplicasFunc func(clusters []*clusterv1alpha1.Cluster, spec *workv1alpha2.ResourceBindingSpec) []workv1alpha2.TargetCluster,
 ) *GroupClustersInfo {
     if isTopologyIgnored(placement) {
-        return groupClustersIngoreTopology(clustersScore, spec, calAvailableReplicasFunc)
+        return groupClustersIgnoringTopology(clustersScore, spec, calAvailableReplicasFunc)
     }
 
     return groupClustersBasedTopology(clustersScore, spec, placement.SpreadConstraints, calAvailableReplicasFunc)

@@ -98,7 +98,7 @@ func groupClustersBasedTopology(
     return groupClustersInfo
 }
 
-func groupClustersIngoreTopology(
+func groupClustersIgnoringTopology(
     clustersScore framework.ClusterScoreList,
     rbSpec *workv1alpha2.ResourceBindingSpec,
     calAvailableReplicasFunc func(clusters []*clusterv1alpha1.Cluster, spec *workv1alpha2.ResourceBindingSpec) []workv1alpha2.TargetCluster,
@@ -8,14 +8,14 @@ import (
 
 // Default is the default BackendStore
 type Default struct {
-    resourceEventHander cache.ResourceEventHandler
+    resourceEventHandler cache.ResourceEventHandler
 }
 
 // NewDefaultBackend create a new default BackendStore
 func NewDefaultBackend(cluster string) *Default {
     klog.Infof("create default backend store: %s", cluster)
     return &Default{
-        resourceEventHander: &cache.ResourceEventHandlerFuncs{
+        resourceEventHandler: &cache.ResourceEventHandlerFuncs{
             AddFunc: func(obj interface{}) {
                 us, ok := obj.(*unstructured.Unstructured)
                 if !ok {

@@ -45,9 +45,9 @@ func NewDefaultBackend(cluster string) *Default {
     }}}
 }
 
-// ResourceEventHandlerFuncs return the ResourceEventHandlerFuncs
+// ResourceEventHandlerFuncs returns the ResourceEventHandler
 func (d *Default) ResourceEventHandlerFuncs() cache.ResourceEventHandler {
-    return d.resourceEventHander
+    return d.resourceEventHandler
 }
 
 // Close close the BackendStore
@@ -194,7 +194,7 @@ func IsClusterIdentifyUnique(controlPlaneClient karmadaclientset.Interface, id s
     return true, "", nil
 }
 
-// ClusterAccessCredentialChanged checks whether the cluster a ccess credential changed
+// ClusterAccessCredentialChanged checks whether the cluster access credential changed
 func ClusterAccessCredentialChanged(newSpec, oldSpec clusterv1alpha1.ClusterSpec) bool {
     if oldSpec.APIEndpoint == newSpec.APIEndpoint &&
         oldSpec.InsecureSkipTLSVerification == newSpec.InsecureSkipTLSVerification &&

@@ -39,7 +39,7 @@ func TestGenEventRef(t *testing.T) {
         wantErr: false,
     },
     {
-        name: "missing metadata.uid but has resourcetemplate.karmada.io/uid annontation",
+        name: "missing metadata.uid but has resourcetemplate.karmada.io/uid annotation",
         obj: &unstructured.Unstructured{
             Object: map[string]interface{}{
                 "apiVersion": "apps/v1",

@@ -689,7 +689,7 @@ func TestGetByOperation(t *testing.T) {
         expectedRule: nil,
     },
     {
-        name: "case-insensitive operation namee",
+        name: "case-insensitive operation name",
         operation: "InterpretDEPendency",
         expectedRule: &dependencyInterpretationRule{},
     },