Merge pull request #3000 from Fish-pro/fix/logupperstart
Uniform klog output starts with uppercase
Commit: 3d06bb84a3
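For context, the convention this PR enforces: klog messages begin with an uppercase letter, while error strings returned to callers stay lowercase per Go convention. Below is a minimal illustrative sketch (not part of the diff; the addHealthCheck helper is hypothetical):

package main

import (
	"errors"
	"fmt"

	"k8s.io/klog/v2"
)

// addHealthCheck is a hypothetical stand-in for the kind of call sites touched by this PR.
func addHealthCheck() error {
	return errors.New("endpoint already registered") // error strings stay lowercase
}

func main() {
	defer klog.Flush()
	if err := addHealthCheck(); err != nil {
		// Before: klog.Errorf("failed to add health check endpoint: %v", err)
		// After this PR: the log message starts with an uppercase letter.
		klog.Errorf("Failed to add health check endpoint: %v", err)
		// Wrapped errors keep the lowercase style expected by Go linters.
		fmt.Println(fmt.Errorf("failed to add health check endpoint: %w", err))
	}
}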
@@ -208,7 +208,7 @@ func run(ctx context.Context, opts *options.Options) error {
 }
 if err := controllerManager.AddHealthzCheck("ping", healthz.Ping); err != nil {
-klog.Errorf("failed to add health check endpoint: %v", err)
+klog.Errorf("Failed to add health check endpoint: %v", err)
 return err
 }
@@ -147,12 +147,12 @@ func Run(ctx context.Context, opts *options.Options) error {
 }),
 })
 if err != nil {
-klog.Errorf("failed to build controller manager: %v", err)
+klog.Errorf("Failed to build controller manager: %v", err)
 return err
 }
 if err := controllerManager.AddHealthzCheck("ping", healthz.Ping); err != nil {
-klog.Errorf("failed to add health check endpoint: %v", err)
+klog.Errorf("Failed to add health check endpoint: %v", err)
 return err
 }
@@ -107,11 +107,11 @@ func Run(ctx context.Context, opts *options.Options) error {
 HealthProbeBindAddress: opts.HealthProbeBindAddress,
 })
 if err != nil {
-klog.Errorf("failed to build webhook server: %v", err)
+klog.Errorf("Failed to build webhook server: %v", err)
 return err
 }
-klog.Info("registering webhooks to the webhook server")
+klog.Info("Registering webhooks to the webhook server")
 hookServer := hookManager.GetWebhookServer()
 hookServer.Register("/mutate-propagationpolicy", &webhook.Admission{Handler: propagationpolicy.NewMutatingHandler(
 opts.DefaultNotReadyTolerationSeconds, opts.DefaultUnreachableTolerationSeconds)})
@@ -76,18 +76,18 @@ func Run(ctx context.Context, opts *options.Options) error {
 LeaderElection: false,
 })
 if err != nil {
-klog.Errorf("failed to build webhook server: %v", err)
+klog.Errorf("Failed to build webhook server: %v", err)
 return err
 }
-klog.Info("registering webhooks to the webhook server")
+klog.Info("Registering webhooks to the webhook server")
 hookServer := hookManager.GetWebhookServer()
 hookServer.Register("/interpreter-workload", interpreter.NewWebhook(&workloadInterpreter{}, interpreter.NewDecoder(gclient.NewSchema())))
 hookServer.WebhookMux.Handle("/readyz/", http.StripPrefix("/readyz/", &healthz.Handler{}))
 // blocks until the context is done.
 if err := hookManager.Start(ctx); err != nil {
-klog.Errorf("webhook server exits unexpectedly: %v", err)
+klog.Errorf("Webhook server exits unexpectedly: %v", err)
 return err
 }
@@ -81,12 +81,12 @@ func Run(ctx context.Context, o *options.Options) error {
 manager, err := createControllerManager(ctx, o)
 if err != nil {
-klog.Errorf("failed to build controller manager: %v", err)
+klog.Errorf("Failed to build controller manager: %v", err)
 return err
 }
 if err := manager.AddHealthzCheck("ping", healthz.Ping); err != nil {
-klog.Errorf("failed to add health check endpoint: %v", err)
+klog.Errorf("Failed to add health check endpoint: %v", err)
 return err
 }
@@ -95,7 +95,7 @@ func Run(ctx context.Context, o *options.Options) error {
 Manager: manager,
 }
 if err := controllers.StartControllers(controllerCtx, controllersDisabledByDefault); err != nil {
-klog.Errorf("failed to start controllers: %v", err)
+klog.Errorf("Failed to start controllers: %v", err)
 return err
 }
@@ -99,7 +99,7 @@ func (c completedConfig) New(kubeClient kubernetes.Interface) (*APIServer, error
 clusterStorage, err := clusterstorage.NewStorage(Scheme, kubeClient, c.GenericConfig.RESTOptionsGetter)
 if err != nil {
-klog.Errorf("unable to create REST storage for a resource due to %v, will die", err)
+klog.Errorf("Unable to create REST storage for a resource due to %v, will die", err)
 return nil, err
 }
 v1alpha1cluster := map[string]rest.Storage{}
@@ -109,7 +109,7 @@ func (d *ClusterDetector) OnDelete(obj interface{}) {
 func (d *ClusterDetector) Reconcile(key util.QueueKey) error {
 clusterWideKey, ok := key.(keys.ClusterWideKey)
 if !ok {
-klog.Errorf("invalid key")
+klog.Errorf("Invalid key")
 return fmt.Errorf("invalid key")
 }
 klog.Infof("Reconciling cluster-api object: %s", clusterWideKey)
@@ -194,7 +194,7 @@ func (c *ResourceBindingController) updateResourceStatus(binding *workv1alpha2.R
 return err
 }
 if reflect.DeepEqual(obj, newObj) {
-klog.V(3).Infof("ignore update resource(%s/%s/%s) status as up to date", resource.Kind, resource.Namespace, resource.Name)
+klog.V(3).Infof("Ignore update resource(%s/%s/%s) status as up to date", resource.Kind, resource.Namespace, resource.Name)
 return nil
 }
@@ -202,7 +202,7 @@ func (c *ResourceBindingController) updateResourceStatus(binding *workv1alpha2.R
 klog.Errorf("Failed to update resource(%s/%s/%s), Error: %v", resource.Kind, resource.Namespace, resource.Name, err)
 return err
 }
-klog.V(3).Infof("update resource status successfully for resource(%s/%s/%s)", resource.Kind, resource.Namespace, resource.Name)
+klog.V(3).Infof("Update resource status successfully for resource(%s/%s/%s)", resource.Kind, resource.Namespace, resource.Name)
 return nil
 }
@@ -191,7 +191,7 @@ func (c *ClusterResourceBindingController) updateResourceStatus(binding *workv1a
 return err
 }
 if reflect.DeepEqual(obj, newObj) {
-klog.V(3).Infof("ignore update resource(%s/%s/%s) status as up to date", resource.Kind, resource.Namespace, resource.Name)
+klog.V(3).Infof("Ignore update resource(%s/%s/%s) status as up to date", resource.Kind, resource.Namespace, resource.Name)
 return nil
 }
@@ -199,7 +199,7 @@ func (c *ClusterResourceBindingController) updateResourceStatus(binding *workv1a
 klog.Errorf("Failed to update resource(%s/%s/%s), Error: %v", resource.Kind, resource.Namespace, resource.Name, err)
 return err
 }
-klog.V(3).Infof("update resource status successfully for resource(%s/%s/%s)", resource.Kind, resource.Namespace, resource.Name)
+klog.V(3).Infof("Update resource status successfully for resource(%s/%s/%s)", resource.Kind, resource.Namespace, resource.Name)
 return nil
 }
@@ -97,7 +97,7 @@ func ensureWork(
 if resourceInterpreter.HookEnabled(clonedWorkload.GroupVersionKind(), configv1alpha1.InterpreterOperationReviseReplica) {
 clonedWorkload, err = resourceInterpreter.ReviseReplica(clonedWorkload, desireReplicaInfos[targetCluster.Name])
 if err != nil {
-klog.Errorf("failed to revise replica for %s/%s/%s in cluster %s, err is: %v",
+klog.Errorf("Failed to revise replica for %s/%s/%s in cluster %s, err is: %v",
 workload.GetKind(), workload.GetNamespace(), workload.GetName(), targetCluster.Name, err)
 return err
 }
@@ -109,7 +109,7 @@ func ensureWork(
 // Refer to: https://kubernetes.io/docs/concepts/workloads/controllers/job/#parallel-jobs.
 if len(jobCompletions) > 0 {
 if err = helper.ApplyReplica(clonedWorkload, int64(jobCompletions[i].Replicas), util.CompletionsField); err != nil {
-klog.Errorf("failed to apply Completions for %s/%s/%s in cluster %s, err is: %v",
+klog.Errorf("Failed to apply Completions for %s/%s/%s in cluster %s, err is: %v",
 clonedWorkload.GetKind(), clonedWorkload.GetNamespace(), clonedWorkload.GetName(), targetCluster.Name, err)
 return err
 }
@@ -127,7 +127,7 @@ func ensureWork(
 annotations := mergeAnnotations(clonedWorkload, binding, scope)
 annotations, err = RecordAppliedOverrides(cops, ops, annotations)
 if err != nil {
-klog.Errorf("failed to record appliedOverrides, Error: %v", err)
+klog.Errorf("Failed to record appliedOverrides, Error: %v", err)
 return err
 }
@@ -329,7 +329,7 @@ func (c *Controller) ExecutionSpaceExistForCluster(clusterName string) (bool, er
 executionSpaceObj := &corev1.Namespace{}
 err = c.Client.Get(context.TODO(), types.NamespacedName{Name: executionSpaceName}, executionSpaceObj)
 if apierrors.IsNotFound(err) {
-klog.V(2).Infof("execution space(%s) no longer exists", executionSpaceName)
+klog.V(2).Infof("Execution space(%s) no longer exists", executionSpaceName)
 return false, nil
 }
 if err != nil {
@@ -548,7 +548,7 @@ func (c *Controller) tryUpdateClusterHealth(ctx context.Context, cluster *cluste
 LastTransitionTime: nowTimestamp,
 })
 } else {
-klog.V(2).Infof("cluster %v hasn't been updated for %+v. Last %v is: %+v",
+klog.V(2).Infof("Cluster %v hasn't been updated for %+v. Last %v is: %+v",
 cluster.Name, metav1.Now().Time.Sub(clusterHealth.probeTimestamp.Time), clusterConditionType, currentCondition)
 if currentCondition.Status != metav1.ConditionUnknown {
 currentCondition.Status = metav1.ConditionUnknown
@@ -133,7 +133,7 @@ func (c *Controller) tryDeleteWorkload(clusterName string, work *workv1alpha1.Wo
 workload := &unstructured.Unstructured{}
 err := workload.UnmarshalJSON(manifest.Raw)
 if err != nil {
-klog.Errorf("failed to unmarshal workload, error is: %v", err)
+klog.Errorf("Failed to unmarshal workload, error is: %v", err)
 return err
 }
@@ -190,7 +190,7 @@ func (c *Controller) syncToClusters(clusterName string, work *workv1alpha1.Work)
 workload := &unstructured.Unstructured{}
 err := workload.UnmarshalJSON(manifest.Raw)
 if err != nil {
-klog.Errorf("failed to unmarshal workload, error is: %v", err)
+klog.Errorf("Failed to unmarshal workload, error is: %v", err)
 errs = append(errs, err)
 continue
 }
@@ -291,7 +291,7 @@ func (c *Controller) updateAppliedCondition(work *workv1alpha1.Work, status meta
 // make a copy, so we don't mutate the shared cache
 work = updated.DeepCopy()
 } else {
-klog.Errorf("failed to get updated work %s/%s: %v", work.Namespace, work.Name, err)
+klog.Errorf("Failed to get updated work %s/%s: %v", work.Namespace, work.Name, err)
 }
 return updateErr
 })
@@ -156,7 +156,7 @@ func (c *StatusController) collectQuotaStatus(quota *policyv1alpha1.FederatedRes
 // make a copy, so we don't mutate the shared cache
 quota = updated.DeepCopy()
 } else {
-klog.Errorf("failed to get updated federatedResourceQuota(%s): %v", klog.KObj(quota).String(), err)
+klog.Errorf("Failed to get updated federatedResourceQuota(%s): %v", klog.KObj(quota).String(), err)
 }
 return updateErr
@@ -94,7 +94,7 @@ func (c *EndpointSliceController) collectEndpointSliceFromWork(work *workv1alpha
 endpointSlice := &discoveryv1.EndpointSlice{}
 err = helper.ConvertToTypedObject(unstructObj, endpointSlice)
 if err != nil {
-klog.Errorf("failed to convert unstructured to typed object: %v", err)
+klog.Errorf("Failed to convert unstructured to typed object: %v", err)
 return controllerruntime.Result{Requeue: true}, err
 }
@@ -113,7 +113,7 @@ func isWorkContains(manifests []workv1alpha1.Manifest, targetResource schema.Gro
 workload := &unstructured.Unstructured{}
 err := workload.UnmarshalJSON(manifests[index].Raw)
 if err != nil {
-klog.Errorf("failed to unmarshal work manifests index %d, error is: %v", index, err)
+klog.Errorf("Failed to unmarshal work manifests index %d, error is: %v", index, err)
 continue
 }
@@ -147,7 +147,7 @@ func (c *ServiceImportController) updateServiceStatus(svcImport *mcsv1alpha1.Ser
 if err = c.Get(context.TODO(), client.ObjectKey{Namespace: derivedService.Namespace, Name: derivedService.Name}, updated); err == nil {
 derivedService = updated
 } else {
-klog.Errorf("failed to get updated service %s/%s: %v", derivedService.Namespace, derivedService.Name, err)
+klog.Errorf("Failed to get updated service %s/%s: %v", derivedService.Namespace, derivedService.Name, err)
 }
 return updateErr
@@ -124,7 +124,7 @@ func (c *Controller) buildWorks(namespace *corev1.Namespace, clusters []clusterv
 annotations, err := binding.RecordAppliedOverrides(cops, nil, nil)
 if err != nil {
-klog.Errorf("failed to record appliedOverrides, Error: %v", err)
+klog.Errorf("Failed to record appliedOverrides, Error: %v", err)
 ch <- fmt.Errorf("sync namespace(%s) to cluster(%s) failed due to: %v", clonedNamespaced.GetName(), cluster.GetName(), err)
 return
 }
@@ -133,7 +133,7 @@ func (c *ClusterStatusController) Reconcile(ctx context.Context, req controllerr
 // start syncing status only when the finalizer is present on the given Cluster to
 // avoid conflict with cluster controller.
 if !controllerutil.ContainsFinalizer(cluster, util.ClusterControllerFinalizer) {
-klog.V(2).Infof("waiting finalizer present for member cluster: %s", cluster.Name)
+klog.V(2).Infof("Waiting finalizer present for member cluster: %s", cluster.Name)
 return controllerruntime.Result{Requeue: true}, nil
 }
@@ -275,7 +275,7 @@ func (c *ClusterStatusController) updateStatusIfNeeded(cluster *clusterv1alpha1.
 // make a copy, so we don't mutate the shared cache
 cluster = updated.DeepCopy()
 } else {
-klog.Errorf("failed to get updated cluster %s: %v", cluster.Name, err)
+klog.Errorf("Failed to get updated cluster %s: %v", cluster.Name, err)
 }
 return updateErr
 })
@@ -342,7 +342,7 @@ func (c *WorkStatusController) reflectStatus(work *workv1alpha1.Work, clusterObj
 //make a copy, so we don't mutate the shared cache
 workCopy = updated.DeepCopy()
 } else {
-klog.Errorf("failed to get updated work %s/%s: %v", workCopy.Namespace, workCopy.Name, err)
+klog.Errorf("Failed to get updated work %s/%s: %v", workCopy.Namespace, workCopy.Name, err)
 }
 return updateErr
 })
@@ -123,13 +123,13 @@ func (c *Controller) syncImpersonationConfig(cluster *clusterv1alpha1.Cluster) e
 // step5: sync clusterrole to cluster for impersonation
 if err := c.buildImpersonationClusterRole(cluster, rules); err != nil {
-klog.Errorf("failed to sync impersonate clusterrole to cluster(%s): %v", cluster.Name, err)
+klog.Errorf("Failed to sync impersonate clusterrole to cluster(%s): %v", cluster.Name, err)
 return err
 }
 // step6: sync clusterrolebinding to cluster for impersonation
 if err := c.buildImpersonationClusterRoleBinding(cluster); err != nil {
-klog.Errorf("failed to sync impersonate clusterrolebinding to cluster(%s): %v", cluster.Name, err)
+klog.Errorf("Failed to sync impersonate clusterrolebinding to cluster(%s): %v", cluster.Name, err)
 return err
 }
@@ -165,7 +165,7 @@ func (d *DependenciesDistributor) OnDelete(obj interface{}) {
 func (d *DependenciesDistributor) Reconcile(key util.QueueKey) error {
 clusterWideKey, ok := key.(keys.ClusterWideKey)
 if !ok {
-klog.Error("invalid key")
+klog.Error("Invalid key")
 return fmt.Errorf("invalid key")
 }
 klog.V(4).Infof("DependenciesDistributor start to reconcile object: %s", clusterWideKey)
@@ -188,18 +188,18 @@ func (d *DependenciesDistributor) Reconcile(key util.QueueKey) error {
 matched, err := dependentObjectReferenceMatches(clusterWideKey, binding)
 if err != nil {
-klog.Errorf("failed to evaluate if binding(%s/%s) need to sync dependencies: %v", binding.Namespace, binding.Name, err)
+klog.Errorf("Failed to evaluate if binding(%s/%s) need to sync dependencies: %v", binding.Namespace, binding.Name, err)
 errs = append(errs, err)
 continue
 } else if !matched {
-klog.V(4).Infof("no need to sync binding(%s/%s)", binding.Namespace, binding.Name)
+klog.V(4).Infof("No need to sync binding(%s/%s)", binding.Namespace, binding.Name)
 continue
 }
-klog.V(4).Infof("resource binding(%s/%s) is matched for resource(%s/%s)", binding.Namespace, binding.Name, clusterWideKey.Namespace, clusterWideKey.Name)
+klog.V(4).Infof("Resource binding(%s/%s) is matched for resource(%s/%s)", binding.Namespace, binding.Name, clusterWideKey.Namespace, clusterWideKey.Name)
 bindingKey, err := detector.ClusterWideKeyFunc(binding)
 if err != nil {
-klog.Errorf("failed to generate cluster wide key for binding %s/%s: %v", binding.Namespace, binding.Name, err)
+klog.Errorf("Failed to generate cluster wide key for binding %s/%s: %v", binding.Namespace, binding.Name, err)
 errs = append(errs, err)
 continue
 }
@@ -243,13 +243,13 @@ func dependentObjectReferenceMatches(objectKey keys.ClusterWideKey, referenceBin
 func (d *DependenciesDistributor) OnResourceBindingUpdate(oldObj, newObj interface{}) {
 oldBindingObject := &workv1alpha2.ResourceBinding{}
 if err := helper.ConvertToTypedObject(oldObj, oldBindingObject); err != nil {
-klog.Warningf("convert to resource binding failed: %v", err)
+klog.Warningf("Convert to resource binding failed: %v", err)
 return
 }
 newBindingObject := &workv1alpha2.ResourceBinding{}
 if err := helper.ConvertToTypedObject(newObj, newBindingObject); err != nil {
-klog.Warningf("convert to resource binding failed: %v", err)
+klog.Warningf("Convert to resource binding failed: %v", err)
 return
 }
@@ -280,7 +280,7 @@ func (d *DependenciesDistributor) OnResourceBindingUpdate(oldObj, newObj interfa
 func (d *DependenciesDistributor) OnResourceBindingDelete(obj interface{}) {
 bindingObject := &workv1alpha2.ResourceBinding{}
 if err := helper.ConvertToTypedObject(obj, bindingObject); err != nil {
-klog.Warningf("convert to resource binding failed: %v", err)
+klog.Warningf("Convert to resource binding failed: %v", err)
 return
 }
@@ -416,7 +416,7 @@ func (d *DependenciesDistributor) syncScheduleResultToAttachedBindings(binding *
 func (d *DependenciesDistributor) recordDependenciesForIndependentBinding(binding *workv1alpha2.ResourceBinding, dependencies []configv1alpha1.DependentObjectReference) error {
 dependenciesBytes, err := json.Marshal(dependencies)
 if err != nil {
-klog.Errorf("failed to marshal dependencies of binding(%s/%s): %v", binding.Namespace, binding.Name, err)
+klog.Errorf("Failed to marshal dependencies of binding(%s/%s): %v", binding.Namespace, binding.Name, err)
 return err
 }
@@ -444,7 +444,7 @@ func (d *DependenciesDistributor) recordDependenciesForIndependentBinding(bindin
 //make a copy, so we don't mutate the shared cache
 binding = updated.DeepCopy()
 } else {
-klog.Errorf("failed to get updated binding %s/%s: %v", binding.Namespace, binding.Name, err)
+klog.Errorf("Failed to get updated binding %s/%s: %v", binding.Namespace, binding.Name, err)
 }
 return updateErr
 })
@@ -572,14 +572,14 @@ func buildAttachedBinding(binding *workv1alpha2.ResourceBinding, object *unstruc
 func (d *DependenciesDistributor) createOrUpdateAttachedBinding(attachedBinding *workv1alpha2.ResourceBinding) error {
 if err := d.Client.Create(context.TODO(), attachedBinding); err != nil {
 if !apierrors.IsAlreadyExists(err) {
-klog.Infof("failed to create resource binding(%s/%s): %v", attachedBinding.Namespace, attachedBinding.Name, err)
+klog.Infof("Failed to create resource binding(%s/%s): %v", attachedBinding.Namespace, attachedBinding.Name, err)
 return err
 }
 existBinding := &workv1alpha2.ResourceBinding{}
 key := client.ObjectKeyFromObject(attachedBinding)
 if err := d.Client.Get(context.TODO(), key, existBinding); err != nil {
-klog.Infof("failed to get resource binding(%s/%s): %v", attachedBinding.Namespace, attachedBinding.Name, err)
+klog.Infof("Failed to get resource binding(%s/%s): %v", attachedBinding.Namespace, attachedBinding.Name, err)
 return err
 }
@@ -589,7 +589,7 @@ func (d *DependenciesDistributor) createOrUpdateAttachedBinding(attachedBinding
 existBinding.Spec.Resource = attachedBinding.Spec.Resource
 if err := d.Client.Update(context.TODO(), existBinding); err != nil {
-klog.Errorf("failed to update resource binding(%s/%s): %v", existBinding.Namespace, existBinding.Name, err)
+klog.Errorf("Failed to update resource binding(%s/%s): %v", existBinding.Namespace, existBinding.Name, err)
 return err
 }
 }
@@ -207,7 +207,7 @@ func (d *ResourceDetector) NeedLeaderElection() bool {
 func (d *ResourceDetector) Reconcile(key util.QueueKey) error {
 clusterWideKey, ok := key.(keys.ClusterWideKey)
 if !ok {
-klog.Error("invalid key")
+klog.Error("Invalid key")
 return fmt.Errorf("invalid key")
 }
 klog.Infof("Reconciling object: %s", clusterWideKey)
@@ -318,14 +318,14 @@ func (d *ResourceDetector) LookForMatchedPolicy(object *unstructured.Unstructure
 return nil, nil
 }
-klog.V(2).Infof("attempts to match policy for resource(%s)", objectKey)
+klog.V(2).Infof("Attempts to match policy for resource(%s)", objectKey)
 policyObjects, err := d.propagationPolicyLister.ByNamespace(objectKey.Namespace).List(labels.Everything())
 if err != nil {
 klog.Errorf("Failed to list propagation policy: %v", err)
 return nil, err
 }
 if len(policyObjects) == 0 {
-klog.V(2).Infof("no propagationpolicy find in namespace(%s).", objectKey.Namespace)
+klog.V(2).Infof("No propagationpolicy find in namespace(%s).", objectKey.Namespace)
 return nil, nil
 }
@@ -344,14 +344,14 @@ func (d *ResourceDetector) LookForMatchedPolicy(object *unstructured.Unstructure
 // LookForMatchedClusterPolicy tries to find a ClusterPropagationPolicy for object referenced by object key.
 func (d *ResourceDetector) LookForMatchedClusterPolicy(object *unstructured.Unstructured, objectKey keys.ClusterWideKey) (*policyv1alpha1.ClusterPropagationPolicy, error) {
-klog.V(2).Infof("attempts to match cluster policy for resource(%s)", objectKey)
+klog.V(2).Infof("Attempts to match cluster policy for resource(%s)", objectKey)
 policyObjects, err := d.clusterPropagationPolicyLister.List(labels.Everything())
 if err != nil {
 klog.Errorf("Failed to list cluster propagation policy: %v", err)
 return nil, err
 }
 if len(policyObjects) == 0 {
-klog.V(2).Infof("no clusterpropagationpolicy find.")
+klog.V(2).Infof("No clusterpropagationpolicy find.")
 return nil, nil
 }
@@ -1079,7 +1079,7 @@ func (d *ResourceDetector) CleanupResourceBindingLabels(rb *workv1alpha2.Resourc
 //make a copy, so we don't mutate the shared cache
 rb = updated.DeepCopy()
 } else {
-klog.Errorf("failed to get updated resource binding %s/%s: %v", rb.GetNamespace(), rb.GetName(), err)
+klog.Errorf("Failed to get updated resource binding %s/%s: %v", rb.GetNamespace(), rb.GetName(), err)
 }
 return updateErr
 })
@@ -1104,7 +1104,7 @@ func (d *ResourceDetector) CleanupClusterResourceBindingLabels(crb *workv1alpha2
 //make a copy, so we don't mutate the shared cache
 crb = updated.DeepCopy()
 } else {
-klog.Errorf("failed to get updated cluster resource binding %s: %v", crb.GetName(), err)
+klog.Errorf("Failed to get updated cluster resource binding %s: %v", crb.GetName(), err)
 }
 return updateErr
 })
@@ -60,7 +60,7 @@ func CreateBootstrapConfigMapIfNotExists(clientSet *kubernetes.Clientset, file s
 // CreateClusterInfoRBACRules creates the RBAC rules for exposing the cluster-info ConfigMap in the kube-public namespace to unauthenticated users
 func CreateClusterInfoRBACRules(clientSet *kubernetes.Clientset) error {
-klog.V(1).Info("creating the RBAC rules for exposing the cluster-info ConfigMap in the kube-public namespace")
+klog.V(1).Info("Creating the RBAC rules for exposing the cluster-info ConfigMap in the kube-public namespace")
 err := cmdutil.CreateOrUpdateRole(clientSet, &rbacv1.Role{
 ObjectMeta: metav1.ObjectMeta{
 Name: BootstrapSignerClusterRoleName,
@@ -35,7 +35,7 @@ func grantProxyPermissionToAdmin(clientSet kubernetes.Interface) error {
 Name: clusterProxyAdminUser,
 }}, nil)
-klog.V(1).Info("grant cluster proxy permission to 'system:admin'")
+klog.V(1).Info("Grant cluster proxy permission to 'system:admin'")
 err = cmdutil.CreateOrUpdateClusterRoleBinding(clientSet, proxyAdminClusterRoleBinding)
 if err != nil {
 return err
@@ -115,7 +115,7 @@ func grantAccessPermissionToAgent(clientSet kubernetes.Interface) error {
 Name: karmadaAgentGroup,
 }}, nil)
-klog.V(1).Info("grant the limited access permission to 'karmada-agent'")
+klog.V(1).Info("Grant the limited access permission to 'karmada-agent'")
 err = cmdutil.CreateOrUpdateClusterRoleBinding(clientSet, clusterRoleBinding)
 if err != nil {
 return err
@@ -38,7 +38,7 @@ func isPodReady(c *kubernetes.Clientset, n, p string) wait.ConditionFunc {
 }
 if pod.Status.Phase == corev1.PodPending && len(pod.Status.ContainerStatuses) == 0 {
-klog.Warningf("pod: %s not ready. status: %v", pod.Name, corev1.PodPending)
+klog.Warningf("Pod: %s not ready. status: %v", pod.Name, corev1.PodPending)
 return false, nil
 }
@@ -49,7 +49,7 @@ func isPodReady(c *kubernetes.Clientset, n, p string) wait.ConditionFunc {
 klog.Infof("pod: %s is ready. status: %v", pod.Name, podStatus(pod))
 return true, nil
 }
-klog.Warningf("pod: %s not ready. status: %v", pod.Name, podStatus(pod))
+klog.Warningf("Pod: %s not ready. status: %v", pod.Name, podStatus(pod))
 return false, nil
 default:
 continue
@@ -95,10 +95,10 @@ func WaitEtcdReplicasetInDesired(replicas int32, c *kubernetes.Clientset, namesp
 return false, nil
 }
 if int32(len(pods.Items)) == replicas {
-klog.Infof("etcd desired replicaset is %v, currently: %v", replicas, len(pods.Items))
+klog.Infof("Etcd desired replicaset is %v, currently: %v", replicas, len(pods.Items))
 return true, nil
 }
-klog.Warningf("etcd desired replicaset is %v, currently: %v", replicas, len(pods.Items))
+klog.Warningf("Etcd desired replicaset is %v, currently: %v", replicas, len(pods.Items))
 return false, nil
 }); err != nil {
 return err
@@ -349,7 +349,7 @@ func (i *CommandInitOption) initKarmadaAPIServer() error {
 if err := util.CreateOrUpdateService(i.KubeClientSet, i.makeEtcdService(etcdStatefulSetAndServiceName)); err != nil {
 return err
 }
-klog.Info("create etcd StatefulSets")
+klog.Info("Create etcd StatefulSets")
 if _, err := i.KubeClientSet.AppsV1().StatefulSets(i.Namespace).Create(context.TODO(), i.makeETCDStatefulSet(), metav1.CreateOptions{}); err != nil {
 klog.Warning(err)
 }
@@ -360,7 +360,7 @@ func (i *CommandInitOption) initKarmadaAPIServer() error {
 klog.Warning(err)
 }
-klog.Info("create karmada ApiServer Deployment")
+klog.Info("Create karmada ApiServer Deployment")
 if err := util.CreateOrUpdateService(i.KubeClientSet, i.makeKarmadaAPIServerService()); err != nil {
 return err
 }
@@ -373,7 +373,7 @@ func (i *CommandInitOption) initKarmadaAPIServer() error {
 // Create karmada-aggregated-apiserver
 // https://github.com/karmada-io/karmada/blob/master/artifacts/deploy/karmada-aggregated-apiserver.yaml
-klog.Info("create karmada aggregated apiserver Deployment")
+klog.Info("Create karmada aggregated apiserver Deployment")
 if err := util.CreateOrUpdateService(i.KubeClientSet, i.karmadaAggregatedAPIServerService()); err != nil {
 klog.Exitln(err)
 }
@@ -393,7 +393,7 @@ func (i *CommandInitOption) initKarmadaComponent() error {
 deploymentClient := i.KubeClientSet.AppsV1().Deployments(i.Namespace)
 // Create karmada-kube-controller-manager
 // https://github.com/karmada-io/karmada/blob/master/artifacts/deploy/kube-controller-manager.yaml
-klog.Info("create karmada kube controller manager Deployment")
+klog.Info("Create karmada kube controller manager Deployment")
 if err := util.CreateOrUpdateService(i.KubeClientSet, i.kubeControllerManagerService()); err != nil {
 klog.Exitln(err)
 }
@@ -406,7 +406,7 @@ func (i *CommandInitOption) initKarmadaComponent() error {
 // Create karmada-scheduler
 // https://github.com/karmada-io/karmada/blob/master/artifacts/deploy/karmada-scheduler.yaml
-klog.Info("create karmada scheduler Deployment")
+klog.Info("Create karmada scheduler Deployment")
 if _, err := deploymentClient.Create(context.TODO(), i.makeKarmadaSchedulerDeployment(), metav1.CreateOptions{}); err != nil {
 klog.Warning(err)
 }
@@ -416,7 +416,7 @@ func (i *CommandInitOption) initKarmadaComponent() error {
 // Create karmada-controller-manager
 // https://github.com/karmada-io/karmada/blob/master/artifacts/deploy/karmada-controller-manager.yaml
-klog.Info("create karmada controller manager Deployment")
+klog.Info("Create karmada controller manager Deployment")
 if _, err := deploymentClient.Create(context.TODO(), i.makeKarmadaControllerManagerDeployment(), metav1.CreateOptions{}); err != nil {
 klog.Warning(err)
 }
@@ -426,7 +426,7 @@ func (i *CommandInitOption) initKarmadaComponent() error {
 // Create karmada-webhook
 // https://github.com/karmada-io/karmada/blob/master/artifacts/deploy/karmada-webhook.yaml
-klog.Info("create karmada webhook Deployment")
+klog.Info("Create karmada webhook Deployment")
 if err := util.CreateOrUpdateService(i.KubeClientSet, i.karmadaWebhookService()); err != nil {
 klog.Exitln(err)
 }
@@ -39,7 +39,7 @@ func (i *CommandInitOption) getKarmadaAPIServerIP() error {
 }
 if len(masterNodes.Items) == 0 {
-klog.Warning("the kubernetes cluster does not have a Master role.")
+klog.Warning("The kubernetes cluster does not have a Master role.")
 } else {
 for _, v := range masterNodes.Items {
 i.KarmadaAPIServerIP = append(i.KarmadaAPIServerIP, utils.StringToNetIP(v.Status.Addresses[0].Address))
@@ -47,7 +47,7 @@ func (i *CommandInitOption) getKarmadaAPIServerIP() error {
 return nil
 }
-klog.Info("randomly select 3 Node IPs in the kubernetes cluster.")
+klog.Info("Randomly select 3 Node IPs in the kubernetes cluster.")
 nodes, err := nodeClient.List(context.TODO(), metav1.ListOptions{})
 if err != nil {
 return err
@@ -134,8 +134,8 @@ func (j *CommandJoinOption) AddFlags(flags *pflag.FlagSet) {
 // Run is the implementation of the 'join' command.
 func (j *CommandJoinOption) Run(f cmdutil.Factory) error {
-klog.V(1).Infof("joining cluster. cluster name: %s", j.ClusterName)
-klog.V(1).Infof("joining cluster. cluster namespace: %s", j.ClusterNamespace)
+klog.V(1).Infof("Joining cluster. cluster name: %s", j.ClusterName)
+klog.V(1).Infof("Joining cluster. cluster namespace: %s", j.ClusterNamespace)
 // Get control plane karmada-apiserver client
 controlPlaneRestConfig, err := f.ToRawKubeConfigLoader().ClientConfig()
@@ -159,7 +159,7 @@ func (j *CommandJoinOption) RunJoinCluster(controlPlaneRestConfig, clusterConfig
 karmadaClient := karmadaclientset.NewForConfigOrDie(controlPlaneRestConfig)
 clusterKubeClient := kubeclient.NewForConfigOrDie(clusterConfig)
-klog.V(1).Infof("joining cluster config. endpoint: %s", clusterConfig.Host)
+klog.V(1).Infof("Joining cluster config. endpoint: %s", clusterConfig.Host)
 registerOption := util.ClusterRegisterOption{
 ClusterNamespace: j.ClusterNamespace,
@@ -297,8 +297,8 @@ func (o *CommandRegisterOption) Validate() error {
 // Run is the implementation of the 'register' command.
 func (o *CommandRegisterOption) Run(parentCommand string) error {
-klog.V(1).Infof("registering cluster. cluster name: %s", o.ClusterName)
-klog.V(1).Infof("registering cluster. cluster namespace: %s", o.ClusterNamespace)
+klog.V(1).Infof("Registering cluster. cluster name: %s", o.ClusterName)
+klog.V(1).Infof("Registering cluster. cluster namespace: %s", o.ClusterNamespace)
 fmt.Println("[preflight] Running pre-flight checks")
 errlist := o.preflight()
@@ -410,7 +410,7 @@ func appendError(errlist []error, err error) []error {
 // checkFileIfExist validates if the given file already exist.
 func checkFileIfExist(filePath string) error {
-klog.V(1).Infof("validating the existence of file %s", filePath)
+klog.V(1).Infof("Validating the existence of file %s", filePath)
 if _, err := os.Stat(filePath); err == nil {
 return fmt.Errorf("%s already exists", filePath)
@@ -499,7 +499,7 @@ func (o *CommandRegisterOption) constructKarmadaAgentConfig(bootstrapClient *kub
 return nil, err
 }
-klog.V(1).Infof("waiting for the client certificate to be issued")
+klog.V(1).Infof("Waiting for the client certificate to be issued")
 err = wait.Poll(1*time.Second, o.Timeout, func() (done bool, err error) {
 csrOK, err := bootstrapClient.CertificatesV1().CertificateSigningRequests().Get(context.TODO(), csrName, metav1.GetOptions{})
 if err != nil {
@@ -507,12 +507,12 @@ func (o *CommandRegisterOption) constructKarmadaAgentConfig(bootstrapClient *kub
 }
 if csrOK.Status.Certificate != nil {
-klog.V(1).Infof("signing certificate successfully")
+klog.V(1).Infof("Signing certificate successfully")
 cert = csrOK.Status.Certificate
 return true, nil
 }
-klog.V(1).Infof("waiting for the client certificate to be issued")
+klog.V(1).Infof("Waiting for the client certificate to be issued")
 return false, nil
 })
 if err != nil {
@@ -140,8 +140,8 @@ func (j *CommandUnjoinOption) AddFlags(flags *pflag.FlagSet) {
 // Run is the implementation of the 'unjoin' command.
 func (j *CommandUnjoinOption) Run(f cmdutil.Factory) error {
-klog.V(1).Infof("unjoining cluster. cluster name: %s", j.ClusterName)
-klog.V(1).Infof("unjoining cluster. cluster namespace: %s", j.ClusterNamespace)
+klog.V(1).Infof("Unjoining cluster. cluster name: %s", j.ClusterName)
+klog.V(1).Infof("Unjoining cluster. cluster namespace: %s", j.ClusterNamespace)
 // Get control plane kube-apiserver client
 controlPlaneRestConfig, err := f.ToRawKubeConfigLoader().ClientConfig()
@@ -156,7 +156,7 @@ func (j *CommandUnjoinOption) Run(f cmdutil.Factory) error {
 // Get cluster config
 clusterConfig, err = apiclient.RestConfig(j.ClusterContext, j.ClusterKubeConfig)
 if err != nil {
-klog.V(1).Infof("failed to get unjoining cluster config. error: %v", err)
+klog.V(1).Infof("Failed to get unjoining cluster config. error: %v", err)
 return err
 }
 }
@@ -180,7 +180,7 @@ func (j *CommandUnjoinOption) RunUnJoinCluster(controlPlaneRestConfig, clusterCo
 if clusterConfig != nil {
 clusterKubeClient := kubeclient.NewForConfigOrDie(clusterConfig)
-klog.V(1).Infof("unjoining cluster config. endpoint: %s", clusterConfig.Host)
+klog.V(1).Infof("Unjoining cluster config. endpoint: %s", clusterConfig.Host)
 // delete RBAC resource from unjoining cluster
 err = deleteRBACResources(clusterKubeClient, j.ClusterName, j.forceDeletion, j.DryRun)
@@ -85,7 +85,7 @@ type Token struct {
 // GenerateRegisterCommand generate register command that will be printed
 func GenerateRegisterCommand(kubeConfig, parentCommand, token string, karmadaContext string) (string, error) {
-klog.V(1).Info("print register command")
+klog.V(1).Info("Print register command")
 // load the kubeconfig file to get the CA certificate and endpoint
 config, err := clientcmd.LoadFromFile(kubeConfig)
 if err != nil {
@@ -260,7 +260,7 @@ func (rs *ResourceSummary) GetNodeNumFromModel(model *resourceModels) int {
 } else if model.linkedlist == nil && model.redblackTree == nil {
 return 0
 } else if model.linkedlist != nil && model.redblackTree != nil {
-klog.Info("getNodeNum: unknow error")
+klog.Info("GetNodeNum: unknow error")
 }
 return 0
 }
@@ -72,7 +72,7 @@ func aggregateDeploymentStatus(object *unstructured.Unstructured, aggregatedStat
 oldStatus.UpdatedReplicas == newStatus.UpdatedReplicas &&
 oldStatus.AvailableReplicas == newStatus.AvailableReplicas &&
 oldStatus.UnavailableReplicas == newStatus.UnavailableReplicas {
-klog.V(3).Infof("ignore update deployment(%s/%s) status as up to date", deploy.Namespace, deploy.Name)
+klog.V(3).Infof("Ignore update deployment(%s/%s) status as up to date", deploy.Namespace, deploy.Name)
 return object, nil
 }
@@ -122,7 +122,7 @@ func aggregateServiceStatus(object *unstructured.Unstructured, aggregatedStatusI
 }
 if reflect.DeepEqual(service.Status, *newStatus) {
-klog.V(3).Infof("ignore update service(%s/%s) status as up to date", service.Namespace, service.Name)
+klog.V(3).Infof("Ignore update service(%s/%s) status as up to date", service.Namespace, service.Name)
 return object, nil
 }
@@ -161,7 +161,7 @@ func aggregateIngressStatus(object *unstructured.Unstructured, aggregatedStatusI
 }
 if reflect.DeepEqual(ingress.Status, *newStatus) {
-klog.V(3).Infof("ignore update ingress(%s/%s) status as up to date", ingress.Namespace, ingress.Name)
+klog.V(3).Infof("Ignore update ingress(%s/%s) status as up to date", ingress.Namespace, ingress.Name)
 return object, nil
 }
@@ -187,7 +187,7 @@ func aggregateJobStatus(object *unstructured.Unstructured, aggregatedStatusItems
 }
 if reflect.DeepEqual(job.Status, *newStatus) {
-klog.V(3).Infof("ignore update job(%s/%s) status as up to date", job.Namespace, job.Name)
+klog.V(3).Infof("Ignore update job(%s/%s) status as up to date", job.Namespace, job.Name)
 return object, nil
 }
@@ -237,7 +237,7 @@ func aggregateDaemonSetStatus(object *unstructured.Unstructured, aggregatedStatu
 oldStatus.NumberReady == newStatus.NumberReady &&
 oldStatus.UpdatedNumberScheduled == newStatus.UpdatedNumberScheduled &&
 oldStatus.NumberUnavailable == newStatus.NumberUnavailable {
-klog.V(3).Infof("ignore update daemonSet(%s/%s) status as up to date", daemonSet.Namespace, daemonSet.Name)
+klog.V(3).Infof("Ignore update daemonSet(%s/%s) status as up to date", daemonSet.Namespace, daemonSet.Name)
 return object, nil
 }
@@ -290,7 +290,7 @@ func aggregateStatefulSetStatus(object *unstructured.Unstructured, aggregatedSta
 oldStatus.ReadyReplicas == newStatus.ReadyReplicas &&
 oldStatus.Replicas == newStatus.Replicas &&
 oldStatus.UpdatedReplicas == newStatus.UpdatedReplicas {
-klog.V(3).Infof("ignore update statefulSet(%s/%s) status as up to date", statefulSet.Namespace, statefulSet.Name)
+klog.V(3).Infof("Ignore update statefulSet(%s/%s) status as up to date", statefulSet.Namespace, statefulSet.Name)
 return object, nil
 }
@@ -357,7 +357,7 @@ func aggregatePodStatus(object *unstructured.Unstructured, aggregatedStatusItems
 }
 if reflect.DeepEqual(pod.Status, *newStatus) {
-klog.V(3).Infof("ignore update pod(%s/%s) status as up to date", pod.Namespace, pod.Name)
+klog.V(3).Infof("Ignore update pod(%s/%s) status as up to date", pod.Namespace, pod.Name)
 return object, nil
 }
@@ -410,7 +410,7 @@ func aggregatePersistentVolumeStatus(object *unstructured.Unstructured, aggregat
 }
 if reflect.DeepEqual(pv.Status, *newStatus) {
-klog.V(3).Infof("ignore update persistentVolume(%s/%s) status as up to date", pv.Namespace, pv.Name)
+klog.V(3).Infof("Ignore update persistentVolume(%s/%s) status as up to date", pv.Namespace, pv.Name)
 return object, nil
 }
@@ -448,7 +448,7 @@ func aggregatePersistentVolumeClaimStatus(object *unstructured.Unstructured, agg
 }
 if reflect.DeepEqual(pvc.Status, *newStatus) {
-klog.V(3).Infof("ignore update pvc(%s/%s) status as up to date", pvc.Namespace, pvc.Name)
+klog.V(3).Infof("Ignore update pvc(%s/%s) status as up to date", pvc.Namespace, pvc.Name)
 return object, nil
 }
@@ -70,19 +70,19 @@ func (g *genericScheduler) Schedule(ctx context.Context, placement *policyv1alph
 Diagnosis: diagnosis,
 }
 }
-klog.V(4).Infof("feasible clusters found: %v", feasibleClusters)
+klog.V(4).Infof("Feasible clusters found: %v", feasibleClusters)
 clustersScore, err := g.prioritizeClusters(ctx, g.scheduleFramework, placement, spec, feasibleClusters)
 if err != nil {
 return result, fmt.Errorf("failed to prioritizeClusters: %v", err)
 }
-klog.V(4).Infof("feasible clusters scores: %v", clustersScore)
+klog.V(4).Infof("Feasible clusters scores: %v", clustersScore)
 clusters, err := g.selectClusters(clustersScore, placement, spec)
 if err != nil {
 return result, fmt.Errorf("failed to select clusters: %v", err)
 }
-klog.V(4).Infof("selected clusters: %v", clusters)
+klog.V(4).Infof("Selected clusters: %v", clusters)
 clustersWithReplicas, err := g.assignReplicas(clusters, placement.ReplicaScheduling, spec)
 if err != nil {
@@ -116,7 +116,7 @@ func (g *genericScheduler) findClustersThatFit(
 clusters := clusterInfo.GetClusters()
 for _, c := range clusters {
 if result := fwk.RunFilterPlugins(ctx, placement, bindingSpec, c.Cluster()); !result.IsSuccess() {
-klog.V(4).Infof("cluster %q is not fit, reason: %v", c.Cluster().Name, result.AsError())
+klog.V(4).Infof("Cluster %q is not fit, reason: %v", c.Cluster().Name, result.AsError())
 diagnosis.ClusterToResultMap[c.Cluster().Name] = result
 } else {
 out = append(out, c.Cluster())
@@ -16,7 +16,7 @@ func SelectBestClusters(placement *policyv1alpha1.Placement, groupClustersInfo *
 for _, cluster := range groupClustersInfo.Clusters {
 clusters = append(clusters, cluster.Cluster)
 }
-klog.V(4).Infof("select all clusters")
+klog.V(4).Infof("Select all clusters")
 return clusters, nil
 }
@@ -163,7 +163,7 @@ func (os *OpenSearch) delete(obj interface{}) {
 klog.Errorf("cannot delete: %v", err)
 return
 }
-klog.V(4).Infof("delete response: %v", resp.String())
+klog.V(4).Infof("Delete response: %v", resp.String())
 }
 // TODO: bulk upsert
@@ -203,13 +203,13 @@ func (os *OpenSearch) upsert(obj interface{}) {
 body, err := json.Marshal(doc)
 if err != nil {
-klog.Errorf("cannot marshal to json: %v", err)
+klog.Errorf("Cannot marshal to json: %v", err)
 return
 }
 indexName, err := os.indexName(us)
 if err != nil {
-klog.Errorf("cannot get index name: %v", err)
+klog.Errorf("Cannot get index name: %v", err)
 return
 }
@@ -220,14 +220,14 @@ func (os *OpenSearch) upsert(obj interface{}) {
 }
 resp, err := req.Do(context.Background(), os.client)
 if err != nil {
-klog.Errorf("cannot upsert: %v", err)
+klog.Errorf("Cannot upsert: %v", err)
 return
 }
 if resp.IsError() {
-klog.Errorf("upsert error: %s", resp.String())
+klog.Errorf("Upsert error: %s", resp.String())
 return
 }
-klog.V(4).Infof("upsert response: %s", resp.String())
+klog.V(4).Infof("Upsert response: %s", resp.String())
 }
 // TODO: apply mapping
@@ -240,12 +240,12 @@ func (os *OpenSearch) indexName(us *unstructured.Unstructured) (string, error) {
 return name, nil
 }
-klog.Infof("try to create index: %s", name)
+klog.Infof("Try to create index: %s", name)
 res := opensearchapi.IndicesCreateRequest{Index: name, Body: strings.NewReader(mapping)}
 resp, err := res.Do(context.Background(), os.client)
 if err != nil {
 if strings.Contains(err.Error(), "resource_already_exists_exception") {
-klog.Info("index already exists")
+klog.Info("Index already exists")
 os.indices[name] = struct{}{}
 return name, nil
 }
@@ -253,7 +253,7 @@ func (os *OpenSearch) indexName(us *unstructured.Unstructured) (string, error) {
 }
 if resp.IsError() {
 if strings.Contains(resp.String(), "resource_already_exists_exception") {
-klog.Info("index already exists")
+klog.Info("Index already exists")
 os.indices[name] = struct{}{}
 return name, nil
 }
@@ -278,13 +278,13 @@ func (os *OpenSearch) initClient(bsc *searchv1alpha1.BackendStoreConfig) error {
 user, pwd := func(secretRef clusterv1alpha1.LocalSecretReference) (user, pwd string) {
 if secretRef.Namespace == "" || secretRef.Name == "" {
-klog.Warningf("not found secret for opensearch, try to without auth")
+klog.Warningf("Not found secret for opensearch, try to without auth")
 return
 }
 secret, err := k8sClient.CoreV1().Secrets(secretRef.Namespace).Get(context.TODO(), secretRef.Name, metav1.GetOptions{})
 if err != nil {
-klog.Warningf("can not get secret %s/%s: %v, try to without auth", secretRef.Namespace, secretRef.Name, err)
+klog.Warningf("Can not get secret %s/%s: %v, try to without auth", secretRef.Namespace, secretRef.Name, err)
 return
 }
@@ -306,7 +306,7 @@ func (os *OpenSearch) initClient(bsc *searchv1alpha1.BackendStoreConfig) error {
 return fmt.Errorf("cannot get opensearch info: %v", err)
 }
-klog.V(4).Infof("opensearch client: %v", info)
+klog.V(4).Infof("Opensearch client: %v", info)
 os.client = client
 return nil
 }
@@ -190,7 +190,7 @@ func (c *Controller) doCacheCluster(cluster string) error {
 // STEP2: added/updated cluster, builds an informer manager for a specific cluster.
 if !c.InformerManager.IsManagerExist(cluster) {
-klog.Info("try to build informer manager for cluster ", cluster)
+klog.Info("Try to build informer manager for cluster ", cluster)
 controlPlaneClient := gclient.NewForConfigOrDie(c.restConfig)
 clusterDynamicClient, err := util.NewClusterDynamicClientSet(cluster, controlPlaneClient)
@@ -402,7 +402,7 @@ func (c *Controller) getClusters(affinity policyv1alpha1.ClusterAffinity) []stri
 clusters := make([]string, 0)
 lst, err := c.clusterLister.List(labels.Everything())
 if err != nil {
-klog.Errorf("failed to list clusters: %v", err)
+klog.Errorf("Failed to list clusters: %v", err)
 return clusters
 }
 for _, cls := range lst {
@@ -421,7 +421,7 @@ func (c *Controller) getResources(selectors []searchv1alpha1.ResourceSelector) [
 c.restMapper, schema.FromAPIVersionAndKind(rs.APIVersion, rs.Kind),
 )
 if err != nil {
-klog.Errorf("failed to get gvr: %v", err)
+klog.Errorf("Failed to get gvr: %v", err)
 continue
 }
 resources = append(resources, gvr)
@@ -168,7 +168,7 @@ func (ctl *Controller) reconcile(util.QueueKey) error {
 for _, selector := range registry.Spec.ResourceSelectors {
 gvr, err := restmapper.GetGroupVersionResource(ctl.restMapper, schema.FromAPIVersionAndKind(selector.APIVersion, selector.Kind))
 if err != nil {
-klog.Errorf("failed to get gvr: %v", err)
+klog.Errorf("Failed to get gvr: %v", err)
 continue
 }
 matchedResources[gvr] = struct{}{}
@@ -95,7 +95,7 @@ func CreateClusterObject(controlPlaneClient karmadaclientset.Interface, clusterO
 }
 if cluster, err = createCluster(controlPlaneClient, clusterObj); err != nil {
-klog.Warningf("failed to create cluster(%s). error: %v", clusterObj.Name, err)
+klog.Warningf("Failed to create cluster(%s). error: %v", clusterObj.Name, err)
 return nil, err
 }
@@ -111,13 +111,13 @@ func CreateOrUpdateClusterObject(controlPlaneClient karmadaclientset.Interface,
 }
 if exist {
 if reflect.DeepEqual(cluster.Spec, clusterObj.Spec) {
-klog.Warningf("cluster(%s) already exist and newest", clusterObj.Name)
+klog.Warningf("Cluster(%s) already exist and newest", clusterObj.Name)
 return cluster, nil
 }
 mutate(cluster)
 cluster, err = updateCluster(controlPlaneClient, cluster)
 if err != nil {
-klog.Warningf("failed to create cluster(%s). error: %v", clusterObj.Name, err)
+klog.Warningf("Failed to create cluster(%s). error: %v", clusterObj.Name, err)
 return nil, err
 }
 return cluster, nil
@@ -126,7 +126,7 @@ func CreateOrUpdateClusterObject(controlPlaneClient karmadaclientset.Interface,
 mutate(clusterObj)
 if cluster, err = createCluster(controlPlaneClient, clusterObj); err != nil {
-klog.Warningf("failed to create cluster(%s). error: %v", clusterObj.Name, err)
+klog.Warningf("Failed to create cluster(%s). error: %v", clusterObj.Name, err)
 return nil, err
 }
 return cluster, nil
@@ -140,7 +140,7 @@ func GetClusterWithKarmadaClient(client karmadaclientset.Interface, name string)
 return nil, false, nil
 }
-klog.Warningf("failed to retrieve cluster(%s). error: %v", name, err)
+klog.Warningf("Failed to retrieve cluster(%s). error: %v", name, err)
 return nil, false, err
 }
@@ -150,7 +150,7 @@ func GetClusterWithKarmadaClient(client karmadaclientset.Interface, name string)
 func createCluster(controlPlaneClient karmadaclientset.Interface, cluster *clusterv1alpha1.Cluster) (*clusterv1alpha1.Cluster, error) {
 newCluster, err := controlPlaneClient.ClusterV1alpha1().Clusters().Create(context.TODO(), cluster, metav1.CreateOptions{})
 if err != nil {
-klog.Warningf("failed to create cluster(%s). error: %v", cluster.Name, err)
+klog.Warningf("Failed to create cluster(%s). error: %v", cluster.Name, err)
 return nil, err
 }
@@ -160,7 +160,7 @@ func createCluster(controlPlaneClient karmadaclientset.Interface, cluster *clust
 func updateCluster(controlPlaneClient karmadaclientset.Interface, cluster *clusterv1alpha1.Cluster) (*clusterv1alpha1.Cluster, error) {
 newCluster, err := controlPlaneClient.ClusterV1alpha1().Clusters().Update(context.TODO(), cluster, metav1.UpdateOptions{})
 if err != nil {
-klog.Warningf("failed to update cluster(%s). error: %v", cluster.Name, err)
+klog.Warningf("Failed to update cluster(%s). error: %v", cluster.Name, err)
 return nil, err
 }
@@ -28,7 +28,7 @@ func SetLeaseOwnerFunc(c client.Client, clusterName string) func(lease *coordina
 },
 }
 } else {
-klog.Errorf("failed to get cluster %q when trying to set owner ref to the cluster lease: %v", clusterName, err)
+klog.Errorf("Failed to get cluster %q when trying to set owner ref to the cluster lease: %v", clusterName, err)
 return err
 }
 }
@@ -20,7 +20,7 @@ func EnsureClusterRoleExist(client kubeclient.Interface, clusterRole *rbacv1.Clu
 return nil, fmt.Errorf("failed to check if ClusterRole exist. ClusterRole: %s, error: %v", clusterRole.Name, err)
 }
 if exist {
-klog.V(1).Infof("ensure ClusterRole succeed as already exist. ClusterRole: %s", clusterRole.Name)
+klog.V(1).Infof("Ensure ClusterRole succeed as already exist. ClusterRole: %s", clusterRole.Name)
 return clusterRole, nil
 }
@@ -44,7 +44,7 @@ func EnsureClusterRoleBindingExist(client kubeclient.Interface, clusterRoleBindi
 return nil, fmt.Errorf("failed to check if ClusterRole exist. ClusterRole: %s, error: %v", clusterRoleBinding.Name, err)
 }
 if exist {
-klog.V(1).Infof("ensure ClusterRole succeed as already exist. ClusterRole: %s", clusterRoleBinding.Name)
+klog.V(1).Infof("Ensure ClusterRole succeed as already exist. ClusterRole: %s", clusterRoleBinding.Name)
 return clusterRoleBinding, nil
 }
@@ -72,7 +72,7 @@ func AggregateResourceBindingWorkStatus(c client.Client, binding *workv1alpha2.R
 // make a copy, so we don't mutate the shared cache
 binding = updated.DeepCopy()
 } else {
-klog.Errorf("failed to get updated binding %s/%s: %v", binding.Namespace, binding.Name, err)
+klog.Errorf("Failed to get updated binding %s/%s: %v", binding.Namespace, binding.Name, err)
 }
 return updateErr
@@ -123,7 +123,7 @@ func AggregateClusterResourceBindingWorkStatus(c client.Client, binding *workv1a
 // make a copy, so we don't mutate the shared cache
 binding = updated.DeepCopy()
 } else {
-klog.Errorf("failed to get updated binding %s/%s: %v", binding.Namespace, binding.Name, err)
+klog.Errorf("Failed to get updated binding %s/%s: %v", binding.Namespace, binding.Name, err)
 }
 return updateErr
@@ -39,9 +39,9 @@ func GetDeletableResources(discoveryClient discovery.ServerResourcesInterface) m
 preferredResources, err := discoveryClient.ServerPreferredResources()
 if err != nil {
 if discovery.IsGroupDiscoveryFailedError(err) {
-klog.Warningf("failed to discover some groups: %v", err.(*discovery.ErrGroupDiscoveryFailed).Groups)
+klog.Warningf("Failed to discover some groups: %v", err.(*discovery.ErrGroupDiscoveryFailed).Groups)
 } else {
-klog.Warningf("failed to discover preferred resources: %v", err)
+klog.Warningf("Failed to discover preferred resources: %v", err)
 }
 }
 if preferredResources == nil {
@@ -55,7 +55,7 @@ func GetDeletableResources(discoveryClient discovery.ServerResourcesInterface) m
 for _, rl := range deletableResources {
 gv, err := schema.ParseGroupVersion(rl.GroupVersion)
 if err != nil {
-klog.Warningf("ignoring invalid discovered resource %q: %v", rl.GroupVersion, err)
+klog.Warningf("Ignore invalid discovered resource %q: %v", rl.GroupVersion, err)
 continue
 }
 for i := range rl.APIResources {
@@ -34,7 +34,7 @@ func CreateSecret(client kubeclient.Interface, secret *corev1.Secret) (*corev1.S
 func PatchSecret(client kubeclient.Interface, namespace, name string, pt types.PatchType, patchSecretBody *corev1.Secret) error {
 patchSecretByte, err := json.Marshal(patchSecretBody)
 if err != nil {
-klog.Errorf("failed to marshal patch body of secret object %v into JSON: %v", patchSecretByte, err)
+klog.Errorf("Failed to marshal patch body of secret object %v into JSON: %v", patchSecretByte, err)
 return err
 }
@@ -65,7 +65,7 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 wh.writeResponse(w, res)
 return
 }
-klog.V(1).Infof("received request UID: %q, kind: %s", request.UID, request.Kind)
+klog.V(1).Infof("Received request UID: %q, kind: %s", request.UID, request.Kind)
 res = wh.Handle(ctx, request)
 wh.writeResponse(w, res)
@@ -86,9 +86,9 @@ func (wh *Webhook) writeResourceInterpreterResponse(w io.Writer, interpreterCont
 } else {
 response := interpreterContext.Response
 if response.Successful {
-klog.V(4).Infof("wrote response UID: %q, successful: %t", response.UID, response.Successful)
+klog.V(4).Infof("Wrote response UID: %q, successful: %t", response.UID, response.Successful)
 } else {
-klog.V(4).Infof("wrote response UID: %q, successful: %t, response.status.code: %d, response.status.message: %s",
+klog.V(4).Infof("Wrote response UID: %q, successful: %t, response.status.code: %d, response.status.message: %s",
 response.UID, response.Successful, response.Status.Code, response.Status.Message)
 }
 }