mirror of https://github.com/openkruise/kruise.git
support structured logging for pkg/controller (#1629)
Signed-off-by: jairui <jairuigou@gmail.com>
This commit is contained in:
parent
0f6aada9d3
commit
f0f6eef44d
|
@ -49,7 +49,7 @@ func (r *ReconcileAdvancedCronJob) reconcileBroadcastJob(ctx context.Context, re
|
|||
|
||||
var childJobs appsv1alpha1.BroadcastJobList
|
||||
if err := r.List(ctx, &childJobs, client.InNamespace(advancedCronJob.Namespace), client.MatchingFields{jobOwnerKey: advancedCronJob.Name}); err != nil {
|
||||
klog.Error(err, "unable to list child Jobs", req.NamespacedName)
|
||||
klog.ErrorS(err, "Unable to list child Jobs", "advancedCronJob", req)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
|
@ -98,7 +98,7 @@ func (r *ReconcileAdvancedCronJob) reconcileBroadcastJob(ctx context.Context, re
|
|||
// the active jobs themselves.
|
||||
scheduledTimeForJob, err := getScheduledTimeForJob(&job)
|
||||
if err != nil {
|
||||
klog.Error(err, "unable to parse schedule time for child broadcastjob ", job.Name, req.NamespacedName)
|
||||
klog.ErrorS(err, "Unable to parse schedule time for child BroadcastJob", "broadcastJob", klog.KObj(&job), "advancedCronJob", req)
|
||||
continue
|
||||
}
|
||||
if scheduledTimeForJob != nil {
|
||||
|
@ -120,15 +120,15 @@ func (r *ReconcileAdvancedCronJob) reconcileBroadcastJob(ctx context.Context, re
|
|||
for _, activeJob := range activeJobs {
|
||||
jobRef, err := ref.GetReference(r.scheme, activeJob)
|
||||
if err != nil {
|
||||
klog.Error(err, "unable to make reference to active broadcastjob ", " job ", activeJob, req.NamespacedName)
|
||||
klog.ErrorS(err, "Unable to make reference to active BroadcastJob", "broadcastJob", klog.KObj(activeJob), "advancedCronJob", req)
|
||||
continue
|
||||
}
|
||||
advancedCronJob.Status.Active = append(advancedCronJob.Status.Active, *jobRef)
|
||||
}
|
||||
|
||||
klog.V(1).Info("advancedCronJob count ", " active advancedCronJob ", len(activeJobs), " successful advancedCronJob ", len(successfulJobs), " failed advancedCronJob ", len(failedJobs), req.NamespacedName)
|
||||
klog.V(1).InfoS("AdvancedCronJob count", "activeJobCount", len(activeJobs), "successfulJobCount", len(successfulJobs), "failedJobCount", len(failedJobs), "advancedCronJob", req)
|
||||
if err := r.updateAdvancedJobStatus(req, &advancedCronJob); err != nil {
|
||||
klog.Error(err, "unable to update AdvancedCronJob status", req.NamespacedName)
|
||||
klog.ErrorS(err, "Unable to update AdvancedCronJob status", "advancedCronJob", req)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
|
@ -154,9 +154,9 @@ func (r *ReconcileAdvancedCronJob) reconcileBroadcastJob(ctx context.Context, re
|
|||
break
|
||||
}
|
||||
if err := r.Delete(ctx, job, client.PropagationPolicy(metav1.DeletePropagationBackground)); client.IgnoreNotFound(err) != nil {
|
||||
klog.Error(err, "unable to delete old failed broadcastjob", "job", job, req.NamespacedName)
|
||||
klog.ErrorS(err, "Unable to delete old failed BroadcastJob", "oldFailedBroadcastJob", klog.KObj(job), "advancedCronJob", req)
|
||||
} else {
|
||||
klog.V(0).Info("deleted old failed broadcastjob", "job", job, req.NamespacedName)
|
||||
klog.InfoS("Deleted old failed BroadcastJob", "oldFailedBroadcastJob", klog.KObj(job), "advancedCronJob", req)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -173,9 +173,9 @@ func (r *ReconcileAdvancedCronJob) reconcileBroadcastJob(ctx context.Context, re
|
|||
break
|
||||
}
|
||||
if err := r.Delete(ctx, job, client.PropagationPolicy(metav1.DeletePropagationBackground)); (err) != nil {
|
||||
klog.Error(err, "unable to delete old successful broadcastjob ", job.Name, req.NamespacedName)
|
||||
klog.ErrorS(err, "Unable to delete old successful BroadcastJob", "oldSuccessfulBroadcastJob", klog.KObj(job), "advancedCronJob", req)
|
||||
} else {
|
||||
klog.V(0).Info("deleted old successful broadcastjob ", job.Name, req.NamespacedName)
|
||||
klog.InfoS("Deleted old successful BroadcastJob", "oldSuccessfulBroadcastJob", klog.KObj(job), "advancedCronJob", req)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -187,7 +187,7 @@ func (r *ReconcileAdvancedCronJob) reconcileBroadcastJob(ctx context.Context, re
|
|||
*/
|
||||
|
||||
if advancedCronJob.Spec.Paused != nil && *advancedCronJob.Spec.Paused {
|
||||
klog.V(1).Info("advancedCronJob paused, skipping", req.NamespacedName)
|
||||
klog.V(1).InfoS("AdvancedCronJob paused, skipping", "advancedCronJob", req)
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
|
@ -265,7 +265,7 @@ func (r *ReconcileAdvancedCronJob) reconcileBroadcastJob(ctx context.Context, re
|
|||
now := realClock{}.Now()
|
||||
missedRun, nextRun, err := getNextSchedule(&advancedCronJob, now)
|
||||
if err != nil {
|
||||
klog.Error(err, "unable to figure out CronJob schedule", req.NamespacedName)
|
||||
klog.ErrorS(err, "Unable to figure out CronJob schedule", "advancedCronJob", req)
|
||||
// we don't really care about requeuing until we get an update that
|
||||
// fixes the schedule, so don't return an error
|
||||
return ctrl.Result{}, nil
|
||||
|
@ -282,7 +282,7 @@ func (r *ReconcileAdvancedCronJob) reconcileBroadcastJob(ctx context.Context, re
|
|||
If we've missed a run, and we're still within the deadline to start it, we'll need to run a job.
|
||||
*/
|
||||
if missedRun.IsZero() {
|
||||
klog.V(1).Info("no upcoming scheduled times, sleeping until next now ", now, " and next run ", nextRun, req.NamespacedName)
|
||||
klog.V(1).InfoS("No upcoming scheduled times, sleeping until next run", "now", now, "nextRun", nextRun, "advancedCronJob", req)
|
||||
return scheduledResult, nil
|
||||
}
|
||||
|
||||
|
@ -292,7 +292,7 @@ func (r *ReconcileAdvancedCronJob) reconcileBroadcastJob(ctx context.Context, re
|
|||
tooLate = missedRun.Add(time.Duration(*advancedCronJob.Spec.StartingDeadlineSeconds) * time.Second).Before(now)
|
||||
}
|
||||
if tooLate {
|
||||
klog.V(1).Info("missed starting deadline for last run, sleeping till next", "current run", missedRun, req.NamespacedName)
|
||||
klog.V(1).InfoS("Missed starting deadline for last run, sleeping till next run", "missedRun", missedRun, "advancedCronJob", req)
|
||||
return scheduledResult, nil
|
||||
}
|
||||
|
||||
|
@ -304,7 +304,7 @@ func (r *ReconcileAdvancedCronJob) reconcileBroadcastJob(ctx context.Context, re
|
|||
// figure out how to run this job -- concurrency policy might forbid us from running
|
||||
// multiple at the same time...
|
||||
if advancedCronJob.Spec.ConcurrencyPolicy == appsv1alpha1.ForbidConcurrent && len(activeJobs) > 0 {
|
||||
klog.V(1).Info("concurrency policy blocks concurrent runs, skipping", "num active", len(activeJobs), req.NamespacedName)
|
||||
klog.V(1).InfoS("Concurrency policy blocks concurrent runs, skipping", "activeBroadcastJobCount", len(activeJobs), "advancedCronJob", req)
|
||||
return scheduledResult, nil
|
||||
}
|
||||
|
||||
|
@ -313,7 +313,7 @@ func (r *ReconcileAdvancedCronJob) reconcileBroadcastJob(ctx context.Context, re
|
|||
for _, activeJob := range activeJobs {
|
||||
// we don't care if the job was already deleted
|
||||
if err := r.Delete(ctx, activeJob, client.PropagationPolicy(metav1.DeletePropagationBackground)); client.IgnoreNotFound(err) != nil {
|
||||
klog.Error(err, "unable to delete active broadcastjob", "job", activeJob, req.NamespacedName)
|
||||
klog.ErrorS(err, "Unable to delete active broadcastjob", "broadcastJob", klog.KObj(activeJob), "advancedCronJob", req)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
@ -360,18 +360,18 @@ func (r *ReconcileAdvancedCronJob) reconcileBroadcastJob(ctx context.Context, re
|
|||
// actually make the job...
|
||||
job, err := constructBrJobForCronJob(&advancedCronJob, missedRun)
|
||||
if err != nil {
|
||||
klog.Error(err, "unable to construct broadcastjob from template", req.NamespacedName)
|
||||
klog.ErrorS(err, "Unable to construct broadcastjob from template", "advancedCronJob", req)
|
||||
// don't bother requeuing until we get a change to the spec
|
||||
return scheduledResult, nil
|
||||
}
|
||||
|
||||
// ...and create it on the cluster
|
||||
if err := r.Create(ctx, job); err != nil {
|
||||
klog.Error(err, "unable to create BroadcastJob for CronJob", "job", job, req.NamespacedName)
|
||||
klog.ErrorS(err, "Unable to create BroadcastJob for CronJob", "broadcastJob", klog.KObj(job), "advancedCronJob", req)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
klog.V(1).Info("created BroadcastJob for CronJob run", "job", job, req.NamespacedName)
|
||||
klog.V(1).InfoS("Created BroadcastJob for CronJob run", "broadcastJob", klog.KObj(job), "advancedCronJob", req)
|
||||
|
||||
/*
|
||||
### 7: Requeue when we either see a running job or it's time for the next scheduled run
|
||||
|
|
|
@ -76,28 +76,28 @@ func newReconciler(mgr manager.Manager) reconcile.Reconciler {
|
|||
// add adds a new Controller to mgr with r as the reconcile.Reconciler
|
||||
func add(mgr manager.Manager, r reconcile.Reconciler) error {
|
||||
// Create a new controller
|
||||
klog.Info("Starting AdvancedCronJob Controller")
|
||||
klog.InfoS("Starting AdvancedCronJob Controller")
|
||||
c, err := controller.New("advancedcronjob-controller", mgr, controller.Options{Reconciler: r,
|
||||
MaxConcurrentReconciles: concurrentReconciles, CacheSyncTimeout: util.GetControllerCacheSyncTimeout()})
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
klog.ErrorS(err, "Failed to create AdvandedCronJob controller")
|
||||
return err
|
||||
}
|
||||
|
||||
// Watch for changes to AdvancedCronJob
|
||||
src := source.Kind(mgr.GetCache(), &appsv1alpha1.AdvancedCronJob{})
|
||||
if err = c.Watch(src, &handler.EnqueueRequestForObject{}); err != nil {
|
||||
klog.Error(err)
|
||||
klog.ErrorS(err, "Failed to watch AdvancedCronJob")
|
||||
return err
|
||||
}
|
||||
|
||||
if err = watchJob(mgr, c); err != nil {
|
||||
klog.Error(err)
|
||||
klog.ErrorS(err, "Failed to watch Job")
|
||||
return err
|
||||
}
|
||||
|
||||
if err = watchBroadcastJob(mgr, c); err != nil {
|
||||
klog.Error(err)
|
||||
klog.ErrorS(err, "Failed to watch BroadcastJob")
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
@ -133,7 +133,7 @@ type ReconcileAdvancedCronJob struct {
|
|||
|
||||
func (r *ReconcileAdvancedCronJob) Reconcile(_ context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
ctx := context.Background()
|
||||
klog.Info("Running AdvancedCronJob job ", req.NamespacedName)
|
||||
klog.InfoS("Running AdvancedCronJob job", "advancedCronJob", req)
|
||||
|
||||
namespacedName := types.NamespacedName{
|
||||
Namespace: req.Namespace,
|
||||
|
@ -143,7 +143,7 @@ func (r *ReconcileAdvancedCronJob) Reconcile(_ context.Context, req ctrl.Request
|
|||
var advancedCronJob appsv1alpha1.AdvancedCronJob
|
||||
|
||||
if err := r.Get(ctx, namespacedName, &advancedCronJob); err != nil {
|
||||
klog.Error(err, "unable to fetch CronJob", req.NamespacedName)
|
||||
klog.ErrorS(err, "Unable to fetch CronJob", "advancedCronJob", req)
|
||||
// we'll ignore not-found errors, since they can't be fixed by an immediate
|
||||
// requeue (we'll need to wait for a new notification), and we can get them
|
||||
// on deleted requests.
|
||||
|
@ -156,7 +156,7 @@ func (r *ReconcileAdvancedCronJob) Reconcile(_ context.Context, req ctrl.Request
|
|||
case appsv1alpha1.BroadcastJobTemplate:
|
||||
return r.reconcileBroadcastJob(ctx, req, advancedCronJob)
|
||||
default:
|
||||
klog.Info("No template found", req.NamespacedName)
|
||||
klog.InfoS("No template found", "advancedCronJob", req)
|
||||
}
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
|
@ -169,7 +169,7 @@ func (r *ReconcileAdvancedCronJob) SetupWithManager(mgr ctrl.Manager) error {
|
|||
}
|
||||
|
||||
func (r *ReconcileAdvancedCronJob) updateAdvancedJobStatus(request reconcile.Request, advancedCronJob *appsv1alpha1.AdvancedCronJob) error {
|
||||
klog.V(1).Info(fmt.Sprintf("Updating job %s status %#v", advancedCronJob.Name, advancedCronJob.Status))
|
||||
klog.V(1).InfoS("Updating job status", "advancedCronJob", klog.KObj(advancedCronJob), "status", advancedCronJob.Status)
|
||||
advancedCronJobCopy := advancedCronJob.DeepCopy()
|
||||
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
err := r.Status().Update(context.TODO(), advancedCronJobCopy)
|
||||
|
|
|
@ -52,7 +52,7 @@ func (r *ReconcileAdvancedCronJob) reconcileJob(ctx context.Context, req ctrl.Re
|
|||
advancedCronJob.Status.Type = appsv1alpha1.JobTemplate
|
||||
var childJobs batchv1.JobList
|
||||
if err := r.List(ctx, &childJobs, client.InNamespace(advancedCronJob.Namespace), client.MatchingFields{jobOwnerKey: advancedCronJob.Name}); err != nil {
|
||||
klog.Error(err, "unable to list child Jobs", req.NamespacedName)
|
||||
klog.ErrorS(err, "Unable to list child Jobs", "advancedCronJob", req)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
|
@ -101,7 +101,7 @@ func (r *ReconcileAdvancedCronJob) reconcileJob(ctx context.Context, req ctrl.Re
|
|||
// the active jobs themselves.
|
||||
scheduledTimeForJob, err := getScheduledTimeForJob(&job)
|
||||
if err != nil {
|
||||
klog.Error(err, "unable to parse schedule time for child job", "job", &job, req.NamespacedName)
|
||||
klog.ErrorS(err, "Unable to parse schedule time for child job", "job", klog.KObj(&job), "advancedCronJob", req)
|
||||
continue
|
||||
}
|
||||
if scheduledTimeForJob != nil {
|
||||
|
@ -123,15 +123,15 @@ func (r *ReconcileAdvancedCronJob) reconcileJob(ctx context.Context, req ctrl.Re
|
|||
for _, activeJob := range activeJobs {
|
||||
jobRef, err := ref.GetReference(r.scheme, activeJob)
|
||||
if err != nil {
|
||||
klog.Error(err, "unable to make reference to active job", "job", activeJob, req.NamespacedName)
|
||||
klog.ErrorS(err, "Unable to make reference to active job", "job", klog.KObj(activeJob), "advancedCronJob", req)
|
||||
continue
|
||||
}
|
||||
advancedCronJob.Status.Active = append(advancedCronJob.Status.Active, *jobRef)
|
||||
}
|
||||
|
||||
klog.V(1).Info("job count ", " active jobs ", len(activeJobs), " successful jobs ", len(successfulJobs), " failed jobs ", len(failedJobs), req.NamespacedName)
|
||||
klog.V(1).InfoS("Job count", "activeJobCount", len(activeJobs), "successfulJobCount", len(successfulJobs), "failedJobCount", len(failedJobs), "advancedCronJob", req)
|
||||
if err := r.updateAdvancedJobStatus(req, &advancedCronJob); err != nil {
|
||||
klog.Error(err, "unable to update AdvancedCronJob status", req.NamespacedName)
|
||||
klog.ErrorS(err, "Unable to update AdvancedCronJob status", "advancedCronJob", req)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
|
@ -157,9 +157,9 @@ func (r *ReconcileAdvancedCronJob) reconcileJob(ctx context.Context, req ctrl.Re
|
|||
break
|
||||
}
|
||||
if err := r.Delete(ctx, job, client.PropagationPolicy(metav1.DeletePropagationBackground)); client.IgnoreNotFound(err) != nil {
|
||||
klog.Error(err, "unable to delete old failed job ", job.Name, req.NamespacedName)
|
||||
klog.ErrorS(err, "Unable to delete old failed job", "job", klog.KObj(job), "advancedCronJob", req)
|
||||
} else {
|
||||
klog.V(0).Info("deleted old failed job ", job.Name, req.NamespacedName)
|
||||
klog.InfoS("Deleted old failed job", "job", klog.KObj(job), "advancedCronJob", req)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -176,9 +176,9 @@ func (r *ReconcileAdvancedCronJob) reconcileJob(ctx context.Context, req ctrl.Re
|
|||
break
|
||||
}
|
||||
if err := r.Delete(ctx, job, client.PropagationPolicy(metav1.DeletePropagationBackground)); (err) != nil {
|
||||
klog.Error(err, "unable to delete old successful job ", job.Name, req.NamespacedName)
|
||||
klog.ErrorS(err, "Unable to delete old successful job", "job", klog.KObj(job), "advancedCronJob", req)
|
||||
} else {
|
||||
klog.V(0).Info("deleted old successful job ", job.Name, req.NamespacedName)
|
||||
klog.InfoS("Deleted old successful job", "job", klog.KObj(job), "advancedCronJob", req)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -190,7 +190,7 @@ func (r *ReconcileAdvancedCronJob) reconcileJob(ctx context.Context, req ctrl.Re
|
|||
*/
|
||||
|
||||
if advancedCronJob.Spec.Paused != nil && *advancedCronJob.Spec.Paused {
|
||||
klog.V(1).Info("cronjob paused, skipping", req.NamespacedName)
|
||||
klog.V(1).InfoS("CronJob paused, skipping", "advancedCronJob", req)
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
|
@ -268,7 +268,7 @@ func (r *ReconcileAdvancedCronJob) reconcileJob(ctx context.Context, req ctrl.Re
|
|||
now := realClock{}.Now()
|
||||
missedRun, nextRun, err := getNextSchedule(&advancedCronJob, now)
|
||||
if err != nil {
|
||||
klog.Error(err, "unable to figure out CronJob schedule", req.NamespacedName)
|
||||
klog.ErrorS(err, "Unable to figure out CronJob schedule", "advancedCronJob", req)
|
||||
// we don't really care about requeuing until we get an update that
|
||||
// fixes the schedule, so don't return an error
|
||||
return ctrl.Result{}, nil
|
||||
|
@ -285,7 +285,7 @@ func (r *ReconcileAdvancedCronJob) reconcileJob(ctx context.Context, req ctrl.Re
|
|||
If we've missed a run, and we're still within the deadline to start it, we'll need to run a job.
|
||||
*/
|
||||
if missedRun.IsZero() {
|
||||
klog.V(1).Info("no upcoming scheduled times, sleeping until next now ", now, " and next run ", nextRun, req.NamespacedName)
|
||||
klog.V(1).InfoS("No upcoming scheduled times, sleeping until next run", "now", now, "nextRun", nextRun, "advancedCronJob", req)
|
||||
return scheduledResult, nil
|
||||
}
|
||||
|
||||
|
@ -295,7 +295,7 @@ func (r *ReconcileAdvancedCronJob) reconcileJob(ctx context.Context, req ctrl.Re
|
|||
tooLate = missedRun.Add(time.Duration(*advancedCronJob.Spec.StartingDeadlineSeconds) * time.Second).Before(now)
|
||||
}
|
||||
if tooLate {
|
||||
klog.V(1).Info("missed starting deadline for last run, sleeping till next", "current run", missedRun, req.NamespacedName)
|
||||
klog.V(1).InfoS("Missed starting deadline for last run, sleeping till next run", "missedRun", missedRun, "advancedCronJob", req)
|
||||
return scheduledResult, nil
|
||||
}
|
||||
|
||||
|
@ -307,7 +307,7 @@ func (r *ReconcileAdvancedCronJob) reconcileJob(ctx context.Context, req ctrl.Re
|
|||
// figure out how to run this job -- concurrency policy might forbid us from running
|
||||
// multiple at the same time...
|
||||
if advancedCronJob.Spec.ConcurrencyPolicy == appsv1alpha1.ForbidConcurrent && len(activeJobs) > 0 {
|
||||
klog.V(1).Info("concurrency policy blocks concurrent runs, skipping", "num active", len(activeJobs), req.NamespacedName)
|
||||
klog.V(1).InfoS("Concurrency policy blocks concurrent runs, skipping", "activeJobCount", len(activeJobs), "advancedCronJob", req)
|
||||
return scheduledResult, nil
|
||||
}
|
||||
|
||||
|
@ -316,7 +316,7 @@ func (r *ReconcileAdvancedCronJob) reconcileJob(ctx context.Context, req ctrl.Re
|
|||
for _, activeJob := range activeJobs {
|
||||
// we don't care if the job was already deleted
|
||||
if err := r.Delete(ctx, activeJob, client.PropagationPolicy(metav1.DeletePropagationBackground)); client.IgnoreNotFound(err) != nil {
|
||||
klog.Error(err, "unable to delete active job", "job", activeJob, req.NamespacedName)
|
||||
klog.ErrorS(err, "Unable to delete active job", "job", klog.KObj(activeJob), "advancedCronJob", req)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
@ -363,18 +363,18 @@ func (r *ReconcileAdvancedCronJob) reconcileJob(ctx context.Context, req ctrl.Re
|
|||
// actually make the job...
|
||||
job, err := constructJobForCronJob(&advancedCronJob, missedRun)
|
||||
if err != nil {
|
||||
klog.Error(err, "unable to construct job from template", req.NamespacedName)
|
||||
klog.ErrorS(err, "Unable to construct job from template", "advancedCronJob", req)
|
||||
// don't bother requeuing until we get a change to the spec
|
||||
return scheduledResult, nil
|
||||
}
|
||||
|
||||
// ...and create it on the cluster
|
||||
if err := r.Create(ctx, job); err != nil {
|
||||
klog.Error(err, "unable to create Job for AdvancedCronJob", "job", job, req.NamespacedName)
|
||||
klog.ErrorS(err, "Unable to create Job for AdvancedCronJob", "job", klog.KObj(job), "advancedCronJob", req)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
klog.V(1).Info("created Job for AdvancedCronJob run", "job", job, req.NamespacedName)
|
||||
klog.V(1).InfoS("Created Job for AdvancedCronJob run", "job", klog.KObj(job), "advancedCronJob", req)
|
||||
|
||||
/*
|
||||
### 7: Requeue when we either see a running job or it's time for the next scheduled run
|
||||
|
|
|
@ -23,7 +23,7 @@ func formatSchedule(acj *appsv1alpha1.AdvancedCronJob) string {
|
|||
}
|
||||
if acj.Spec.TimeZone != nil {
|
||||
if _, err := time.LoadLocation(*acj.Spec.TimeZone); err != nil {
|
||||
klog.Errorf("Failed to load location %s for %s/%s: %v", *acj.Spec.TimeZone, acj.Namespace, acj.Name, err)
|
||||
klog.ErrorS(err, "Failed to load location for advancedCronJob", "location", *acj.Spec.TimeZone, "advancedCronJob", klog.KObj(acj))
|
||||
return acj.Spec.Schedule
|
||||
}
|
||||
return fmt.Sprintf("TZ=%s %s", *acj.Spec.TimeZone, acj.Spec.Schedule)
|
||||
|
|
|
@ -172,16 +172,16 @@ func (r *ReconcileBroadcastJob) Reconcile(_ context.Context, request reconcile.R
|
|||
return reconcile.Result{}, nil
|
||||
}
|
||||
// Error reading the object - requeue the request.
|
||||
klog.Errorf("failed to get job %s,", job.Name)
|
||||
klog.ErrorS(err, "Failed to get BroadcastJob", "broadcastJob", klog.KObj(job))
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if scaleSatisfied, unsatisfiedDuration, scaleDirtyPods := scaleExpectations.SatisfiedExpectations(request.String()); !scaleSatisfied {
|
||||
if unsatisfiedDuration >= expectations.ExpectationTimeout {
|
||||
klog.Warningf("Expectation unsatisfied overtime for bcj %v, scaleDirtyPods=%v, overtime=%v", request.String(), scaleDirtyPods, unsatisfiedDuration)
|
||||
klog.InfoS("Expectation unsatisfied overtime for BroadcastJob", "broadcastJob", request, "scaleDirtyPods", scaleDirtyPods, "overtime", unsatisfiedDuration)
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
klog.V(4).Infof("Not satisfied scale for bcj %v, scaleDirtyPods=%v", request.String(), scaleDirtyPods)
|
||||
klog.V(4).InfoS("Not satisfied scale for BroadcastJob", "broadcastJob", request, "scaleDirtyPods", scaleDirtyPods)
|
||||
return reconcile.Result{RequeueAfter: expectations.ExpectationTimeout - unsatisfiedDuration}, nil
|
||||
}
|
||||
|
||||
|
@ -191,10 +191,10 @@ func (r *ReconcileBroadcastJob) Reconcile(_ context.Context, request reconcile.R
|
|||
if IsJobFinished(job) {
|
||||
isPast, leftTime := pastTTLDeadline(job)
|
||||
if isPast {
|
||||
klog.Infof("deleting the job %s", job.Name)
|
||||
klog.InfoS("Deleting BroadcastJob", "broadcastJob", klog.KObj(job))
|
||||
err = r.Delete(context.TODO(), job)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to delete job %s", job.Name)
|
||||
klog.ErrorS(err, "Failed to delete BroadcastJob", "broadcastJob", klog.KObj(job))
|
||||
}
|
||||
} else if leftTime > 0 {
|
||||
return reconcile.Result{RequeueAfter: leftTime}, nil
|
||||
|
@ -209,8 +209,8 @@ func (r *ReconcileBroadcastJob) Reconcile(_ context.Context, request reconcile.R
|
|||
job.Status.StartTime = &now
|
||||
if job.Spec.CompletionPolicy.Type == appsv1alpha1.Always &&
|
||||
job.Spec.CompletionPolicy.ActiveDeadlineSeconds != nil {
|
||||
klog.Infof("Job %s has ActiveDeadlineSeconds, will resync after %d seconds",
|
||||
job.Name, *job.Spec.CompletionPolicy.ActiveDeadlineSeconds)
|
||||
klog.InfoS("BroadcastJob has ActiveDeadlineSeconds, will resync after the deadline", "broadcastJob", klog.KObj(job),
|
||||
"activeDeadlineSeconds", *job.Spec.CompletionPolicy.ActiveDeadlineSeconds)
|
||||
requeueAfter = time.Duration(*job.Spec.CompletionPolicy.ActiveDeadlineSeconds) * time.Second
|
||||
}
|
||||
}
|
||||
|
@ -227,7 +227,7 @@ func (r *ReconcileBroadcastJob) Reconcile(_ context.Context, request reconcile.R
|
|||
}
|
||||
err = r.List(context.TODO(), podList, listOptions)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to get podList for job %s,", job.Name)
|
||||
klog.ErrorS(err, "Failed to get podList for BroadcastJob", "broadcastJob", klog.KObj(job))
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
|
@ -247,7 +247,7 @@ func (r *ReconcileBroadcastJob) Reconcile(_ context.Context, request reconcile.R
|
|||
nodes := &corev1.NodeList{}
|
||||
err = r.List(context.TODO(), nodes)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to get nodeList for job %s,", job.Name)
|
||||
klog.ErrorS(err, "Failed to get nodeList for BroadcastJob", "broadcastJob", klog.KObj(job))
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
|
@ -259,8 +259,9 @@ func (r *ReconcileBroadcastJob) Reconcile(_ context.Context, request reconcile.R
|
|||
|
||||
desiredNodes, restNodesToRunPod, podsToDelete := getNodesToRunPod(nodes, job, existingNodeToPodMap)
|
||||
desired := int32(len(desiredNodes))
|
||||
klog.Infof("%s/%s has %d/%d nodes remaining to schedule pods", job.Namespace, job.Name, len(restNodesToRunPod), desired)
|
||||
klog.Infof("Before broadcastjob reconcile %s/%s, desired=%d, active=%d, failed=%d", job.Namespace, job.Name, desired, active, failed)
|
||||
klog.InfoS("BroadcastJob has some nodes remaining to schedule pods", "broadcastJob", klog.KObj(job), "restNodeCount", len(restNodesToRunPod), "desiredNodeCount", desired)
|
||||
klog.InfoS("Before BroadcastJob reconcile, with desired, active and failed counts",
|
||||
"broadcastJob", klog.KObj(job), "desiredCount", desired, "activeCount", active, "failedCount", failed)
|
||||
job.Status.Active = active
|
||||
job.Status.Failed = failed
|
||||
job.Status.Succeeded = succeeded
|
||||
|
@ -304,7 +305,7 @@ func (r *ReconcileBroadcastJob) Reconcile(_ context.Context, request reconcile.R
|
|||
// Handle Job failures, delete all active pods
|
||||
failed, active, err = r.deleteJobPods(job, activePods, failed, active)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to deleteJobPods for job %s,", job.Name)
|
||||
klog.ErrorS(err, "Failed to deleteJobPods for job", "broadcastJob", klog.KObj(job))
|
||||
}
|
||||
job.Status.Phase = appsv1alpha1.PhaseFailed
|
||||
requeueAfter = finishJob(job, appsv1alpha1.JobFailed, failureMessage)
|
||||
|
@ -316,7 +317,7 @@ func (r *ReconcileBroadcastJob) Reconcile(_ context.Context, request reconcile.R
|
|||
// should we remove the pods without nodes associated, the podgc controller will do this if enabled
|
||||
failed, active, err = r.deleteJobPods(job, podsToDelete, failed, active)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to deleteJobPods for job %s,", job.Name)
|
||||
klog.ErrorS(err, "Failed to delete BroadcastJob Pods", "broadcastJob", klog.KObj(job))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -324,7 +325,7 @@ func (r *ReconcileBroadcastJob) Reconcile(_ context.Context, request reconcile.R
|
|||
if job.DeletionTimestamp == nil && len(restNodesToRunPod) > 0 {
|
||||
active, err = r.reconcilePods(job, restNodesToRunPod, active, desired)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to reconcilePods for job %s,", job.Name)
|
||||
klog.ErrorS(err, "Failed to reconcile Pods for BroadcastJob", "broadcastJob", klog.KObj(job))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -336,20 +337,21 @@ func (r *ReconcileBroadcastJob) Reconcile(_ context.Context, request reconcile.R
|
|||
fmt.Sprintf("Job %s/%s is completed, %d pods succeeded, %d pods failed", job.Namespace, job.Name, succeeded, failed))
|
||||
}
|
||||
}
|
||||
klog.Infof("After broadcastjob reconcile %s/%s, desired=%d, active=%d, failed=%d", job.Namespace, job.Name, desired, active, failed)
|
||||
klog.InfoS("After broadcastjob reconcile, with desired, active and failed counts",
|
||||
"broadcastJob", klog.KObj(job), "desiredCount", desired, "activeCount", active, "failedCount", failed)
|
||||
|
||||
// update the status
|
||||
job.Status.Failed = failed
|
||||
job.Status.Active = active
|
||||
if err := r.updateJobStatus(request, job); err != nil {
|
||||
klog.Errorf("failed to update job %s, %v", job.Name, err)
|
||||
klog.ErrorS(err, "Failed to update BroadcastJob", "broadcastJob", klog.KObj(job))
|
||||
}
|
||||
|
||||
return reconcile.Result{RequeueAfter: requeueAfter}, err
|
||||
}
|
||||
|
||||
func (r *ReconcileBroadcastJob) updateJobStatus(request reconcile.Request, job *appsv1alpha1.BroadcastJob) error {
|
||||
klog.Infof("Updating job %s status %#v", job.Name, job.Status)
|
||||
klog.InfoS("Updating BroadcastJob status", "broadcastJob", klog.KObj(job), "status", job.Status)
|
||||
jobCopy := job.DeepCopy()
|
||||
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
err := r.Status().Update(context.TODO(), jobCopy)
|
||||
|
@ -372,15 +374,15 @@ func (r *ReconcileBroadcastJob) updateJobStatus(request reconcile.Request, job *
|
|||
// finishJob appends the condition to JobStatus, and sets ttl if needed
|
||||
func finishJob(job *appsv1alpha1.BroadcastJob, conditionType appsv1alpha1.JobConditionType, message string) time.Duration {
|
||||
job.Status.Conditions = append(job.Status.Conditions, newCondition(conditionType, string(conditionType), message))
|
||||
klog.Infof("job %s/%s is %s: %s", job.Namespace, job.Name, string(conditionType), message)
|
||||
klog.InfoS("BroadcastJob conditions updated", "broadcastJob", klog.KObj(job), "conditionType", string(conditionType), "message", message)
|
||||
|
||||
now := metav1.Now()
|
||||
job.Status.CompletionTime = &now
|
||||
|
||||
var requeueAfter time.Duration
|
||||
if job.Spec.CompletionPolicy.TTLSecondsAfterFinished != nil {
|
||||
klog.Infof("Job %s is %s, will be deleted after %d seconds", job.Name, string(conditionType),
|
||||
*job.Spec.CompletionPolicy.TTLSecondsAfterFinished)
|
||||
klog.InfoS("BroadcastJob will be deleted after a certain seconds", "broadcastJob", klog.KObj(job), "conditionType", string(conditionType),
|
||||
"TTLSecondsAfterFinished", *job.Spec.CompletionPolicy.TTLSecondsAfterFinished)
|
||||
// a bit more than the TTLSecondsAfterFinished to ensure it exceeds the TTLSecondsAfterFinished when being reconciled
|
||||
requeueAfter = time.Duration(*job.Spec.CompletionPolicy.TTLSecondsAfterFinished+1) * time.Second
|
||||
}
|
||||
|
@ -443,7 +445,7 @@ func (r *ReconcileBroadcastJob) reconcilePods(job *appsv1alpha1.BroadcastJob,
|
|||
go func(nodeName string) {
|
||||
defer wait.Done()
|
||||
// parallelize pod creation
|
||||
klog.Infof("creating pod on node %s", nodeName)
|
||||
klog.InfoS("Creating pod on node", "nodeName", nodeName)
|
||||
err := r.createPodOnNode(nodeName, job.Namespace, &job.Spec.Template, job, asOwner(job))
|
||||
if err != nil && errors.IsTimeout(err) {
|
||||
// Pod is created but its initialization has timed out.
|
||||
|
@ -493,7 +495,7 @@ func isJobComplete(job *appsv1alpha1.BroadcastJob, desiredNodes map[string]*core
|
|||
}
|
||||
// if no desiredNodes, job pending
|
||||
if len(desiredNodes) == 0 {
|
||||
klog.Info("Num desiredNodes is 0")
|
||||
klog.InfoS("Num desiredNodes is 0")
|
||||
return false
|
||||
}
|
||||
for _, pod := range desiredNodes {
|
||||
|
@ -539,7 +541,7 @@ func getNodesToRunPod(nodes *corev1.NodeList, job *appsv1alpha1.BroadcastJob,
|
|||
if pod, ok := existingNodeToPodMap[node.Name]; ok {
|
||||
canFit, err = checkNodeFitness(pod, &node)
|
||||
if !canFit && pod.DeletionTimestamp == nil {
|
||||
klog.Infof("Pod %s does not fit on node %s due to %v", pod.Name, node.Name, err)
|
||||
klog.ErrorS(err, "Pod did not fit on node", "pod", klog.KObj(pod), "nodeName", node.Name)
|
||||
podsToDelete = append(podsToDelete, pod)
|
||||
continue
|
||||
}
|
||||
|
@ -550,7 +552,7 @@ func getNodesToRunPod(nodes *corev1.NodeList, job *appsv1alpha1.BroadcastJob,
|
|||
mockPod := NewMockPod(job, node.Name)
|
||||
canFit, err = checkNodeFitness(mockPod, &node)
|
||||
if !canFit {
|
||||
klog.Infof("Pod does not fit on node %s due to %v", node.Name, err)
|
||||
klog.InfoS("Pod did not fit on node", "nodeName", node.Name, "err", err)
|
||||
continue
|
||||
}
|
||||
restNodesToRunPod = append(restNodesToRunPod, &nodes.Items[i])
|
||||
|
@ -568,7 +570,7 @@ func (r *ReconcileBroadcastJob) getNodeToPodMap(pods []*corev1.Pod, job *appsv1a
|
|||
nodeName := getAssignedNode(pod)
|
||||
if _, ok := nodeToPodMap[nodeName]; ok {
|
||||
// should not happen
|
||||
klog.Warningf("Duplicated pod %s run on the same node %s. this should not happen.", pod.Name, nodeName)
|
||||
klog.InfoS("Duplicated pod run on the same node. This should not happen", "pod", klog.KObj(pod), "nodeName", nodeName)
|
||||
r.recorder.Eventf(job, corev1.EventTypeWarning, "DuplicatePodCreatedOnSameNode",
|
||||
"Duplicated pod %s found on same node %s", pod.Name, nodeName)
|
||||
}
|
||||
|
@ -641,7 +643,7 @@ func logPredicateFailedReason(node *corev1.Node, status *framework.Status) (bool
|
|||
return true, nil
|
||||
}
|
||||
for _, reason := range status.Reasons() {
|
||||
klog.Errorf("Failed predicate on node %s : %s ", node.Name, reason)
|
||||
klog.ErrorS(fmt.Errorf(reason), "Failed to predicate on node", "nodeName", node.Name)
|
||||
}
|
||||
return status.IsSuccess(), status.AsError()
|
||||
}
|
||||
|
@ -669,7 +671,7 @@ func (r *ReconcileBroadcastJob) deleteJobPods(job *appsv1alpha1.BroadcastJob, po
|
|||
if err := r.Delete(context.TODO(), pods[ix]); err != nil {
|
||||
scaleExpectations.ObserveScale(key, expectations.Delete, getAssignedNode(pods[ix]))
|
||||
defer utilruntime.HandleError(err)
|
||||
klog.Infof("Failed to delete %v, job %q/%q", pods[ix].Name, job.Namespace, job.Name)
|
||||
klog.InfoS("Failed to delete BroadcastJob Pod", "pod", klog.KObj(pods[ix]), "broadcastJob", klog.KObj(job))
|
||||
errCh <- err
|
||||
} else {
|
||||
failedLock.Lock()
|
||||
|
@ -733,10 +735,10 @@ func (r *ReconcileBroadcastJob) createPod(nodeName, namespace string, template *
|
|||
|
||||
accessor, err := meta.Accessor(object)
|
||||
if err != nil {
|
||||
klog.Errorf("parentObject does not have ObjectMeta, %v", err)
|
||||
klog.ErrorS(err, "ParentObject did not have ObjectMeta")
|
||||
return nil
|
||||
}
|
||||
klog.Infof("Controller %v created pod %v", accessor.GetName(), pod.Name)
|
||||
klog.InfoS("Controller created pod", "controllerName", accessor.GetName(), "pod", klog.KObj(pod))
|
||||
r.recorder.Eventf(object, corev1.EventTypeNormal, kubecontroller.SuccessfulCreatePodReason, "Created pod: %v", pod.Name)
|
||||
|
||||
return nil
|
||||
|
|
|
@ -27,7 +27,7 @@ type podEventHandler struct {
|
|||
func isBroadcastJobController(controllerRef *metav1.OwnerReference) bool {
|
||||
refGV, err := schema.ParseGroupVersion(controllerRef.APIVersion)
|
||||
if err != nil {
|
||||
klog.Errorf("Could not parse OwnerReference %v APIVersion: %v", controllerRef, err)
|
||||
klog.ErrorS(err, "Could not parse APIVersion in OwnerReference", "ownerReference", controllerRef)
|
||||
return false
|
||||
}
|
||||
return controllerRef.Kind == controllerKind.Kind && refGV.Group == controllerKind.Group
|
||||
|
@ -89,13 +89,13 @@ func (p *enqueueBroadcastJobForNode) addNode(q workqueue.RateLimitingInterface,
|
|||
jobList := &v1alpha1.BroadcastJobList{}
|
||||
err := p.reader.List(context.TODO(), jobList)
|
||||
if err != nil {
|
||||
klog.Errorf("Error enqueueing broadcastjob on addNode %v", err)
|
||||
klog.ErrorS(err, "Failed to enqueue BroadcastJob on addNode")
|
||||
}
|
||||
for _, bcj := range jobList.Items {
|
||||
mockPod := NewMockPod(&bcj, node.Name)
|
||||
canFit, err := checkNodeFitness(mockPod, node)
|
||||
if !canFit {
|
||||
klog.Infof("Job %s/%s does not fit on node %s due to %v", bcj.Namespace, bcj.Name, node.Name, err)
|
||||
klog.ErrorS(err, "BroadcastJob did not fit on node", "broadcastJob", klog.KObj(&bcj), "nodeName", node.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -116,7 +116,7 @@ func (p *enqueueBroadcastJobForNode) updateNode(q workqueue.RateLimitingInterfac
|
|||
jobList := &v1alpha1.BroadcastJobList{}
|
||||
err := p.reader.List(context.TODO(), jobList)
|
||||
if err != nil {
|
||||
klog.Errorf("Error enqueueing broadcastjob on updateNode %v", err)
|
||||
klog.ErrorS(err, "Failed to enqueue BroadcastJob on updateNode")
|
||||
}
|
||||
for _, bcj := range jobList.Items {
|
||||
mockPod := NewMockPod(&bcj, oldNode.Name)
|
||||
|
|
|
@ -56,8 +56,8 @@ func filterPods(restartLimit int32, pods []*v1.Pod) ([]*v1.Pod, []*v1.Pod, []*v1
|
|||
activePods = append(activePods, p)
|
||||
}
|
||||
} else {
|
||||
klog.V(4).Infof("Ignoring inactive pod %v/%v in state %v, deletion time %v",
|
||||
p.Namespace, p.Name, p.Status.Phase, p.DeletionTimestamp)
|
||||
klog.V(4).InfoS("Ignoring inactive pod, deletion scheduled",
|
||||
"pod", klog.KObj(p), "phase", p.Status.Phase, "deletionTimestamp", p.DeletionTimestamp)
|
||||
}
|
||||
}
|
||||
return activePods, failedPods, succeededPods
|
||||
|
@ -158,6 +158,6 @@ func getAssignedNode(pod *v1.Pod) string {
|
|||
}
|
||||
}
|
||||
}
|
||||
klog.Warningf("Not found assigned node in Pod %s/%s", pod.Namespace, pod.Name)
|
||||
klog.InfoS("Could not find assigned node in Pod", "pod", klog.KObj(pod))
|
||||
return ""
|
||||
}
|
||||
|
|
|
@ -128,8 +128,8 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
|
|||
oldCS := e.ObjectOld.(*appsv1alpha1.CloneSet)
|
||||
newCS := e.ObjectNew.(*appsv1alpha1.CloneSet)
|
||||
if *oldCS.Spec.Replicas != *newCS.Spec.Replicas {
|
||||
klog.V(4).Infof("Observed updated replicas for CloneSet: %s/%s, %d->%d",
|
||||
newCS.Namespace, newCS.Name, *oldCS.Spec.Replicas, *newCS.Spec.Replicas)
|
||||
klog.V(4).InfoS("Observed updated replicas for CloneSet",
|
||||
"cloneSet", klog.KObj(newCS), "oldReplicas", *oldCS.Spec.Replicas, "newReplicas", *newCS.Spec.Replicas)
|
||||
}
|
||||
return true
|
||||
},
|
||||
|
@ -188,12 +188,12 @@ func (r *ReconcileCloneSet) doReconcile(request reconcile.Request) (res reconcil
|
|||
defer func() {
|
||||
if retErr == nil {
|
||||
if res.Requeue || res.RequeueAfter > 0 {
|
||||
klog.Infof("Finished syncing CloneSet %s, cost %v, result: %v", request, time.Since(startTime), res)
|
||||
klog.InfoS("Finished syncing CloneSet", "cloneSet", request, "cost", time.Since(startTime), "result", res)
|
||||
} else {
|
||||
klog.Infof("Finished syncing CloneSet %s, cost %v", request, time.Since(startTime))
|
||||
klog.InfoS("Finished syncing CloneSet", "cloneSet", request, "cost", time.Since(startTime))
|
||||
}
|
||||
} else {
|
||||
klog.Errorf("Failed syncing CloneSet %s: %v", request, retErr)
|
||||
klog.ErrorS(retErr, "Failed syncing CloneSet", "cloneSet", request)
|
||||
}
|
||||
// clean the duration store
|
||||
_ = clonesetutils.DurationStore.Pop(request.String())
|
||||
|
@ -206,7 +206,7 @@ func (r *ReconcileCloneSet) doReconcile(request reconcile.Request) (res reconcil
|
|||
if errors.IsNotFound(err) {
|
||||
// Object not found, return. Created objects are automatically garbage collected.
|
||||
// For additional cleanup logic use finalizers.
|
||||
klog.V(3).Infof("CloneSet %s has been deleted.", request)
|
||||
klog.V(3).InfoS("CloneSet has been deleted", "cloneSet", request)
|
||||
clonesetutils.ScaleExpectations.DeleteExpectations(request.String())
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
@ -215,13 +215,13 @@ func (r *ReconcileCloneSet) doReconcile(request reconcile.Request) (res reconcil
|
|||
|
||||
coreControl := clonesetcore.New(instance)
|
||||
if coreControl.IsInitializing() {
|
||||
klog.V(4).Infof("CloneSet %s skip reconcile for initializing", request)
|
||||
klog.V(4).InfoS("CloneSet skipped reconcile for initializing", "cloneSet", request)
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
selector, err := util.ValidatedLabelSelectorAsSelector(instance.Spec.Selector)
|
||||
if err != nil {
|
||||
klog.Errorf("Error converting CloneSet %s selector: %v", request, err)
|
||||
klog.ErrorS(err, "Error converting CloneSet selector", "cloneSet", request)
|
||||
// This is a non-transient error, so don't retry.
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
@ -229,10 +229,10 @@ func (r *ReconcileCloneSet) doReconcile(request reconcile.Request) (res reconcil
|
|||
// If scaling expectations have not satisfied yet, just skip this reconcile.
|
||||
if scaleSatisfied, unsatisfiedDuration, scaleDirtyPods := clonesetutils.ScaleExpectations.SatisfiedExpectations(request.String()); !scaleSatisfied {
|
||||
if unsatisfiedDuration >= expectations.ExpectationTimeout {
|
||||
klog.Warningf("Expectation unsatisfied overtime for %v, scaleDirtyPods=%v, overtime=%v", request.String(), scaleDirtyPods, unsatisfiedDuration)
|
||||
klog.InfoS("Expectation unsatisfied overtime", "cloneSet", request, "scaleDirtyPods", scaleDirtyPods, "overTime", unsatisfiedDuration)
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
klog.V(4).Infof("Not satisfied scale for %v, scaleDirtyPods=%v", request.String(), scaleDirtyPods)
|
||||
klog.V(4).InfoS("Not satisfied scale", "cloneSet", request, "scaleDirtyPods", scaleDirtyPods)
|
||||
return reconcile.Result{RequeueAfter: expectations.ExpectationTimeout - unsatisfiedDuration}, nil
|
||||
}
|
||||
|
||||
|
@ -279,20 +279,20 @@ func (r *ReconcileCloneSet) doReconcile(request reconcile.Request) (res reconcil
|
|||
clonesetutils.ResourceVersionExpectations.Observe(updateRevision)
|
||||
if isSatisfied, unsatisfiedDuration := clonesetutils.ResourceVersionExpectations.IsSatisfied(updateRevision); !isSatisfied {
|
||||
if unsatisfiedDuration < expectations.ExpectationTimeout {
|
||||
klog.V(4).Infof("Not satisfied resourceVersion for %v, wait for updateRevision %v updating", request.String(), updateRevision.Name)
|
||||
klog.V(4).InfoS("Not satisfied resourceVersion for CloneSet, wait for updateRevision updating", "cloneSet", request, "updateRevisionName", updateRevision.Name)
|
||||
return reconcile.Result{RequeueAfter: expectations.ExpectationTimeout - unsatisfiedDuration}, nil
|
||||
}
|
||||
klog.Warningf("Expectation unsatisfied overtime for %v, wait for updateRevision %v updating, timeout=%v", request.String(), updateRevision.Name, unsatisfiedDuration)
|
||||
klog.InfoS("Expectation unsatisfied overtime for CloneSet, wait for updateRevision updating timeout", "cloneSet", request, "updateRevisionName", updateRevision.Name, "timeout", unsatisfiedDuration)
|
||||
clonesetutils.ResourceVersionExpectations.Delete(updateRevision)
|
||||
}
|
||||
for _, pod := range filteredPods {
|
||||
clonesetutils.ResourceVersionExpectations.Observe(pod)
|
||||
if isSatisfied, unsatisfiedDuration := clonesetutils.ResourceVersionExpectations.IsSatisfied(pod); !isSatisfied {
|
||||
if unsatisfiedDuration >= expectations.ExpectationTimeout {
|
||||
klog.Warningf("Expectation unsatisfied overtime for %v, wait for pod %v updating, timeout=%v", request.String(), pod.Name, unsatisfiedDuration)
|
||||
klog.InfoS("Expectation unsatisfied overtime for CloneSet, wait for pod updating timeout", "cloneSet", request, "pod", klog.KObj(pod), "timeout", unsatisfiedDuration)
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
klog.V(4).Infof("Not satisfied resourceVersion for %v, wait for pod %v updating", request.String(), pod.Name)
|
||||
klog.V(4).InfoS("Not satisfied resourceVersion for CloneSet, wait for pod updating", "cloneSet", request, "pod", klog.KObj(pod))
|
||||
return reconcile.Result{RequeueAfter: expectations.ExpectationTimeout - unsatisfiedDuration}, nil
|
||||
}
|
||||
}
|
||||
|
@ -314,7 +314,7 @@ func (r *ReconcileCloneSet) doReconcile(request reconcile.Request) (res reconcil
|
|||
minUpdatedReadyPodsIntStr := intstrutil.Parse(minUpdatedReadyPods)
|
||||
minUpdatedReadyPodsCount, err = intstrutil.GetScaledValueFromIntOrPercent(&minUpdatedReadyPodsIntStr, int(*instance.Spec.Replicas), true)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to GetScaledValueFromIntOrPercent of minUpdatedReadyPods for %s: %v", request, err)
|
||||
klog.ErrorS(err, "Failed to GetScaledValueFromIntOrPercent of minUpdatedReadyPods for CloneSet", "cloneSet", request)
|
||||
}
|
||||
}
|
||||
updatedReadyReplicas := instance.Status.UpdatedReadyReplicas
|
||||
|
@ -324,13 +324,13 @@ func (r *ReconcileCloneSet) doReconcile(request reconcile.Request) (res reconcil
|
|||
if int32(minUpdatedReadyPodsCount) <= updatedReadyReplicas {
|
||||
// pre-download images for new revision
|
||||
if err := r.createImagePullJobsForInPlaceUpdate(instance, currentRevision, updateRevision); err != nil {
|
||||
klog.Errorf("Failed to create ImagePullJobs for %s: %v", request, err)
|
||||
klog.ErrorS(err, "Failed to create ImagePullJobs for CloneSet", "cloneSet", request)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// delete ImagePullJobs if revisions have been consistent
|
||||
if err := imagejobutilfunc.DeleteJobsForWorkload(r.Client, instance); err != nil {
|
||||
klog.Errorf("Failed to delete imagepulljobs for %s: %v", request, err)
|
||||
klog.ErrorS(err, "Failed to delete imagepulljobs for CloneSet", "cloneSet", request)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -344,11 +344,11 @@ func (r *ReconcileCloneSet) doReconcile(request reconcile.Request) (res reconcil
|
|||
}
|
||||
|
||||
if err = r.truncatePodsToDelete(instance, filteredPods); err != nil {
|
||||
klog.Warningf("Failed to truncate podsToDelete for %s: %v", request, err)
|
||||
klog.ErrorS(err, "Failed to truncate podsToDelete for CloneSet", "cloneSet", request)
|
||||
}
|
||||
|
||||
if err = r.truncateHistory(instance, filteredPods, revisions, currentRevision, updateRevision); err != nil {
|
||||
klog.Errorf("Failed to truncate history for %s: %v", request, err)
|
||||
klog.ErrorS(err, "Failed to truncate history for CloneSet", "cloneSet", request)
|
||||
}
|
||||
|
||||
if syncErr == nil && instance.Spec.MinReadySeconds > 0 && newStatus.AvailableReplicas != newStatus.ReadyReplicas {
|
||||
|
@ -452,7 +452,7 @@ func (r *ReconcileCloneSet) getActiveRevisions(cs *appsv1alpha1.CloneSet, revisi
|
|||
} else if equalCount > 0 {
|
||||
lastEqualRevision := equalRevisions[equalCount-1]
|
||||
if !VCTHashEqual(lastEqualRevision, updateRevision) {
|
||||
klog.Infof("revision %v vct hash will be updated from %v to %v", lastEqualRevision.Name, lastEqualRevision.Annotations[volumeclaimtemplate.HashAnnotation], updateRevision.Annotations[volumeclaimtemplate.HashAnnotation])
|
||||
klog.InfoS("Revision vct hash will be updated", "revisionName", lastEqualRevision.Name, "lastRevisionVCTHash", lastEqualRevision.Annotations[volumeclaimtemplate.HashAnnotation], "updateRevisionVCTHash", updateRevision.Annotations[volumeclaimtemplate.HashAnnotation])
|
||||
lastEqualRevision.Annotations[volumeclaimtemplate.HashAnnotation] = updateRevision.Annotations[volumeclaimtemplate.HashAnnotation]
|
||||
}
|
||||
// if the equivalent revision is not immediately prior we will roll back by incrementing the
|
||||
|
@ -661,7 +661,7 @@ func (r *ReconcileCloneSet) cleanupPVCs(
|
|||
if err := r.updateOnePVC(cs, pvc); err != nil && !errors.IsNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
klog.V(3).Infof("Update CloneSet(%s/%s) pvc(%s)'s ownerRef to Pod", cs.Namespace, cs.Name, pvc.Name)
|
||||
klog.V(3).InfoS("Updated CloneSet pvc's ownerRef to Pod", "cloneSet", klog.KObj(cs), "pvc", klog.KObj(pvc))
|
||||
}
|
||||
// delete pvc directly
|
||||
for i := range toDeletePVCs {
|
||||
|
@ -669,7 +669,7 @@ func (r *ReconcileCloneSet) cleanupPVCs(
|
|||
if err := r.deleteOnePVC(cs, pvc); err != nil && !errors.IsNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
klog.V(3).Infof("Delete CloneSet(%s/%s) pvc(%s) directly", cs.Namespace, cs.Name, pvc.Name)
|
||||
klog.V(3).InfoS("Deleted CloneSet pvc directly", "cloneSet", klog.KObj(cs), "pvc", klog.KObj(pvc))
|
||||
}
|
||||
return activePVCs, nil
|
||||
}
|
||||
|
|
|
@ -69,7 +69,7 @@ func (e *podEventHandler) Create(ctx context.Context, evt event.CreateEvent, q w
|
|||
if req == nil {
|
||||
return
|
||||
}
|
||||
klog.V(4).Infof("Pod %s/%s created, owner: %s", pod.Namespace, pod.Name, req.Name)
|
||||
klog.V(4).InfoS("Pod created", "pod", klog.KObj(pod), "owner", req)
|
||||
|
||||
isSatisfied, _, _ := clonesetutils.ScaleExpectations.SatisfiedExpectations(req.String())
|
||||
clonesetutils.ScaleExpectations.ObserveScale(req.String(), expectations.Create, pod.Name)
|
||||
|
@ -93,7 +93,7 @@ func (e *podEventHandler) Create(ctx context.Context, evt event.CreateEvent, q w
|
|||
if len(csList) == 0 {
|
||||
return
|
||||
}
|
||||
klog.V(4).Infof("Orphan Pod %s/%s created, matched owner: %s", pod.Namespace, pod.Name, e.joinCloneSetNames(csList))
|
||||
klog.V(4).InfoS("Orphan Pod created", "pod", klog.KObj(pod), "owner", e.joinCloneSetNames(csList))
|
||||
for _, cs := range csList {
|
||||
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
|
||||
Name: cs.GetName(),
|
||||
|
@ -149,7 +149,7 @@ func (e *podEventHandler) Update(ctx context.Context, evt event.UpdateEvent, q w
|
|||
}
|
||||
}
|
||||
|
||||
klog.V(4).Infof("Pod %s/%s updated, owner: %s", curPod.Namespace, curPod.Name, req.Name)
|
||||
klog.V(4).InfoS("Pod updated", "pod", klog.KObj(curPod), "owner", req)
|
||||
q.Add(*req)
|
||||
return
|
||||
}
|
||||
|
@ -161,8 +161,7 @@ func (e *podEventHandler) Update(ctx context.Context, evt event.UpdateEvent, q w
|
|||
if len(csList) == 0 {
|
||||
return
|
||||
}
|
||||
klog.V(4).Infof("Orphan Pod %s/%s updated, matched owner: %s",
|
||||
curPod.Namespace, curPod.Name, e.joinCloneSetNames(csList))
|
||||
klog.V(4).InfoS("Orphan Pod updated", "pod", klog.KObj(curPod), "owner", e.joinCloneSetNames(csList))
|
||||
for _, cs := range csList {
|
||||
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
|
||||
Name: cs.GetName(),
|
||||
|
@ -184,7 +183,7 @@ func (e *podEventHandler) shouldIgnoreUpdate(req *reconcile.Request, oldPod, cur
|
|||
func (e *podEventHandler) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) {
|
||||
pod, ok := evt.Object.(*v1.Pod)
|
||||
if !ok {
|
||||
klog.Errorf("DeleteEvent parse pod failed, DeleteStateUnknown: %#v, obj: %#v", evt.DeleteStateUnknown, evt.Object)
|
||||
klog.ErrorS(nil, "Skipped pod deletion event", "deleteStateUnknown", evt.DeleteStateUnknown, "obj", evt.Object)
|
||||
return
|
||||
}
|
||||
clonesetutils.ResourceVersionExpectations.Delete(pod)
|
||||
|
@ -199,7 +198,7 @@ func (e *podEventHandler) Delete(ctx context.Context, evt event.DeleteEvent, q w
|
|||
return
|
||||
}
|
||||
|
||||
klog.V(4).Infof("Pod %s/%s deleted, owner: %s", pod.Namespace, pod.Name, req.Name)
|
||||
klog.V(4).InfoS("Pod deleted", "pod", klog.KObj(pod), "owner", req)
|
||||
clonesetutils.ScaleExpectations.ObserveScale(req.String(), expectations.Delete, pod.Name)
|
||||
q.Add(*req)
|
||||
}
|
||||
|
@ -212,7 +211,7 @@ func resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference
|
|||
// Parse the Group out of the OwnerReference to compare it to what was parsed out of the requested OwnerType
|
||||
refGV, err := schema.ParseGroupVersion(controllerRef.APIVersion)
|
||||
if err != nil {
|
||||
klog.Errorf("Could not parse OwnerReference %v APIVersion: %v", controllerRef, err)
|
||||
klog.ErrorS(err, "Could not parse APIVersion in OwnerReference", "ownerRef", controllerRef)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -250,8 +249,7 @@ func (e *podEventHandler) getPodCloneSets(pod *v1.Pod) []appsv1alpha1.CloneSet {
|
|||
if len(csMatched) > 1 {
|
||||
// ControllerRef will ensure we don't do anything crazy, but more than one
|
||||
// item in this list nevertheless constitutes user error.
|
||||
klog.Warningf("Error! More than one CloneSet is selecting pod %s/%s : %s",
|
||||
pod.Namespace, pod.Name, e.joinCloneSetNames(csMatched))
|
||||
klog.InfoS("Error! More than one CloneSet is selecting pod", "pod", klog.KObj(pod), "cloneSets", e.joinCloneSetNames(csMatched))
|
||||
}
|
||||
return csMatched
|
||||
}
|
||||
|
@ -303,7 +301,7 @@ func (e *pvcEventHandler) Update(ctx context.Context, evt event.UpdateEvent, q w
|
|||
func (e *pvcEventHandler) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) {
|
||||
pvc, ok := evt.Object.(*v1.PersistentVolumeClaim)
|
||||
if !ok {
|
||||
klog.Errorf("DeleteEvent parse pvc failed, DeleteStateUnknown: %#v, obj: %#v", evt.DeleteStateUnknown, evt.Object)
|
||||
klog.ErrorS(nil, "Skipped pvc deletion event", "deleteStateUnknown", evt.DeleteStateUnknown, "obj", evt.Object)
|
||||
return
|
||||
}
|
||||
|
||||
|
|
|
@ -46,15 +46,14 @@ func (r *ReconcileCloneSet) createImagePullJobsForInPlaceUpdate(cs *appsv1alpha1
|
|||
|
||||
// ignore if update type is ReCreate
|
||||
if cs.Spec.UpdateStrategy.Type == appsv1alpha1.RecreateCloneSetUpdateStrategyType {
|
||||
klog.V(4).Infof("CloneSet %s/%s skipped to create ImagePullJob for update type is %s",
|
||||
cs.Namespace, cs.Name, cs.Spec.UpdateStrategy.Type)
|
||||
klog.V(4).InfoS("CloneSet skipped to create ImagePullJob for update type is ReCreate", "cloneSet", klog.KObj(cs))
|
||||
return r.patchControllerRevisionLabels(updateRevision, appsv1alpha1.ImagePreDownloadIgnoredKey, "true")
|
||||
}
|
||||
|
||||
// ignore if replicas <= minimumReplicasToPreDownloadImage
|
||||
if *cs.Spec.Replicas <= minimumReplicasToPreDownloadImage {
|
||||
klog.V(4).Infof("CloneSet %s/%s skipped to create ImagePullJob for replicas %d <= %d",
|
||||
cs.Namespace, cs.Name, *cs.Spec.Replicas, minimumReplicasToPreDownloadImage)
|
||||
klog.V(4).InfoS("CloneSet skipped to create ImagePullJob because replicas less than or equal to the minimum threshold",
|
||||
"cloneSet", klog.KObj(cs), "replicas", *cs.Spec.Replicas, "minimumReplicasToPreDownloadImage", minimumReplicasToPreDownloadImage)
|
||||
return r.patchControllerRevisionLabels(updateRevision, appsv1alpha1.ImagePreDownloadIgnoredKey, "true")
|
||||
}
|
||||
|
||||
|
@ -63,7 +62,7 @@ func (r *ReconcileCloneSet) createImagePullJobsForInPlaceUpdate(cs *appsv1alpha1
|
|||
if cs.Spec.UpdateStrategy.Partition != nil {
|
||||
pValue, err := util.CalculatePartitionReplicas(cs.Spec.UpdateStrategy.Partition, cs.Spec.Replicas)
|
||||
if err != nil {
|
||||
klog.Errorf("CloneSet %s/%s partition value is illegal", cs.Namespace, cs.Name)
|
||||
klog.ErrorS(err, "CloneSet partition value was illegal", "cloneSet", klog.KObj(cs))
|
||||
return err
|
||||
}
|
||||
partition = pValue
|
||||
|
@ -71,8 +70,8 @@ func (r *ReconcileCloneSet) createImagePullJobsForInPlaceUpdate(cs *appsv1alpha1
|
|||
maxUnavailable, _ = intstrutil.GetValueFromIntOrPercent(
|
||||
intstrutil.ValueOrDefault(cs.Spec.UpdateStrategy.MaxUnavailable, intstrutil.FromString(appsv1alpha1.DefaultCloneSetMaxUnavailable)), int(*cs.Spec.Replicas), false)
|
||||
if partition == 0 && maxUnavailable >= int(*cs.Spec.Replicas) {
|
||||
klog.V(4).Infof("CloneSet %s/%s skipped to create ImagePullJob for all Pods update in one batch, replicas=%d, partition=%d, maxUnavailable=%d",
|
||||
cs.Namespace, cs.Name, *cs.Spec.Replicas, partition, maxUnavailable)
|
||||
klog.V(4).InfoS("CloneSet skipped to create ImagePullJob for all Pods update in one batch",
|
||||
"cloneSet", klog.KObj(cs), "replicas", *cs.Spec.Replicas, "partition", partition, "maxUnavailable", maxUnavailable)
|
||||
return r.patchControllerRevisionLabels(updateRevision, appsv1alpha1.ImagePreDownloadIgnoredKey, "true")
|
||||
}
|
||||
|
||||
|
@ -80,8 +79,8 @@ func (r *ReconcileCloneSet) createImagePullJobsForInPlaceUpdate(cs *appsv1alpha1
|
|||
coreControl := clonesetcore.New(cs)
|
||||
inplaceControl := inplaceupdate.New(r.Client, clonesetutils.RevisionAdapterImpl)
|
||||
if !inplaceControl.CanUpdateInPlace(currentRevision, updateRevision, coreControl.GetUpdateOptions()) {
|
||||
klog.V(4).Infof("CloneSet %s/%s skipped to create ImagePullJob for %s -> %s can not update in-place",
|
||||
cs.Namespace, cs.Name, currentRevision.Name, updateRevision.Name)
|
||||
klog.V(4).InfoS("CloneSet skipped to create ImagePullJob because in-place update was not possible",
|
||||
"cloneSet", klog.KObj(cs), "currentRevision", klog.KObj(currentRevision), "updateRevision", klog.KObj(updateRevision))
|
||||
return r.patchControllerRevisionLabels(updateRevision, appsv1alpha1.ImagePreDownloadIgnoredKey, "true")
|
||||
}
|
||||
|
||||
|
@ -114,20 +113,20 @@ func (r *ReconcileCloneSet) createImagePullJobsForInPlaceUpdate(cs *appsv1alpha1
|
|||
}
|
||||
|
||||
containerImages := diffImagesBetweenRevisions(currentRevision, updateRevision)
|
||||
klog.V(3).Infof("CloneSet %s/%s begin to create ImagePullJobs for revision %s -> %s: %v",
|
||||
cs.Namespace, cs.Name, currentRevision.Name, updateRevision.Name, containerImages)
|
||||
klog.V(3).InfoS("CloneSet began to create ImagePullJobs for the revision changes",
|
||||
"cloneSet", klog.KObj(cs), "currentRevision", klog.KObj(currentRevision), "updateRevision", klog.KObj(updateRevision), "containerImages", containerImages)
|
||||
for name, image := range containerImages {
|
||||
// job name is revision name + container name, it can not be more than 255 characters
|
||||
jobName := fmt.Sprintf("%s-%s", updateRevision.Name, name)
|
||||
err := imagejobutilfunc.CreateJobForWorkload(r.Client, cs, clonesetutils.ControllerKind, jobName, image, labelMap, annotationMap, *selector, pullSecrets)
|
||||
if err != nil {
|
||||
if !errors.IsAlreadyExists(err) {
|
||||
klog.Errorf("CloneSet %s/%s failed to create ImagePullJob %s: %v", cs.Namespace, cs.Name, jobName, err)
|
||||
klog.ErrorS(err, "CloneSet failed to create ImagePullJob", "cloneSet", klog.KObj(cs), "jobName", jobName)
|
||||
r.recorder.Eventf(cs, v1.EventTypeNormal, "FailedCreateImagePullJob", "failed to create ImagePullJob %s: %v", jobName, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
klog.V(3).Infof("CloneSet %s/%s created ImagePullJob %s for image: %s", cs.Namespace, cs.Name, jobName, image)
|
||||
klog.V(3).InfoS("CloneSet created ImagePullJob for the image", "cloneSet", klog.KObj(cs), "jobName", jobName, "image", image)
|
||||
r.recorder.Eventf(cs, v1.EventTypeNormal, "CreatedImagePullJob", "created ImagePullJob %s for image: %s", jobName, image)
|
||||
}
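// --- Illustrative sketch (not part of this commit) ---
// The hunks above follow one pattern: printf-style klog calls become structured
// klog.InfoS/ErrorS calls whose values are key/value pairs, with klog.KObj (or
// klog.KRef) rendering namespace/name references. The reconciler names below are
// hypothetical; only the k8s.io/klog/v2 calls are assumed to exist as used.
package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	err := errors.New("connection refused")
	ns, name, jobName := "default", "demo", "demo-rev1-app"

	// Before: positional formatting, error folded into the message.
	klog.Errorf("CloneSet %s/%s failed to create ImagePullJob %s: %v", ns, name, jobName, err)

	// After: the error is a dedicated first argument and every value gets a key,
	// so log backends can index on "cloneSet" or "jobName".
	klog.ErrorS(err, "CloneSet failed to create ImagePullJob",
		"cloneSet", klog.KRef(ns, name), "jobName", jobName)

	klog.V(3).InfoS("CloneSet created ImagePullJob for the image",
		"cloneSet", klog.KRef(ns, name), "jobName", jobName, "image", "nginx:1.25")
	klog.Flush()
}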
|
||||
|
||||
|
|
|
@ -53,8 +53,8 @@ func (r *realStatusUpdater) UpdateCloneSetStatus(cs *appsv1alpha1.CloneSet, newS
|
|||
if !r.inconsistentStatus(cs, newStatus) {
|
||||
return nil
|
||||
}
|
||||
klog.Infof("To update CloneSet status for %s/%s, replicas=%d ready=%d available=%d updated=%d updatedReady=%d, revisions current=%s update=%s",
|
||||
cs.Namespace, cs.Name, newStatus.Replicas, newStatus.ReadyReplicas, newStatus.AvailableReplicas, newStatus.UpdatedReplicas, newStatus.UpdatedReadyReplicas, newStatus.CurrentRevision, newStatus.UpdateRevision)
|
||||
klog.InfoS("To update CloneSet status", "cloneSet", klog.KObj(cs), "replicas", newStatus.Replicas, "ready", newStatus.ReadyReplicas, "available", newStatus.AvailableReplicas,
|
||||
"updated", newStatus.UpdatedReplicas, "updatedReady", newStatus.UpdatedReadyReplicas, "currentRevision", newStatus.CurrentRevision, "updateRevision", newStatus.UpdateRevision)
|
||||
return r.updateStatus(cs, newStatus)
|
||||
}
|
||||
|
||||
|
|
|
@ -55,10 +55,9 @@ func (r *realControl) Scale(
|
|||
return false, fmt.Errorf("spec.Replicas is nil")
|
||||
}
|
||||
|
||||
controllerKey := clonesetutils.GetControllerKey(updateCS)
|
||||
coreControl := clonesetcore.New(updateCS)
|
||||
if !coreControl.IsReadyToScale() {
|
||||
klog.Warningf("CloneSet %s skip scaling for not ready to scale", controllerKey)
|
||||
klog.InfoS("CloneSet skipped scaling for not ready to scale", "cloneSet", klog.KObj(updateCS))
|
||||
return false, nil
|
||||
}
|
||||
|
||||
|
@ -83,8 +82,8 @@ func (r *realControl) Scale(
|
|||
// lack number of current version
|
||||
expectedCurrentCreations := diffRes.scaleUpNumOldRevision
|
||||
|
||||
klog.V(3).Infof("CloneSet %s begin to scale out %d pods including %d (current rev)",
|
||||
controllerKey, expectedCreations, expectedCurrentCreations)
|
||||
klog.V(3).InfoS("CloneSet began to scale out pods, including current revision",
|
||||
"cloneSet", klog.KObj(updateCS), "expectedCreations", expectedCreations, "expectedCurrentCreations", expectedCurrentCreations)
|
||||
|
||||
// available instance-id come from free pvc
|
||||
availableIDs := getOrGenAvailableIDs(expectedCreations, pods, pvcs)
|
||||
|
@ -100,7 +99,7 @@ func (r *realControl) Scale(
|
|||
|
||||
// 4. try to delete pods already in pre-delete
|
||||
if len(podsInPreDelete) > 0 {
|
||||
klog.V(3).Infof("CloneSet %s try to delete pods in preDelete: %v", controllerKey, util.GetPodNames(podsInPreDelete).List())
|
||||
klog.V(3).InfoS("CloneSet tried to delete pods in preDelete", "cloneSet", klog.KObj(updateCS), "pods", util.GetPodNames(podsInPreDelete).List())
|
||||
if modified, err := r.deletePods(updateCS, podsInPreDelete, pvcs); err != nil || modified {
|
||||
return modified, err
|
||||
}
|
||||
|
@ -109,8 +108,8 @@ func (r *realControl) Scale(
|
|||
// 5. specified delete
|
||||
if podsToDelete := util.DiffPods(podsSpecifiedToDelete, podsInPreDelete); len(podsToDelete) > 0 {
|
||||
newPodsToDelete, oldPodsToDelete := clonesetutils.GroupUpdateAndNotUpdatePods(podsToDelete, updateRevision)
|
||||
klog.V(3).Infof("CloneSet %s try to delete pods specified. Delete ready limit: %d. New Pods: %v, old Pods: %v.",
|
||||
controllerKey, diffRes.deleteReadyLimit, util.GetPodNames(newPodsToDelete).List(), util.GetPodNames(oldPodsToDelete).List())
|
||||
klog.V(3).InfoS("CloneSet tried to delete pods specified", "cloneSet", klog.KObj(updateCS), "deleteReadyLimit", diffRes.deleteReadyLimit,
|
||||
"newPods", util.GetPodNames(newPodsToDelete).List(), "oldPods", util.GetPodNames(oldPodsToDelete).List())
|
||||
|
||||
podsCanDelete := make([]*v1.Pod, 0, len(podsToDelete))
|
||||
for _, pod := range podsToDelete {
|
||||
|
@ -130,13 +129,13 @@ func (r *realControl) Scale(
|
|||
// 6. scale in
|
||||
if diffRes.scaleDownNum > 0 {
|
||||
if numToDelete > 0 {
|
||||
klog.V(3).Infof("CloneSet %s skip to scale in %d for %d to delete, including %d specified and %d preDelete",
|
||||
controllerKey, diffRes.scaleDownNum, numToDelete, len(podsSpecifiedToDelete), len(podsInPreDelete))
|
||||
klog.V(3).InfoS("CloneSet skipped to scale in for deletion", "cloneSet", klog.KObj(updateCS), "scaleDownNum", diffRes.scaleDownNum,
|
||||
"numToDelete", numToDelete, "specifiedToDelete", len(podsSpecifiedToDelete), "preDelete", len(podsInPreDelete))
|
||||
return false, nil
|
||||
}
|
||||
|
||||
klog.V(3).Infof("CloneSet %s begin to scale in %d pods including %d (current rev), delete ready limit: %d",
|
||||
controllerKey, diffRes.scaleDownNum, diffRes.scaleDownNumOldRevision, diffRes.deleteReadyLimit)
|
||||
klog.V(3).InfoS("CloneSet began to scale in", "cloneSet", klog.KObj(updateCS), "scaleDownNum", diffRes.scaleDownNum,
|
||||
"oldRevision", diffRes.scaleDownNumOldRevision, "deleteReadyLimit", diffRes.deleteReadyLimit)
|
||||
|
||||
podsPreparingToDelete := r.choosePodsToDelete(updateCS, diffRes.scaleDownNum, diffRes.scaleDownNumOldRevision, notUpdatedPods, updatedPods)
|
||||
podsToDelete := make([]*v1.Pod, 0, len(podsPreparingToDelete))
|
||||
|
@ -174,8 +173,8 @@ func (r *realControl) managePreparingDelete(cs *appsv1alpha1.CloneSet, pods, pod
|
|||
continue
|
||||
}
|
||||
|
||||
klog.V(3).Infof("CloneSet %s cancel deletion of pod %s, and patch lifecycle from PreparingDelete to PreparingNormal",
|
||||
clonesetutils.GetControllerKey(cs), pod.Name)
|
||||
klog.V(3).InfoS("CloneSet canceled deletion of pod and patch lifecycle from PreparingDelete to PreparingNormal",
|
||||
"cloneSet", klog.KObj(cs), "pod", klog.KObj(pod))
|
||||
if updated, gotPod, err := r.lifecycleControl.UpdatePodLifecycle(pod, appspub.LifecycleStatePreparingNormal, false); err != nil {
|
||||
return modified, err
|
||||
} else if updated {
|
||||
|
@ -273,8 +272,8 @@ func (r *realControl) deletePods(cs *appsv1alpha1.CloneSet, podsToDelete []*v1.P
|
|||
if updated, gotPod, err := r.lifecycleControl.UpdatePodLifecycle(pod, appspub.LifecycleStatePreparingDelete, markPodNotReady); err != nil {
|
||||
return false, err
|
||||
} else if updated {
|
||||
klog.V(3).Infof("CloneSet %s scaling update pod %s lifecycle to PreparingDelete",
|
||||
clonesetutils.GetControllerKey(cs), pod.Name)
|
||||
klog.V(3).InfoS("CloneSet scaling update pod lifecycle to PreparingDelete",
|
||||
"cloneSet", klog.KObj(cs), "pod", klog.KObj(pod))
|
||||
modified = true
|
||||
clonesetutils.ResourceVersionExpectations.Expect(gotPod)
|
||||
}
|
||||
|
@ -387,7 +386,7 @@ func (r *realControl) choosePodsToDelete(cs *appsv1alpha1.CloneSet, totalDiff in
|
|||
},
|
||||
})
|
||||
} else if diff > len(pods) {
|
||||
klog.Warningf("Diff > len(pods) in choosePodsToDelete func which is not expected.")
|
||||
klog.InfoS("Diff > len(pods) in choosePodsToDelete func which is not expected")
|
||||
return pods
|
||||
}
|
||||
return pods[:diff]
|
||||
|
|
|
@ -97,7 +97,7 @@ func calculateDiffsWithExpectation(cs *appsv1alpha1.CloneSet, pods []*v1.Pod, cu
|
|||
if cs.Spec.UpdateStrategy.Partition != nil {
|
||||
if pValue, err := util.CalculatePartitionReplicas(cs.Spec.UpdateStrategy.Partition, cs.Spec.Replicas); err != nil {
|
||||
// TODO: maybe, we should block pod update if partition settings is wrong
|
||||
klog.Errorf("CloneSet %s/%s partition value is illegal", cs.Namespace, cs.Name)
|
||||
klog.ErrorS(err, "CloneSet partition value was illegal", "cloneSet", klog.KObj(cs))
|
||||
} else {
|
||||
partition = pValue
|
||||
}
|
||||
|
@ -106,7 +106,7 @@ func calculateDiffsWithExpectation(cs *appsv1alpha1.CloneSet, pods []*v1.Pod, cu
|
|||
maxSurge, _ = intstrutil.GetValueFromIntOrPercent(cs.Spec.UpdateStrategy.MaxSurge, replicas, true)
|
||||
if cs.Spec.UpdateStrategy.Paused {
|
||||
maxSurge = 0
|
||||
klog.V(3).Infof("Because CloneSet(%s/%s) updateStrategy.paused=true, and Set maxSurge=0", cs.Namespace, cs.Name)
|
||||
klog.V(3).InfoS("Because CloneSet updateStrategy.paused=true, and Set maxSurge=0", "cloneSet", klog.KObj(cs))
|
||||
}
|
||||
}
|
||||
maxUnavailable, _ = intstrutil.GetValueFromIntOrPercent(
|
||||
|
@ -121,16 +121,14 @@ func calculateDiffsWithExpectation(cs *appsv1alpha1.CloneSet, pods []*v1.Pod, cu
|
|||
if res.isEmpty() {
|
||||
return
|
||||
}
|
||||
klog.V(1).Infof("Calculate diffs for CloneSet %s/%s, replicas=%d, partition=%d, maxSurge=%d, maxUnavailable=%d,"+
|
||||
" allPods=%d, newRevisionPods=%d, newRevisionActivePods=%d, oldRevisionPods=%d, oldRevisionActivePods=%d,"+
|
||||
" unavailableNewRevisionCount=%d, unavailableOldRevisionCount=%d, preDeletingNewRevisionCount=%d, preDeletingOldRevisionCount=%d,"+
|
||||
" toDeleteNewRevisionCount=%d, toDeleteOldRevisionCount=%d, enabledPreparingUpdateAsUpdate=%v, useDefaultIsPodUpdate=%v."+
|
||||
" Result: %+v",
|
||||
cs.Namespace, cs.Name, replicas, partition, maxSurge, maxUnavailable,
|
||||
len(pods), newRevisionCount, newRevisionActiveCount, oldRevisionCount, oldRevisionActiveCount,
|
||||
unavailableNewRevisionCount, unavailableOldRevisionCount, preDeletingNewRevisionCount, preDeletingOldRevisionCount,
|
||||
toDeleteNewRevisionCount, toDeleteOldRevisionCount, utilfeature.DefaultFeatureGate.Enabled(features.PreparingUpdateAsUpdate), isPodUpdate == nil,
|
||||
res)
|
||||
klog.V(1).InfoS("Calculate diffs for CloneSet", "cloneSet", klog.KObj(cs), "replicas", replicas, "partition", partition,
|
||||
"maxSurge", maxSurge, "maxUnavailable", maxUnavailable, "allPodCount", len(pods), "newRevisionCount", newRevisionCount,
|
||||
"newRevisionActiveCount", newRevisionActiveCount, "oldrevisionCount", oldRevisionCount, "oldRevisionActiveCount", oldRevisionActiveCount,
|
||||
"unavailableNewRevisionCount", unavailableNewRevisionCount, "unavailableOldRevisionCount", unavailableOldRevisionCount,
|
||||
"preDeletingNewRevisionCount", preDeletingNewRevisionCount, "preDeletingOldRevisionCount", preDeletingOldRevisionCount,
|
||||
"toDeleteNewRevisionCount", toDeleteNewRevisionCount, "toDeleteOldRevisionCount", toDeleteOldRevisionCount,
|
||||
"enabledPreparingUpdateAsUpdate", utilfeature.DefaultFeatureGate.Enabled(features.PreparingUpdateAsUpdate), "useDefaultIsPodUpdate", isPodUpdate == nil,
|
||||
"result", res)
|
||||
}()
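// --- Illustrative sketch (not part of this commit) ---
// The calculateDiffsWithExpectation change above logs the final diff result from a
// deferred call. A minimal version of that shape, with hypothetical fields:
package main

import (
	"flag"

	"k8s.io/klog/v2"
)

type diffResult struct {
	ScaleUpNum   int
	ScaleDownNum int
}

func calculateDiffs(replicas, partition int) (res diffResult) {
	// Because res is a named return value, the deferred InfoS observes the
	// value that is actually returned to the caller.
	defer func() {
		klog.V(1).InfoS("Calculate diffs", "replicas", replicas, "partition", partition, "result", res)
	}()
	if partition > replicas {
		partition = replicas
	}
	res.ScaleUpNum = replicas - partition
	return res
}

func main() {
	klog.InitFlags(nil)
	_ = flag.Set("v", "1") // enable V(1) logs for the sketch
	flag.Parse()
	_ = calculateDiffs(5, 2)
	klog.Flush()
}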
|
||||
|
||||
// If PreparingUpdateAsUpdate feature gate is enabled:
|
||||
|
|
|
@ -103,16 +103,16 @@ func (c *realControl) Update(cs *appsv1alpha1.CloneSet,
|
|||
if waitUpdate {
|
||||
switch lifecycle.GetPodLifecycleState(pod) {
|
||||
case appspub.LifecycleStatePreparingDelete:
|
||||
klog.V(3).Infof("CloneSet %s/%s find pod %s in state %s, so skip to update it",
|
||||
cs.Namespace, cs.Name, pod.Name, lifecycle.GetPodLifecycleState(pod))
|
||||
klog.V(3).InfoS("CloneSet found pod in PreparingDelete state, so skipped updating it",
|
||||
"cloneSet", klog.KObj(cs), "pod", klog.KObj(pod))
|
||||
case appspub.LifecycleStateUpdated:
|
||||
klog.V(3).Infof("CloneSet %s/%s find pod %s in state %s but not in updated revision",
|
||||
cs.Namespace, cs.Name, pod.Name, appspub.LifecycleStateUpdated)
|
||||
klog.V(3).InfoS("CloneSet found pod in Updated state but not in updated revision",
|
||||
"cloneSet", klog.KObj(cs), "pod", klog.KObj(pod))
|
||||
canUpdate = true
|
||||
default:
|
||||
if gracePeriod, _ := appspub.GetInPlaceUpdateGrace(pod); gracePeriod != "" {
|
||||
klog.V(3).Infof("CloneSet %s/%s find pod %s still in grace period %s, so skip to update it",
|
||||
cs.Namespace, cs.Name, pod.Name, gracePeriod)
|
||||
klog.V(3).InfoS("CloneSet found pod still in grace period, so skipped updating it",
|
||||
"cloneSet", klog.KObj(cs), "pod", klog.KObj(pod), "gracePeriod", gracePeriod)
|
||||
} else {
|
||||
canUpdate = true
|
||||
}
|
||||
|
@ -161,8 +161,8 @@ func (c *realControl) refreshPodState(cs *appsv1alpha1.CloneSet, coreControl clo
|
|||
|
||||
res := c.inplaceControl.Refresh(pod, opts)
|
||||
if res.RefreshErr != nil {
|
||||
klog.Errorf("CloneSet %s/%s failed to update pod %s condition for inplace: %v",
|
||||
cs.Namespace, cs.Name, pod.Name, res.RefreshErr)
|
||||
klog.ErrorS(res.RefreshErr, "CloneSet failed to update pod condition for inplace",
|
||||
"cloneSet", klog.KObj(cs), "pod", klog.KObj(pod))
|
||||
return false, 0, res.RefreshErr
|
||||
}
|
||||
|
||||
|
@ -210,7 +210,7 @@ func (c *realControl) refreshPodState(cs *appsv1alpha1.CloneSet, coreControl clo
|
|||
return false, 0, err
|
||||
} else if updated {
|
||||
clonesetutils.ResourceVersionExpectations.Expect(gotPod)
|
||||
klog.V(3).Infof("CloneSet %s update pod %s lifecycle to %s", clonesetutils.GetControllerKey(cs), pod.Name, state)
|
||||
klog.V(3).InfoS("CloneSet updated pod lifecycle", "cloneSet", klog.KObj(cs), "pod", klog.KObj(pod), "newState", state)
|
||||
return true, res.DelayDuration, nil
|
||||
}
|
||||
}
|
||||
|
@ -228,7 +228,7 @@ func (c *realControl) fixPodTemplateHashLabel(cs *appsv1alpha1.CloneSet, pod *v1
|
|||
clonesetutils.GetShortHash(pod.Labels[apps.ControllerRevisionHashLabelKey])))
|
||||
pod = pod.DeepCopy()
|
||||
if err := c.Patch(context.TODO(), pod, client.RawPatch(types.StrategicMergePatchType, patch)); err != nil {
|
||||
klog.Warningf("CloneSet %s/%s failed to fix pod-template-hash to Pod %s: %v", cs.Namespace, cs.Name, pod.Name, err)
|
||||
klog.ErrorS(err, "CloneSet failed to fix pod-template-hash", "cloneSet", klog.KObj(cs), "pod", klog.KObj(pod))
|
||||
return false, err
|
||||
}
|
||||
clonesetutils.ResourceVersionExpectations.Expect(pod)
|
||||
|
@ -259,8 +259,7 @@ func (c *realControl) updatePod(cs *appsv1alpha1.CloneSet, coreControl clonesetc
|
|||
markPodNotReady := cs.Spec.Lifecycle.InPlaceUpdate.MarkPodNotReady
|
||||
if updated, gotPod, err = c.lifecycleControl.UpdatePodLifecycle(pod, appspub.LifecycleStatePreparingUpdate, markPodNotReady); err == nil && updated {
|
||||
clonesetutils.ResourceVersionExpectations.Expect(gotPod)
|
||||
klog.V(3).Infof("CloneSet %s update pod %s lifecycle to PreparingUpdate",
|
||||
clonesetutils.GetControllerKey(cs), pod.Name)
|
||||
klog.V(3).InfoS("CloneSet updated pod lifecycle to PreparingUpdate", "cloneSet", klog.KObj(cs), "pod", klog.KObj(pod))
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
|
@ -274,8 +273,7 @@ func (c *realControl) updatePod(cs *appsv1alpha1.CloneSet, coreControl clonesetc
|
|||
}
|
||||
if updated, gotPod, err = c.lifecycleControl.UpdatePodLifecycleWithHandler(pod, appspub.LifecycleStatePreparingUpdate, inPlaceUpdateHandler); err == nil && updated {
|
||||
clonesetutils.ResourceVersionExpectations.Expect(gotPod)
|
||||
klog.V(3).Infof("CloneSet %s update pod %s lifecycle to PreparingUpdate",
|
||||
clonesetutils.GetControllerKey(cs), pod.Name)
|
||||
klog.V(3).InfoS("CloneSet updated pod lifecycle to PreparingUpdate", "cloneSet", klog.KObj(cs), "pod", klog.KObj(pod))
|
||||
}
|
||||
return 0, err
|
||||
case appspub.LifecycleStatePreparingUpdate:
|
||||
|
@ -305,10 +303,10 @@ func (c *realControl) updatePod(cs *appsv1alpha1.CloneSet, coreControl clonesetc
|
|||
if cs.Spec.UpdateStrategy.Type == appsv1alpha1.InPlaceOnlyCloneSetUpdateStrategyType {
|
||||
return 0, fmt.Errorf("find Pod %s update strategy is InPlaceOnly but can not update in-place", pod.Name)
|
||||
}
|
||||
klog.Warningf("CloneSet %s/%s can not update Pod %s in-place, so it will back off to ReCreate", cs.Namespace, cs.Name, pod.Name)
|
||||
klog.InfoS("CloneSet could not update Pod in-place, so it will back off to ReCreate", "cloneSet", klog.KObj(cs), "pod", klog.KObj(pod))
|
||||
}
|
||||
|
||||
klog.V(2).Infof("CloneSet %s/%s start to patch Pod %s specified-delete for update %s", cs.Namespace, cs.Name, pod.Name, updateRevision.Name)
|
||||
klog.V(2).InfoS("CloneSet started to patch Pod specified-delete for update", "cloneSet", klog.KObj(cs), "pod", klog.KObj(pod), "updateRevision", klog.KObj(updateRevision))
|
||||
|
||||
if patched, err := specifieddelete.PatchPodSpecifiedDelete(c.Client, pod, "true"); err != nil {
|
||||
c.recorder.Eventf(cs, v1.EventTypeWarning, "FailedUpdatePodReCreate",
|
||||
|
|
|
@ -131,12 +131,12 @@ type ReconcileContainerLaunchPriority struct {
|
|||
|
||||
func (r *ReconcileContainerLaunchPriority) Reconcile(_ context.Context, request reconcile.Request) (res reconcile.Result, err error) {
|
||||
start := time.Now()
|
||||
klog.V(3).Infof("Starting to process Pod %v", request.NamespacedName)
|
||||
klog.V(3).InfoS("Starting to process Pod", "pod", request)
|
||||
defer func() {
|
||||
if err != nil {
|
||||
klog.Warningf("Failed to process Pod %v, elapsedTime %v, error: %v", request.NamespacedName, time.Since(start), err)
|
||||
klog.ErrorS(err, "Failed to process Pod", "pod", request, "elapsedTime", time.Since(start))
|
||||
} else {
|
||||
klog.Infof("Finish to process Pod %v, elapsedTime %v", request.NamespacedName, time.Since(start))
|
||||
klog.InfoS("Finish to process Pod", "pod", request, "elapsedTime", time.Since(start))
|
||||
}
|
||||
}()
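// --- Illustrative sketch (not part of this commit) ---
// The reconciler above wraps its work in a start/defer pair so that success and
// failure are logged with the elapsed time. The request type is simplified to a
// plain string here; only the klog calls mirror the real code.
package main

import (
	"fmt"
	"time"

	"k8s.io/klog/v2"
)

func reconcile(request string) (err error) {
	start := time.Now()
	klog.V(3).InfoS("Starting to process Pod", "pod", request)
	defer func() {
		if err != nil {
			klog.ErrorS(err, "Failed to process Pod", "pod", request, "elapsedTime", time.Since(start))
		} else {
			klog.InfoS("Finished processing Pod", "pod", request, "elapsedTime", time.Since(start))
		}
	}()
	if request == "" {
		return fmt.Errorf("empty request")
	}
	// ... the real reconciler would sync its barrier ConfigMap here ...
	return nil
}

func main() {
	_ = reconcile("default/demo")
	klog.Flush()
}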
|
||||
|
||||
|
@ -163,7 +163,7 @@ func (r *ReconcileContainerLaunchPriority) Reconcile(_ context.Context, request
|
|||
Name: pod.Name,
|
||||
UID: pod.UID,
|
||||
})
|
||||
klog.V(4).Infof("Creating ConfigMap %s for Pod %s/%s", barrier.Name, pod.Namespace, pod.Name)
|
||||
klog.V(4).InfoS("Creating ConfigMap for Pod", "configMap", klog.KObj(barrier), "pod", klog.KObj(pod))
|
||||
temErr := r.Client.Create(context.TODO(), barrier)
|
||||
if temErr != nil && !errors.IsAlreadyExists(temErr) {
|
||||
return reconcile.Result{}, temErr
|
||||
|
@ -205,13 +205,13 @@ func (r *ReconcileContainerLaunchPriority) handle(pod *v1.Pod, barrier *v1.Confi
|
|||
}
|
||||
|
||||
func (r *ReconcileContainerLaunchPriority) addPriorityIntoBarrier(barrier *v1.ConfigMap, priority int) error {
|
||||
klog.V(3).Infof("Adding priority %d into barrier %s/%s", priority, barrier.Namespace, barrier.Name)
|
||||
klog.V(3).InfoS("Adding priority into barrier", "priority", priority, "barrier", klog.KObj(barrier))
|
||||
body := fmt.Sprintf(`{"data":{"%s":"true"}}`, utilcontainerlaunchpriority.GetKey(priority))
|
||||
return r.Client.Patch(context.TODO(), barrier, client.RawPatch(types.StrategicMergePatchType, []byte(body)))
|
||||
}
|
||||
|
||||
func (r *ReconcileContainerLaunchPriority) patchCompleted(pod *v1.Pod) error {
|
||||
klog.V(3).Infof("Marking pod %s/%s as launch priority completed", pod.Namespace, pod.Name)
|
||||
klog.V(3).InfoS("Marking pod as launch priority completed", "pod", klog.KObj(pod))
|
||||
body := fmt.Sprintf(`{"metadata":{"annotations":{"%s":"true"}}}`, appspub.ContainerLaunchPriorityCompletedKey)
|
||||
return r.Client.Patch(context.TODO(), pod, client.RawPatch(types.StrategicMergePatchType, []byte(body)))
|
||||
}
|
||||
|
|
|
@ -122,14 +122,14 @@ type ReconcileContainerRecreateRequest struct {
|
|||
// and what is in the ContainerRecreateRequest.Spec
|
||||
func (r *ReconcileContainerRecreateRequest) Reconcile(_ context.Context, request reconcile.Request) (res reconcile.Result, err error) {
|
||||
start := time.Now()
|
||||
klog.V(3).Infof("Starting to process CRR %v", request.NamespacedName)
|
||||
klog.V(3).InfoS("Starting to process CRR", "containerRecreateRequest", request)
|
||||
defer func() {
|
||||
if err != nil {
|
||||
klog.Warningf("Failed to process CRR %v, elapsedTime %v, error: %v", request.NamespacedName, time.Since(start), err)
|
||||
klog.ErrorS(err, "Failed to process CRR", "containerRecreateRequest", request, "elapsedTime", time.Since(start))
|
||||
} else if res.RequeueAfter > 0 {
|
||||
klog.Infof("Finish to process CRR %v, elapsedTime %v, RetryAfter %v", request.NamespacedName, time.Since(start), res.RequeueAfter)
|
||||
klog.InfoS("Finished processing CRR with scheduled retry", "containerRecreateRequest", request, "elapsedTime", time.Since(start), "retryAfter", res.RequeueAfter)
|
||||
} else {
|
||||
klog.Infof("Finish to process CRR %v, elapsedTime %v", request.NamespacedName, time.Since(start))
|
||||
klog.InfoS("Finished processing CRR", "containerRecreateRequest", request, "elapsedTime", time.Since(start))
|
||||
}
|
||||
}()
|
||||
|
||||
|
@ -171,7 +171,7 @@ func (r *ReconcileContainerRecreateRequest) Reconcile(_ context.Context, request
|
|||
if crr.Spec.TTLSecondsAfterFinished != nil {
|
||||
leftTime = time.Duration(*crr.Spec.TTLSecondsAfterFinished)*time.Second - time.Since(crr.Status.CompletionTime.Time)
|
||||
if leftTime <= 0 {
|
||||
klog.Infof("Deleting CRR %s/%s for ttlSecondsAfterFinished", crr.Namespace, crr.Name)
|
||||
klog.InfoS("Deleting CRR for ttlSecondsAfterFinished", "containerRecreateRequest", klog.KObj(crr))
|
||||
if err = r.Delete(context.TODO(), crr); err != nil {
|
||||
return reconcile.Result{}, fmt.Errorf("delete CRR error: %v", err)
|
||||
}
|
||||
|
@ -182,8 +182,8 @@ func (r *ReconcileContainerRecreateRequest) Reconcile(_ context.Context, request
|
|||
}
|
||||
|
||||
if errors.IsNotFound(podErr) || pod.DeletionTimestamp != nil || string(pod.UID) != crr.Labels[appsv1alpha1.ContainerRecreateRequestPodUIDKey] {
|
||||
klog.Warningf("Complete CRR %s/%s as failure for Pod %s with UID=%s has gone",
|
||||
crr.Namespace, crr.Name, crr.Spec.PodName, crr.Labels[appsv1alpha1.ContainerRecreateRequestPodUIDKey])
|
||||
klog.InfoS("Completed CRR as failure for Pod has gone",
|
||||
"containerRecreateRequest", klog.KObj(crr), "podName", crr.Spec.PodName, "podUID", crr.Labels[appsv1alpha1.ContainerRecreateRequestPodUIDKey])
|
||||
return reconcile.Result{}, r.completeCRR(crr, "pod has gone")
|
||||
}
|
||||
|
||||
|
@ -193,7 +193,7 @@ func (r *ReconcileContainerRecreateRequest) Reconcile(_ context.Context, request
|
|||
if crr.Status.Phase == "" {
|
||||
leftTime := responseTimeout - time.Since(crr.CreationTimestamp.Time)
|
||||
if leftTime <= 0 {
|
||||
klog.Warningf("Complete CRR %s/%s as failure for daemon has not responded for a long time", crr.Namespace, crr.Name)
|
||||
klog.InfoS("Completed CRR as failure for daemon has not responded for a long time", "containerRecreateRequest", klog.KObj(crr))
|
||||
return reconcile.Result{}, r.completeCRR(crr, "daemon has not responded for a long time")
|
||||
}
|
||||
duration.Update(leftTime)
|
||||
|
@ -203,7 +203,7 @@ func (r *ReconcileContainerRecreateRequest) Reconcile(_ context.Context, request
|
|||
if crr.Spec.ActiveDeadlineSeconds != nil {
|
||||
leftTime := time.Duration(*crr.Spec.ActiveDeadlineSeconds)*time.Second - time.Since(crr.CreationTimestamp.Time)
|
||||
if leftTime <= 0 {
|
||||
klog.Warningf("Complete CRR %s/%s as failure for recreating has exceeded the activeDeadlineSeconds", crr.Namespace, crr.Name)
|
||||
klog.InfoS("Completed CRR as failure for recreating has exceeded the activeDeadlineSeconds", "containerRecreateRequest", klog.KObj(crr))
|
||||
return reconcile.Result{}, r.completeCRR(crr, "recreating has exceeded the activeDeadlineSeconds")
|
||||
}
|
||||
duration.Update(leftTime)
|
||||
|
@ -234,7 +234,7 @@ func (r *ReconcileContainerRecreateRequest) syncContainerStatuses(crr *appsv1alp
|
|||
c := &crr.Spec.Containers[i]
|
||||
containerStatus := util.GetContainerStatus(c.Name, pod)
|
||||
if containerStatus == nil {
|
||||
klog.Warningf("Not found %s container in Pod Status for CRR %s/%s", c.Name, crr.Namespace, crr.Name)
|
||||
klog.InfoS("Could not find container in Pod Status for CRR", "containerName", c.Name, "containerRecreateRequest", klog.KObj(crr))
|
||||
continue
|
||||
} else if containerStatus.State.Running == nil || containerStatus.State.Running.StartedAt.Before(&crr.CreationTimestamp) {
|
||||
// ignore non-running and history status
|
||||
|
@ -286,7 +286,8 @@ func (r *ReconcileContainerRecreateRequest) acquirePodNotReady(crr *appsv1alpha1
|
|||
return fmt.Errorf("add Pod not ready error: %v", err)
|
||||
}
|
||||
} else {
|
||||
klog.Warningf("CRR %s/%s can not set Pod %s to not ready, because Pod has no %s readinessGate", crr.Namespace, crr.Name, pod.Name, appspub.KruisePodReadyConditionType)
|
||||
klog.InfoS("CRR could not set Pod to not ready, because Pod has no readinessGate",
|
||||
"containerRecreateRequest", klog.KObj(crr), "pod", klog.KObj(pod), "readinessGate", appspub.KruisePodReadyConditionType)
|
||||
}
|
||||
|
||||
body := fmt.Sprintf(`{"metadata":{"annotations":{"%s":"%s"}}}`, appsv1alpha1.ContainerRecreateRequestUnreadyAcquiredKey, r.clock.Now().Format(time.RFC3339))
|
||||
|
|
|
@ -68,7 +68,7 @@ func (e *podEventHandler) handle(pod *v1.Pod, q workqueue.RateLimitingInterface)
|
|||
crrList := &appsv1alpha1.ContainerRecreateRequestList{}
|
||||
err := e.List(context.TODO(), crrList, client.InNamespace(pod.Namespace), client.MatchingLabels{appsv1alpha1.ContainerRecreateRequestPodUIDKey: string(pod.UID)})
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to get CRR List for Pod %s/%s: %v", pod.Namespace, pod.Name, err)
|
||||
klog.ErrorS(err, "Failed to get CRR List for Pod", "pod", klog.KObj(pod))
|
||||
return
|
||||
}
|
||||
for i := range crrList.Items {
|
||||
|
|
|
@ -73,7 +73,7 @@ func SetupWithManager(m manager.Manager) error {
|
|||
for _, f := range controllerAddFuncs {
|
||||
if err := f(m); err != nil {
|
||||
if kindMatchErr, ok := err.(*meta.NoKindMatchError); ok {
|
||||
klog.Infof("CRD %v is not installed, its controller will perform noops!", kindMatchErr.GroupKind)
|
||||
klog.InfoS("CRD is not installed, its controller will perform noops!", "CRD", kindMatchErr.GroupKind)
|
||||
continue
|
||||
}
|
||||
return err
|
||||
|
|
|
@ -220,7 +220,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
|
|||
err = c.Watch(source.Kind(mgr.GetCache(), &appsv1alpha1.DaemonSet{}), &handler.EnqueueRequestForObject{}, predicate.Funcs{
|
||||
CreateFunc: func(e event.CreateEvent) bool {
|
||||
ds := e.Object.(*appsv1alpha1.DaemonSet)
|
||||
klog.V(4).Infof("Adding DaemonSet %s/%s", ds.Namespace, ds.Name)
|
||||
klog.V(4).InfoS("Adding DaemonSet", "daemonSet", klog.KObj(ds))
|
||||
return true
|
||||
},
|
||||
UpdateFunc: func(e event.UpdateEvent) bool {
|
||||
|
@ -229,12 +229,12 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
|
|||
if oldDS.UID != newDS.UID {
|
||||
dsc.expectations.DeleteExpectations(logger, keyFunc(oldDS))
|
||||
}
|
||||
klog.V(4).Infof("Updating DaemonSet %s/%s", newDS.Namespace, newDS.Name)
|
||||
klog.V(4).InfoS("Updating DaemonSet", "daemonSet", klog.KObj(newDS))
|
||||
return true
|
||||
},
|
||||
DeleteFunc: func(e event.DeleteEvent) bool {
|
||||
ds := e.Object.(*appsv1alpha1.DaemonSet)
|
||||
klog.V(4).Infof("Deleting DaemonSet %s/%s", ds.Namespace, ds.Name)
|
||||
klog.V(4).InfoS("Deleting DaemonSet", "daemonSet", klog.KObj(ds))
|
||||
dsc.expectations.DeleteExpectations(logger, keyFunc(ds))
|
||||
newPodForDSCache.Delete(ds.UID)
|
||||
return true
|
||||
|
@ -258,7 +258,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
|
|||
|
||||
// TODO: Do we need to watch ControllerRevision?
|
||||
|
||||
klog.V(4).Info("finished to add daemonset-controller")
|
||||
klog.V(4).InfoS("Finished to add daemonset-controller")
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -313,12 +313,12 @@ func (dsc *ReconcileDaemonSet) Reconcile(ctx context.Context, request reconcile.
|
|||
defer func() {
|
||||
if retErr == nil {
|
||||
if res.Requeue || res.RequeueAfter > 0 {
|
||||
klog.Infof("Finished syncing DaemonSet %s, cost %v, result: %v", request, time.Since(startTime), res)
|
||||
klog.InfoS("Finished syncing DaemonSet", "daemonSet", request, "cost", time.Since(startTime), "result", res)
|
||||
} else {
|
||||
klog.Infof("Finished syncing DaemonSet %s, cost %v", request, time.Since(startTime))
|
||||
klog.InfoS("Finished syncing DaemonSet", "daemonSet", request, "cost", time.Since(startTime))
|
||||
}
|
||||
} else {
|
||||
klog.Errorf("Failed syncing DaemonSet %s: %v", request, retErr)
|
||||
klog.ErrorS(retErr, "Failed syncing DaemonSet", "daemonSet", request)
|
||||
}
|
||||
// clean the duration store
|
||||
_ = durationStore.Pop(request.String())
|
||||
|
@ -369,7 +369,7 @@ func (dsc *ReconcileDaemonSet) syncDaemonSet(ctx context.Context, request reconc
|
|||
ds, err := dsc.dsLister.DaemonSets(request.Namespace).Get(request.Name)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
klog.V(4).Infof("DaemonSet has been deleted %s", dsKey)
|
||||
klog.V(4).InfoS("DaemonSet has been deleted", "daemonSet", request)
|
||||
dsc.expectations.DeleteExpectations(logger, dsKey)
|
||||
return nil
|
||||
}
|
||||
|
@ -414,19 +414,19 @@ func (dsc *ReconcileDaemonSet) syncDaemonSet(ctx context.Context, request reconc
|
|||
minUpdatedReadyPodsIntStr := intstrutil.Parse(minUpdatedReadyPods)
|
||||
minUpdatedReadyPodsCount, err = intstrutil.GetScaledValueFromIntOrPercent(&minUpdatedReadyPodsIntStr, int(ds.Status.DesiredNumberScheduled), true)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to GetScaledValueFromIntOrPercent of minUpdatedReadyPods for %s: %v", request, err)
|
||||
klog.ErrorS(err, "Failed to GetScaledValueFromIntOrPercent of minUpdatedReadyPods for DaemonSet", "daemonSet", request)
|
||||
}
|
||||
}
|
||||
// todo: check whether the updatedReadyPodsCount greater than minUpdatedReadyPodsCount
|
||||
_ = minUpdatedReadyPodsCount
|
||||
// pre-download images for new revision
|
||||
if err := dsc.createImagePullJobsForInPlaceUpdate(ds, old, cur); err != nil {
|
||||
klog.Errorf("Failed to create ImagePullJobs for %s: %v", request, err)
|
||||
klog.ErrorS(err, "Failed to create ImagePullJobs for DaemonSet", "daemonSet", request)
|
||||
}
|
||||
} else {
|
||||
// delete ImagePullJobs if revisions have been consistent
|
||||
if err := imagejobutilfunc.DeleteJobsForWorkload(dsc.Client, ds); err != nil {
|
||||
klog.Errorf("Failed to delete imagepulljobs for %s: %v", request, err)
|
||||
klog.ErrorS(err, "Failed to delete ImagePullJobs for DaemonSet", "daemonSet", request)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -611,16 +611,16 @@ func (dsc *ReconcileDaemonSet) storeDaemonSetStatus(
|
|||
toUpdate.Status.DaemonSetHash = hash
|
||||
|
||||
if _, updateErr = dsClient.UpdateStatus(ctx, toUpdate, metav1.UpdateOptions{}); updateErr == nil {
|
||||
klog.Infof("Updated DaemonSet %s/%s status to %v", ds.Namespace, ds.Name, kruiseutil.DumpJSON(toUpdate.Status))
|
||||
klog.InfoS("Updated DaemonSet status", "daemonSet", klog.KObj(ds), "status", kruiseutil.DumpJSON(toUpdate.Status))
|
||||
return nil
|
||||
}
|
||||
|
||||
klog.Errorf("Update DaemonSet status %v failed: %v", ds.Status, updateErr)
|
||||
klog.ErrorS(updateErr, "Failed to Update DaemonSet status", "daemonSet", klog.KObj(ds), "status", ds.Status)
|
||||
// Update the set with the latest resource version for the next poll
|
||||
if toUpdate, getErr = dsClient.Get(ctx, ds.Name, metav1.GetOptions{}); getErr != nil {
|
||||
// If the GET fails we can't trust status.Replicas anymore. This error
|
||||
// is bound to be more interesting than the update failure.
|
||||
klog.Errorf("Get DaemonSet %v for status update failed: %v", ds.Name, getErr)
|
||||
klog.ErrorS(getErr, "Failed to get DaemonSet for status update", "daemonSet", klog.KObj(ds))
|
||||
return getErr
|
||||
}
|
||||
return updateErr
|
||||
|
@ -706,7 +706,7 @@ func (dsc *ReconcileDaemonSet) syncNodes(ctx context.Context, ds *appsv1alpha1.D
|
|||
// error channel to communicate back failures. make the buffer big enough to avoid any blocking
|
||||
errCh := make(chan error, createDiff+deleteDiff)
|
||||
|
||||
klog.V(4).Infof("Nodes needing daemon pods for DaemonSet %s: %+v, creating %d", ds.Name, nodesNeedingDaemonPods, createDiff)
|
||||
klog.V(4).InfoS("Nodes needing daemon pods for DaemonSet", "daemonSet", klog.KObj(ds), "nodes", nodesNeedingDaemonPods, "count", createDiff)
|
||||
createWait := sync.WaitGroup{}
|
||||
// If the returned error is not nil we have a parse error.
|
||||
// The controller handles this via the hash.
|
||||
|
@ -765,7 +765,7 @@ func (dsc *ReconcileDaemonSet) syncNodes(ctx context.Context, ds *appsv1alpha1.D
|
|||
}
|
||||
}
|
||||
if err != nil {
|
||||
klog.V(2).Infof("Failed creation, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
|
||||
klog.V(2).InfoS("Failed creation, decrementing expectations for DaemonSet", "daemonSet", klog.KObj(ds))
|
||||
dsc.expectations.CreationObserved(logger, dsKey)
|
||||
errCh <- err
|
||||
utilruntime.HandleError(err)
|
||||
|
@ -776,7 +776,7 @@ func (dsc *ReconcileDaemonSet) syncNodes(ctx context.Context, ds *appsv1alpha1.D
|
|||
// any skipped pods that we never attempted to start shouldn't be expected.
|
||||
skippedPods := createDiff - (batchSize + pos)
|
||||
if errorCount < len(errCh) && skippedPods > 0 {
|
||||
klog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for set %q/%q", skippedPods, ds.Namespace, ds.Name)
|
||||
klog.V(2).InfoS("Slow-start failure. Skipping creation of pods, decrementing expectations for DaemonSet", "skippedPodCount", skippedPods, "daemonSet", klog.KObj(ds))
|
||||
dsc.expectations.LowerExpectations(logger, dsKey, skippedPods, 0)
|
||||
// The skipped pods will be retried later. The next controller resync will
|
||||
// retry the slow start process.
|
||||
|
@ -784,7 +784,7 @@ func (dsc *ReconcileDaemonSet) syncNodes(ctx context.Context, ds *appsv1alpha1.D
|
|||
}
|
||||
}
|
||||
|
||||
klog.V(4).Infof("Pods to delete for DaemonSet %s: %+v, deleting %d", ds.Name, podsToDelete, deleteDiff)
|
||||
klog.V(4).InfoS("Pods to delete for DaemonSet", "daemonSet", klog.KObj(ds), "podsToDelete", podsToDelete, "count", deleteDiff)
|
||||
deleteWait := sync.WaitGroup{}
|
||||
deleteWait.Add(deleteDiff)
|
||||
for i := 0; i < deleteDiff; i++ {
|
||||
|
@ -793,7 +793,7 @@ func (dsc *ReconcileDaemonSet) syncNodes(ctx context.Context, ds *appsv1alpha1.D
|
|||
if err := dsc.podControl.DeletePod(ctx, ds.Namespace, podsToDelete[ix], ds); err != nil {
|
||||
dsc.expectations.DeletionObserved(logger, dsKey)
|
||||
if !errors.IsNotFound(err) {
|
||||
klog.V(2).Infof("Failed deletion, decremented expectations for set %q/%q", ds.Namespace, ds.Name)
|
||||
klog.V(2).InfoS("Failed deletion, decremented expectations for DaemonSet", "daemonSet", klog.KObj(ds))
|
||||
errCh <- err
|
||||
utilruntime.HandleError(err)
|
||||
}
|
||||
|
@ -827,7 +827,7 @@ func (dsc *ReconcileDaemonSet) syncWithPreparingDelete(ds *appsv1alpha1.DaemonSe
|
|||
if updated, gotPod, err := dsc.lifecycleControl.UpdatePodLifecycle(pod, appspub.LifecycleStatePreparingDelete, markPodNotReady); err != nil {
|
||||
return nil, err
|
||||
} else if updated {
|
||||
klog.V(3).Infof("DaemonSet %s/%s has marked Pod %s as PreparingDelete", ds.Namespace, ds.Name, podName)
|
||||
klog.V(3).InfoS("DaemonSet has marked Pod as PreparingDelete", "daemonSet", klog.KObj(ds), "podName", podName)
|
||||
dsc.resourceVersionExpectations.Expect(gotPod)
|
||||
}
|
||||
}
|
||||
|
@ -869,21 +869,21 @@ func (dsc *ReconcileDaemonSet) podsShouldBeOnNode(
|
|||
inBackoff := dsc.failedPodsBackoff.IsInBackOffSinceUpdate(backoffKey, now)
|
||||
if inBackoff {
|
||||
delay := dsc.failedPodsBackoff.Get(backoffKey)
|
||||
klog.V(4).Infof("Deleting failed pod %s/%s on node %s has been limited by backoff - %v remaining",
|
||||
pod.Namespace, pod.Name, node.Name, delay)
|
||||
klog.V(4).InfoS("Deleting failed pod on node has been limited by backoff",
|
||||
"pod", klog.KObj(pod), "nodeName", node.Name, "backoffDelay", delay)
|
||||
durationStore.Push(keyFunc(ds), delay)
|
||||
continue
|
||||
}
|
||||
|
||||
dsc.failedPodsBackoff.Next(backoffKey, now)
|
||||
|
||||
msg := fmt.Sprintf("Found failed daemon pod %s/%s on node %s, will try to kill it", pod.Namespace, pod.Name, node.Name)
|
||||
klog.V(2).Infof(msg)
|
||||
klog.V(2).InfoS("Found failed daemon pod on node, will try to kill it", "pod", klog.KObj(pod), "nodeName", node.Name)
|
||||
// Emit an event so that it's discoverable to users.
|
||||
dsc.eventRecorder.Eventf(ds, corev1.EventTypeWarning, FailedDaemonPodReason, msg)
|
||||
dsc.eventRecorder.Eventf(ds, corev1.EventTypeWarning, FailedDaemonPodReason,
|
||||
fmt.Sprintf("Found failed daemon pod %s/%s on node %s, will try to kill it", pod.Namespace, pod.Name, node.Name))
|
||||
podsToDelete = append(podsToDelete, pod.Name)
|
||||
} else if isPodPreDeleting(pod) {
|
||||
klog.V(3).Infof("Found daemon pod %s/%s on node %s is in PreparingDelete state, will try to kill it", pod.Namespace, pod.Name, node.Name)
|
||||
klog.V(3).InfoS("Found daemon pod on node is in PreparingDelete state, will try to kill it", "pod", klog.KObj(pod), "nodeName", node.Name)
|
||||
podsToDelete = append(podsToDelete, pod.Name)
|
||||
} else {
|
||||
daemonPodsRunning = append(daemonPodsRunning, pod)
|
||||
|
@ -935,10 +935,12 @@ func (dsc *ReconcileDaemonSet) podsShouldBeOnNode(
|
|||
if oldestNewPod != nil && oldestOldPod != nil {
|
||||
switch {
|
||||
case !podutil.IsPodReady(oldestOldPod):
|
||||
klog.V(5).Infof("Pod %s/%s from daemonset %s is no longer ready and will be replaced with newer pod %s", oldestOldPod.Namespace, oldestOldPod.Name, ds.Name, oldestNewPod.Name)
|
||||
klog.V(5).InfoS("Pod from DaemonSet is no longer ready and will be replaced with newer pod",
|
||||
"daemonSet", klog.KObj(ds), "oldestOldPod", klog.KObj(oldestOldPod), "oldestNewPod", klog.KObj(oldestNewPod))
|
||||
podsToDelete = append(podsToDelete, oldestOldPod.Name)
|
||||
case podutil.IsPodAvailable(oldestNewPod, ds.Spec.MinReadySeconds, metav1.Time{Time: dsc.failedPodsBackoff.Clock.Now()}):
|
||||
klog.V(5).Infof("Pod %s/%s from daemonset %s is now ready and will replace older pod %s", oldestNewPod.Namespace, oldestNewPod.Name, ds.Name, oldestOldPod.Name)
|
||||
klog.V(5).InfoS("Pod from DaemonSet is now ready and will replace older pod",
|
||||
"daemonSet", klog.KObj(ds), "oldestOldPod", klog.KObj(oldestOldPod), "oldestNewPod", klog.KObj(oldestNewPod))
|
||||
podsToDelete = append(podsToDelete, oldestOldPod.Name)
|
||||
case podutil.IsPodReady(oldestNewPod) && ds.Spec.MinReadySeconds > 0:
|
||||
durationStore.Push(keyFunc(ds), podAvailableWaitingTime(oldestNewPod, ds.Spec.MinReadySeconds, dsc.failedPodsBackoff.Clock.Now()))
|
||||
|
@ -951,7 +953,7 @@ func (dsc *ReconcileDaemonSet) podsShouldBeOnNode(
|
|||
if pod.DeletionTimestamp != nil {
|
||||
continue
|
||||
}
|
||||
klog.V(5).Infof("If daemon pod isn't supposed to run on node %s, but it is, delete daemon pod %s/%s on node.", node.Name, pod.Namespace, pod.Name)
|
||||
klog.V(5).InfoS("If daemon pod isn't supposed to run on node, but it is, delete daemon pod on node.", "nodeName", node.Name, "pod", klog.KObj(pod))
|
||||
podsToDelete = append(podsToDelete, pod.Name)
|
||||
}
|
||||
}
|
||||
|
@ -973,7 +975,7 @@ func (dsc *ReconcileDaemonSet) getNodesToDaemonPods(ctx context.Context, ds *app
|
|||
for _, pod := range claimedPods {
|
||||
nodeName, err := util.GetTargetNodeName(pod)
|
||||
if err != nil {
|
||||
klog.Warningf("Failed to get target node name of Pod %v/%v in DaemonSet %v/%v", pod.Namespace, pod.Name, ds.Namespace, ds.Name)
|
||||
klog.InfoS("Failed to get target node name of Pod in DaemonSet", "pod", klog.KObj(pod), "daemonSet", klog.KObj(ds))
|
||||
continue
|
||||
}
|
||||
nodeToDaemonPods[nodeName] = append(nodeToDaemonPods[nodeName], pod)
|
||||
|
@ -1066,7 +1068,7 @@ func (dsc *ReconcileDaemonSet) refreshUpdateStates(ctx context.Context, ds *apps
|
|||
}
|
||||
res := dsc.inplaceControl.Refresh(pod, opts)
|
||||
if res.RefreshErr != nil {
|
||||
klog.Errorf("DaemonSet %s/%s failed to update pod %s condition for inplace: %v", ds.Namespace, ds.Name, pod.Name, res.RefreshErr)
|
||||
klog.ErrorS(res.RefreshErr, "DaemonSet failed to update pod condition for inplace", "daemonSet", klog.KObj(ds), "pod", klog.KObj(pod))
|
||||
return res.RefreshErr
|
||||
}
|
||||
if res.DelayDuration != 0 {
|
||||
|
@ -1081,7 +1083,7 @@ func (dsc *ReconcileDaemonSet) hasPodExpectationsSatisfied(ctx context.Context,
|
|||
dsKey := keyFunc(ds)
|
||||
pods, err := dsc.getDaemonPods(ctx, ds)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to get pods for DaemonSet")
|
||||
klog.ErrorS(err, "Failed to get pods for DaemonSet")
|
||||
return false
|
||||
}
|
||||
|
||||
|
@ -1089,9 +1091,10 @@ func (dsc *ReconcileDaemonSet) hasPodExpectationsSatisfied(ctx context.Context,
|
|||
dsc.resourceVersionExpectations.Observe(pod)
|
||||
if isSatisfied, unsatisfiedDuration := dsc.resourceVersionExpectations.IsSatisfied(pod); !isSatisfied {
|
||||
if unsatisfiedDuration >= kruiseExpectations.ExpectationTimeout {
|
||||
klog.Errorf("Expectation unsatisfied resourceVersion overtime for %v, wait for pod %v updating, timeout=%v", dsKey, pod.Name, unsatisfiedDuration)
|
||||
klog.ErrorS(nil, "Expectation unsatisfied resourceVersion overtime for DaemonSet, wait for pod updating",
|
||||
"daemonSet", klog.KObj(ds), "pod", klog.KObj(pod), "timeout", unsatisfiedDuration)
|
||||
} else {
|
||||
klog.V(5).Infof("Not satisfied resourceVersion for %v, wait for pod %v updating", dsKey, pod.Name)
|
||||
klog.V(5).InfoS("Not satisfied resourceVersion for DaemonSet, wait for pod updating", "daemonSet", klog.KObj(ds), "pod", klog.KObj(pod))
|
||||
durationStore.Push(dsKey, kruiseExpectations.ExpectationTimeout-unsatisfiedDuration)
|
||||
}
|
||||
return false
|
||||
|
|
|
@ -369,7 +369,7 @@ func clearExpectations(t *testing.T, manager *daemonSetsController, ds *appsv1al
|
|||
}
|
||||
sort.Strings(lines)
|
||||
for _, line := range lines {
|
||||
klog.Info(line)
|
||||
klog.InfoS(line)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -69,7 +69,7 @@ func (e *podEventHandler) Create(ctx context.Context, evt event.CreateEvent, q w
|
|||
if ds == nil {
|
||||
return
|
||||
}
|
||||
klog.V(4).Infof("Pod %s/%s added.", pod.Namespace, pod.Name)
|
||||
klog.V(4).InfoS("Pod added", "pod", klog.KObj(pod))
|
||||
e.expectations.CreationObserved(logger, keyFunc(ds))
|
||||
enqueueDaemonSet(q, ds)
|
||||
return
|
||||
|
@ -83,7 +83,7 @@ func (e *podEventHandler) Create(ctx context.Context, evt event.CreateEvent, q w
|
|||
if len(dsList) == 0 {
|
||||
return
|
||||
}
|
||||
klog.V(4).Infof("Orphan Pod %s/%s created, matched owner: %s", pod.Namespace, pod.Name, joinDaemonSetNames(dsList))
|
||||
klog.V(4).InfoS("Orphan Pod created", "pod", klog.KObj(pod), "owner", joinDaemonSetNames(dsList))
|
||||
for _, ds := range dsList {
|
||||
enqueueDaemonSet(q, ds)
|
||||
}
|
||||
|
@ -131,7 +131,7 @@ func (e *podEventHandler) Update(ctx context.Context, evt event.UpdateEvent, q w
|
|||
if ds == nil {
|
||||
return
|
||||
}
|
||||
klog.V(4).Infof("Pod %s/%s updated, owner: %s", curPod.Namespace, curPod.Name, ds.Name)
|
||||
klog.V(4).InfoS("Pod updated", "pod", klog.KObj(curPod), "owner", klog.KObj(ds))
|
||||
enqueueDaemonSet(q, ds)
|
||||
return
|
||||
}
|
||||
|
@ -142,7 +142,7 @@ func (e *podEventHandler) Update(ctx context.Context, evt event.UpdateEvent, q w
|
|||
if len(dsList) == 0 {
|
||||
return
|
||||
}
|
||||
klog.V(4).Infof("Orphan Pod %s/%s updated, matched owner: %s", curPod.Namespace, curPod.Name, joinDaemonSetNames(dsList))
|
||||
klog.V(4).InfoS("Orphan Pod updated", "pod", klog.KObj(curPod), "owner", joinDaemonSetNames(dsList))
|
||||
labelChanged := !reflect.DeepEqual(curPod.Labels, oldPod.Labels)
|
||||
if labelChanged || controllerRefChanged {
|
||||
for _, ds := range dsList {
|
||||
|
@ -154,7 +154,7 @@ func (e *podEventHandler) Update(ctx context.Context, evt event.UpdateEvent, q w
|
|||
func (e *podEventHandler) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) {
|
||||
pod, ok := evt.Object.(*v1.Pod)
|
||||
if !ok {
|
||||
klog.Errorf("DeleteEvent parse pod failed, DeleteStateUnknown: %#v, obj: %#v", evt.DeleteStateUnknown, evt.Object)
|
||||
klog.ErrorS(nil, "Skipped pod deletion event", "deleteStateUnknown", evt.DeleteStateUnknown, "object", evt.Object)
|
||||
return
|
||||
}
|
||||
e.deletePod(ctx, pod, q, true)
|
||||
|
@ -177,9 +177,9 @@ func (e *podEventHandler) deletePod(ctx context.Context, pod *v1.Pod, q workqueu
|
|||
}
|
||||
if isDeleted {
|
||||
e.deletionUIDCache.Delete(pod.UID)
|
||||
klog.V(4).Infof("Pod %s/%s deleted, owner: %s", pod.Namespace, pod.Name, ds.Name)
|
||||
klog.V(4).InfoS("Pod deleted", "pod", klog.KObj(pod), "owner", klog.KObj(ds))
|
||||
} else {
|
||||
klog.V(4).Infof("Pod %s/%s terminating, owner: %s", pod.Namespace, pod.Name, ds.Name)
|
||||
klog.V(4).InfoS("Pod terminating", "pod", klog.KObj(pod), "owner", klog.KObj(ds))
|
||||
}
|
||||
enqueueDaemonSet(q, ds)
|
||||
}
|
||||
|
@ -225,7 +225,7 @@ func (e *podEventHandler) getPodDaemonSets(pod *v1.Pod) []*appsv1alpha1.DaemonSe
|
|||
if len(dsMatched) > 1 {
|
||||
// ControllerRef will ensure we don't do anything crazy, but more than one
|
||||
// item in this list nevertheless constitutes user error.
|
||||
klog.Warningf("Error! More than one DaemonSet is selecting pod %s/%s : %s", pod.Namespace, pod.Name, joinDaemonSetNames(dsMatched))
|
||||
klog.InfoS("Error! More than one DaemonSet is selecting pod", "pod", klog.KObj(pod), "daemonSets", joinDaemonSetNames(dsMatched))
|
||||
}
|
||||
return dsMatched
|
||||
}
|
||||
|
@ -240,7 +240,7 @@ func (e *nodeEventHandler) Create(ctx context.Context, evt event.CreateEvent, q
|
|||
dsList := &appsv1alpha1.DaemonSetList{}
|
||||
err := e.reader.List(context.TODO(), dsList)
|
||||
if err != nil {
|
||||
klog.V(4).Infof("Error enqueueing daemon sets: %v", err)
|
||||
klog.V(4).ErrorS(err, "Error enqueueing DaemonSets")
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -266,7 +266,7 @@ func (e *nodeEventHandler) Update(ctx context.Context, evt event.UpdateEvent, q
|
|||
dsList := &appsv1alpha1.DaemonSetList{}
|
||||
err := e.reader.List(context.TODO(), dsList)
|
||||
if err != nil {
|
||||
klog.V(4).Infof("Error listing daemon sets: %v", err)
|
||||
klog.V(4).ErrorS(err, "Error listing DaemonSets")
|
||||
return
|
||||
}
|
||||
// TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too).
|
||||
|
@ -276,7 +276,7 @@ func (e *nodeEventHandler) Update(ctx context.Context, evt event.UpdateEvent, q
|
|||
currentShouldRun, currentShouldContinueRunning := nodeShouldRunDaemonPod(curNode, ds)
|
||||
if (oldShouldRun != currentShouldRun) || (oldShouldContinueRunning != currentShouldContinueRunning) ||
|
||||
(NodeShouldUpdateBySelector(oldNode, ds) != NodeShouldUpdateBySelector(curNode, ds)) {
|
||||
klog.V(6).Infof("update node: %s triggers DaemonSet %s/%s to reconcile.", curNode.Name, ds.GetNamespace(), ds.GetName())
|
||||
klog.V(6).InfoS("Update node triggers DaemonSet to reconcile", "nodeName", curNode.Name, "daemonSet", klog.KObj(ds))
|
||||
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
|
||||
Name: ds.GetName(),
|
||||
Namespace: ds.GetNamespace(),
|
||||
|
|
|
@ -226,7 +226,7 @@ func (dsc *ReconcileDaemonSet) snapshot(ctx context.Context, ds *appsv1alpha1.Da
|
|||
if updateErr != nil {
|
||||
return nil, updateErr
|
||||
}
|
||||
klog.V(2).Infof("Found a hash collision for DaemonSet %q - bumping collisionCount to %d to resolve it", ds.Name, *currDS.Status.CollisionCount)
|
||||
klog.V(2).InfoS("Found a hash collision for DaemonSet - bumping collisionCount to resolve it", "daemonSet", klog.KObj(ds), "collisionCount", *currDS.Status.CollisionCount)
|
||||
return nil, outerErr
|
||||
}
|
||||
return history, err
|
||||
|
|
|
@ -52,8 +52,8 @@ func (dsc *ReconcileDaemonSet) createImagePullJobsForInPlaceUpdate(ds *appsv1alp
|
|||
maxUnavailable, _ = intstrutil.GetValueFromIntOrPercent(
|
||||
intstrutil.ValueOrDefault(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, intstrutil.FromInt(1)), dsPodsNumber, false)
|
||||
if partition == 0 && maxUnavailable >= dsPodsNumber {
|
||||
klog.V(4).Infof("DaemonSet %s/%s skipped to create ImagePullJob for all Pods update in one batch, replicas=%d, partition=%d, maxUnavailable=%d",
|
||||
ds.Namespace, ds.Name, dsPodsNumber, partition, maxUnavailable)
|
||||
klog.V(4).InfoS("DaemonSet skipped to create ImagePullJob for all Pods update in one batch",
|
||||
"daemonSet", klog.KObj(ds), "replicas", dsPodsNumber, "partition", partition, "maxUnavailable", maxUnavailable)
|
||||
return dsc.patchControllerRevisionLabels(updateRevision, appsv1alpha1.ImagePreDownloadIgnoredKey, "true")
|
||||
}
|
||||
|
||||
|
@ -85,20 +85,20 @@ func (dsc *ReconcileDaemonSet) createImagePullJobsForInPlaceUpdate(ds *appsv1alp
|
|||
}
|
||||
|
||||
containerImages := diffImagesBetweenRevisions(oldRevisions, updateRevision)
|
||||
klog.V(3).Infof("DaemonSet %s/%s begin to create ImagePullJobs for revision %s: %v",
|
||||
ds.Namespace, ds.Name, updateRevision.Name, containerImages)
|
||||
klog.V(3).InfoS("DaemonSet begin to create ImagePullJobs for revision",
|
||||
"daemonSet", klog.KObj(ds), "revision", klog.KObj(updateRevision), "containerImageNames", containerImages)
|
||||
for name, image := range containerImages {
|
||||
// job name is revision name + container name, it can not be more than 255 characters
|
||||
jobName := fmt.Sprintf("%s-%s", updateRevision.Name, name)
|
||||
err := imagejobutilfunc.CreateJobForWorkload(dsc.Client, ds, controllerKind, jobName, image, labelMap, annotationMap, *selector, pullSecrets)
|
||||
if err != nil {
|
||||
if !errors.IsAlreadyExists(err) {
|
||||
klog.Errorf("DaemonSet %s/%s failed to create ImagePullJob %s: %v", ds.Namespace, ds.Name, jobName, err)
|
||||
klog.ErrorS(err, "DaemonSet failed to create ImagePullJob", "daemonSet", klog.KObj(ds), "jobName", jobName)
|
||||
dsc.eventRecorder.Eventf(ds, v1.EventTypeNormal, "FailedCreateImagePullJob", "failed to create ImagePullJob %s: %v", jobName, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
klog.V(3).Infof("DaemonSet %s/%s created ImagePullJob %s for image: %s", ds.Namespace, ds.Name, jobName, image)
|
||||
klog.V(3).InfoS("DaemonSet created ImagePullJob for image", "daemonSet", klog.KObj(ds), "jobName", jobName, "image", image)
|
||||
dsc.eventRecorder.Eventf(ds, v1.EventTypeNormal, "CreatedImagePullJob", "created ImagePullJob %s for image: %s", jobName, image)
|
||||
}
|
||||
|
||||
|
|
|
@ -79,7 +79,7 @@ func (dsc *ReconcileDaemonSet) rollingUpdate(ctx context.Context, ds *appsv1alph
|
|||
newPod, oldPod, ok := findUpdatedPodsOnNode(ds, pods, hash)
|
||||
if !ok {
|
||||
// let the manage loop clean up this node, and treat it as an unavailable node
|
||||
klog.V(3).Infof("DaemonSet %s/%s has excess pods on node %s, skipping to allow the core loop to process", ds.Namespace, ds.Name, nodeName)
|
||||
klog.V(3).InfoS("DaemonSet had excess pods on node, skipped to allow the core loop to process", "daemonSet", klog.KObj(ds), "nodeName", nodeName)
|
||||
numUnavailable++
|
||||
continue
|
||||
}
|
||||
|
@ -87,25 +87,25 @@ func (dsc *ReconcileDaemonSet) rollingUpdate(ctx context.Context, ds *appsv1alph
|
|||
case isPodNilOrPreDeleting(oldPod) && isPodNilOrPreDeleting(newPod), !isPodNilOrPreDeleting(oldPod) && !isPodNilOrPreDeleting(newPod):
|
||||
// the manage loop will handle creating or deleting the appropriate pod, consider this unavailable
|
||||
numUnavailable++
|
||||
klog.V(5).Infof("DaemonSet %s/%s find no pods (or pre-deleting) on node %s ", ds.Namespace, ds.Name, nodeName)
|
||||
klog.V(5).InfoS("DaemonSet found no pods (or pre-deleting) on node", "daemonSet", klog.KObj(ds), "nodeName", nodeName)
|
||||
case newPod != nil:
|
||||
// this pod is up to date, check its availability
|
||||
if !podutil.IsPodAvailable(newPod, ds.Spec.MinReadySeconds, metav1.Time{Time: now}) {
|
||||
// an unavailable new pod is counted against maxUnavailable
|
||||
numUnavailable++
|
||||
klog.V(5).Infof("DaemonSet %s/%s pod %s on node %s is new and unavailable", ds.Namespace, ds.Name, newPod.Name, nodeName)
|
||||
klog.V(5).InfoS("DaemonSet pod on node was new and unavailable", "daemonSet", klog.KObj(ds), "pod", klog.KObj(newPod), "nodeName", nodeName)
|
||||
}
|
||||
if isPodPreDeleting(newPod) {
|
||||
// a pre-deleting new pod is counted against maxUnavailable
|
||||
numUnavailable++
|
||||
klog.V(5).Infof("DaemonSet %s/%s pod %s on node %s is pre-deleting", ds.Namespace, ds.Name, newPod.Name, nodeName)
|
||||
klog.V(5).InfoS("DaemonSet pod on node was pre-deleting", "daemonSet", klog.KObj(ds), "pod", klog.KObj(newPod), "nodeName", nodeName)
|
||||
}
|
||||
default:
|
||||
// this pod is old, it is an update candidate
|
||||
switch {
|
||||
case !podutil.IsPodAvailable(oldPod, ds.Spec.MinReadySeconds, metav1.Time{Time: now}):
|
||||
// the old pod isn't available, so it needs to be replaced
|
||||
klog.V(5).Infof("DaemonSet %s/%s pod %s on node %s is out of date and not available, allowing replacement", ds.Namespace, ds.Name, oldPod.Name, nodeName)
|
||||
klog.V(5).InfoS("DaemonSet pod on node was out of date and not available, allowed replacement", "daemonSet", klog.KObj(ds), "pod", klog.KObj(oldPod), "nodeName", nodeName)
|
||||
// record the replacement
|
||||
if allowedReplacementPods == nil {
|
||||
allowedReplacementPods = make([]string, 0, len(nodeToDaemonPods))
|
||||
|
@ -115,7 +115,7 @@ func (dsc *ReconcileDaemonSet) rollingUpdate(ctx context.Context, ds *appsv1alph
|
|||
// no point considering any other candidates
|
||||
continue
|
||||
default:
|
||||
klog.V(5).Infof("DaemonSet %s/%s pod %s on node %s is out of date, this is a candidate to replace", ds.Namespace, ds.Name, oldPod.Name, nodeName)
|
||||
klog.V(5).InfoS("DaemonSet pod on node was out of date, it was a candidate to replace", "daemonSet", klog.KObj(ds), "pod", klog.KObj(oldPod), "nodeName", nodeName)
|
||||
// record the candidate
|
||||
if candidatePodsToDelete == nil {
|
||||
candidatePodsToDelete = make([]string, 0, maxUnavailable)
|
||||
|
@ -126,7 +126,8 @@ func (dsc *ReconcileDaemonSet) rollingUpdate(ctx context.Context, ds *appsv1alph
|
|||
}
|
||||
|
||||
// use any of the candidates we can, including the allowedReplacemnntPods
|
||||
klog.V(5).Infof("DaemonSet %s/%s allowing %d replacements, up to %d unavailable, %d new are unavailable, %d candidates", ds.Namespace, ds.Name, len(allowedReplacementPods), maxUnavailable, numUnavailable, len(candidatePodsToDelete))
|
||||
klog.V(5).InfoS("DaemonSet allowing replacements, including some new unavailable and candidate pods, up to maxUnavailable",
|
||||
"daemonSet", klog.KObj(ds), "allowedReplacementPodCount", len(allowedReplacementPods), "maxUnavailable", maxUnavailable, "numUnavailable", numUnavailable, "candidatePodsToDeleteCount", len(candidatePodsToDelete))
|
||||
remainingUnavailable := maxUnavailable - numUnavailable
|
||||
if remainingUnavailable < 0 {
|
||||
remainingUnavailable = 0
|
||||
|
@ -169,7 +170,7 @@ func (dsc *ReconcileDaemonSet) rollingUpdate(ctx context.Context, ds *appsv1alph
|
|||
newPod, oldPod, ok := findUpdatedPodsOnNode(ds, pods, hash)
|
||||
if !ok {
|
||||
// let the manage loop clean up this node, and treat it as a surge node
|
||||
klog.V(3).Infof("DaemonSet %s/%s has excess pods on node %s, skipping to allow the core loop to process", ds.Namespace, ds.Name, nodeName)
|
||||
klog.V(3).InfoS("DaemonSet has excess pods on node, skipping to allow the core loop to process", "daemonSet", klog.KObj(ds), "nodeName", nodeName)
|
||||
numSurge++
|
||||
continue
|
||||
}
|
||||
|
@ -181,7 +182,7 @@ func (dsc *ReconcileDaemonSet) rollingUpdate(ctx context.Context, ds *appsv1alph
|
|||
switch {
|
||||
case !podutil.IsPodAvailable(oldPod, ds.Spec.MinReadySeconds, metav1.Time{Time: now}):
|
||||
// the old pod isn't available, allow it to become a replacement
|
||||
klog.V(5).Infof("DaemonSet %s/%s Pod %s on node %s is out of date and not available, allowing replacement", ds.Namespace, ds.Name, oldPod.Name, nodeName)
|
||||
klog.V(5).InfoS("DaemonSet Pod on node was out of date and not available, allowed replacement", "daemonSet", klog.KObj(ds), "pod", klog.KObj(oldPod), "nodeName", nodeName)
|
||||
// record the replacement
|
||||
if allowedNewNodes == nil {
|
||||
allowedNewNodes = make([]string, 0, len(nodeToDaemonPods))
|
||||
|
@ -191,7 +192,7 @@ func (dsc *ReconcileDaemonSet) rollingUpdate(ctx context.Context, ds *appsv1alph
|
|||
// no point considering any other candidates
|
||||
continue
|
||||
default:
|
||||
klog.V(5).Infof("DaemonSet %s/%s pod %s on node %s is out of date, this is a surge candidate", ds.Namespace, ds.Name, oldPod.Name, nodeName)
|
||||
klog.V(5).InfoS("DaemonSet pod on node was out of date, so it was a surge candidate", "daemonSet", klog.KObj(ds), "pod", klog.KObj(oldPod), "nodeName", nodeName)
|
||||
// record the candidate
|
||||
if candidateNewNodes == nil {
|
||||
candidateNewNodes = make([]string, 0, maxSurge)
|
||||
|
@ -210,13 +211,14 @@ func (dsc *ReconcileDaemonSet) rollingUpdate(ctx context.Context, ds *appsv1alph
|
|||
continue
|
||||
}
|
||||
// we're available, delete the old pod
|
||||
klog.V(5).Infof("DaemonSet %s/%s pod %s on node %s is available, remove %s", ds.Namespace, ds.Name, newPod.Name, nodeName, oldPod.Name)
|
||||
klog.V(5).InfoS("DaemonSet pod on node was available, removed old pod", "daemonSet", klog.KObj(ds), "newPod", klog.KObj(newPod), "oldPod", klog.KObj(oldPod), "nodeName", nodeName)
|
||||
oldPodsToDelete = append(oldPodsToDelete, oldPod.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// use any of the candidates we can, including the allowedNewNodes
|
||||
klog.V(5).Infof("DaemonSet %s/%s allowing %d replacements, surge up to %d, %d are in progress, %d candidates", ds.Namespace, ds.Name, len(allowedNewNodes), maxSurge, numSurge, len(candidateNewNodes))
|
||||
klog.V(5).InfoS("DaemonSet allowing replacements, surge up to maxSurge",
|
||||
"daemonSet", klog.KObj(ds), "allowedNewNodeCount", len(allowedNewNodes), "maxSurge", maxSurge, "inProgressCount", numSurge, "candidateCount", len(candidateNewNodes))
|
||||
remainingSurge := maxSurge - numSurge
|
||||
if remainingSurge < 0 {
|
||||
remainingSurge = 0
|
||||
|
@ -259,10 +261,10 @@ func (dsc *ReconcileDaemonSet) updatedDesiredNodeCounts(ds *appsv1alpha1.DaemonS
|
|||
// if the daemonset returned with an impossible configuration, obey the default of unavailable=1 (in the
|
||||
// event the apiserver returns 0 for both surge and unavailability)
|
||||
if desiredNumberScheduled > 0 && maxUnavailable == 0 && maxSurge == 0 {
|
||||
klog.Warningf("DaemonSet %s/%s is not configured for surge or unavailability, defaulting to accepting unavailability", ds.Namespace, ds.Name)
|
||||
klog.InfoS("DaemonSet was not configured for surge or unavailability, defaulting to accepting unavailability", "daemonSet", klog.KObj(ds))
|
||||
maxUnavailable = 1
|
||||
}
|
||||
klog.V(5).Infof("DaemonSet %s/%s, maxSurge: %d, maxUnavailable: %d", ds.Namespace, ds.Name, maxSurge, maxUnavailable)
|
||||
klog.V(5).InfoS("DaemonSet with maxSurge and maxUnavailable", "daemonSet", klog.KObj(ds), "maxSurge", maxSurge, "maxUnavailable", maxUnavailable)
|
||||
return maxSurge, maxUnavailable, nil
|
||||
}
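The pattern applied in the hunks above (and throughout this commit) is: format-string calls such as klog.Infof/Errorf/Warningf become structured klog.InfoS/ErrorS calls with key/value pairs, and namespaced objects are passed through klog.KObj instead of interpolating Namespace/Name. A minimal sketch of the same pattern, outside this commit and using the upstream DaemonSet type purely for illustration:

package example

import (
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/klog/v2"
)

func logDaemonSetEvent(ds *appsv1.DaemonSet, nodeName string, err error) {
	// Before: klog.V(5).Infof("DaemonSet %s/%s pod on node %s is out of date", ds.Namespace, ds.Name, nodeName)
	klog.V(5).InfoS("DaemonSet pod on node is out of date", "daemonSet", klog.KObj(ds), "nodeName", nodeName)
	// Before: klog.Errorf("DaemonSet %s/%s update failed: %v", ds.Namespace, ds.Name, err)
	klog.ErrorS(err, "DaemonSet update failed", "daemonSet", klog.KObj(ds))
}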
@@ -337,7 +337,7 @@ func setPodReadiness(t *testing.T, dsc *daemonSetsController, ready bool, count
// TODO: workaround UpdatePodCondition calling time.Now() directly
setCondition := podutil.GetPodReadyCondition(pod.Status)
setCondition.LastTransitionTime.Time = dsc.failedPodsBackoff.Clock.Now()
klog.Infof("marked pod %s ready=%t", pod.Name, ready)
klog.InfoS("Marked pod ready status", "pod", klog.KObj(pod), "ready", ready)
count--
}
if count > 0 {

@@ -94,7 +94,7 @@ func parseEphemeralContainerStatus(status *v1.ContainerStatus) ephemeralContaine
func parseEphemeralPodStatus(ejob *appsv1alpha1.EphemeralJob, statuses []v1.ContainerStatus) (v1.PodPhase, error) {
eContainerMap, empty := getEphemeralContainersMaps(ejob.Spec.Template.EphemeralContainers)
if empty {
klog.Error("ephemeral job spec containers is empty")
klog.InfoS("EphemeralJob spec containers is empty")
return v1.PodUnknown, fmt.Errorf("ephemeral job %s/%s spec containers is empty. ", ejob.Namespace, ejob.Name)
}
@@ -105,7 +105,7 @@ func parseEphemeralPodStatus(ejob *appsv1alpha1.EphemeralJob, statuses []v1.Cont
}
status := parseEphemeralContainerStatus(&eContainerStatus)
klog.V(5).Infof("parse ephemeral container %s status %v", eContainerStatus.Name, status)
klog.V(5).InfoS("Parse ephemeral container status", "ephemeralContainerStatusName", eContainerStatus.Name, "status", status)
switch status {
case FailedStatus:
return v1.PodFailed, nil
@@ -151,7 +151,7 @@ func (k *k8sControl) CreateEphemeralContainer(targetPod *v1.Pod) error {
// The apiserver will return a 404 when the EphemeralContainers feature is disabled because the `/ephemeralcontainers` subresource
// is missing. Unlike the 404 returned by a missing pod, the status details will be empty.
if serr, ok := err.(*errors.StatusError); ok && serr.Status().Reason == metav1.StatusReasonNotFound && serr.ErrStatus.Details.Name == "" {
klog.Errorf("ephemeral containers are disabled for this cluster (error from server: %q).", err)
klog.ErrorS(err, "Ephemeral containers were disabled for this cluster (error from server)")
return nil
}
@@ -159,7 +159,7 @@ func (k *k8sControl) CreateEphemeralContainer(targetPod *v1.Pod) error {
// Kind the api server will respond with a not-registered error. When this happens we can optimistically try
// using the old API.
if runtime.IsNotRegisteredError(err) {
klog.V(1).Infof("Falling back to legacy ephemeral container API because server returned error: %v", err)
klog.V(1).ErrorS(err, "Falling back to legacy ephemeral container API because server returned error")
return k.createEphemeralContainerLegacy(targetPod, eContainer)
}
}
@@ -178,7 +178,7 @@ func (k *k8sControl) createEphemeralContainer(targetPod *v1.Pod, eContainer []v1
return fmt.Errorf("error creating patch to add ephemeral containers: %v", err)
}
klog.Infof("EphemeralJob %s/%s tries to patch containers to pod %s: %v", k.Namespace, k.Name, targetPod.Name, util.DumpJSON(patch))
klog.InfoS("EphemeralJob tried to patch containers to Pod", "ephemeralJob", klog.KObj(k), "pod", klog.KObj(targetPod), "patch", util.DumpJSON(patch))
kubeClient := kubeclient.GetGenericClient().KubeClient
_, err = kubeClient.CoreV1().Pods(targetPod.Namespace).
@@ -202,7 +202,7 @@ func (k *k8sControl) createEphemeralContainerLegacy(targetPod *v1.Pod, eContaine
// we can present a JSON 6902 patch that the api server will apply.
patch, err := json.Marshal(body)
if err != nil {
klog.Errorf("error creating JSON 6902 patch for old /ephemeralcontainers API: %s", err)
klog.ErrorS(err, "Failed to creat JSON 6902 patch for old /ephemeralcontainers API")
return nil
}
@@ -214,13 +214,13 @@ func (k *k8sControl) createEphemeralContainerLegacy(targetPod *v1.Pod, eContaine
// RemoveEphemeralContainer is not support before kubernetes v1.23
func (k *k8sControl) RemoveEphemeralContainer(target *v1.Pod) (*time.Duration, error) {
klog.Warning("RemoveEphemeralContainer is not support before kubernetes v1.23")
klog.InfoS("RemoveEphemeralContainer is not support before kubernetes v1.23")
return nil, nil
}
// UpdateEphemeralContainer is not support before kubernetes v1.23
func (k *k8sControl) UpdateEphemeralContainer(target *v1.Pod) error {
klog.Warning("UpdateEphemeralContainer is not support before kubernetes v1.23")
klog.InfoS("UpdateEphemeralContainer is not support before kubernetes v1.23")
return nil
}

@@ -115,14 +115,14 @@ type ReconcileEphemeralJob struct {
// and what is in the EphemeralJob.Spec
func (r *ReconcileEphemeralJob) Reconcile(context context.Context, request reconcile.Request) (res reconcile.Result, err error) {
start := time.Now()
klog.V(5).Infof("Starting to process EphemeralJob %v", request.Name)
klog.V(5).InfoS("Starting to process EphemeralJob", "ephemeralJob", request)
defer func() {
if err != nil {
klog.Warningf("Failed to process EphemeralJob %v err %v, elapsedTime %v", request.Name, time.Since(start), err)
klog.ErrorS(err, "Failed to process EphemeralJob", "ephemeralJob", request, "elapsedTime", time.Since(start))
} else if res.RequeueAfter > 0 {
klog.Infof("Finish to process EphemeralJob %v, elapsedTime %v, RetryAfter %v", request.Name, time.Since(start), res.RequeueAfter)
klog.InfoS("Finish to process EphemeralJob with scheduled retry", "ephemeralJob", request, "elapsedTime", time.Since(start), "retryAfter", res.RequeueAfter)
} else {
klog.Infof("Finish to process EphemeralJob %v, elapsedTime %v", request.Name, time.Since(start))
klog.InfoS("Finish to process EphemeralJob", "ephemeralJob", request, "elapsedTime", time.Since(start))
}
}()
@@ -136,7 +136,7 @@ func (r *ReconcileEphemeralJob) Reconcile(context context.Context, request recon
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
klog.Errorf("Failed to get ephemeral job %s,", job.Name)
klog.InfoS("Failed to get EphemeralJob", "EphemeralJob", request)
return reconcile.Result{}, err
}
@@ -170,7 +170,7 @@ func (r *ReconcileEphemeralJob) Reconcile(context context.Context, request recon
leftTime = time.Duration(*job.Spec.TTLSecondsAfterFinished)*time.Second - time.Since(job.Status.CompletionTime.Time)
if leftTime <= 0 {
klog.Infof("Deleting EphemeralJob %s/%s for ttlSecondsAfterFinished", job.Namespace, job.Name)
klog.InfoS("Deleting EphemeralJob for ttlSecondsAfterFinished", "ephemeralJob", klog.KObj(job))
if err = r.Delete(context, job); err != nil {
return reconcile.Result{}, fmt.Errorf("Delete ephemeral job error: %v. ", err)
}
@@ -193,32 +193,32 @@ func (r *ReconcileEphemeralJob) Reconcile(context context.Context, request recon
if requeueAfter < 0 {
requeueAfter = 0
}
klog.Infof("Job %s has ActiveDeadlineSeconds, will resync after %d seconds", job.Name, requeueAfter)
klog.InfoS("Job has ActiveDeadlineSeconds, will resync after seconds", "ephemeralJob", klog.KObj(job), "requeueAfter", requeueAfter)
}
if scaleSatisfied, unsatisfiedDuration, scaleDirtyPods := scaleExpectations.SatisfiedExpectations(request.String()); !scaleSatisfied {
if unsatisfiedDuration >= expectations.ExpectationTimeout {
klog.Warningf("Expectation unsatisfied overtime for ejob %v, scaleDirtyPods=%v, overtime=%v", request.String(), scaleDirtyPods, unsatisfiedDuration)
klog.InfoS("Expectation unsatisfied overtime for ejob", "ephemeralJob", request, "scaleDirtyPods", scaleDirtyPods, "overtime", unsatisfiedDuration)
return reconcile.Result{}, nil
}
klog.Infof("Not satisfied scale for ejob %v, scaleDirtyPods=%v", request.String(), scaleDirtyPods)
klog.InfoS("Not satisfied scale for ejob", "ephemeralJob", request, "scaleDirtyPods", scaleDirtyPods)
return reconcile.Result{RequeueAfter: expectations.ExpectationTimeout - unsatisfiedDuration}, nil
}
targetPods, err := r.filterPods(job)
if err != nil {
klog.Errorf("Failed to get ephemeral job %s/%s related target pods: %v", job.Namespace, job.Name, err)
klog.ErrorS(err, "Failed to get EphemeralJob related target pods", "ephemeralJob", klog.KObj(job))
return reconcile.Result{RequeueAfter: requeueAfter}, err
}
klog.V(5).Infof("filter target pods %d", len(targetPods))
klog.V(5).InfoS("Filter target pods", "targetPodCount", len(targetPods))
// calculate status
if err := r.calculateStatus(job, targetPods); err != nil {
klog.Errorf("Error calculate ephemeral job %s/%s status: %v", job.Namespace, job.Name, err)
klog.ErrorS(err, "Error calculate EphemeralJob status", "ephemeralJob", klog.KObj(job))
return reconcile.Result{}, err
}
klog.Infof("Sync calculate job %s status: (match: %d, success: %d, failed: %d, running: %d, waiting: %d)",
job.Name, job.Status.Matches, job.Status.Succeeded, job.Status.Failed, job.Status.Running, job.Status.Waiting)
klog.InfoS("Sync calculate job status", "ephemeralJob", klog.KObj(job), "match", job.Status.Matches, "success", job.Status.Succeeded,
"failed", job.Status.Failed, "running", job.Status.Running, "waiting", job.Status.Waiting)
if job.Status.Phase == appsv1alpha1.EphemeralJobPause {
return reconcile.Result{RequeueAfter: requeueAfter}, r.updateJobStatus(job)
@@ -302,7 +302,7 @@ func (r *ReconcileEphemeralJob) filterInjectedPods(job *appsv1alpha1.EphemeralJo
if owned {
targetPods = append(targetPods, pod)
} else {
klog.Warningf("EphemeralJob %s/%s ignores Pod %s for it exists conflict ephemeral containers", job.Namespace, job.Name, pod)
klog.InfoS("EphemeralJob ignored Pod for it existed conflict ephemeral containers", "ephemeralJob", klog.KObj(job), "pod", klog.KObj(pod))
}
}
}
@@ -313,11 +313,11 @@ func (r *ReconcileEphemeralJob) filterInjectedPods(job *appsv1alpha1.EphemeralJo
func (r *ReconcileEphemeralJob) syncTargetPods(job *appsv1alpha1.EphemeralJob, targetPods []*v1.Pod) error {
toCreatePods, _, _ := getSyncPods(job, targetPods)
if len(toCreatePods) == 0 {
klog.Infoln("there is no target pod to attach")
klog.InfoS("There was no target pod to attach")
return nil
}
klog.Infof("Ready to create ephemeral containers in %d pods.", len(toCreatePods))
klog.InfoS("Ready to create ephemeral containers in pods", "podCount", len(toCreatePods))
parallelism := defaultParallelism
if job.Spec.Parallelism != nil {
parallelism = int(*job.Spec.Parallelism)
@@ -329,7 +329,8 @@ func (r *ReconcileEphemeralJob) syncTargetPods(job *appsv1alpha1.EphemeralJob, t
diff := parallelism - int(job.Status.Running)
if diff < 0 {
klog.Infof("error sync phemeraljob %s/%s for parallisem %d less than running pod %d", job.Namespace, job.Name, parallelism, job.Status.Running)
klog.InfoS("Error sync EphemeralJob for parallisem less than running pod", "ephemeralJob", klog.KObj(job),
"parallelism", parallelism, "runningPodCount", job.Status.Running)
return nil
}
@@ -349,7 +350,7 @@ func (r *ReconcileEphemeralJob) syncTargetPods(job *appsv1alpha1.EphemeralJob, t
return nil
}
klog.Infof("Creating ephemeral container in pod %s/%s", pod.Namespace, pod.Name)
klog.InfoS("Creating ephemeral container in pod", "pod", klog.KObj(pod))
for _, podEphemeralContainerName := range getPodEphemeralContainers(pod, job) {
scaleExpectations.ExpectScale(key, expectations.Create, podEphemeralContainerName)
@@ -442,14 +443,14 @@ func (r *ReconcileEphemeralJob) calculateStatus(job *appsv1alpha1.EphemeralJob,
}
func (r *ReconcileEphemeralJob) updateJobStatus(job *appsv1alpha1.EphemeralJob) error {
klog.V(5).Infof("Updating job %s status %#v", job.Name, job.Status)
klog.V(5).InfoS("Updating job status", "ephemeralJob", klog.KObj(job), "status", job.Status)
return r.Status().Update(context.TODO(), job)
}
func (r *ReconcileEphemeralJob) removeEphemeralContainers(job *appsv1alpha1.EphemeralJob) (*time.Duration, error) {
targetPods, err := r.filterInjectedPods(job)
if err != nil {
klog.Errorf("Failed to get ephemeral job %s/%s related target pods: %v", job.Namespace, job.Name, err)
klog.ErrorS(err, "Failed to get ephemeral job related target pods", "ephemeralJob", klog.KObj(job))
return nil, err
}

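The reconcilers above all use the same deferred timing pattern: record the start time, and in a deferred closure log with ErrorS on failure and InfoS otherwise, attaching the elapsed time as a key/value pair. A minimal sketch of that pattern under the same assumptions (controller-runtime reconcile types), not taken verbatim from this commit:

package example

import (
	"time"

	"k8s.io/klog/v2"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

func reconcileWithTiming(request reconcile.Request) (res reconcile.Result, err error) {
	start := time.Now()
	defer func() {
		if err != nil {
			// Failure path: structured error log with the request and elapsed time.
			klog.ErrorS(err, "Failed to process request", "request", request, "elapsedTime", time.Since(start))
		} else {
			klog.InfoS("Finished processing request", "request", request, "elapsedTime", time.Since(start))
		}
	}()
	// ... actual reconcile work would go here ...
	return reconcile.Result{}, nil
}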
@@ -99,7 +99,7 @@ func (e *ejobHandler) Update(ctx context.Context, evt event.UpdateEvent, q workq
}
if curEJob.DeletionTimestamp != nil {
klog.V(3).Infof("Observed deleting ephemeral job: %s/%s", curEJob.Namespace, curEJob.Name)
klog.V(3).InfoS("Observed deleting EphemeralJob", "ephemeralJob", klog.KObj(curEJob))
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: curEJob.Namespace, Name: curEJob.Name}})
return
}
@@ -108,7 +108,7 @@ func (e *ejobHandler) Update(ctx context.Context, evt event.UpdateEvent, q workq
if oldEJob.Spec.TTLSecondsAfterFinished != curEJob.Spec.TTLSecondsAfterFinished ||
oldEJob.Spec.Paused != curEJob.Spec.Paused || oldEJob.Spec.Parallelism != curEJob.Spec.Parallelism ||
oldEJob.Spec.Replicas != curEJob.Spec.Replicas {
klog.V(3).Infof("Observed updated Spec for ephemeral job: %s/%s", curEJob.Namespace, curEJob.Name)
klog.V(3).InfoS("Observed updated Spec for EphemeralJob", "ephemeralJob", klog.KObj(curEJob))
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: curEJob.Namespace, Name: curEJob.Name}})
}
}

@@ -169,7 +169,7 @@ func calculateEphemeralContainerStatus(job *appsv1alpha1.EphemeralJob, pods []*v
func parseEphemeralPodStatus(ejob *appsv1alpha1.EphemeralJob, statuses []v1.ContainerStatus) (v1.PodPhase, error) {
eContainerMap, empty := getEphemeralContainersMaps(ejob.Spec.Template.EphemeralContainers)
if empty {
klog.Error("ephemeral job spec containers is empty")
klog.InfoS("EphemeralJob spec containers is empty")
return v1.PodUnknown, fmt.Errorf("ephemeral job %s/%s spec containers is empty. ", ejob.Namespace, ejob.Name)
}
@@ -180,7 +180,7 @@ func parseEphemeralPodStatus(ejob *appsv1alpha1.EphemeralJob, statuses []v1.Cont
}
status := parseEphemeralContainerStatus(&eContainerStatus)
klog.V(5).Infof("parse ephemeral container %s status %v", eContainerStatus.Name, status)
klog.V(5).InfoS("Parse ephemeral container status", "ephemeralContainerStatusName", eContainerStatus.Name, "status", status)
switch status {
case FailedStatus:
return v1.PodFailed, nil

@@ -128,7 +128,7 @@ type ReconcileImageListPullJob struct {
// and what is in the ImageListPullJob.Spec
// Automatically generate RBAC rules to allow the Controller to read and write ImageListPullJob
func (r *ReconcileImageListPullJob) Reconcile(_ context.Context, request reconcile.Request) (res reconcile.Result, err error) {
klog.V(5).Infof("Starting to process ImageListPullJob %v", request.NamespacedName)
klog.V(5).InfoS("Starting to process ImageListPullJob", "imageListPullJob", request)
// 1.Fetch the ImageListPullJob instance
job := &appsv1alpha1.ImageListPullJob{}
@@ -153,7 +153,7 @@ func (r *ReconcileImageListPullJob) Reconcile(_ context.Context, request reconci
if job.Spec.CompletionPolicy.TTLSecondsAfterFinished != nil {
leftTime = time.Duration(*job.Spec.CompletionPolicy.TTLSecondsAfterFinished)*time.Second - time.Since(job.Status.CompletionTime.Time)
if leftTime <= 0 {
klog.Infof("Deleting ImageListPullJob %s/%s for ttlSecondsAfterFinished", job.Namespace, job.Name)
klog.InfoS("Deleting ImageListPullJob for ttlSecondsAfterFinished", "imageListPullJob", klog.KObj(job))
if err = r.Delete(context.TODO(), job); err != nil {
return reconcile.Result{}, fmt.Errorf("delete ImageListPullJob error: %v", err)
}
@@ -165,10 +165,10 @@ func (r *ReconcileImageListPullJob) Reconcile(_ context.Context, request reconci
if scaleSatisfied, unsatisfiedDuration, scaleDirtyImagePullJobs := scaleExpectations.SatisfiedExpectations(request.String()); !scaleSatisfied {
if unsatisfiedDuration >= expectations.ExpectationTimeout {
klog.Warningf("Expectation unsatisfied overtime for ImageListPullJob %v, scaleDirtyImagePullJobs=%v, overtime=%v", request.String(), scaleDirtyImagePullJobs, unsatisfiedDuration)
klog.InfoS("Expectation unsatisfied overtime for ImageListPullJob", "imageListPullJob", request, "scaleDirtyImagePullJobs", scaleDirtyImagePullJobs, "overtime", unsatisfiedDuration)
return reconcile.Result{}, nil
}
klog.V(4).Infof("Not satisfied scale for ImageListPullJob %v, scaleDirtyImagePullJobs=%v", request.String(), scaleDirtyImagePullJobs)
klog.V(4).InfoS("Not satisfied scale for ImageListPullJob", "imageListPullJob", request, "scaleDirtyImagePullJobs", scaleDirtyImagePullJobs)
return reconcile.Result{RequeueAfter: expectations.ExpectationTimeout - unsatisfiedDuration}, nil
}
@@ -183,10 +183,10 @@ func (r *ReconcileImageListPullJob) Reconcile(_ context.Context, request reconci
resourceVersionExpectations.Observe(imagePullJob)
if isSatisfied, unsatisfiedDuration := resourceVersionExpectations.IsSatisfied(imagePullJob); !isSatisfied {
if unsatisfiedDuration >= expectations.ExpectationTimeout {
klog.Warningf("Expectation unsatisfied overtime for %v, timeout=%v", request.String(), unsatisfiedDuration)
klog.InfoS("Expectation unsatisfied overtime for ImageListPullJob", "imageListPullJob", request, "timeout", unsatisfiedDuration)
return reconcile.Result{}, nil
}
klog.V(4).Infof("Not satisfied resourceVersion for %v", request.String())
klog.V(4).InfoS("Not satisfied resourceVersion for ImageListPullJob", "imageListPullJob", request)
return reconcile.Result{RequeueAfter: expectations.ExpectationTimeout - unsatisfiedDuration}, nil
}
}
@@ -375,7 +375,7 @@ func (r *ReconcileImageListPullJob) filterImagesAndImagePullJobs(job *appsv1alph
}
imagePullJob, ok := imagePullJobs[image]
if !ok {
klog.Warningf("can not found imagePullJob for image name %s", image)
klog.InfoS("Could not found imagePullJob for image name", "imageName", image)
continue
}
// should create new imagePullJob if the template is changed.
@@ -448,6 +448,6 @@ func isConsistentVersion(oldImagePullJob *appsv1alpha1.ImagePullJob, newTemplate
return true
}
klog.V(4).Infof("imagePullJob(%s/%s) specification changed", oldImagePullJob.Namespace, oldImagePullJob.Name)
klog.V(4).InfoS("ImagePullJob specification changed", "imagePullJob", klog.KObj(oldImagePullJob))
return false
}

@@ -38,7 +38,7 @@ type imagePullJobEventHandler struct {
func isImageListPullJobController(controllerRef *metav1.OwnerReference) bool {
refGV, err := schema.ParseGroupVersion(controllerRef.APIVersion)
if err != nil {
klog.Errorf("Could not parse OwnerReference %v APIVersion: %v", controllerRef, err)
klog.ErrorS(err, "Could not parse APIVersion in OwnerReference", "ownerReference", controllerRef)
return false
}
return controllerRef.Kind == controllerKind.Kind && refGV.Group == controllerKind.Group

@@ -151,14 +151,14 @@ type ReconcileImagePullJob struct {
// Automatically generate RBAC rules to allow the Controller to read and write Deployments
func (r *ReconcileImagePullJob) Reconcile(_ context.Context, request reconcile.Request) (res reconcile.Result, err error) {
start := time.Now()
klog.V(5).Infof("Starting to process ImagePullJob %v", request.NamespacedName)
klog.V(5).InfoS("Starting to process ImagePullJob", "imagePullJob", request)
defer func() {
if err != nil {
klog.Warningf("Failed to process ImagePullJob %v, elapsedTime %v, error: %v", request.NamespacedName, time.Since(start), err)
klog.ErrorS(err, "Failed to process ImagePullJob", "imagePullJob", request, "elapsedTime", time.Since(start))
} else if res.RequeueAfter > 0 {
klog.Infof("Finish to process ImagePullJob %v, elapsedTime %v, RetryAfter %v", request.NamespacedName, time.Since(start), res.RequeueAfter)
klog.InfoS("Finish to process ImagePullJob with scheduled retry", "imagePullJob", request, "elapsedTime", time.Since(start), "RetryAfter", res.RequeueAfter)
} else {
klog.Infof("Finish to process ImagePullJob %v, elapsedTime %v", request.NamespacedName, time.Since(start))
klog.InfoS("Finish to process ImagePullJob", "imagePullJob", request, "elapsedTime", time.Since(start))
}
}()
@@ -178,10 +178,10 @@ func (r *ReconcileImagePullJob) Reconcile(_ context.Context, request reconcile.R
// If scale expectations have not satisfied yet, just skip this reconcile
if scaleSatisfied, unsatisfiedDuration, dirtyData := scaleExpectations.SatisfiedExpectations(request.String()); !scaleSatisfied {
if unsatisfiedDuration >= expectations.ExpectationTimeout {
klog.Warningf("ImagePullJob: expectation unsatisfied overtime for %v, dirtyData=%v, overtime=%v", request.String(), dirtyData, unsatisfiedDuration)
klog.InfoS("ImagePullJob: expectation unsatisfied overtime", "imagePullJob", request, "dirtyData", dirtyData, "overTime", unsatisfiedDuration)
return reconcile.Result{}, nil
}
klog.V(4).Infof("ImagePullJob: not satisfied scale for %v, dirtyData=%v", request.String(), dirtyData)
klog.V(4).InfoS("ImagePullJob: not satisfied scale", "imagePullJob", request, "dirtyData", dirtyData)
return reconcile.Result{RequeueAfter: expectations.ExpectationTimeout - unsatisfiedDuration}, nil
}
@@ -189,10 +189,10 @@ func (r *ReconcileImagePullJob) Reconcile(_ context.Context, request reconcile.R
resourceVersionExpectations.Observe(job)
if isSatisfied, unsatisfiedDuration := resourceVersionExpectations.IsSatisfied(job); !isSatisfied {
if unsatisfiedDuration >= expectations.ExpectationTimeout {
klog.Warningf("Expectation unsatisfied overtime for %v, timeout=%v", request.String(), unsatisfiedDuration)
klog.InfoS("Expectation unsatisfied overtime", "imagePullJob", request, "timeout", unsatisfiedDuration)
return reconcile.Result{}, nil
}
klog.V(4).Infof("Not satisfied resourceVersion for %v", request.String())
klog.V(4).InfoS("Not satisfied resourceVersion", "imagePullJob", request)
return reconcile.Result{RequeueAfter: expectations.ExpectationTimeout - unsatisfiedDuration}, nil
}
@@ -212,7 +212,7 @@ func (r *ReconcileImagePullJob) Reconcile(_ context.Context, request reconcile.R
if job.Spec.CompletionPolicy.TTLSecondsAfterFinished != nil {
leftTime = time.Duration(*job.Spec.CompletionPolicy.TTLSecondsAfterFinished)*time.Second - time.Since(job.Status.CompletionTime.Time)
if leftTime <= 0 {
klog.Infof("Deleting ImagePullJob %s/%s for ttlSecondsAfterFinished", job.Namespace, job.Name)
klog.InfoS("Deleting ImagePullJob for ttlSecondsAfterFinished", "imagePullJob", klog.KObj(job))
if err = r.Delete(context.TODO(), job); err != nil {
return reconcile.Result{}, fmt.Errorf("delete job error: %v", err)
}
@@ -238,10 +238,10 @@ func (r *ReconcileImagePullJob) Reconcile(_ context.Context, request reconcile.R
resourceVersionExpectations.Observe(nodeImage)
if isSatisfied, unsatisfiedDuration := resourceVersionExpectations.IsSatisfied(nodeImage); !isSatisfied {
if unsatisfiedDuration >= expectations.ExpectationTimeout {
klog.Warningf("Expectation unsatisfied overtime for %v, wait for NodeImage %v updating, timeout=%v", request.String(), nodeImage.Name, unsatisfiedDuration)
klog.InfoS("Expectation unsatisfied overtime, waiting for NodeImage updating", "imagePullJob", request, "nodeImageName", nodeImage.Name, "timeout", unsatisfiedDuration)
return reconcile.Result{}, nil
}
klog.V(4).Infof("Not satisfied resourceVersion for %v, wait for NodeImage %v updating", request.String(), nodeImage.Name)
klog.V(4).InfoS("Not satisfied resourceVersion, waiting for NodeImage updating", "imagePullJob", request, "nodeImageName", nodeImage.Name)
// fix issue https://github.com/openkruise/kruise/issues/1528
return reconcile.Result{RequeueAfter: time.Second * 5}, nil
}
@@ -315,8 +315,8 @@ func (r *ReconcileImagePullJob) syncNodeImages(job *appsv1alpha1.ImagePullJob, n
}
parallelism := parallelismLimit - int(newStatus.Active)
if parallelism <= 0 {
klog.V(3).Infof("Find ImagePullJob %s/%s have active pulling %d >= parallelism %d, so skip to sync the left %d NodeImages",
job.Namespace, job.Name, newStatus.Active, parallelismLimit, len(notSyncedNodeImages))
klog.V(3).InfoS("Found ImagePullJob have active pulling more than parallelism, so skip to sync the left NodeImages",
"imagePullJob", klog.KObj(job), "activePullingCount", newStatus.Active, "parallelismLimit", parallelismLimit, "notSyncedNodeImagesCount", len(notSyncedNodeImages))
return nil
}
if len(notSyncedNodeImages) < parallelism {
@@ -400,10 +400,10 @@ func (r *ReconcileImagePullJob) syncNodeImages(job *appsv1alpha1.ImagePullJob, n
if updateErr != nil {
return fmt.Errorf("update NodeImage %s error: %v", notSyncedNodeImages[i], updateErr)
} else if skip {
klog.V(4).Infof("ImagePullJob %s/%s find %s already synced in NodeImage %s", job.Namespace, job.Name, job.Spec.Image, notSyncedNodeImages[i])
klog.V(4).InfoS("ImagePullJob found image already synced in NodeImage", "imagePullJob", klog.KObj(job), "image", job.Spec.Image, "nodeImage", notSyncedNodeImages[i])
continue
}
klog.V(3).Infof("ImagePullJob %s/%s has synced %s into NodeImage %s", job.Namespace, job.Name, job.Spec.Image, notSyncedNodeImages[i])
klog.V(3).InfoS("ImagePullJob had synced image into NodeImage", "imagePullJob", klog.KObj(job), "image", job.Spec.Image, "nodeImage", notSyncedNodeImages[i])
}
return nil
}
@@ -432,7 +432,7 @@ func (r *ReconcileImagePullJob) getTargetSecretMap(job *appsv1alpha1.ImagePullJo
}
sourceNs, sourceName, err := cache.SplitMetaNamespaceKey(target.Annotations[SourceSecretKeyAnno])
if err != nil {
klog.Warningf("Failed to parse source key from target %s annotations: %s", target.Name, err)
klog.ErrorS(err, "Failed to parse source key from annotations in Secret", "secret", klog.KObj(target))
}
if containsObject(sourceReferences, appsv1alpha1.ReferenceObject{Namespace: sourceNs, Name: sourceName}) {
targetMap[target.Labels[SourceSecretUIDLabelKey]] = target

@@ -72,7 +72,7 @@ func (e *nodeImageEventHandler) handle(nodeImage *appsv1alpha1.NodeImage, q work
// Get jobs related to this NodeImage
jobs, _, err := utilimagejob.GetActiveJobsForNodeImage(e.Reader, nodeImage, nil)
if err != nil {
klog.Errorf("Failed to get jobs for NodeImage %s: %v", nodeImage.Name, err)
klog.ErrorS(err, "Failed to get jobs for NodeImage", "nodeImageName", nodeImage.Name)
}
for _, j := range jobs {
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: j.Namespace, Name: j.Name}})
@@ -102,18 +102,18 @@ func (e *nodeImageEventHandler) handleUpdate(nodeImage, oldNodeImage *appsv1alph
for name := range tmpOldNodeImage.Status.ImageStatuses {
changedImages.Insert(name)
}
klog.V(5).Infof("Find NodeImage %s updated and only affect images: %v", nodeImage.Name, changedImages.List())
klog.V(5).InfoS("Found NodeImage updated and only affect images", "nodeImageName", nodeImage.Name, "changedImages", changedImages.List())
// Get jobs related to this NodeImage
newJobs, oldJobs, err := utilimagejob.GetActiveJobsForNodeImage(e.Reader, nodeImage, oldNodeImage)
if err != nil {
klog.Errorf("Failed to get jobs for NodeImage %s: %v", nodeImage.Name, err)
klog.ErrorS(err, "Failed to get jobs for NodeImage", "nodeImageName", nodeImage.Name)
}
diffSet := diffJobs(newJobs, oldJobs)
for _, j := range newJobs {
imageName, _, err := daemonutil.NormalizeImageRefToNameTag(j.Spec.Image)
if err != nil {
klog.Warningf("Invalid image %s in job %s/%s", j.Spec.Image, j.Namespace, j.Name)
klog.InfoS("Invalid image in job", "image", j.Spec.Image, "imagePullJob", klog.KObj(j))
continue
}
if changedImages.Has(imageName) {
@@ -161,7 +161,7 @@ func (e *podEventHandler) handle(pod *v1.Pod, q workqueue.RateLimitingInterface)
// Get jobs related to this Pod
jobs, _, err := utilimagejob.GetActiveJobsForPod(e.Reader, pod, nil)
if err != nil {
klog.Errorf("Failed to get jobs for Pod %s/%s: %v", pod.Namespace, pod.Name, err)
klog.ErrorS(err, "Failed to get jobs for Pod", "pod", klog.KObj(pod))
}
for _, j := range jobs {
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: j.Namespace, Name: j.Name}})
@@ -178,7 +178,7 @@ func (e *podEventHandler) handleUpdate(pod, oldPod *v1.Pod, q workqueue.RateLimi
// Get jobs related to this NodeImage
newJobs, oldJobs, err := utilimagejob.GetActiveJobsForPod(e.Reader, pod, oldPod)
if err != nil {
klog.Errorf("Failed to get jobs for Pod %s/%s: %v", pod.Namespace, pod.Name, err)
klog.ErrorS(err, "Failed to get jobs for Pod", "pod", klog.KObj(pod))
}
if oldPod.Spec.NodeName == "" {
for _, j := range newJobs {
@@ -218,7 +218,7 @@ func (e *secretEventHandler) Generic(ctx context.Context, evt event.GenericEvent
func (e *secretEventHandler) handle(secret *v1.Secret, q workqueue.RateLimitingInterface) {
if secret != nil && secret.Namespace == kruiseutil.GetKruiseDaemonConfigNamespace() {
jobKeySet := referenceSetFromTarget(secret)
klog.V(4).Infof("Observe secret %s/%s created, uid: %s, refs: %s", secret.Namespace, secret.Name, secret.UID, jobKeySet.String())
klog.V(4).InfoS("Observed Secret created", "secret", klog.KObj(secret), "secretUID", secret.UID, "jobRefs", jobKeySet)
for key := range jobKeySet {
scaleExpectations.ObserveScale(key.String(), expectations.Create, secret.Labels[SourceSecretUIDLabelKey])
}
@@ -231,7 +231,7 @@ func (e *secretEventHandler) handle(secret *v1.Secret, q workqueue.RateLimitingI
// Get jobs related to this Secret
jobKeys, err := e.getActiveJobKeysForSecret(secret)
if err != nil {
klog.Errorf("Failed to get jobs for Secret %s/%s: %v", secret.Namespace, secret.Name, err)
klog.ErrorS(err, "Failed to get jobs for Secret", "secret", klog.KObj(secret))
}
for _, jKey := range jobKeys {
q.Add(reconcile.Request{NamespacedName: jKey})
@@ -254,7 +254,7 @@ func (e *secretEventHandler) handleUpdate(secretNew, secretOld *v1.Secret, q wor
// Get jobs related to this Secret
jobKeys, err := e.getActiveJobKeysForSecret(secretNew)
if err != nil {
klog.Errorf("Failed to get jobs for Secret %s/%s: %v", secretNew.Namespace, secretNew.Name, err)
klog.ErrorS(err, "Failed to get jobs for Secret", "secret", klog.KObj(secretNew))
}
for _, jKey := range jobKeys {
q.Add(reconcile.Request{NamespacedName: jKey})

@@ -180,7 +180,7 @@ func referenceSetFromTarget(target *v1.Secret) referenceSet {
for _, ref := range refs {
namespace, name, err := cache.SplitMetaNamespaceKey(ref)
if err != nil {
klog.Errorf("Failed to parse job key from target secret %s annotations: %v", target.Name, err)
klog.ErrorS(err, "Failed to parse job key from annotations in target Secret", "secret", klog.KObj(target))
continue
}
keys.Insert(types.NamespacedName{Namespace: namespace, Name: name})

@@ -151,15 +151,15 @@ type ReconcileNodeImage struct {
// Automatically generate RBAC rules to allow the Controller to read and write nodes
func (r *ReconcileNodeImage) Reconcile(_ context.Context, request reconcile.Request) (res reconcile.Result, err error) {
start := time.Now()
klog.V(5).Infof("Starting to process NodeImage %v", request.Name)
klog.V(5).InfoS("Starting to process NodeImage", "nodeImage", request)
var requeueMsg string
defer func() {
if err != nil {
klog.Warningf("Failed to process NodeImage %v err %v, elapsedTime %v", request.Name, time.Since(start), err)
klog.ErrorS(err, "Failed to process NodeImage", "nodeImage", request, "elapsedTime", time.Since(start))
} else if res.RequeueAfter > 0 {
klog.Infof("Finish to process NodeImage %v, elapsedTime %v, RetryAfter %v: %v", request.Name, time.Since(start), res.RequeueAfter, requeueMsg)
klog.InfoS("Finish to process NodeImage with scheduled retry", "nodeImage", request, "elapsedTime", time.Since(start), "retryAfter", res.RequeueAfter, "requeueMsg", requeueMsg)
} else {
klog.Infof("Finish to process NodeImage %v, elapsedTime %v", request.Name, time.Since(start))
klog.InfoS("Finish to process NodeImage", "nodeImage", request, "elapsedTime", time.Since(start))
}
}()
@@ -202,10 +202,10 @@ func (r *ReconcileNodeImage) Reconcile(_ context.Context, request reconcile.Requ
// If Node exists, we should create a NodeImage
if nodeImage == nil {
if isReady, delay := getNodeReadyAndDelayTime(node); !isReady {
klog.V(4).Infof("Skip to create NodeImage %s for Node has not ready yet.", request.Name)
klog.V(4).InfoS("Skipped to create NodeImage for Node has not ready yet", "nodeImage", request)
return reconcile.Result{}, nil
} else if delay > 0 {
klog.V(4).Infof("Skip to create NodeImage %s for waiting Node ready for %s.", request.Name, delay)
klog.V(4).InfoS("Skipped to create NodeImage for waiting Node ready after delay", "nodeImage", request, "delay", delay)
return reconcile.Result{RequeueAfter: delay}, nil
}
@@ -218,7 +218,7 @@ func (r *ReconcileNodeImage) Reconcile(_ context.Context, request reconcile.Requ
if err = r.Create(context.TODO(), nodeImage); err != nil {
return reconcile.Result{}, fmt.Errorf("failed to create nodeimage %v, err: %v", nodeImage.Name, err)
}
klog.Infof("Successfully create nodeimage %s", nodeImage.Name)
klog.InfoS("Successfully create nodeimage", "nodeImage", klog.KObj(nodeImage))
return reconcile.Result{}, nil
}
@@ -264,9 +264,9 @@ func (r *ReconcileNodeImage) updateNodeImage(name string, node *v1.Node, duratio
})
duration.Merge(tmpDuration)
if err != nil {
klog.Errorf("Failed to update NodeImage %s spec(%v): %v", name, messages, err)
klog.ErrorS(err, "Failed to update NodeImage", "nodeImageName", name, "message", messages)
} else if modified {
klog.Infof("Successfully update NodeImage %s spec(%v)", name, messages)
klog.InfoS("Successfully update NodeImage", "nodeImageName", name, "message", messages)
}
return modified, err
}
@@ -309,12 +309,12 @@ func (r *ReconcileNodeImage) doUpdateNodeImage(nodeImage *appsv1alpha1.NodeImage
if errors.IsNotFound(err) {
continue
}
klog.Errorf("Failed to check owners for %s in NodeImage %s, get job %v error: %v", fullName, name, util.DumpJSON(ref), err)
klog.ErrorS(err, "Failed to check owners for image in NodeImage, get job error", "imageName", fullName, "nodeImageName", name, "job", util.DumpJSON(ref))
activeRefs = append(activeRefs, ref)
continue
}
if job.UID != ref.UID {
klog.Warningf("When check owners for %s in NodeImage %s, get job %v find UID %v not equal", fullName, name, util.DumpJSON(ref), job.UID)
klog.InfoS("When check owners for image in NodeImage, found job UID not equal", "imageName", fullName, "nodeImageName", name, "job", util.DumpJSON(ref), "jobUID", job.UID)
continue
}
activeRefs = append(activeRefs, ref)
@@ -492,11 +492,11 @@ func (r *ReconcileNodeImage) updateNodeImageStatus(nodeImage *appsv1alpha1.NodeI
newStatus.Succeeded = int32(succeeded)
newStatus.Failed = int32(failed)
klog.V(3).Infof("Preparing to update status for NodeImage %s, old: %v, new: %v", nodeImage.Name, util.DumpJSON(nodeImage.Status), util.DumpJSON(newStatus))
klog.V(3).InfoS("Preparing to update status for NodeImage", "nodeImage", klog.KObj(nodeImage), "oldStatus", util.DumpJSON(nodeImage.Status), "newStatus", util.DumpJSON(newStatus))
nodeImage.Status = *newStatus
err := r.Status().Update(context.TODO(), nodeImage)
if err != nil && !errors.IsConflict(err) {
klog.Errorf("Failed to update status for NodeImage %v: %v", nodeImage.Name, err)
klog.ErrorS(err, "Failed to update status for NodeImage", "nodeImage", klog.KObj(nodeImage))
}
return err
}

@@ -84,26 +84,26 @@ func (e *nodeHandler) nodeCreateOrUpdate(node *v1.Node, q workqueue.RateLimiting
namespacedName := types.NamespacedName{Name: node.Name}
if err := e.Get(context.TODO(), namespacedName, nodeImage); err != nil {
if errors.IsNotFound(err) {
klog.Infof("Node create event for nodeimage %v", node.Name)
klog.InfoS("Node created event for nodeimage", "nodeImageName", node.Name)
if isReady, delay := getNodeReadyAndDelayTime(node); !isReady {
klog.Infof("Skip to enqueue Node %s with not NodeImage, for not ready yet.", node.Name)
klog.InfoS("Skipped to enqueue Node with not NodeImage, for not ready yet", "nodeImageName", node.Name)
return
} else if delay > 0 {
klog.Infof("Enqueue Node %s with not NodeImage after %v.", node.Name, delay)
klog.InfoS("Enqueue Node with not NodeImage after delay", "nodeImageName", node.Name, "delay", delay)
q.AddAfter(reconcile.Request{NamespacedName: namespacedName}, delay)
return
}
klog.Infof("Enqueue Node %s with not NodeImage.", node.Name)
klog.InfoS("Enqueue Node with not NodeImage", "nodeImageName", node.Name)
q.Add(reconcile.Request{NamespacedName: namespacedName})
return
}
klog.Errorf("Failed to get NodeImage for Node %s: %v", node.Name, err)
klog.ErrorS(err, "Failed to get NodeImage for Node", "nodeImageName", node.Name)
return
}
if reflect.DeepEqual(node.Labels, nodeImage.Labels) {
return
}
klog.Infof("Node update labels for nodeimage %v", node.Name)
klog.InfoS("Node updated labels for NodeImage", "nodeImageName", node.Name)
q.Add(reconcile.Request{NamespacedName: namespacedName})
}
@@ -113,7 +113,7 @@ func (e *nodeHandler) nodeDelete(node *v1.Node, q workqueue.RateLimitingInterfac
if err := e.Get(context.TODO(), namespacedName, nodeImage); errors.IsNotFound(err) {
return
}
klog.Infof("Node delete event for nodeimage %v", node.Name)
klog.InfoS("Node deleted event for NodeImage", "nodeImageName", node.Name)
q.Add(reconcile.Request{NamespacedName: namespacedName})
}

@@ -179,7 +179,7 @@ func (r *ReconcileNodePodProbe) syncPodFromNodePodProbe(npp *appsv1alpha1.NodePo
pod := &corev1.Pod{}
err := r.Get(context.TODO(), client.ObjectKey{Namespace: obj.Namespace, Name: obj.Name}, pod)
if err != nil && !errors.IsNotFound(err) {
klog.Errorf("NodePodProbe get pod(%s/%s) failed: %s", obj.Namespace, obj.Name, err.Error())
klog.ErrorS(err, "NodePodProbe got pod failed", "pod", klog.KRef(obj.Namespace, obj.Name))
return nil, err
}
if errors.IsNotFound(err) || !kubecontroller.IsPodActive(pod) || string(pod.UID) != obj.UID {
@@ -202,7 +202,7 @@ func (r *ReconcileNodePodProbe) syncPodFromNodePodProbe(npp *appsv1alpha1.NodePo
nppClone := &appsv1alpha1.NodePodProbe{}
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
if err := r.Client.Get(context.TODO(), types.NamespacedName{Name: npp.Name}, nppClone); err != nil {
klog.Errorf("error getting updated npp %s from client", npp.Name)
klog.ErrorS(err, "Failed to get updated NodePodProbe from client", "nodePodProbe", klog.KObj(npp))
}
if reflect.DeepEqual(newSpec, nppClone.Spec) {
return nil
@@ -211,10 +211,10 @@ func (r *ReconcileNodePodProbe) syncPodFromNodePodProbe(npp *appsv1alpha1.NodePo
return r.Client.Update(context.TODO(), nppClone)
})
if err != nil {
klog.Errorf("NodePodProbe update NodePodProbe(%s) failed:%s", npp.Name, err.Error())
klog.ErrorS(err, "Failed to update NodePodProbe", "nodePodProbe", klog.KObj(npp))
return nil, err
}
klog.V(3).Infof("NodePodProbe update NodePodProbe(%s) from(%s) -> to(%s) success", npp.Name, util.DumpJSON(npp.Spec), util.DumpJSON(newSpec))
klog.V(3).InfoS("Updated NodePodProbe success", "nodePodProbe", klog.KObj(npp), "oldSpec", util.DumpJSON(npp.Spec), "newSpec", util.DumpJSON(newSpec))
return matchedPods, nil
}
@@ -252,7 +252,7 @@ func (r *ReconcileNodePodProbe) updatePodProbeStatus(pod *corev1.Pod, status app
if errors.IsNotFound(err) {
continue
}
klog.Errorf("NodePodProbe(%s) get pod(%s/%s) failed: %s", ppmName, pod.Namespace, pod.Name, err.Error())
klog.ErrorS(err, "NodePodProbe got pod failed", "podProbeMarkerName", ppmName, "pod", klog.KObj(pod))
return err
} else if !ppm.DeletionTimestamp.IsZero() {
continue
@@ -267,7 +267,7 @@ func (r *ReconcileNodePodProbe) updatePodProbeStatus(pod *corev1.Pod, status app
}
}
if conditionType != "" && validConditionTypes.Has(conditionType) {
klog.Warningf("NodePodProbe(%s) pod(%s/%s) condition(%s) is conflict", ppmName, pod.Namespace, pod.Name, conditionType)
klog.InfoS("NodePodProbe pod condition was conflict", "podProbeMarkerName", ppmName, "pod", klog.KObj(pod), "conditionType", conditionType)
// patch pod condition
} else if conditionType != "" {
validConditionTypes.Insert(conditionType)
@@ -328,7 +328,7 @@ func (r *ReconcileNodePodProbe) updatePodProbeStatus(pod *corev1.Pod, status app
podClone := pod.DeepCopy()
if err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
if err = r.Client.Get(context.TODO(), types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name}, podClone); err != nil {
klog.Errorf("error getting updated pod(%s/%s) from client", pod.Namespace, pod.Name)
klog.ErrorS(err, "Failed to get updated pod from client", "pod", klog.KObj(pod))
return err
}
oldStatus := podClone.Status.DeepCopy()
@@ -365,10 +365,10 @@ func (r *ReconcileNodePodProbe) updatePodProbeStatus(pod *corev1.Pod, status app
// todo: resolve https://github.com/openkruise/kruise/issues/1597
return r.Client.Status().Update(context.TODO(), podClone)
}); err != nil {
klog.Errorf("NodePodProbe patch pod(%s/%s) status failed: %s", podClone.Namespace, podClone.Name, err.Error())
klog.ErrorS(err, "NodePodProbe patched pod status failed", "pod", klog.KObj(podClone))
return err
}
klog.V(3).Infof("NodePodProbe update pod(%s/%s) metadata(%s) conditions(%s) success", podClone.Namespace, podClone.Name,
util.DumpJSON(probeMetadata), util.DumpJSON(probeConditions))
klog.V(3).InfoS("NodePodProbe updated pod metadata and conditions success", "pod", klog.KObj(podClone), "metaData",
util.DumpJSON(probeMetadata), "conditions", util.DumpJSON(probeConditions))
return nil
}

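A detail worth noting in the NodePodProbe changes above: klog.KObj is used when the object itself is in hand, while klog.KRef builds the same namespace/name reference from raw strings when only the coordinates are known. A minimal sketch of that distinction, not taken from this commit:

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
)

func logPodRefs(pod *corev1.Pod, namespace, name string) {
	// Object available: KObj extracts namespace/name from the object metadata.
	klog.InfoS("Syncing pod", "pod", klog.KObj(pod))
	// Only coordinates available: KRef takes the namespace and name strings directly.
	klog.InfoS("Syncing pod", "pod", klog.KRef(namespace, name))
}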
@@ -97,7 +97,7 @@ func (p *enqueueRequestForPod) Delete(ctx context.Context, evt event.DeleteEvent
npp := &appsalphav1.NodePodProbe{}
if err := p.reader.Get(context.TODO(), client.ObjectKey{Name: obj.Spec.NodeName}, npp); err != nil {
if !errors.IsNotFound(err) {
klog.Errorf("Get NodePodProbe(%s) failed: %s", obj.Spec.NodeName, err)
klog.ErrorS(err, "Failed to get NodePodProbe", "nodeName", obj.Spec.NodeName)
}
return
}
@@ -126,7 +126,7 @@ func (p *enqueueRequestForPod) Update(ctx context.Context, evt event.UpdateEvent
npp := &appsalphav1.NodePodProbe{}
if err := p.reader.Get(context.TODO(), client.ObjectKey{Name: new.Spec.NodeName}, npp); err != nil {
if !errors.IsNotFound(err) {
klog.Errorf("Get NodePodProbe(%s) failed: %s", new.Spec.NodeName, err)
klog.ErrorS(err, "Failed to get NodePodProbe", "nodeName", new.Spec.NodeName)
}
return
}
@@ -188,17 +188,17 @@ func (e *enqueueRequestForNode) nodeCreate(node *corev1.Node, q workqueue.RateLi
npp := &appsalphav1.NodePodProbe{}
if err := e.Get(context.TODO(), client.ObjectKey{Name: node.Name}, npp); err != nil {
if errors.IsNotFound(err) {
klog.Infof("Node create event for nodePodProbe %v", node.Name)
klog.InfoS("Node created event for nodePodProbe", "nodeName", node.Name)
namespacedName := types.NamespacedName{Name: node.Name}
if !isNodeReady(node) {
klog.Infof("Skip to enqueue Node %s with not nodePodProbe, for not ready yet.", node.Name)
klog.InfoS("Skipped to enqueue Node with not nodePodProbe, for not ready yet", "nodeName", node.Name)
return
}
klog.Infof("Enqueue Node %s with not nodePodProbe.", node.Name)
klog.InfoS("Enqueue Node with not nodePodProbe", "nodeName", node.Name)
q.Add(reconcile.Request{NamespacedName: namespacedName})
return
}
klog.Errorf("Failed to get nodePodProbe for Node %s: %v", node.Name, err)
klog.ErrorS(err, "Failed to get nodePodProbe for Node", "nodeName", node.Name)
}
}

@ -187,7 +187,7 @@ func (r *ReconcilePersistentPodState) Reconcile(_ context.Context, req ctrl.Requ
|
|||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
klog.V(3).Infof("begin to reconcile PersistentPodState(%s/%s)", persistentPodState.Namespace, persistentPodState.Name)
|
||||
klog.V(3).InfoS("Begin to reconcile PersistentPodState", "persistentPodState", klog.KObj(persistentPodState))
|
||||
pods, innerSts, err := r.getPodsAndStatefulset(persistentPodState)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
|
@ -199,7 +199,7 @@ func (r *ReconcilePersistentPodState) Reconcile(_ context.Context, req ctrl.Requ
|
|||
return ctrl.Result{}, r.updatePersistentPodStateStatus(persistentPodState, newStatus)
|
||||
}
|
||||
|
||||
klog.V(3).Infof("reconcile statefulset(%s/%s) length(%d) pods for PersistentPodState", persistentPodState.Namespace, persistentPodState.Name, len(pods))
|
||||
klog.V(3).InfoS("Reconcile statefulset pods for PersistentPodState", "persistentPodState", klog.KObj(persistentPodState), "podCount", len(pods))
|
||||
newStatus := persistentPodState.Status.DeepCopy()
|
||||
newStatus.ObservedGeneration = persistentPodState.Generation
|
||||
if newStatus.PodStates == nil {
|
||||
|
@ -256,8 +256,7 @@ func (r *ReconcilePersistentPodState) Reconcile(_ context.Context, req ctrl.Requ
|
|||
for podName := range newStatus.PodStates {
|
||||
index, err := parseStsPodIndex(podName)
|
||||
if err != nil {
|
||||
klog.Errorf("parse PersistentPodState(%s/%s) podName(%s) failed: %s",
|
||||
persistentPodState.Namespace, persistentPodState.Name, podName, err.Error())
|
||||
klog.ErrorS(err, "Failed to parse PersistentPodState podName", "persistentPodState", klog.KObj(persistentPodState), "podName", podName)
|
||||
continue
|
||||
}
|
||||
if isInStatefulSetReplicas(index, innerSts) {
|
||||
|
@ -286,10 +285,10 @@ func (r *ReconcilePersistentPodState) updatePersistentPodStateStatus(pps *appsv1
|
|||
persistentPodStateClone.Status = newStatus
|
||||
return r.Client.Status().Update(context.TODO(), persistentPodStateClone)
|
||||
}); err != nil {
|
||||
klog.Errorf("update PersistentPodState(%s/%s) status failed: %s", pps.Namespace, pps.Name, err.Error())
|
||||
klog.ErrorS(err, "Failed to update PersistentPodState status", "persistentPodState", klog.KObj(pps))
|
||||
return err
|
||||
}
|
||||
klog.V(3).Infof("update PersistentPodState(%s/%s) status pods(%d) -> pod(%d) success", pps.Namespace, pps.Name, len(pps.Status.PodStates), len(newStatus.PodStates))
|
||||
klog.V(3).InfoS("Updated PersistentPodState status success", "persistentPodState", klog.KObj(pps), "oldPodStatesCount", len(pps.Status.PodStates), "newPodStatesCount", len(newStatus.PodStates))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -304,7 +303,7 @@ func (r *ReconcilePersistentPodState) getPodsAndStatefulset(persistentPodState *
|
|||
ref := persistentPodState.Spec.TargetReference
|
||||
workload, err := r.finder.GetScaleAndSelectorForRef(ref.APIVersion, ref.Kind, persistentPodState.Namespace, ref.Name, "")
|
||||
if err != nil {
|
||||
klog.Errorf("persistentPodState(%s/%s) fetch statefulSet(%s) failed: %s", persistentPodState.Namespace, persistentPodState.Name, ref.Name, err.Error())
|
||||
klog.ErrorS(err, "PersistentPodState fetch statefulSet failed", "persistentPodState", klog.KObj(persistentPodState), "statefulSetName", ref.Name)
|
||||
return nil, nil, err
|
||||
} else if workload == nil {
|
||||
return nil, nil, nil
|
||||
|
@ -316,7 +315,7 @@ func (r *ReconcilePersistentPodState) getPodsAndStatefulset(persistentPodState *
|
|||
// DisableDeepCopy:true, indicates must be deep copy before update pod objection
|
||||
pods, _, err := r.finder.GetPodsForRef(ref.APIVersion, ref.Kind, persistentPodState.Namespace, ref.Name, true)
|
||||
if err != nil {
|
||||
klog.Errorf("list persistentPodState(%s/%s) pods failed: %s", persistentPodState.Namespace, persistentPodState.Name, err.Error())
|
||||
klog.ErrorS(err, "Failed to list persistentPodState pods", "persistentPodState", klog.KObj(persistentPodState))
|
||||
return nil, nil, err
|
||||
}
|
||||
matchedPods := make(map[string]*corev1.Pod, len(pods))
|
||||
|
@ -337,7 +336,7 @@ func (r *ReconcilePersistentPodState) getPodState(pod *corev1.Pod, nodeTopologyK
|
|||
node := &corev1.Node{}
|
||||
err := r.Get(context.TODO(), client.ObjectKey{Name: pod.Spec.NodeName}, node)
|
||||
if err != nil {
|
||||
klog.Errorf("fetch pod(%s/%s) node(%s) error %s", pod.Namespace, pod.Name, pod.Spec.NodeName, err.Error())
|
||||
klog.ErrorS(err, "Fetched node of pod error", "pod", klog.KObj(pod), "nodeName", pod.Spec.NodeName)
|
||||
return podState, err
|
||||
}
|
||||
podState.NodeName = pod.Spec.NodeName
|
||||
|
@@ -374,7 +373,7 @@ func (r *ReconcilePersistentPodState) autoGeneratePersistentPodState(req ctrl.Re
// example for generate#apps/v1#StatefulSet#echoserver
arr := strings.Split(req.Name, "#")
if len(arr) != 4 {
klog.Warningf("Reconcile PersistentPodState workload(%s) is invalid", req.Name)
klog.InfoS("Reconcile PersistentPodState workload is invalid", "workload", req)
return nil
}
// fetch workload

@@ -383,7 +382,7 @@ func (r *ReconcilePersistentPodState) autoGeneratePersistentPodState(req ctrl.Re
if err != nil {
return err
} else if workload == nil {
klog.Warningf("Reconcile PersistentPodState workload(%s) is Not Found", req.Name)
klog.InfoS("Reconcile PersistentPodState workload is Not Found", "workload", req)
return nil
}
// fetch persistentPodState crd

@@ -401,7 +400,7 @@ func (r *ReconcilePersistentPodState) autoGeneratePersistentPodState(req ctrl.Re
if workload.Metadata.Annotations[appsv1alpha1.AnnotationAutoGeneratePersistentPodState] == "true" {
if workload.Metadata.Annotations[appsv1alpha1.AnnotationRequiredPersistentTopology] == "" &&
workload.Metadata.Annotations[appsv1alpha1.AnnotationPreferredPersistentTopology] == "" {
klog.Warningf("statefulSet(%s/%s) persistentPodState annotation is incomplete", workload.Metadata.Namespace, workload.Name)
klog.InfoS("StatefulSet persistentPodState annotation was incomplete", "statefulSet", klog.KRef(workload.Metadata.Namespace, workload.Name))
return nil
}

@@ -414,7 +413,7 @@ func (r *ReconcilePersistentPodState) autoGeneratePersistentPodState(req ctrl.Re
}
return err
}
klog.V(3).Infof("create StatefulSet(%s/%s) persistentPodState(%s) success", ns, name, util.DumpJSON(newObj))
klog.V(3).InfoS("Created StatefulSet persistentPodState success", "statefulSet", klog.KRef(ns, name), "persistentPodState", klog.KObj(newObj))
return nil
}
// compare with old object
@@ -429,11 +428,11 @@ func (r *ReconcilePersistentPodState) autoGeneratePersistentPodState(req ctrl.Re
objClone.Spec = *newObj.Spec.DeepCopy()
return r.Client.Update(context.TODO(), objClone)
}); err != nil {
klog.Errorf("update persistentPodState(%s/%s) failed: %s", newObj.Namespace, newObj.Name, err.Error())
klog.ErrorS(err, "Failed to update persistentPodState", "persistentPodState", klog.KObj(newObj))
return err
}
klog.V(3).Infof("update persistentPodState(%s/%s) from(%s) -> to(%s) success", newObj.Namespace, newObj.Name,
util.DumpJSON(oldObj.Spec), util.DumpJSON(newObj.Spec))
klog.V(3).InfoS("Updated persistentPodState success", "persistentPodState", klog.KObj(newObj), "oldSpec",
util.DumpJSON(oldObj.Spec), "newSpec", util.DumpJSON(newObj.Spec))
return nil
}

@@ -444,7 +443,7 @@ func (r *ReconcilePersistentPodState) autoGeneratePersistentPodState(req ctrl.Re
if err = r.Delete(context.TODO(), oldObj); err != nil {
return err
}
klog.V(3).Infof("delete StatefulSet(%s/%s) persistentPodState done", ns, name)
klog.V(3).InfoS("Deleted StatefulSet persistentPodState", "statefulSet", klog.KRef(ns, name))
return nil
}
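The hunks above show the core migration this commit applies: format-verb calls (klog.Errorf/Infof/Warningf) become klog's structured ErrorS/InfoS with a constant message and key/value pairs. A minimal, self-contained sketch of that pattern follows; the pod, error, and keys here are illustrative stand-ins, not lines from this repository.

```go
package main

import (
	"errors"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "demo"}}
	err := errors.New("connection refused")

	// Old style: positional format verbs, identity embedded in the message string.
	klog.Errorf("fetch pod(%s/%s) failed: %s", pod.Namespace, pod.Name, err.Error())

	// New style: the error is the first argument, the message is a constant,
	// and the object identity travels as a key/value pair via klog.KObj.
	klog.ErrorS(err, "Failed to fetch pod", "pod", klog.KObj(pod))

	// Informational messages follow the same shape, optionally behind a verbosity gate.
	klog.V(3).InfoS("Processed pod", "pod", klog.KObj(pod), "phase", pod.Status.Phase)
}
```

klog.KObj reads namespace and name from the object itself, so the same reference renders identically (default/demo) across every log line that mentions it.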
@@ -95,7 +95,7 @@ func (p *enqueueRequestForPod) fetchPersistentPodState(pod *corev1.Pod) *appsv1a
ref := metav1.GetControllerOf(pod)
whiteList, err := configuration.GetPPSWatchCustomWorkloadWhiteList(p.client)
if err != nil {
klog.Errorf("Failed to get persistent pod state config white list, error: %v\n", err.Error())
klog.ErrorS(err, "Failed to get persistent pod state config white list")
return nil
}
if ref == nil || !whiteList.ValidateAPIVersionAndKind(ref.APIVersion, ref.Kind) {

@@ -105,7 +105,7 @@ func (p *enqueueRequestForPod) fetchPersistentPodState(pod *corev1.Pod) *appsv1a
if ppsName != "" {
obj := &appsv1alpha1.PersistentPodState{}
if err := p.reader.Get(context.TODO(), client.ObjectKey{Namespace: pod.Namespace, Name: ppsName}, obj); err != nil {
klog.Errorf("fetch pod(%s/%s) PersistentPodState(%s) failed: %s", pod.Namespace, pod.Name, ppsName, err.Error())
klog.ErrorS(err, "Failed to fetch pod PersistentPodState", "pod", klog.KObj(pod), "persistentPodStateName", ppsName)
return nil
}
return obj

@@ -202,7 +202,7 @@ func enqueuePersistentPodStateRequest(q workqueue.RateLimitingInterface, apiVers
Namespace: ns,
Name: qName,
}})
klog.V(3).Infof("enqueuePersistentPodStateRequest(%s)", qName)
klog.V(3).InfoS("Enqueue PersistentPodState request", "qName", qName)
}

var _ handler.EventHandler = &enqueueRequestForKruiseStatefulSet{}
|
|
@ -147,7 +147,7 @@ func (r *ReconcilePodProbeMarker) syncPodProbeMarker(ns, name string) error {
|
|||
}
|
||||
pods, err := r.getMatchingPods(ppm)
|
||||
if err != nil {
|
||||
klog.Errorf("PodProbeMarker ppm(%s/%s) list pods failed: %s", ppm.Namespace, ppm.Name, err.Error())
|
||||
klog.ErrorS(err, "PodProbeMarker listed pods failed", "podProbeMarker", klog.KObj(ppm))
|
||||
return err
|
||||
}
|
||||
// remove podProbe from NodePodProbe.Spec
|
||||
|
@ -158,10 +158,10 @@ func (r *ReconcilePodProbeMarker) syncPodProbeMarker(ns, name string) error {
|
|||
if !controllerutil.ContainsFinalizer(ppm, PodProbeMarkerFinalizer) {
|
||||
err = util.UpdateFinalizer(r.Client, ppm, util.AddFinalizerOpType, PodProbeMarkerFinalizer)
|
||||
if err != nil {
|
||||
klog.Errorf("add PodProbeMarker(%s/%s) finalizer failed: %s", ppm.Namespace, ppm.Name, err.Error())
|
||||
klog.ErrorS(err, "Failed to add PodProbeMarker finalizer", "podProbeMarker", klog.KObj(ppm))
|
||||
return err
|
||||
}
|
||||
klog.V(3).Infof("add PodProbeMarker(%s/%s) finalizer success", ppm.Namespace, ppm.Name)
|
||||
klog.V(3).InfoS("Added PodProbeMarker finalizer success", "podProbeMarker", klog.KObj(ppm))
|
||||
}
|
||||
|
||||
groupByNode := make(map[string][]*corev1.Pod)
|
||||
|
@ -180,7 +180,7 @@ func (r *ReconcilePodProbeMarker) syncPodProbeMarker(ns, name string) error {
|
|||
ppmClone := ppm.DeepCopy()
|
||||
if err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
if err := r.Client.Get(context.TODO(), client.ObjectKey{Namespace: ppm.Namespace, Name: ppm.Name}, ppmClone); err != nil {
|
||||
klog.Errorf("error getting updated podProbeMarker %s from client", ppm.Name)
|
||||
klog.ErrorS(err, "Failed to get updated PodProbeMarker from client", "podProbeMarker", klog.KObj(ppm))
|
||||
}
|
||||
if ppmClone.Status.ObservedGeneration == ppmClone.Generation && int(ppmClone.Status.MatchedPods) == len(pods) {
|
||||
return nil
|
||||
|
@ -189,10 +189,10 @@ func (r *ReconcilePodProbeMarker) syncPodProbeMarker(ns, name string) error {
|
|||
ppmClone.Status.MatchedPods = int64(len(pods))
|
||||
return r.Client.Status().Update(context.TODO(), ppmClone)
|
||||
}); err != nil {
|
||||
klog.Errorf("PodProbeMarker(%s/%s) update status failed: %s", ppm.Namespace, ppm.Name, err.Error())
|
||||
klog.ErrorS(err, "PodProbeMarker update status failed", "podProbeMarker", klog.KObj(ppm))
|
||||
return err
|
||||
}
|
||||
klog.V(3).Infof("PodProbeMarker(%s/%s) update status(%s) success", ppm.Namespace, ppm.Name, util.DumpJSON(ppmClone.Status))
|
||||
klog.V(3).InfoS("PodProbeMarker update status success", "podProbeMarker", klog.KObj(ppm), "status", util.DumpJSON(ppmClone.Status))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -207,10 +207,10 @@ func (r *ReconcilePodProbeMarker) handlerPodProbeMarkerFinalizer(ppm *appsv1alph
|
|||
}
|
||||
err := util.UpdateFinalizer(r.Client, ppm, util.RemoveFinalizerOpType, PodProbeMarkerFinalizer)
|
||||
if err != nil {
|
||||
klog.Errorf("remove PodProbeMarker(%s/%s) finalizer failed: %s", ppm.Namespace, ppm.Name, err.Error())
|
||||
klog.ErrorS(err, "Failed to remove PodProbeMarker finalizer", "podProbeMarker", klog.KObj(ppm))
|
||||
return err
|
||||
}
|
||||
klog.V(3).Infof("remove PodProbeMarker(%s/%s) finalizer success", ppm.Namespace, ppm.Name)
|
||||
klog.V(3).InfoS("Removed PodProbeMarker finalizer success", "podProbeMarker", klog.KObj(ppm))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -219,11 +219,11 @@ func (r *ReconcilePodProbeMarker) updateNodePodProbes(ppm *appsv1alpha1.PodProbe
|
|||
err := r.Get(context.TODO(), client.ObjectKey{Name: nodeName}, npp)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
klog.Warningf("PodProbeMarker ppm(%s/%s) NodePodProbe(%s) is Not Found", ppm.Namespace, ppm.Name, npp.Name)
|
||||
klog.InfoS("PodProbeMarker NodePodProbe was Not Found", "podProbeMarker", klog.KObj(ppm), "nodePodProbe", klog.KObj(npp))
|
||||
return nil
|
||||
}
|
||||
// Error reading the object - requeue the request.
|
||||
klog.Errorf("PodProbeMarker ppm(%s/%s) get NodePodProbe(%s) failed: %s", ppm.Namespace, ppm.Name, nodeName, err.Error())
|
||||
klog.ErrorS(err, "PodProbeMarker got NodePodProbe failed", "podProbeMarker", klog.KObj(ppm), "nodePodProbe", klog.KObj(npp))
|
||||
return err
|
||||
}
|
||||
|
||||
|
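Several hunks in this controller also turn klog.Warningf into klog.InfoS, because structured klog offers no WarningS: expected situations such as a NotFound lookup are reported with InfoS, while real failures keep the error in ErrorS. A hedged sketch of that mapping follows; the NodePodProbe name and the synthetic error are placeholders.

```go
package main

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/klog/v2"
)

// handleGetError mirrors the NotFound branch above: tolerate a missing object,
// surface every other error.
func handleGetError(err error, name string) error {
	if err == nil {
		return nil
	}
	if apierrors.IsNotFound(err) {
		// Previously a klog.Warningf; structured klog reports it as InfoS.
		klog.InfoS("NodePodProbe was not found", "nodePodProbeName", name)
		return nil
	}
	klog.ErrorS(err, "Failed to get NodePodProbe", "nodePodProbeName", name)
	return err
}

func main() {
	notFound := apierrors.NewNotFound(schema.GroupResource{Group: "apps.kruise.io", Resource: "nodepodprobes"}, "demo")
	_ = handleGetError(notFound, "demo")
}
```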
@ -242,14 +242,14 @@ func (r *ReconcilePodProbeMarker) updateNodePodProbes(ppm *appsv1alpha1.PodProbe
|
|||
if probe.Probe.TCPSocket != nil {
|
||||
probe, err = convertTcpSocketProbeCheckPort(probe, pod)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to convert tcpSocket probe port, err: %v, pod: %v/%v", err, pod.Namespace, pod.Name)
|
||||
klog.ErrorS(err, "Failed to convert tcpSocket probe port", "pod", klog.KObj(pod))
|
||||
continue
|
||||
}
|
||||
}
|
||||
if probe.Probe.HTTPGet != nil {
|
||||
probe, err = convertHttpGetProbeCheckPort(probe, pod)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to convert httpGet probe port, err: %v, pod: %v/%v", err, pod.Namespace, pod.Name)
|
||||
klog.ErrorS(err, "Failed to convert httpGet probe port", "pod", klog.KObj(pod))
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
@ -266,14 +266,14 @@ func (r *ReconcilePodProbeMarker) updateNodePodProbes(ppm *appsv1alpha1.PodProbe
|
|||
if probe.Probe.TCPSocket != nil {
|
||||
probe, err = convertTcpSocketProbeCheckPort(probe, pod)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to convert tcpSocket probe port, err: %v, pod: %v/%v", err, pod.Namespace, pod.Name)
|
||||
klog.ErrorS(err, "Failed to convert tcpSocket probe port", "pod", klog.KObj(pod))
|
||||
continue
|
||||
}
|
||||
}
|
||||
if probe.Probe.HTTPGet != nil {
|
||||
probe, err = convertHttpGetProbeCheckPort(probe, pod)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to convert httpGet probe port, err: %v, pod: %v/%v", err, pod.Namespace, pod.Name)
|
||||
klog.ErrorS(err, "Failed to convert httpGet probe port", "pod", klog.KObj(pod))
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
@ -292,11 +292,11 @@ func (r *ReconcilePodProbeMarker) updateNodePodProbes(ppm *appsv1alpha1.PodProbe
|
|||
}
|
||||
err = r.Update(context.TODO(), npp)
|
||||
if err != nil {
|
||||
klog.Errorf("PodProbeMarker ppm(%s/%s) Update NodePodProbe(%s) failed: %s", ppm.Namespace, ppm.Name, npp.Name, err.Error())
|
||||
klog.ErrorS(err, "PodProbeMarker updated NodePodProbe failed", "podProbeMarker", klog.KObj(ppm), "nodePodProbeName", npp.Name)
|
||||
return err
|
||||
}
|
||||
klog.V(3).Infof("PodProbeMarker ppm(%s/%s) update NodePodProbe(%s) from(%s) -> to(%s) success",
|
||||
ppm.Namespace, ppm.Name, npp.Name, util.DumpJSON(oldSpec), util.DumpJSON(npp.Spec))
|
||||
klog.V(3).InfoS("PodProbeMarker updated NodePodProbe success", "podProbeMarker", klog.KObj(ppm), "nodePodProbeName", npp.Name,
|
||||
"oldSpec", util.DumpJSON(oldSpec), "newSpec", util.DumpJSON(npp.Spec))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -409,9 +409,9 @@ func (r *ReconcilePodProbeMarker) removePodProbeFromNodePodProbe(ppmName, nppNam
|
|||
npp.Spec = newSpec
|
||||
err = r.Update(context.TODO(), npp)
|
||||
if err != nil {
|
||||
klog.Errorf("NodePodProbe(%s) remove PodProbe(%s) failed: %s", nppName, ppmName, err.Error())
|
||||
klog.ErrorS(err, "NodePodProbe removed PodProbeMarker failed", "nodePodProbeName", nppName, "podProbeMarkerName", ppmName)
|
||||
return err
|
||||
}
|
||||
klog.V(3).Infof("NodePodProbe(%s) remove PodProbe(%s) success", nppName, ppmName)
|
||||
klog.V(3).InfoS("NodePodProbe removed PodProbeMarker success", "nodePodProbeName", nppName, "podProbeMarkerName", ppmName)
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -104,7 +104,7 @@ func (p *enqueueRequestForPod) Update(ctx context.Context, evt event.UpdateEvent
|
|||
newInitialCondition.Status == corev1.ConditionTrue) || old.Status.PodIP != new.Status.PodIP {
|
||||
ppms, err := p.getPodProbeMarkerForPod(new)
|
||||
if err != nil {
|
||||
klog.Errorf("List PodProbeMarker fail: %s", err.Error())
|
||||
klog.ErrorS(err, "Failed to List PodProbeMarker")
|
||||
return
|
||||
}
|
||||
for _, ppm := range ppms {
|
||||
|
|
|
@@ -101,12 +101,12 @@ type ReconcilePodReadiness struct {

func (r *ReconcilePodReadiness) Reconcile(_ context.Context, request reconcile.Request) (res reconcile.Result, err error) {
start := time.Now()
klog.V(3).Infof("Starting to process Pod %v", request.NamespacedName)
klog.V(3).InfoS("Starting to process Pod", "pod", request)
defer func() {
if err != nil {
klog.Warningf("Failed to process Pod %v, elapsedTime %v, error: %v", request.NamespacedName, time.Since(start), err)
klog.ErrorS(err, "Failed to process Pod", "pod", request, "elapsedTime", time.Since(start))
} else {
klog.Infof("Finish to process Pod %v, elapsedTime %v", request.NamespacedName, time.Since(start))
klog.InfoS("Finished to process Pod", "pod", request, "elapsedTime", time.Since(start))
}
}()
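The Reconcile hunk above keeps the start-time bookkeeping and only swaps the logging calls. A rough sketch of the same timing pattern, assuming a stand-in NamespacedName request instead of the controller-runtime reconcile.Request used in the real code.

```go
package main

import (
	"time"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/klog/v2"
)

func processPod(request types.NamespacedName) (err error) {
	start := time.Now()
	klog.V(3).InfoS("Starting to process Pod", "pod", request)
	// The deferred block reports either outcome together with the elapsed time.
	defer func() {
		if err != nil {
			klog.ErrorS(err, "Failed to process Pod", "pod", request, "elapsedTime", time.Since(start))
		} else {
			klog.InfoS("Finished processing Pod", "pod", request, "elapsedTime", time.Since(start))
		}
	}()
	// Real reconciliation work would go here.
	return nil
}

func main() {
	_ = processPod(types.NamespacedName{Namespace: "default", Name: "demo"})
}
```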
@ -191,7 +191,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
|
|||
return err
|
||||
}
|
||||
|
||||
klog.Infof("add podunavailablebudget reconcile.Reconciler success")
|
||||
klog.InfoS("Added podunavailablebudget reconcile.Reconciler success")
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -216,7 +216,7 @@ func (r *ReconcilePodUnavailableBudget) Reconcile(_ context.Context, req ctrl.Re
|
|||
pub := &policyv1alpha1.PodUnavailableBudget{}
|
||||
err := r.Get(context.TODO(), req.NamespacedName, pub)
|
||||
if (err != nil && errors.IsNotFound(err)) || (err == nil && !pub.DeletionTimestamp.IsZero()) {
|
||||
klog.V(3).Infof("pub(%s/%s) is Deletion in this time", req.Namespace, req.Name)
|
||||
klog.V(3).InfoS("PodUnavailableBudget is Deletion in this time", "podUnavailableBudget", req)
|
||||
if cacheErr := util.GlobalCache.Delete(&policyv1alpha1.PodUnavailableBudget{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: policyv1alpha1.GroupVersion.String(),
|
||||
|
@ -227,7 +227,7 @@ func (r *ReconcilePodUnavailableBudget) Reconcile(_ context.Context, req ctrl.Re
|
|||
Namespace: req.Namespace,
|
||||
},
|
||||
}); cacheErr != nil {
|
||||
klog.Errorf("Delete cache failed for PodUnavailableBudget(%s/%s): %s", req.Namespace, req.Name, err.Error())
|
||||
klog.ErrorS(err, "Deleted cache failed for PodUnavailableBudget", "podUnavailableBudget", req)
|
||||
}
|
||||
// Object not found, return. Created objects are automatically garbage collected.
|
||||
// For additional cleanup logic use finalizers.
|
||||
|
@ -237,7 +237,7 @@ func (r *ReconcilePodUnavailableBudget) Reconcile(_ context.Context, req ctrl.Re
|
|||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
klog.V(3).Infof("begin to process podUnavailableBudget(%s/%s)", pub.Namespace, pub.Name)
|
||||
klog.V(3).InfoS("Began to process PodUnavailableBudget", "podUnavailableBudget", klog.KObj(pub))
|
||||
recheckTime, err := r.syncPodUnavailableBudget(pub)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
|
@ -259,12 +259,12 @@ func (r *ReconcilePodUnavailableBudget) syncPodUnavailableBudget(pub *policyv1al
|
|||
} else {
|
||||
// patch related-pub annotation in all pods of workload
|
||||
if err = r.patchRelatedPubAnnotationInPod(pub, pods); err != nil {
|
||||
klog.Errorf("pub(%s/%s) patch pod annotation failed: %s", pub.Namespace, pub.Name, err.Error())
|
||||
klog.ErrorS(err, "PodUnavailableBudget patch pod annotation failed", "podUnavailableBudget", klog.KObj(pub))
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
klog.V(3).Infof("pub(%s/%s) controller pods(%d) expectedCount(%d)", pub.Namespace, pub.Name, len(pods), expectedCount)
|
||||
klog.V(3).InfoS("PodUnavailableBudget controller pods expectedCount", "podUnavailableBudget", klog.KObj(pub), "podCount", len(pods), "expectedCount", expectedCount)
|
||||
desiredAvailable, err := r.getDesiredAvailableForPub(pub, expectedCount)
|
||||
if err != nil {
|
||||
r.recorder.Eventf(pub, corev1.EventTypeWarning, "CalculateExpectedPodCountFailed", "Failed to calculate the number of expected pods: %v", err)
|
||||
|
@ -287,14 +287,14 @@ func (r *ReconcilePodUnavailableBudget) syncPodUnavailableBudget(pub *policyv1al
|
|||
pubClone, err = kubeClient.GetGenericClient().KruiseClient.PolicyV1alpha1().
|
||||
PodUnavailableBudgets(pub.Namespace).Get(context.TODO(), pub.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
klog.Errorf("Get PodUnavailableBudget(%s/%s) failed from etcd: %s", pub.Namespace, pub.Name, err.Error())
|
||||
klog.ErrorS(err, "Failed to get PodUnavailableBudget from etcd", "podUnavailableBudget", klog.KObj(pub))
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// compare local cache and informer cache, then get the newer one
|
||||
item, _, err := util.GlobalCache.Get(pub)
|
||||
if err != nil {
|
||||
klog.Errorf("Get PodUnavailableBudget(%s/%s) cache failed: %s", pub.Namespace, pub.Name, err.Error())
|
||||
klog.ErrorS(err, "Failed to get PodUnavailableBudget cache", "podUnavailableBudget", klog.KObj(pub))
|
||||
}
|
||||
if localCached, ok := item.(*policyv1alpha1.PodUnavailableBudget); ok {
|
||||
pubClone = localCached.DeepCopy()
|
||||
|
@ -331,10 +331,10 @@ func (r *ReconcilePodUnavailableBudget) syncPodUnavailableBudget(pub *policyv1al
|
|||
conflictTimes++
|
||||
return updateErr
|
||||
})
|
||||
klog.V(3).Infof("Controller cost of pub(%s/%s): conflict times %v, cost of Get %v, cost of Update %v",
|
||||
pub.Namespace, pub.Name, conflictTimes, costOfGet, costOfUpdate)
|
||||
klog.V(3).InfoS("Controller cost of PodUnavailableBudget", "podUnavailableBudget", klog.KObj(pub), "conflictTimes", conflictTimes,
|
||||
"costOfGet", costOfGet, "costOfUpdate", costOfUpdate)
|
||||
if err != nil {
|
||||
klog.Errorf("update pub(%s/%s) status failed: %s", pub.Namespace, pub.Name, err.Error())
|
||||
klog.ErrorS(err, "Failed to update PodUnavailableBudget status", "podUnavailableBudget", klog.KObj(pub))
|
||||
}
|
||||
return recheckTime, err
|
||||
}
|
||||
|
@ -356,7 +356,7 @@ func (r *ReconcilePodUnavailableBudget) patchRelatedPubAnnotationInPod(pub *poli
|
|||
return err
|
||||
}
|
||||
}
|
||||
klog.V(3).Infof("patch pub(%s/%s) old pods(%d) related-pub annotation success", pub.Namespace, pub.Name, len(updatedPods))
|
||||
klog.V(3).InfoS("Patched PodUnavailableBudget old pods related-pub annotation success", "podUnavailableBudget", klog.KObj(pub), "podCount", len(updatedPods))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@@ -498,9 +498,9 @@ func (r *ReconcilePodUnavailableBudget) updatePubStatus(pub *policyv1alpha1.PodU
return err
}
if err = util.GlobalCache.Add(pub); err != nil {
klog.Errorf("Add cache failed for PodUnavailableBudget(%s/%s): %s", pub.Namespace, pub.Name, err.Error())
klog.ErrorS(err, "Added cache failed for PodUnavailableBudget", "podUnavailableBudget", klog.KObj(pub))
}
klog.V(3).Infof("pub(%s/%s) update status(disruptedPods:%d, unavailablePods:%d, expectedCount:%d, desiredAvailable:%d, currentAvailable:%d, unavailableAllowed:%d)",
pub.Namespace, pub.Name, len(disruptedPods), len(unavailablePods), expectedCount, desiredAvailable, currentAvailable, unavailableAllowed)
klog.V(3).InfoS("PodUnavailableBudget update status", "podUnavailableBudget", klog.KObj(pub), "disruptedPods", len(disruptedPods), "unavailablePods", len(unavailablePods),
"expectedCount", expectedCount, "desiredAvailable", desiredAvailable, "currentAvailable", currentAvailable, "unavailableAllowed", unavailableAllowed)
return nil
}
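The PodUnavailableBudget hunks split multi-field Infof messages into one key per counter, while whole specs remain serialized strings (util.DumpJSON in the real code). A small sketch of that split, with a hypothetical status struct and a local dumpJSON helper standing in for the kruise util.

```go
package main

import (
	"encoding/json"

	"k8s.io/klog/v2"
)

// pubStatus is a made-up stand-in for a few PodUnavailableBudget status fields.
type pubStatus struct {
	DisruptedPods      int `json:"disruptedPods"`
	UnavailableAllowed int `json:"unavailableAllowed"`
}

// dumpJSON stands in for util.DumpJSON from the kruise codebase.
func dumpJSON(v interface{}) string {
	b, _ := json.Marshal(v)
	return string(b)
}

func main() {
	status := pubStatus{DisruptedPods: 1, UnavailableAllowed: 2}

	// Counters become individual keys so they can be filtered or aggregated downstream.
	klog.V(3).InfoS("PodUnavailableBudget update status",
		"disruptedPods", status.DisruptedPods, "unavailableAllowed", status.UnavailableAllowed)

	// Larger blobs such as old/new specs still go out as a single serialized value.
	klog.V(3).InfoS("Updated status", "newStatus", dumpJSON(status))
}
```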
@ -83,7 +83,7 @@ func (p *enqueueRequestForPod) addPod(q workqueue.RateLimitingInterface, obj run
|
|||
if pub == nil {
|
||||
return
|
||||
}
|
||||
klog.V(3).Infof("add pod(%s/%s) reconcile pub(%s/%s)", pod.Namespace, pod.Name, pub.Namespace, pub.Name)
|
||||
klog.V(3).InfoS("Added Pod reconcile PodUnavailableBudget", "pod", klog.KObj(pod), "podUnavailableBudget", klog.KObj(pub))
|
||||
q.Add(reconcile.Request{
|
||||
NamespacedName: types.NamespacedName{
|
||||
Name: pub.Name,
|
||||
|
@ -162,7 +162,7 @@ func isPodAvailableChanged(oldPod, newPod *corev1.Pod, pub *policyv1alpha1.PodUn
|
|||
// If the pod's deletion timestamp is set, remove endpoint from ready address.
|
||||
if oldPod.DeletionTimestamp.IsZero() && !newPod.DeletionTimestamp.IsZero() {
|
||||
enqueueDelayTime = time.Second * 5
|
||||
klog.V(3).Infof("pod(%s/%s) DeletionTimestamp changed, and reconcile pub(%s/%s) delayTime(5s)", newPod.Namespace, newPod.Name, pub.Namespace, pub.Name)
|
||||
klog.V(3).InfoS("Pod DeletionTimestamp changed, and reconcile PodUnavailableBudget after 5s", "pod", klog.KObj(newPod), "podUnavailableBudget", klog.KObj(pub))
|
||||
return true, enqueueDelayTime
|
||||
// oldPod Deletion is set, then no reconcile
|
||||
} else if !oldPod.DeletionTimestamp.IsZero() {
|
||||
|
@ -177,8 +177,8 @@ func isPodAvailableChanged(oldPod, newPod *corev1.Pod, pub *policyv1alpha1.PodUn
|
|||
oldReady := control.IsPodReady(oldPod) && control.IsPodStateConsistent(oldPod)
|
||||
newReady := control.IsPodReady(newPod) && control.IsPodStateConsistent(newPod)
|
||||
if oldReady != newReady {
|
||||
klog.V(3).Infof("pod(%s/%s) ConsistentAndReady changed(from %v to %v), and reconcile pub(%s/%s)",
|
||||
newPod.Namespace, newPod.Name, oldReady, newReady, pub.Namespace, pub.Name)
|
||||
klog.V(3).InfoS("Pod ConsistentAndReady changed, and reconcile PodUnavailableBudget", "pod", klog.KObj(newPod), "oldReady", oldReady,
|
||||
"newReady", newReady, "podUnavailableBudget", klog.KObj(pub))
|
||||
return true, enqueueDelayTime
|
||||
}
|
||||
|
||||
|
@ -246,7 +246,7 @@ func (e *SetEnqueueRequestForPUB) addSetRequest(object client.Object, q workqueu
|
|||
// fetch matched pub
|
||||
pubList := &policyv1alpha1.PodUnavailableBudgetList{}
|
||||
if err := e.mgr.GetClient().List(context.TODO(), pubList, &client.ListOptions{Namespace: namespace}); err != nil {
|
||||
klog.Errorf("SetEnqueueRequestForPUB list pub failed: %s", err.Error())
|
||||
klog.ErrorS(err, "SetEnqueueRequestForPUB list pub failed")
|
||||
return
|
||||
}
|
||||
var matched policyv1alpha1.PodUnavailableBudget
|
||||
|
@@ -279,6 +279,6 @@ func (e *SetEnqueueRequestForPUB) addSetRequest(object client.Object, q workqueu
Namespace: matched.Namespace,
},
})
klog.V(3).Infof("workload(%s/%s) changed, and reconcile pub(%s/%s)",
namespace, targetRef.Name, matched.Namespace, matched.Name)
klog.V(3).InfoS("Workload changed, and reconcile PodUnavailableBudget",
"workload", klog.KRef(namespace, targetRef.Name), "podUnavailableBudget", klog.KRef(matched.Namespace, matched.Name))
}
|
|
@ -93,7 +93,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
|
|||
oldObj := e.ObjectOld.(*appsv1alpha1.ResourceDistribution)
|
||||
newObj := e.ObjectNew.(*appsv1alpha1.ResourceDistribution)
|
||||
if !reflect.DeepEqual(oldObj.Spec, newObj.Spec) {
|
||||
klog.V(3).Infof("Observed updated Spec for ResourceDistribution: %s/%s", oldObj.Namespace, newObj.Name)
|
||||
klog.V(3).InfoS("Observed updated Spec for ResourceDistribution", "resourceDistribution", klog.KObj(oldObj))
|
||||
return true
|
||||
}
|
||||
return false
|
||||
|
@ -171,7 +171,7 @@ type ReconcileResourceDistribution struct {
|
|||
// For more details, check Reconcile and its Result here:
|
||||
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.6.4/pkg/reconcile
|
||||
func (r *ReconcileResourceDistribution) Reconcile(_ context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
klog.V(3).Infof("ResourceDistribution(%s) begin to reconcile", req.NamespacedName.Name)
|
||||
klog.V(3).InfoS("ResourceDistribution begin to reconcile", "resourceDistribution", req)
|
||||
// fetch resourcedistribution instance as distributor
|
||||
distributor := &appsv1alpha1.ResourceDistribution{}
|
||||
if err := r.Client.Get(context.TODO(), req.NamespacedName, distributor); err != nil {
|
||||
|
@ -191,13 +191,13 @@ func (r *ReconcileResourceDistribution) Reconcile(_ context.Context, req ctrl.Re
|
|||
func (r *ReconcileResourceDistribution) doReconcile(distributor *appsv1alpha1.ResourceDistribution) (ctrl.Result, error) {
|
||||
resource, errs := utils.DeserializeResource(&distributor.Spec.Resource, field.NewPath("resource"))
|
||||
if len(errs) != 0 || resource == nil {
|
||||
klog.Errorf("DeserializeResource error: %v, name: %s", errs.ToAggregate(), distributor.Name)
|
||||
klog.ErrorS(errs.ToAggregate(), "DeserializeResource error", "resourceDistribution", klog.KObj(distributor))
|
||||
return reconcile.Result{}, nil // no need to retry
|
||||
}
|
||||
|
||||
matchedNamespaces, unmatchedNamespaces, err := listNamespacesForDistributor(r.Client, &distributor.Spec.Targets)
|
||||
if err != nil {
|
||||
klog.Errorf("listNamespaceForDistributor error: %v, name: %s", err, distributor.Name)
|
||||
klog.ErrorS(err, "Failed to list namespace for ResourceDistributor", "resourceDistribution", klog.KObj(distributor))
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
|
@ -240,7 +240,7 @@ func (r *ReconcileResourceDistribution) distributeResource(distributor *appsv1al
|
|||
oldResource.SetGroupVersionKind(resource.GetObjectKind().GroupVersionKind())
|
||||
getErr := r.Client.Get(context.TODO(), types.NamespacedName{Namespace: namespace, Name: resourceName}, oldResource)
|
||||
if getErr != nil && !errors.IsNotFound(getErr) {
|
||||
klog.Errorf("Error occurred when getting resource in namespace %s, err: %v, name: %s", namespace, getErr, distributor.Name)
|
||||
klog.ErrorS(getErr, "Error occurred when getting resource in namespace", "namespace", namespace, "resourceDistribution", klog.KObj(distributor))
|
||||
return &UnexpectedError{
|
||||
err: getErr,
|
||||
namespace: namespace,
|
||||
|
@ -252,20 +252,20 @@ func (r *ReconcileResourceDistribution) distributeResource(distributor *appsv1al
|
|||
if getErr != nil && errors.IsNotFound(getErr) {
|
||||
newResource := makeResourceObject(distributor, namespace, resource, resourceHashCode, nil)
|
||||
if createErr := r.Client.Create(context.TODO(), newResource.(client.Object)); createErr != nil {
|
||||
klog.Errorf("Error occurred when creating resource in namespace %s, err: %v, name: %s", namespace, createErr, distributor.Name)
|
||||
klog.ErrorS(createErr, "Error occurred when creating resource in namespace", "namespace", namespace, "resourceDistribution", klog.KObj(distributor))
|
||||
return &UnexpectedError{
|
||||
err: createErr,
|
||||
namespace: namespace,
|
||||
conditionID: CreateConditionID,
|
||||
}
|
||||
}
|
||||
klog.V(3).Infof("ResourceDistribution(%s) created (%s/%s) in namespaces %s", distributor.Name, resourceKind, resourceName, namespace)
|
||||
klog.V(3).InfoS("ResourceDistribution created resource in namespace", "resourceDistribution", klog.KObj(distributor), "resourceKind", resourceKind, "resourceName", resourceName, "namespace", namespace)
|
||||
return nil
|
||||
}
|
||||
|
||||
// 3. check conflict
|
||||
if !isControlledByDistributor(oldResource, distributor) {
|
||||
klog.Errorf("Conflict with existing resource(%s/%s) in namespaces %s, name: %s", resourceKind, resourceName, namespace, distributor.Name)
|
||||
klog.InfoS("Conflict with existing resource in namespace", "resourceKind", resourceKind, "resourceName", resourceName, "namespace", namespace, "resourceDistribution", klog.KObj(distributor))
|
||||
return &UnexpectedError{
|
||||
err: fmt.Errorf("conflict with existing resources because of the same namespace, group, version, kind and name"),
|
||||
namespace: namespace,
|
||||
|
@ -277,14 +277,14 @@ func (r *ReconcileResourceDistribution) distributeResource(distributor *appsv1al
|
|||
if needToUpdate(oldResource, utils.ConvertToUnstructured(resource)) {
|
||||
newResource := makeResourceObject(distributor, namespace, resource, resourceHashCode, oldResource)
|
||||
if updateErr := r.Client.Update(context.TODO(), newResource.(client.Object)); updateErr != nil {
|
||||
klog.Errorf("Error occurred when updating resource in namespace %s, err: %v, name: %s", namespace, updateErr, distributor.Name)
|
||||
klog.ErrorS(updateErr, "Error occurred when updating resource in namespace", "namespace", namespace, "resourceDistribution", klog.KObj(distributor))
|
||||
return &UnexpectedError{
|
||||
err: updateErr,
|
||||
namespace: namespace,
|
||||
conditionID: UpdateConditionID,
|
||||
}
|
||||
}
|
||||
klog.V(3).Infof("ResourceDistribution(%s) updated (%s/%s) for namespaces %s", distributor.Name, resourceKind, resourceName, namespace)
|
||||
klog.V(3).InfoS("ResourceDistribution updated for namespaces", "resourceDistribution", klog.KObj(distributor), "resourceKind", resourceKind, "resourceName", resourceName, "namespace", namespace)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
@ -309,7 +309,7 @@ func (r *ReconcileResourceDistribution) cleanResource(distributor *appsv1alpha1.
|
|||
if errors.IsNotFound(getErr) {
|
||||
return nil
|
||||
}
|
||||
klog.Errorf("Error occurred when getting resource in namespace %s, err: %v, name: %s", namespace, getErr, distributor.Name)
|
||||
klog.ErrorS(getErr, "Error occurred when getting resource in namespace", "namespace", namespace, "resourceDistribution", klog.KObj(distributor))
|
||||
return &UnexpectedError{
|
||||
err: getErr,
|
||||
namespace: namespace,
|
||||
|
@ -324,14 +324,14 @@ func (r *ReconcileResourceDistribution) cleanResource(distributor *appsv1alpha1.
|
|||
|
||||
// 3. else clean the resource
|
||||
if deleteErr := r.Client.Delete(context.TODO(), oldResource); deleteErr != nil && !errors.IsNotFound(deleteErr) {
|
||||
klog.Errorf("Error occurred when deleting resource in namespace %s from client, err: %v, name: %s", namespace, deleteErr, distributor.Name)
|
||||
klog.ErrorS(deleteErr, "Error occurred when deleting resource in namespace from client", "namespace", namespace, "resourceDistribution", klog.KObj(distributor))
|
||||
return &UnexpectedError{
|
||||
err: deleteErr,
|
||||
namespace: namespace,
|
||||
conditionID: DeleteConditionID,
|
||||
}
|
||||
}
|
||||
klog.V(3).Infof("ResourceDistribution(%s) deleted (%s/%s) in namespaces %s", distributor.Name, resourceKind, resourceName, namespace)
|
||||
klog.V(3).InfoS("ResourceDistribution deleted in namespace", "resourceDistribution", klog.KObj(distributor), "resourceKind", resourceKind, "resourceName", resourceName, "namespace", namespace)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
|
|
@ -61,7 +61,7 @@ func (p *enqueueRequestForNamespace) addNamespace(q workqueue.RateLimitingInterf
|
|||
|
||||
resourceDistributions, err := p.getNamespaceMatchedResourceDistributions(namespace, fn)
|
||||
if err != nil {
|
||||
klog.Errorf("unable to get the ResourceDistributions related with namespace %s, err: %v", namespace.Name, err)
|
||||
klog.ErrorS(err, "Unable to get the ResourceDistributions related with namespace", "namespace", namespace.Name)
|
||||
return
|
||||
}
|
||||
addMatchedResourceDistributionToWorkQueue(q, resourceDistributions)
|
||||
|
|
|
@ -90,7 +90,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
|
|||
oldScS := e.ObjectOld.(*appsv1alpha1.SidecarSet)
|
||||
newScS := e.ObjectNew.(*appsv1alpha1.SidecarSet)
|
||||
if oldScS.GetGeneration() != newScS.GetGeneration() {
|
||||
klog.V(3).Infof("Observed updated Spec for SidecarSet: %s/%s", newScS.GetNamespace(), newScS.GetName())
|
||||
klog.V(3).InfoS("Observed updated Spec for SidecarSet", "sidecarSet", klog.KObj(newScS))
|
||||
return true
|
||||
}
|
||||
return false
|
||||
|
@ -138,6 +138,6 @@ func (r *ReconcileSidecarSet) Reconcile(_ context.Context, request reconcile.Req
|
|||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
klog.V(3).Infof("begin to process sidecarset %v for reconcile", sidecarSet.Name)
|
||||
klog.V(3).InfoS("Began to process sidecarset for reconcile", "sidecarSet", klog.KObj(sidecarSet))
|
||||
return r.processor.UpdateSidecarSet(sidecarSet)
|
||||
}
|
||||
|
|
|
@ -58,7 +58,7 @@ func (p *Processor) flipPodSidecarContainer(control sidecarcontrol.SidecarContro
|
|||
Name: podClone.Name,
|
||||
}
|
||||
if err := p.Client.Get(context.TODO(), key, podClone); err != nil {
|
||||
klog.Errorf("error getting updated pod(%s/%s) from client", podClone.Namespace, podClone.Name)
|
||||
klog.ErrorS(err, "Failed to get updated pod from client", "pod", klog.KObj(podClone))
|
||||
}
|
||||
return updateErr
|
||||
})
|
||||
|
@ -83,8 +83,8 @@ func flipPodSidecarContainerDo(control sidecarcontrol.SidecarControl, pod *corev
|
|||
}
|
||||
// flip the empty sidecar container image
|
||||
containerNeedFlip := containersInPod[emptyContainer]
|
||||
klog.V(3).Infof("try to reset %v/%v/%v from %s to empty(%s)", pod.Namespace, pod.Name, containerNeedFlip.Name,
|
||||
containerNeedFlip.Image, sidecarContainer.UpgradeStrategy.HotUpgradeEmptyImage)
|
||||
klog.V(3).InfoS("Tried to reset container's image to empty", "pod", klog.KObj(pod), "containerName", containerNeedFlip.Name,
|
||||
"imageName", containerNeedFlip.Image, "hotUpgradeEmptyImageName", sidecarContainer.UpgradeStrategy.HotUpgradeEmptyImage)
|
||||
containerNeedFlip.Image = sidecarContainer.UpgradeStrategy.HotUpgradeEmptyImage
|
||||
changedContainer = append(changedContainer, containerNeedFlip.Name)
|
||||
// update pod sidecarSet version annotations
|
||||
|
|
|
@ -51,7 +51,7 @@ func (p *enqueueRequestForPod) deletePod(obj runtime.Object) {
|
|||
|
||||
sidecarSets, err := p.getPodMatchedSidecarSets(pod)
|
||||
if err != nil {
|
||||
klog.Errorf("unable to get sidecarSets related with pod %s/%s, err: %v", pod.Namespace, pod.Name, err)
|
||||
klog.ErrorS(err, "Unable to get sidecarSets related with pod", "pod", klog.KObj(pod))
|
||||
return
|
||||
}
|
||||
for _, sidecarSet := range sidecarSets {
|
||||
|
@ -69,12 +69,12 @@ func (p *enqueueRequestForPod) addPod(q workqueue.RateLimitingInterface, obj run
|
|||
|
||||
sidecarSets, err := p.getPodMatchedSidecarSets(pod)
|
||||
if err != nil {
|
||||
klog.Errorf("unable to get sidecarSets related with pod %s/%s, err: %v", pod.Namespace, pod.Name, err)
|
||||
klog.ErrorS(err, "Unable to get sidecarSets related with pod", "pod", klog.KObj(pod))
|
||||
return
|
||||
}
|
||||
|
||||
for _, sidecarSet := range sidecarSets {
|
||||
klog.V(3).Infof("Create pod(%s/%s) and reconcile sidecarSet(%s)", pod.Namespace, pod.Name, sidecarSet.Name)
|
||||
klog.V(3).InfoS("Created pod and reconcile sidecarSet", "pod", klog.KObj(pod), "sidecarSet", klog.KObj(sidecarSet))
|
||||
q.Add(reconcile.Request{
|
||||
NamespacedName: types.NamespacedName{
|
||||
Name: sidecarSet.Name,
|
||||
|
@ -92,7 +92,7 @@ func (p *enqueueRequestForPod) updatePod(q workqueue.RateLimitingInterface, old,
|
|||
|
||||
matchedSidecarSets, err := p.getPodMatchedSidecarSets(newPod)
|
||||
if err != nil {
|
||||
klog.Errorf("unable to get sidecarSets of pod %s/%s, err: %v", newPod.Namespace, newPod.Name, err)
|
||||
klog.ErrorS(err, "Unable to get sidecarSets of pod", "pod", klog.KObj(newPod))
|
||||
return
|
||||
}
|
||||
for _, sidecarSet := range matchedSidecarSets {
|
||||
|
@ -128,7 +128,7 @@ func (p *enqueueRequestForPod) getPodMatchedSidecarSets(pod *corev1.Pod) ([]*app
|
|||
Name: sidecarSetName,
|
||||
}, sidecarSet); err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
klog.V(6).Infof("pod(%s/%s) sidecarSet(%s) Not Found", pod.Namespace, pod.Name, sidecarSetName)
|
||||
klog.V(6).InfoS("Could not find SidecarSet for Pod", "pod", klog.KObj(pod), "sidecarSetName", sidecarSetName)
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
|
@ -162,7 +162,7 @@ func (p *enqueueRequestForPod) getPodMatchedSidecarSets(pod *corev1.Pod) ([]*app
|
|||
func isPodStatusChanged(oldPod, newPod *corev1.Pod) bool {
|
||||
// If the pod's deletion timestamp is set, remove endpoint from ready address.
|
||||
if oldPod.DeletionTimestamp.IsZero() && !newPod.DeletionTimestamp.IsZero() {
|
||||
klog.V(3).Infof("pod(%s/%s) DeletionTimestamp changed, and reconcile sidecarSet", newPod.Namespace, newPod.Name)
|
||||
klog.V(3).InfoS("Pod DeletionTimestamp changed, and reconcile sidecarSet", "pod", klog.KObj(newPod))
|
||||
return true
|
||||
// oldPod Deletion is set, then no reconcile
|
||||
} else if !oldPod.DeletionTimestamp.IsZero() {
|
||||
|
@ -176,8 +176,7 @@ func isPodStatusChanged(oldPod, newPod *corev1.Pod) bool {
|
|||
oldReady := podutil.IsPodReady(oldPod)
|
||||
newReady := podutil.IsPodReady(newPod)
|
||||
if oldReady != newReady {
|
||||
klog.V(3).Infof("pod(%s/%s) Ready changed(from %v to %v), and reconcile sidecarSet",
|
||||
newPod.Namespace, newPod.Name, oldReady, newReady)
|
||||
klog.V(3).InfoS("Pod Ready changed, and reconcile SidecarSet", "pod", klog.KObj(newPod), "oldReady", oldReady, "newReady", newReady)
|
||||
return true
|
||||
}
|
||||
|
||||
|
@ -191,16 +190,16 @@ func isPodConsistentChanged(oldPod, newPod *corev1.Pod, sidecarSet *appsv1alpha1
|
|||
oldConsistent := control.IsPodStateConsistent(oldPod, nil)
|
||||
newConsistent := control.IsPodStateConsistent(newPod, nil)
|
||||
if oldConsistent != newConsistent {
|
||||
klog.V(3).Infof("pod(%s/%s) sidecar containers consistent changed(from %v to %v), and reconcile sidecarSet(%s)",
|
||||
newPod.Namespace, newPod.Name, oldConsistent, newConsistent, sidecarSet.Name)
|
||||
klog.V(3).InfoS("Pod's sidecar containers consistent changed, and reconcile SidecarSet",
|
||||
"pod", klog.KObj(newPod), "oldConsistent", oldConsistent, "newConsistent", newConsistent, "sidecarSet", klog.KObj(sidecarSet))
|
||||
enqueueDelayTime = 5 * time.Second
|
||||
return true, enqueueDelayTime
|
||||
}
|
||||
|
||||
// If the pod's labels changed, and sidecarSet enable selector updateStrategy, should reconcile.
|
||||
if !reflect.DeepEqual(oldPod.Labels, newPod.Labels) && sidecarSet.Spec.UpdateStrategy.Selector != nil {
|
||||
klog.V(3).Infof("pod(%s/%s) Labels changed and sidecarSet (%s) enable selector upgrade strategy, "+
|
||||
"and reconcile sidecarSet", newPod.Namespace, newPod.Name, sidecarSet.Name)
|
||||
klog.V(3).InfoS("Pod's Labels changed and SidecarSet enable selector upgrade strategy, and reconcile SidecarSet",
|
||||
"pod", klog.KObj(newPod), "sidecarSet", klog.KObj(sidecarSet))
|
||||
return true, 0
|
||||
}
|
||||
|
||||
|
|
|
@ -74,7 +74,7 @@ func (p *Processor) UpdateSidecarSet(sidecarSet *appsv1alpha1.SidecarSet) (recon
|
|||
// 1. get matching pods with the sidecarSet
|
||||
pods, err := p.getMatchingPods(sidecarSet)
|
||||
if err != nil {
|
||||
klog.Errorf("sidecarSet get matching pods error, err: %v, name: %s", err, sidecarSet.Name)
|
||||
klog.ErrorS(err, "SidecarSet get matching pods error", "sidecarSet", klog.KObj(sidecarSet))
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
|
@ -82,7 +82,7 @@ func (p *Processor) UpdateSidecarSet(sidecarSet *appsv1alpha1.SidecarSet) (recon
|
|||
// return the latest revision that corresponds to this sidecarSet.
|
||||
latestRevision, collisionCount, err := p.registerLatestRevision(sidecarSet, pods)
|
||||
if latestRevision == nil {
|
||||
klog.Errorf("sidecarSet register the latest revision error, err: %v, name: %s", err, sidecarSet.Name)
|
||||
klog.ErrorS(err, "SidecarSet register the latest revision error", "sidecarSet", klog.KObj(sidecarSet))
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
|
@ -100,7 +100,7 @@ func (p *Processor) UpdateSidecarSet(sidecarSet *appsv1alpha1.SidecarSet) (recon
|
|||
}
|
||||
allUpdated, _, inflightPods := sidecarcontrol.UpdateExpectations.SatisfiedExpectations(sidecarSet.Name, sidecarcontrol.GetSidecarSetRevision(sidecarSet))
|
||||
if !allUpdated {
|
||||
klog.V(3).Infof("sidecarset %s matched pods has some update in flight: %v, will sync later", sidecarSet.Name, inflightPods)
|
||||
klog.V(3).InfoS("Sidecarset matched pods has some update in flight, will sync later", "sidecarSet", klog.KObj(sidecarSet), "pods", inflightPods)
|
||||
return reconcile.Result{RequeueAfter: time.Second}, nil
|
||||
}
|
||||
|
||||
|
@ -138,13 +138,13 @@ func (p *Processor) UpdateSidecarSet(sidecarSet *appsv1alpha1.SidecarSet) (recon
|
|||
|
||||
// 5. sidecarset already updates all matched pods, then return
|
||||
if isSidecarSetUpdateFinish(status) {
|
||||
klog.V(3).Infof("sidecarSet(%s) matched pods(number=%d) are latest, and don't need update", sidecarSet.Name, len(pods))
|
||||
klog.V(3).InfoS("SidecarSet matched pods were latest, and don't need update", "sidecarSet", klog.KObj(sidecarSet), "matchedPodCount", len(pods))
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// 6. Paused indicates that the SidecarSet is paused to update matched pods
|
||||
if sidecarSet.Spec.UpdateStrategy.Paused {
|
||||
klog.V(3).Infof("sidecarSet is paused, name: %s", sidecarSet.Name)
|
||||
klog.V(3).InfoS("SidecarSet was paused", "sidecarSet", klog.KObj(sidecarSet))
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
|
@ -161,7 +161,7 @@ func (p *Processor) updatePods(control sidecarcontrol.SidecarControl, pods []*co
|
|||
upgradePods, notUpgradablePods := NewStrategy().GetNextUpgradePods(control, pods)
|
||||
for _, pod := range notUpgradablePods {
|
||||
if err := p.updatePodSidecarSetUpgradableCondition(sidecarset, pod, false); err != nil {
|
||||
klog.Errorf("update NotUpgradable PodCondition error, s:%s, pod:%s, err:%v", sidecarset.Name, pod.Name, err)
|
||||
klog.ErrorS(err, "Failed to update NotUpgradable PodCondition", "sidecarSet", klog.KObj(sidecarset), "pod", klog.KObj(pod))
|
||||
return err
|
||||
}
|
||||
// Since the pod sidecarSet hash is not updated here, it cannot be called ExpectUpdated
|
||||
|
@ -172,7 +172,7 @@ func (p *Processor) updatePods(control sidecarcontrol.SidecarControl, pods []*co
|
|||
}
|
||||
|
||||
if len(upgradePods) == 0 {
|
||||
klog.V(3).Infof("sidecarSet next update is nil, skip this round, name: %s", sidecarset.Name)
|
||||
klog.V(3).InfoS("SidecarSet next update was nil, skip this round", "sidecarSet", klog.KObj(sidecarset))
|
||||
return nil
|
||||
}
|
||||
// mark upgrade pods list
|
||||
|
@ -181,13 +181,13 @@ func (p *Processor) updatePods(control sidecarcontrol.SidecarControl, pods []*co
|
|||
for _, pod := range upgradePods {
|
||||
podNames = append(podNames, pod.Name)
|
||||
if err := p.updatePodSidecarAndHash(control, pod); err != nil {
|
||||
klog.Errorf("updatePodSidecarAndHash error, s:%s, pod:%s, err:%v", sidecarset.Name, pod.Name, err)
|
||||
klog.ErrorS(err, "UpdatePodSidecarAndHash error", "sidecarSet", klog.KObj(sidecarset), "pod", klog.KObj(pod))
|
||||
return err
|
||||
}
|
||||
sidecarcontrol.UpdateExpectations.ExpectUpdated(sidecarset.Name, sidecarcontrol.GetSidecarSetRevision(sidecarset), pod)
|
||||
}
|
||||
|
||||
klog.V(3).Infof("sidecarSet(%s) updated pods(%s)", sidecarset.Name, strings.Join(podNames, ","))
|
||||
klog.V(3).InfoS("SidecarSet updated pods", "sidecarSet", klog.KObj(sidecarset), "podNames", strings.Join(podNames, ","))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -196,7 +196,7 @@ func (p *Processor) updatePodSidecarAndHash(control sidecarcontrol.SidecarContro
|
|||
sidecarSet := control.GetSidecarset()
|
||||
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
if err := p.Client.Get(context.TODO(), types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name}, podClone); err != nil {
|
||||
klog.Errorf("sidecarset(%s) error getting updated pod %s/%s from client", control.GetSidecarset().Name, pod.Namespace, pod.Name)
|
||||
klog.ErrorS(err, "SidecarSet got updated pod from client failed", "sidecarSet", klog.KObj(sidecarSet), "pod", klog.KObj(pod))
|
||||
}
|
||||
// update pod sidecar container
|
||||
updatePodSidecarContainer(control, podClone)
|
||||
|
@ -209,7 +209,7 @@ func (p *Processor) updatePodSidecarAndHash(control sidecarcontrol.SidecarContro
|
|||
// patch pod metadata
|
||||
_, err := sidecarcontrol.PatchPodMetadata(&podClone.ObjectMeta, sidecarSet.Spec.PatchPodMetadata)
|
||||
if err != nil {
|
||||
klog.Errorf("sidecarSet(%s) patch pod(%s/%s) metadata failed: %s", sidecarSet.Name, podClone.Namespace, podClone.Name, err.Error())
|
||||
klog.ErrorS(err, "SidecarSet patched pod metadata failed", "sidecarSet", klog.KObj(sidecarSet), "pod", klog.KObj(podClone))
|
||||
return err
|
||||
}
|
||||
// update pod in store
|
||||
|
@ -232,11 +232,11 @@ func (p *Processor) listMatchedSidecarSets(pod *corev1.Pod) string {
|
|||
podNamespace = "default"
|
||||
}
|
||||
if err := p.Client.List(context.TODO(), sidecarSetList, client.MatchingFields{fieldindex.IndexNameForSidecarSetNamespace: podNamespace}, utilclient.DisableDeepCopy); err != nil {
|
||||
klog.Errorf("List SidecarSets failed: %s", err.Error())
|
||||
klog.ErrorS(err, "Listed SidecarSets failed")
|
||||
return ""
|
||||
}
|
||||
if err := p.Client.List(context.TODO(), sidecarSetList2, client.MatchingFields{fieldindex.IndexNameForSidecarSetNamespace: fieldindex.IndexValueSidecarSetClusterScope}, utilclient.DisableDeepCopy); err != nil {
|
||||
klog.Errorf("List SidecarSets failed: %s", err.Error())
|
||||
klog.ErrorS(err, "Listed SidecarSets failed")
|
||||
return ""
|
||||
}
|
||||
|
||||
|
@ -270,15 +270,15 @@ func (p *Processor) updateSidecarSetStatus(sidecarSet *appsv1alpha1.SidecarSet,
|
|||
Name: sidecarSetClone.Name,
|
||||
}
|
||||
if err := p.Client.Get(context.TODO(), key, sidecarSetClone); err != nil {
|
||||
klog.Errorf("error getting updated sidecarset %s from client", sidecarSetClone.Name)
|
||||
klog.ErrorS(err, "Failed to get updated SidecarSet from client", "sidecarSet", klog.KObj(sidecarSetClone))
|
||||
}
|
||||
return updateErr
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
klog.V(3).Infof("sidecarSet(%s) update status(MatchedPods:%d, UpdatedPods:%d, ReadyPods:%d, UpdatedReadyPods:%d) success",
|
||||
sidecarSet.Name, status.MatchedPods, status.UpdatedPods, status.ReadyPods, status.UpdatedReadyPods)
|
||||
klog.V(3).InfoS("SidecarSet updated status success", "sidecarSet", klog.KObj(sidecarSet), "matchedPods", status.MatchedPods,
|
||||
"updatedPods", status.UpdatedPods, "readyPods", status.ReadyPods, "updateReadyPods", status.UpdatedReadyPods)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -344,7 +344,7 @@ func (p *Processor) registerLatestRevision(set *appsv1alpha1.SidecarSet, pods []
|
|||
// list all revisions
|
||||
revisions, err := p.historyController.ListControllerRevisions(sidecarcontrol.MockSidecarSetForRevision(set), hc.GetRevisionSelector(sidecarSet))
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to list history controllerRevisions, err %v, name %v", err, sidecarSet.Name)
|
||||
klog.ErrorS(err, "Failed to list history controllerRevisions", "sidecarSet", klog.KObj(sidecarSet))
|
||||
return nil, collisionCount, err
|
||||
}
|
||||
|
||||
|
@ -399,7 +399,7 @@ func (p *Processor) registerLatestRevision(set *appsv1alpha1.SidecarSet, pods []
|
|||
|
||||
// only store limited history revisions
|
||||
if err = p.truncateHistory(revisions, sidecarSet, pods); err != nil {
|
||||
klog.Errorf("Failed to truncate history for %s: err: %v", sidecarSet.Name, err)
|
||||
klog.ErrorS(err, "Failed to truncate history for SidecarSet", "sidecarSet", klog.KObj(sidecarSet))
|
||||
}
|
||||
|
||||
return latestRevision, collisionCount, nil
|
||||
|
@ -416,7 +416,7 @@ func (p *Processor) updateCustomVersionLabel(revision *apps.ControllerRevision,
|
|||
patchBody := fmt.Sprintf(`{"metadata":{"labels":{"%v":"%v"}}}`, appsv1alpha1.SidecarSetCustomVersionLabel, customVersion)
|
||||
err := p.Client.Patch(context.TODO(), newRevision, client.RawPatch(types.StrategicMergePatchType, []byte(patchBody)))
|
||||
if err != nil {
|
||||
klog.Errorf(`Failed to patch custom revision label "%v" to latest revision %v, err: %v`, revision.Name, customVersion, err)
|
||||
klog.ErrorS(err, `Failed to patch custom revision label to latest revision`, "revision", klog.KObj(revision), "customVersion", customVersion)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -436,7 +436,7 @@ func (p *Processor) truncateHistory(revisions []*apps.ControllerRevision, s *app
|
|||
return nil
|
||||
}
|
||||
|
||||
klog.V(3).Infof("Find %v revisions more than limitation %v, name: %v", revisionCount, limitation, s.Name)
|
||||
klog.V(3).InfoS("Found revisions more than limitation", "revisionCount", revisionCount, "limitation", limitation, "sidecarSet", klog.KObj(s))
|
||||
|
||||
// the number of revisions need to delete
|
||||
deletionCount := revisionCount - limitation
|
||||
|
@ -545,7 +545,7 @@ func calculateStatus(control sidecarcontrol.SidecarControl, pods []*corev1.Pod,
|
|||
|
||||
func isSidecarSetNotUpdate(s *appsv1alpha1.SidecarSet) bool {
|
||||
if s.Spec.UpdateStrategy.Type == appsv1alpha1.NotUpdateSidecarSetStrategyType {
|
||||
klog.V(3).Infof("sidecarSet spreading RollingUpdate config type, name: %s, type: %s", s.Name, s.Spec.UpdateStrategy.Type)
|
||||
klog.V(3).InfoS("SidecarSet spreading RollingUpdate config type", "sidecarSet", klog.KObj(s), "type", s.Spec.UpdateStrategy.Type)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
|
@@ -683,6 +683,6 @@ func (p *Processor) updatePodSidecarSetUpgradableCondition(sidecarset *appsv1alp
if err != nil {
return err
}
klog.V(3).Infof("sidecarSet(%s) update pod %s/%s condition(%s=%s) success", sidecarset.Name, pod.Namespace, pod.Name, sidecarcontrol.SidecarSetUpgradable, condition.Status)
klog.V(3).InfoS("SidecarSet updated pod condition success", "sidecarSet", klog.KObj(sidecarset), "pod", klog.KObj(pod), "conditionType", sidecarcontrol.SidecarSetUpgradable, "conditionStatus", condition.Status)
return nil
}
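Throughout the SidecarSet processor, an object already in hand is logged with klog.KObj, while plain namespace/name strings go through klog.KRef; both render as namespace/name. A brief sketch of the difference, using placeholder objects and names.

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "kruise-system", Name: "demo"}}

	// KObj reads namespace/name from the object itself.
	klog.InfoS("Updated object", "configMap", klog.KObj(cm))

	// KRef is for when only the coordinates are known, e.g. parsed from a
	// request name like generate#apps/v1#StatefulSet#echoserver.
	klog.InfoS("Created persistentPodState for workload", "statefulSet", klog.KRef("kruise-system", "echoserver"))
}
```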
@ -53,7 +53,7 @@ func (p *spreadingStrategy) GetNextUpgradePods(control sidecarcontrol.SidecarCon
|
|||
// if selector failed, always return false
|
||||
selector, err := util.ValidatedLabelSelectorAsSelector(strategy.Selector)
|
||||
if err != nil {
|
||||
klog.Errorf("sidecarSet(%s) rolling selector error, err: %v", sidecarset.Name, err)
|
||||
klog.ErrorS(err, "SidecarSet rolling selector error", "sidecarSet", klog.KObj(sidecarset))
|
||||
return false
|
||||
}
|
||||
//matched
|
||||
|
@ -82,7 +82,8 @@ func (p *spreadingStrategy) GetNextUpgradePods(control sidecarcontrol.SidecarCon
|
|||
}
|
||||
}
|
||||
|
||||
klog.V(3).Infof("sidecarSet(%s) matchedPods(%d) waitUpdated(%d) notUpgradable(%d)", sidecarset.Name, len(pods), len(waitUpgradedIndexes), len(notUpgradableIndexes))
|
||||
klog.V(3).InfoS("SidecarSet's pods status", "sidecarSet", klog.KObj(sidecarset), "matchedPods", len(pods),
|
||||
"waitUpdated", len(waitUpgradedIndexes), "notUpgradable", len(notUpgradableIndexes))
|
||||
//2. sort Pods with default sequence and scatter
|
||||
waitUpgradedIndexes = SortUpdateIndexes(strategy, pods, waitUpgradedIndexes)
|
||||
|
||||
|
|
|
@ -55,11 +55,9 @@ func (r *ReconcileSidecarTerminator) executeInPlaceUpdateAction(originalPod *cor
|
|||
})
|
||||
|
||||
if err != nil {
|
||||
klog.Errorf("SidecarTerminator -- Error occurred when inPlace update pod(%v/%v), error %v",
|
||||
originalPod.Namespace, originalPod.Name, err)
|
||||
klog.ErrorS(err, "SidecarTerminator -- Error occurred when inPlace update pod", "pod", klog.KObj(originalPod))
|
||||
} else {
|
||||
klog.V(3).Infof("SidecarTerminator -- InPlace update pod(%v/%v) successfully",
|
||||
originalPod.Namespace, originalPod.Name)
|
||||
klog.V(3).InfoS("SidecarTerminator -- InPlace update pod successfully", "pod", klog.KObj(originalPod))
|
||||
|
||||
r.recorder.Eventf(originalPod, corev1.EventTypeNormal, "SidecarTerminator",
|
||||
"Kruise SidecarTerminator is trying to terminate sidecar %v using in-place update", uncompletedSidecars.List())
|
||||
|
|
|
@ -39,10 +39,10 @@ func (r *ReconcileSidecarTerminator) executeKillContainerAction(pod *corev1.Pod,
|
|||
existingCRR := &appsv1alpha1.ContainerRecreateRequest{}
|
||||
err := r.Get(context.TODO(), types.NamespacedName{Namespace: pod.Namespace, Name: getCRRName(pod)}, existingCRR)
|
||||
if err == nil {
|
||||
klog.V(3).Infof("SidecarTerminator -- CRR(%s/%s) exists, waiting for this CRR to complete", existingCRR.Namespace, existingCRR.Name)
|
||||
klog.V(3).InfoS("SidecarTerminator -- CRR exists, waiting for this CRR to complete", "containerRecreateRequest", klog.KObj(existingCRR))
|
||||
return nil
|
||||
} else if client.IgnoreNotFound(err) != nil {
|
||||
klog.Errorf("SidecarTerminator -- Error occurred when try to get CRR(%s/%s), error: %v", existingCRR.Namespace, existingCRR.Name, err)
|
||||
klog.ErrorS(err, "SidecarTerminator -- Error occurred when try to get CRR", "containerRecreateRequest", klog.KObj(existingCRR))
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -73,9 +73,9 @@ func (r *ReconcileSidecarTerminator) executeKillContainerAction(pod *corev1.Pod,
|
|||
|
||||
err = r.Create(context.TODO(), crr)
|
||||
if err != nil {
|
||||
klog.Errorf("SidecarTerminator -- Error occurred when creating CRR(%v/%v), error %v", crr.Namespace, crr.Name, err)
|
||||
klog.ErrorS(err, "SidecarTerminator -- Error occurred when creating", "containerRecreateRequest", klog.KObj(crr))
|
||||
} else {
|
||||
klog.V(3).Infof("SidecarTerminator -- Creating CRR(%v/%v) successfully", crr.Namespace, crr.Name)
|
||||
klog.V(3).InfoS("SidecarTerminator -- Creating CRR successfully", "containerRecreateRequest", klog.KObj(crr))
|
||||
|
||||
r.recorder.Eventf(pod, corev1.EventTypeNormal, "SidecarTerminator",
|
||||
"Kruise SidecarTerminator is trying to terminate sidecar %v using crr", uncompletedSidecars.List())
|
||||
|
|
|
@ -124,9 +124,9 @@ func (r *ReconcileSidecarTerminator) Reconcile(_ context.Context, request reconc
|
|||
}
|
||||
|
||||
start := time.Now()
|
||||
klog.V(3).Infof("SidecarTerminator -- begin to process pod %v for reconcile", request.NamespacedName)
|
||||
klog.V(3).InfoS("SidecarTerminator -- begin to process pod for reconcile", "pod", request)
|
||||
defer func() {
|
||||
klog.V(3).Infof("SidecarTerminator -- process pod %v done, cost: %v", request.NamespacedName, time.Since(start))
|
||||
klog.V(3).InfoS("SidecarTerminator -- process pod done", "pod", request, "cost", time.Since(start))
|
||||
}()
|
||||
|
||||
return r.doReconcile(pod)
|
||||
|
@ -168,7 +168,7 @@ func (r *ReconcileSidecarTerminator) markJobPodTerminated(pod *corev1.Pod) error
|
|||
// after the pod is terminated by the sidecar terminator, kubelet will kill the containers that are not in the terminal phase
|
||||
// 1. sidecar container terminate with non-zero exit code
|
||||
// 2. sidecar container is not in a terminal phase (still running or waiting)
|
||||
klog.V(3).Infof("all of the main containers are completed, will terminate the job pod %s/%s", pod.Namespace, pod.Name)
|
||||
klog.V(3).InfoS("All of the main containers are completed, will terminate the job pod", "pod", klog.KObj(pod))
|
||||
// terminate the pod, ignore the status of the sidecar containers.
|
||||
// in kubelet,pods are not allowed to transition out of terminal phases.
|
||||
|
||||
|
@ -190,7 +190,7 @@ func (r *ReconcileSidecarTerminator) markJobPodTerminated(pod *corev1.Pod) error
|
|||
} else {
|
||||
status.Phase = corev1.PodFailed
|
||||
}
|
||||
klog.V(3).Infof("terminate the job pod %s/%s phase=%s", pod.Namespace, pod.Name, status.Phase)
|
||||
klog.V(3).InfoS("Terminated the job pod", "pod", klog.KObj(pod), "phase", status.Phase)
|
||||
|
||||
by, _ := json.Marshal(status)
|
||||
patchCondition := fmt.Sprintf(`{"status":%s}`, string(by))
|
||||
|
|
|
@ -215,7 +215,7 @@ func (spc *StatefulPodControl) ClaimsMatchRetentionPolicy(set *appsv1beta1.State
|
|||
claim, err := spc.objectMgr.GetClaim(set.Namespace, claimName)
|
||||
switch {
|
||||
case apierrors.IsNotFound(err):
|
||||
klog.V(4).Infof("Expected claim %s missing, continuing to pick up in next iteration", claimName)
|
||||
klog.V(4).InfoS("Expected claim missing, continuing to pick up in next iteration", "claimName", claimName)
|
||||
case err != nil:
|
||||
return false, fmt.Errorf("Could not retrieve claim %s for %s when checking PVC deletion policy", claimName, pod.Name)
|
||||
default:
|
||||
|
@ -236,7 +236,7 @@ func (spc *StatefulPodControl) UpdatePodClaimForRetentionPolicy(set *appsv1beta1
|
|||
claim, err := spc.objectMgr.GetClaim(set.Namespace, claimName)
|
||||
switch {
|
||||
case apierrors.IsNotFound(err):
|
||||
klog.V(4).Infof("Expected claim %s missing, continuing to pick up in next iteration.", claimName)
|
||||
klog.V(4).InfoS("Expected claim missing, continuing to pick up in next iteration", "claimName", claimName)
|
||||
case err != nil:
|
||||
return fmt.Errorf("Could not retrieve claim %s not found for %s when checking PVC deletion policy: %w", claimName, pod.Name, err)
|
||||
default:
|
||||
|
|
|
@ -532,7 +532,7 @@ func (ssc *defaultStatefulSetControl) rollingUpdateStatefulsetPods(

// If update expectations have not satisfied yet, skip updating pods
if updateSatisfied, _, updateDirtyPods := updateExpectations.SatisfiedExpectations(getStatefulSetKey(set), updateRevision.Name); !updateSatisfied {
klog.V(4).Infof("Not satisfied update for %v, updateDirtyPods=%v", getStatefulSetKey(set), updateDirtyPods)
klog.V(4).InfoS("Not satisfied update for statefulSet", "statefulSet", klog.KObj(set), "updateDirtyPods", updateDirtyPods)
return status, nil
}

@ -589,11 +589,8 @@ func (ssc *defaultStatefulSetControl) rollingUpdateStatefulsetPods(
unavailablePods.Insert(replicas[target].Name)
} else if completedErr := opts.CheckPodUpdateCompleted(replicas[target]); completedErr != nil {
// 2. count pod as unavailable if it's in-place updating and not ready
klog.V(4).Infof("StatefulSet %s/%s check Pod %s in-place update not-ready: %v",
set.Namespace,
set.Name,
replicas[target].Name,
completedErr)
klog.V(4).ErrorS(completedErr, "StatefulSet found Pod in-place update not-ready",
"statefulSet", klog.KObj(set), "pod", klog.KObj(replicas[target]))
unavailablePods.Insert(replicas[target].Name)
} else if isAvailable, waitTime := isRunningAndAvailable(replicas[target], minReadySeconds); !isAvailable {
// 3. count pod as unavailable if it's not available yet given the minReadySeconds requirement

@ -607,7 +604,7 @@ func (ssc *defaultStatefulSetControl) rollingUpdateStatefulsetPods(
}

updateIndexes := sortPodsToUpdate(set.Spec.UpdateStrategy.RollingUpdate, updateRevision.Name, *set.Spec.Replicas, replicas)
klog.V(3).Infof("Prepare to update pods indexes %v for StatefulSet %s", updateIndexes, getStatefulSetKey(set))
klog.V(3).InfoS("Prepare to update pods indexes for StatefulSet", "statefulSet", klog.KObj(set), "podIndexes", updateIndexes)
// update pods in sequence
for _, target := range updateIndexes {

@ -619,12 +616,8 @@ func (ssc *defaultStatefulSetControl) rollingUpdateStatefulsetPods(
// the unavailable pods count exceed the maxUnavailable and the target is available, so we can't process it,
// wait for unhealthy Pods on update
if len(unavailablePods) >= maxUnavailable && !unavailablePods.Has(replicas[target].Name) {
klog.V(4).Infof(
"StatefulSet %s/%s is waiting for unavailable Pods %v to update, blocked pod %s",
set.Namespace,
set.Name,
unavailablePods.List(),
replicas[target].Name)
klog.V(4).InfoS("StatefulSet was waiting for unavailable Pods to update, blocked pod",
"statefulSet", klog.KObj(set), "unavailablePods", unavailablePods.List(), "blockedPod", klog.KObj(replicas[target]))
return status, nil
}

@ -636,10 +629,7 @@ func (ssc *defaultStatefulSetControl) rollingUpdateStatefulsetPods(
return status, inplaceUpdateErr
}
if !inplacing {
klog.V(2).Infof("StatefulSet %s/%s terminating Pod %s for update",
set.Namespace,
set.Name,
replicas[target].Name)
klog.V(2).InfoS("StatefulSet terminating Pod for update", "statefulSet", klog.KObj(set), "pod", klog.KObj(replicas[target]))
if _, err := ssc.deletePod(set, replicas[target]); err != nil {
return status, err
}
@ -662,8 +652,7 @@ func (ssc *defaultStatefulSetControl) deletePod(set *appsv1beta1.StatefulSet, po
if updated, _, err := ssc.lifecycleControl.UpdatePodLifecycle(pod, appspub.LifecycleStatePreparingDelete, markPodNotReady); err != nil {
return false, err
} else if updated {
klog.V(3).Infof("StatefulSet %s scaling update pod %s lifecycle to PreparingDelete",
getStatefulSetKey(set), pod.Name)
klog.V(3).InfoS("StatefulSet scaling update pod lifecycle to PreparingDelete", "statefulSet", klog.KObj(set), "pod", klog.KObj(pod))
return true, nil
}
return false, nil

@ -687,8 +676,8 @@ func (ssc *defaultStatefulSetControl) refreshPodState(set *appsv1beta1.StatefulS

res := ssc.inplaceControl.Refresh(pod, opts)
if res.RefreshErr != nil {
klog.Errorf("AdvancedStatefulSet %s failed to update pod %s condition for inplace: %v",
getStatefulSetKey(set), pod.Name, res.RefreshErr)
klog.ErrorS(res.RefreshErr, "AdvancedStatefulSet failed to update pod condition for inplace",
"statefulSet", klog.KObj(set), "pod", klog.KObj(pod))
return false, 0, res.RefreshErr
}
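The deletePod and refreshPodState hunks show the error-path half of the migration: the error moves out of the format string and becomes the first argument of klog.ErrorS. A minimal sketch of that shape, with a made-up error and klog.KRef standing in for real objects:

package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	refreshErr := errors.New("in-place refresh failed") // stand-in error for illustration

	// Old style: the error is flattened into the message with %v.
	klog.Errorf("AdvancedStatefulSet %s failed to update pod %s condition for inplace: %v",
		"demo-ns/demo-asts", "demo-pod-0", refreshErr)

	// New style: the error is a first-class argument and everything else is a key/value pair.
	klog.ErrorS(refreshErr, "AdvancedStatefulSet failed to update pod condition for inplace",
		"statefulSet", klog.KRef("demo-ns", "demo-asts"), "pod", klog.KRef("demo-ns", "demo-pod-0"))

	klog.Flush()
}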
@ -729,8 +718,7 @@ func (ssc *defaultStatefulSetControl) refreshPodState(set *appsv1beta1.StatefulS
if updated, _, err := ssc.lifecycleControl.UpdatePodLifecycle(pod, state, markPodNotReady); err != nil {
return false, 0, err
} else if updated {
klog.V(3).Infof("AdvancedStatefulSet %s update pod %s lifecycle to %s",
getStatefulSetKey(set), pod.Name, state)
klog.V(3).InfoS("AdvancedStatefulSet updated pod lifecycle", "statefulSet", klog.KObj(set), "pod", klog.KObj(pod), "lifecycleState", state)
return true, res.DelayDuration, nil
}
}

@ -772,8 +760,7 @@ func (ssc *defaultStatefulSetControl) inPlaceUpdatePod(
if set.Spec.Lifecycle != nil && lifecycle.IsPodHooked(set.Spec.Lifecycle.InPlaceUpdate, pod) {
markPodNotReady := set.Spec.Lifecycle.InPlaceUpdate.MarkPodNotReady
if updated, _, err = ssc.lifecycleControl.UpdatePodLifecycle(pod, appspub.LifecycleStatePreparingUpdate, markPodNotReady); err == nil && updated {
klog.V(3).Infof("StatefulSet %s updated pod %s lifecycle to PreparingUpdate",
getStatefulSetKey(set), pod.Name)
klog.V(3).InfoS("StatefulSet updated pod lifecycle to PreparingUpdate", "statefulSet", klog.KObj(set), "pod", klog.KObj(pod))
}
return true, err
}

@ -785,8 +772,7 @@ func (ssc *defaultStatefulSetControl) inPlaceUpdatePod(
inPlaceUpdateHandler = set.Spec.Lifecycle.InPlaceUpdate
}
if updated, _, err = ssc.lifecycleControl.UpdatePodLifecycleWithHandler(pod, appspub.LifecycleStatePreparingUpdate, inPlaceUpdateHandler); err == nil && updated {
klog.V(3).Infof("StatefulSet %s updated pod %s lifecycle to PreparingUpdate",
getStatefulSetKey(set), pod.Name)
klog.V(3).InfoS("StatefulSet updated pod lifecycle to PreparingUpdate", "statefulSet", klog.KObj(set), "pod", klog.KObj(pod))
}
return true, err
case appspub.LifecycleStatePreparingUpdate:
@ -165,7 +165,7 @@ func claimOwnerMatchesSetAndPod(claim *v1.PersistentVolumeClaim, set *appsv1beta
const delete = appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType
switch {
default:
klog.Errorf("Unknown policy %v; treating as Retain", set.Spec.PersistentVolumeClaimRetentionPolicy)
klog.InfoS("Unknown policy, treating as Retain", "policy", set.Spec.PersistentVolumeClaimRetentionPolicy)
fallthrough
case policy.WhenScaled == retain && policy.WhenDeleted == retain:
if hasOwnerRef(claim, set) ||

@ -227,7 +227,7 @@ func updateClaimOwnerRefForSetAndPod(claim *v1.PersistentVolumeClaim, set *appsv
const delete = appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType
switch {
default:
klog.Errorf("Unknown policy %v, treating as Retain", set.Spec.PersistentVolumeClaimRetentionPolicy)
klog.InfoS("Unknown policy, treating as Retain", "policy", set.Spec.PersistentVolumeClaimRetentionPolicy)
fallthrough
case policy.WhenScaled == retain && policy.WhenDeleted == retain:
needsUpdate = removeOwnerRef(claim, set) || needsUpdate
@ -193,7 +193,8 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
oldSS := e.ObjectOld.(*appsv1beta1.StatefulSet)
newSS := e.ObjectNew.(*appsv1beta1.StatefulSet)
if oldSS.Status.Replicas != newSS.Status.Replicas {
klog.V(4).Infof("Observed updated replica count for StatefulSet: %v, %d->%d", newSS.Name, oldSS.Status.Replicas, newSS.Status.Replicas)
klog.V(4).InfoS("Observed updated replica count for StatefulSet",
"statefulSet", klog.KObj(newSS), "oldReplicas", oldSS.Status.Replicas, "newReplicas", newSS.Status.Replicas)
}
return true
},

@ -209,7 +210,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
return err
}

klog.V(4).Infof("finished to add statefulset-controller")
klog.V(4).InfoS("Finished to add statefulset-controller")

return nil
}

@ -235,18 +236,18 @@ func (ssc *ReconcileStatefulSet) Reconcile(ctx context.Context, request reconcil
defer func() {
if retErr == nil {
if res.Requeue || res.RequeueAfter > 0 {
klog.Infof("Finished syncing statefulset %q (%v), result: %v", key, time.Since(startTime), res)
klog.InfoS("Finished syncing StatefulSet", "statefulSet", request, "elapsedTime", time.Since(startTime), "result", res)
} else {
klog.Infof("Finished syncing statefulset %q (%v)", key, time.Since(startTime))
klog.InfoS("Finished syncing StatefulSet", "statefulSet", request, "elapsedTime", time.Since(startTime))
}
} else {
klog.Infof("Finished syncing statefulset %q (%v), error: %v", key, time.Since(startTime), retErr)
klog.ErrorS(retErr, "Finished syncing StatefulSet error", "statefulSet", request, "elapsedTime", time.Since(startTime))
}
}()

set, err := ssc.setLister.StatefulSets(namespace).Get(name)
if errors.IsNotFound(err) {
klog.Infof("StatefulSet has been deleted %v", key)
klog.InfoS("StatefulSet deleted", "statefulSet", key)
updateExpectations.DeleteExpectations(key)
return reconcile.Result{}, nil
}

@ -338,11 +339,11 @@ func (ssc *ReconcileStatefulSet) getPodsForStatefulSet(ctx context.Context, set

// syncStatefulSet syncs a tuple of (statefulset, []*v1.Pod).
func (ssc *ReconcileStatefulSet) syncStatefulSet(ctx context.Context, set *appsv1beta1.StatefulSet, pods []*v1.Pod) error {
klog.V(4).Infof("Syncing StatefulSet %v/%v with %d pods", set.Namespace, set.Name, len(pods))
klog.V(4).InfoS("Syncing StatefulSet with pods", "statefulSet", klog.KObj(set), "podCount", len(pods))
// TODO: investigate where we mutate the set during the update as it is not obvious.
if err := ssc.control.UpdateStatefulSet(ctx, set.DeepCopy(), pods); err != nil {
return err
}
klog.V(4).Infof("Successfully synced StatefulSet %s/%s", set.Namespace, set.Name)
klog.V(4).InfoS("Successfully synced StatefulSet", "statefulSet", klog.KObj(set))
return nil
}
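In the Reconcile hunk above, where the StatefulSet object may already be gone, the reconcile request and the cache key are passed directly as values; any value with a sensible string form works, and klog.KRef builds a namespace/name reference when no object is at hand. A small standalone sketch with hypothetical names:

package main

import (
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/klog/v2"
)

func main() {
	// When only a namespace/name pair is available (for example after the object was deleted),
	// the request itself or a klog.KRef can serve as the structured value.
	req := types.NamespacedName{Namespace: "default", Name: "web"}

	klog.InfoS("StatefulSet deleted", "statefulSet", req)
	klog.InfoS("Finished syncing StatefulSet", "statefulSet", klog.KRef(req.Namespace, req.Name))

	klog.Flush()
}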
@ -827,7 +827,8 @@ func NewStatefulSetController(
oldPS := old.(*appsv1beta1.StatefulSet)
curPS := cur.(*appsv1beta1.StatefulSet)
if oldPS.Status.Replicas != curPS.Status.Replicas {
klog.V(4).Infof("Observed updated replica count for StatefulSet: %v, %d->%d", curPS.Name, oldPS.Status.Replicas, curPS.Status.Replicas)
klog.V(4).InfoS("Observed updated replica count for StatefulSet",
"statefulSetName", curPS.Name, "oldReplicas", oldPS.Status.Replicas, "newReplicas", curPS.Status.Replicas)
}
ssc.enqueueStatefulSet(cur)
},

@ -844,8 +845,8 @@ func (ssc *StatefulSetController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer ssc.queue.ShutDown()

klog.Infof("Starting stateful set controller")
defer klog.Infof("Shutting down statefulset controller")
klog.InfoS("Starting stateful set controller")
defer klog.InfoS("Shutting down statefulset controller")

if !cache.WaitForNamedCacheSync("stateful set", stopCh, ssc.podListerSynced, ssc.setListerSynced, ssc.pvcListerSynced, ssc.revListerSynced) {
return

@ -920,7 +921,7 @@ func (ssc *StatefulSetController) addPod(obj interface{}) {
if set == nil {
return
}
klog.V(4).Infof("Pod %s created, labels: %+v", pod.Name, pod.Labels)
klog.V(4).InfoS("Pod created", "pod", klog.KObj(pod), "labels", pod.Labels)
ssc.enqueueStatefulSet(set)
return
}

@ -931,7 +932,7 @@ func (ssc *StatefulSetController) addPod(obj interface{}) {
if len(sets) == 0 {
return
}
klog.V(4).Infof("Orphan Pod %s created, labels: %+v", pod.Name, pod.Labels)
klog.V(4).InfoS("Orphan Pod created", "pod", klog.KObj(pod), "labels", pod.Labels)
for _, set := range sets {
ssc.enqueueStatefulSet(set)
}

@ -965,7 +966,7 @@ func (ssc *StatefulSetController) updatePod(old, cur interface{}) {
if set == nil {
return
}
klog.V(4).Infof("Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
klog.V(4).InfoS("Pod updated", "pod", klog.KObj(curPod), "oldObjectMeta", oldPod.ObjectMeta, "newObjectMeta", curPod.ObjectMeta)
ssc.enqueueStatefulSet(set)
return
}

@ -977,7 +978,7 @@ func (ssc *StatefulSetController) updatePod(old, cur interface{}) {
if len(sets) == 0 {
return
}
klog.V(4).Infof("Orphan Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
klog.V(4).InfoS("Orphan Pod updated", "pod", klog.KObj(curPod), "oldObjectMeta", oldPod.ObjectMeta, "newObjectMeta", curPod.ObjectMeta)
for _, set := range sets {
ssc.enqueueStatefulSet(set)
}

@ -1014,7 +1015,7 @@ func (ssc *StatefulSetController) deletePod(obj interface{}) {
if set == nil {
return
}
klog.V(4).Infof("Pod %s/%s deleted through %v.", pod.Namespace, pod.Name, utilruntime.GetCaller())
klog.V(4).InfoS("Pod deleted", "pod", klog.KObj(pod), "caller", utilruntime.GetCaller())
ssc.enqueueStatefulSet(set)
}
@ -48,15 +48,15 @@ func (dss *defaultStatefulSetControl) createImagePullJobsForInPlaceUpdate(sts *a
// ignore if update type is OnDelete or pod update policy is ReCreate
if string(sts.Spec.UpdateStrategy.Type) == string(apps.OnDeleteStatefulSetStrategyType) || sts.Spec.UpdateStrategy.RollingUpdate == nil ||
string(sts.Spec.UpdateStrategy.RollingUpdate.PodUpdatePolicy) == string(appsv1beta1.RecreatePodUpdateStrategyType) {
klog.V(4).Infof("Statefulset %s/%s skipped to create ImagePullJob for update type is %s",
sts.Namespace, sts.Name, sts.Spec.UpdateStrategy.Type)
klog.V(4).InfoS("Statefulset skipped to create ImagePullJob for certain update type",
"statefulSet", klog.KObj(sts), "updateType", sts.Spec.UpdateStrategy.Type)
return dss.patchControllerRevisionLabels(updateRevision, appsv1alpha1.ImagePreDownloadIgnoredKey, "true")
}

// ignore if replicas <= minimumReplicasToPreDownloadImage
if *sts.Spec.Replicas <= minimumReplicasToPreDownloadImage {
klog.V(4).Infof("Statefulset %s/%s skipped to create ImagePullJob for replicas %d <= %d",
sts.Namespace, sts.Name, *sts.Spec.Replicas, minimumReplicasToPreDownloadImage)
klog.V(4).InfoS("Statefulset skipped to create ImagePullJob for replicas less than or equal to minimumReplicasToPreDownloadImage",
"statefulSet", klog.KObj(sts), "replicas", *sts.Spec.Replicas, "minimumReplicasToPreDownloadImage", minimumReplicasToPreDownloadImage)
return dss.patchControllerRevisionLabels(updateRevision, appsv1alpha1.ImagePreDownloadIgnoredKey, "true")
}

@ -69,8 +69,8 @@ func (dss *defaultStatefulSetControl) createImagePullJobsForInPlaceUpdate(sts *a
maxUnavailable, _ = intstrutil.GetValueFromIntOrPercent(
intstrutil.ValueOrDefault(sts.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, intstrutil.FromString("1")), int(*sts.Spec.Replicas), false)
if partition == 0 && maxUnavailable >= int(*sts.Spec.Replicas) {
klog.V(4).Infof("Statefulset %s/%s skipped to create ImagePullJob for all Pods update in one batch, replicas=%d, partition=%d, maxUnavailable=%d",
sts.Namespace, sts.Name, *sts.Spec.Replicas, partition, maxUnavailable)
klog.V(4).InfoS("Statefulset skipped to create ImagePullJob for all Pods update in one batch",
"statefulSet", klog.KObj(sts), "replicas", *sts.Spec.Replicas, "partition", partition, "maxUnavailable", maxUnavailable)
return dss.patchControllerRevisionLabels(updateRevision, appsv1alpha1.ImagePreDownloadIgnoredKey, "true")
}

@ -83,8 +83,8 @@ func (dss *defaultStatefulSetControl) createImagePullJobsForInPlaceUpdate(sts *a
// ignore if this revision can not update in-place
inplaceControl := inplaceupdate.New(sigsruntimeClient, revisionadapter.NewDefaultImpl())
if !inplaceControl.CanUpdateInPlace(currentRevision, updateRevision, opts) {
klog.V(4).Infof("Statefulset %s/%s skipped to create ImagePullJob for %s -> %s can not update in-place",
sts.Namespace, sts.Name, currentRevision.Name, updateRevision.Name)
klog.V(4).InfoS("Statefulset skipped to create ImagePullJob because it could not update in-place",
"statefulSet", klog.KObj(sts), "currentRevisionName", currentRevision.Name, "updateRevisionName", updateRevision.Name)
return dss.patchControllerRevisionLabels(updateRevision, appsv1alpha1.ImagePreDownloadIgnoredKey, "true")
}

@ -116,20 +116,20 @@ func (dss *defaultStatefulSetControl) createImagePullJobsForInPlaceUpdate(sts *a
}

containerImages := diffImagesBetweenRevisions(currentRevision, updateRevision)
klog.V(3).Infof("Statefulset %s/%s begin to create ImagePullJobs for revision %s -> %s: %v",
sts.Namespace, sts.Name, currentRevision.Name, updateRevision.Name, containerImages)
klog.V(3).InfoS("Statefulset began to create ImagePullJobs", "statefulSet", klog.KObj(sts), "currentRevisionName", currentRevision.Name,
"updateRevisionName", updateRevision.Name, "containerImages", containerImages)
for name, image := range containerImages {
// job name is revision name + container name, it can not be more than 255 characters
jobName := fmt.Sprintf("%s-%s", updateRevision.Name, name)
err := imagejobutilfunc.CreateJobForWorkload(sigsruntimeClient, sts, controllerKind, jobName, image, labelMap, annotationMap, *selector, pullSecrets)
if err != nil {
if !errors.IsAlreadyExists(err) {
klog.Errorf("Statefulset %s/%s failed to create ImagePullJob %s: %v", sts.Namespace, sts.Name, jobName, err)
klog.ErrorS(err, "Statefulset failed to create ImagePullJob", "statefulSet", klog.KObj(sts), "jobName", jobName)
dss.recorder.Eventf(sts, v1.EventTypeNormal, "FailedCreateImagePullJob", "failed to create ImagePullJob %s: %v", jobName, err)
}
continue
}
klog.V(3).Infof("Statefulset %s/%s created ImagePullJob %s for image: %s", sts.Namespace, sts.Name, jobName, image)
klog.V(3).InfoS("Statefulset created ImagePullJob for image", "statefulSet", klog.KObj(sts), "jobName", jobName, "image", image)
dss.recorder.Eventf(sts, v1.EventTypeNormal, "CreatedImagePullJob", "created ImagePullJob %s for image: %s", jobName, image)
}
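One detail worth noting in the ImagePullJob hunks: only the klog calls change shape, while recorder.Eventf keeps printf-style formatting because Kubernetes events are not structured logs. The sketch below, which uses a fake recorder and invented names, shows the two styles side by side; it is an illustration, not code from the repository:

package main

import (
	"errors"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog/v2"
)

func main() {
	// Stand-in object; in the controller this would be the StatefulSet being reconciled.
	obj := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "demo"}}
	recorder := record.NewFakeRecorder(10)
	jobName := "rev-1-nginx"
	err := errors.New("create failed")

	// Structured log: constant message plus keys.
	klog.ErrorS(err, "Statefulset failed to create ImagePullJob", "statefulSet", klog.KObj(obj), "jobName", jobName)

	// Event: stays printf-style; the commit leaves these calls untouched.
	recorder.Eventf(obj, corev1.EventTypeNormal, "FailedCreateImagePullJob", "failed to create ImagePullJob %s: %v", jobName, err)

	fmt.Println(<-recorder.Events) // drain the fake recorder so the example is self-contained
	klog.Flush()
}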
@ -181,17 +181,17 @@ func (a *AdvancedStatefulSetAdapter) ApplySubsetTemplate(ud *alpha1.UnitedDeploy
TemplateSpecBytes, _ := json.Marshal(set.Spec.Template)
modified, err := strategicpatch.StrategicMergePatch(TemplateSpecBytes, subSetConfig.Patch.Raw, &corev1.PodTemplateSpec{})
if err != nil {
klog.Errorf("failed to merge patch raw %s", subSetConfig.Patch.Raw)
klog.ErrorS(err, "Failed to merge patch raw", "patch", subSetConfig.Patch.Raw)
return err
}
patchedTemplateSpec := corev1.PodTemplateSpec{}
if err = json.Unmarshal(modified, &patchedTemplateSpec); err != nil {
klog.Errorf("failed to unmarshal %s to podTemplateSpec", modified)
klog.ErrorS(err, "Failed to unmarshal modified JSON to podTemplateSpec", "JSON", modified)
return err
}

set.Spec.Template = patchedTemplateSpec
klog.V(2).Infof("AdvancedStatefulSet [%s/%s] was patched successfully: %s", set.Namespace, set.GenerateName, subSetConfig.Patch.Raw)
klog.V(2).InfoS("AdvancedStatefulSet was patched successfully", "advancedStatefulSet", klog.KRef(set.Namespace, set.GenerateName), "patch", subSetConfig.Patch.Raw)
}
if set.Annotations == nil {
set.Annotations = make(map[string]string)

@ -142,17 +142,17 @@ func (a *CloneSetAdapter) ApplySubsetTemplate(ud *alpha1.UnitedDeployment, subse
TemplateSpecBytes, _ := json.Marshal(set.Spec.Template)
modified, err := strategicpatch.StrategicMergePatch(TemplateSpecBytes, subSetConfig.Patch.Raw, &corev1.PodTemplateSpec{})
if err != nil {
klog.Errorf("failed to merge patch raw %s", subSetConfig.Patch.Raw)
klog.ErrorS(err, "Failed to merge patch raw", "patch", subSetConfig.Patch.Raw)
return err
}
patchedTemplateSpec := corev1.PodTemplateSpec{}
if err = json.Unmarshal(modified, &patchedTemplateSpec); err != nil {
klog.Errorf("failed to unmarshal %s to podTemplateSpec", modified)
klog.ErrorS(err, "Failed to unmarshal modified JSON to podTemplateSpec", "JSON", modified)
return err
}

set.Spec.Template = patchedTemplateSpec
klog.V(2).Infof("CloneSet [%s/%s] was patched successfully: %s", set.Namespace, set.GenerateName, subSetConfig.Patch.Raw)
klog.V(2).InfoS("CloneSet was patched successfully", "cloneSet", klog.KRef(set.Namespace, set.GenerateName), "patch", subSetConfig.Patch.Raw)
}
if set.Annotations == nil {
set.Annotations = make(map[string]string)

@ -154,17 +154,17 @@ func (a *DeploymentAdapter) ApplySubsetTemplate(ud *alpha1.UnitedDeployment, sub
TemplateSpecBytes, _ := json.Marshal(set.Spec.Template)
modified, err := strategicpatch.StrategicMergePatch(TemplateSpecBytes, subSetConfig.Patch.Raw, &corev1.PodTemplateSpec{})
if err != nil {
klog.Errorf("failed to merge patch raw %s", subSetConfig.Patch.Raw)
klog.ErrorS(err, "Failed to merge patch raw", "patch", subSetConfig.Patch.Raw)
return err
}
patchedTemplateSpec := corev1.PodTemplateSpec{}
if err = json.Unmarshal(modified, &patchedTemplateSpec); err != nil {
klog.Errorf("failed to unmarshal %s to podTemplateSpec", modified)
klog.ErrorS(err, "Failed to unmarshal modified JSON to podTemplateSpec", "JSON", modified)
return err
}

set.Spec.Template = patchedTemplateSpec
klog.V(2).Infof("Deployment [%s/%s] was patched successfully: %s", set.Namespace, set.GenerateName, subSetConfig.Patch.Raw)
klog.V(2).InfoS("Deployment was patched successfully", "deployment", klog.KRef(set.Namespace, set.GenerateName), "patch", subSetConfig.Patch.Raw)
}
if set.Annotations == nil {
set.Annotations = make(map[string]string)

@ -164,17 +164,17 @@ func (a *StatefulSetAdapter) ApplySubsetTemplate(ud *alpha1.UnitedDeployment, su
TemplateSpecBytes, _ := json.Marshal(set.Spec.Template)
modified, err := strategicpatch.StrategicMergePatch(TemplateSpecBytes, subSetConfig.Patch.Raw, &corev1.PodTemplateSpec{})
if err != nil {
klog.Errorf("failed to merge patch raw %s", subSetConfig.Patch.Raw)
klog.ErrorS(err, "Failed to merge patch raw", "patch", subSetConfig.Patch.Raw)
return err
}
patchedTemplateSpec := corev1.PodTemplateSpec{}
if err = json.Unmarshal(modified, &patchedTemplateSpec); err != nil {
klog.Errorf("failed to unmarshal %s to podTemplateSpec", modified)
klog.ErrorS(err, "Failed to unmarshal modified JSON to podTemplateSpec", "JSON", modified)
return err
}

set.Spec.Template = patchedTemplateSpec
klog.V(2).Infof("StatefulSet [%s/%s] was patched successfully: %s", set.Namespace, set.GenerateName, subSetConfig.Patch.Raw)
klog.V(2).InfoS("StatefulSet was patched successfully", "statefulSet", klog.KRef(set.Namespace, set.GenerateName), "patch", subSetConfig.Patch.Raw)
}
if set.Annotations == nil {
set.Annotations = make(map[string]string)
@ -259,7 +259,7 @@ func (a *StatefulSetAdapter) deleteStuckPods(set *appsv1.StatefulSet, revision s
pod := pods[i]
// If the pod is considered as stuck, delete it.
if isPodStuckForRollingUpdate(pod, revision, partition) {
klog.V(2).Infof("Delete pod %s/%s at stuck state", pod.Namespace, pod.Name)
klog.V(2).InfoS("Deleted pod at stuck state", "pod", klog.KObj(pod))
err = a.Delete(context.TODO(), pod, client.PropagationPolicy(metav1.DeletePropagationBackground))
if err != nil {
return err

@ -87,7 +87,7 @@ func (s *specificAllocator) Alloc(nameToSubset *map[string]*Subset) (*map[string
}

specifiedReplicas := getSpecifiedSubsetReplicas(expectedReplicas, s.UnitedDeployment)
klog.V(4).Infof("UnitedDeployment %s/%s specifiedReplicas: %v", s.Namespace, s.Name, specifiedReplicas)
klog.V(4).InfoS("UnitedDeployment specifiedReplicas", "unitedDeployment", klog.KObj(s), "specifiedReplicas", specifiedReplicas)
return s.AllocateReplicas(expectedReplicas, specifiedReplicas)
}

@ -107,7 +107,7 @@ func (s *specificAllocator) validateReplicas(replicas int32, subsetReplicasLimit
specifiedCount++
}
}
klog.V(4).Infof("specifiedCount: %d, len(*s.subsets): %d", specifiedCount, len(*s.subsets))
klog.V(4).InfoS("SpecifiedCount and subsetsCount", "specifiedCount", specifiedCount, "subsetsCount", len(*s.subsets))
if replicas != -1 {
if specifiedReplicas > replicas {
return fmt.Errorf("specified subsets' replica (%d) is greater than UnitedDeployment replica (%d)",

@ -137,8 +137,8 @@ func getSpecifiedSubsetReplicas(replicas int32, ud *appsv1alpha1.UnitedDeploymen
if specifiedReplicas, err := ParseSubsetReplicas(replicas, *subsetDef.Replicas); err == nil {
replicaLimits[subsetDef.Name] = specifiedReplicas
} else {
klog.Warningf("Fail to consider the replicas of subset %s when parsing replicaLimits during managing replicas of UnitedDeployment %s/%s: %s",
subsetDef.Name, ud.Namespace, ud.Name, err)
klog.ErrorS(err, "Failed to consider the replicas of subset when parsing replicaLimits during managing replicas of UnitedDeployment",
"subsetName", subsetDef.Name, "unitedDeployment", klog.KObj(ud))
}
}
@ -51,7 +51,7 @@ func (r *ReconcileUnitedDeployment) controlledHistories(ud *appsalphav1.UnitedDe
if err != nil {
return nil, err
}
klog.V(1).Infof("List controller revision of UnitedDeployment %s/%s: count %d\n", ud.Namespace, ud.Name, len(histories.Items))
klog.V(1).InfoS("List controller revision of UnitedDeployment", "unitedDeployment", klog.KObj(ud), "count", len(histories.Items))

// Use ControllerRefManager to adopt/orphan as needed.
cm, err := refmanager.New(r.Client, ud.Spec.Selector, ud, r.scheme)

@ -84,7 +84,7 @@ func (m *SubsetControl) CreateSubset(ud *alpha1.UnitedDeployment, subsetName str
return err
}

klog.V(4).Infof("Have %d replicas when creating Subset for UnitedDeployment %s/%s", replicas, ud.Namespace, ud.Name)
klog.V(4).InfoS("Replicas when creating Subset for UnitedDeployment", "replicas", replicas, "unitedDeployment", klog.KObj(ud))
return m.Create(context.TODO(), set)
}

@ -167,7 +167,7 @@ type ReconcileUnitedDeployment struct {
// Reconcile reads that state of the cluster for a UnitedDeployment object and makes changes based on the state read
// and what is in the UnitedDeployment.Spec
func (r *ReconcileUnitedDeployment) Reconcile(_ context.Context, request reconcile.Request) (reconcile.Result, error) {
klog.V(4).Infof("Reconcile UnitedDeployment %s/%s", request.Namespace, request.Name)
klog.V(4).InfoS("Reconcile UnitedDeployment", "unitedDeployment", request)
// Fetch the UnitedDeployment instance
instance := &appsv1alpha1.UnitedDeployment{}
err := r.Get(context.TODO(), request.NamespacedName, instance)

@ -185,31 +185,30 @@ func (r *ReconcileUnitedDeployment) Reconcile(_ context.Context, request reconci

currentRevision, updatedRevision, _, collisionCount, err := r.constructUnitedDeploymentRevisions(instance)
if err != nil {
klog.Errorf("Fail to construct controller revision of UnitedDeployment %s/%s: %s", instance.Namespace, instance.Name, err)
klog.ErrorS(err, "Failed to construct controller revision of UnitedDeployment", "unitedDeployment", klog.KObj(instance))
r.recorder.Event(instance.DeepCopy(), corev1.EventTypeWarning, fmt.Sprintf("Failed%s", eventTypeRevisionProvision), err.Error())
return reconcile.Result{}, err
}

control, subsetType := r.getSubsetControls(instance)

klog.V(4).Infof("Get UnitedDeployment %s/%s all subsets", request.Namespace, request.Name)
klog.V(4).InfoS("Got all subsets of UnitedDeployment", "unitedDeployment", request)
expectedRevision := currentRevision.Name
if updatedRevision != nil {
expectedRevision = updatedRevision.Name
}
nameToSubset, err := r.getNameToSubset(instance, control, expectedRevision)
if err != nil {
klog.Errorf("Fail to get Subsets of UnitedDeployment %s/%s: %s", instance.Namespace, instance.Name, err)
klog.ErrorS(err, "Failed to get Subsets of UnitedDeployment", "unitedDeployment", klog.KObj(instance))
r.recorder.Event(instance.DeepCopy(), corev1.EventTypeWarning, fmt.Sprintf("Failed %s",
eventTypeFindSubsets), err.Error())
return reconcile.Result{}, err
}

nextReplicas, err := NewReplicaAllocator(instance).Alloc(nameToSubset)
klog.V(4).Infof("Get UnitedDeployment %s/%s next replicas %v", instance.Namespace, instance.Name, nextReplicas)
klog.V(4).InfoS("Got UnitedDeployment next replicas", "unitedDeployment", klog.KObj(instance), "nextReplicas", nextReplicas)
if err != nil {
klog.Errorf("UnitedDeployment %s/%s Specified subset replicas is ineffective: %s",
instance.Namespace, instance.Name, err.Error())
klog.ErrorS(err, "UnitedDeployment specified subset replicas is ineffective", "unitedDeployment", klog.KObj(instance))
r.recorder.Eventf(instance.DeepCopy(), corev1.EventTypeWarning, fmt.Sprintf("Failed %s",
eventTypeSpecifySubbsetReplicas), "Specified subset replicas is ineffective: %s", err.Error())
return reconcile.Result{}, err

@ -217,18 +216,18 @@ func (r *ReconcileUnitedDeployment) Reconcile(_ context.Context, request reconci

nextPartitions := calcNextPartitions(instance, nextReplicas)
nextUpdate := getNextUpdate(instance, nextReplicas, nextPartitions)
klog.V(4).Infof("Get UnitedDeployment %s/%s next update %v", instance.Namespace, instance.Name, nextUpdate)
klog.V(4).InfoS("Got UnitedDeployment next update", "unitedDeployment", klog.KObj(instance), "nextUpdate", nextUpdate)

newStatus, err := r.manageSubsets(instance, nameToSubset, nextUpdate, currentRevision, updatedRevision, subsetType)
if err != nil {
klog.Errorf("Fail to update UnitedDeployment %s/%s: %s", instance.Namespace, instance.Name, err)
klog.ErrorS(err, "Failed to update UnitedDeployment", "unitedDeployment", klog.KObj(instance))
r.recorder.Event(instance.DeepCopy(), corev1.EventTypeWarning, fmt.Sprintf("Failed%s", eventTypeSubsetsUpdate), err.Error())
return reconcile.Result{}, err
}

selector, err := util.ValidatedLabelSelectorAsSelector(instance.Spec.Selector)
if err != nil {
klog.Errorf("Error converting UnitedDeployment %s selector: %v", request, err)
klog.ErrorS(err, "Error converting UnitedDeployment selector", "unitedDeployment", klog.KObj(instance))
// This is a non-transient error, so don't retry.
return reconcile.Result{}, nil
}

@ -244,7 +243,7 @@ func (r *ReconcileUnitedDeployment) getNameToSubset(instance *appsv1alpha1.Unite
return nil, fmt.Errorf("fail to get all Subsets for UnitedDeployment %s/%s: %s", instance.Namespace, instance.Name, err)
}

klog.V(4).Infof("Classify UnitedDeployment %s/%s by subSet name", instance.Namespace, instance.Name)
klog.V(4).InfoS("Classify UnitedDeployment by subSet name", "unitedDeployment", klog.KObj(instance))
nameToSubsets := r.classifySubsetBySubsetName(instance, subSets)

nameToSubset, err := r.deleteDupSubset(instance, nameToSubsets, control)

@ -294,7 +293,7 @@ func (r *ReconcileUnitedDeployment) deleteDupSubset(ud *appsv1alpha1.UnitedDeplo
for name, subsets := range nameToSubsets {
if len(subsets) > 1 {
for _, subset := range subsets[1:] {
klog.V(0).Infof("Delete duplicated Subset %s/%s for subset name %s", subset.Namespace, subset.Name, name)
klog.InfoS("Deleted duplicated Subset for subset name", "subset", klog.KObj(subset), "subsetName", name)
if err := control.DeleteSubset(subset); err != nil {
if errors.IsNotFound(err) {
continue

@ -440,12 +439,14 @@ func (r *ReconcileUnitedDeployment) updateUnitedDeployment(ud *appsv1alpha1.Unit

var getErr, updateErr error
for i, obj := 0, ud; ; i++ {
klog.V(4).Infof(fmt.Sprintf("The %d th time updating status for %v: %s/%s, ", i, obj.Kind, obj.Namespace, obj.Name) +
fmt.Sprintf("replicas %d->%d (need %d), ", obj.Status.Replicas, newStatus.Replicas, obj.Spec.Replicas) +
fmt.Sprintf("readyReplicas %d->%d (need %d), ", obj.Status.ReadyReplicas, newStatus.ReadyReplicas, obj.Spec.Replicas) +
fmt.Sprintf("updatedReplicas %d->%d, ", obj.Status.UpdatedReplicas, newStatus.UpdatedReplicas) +
fmt.Sprintf("updatedReadyReplicas %d->%d, ", obj.Status.UpdatedReadyReplicas, newStatus.UpdatedReadyReplicas) +
fmt.Sprintf("sequence No: %v->%v", obj.Status.ObservedGeneration, newStatus.ObservedGeneration))
klog.V(4).InfoS("Updating status",
"updateCount", i, "unitedDeployment", klog.KObj(obj),
"replicasSpec", obj.Spec.Replicas, "oldReplicas", obj.Status.Replicas, "newReplicas", newStatus.Replicas,
"readyReplicasSpec", obj.Spec.Replicas, "oldReadyReplicas", obj.Status.ReadyReplicas, "newReadyReplicas", newStatus.ReadyReplicas,
"oldUpdatedReplicas", obj.Status.UpdatedReplicas, "newUpdatedReplicas", newStatus.UpdatedReplicas,
"oldUpdatedReadyReplicas", obj.Status.UpdatedReadyReplicas, "newUpdatedReadyReplicas", newStatus.UpdatedReadyReplicas,
"oldObservedGeneration", obj.Status.ObservedGeneration, "newObservedGeneration", newStatus.ObservedGeneration,
)

obj.Status = *newStatus

@ -463,6 +464,6 @@ func (r *ReconcileUnitedDeployment) updateUnitedDeployment(ud *appsv1alpha1.Unit
obj = tmpObj
}

klog.Errorf("fail to update UnitedDeployment %s/%s status: %s", ud.Namespace, ud.Name, updateErr)
klog.ErrorS(updateErr, "Failed to update UnitedDeployment status", "unitedDeployment", klog.KObj(ud))
return nil, updateErr
}
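The updateUnitedDeployment hunk is the most involved conversion in this commit: a chain of fmt.Sprintf fragments collapses into a single InfoS call with one key per datum, which lets log sinks index each field. A reduced sketch of that shape, with invented field values:

package main

import "k8s.io/klog/v2"

func main() {
	oldReplicas, newReplicas := int32(3), int32(5)
	oldGeneration, newGeneration := int64(7), int64(8)

	// One constant message, one key per value; no string assembly in the caller.
	klog.V(4).InfoS("Updating status",
		"oldReplicas", oldReplicas, "newReplicas", newReplicas,
		"oldObservedGeneration", oldGeneration, "newObservedGeneration", newGeneration,
	)

	klog.Flush()
}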
@ -65,7 +65,9 @@ func (r *ReconcileUnitedDeployment) manageSubsets(ud *appsv1alpha1.UnitedDeploym
replicas := nextUpdate[cell].Replicas
partition := nextUpdate[cell].Partition

klog.V(0).Infof("UnitedDeployment %s/%s needs to update Subset (%s) %s/%s with revision %s, replicas %d, partition %d", ud.Namespace, ud.Name, subsetType, subset.Namespace, subset.Name, expectedRevision.Name, replicas, partition)
klog.InfoS("UnitedDeployment needed to update Subset with revision, replicas and partition",
"unitedDeployment", klog.KObj(ud), "subsetType", subsetType, "subset", klog.KObj(subset),
"expectedRevisionName", expectedRevision.Name, "replicas", replicas, "partition", partition)
updateSubsetErr := r.subSetControls[subsetType].UpdateSubset(subset, ud, expectedRevision.Name, replicas, partition)
if updateSubsetErr != nil {
r.recorder.Event(ud.DeepCopy(), corev1.EventTypeWarning, fmt.Sprintf("Failed%s", eventTypeSubsetsUpdate), fmt.Sprintf("Error updating PodSet (%s) %s when updating: %s", subsetType, subset.Name, updateSubsetErr))

@ -93,7 +95,7 @@ func (r *ReconcileUnitedDeployment) manageSubsetProvision(ud *appsv1alpha1.Unite
for subsetName := range *nameToSubset {
gotSubsets.Insert(subsetName)
}
klog.V(4).Infof("UnitedDeployment %s/%s has subsets %v, expects subsets %v", ud.Namespace, ud.Name, gotSubsets.List(), expectedSubsets.List())
klog.V(4).InfoS("UnitedDeployment subsets information", "unitedDeployment", klog.KObj(ud), "subsets", gotSubsets.List(), "expectedSubsets", expectedSubsets.List())

creates := expectedSubsets.Difference(gotSubsets).List()
deletes := gotSubsets.Difference(expectedSubsets).List()

@ -107,7 +109,7 @@ func (r *ReconcileUnitedDeployment) manageSubsetProvision(ud *appsv1alpha1.Unite
// manage creating
if len(creates) > 0 {
// do not consider deletion
klog.V(0).Infof("UnitedDeployment %s/%s needs creating subset (%s) with name: %v", ud.Namespace, ud.Name, subsetType, creates)
klog.InfoS("UnitedDeployment needed creating subset with name", "unitedDeployment", klog.KObj(ud), "subsetType", subsetType, "subsetNames", creates)
createdSubsets := make([]string, len(creates))
for i, subset := range creates {
createdSubsets[i] = subset

@ -138,7 +140,7 @@ func (r *ReconcileUnitedDeployment) manageSubsetProvision(ud *appsv1alpha1.Unite

// manage deleting
if len(deletes) > 0 {
klog.V(0).Infof("UnitedDeployment %s/%s needs deleting subset (%s) with name: [%v]", ud.Namespace, ud.Name, subsetType, deletes)
klog.InfoS("UnitedDeployment needed deleting subset with name", "unitedDeployment", klog.KObj(ud), "subsetType", subsetType, "subsetNames", deletes)
var deleteErrs []error
for _, subsetName := range deletes {
subset := (*nameToSubset)[subsetName]
@ -43,12 +43,12 @@ func DiscoverGVK(gvk schema.GroupVersionKind) bool {

if err != nil {
if errors.IsNotFound(err) {
klog.Warningf("Not found kind %s in group version %s, waiting time %s", gvk.Kind, gvk.GroupVersion().String(), time.Since(startTime))
klog.InfoS("Kind not found in group version after waiting", "kind", gvk.Kind, "groupVersion", gvk.GroupVersion(), "waitingTime", time.Since(startTime))
return false
}

// This might be caused by abnormal apiserver or etcd, ignore it
klog.Errorf("Failed to find resources in group version %s: %v, waiting time %s", gvk.GroupVersion().String(), err, time.Since(startTime))
klog.ErrorS(err, "Failed to find resources in group version after waiting", "groupVersion", gvk.GroupVersion(), "waitingTime", time.Since(startTime))
}

return true

@ -61,7 +61,7 @@ func AddWatcherDynamically(mgr manager.Manager, c controller.Controller, h handl
}

if !DiscoverGVK(gvk) {
klog.Errorf("Failed to find GVK(%v) in cluster for %s", gvk.String(), controllerKey)
klog.ErrorS(nil, "Failed to find GVK in cluster for controller", "GVK", gvk, "controllerKey", controllerKey)
return false, nil
}
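klog's structured API has no WarningS, so the commit maps klog.Warningf either to InfoS (the DiscoverGVK hunk above) or to ErrorS (the WorkloadSpread cache warnings later on), and where an Errorf had no error value at hand it becomes ErrorS with a nil first argument (the AddWatcherDynamically hunk above). A brief sketch of both shapes, with illustrative messages and keys:

package main

import "k8s.io/klog/v2"

func main() {
	// Expected condition: a former Warningf downgraded to InfoS.
	klog.InfoS("Kind not found in group version after waiting", "kind", "Rollout", "groupVersion", "rollouts.kruise.io/v1alpha1")

	// Noteworthy failure without an error value: ErrorS accepts nil as the error.
	klog.ErrorS(nil, "Failed to find GVK in cluster for controller", "controllerKey", "rollout-controller")

	klog.Flush()
}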
@ -46,7 +46,7 @@ func (r *ReconcileWorkloadSpread) rescheduleSubset(ws *appsv1alpha1.WorkloadSpre
}
unschedulable := len(scheduleFailedPods) > 0
if unschedulable {
klog.V(3).Infof("Subset (%s) of WorkloadSpread (%s/%s) is unschedulable", subsetStatus.Name, ws.Namespace, ws.Name)
klog.V(3).InfoS("Subset of WorkloadSpread is unschedulable", "subsetName", subsetStatus.Name, "workloadSpread", klog.KObj(ws))
}

oldCondition := GetWorkloadSpreadSubsetCondition(oldSubsetStatus, appsv1alpha1.SubsetSchedulable)

@ -104,8 +104,7 @@ func (r *ReconcileWorkloadSpread) deletePodsForSubset(ws *appsv1alpha1.WorkloadS
pod.Namespace, pod.Name, subsetName, ws.Namespace, ws.Name)
return err
}
klog.V(3).Infof("WorkloadSpread (%s/%s) delete unschedulabe Pod (%s/%s) in Subset %s successfully",
ws.Namespace, ws.Name, pod.Namespace, pod.Name, subsetName)
klog.V(3).InfoS("WorkloadSpread deleted unschedulabe Pod in Subset successfully", "workloadSpread", klog.KObj(ws), "pod", klog.KObj(pod), "subsetName", subsetName)
}
return nil
}

@ -66,7 +66,7 @@ func (r *ReconcileWorkloadSpread) updateDeletionCost(ws *appsv1alpha1.WorkloadSp

latestVersion, err := r.getWorkloadLatestVersion(ws)
if err != nil {
klog.Errorf("Failed to get the latest version for workload in workloadSpread %v, err: %v", klog.KObj(ws), err)
klog.ErrorS(err, "Failed to get the latest version for workload in workloadSpread", "workloadSpread", klog.KObj(ws))
return err
}

@ -154,8 +154,7 @@ func (r *ReconcileWorkloadSpread) syncSubsetPodDeletionCost(
} else {
subsetMaxReplicas, err := intstr.GetValueFromIntOrPercent(subset.MaxReplicas, int(workloadReplicas), true)
if err != nil || subsetMaxReplicas < 0 {
klog.Errorf("failed to get maxReplicas value from subset (%s) of WorkloadSpread (%s/%s)",
subset.Name, ws.Namespace, ws.Name)
klog.ErrorS(err, "Failed to get maxReplicas value from subset of WorkloadSpread", "subsetName", subset.Name, "workloadSpread", klog.KObj(ws))
return nil
}

@ -227,8 +226,7 @@ func (r *ReconcileWorkloadSpread) patchPodDeletionCost(ws *appsv1alpha1.Workload
if err := r.Patch(context.TODO(), clone, client.RawPatch(types.StrategicMergePatchType, []byte(body))); err != nil {
return err
}
klog.V(3).Infof("WorkloadSpread (%s/%s) paths deletion-cost annotation to %s for Pod (%s/%s) successfully",
ws.Namespace, ws.Name, deletionCostStr, pod.Namespace, pod.Name)
klog.V(3).InfoS("WorkloadSpread patched deletion-cost annotation for Pod successfully", "workloadSpread", klog.KObj(ws), "deletionCost", deletionCostStr, "pod", klog.KObj(pod))
return nil
}
@ -219,7 +219,7 @@ func (r *ReconcileWorkloadSpread) Reconcile(_ context.Context, req reconcile.Req
Name: req.Name,
},
}); cacheErr != nil {
klog.Warningf("Failed to delete workloadSpread(%s/%s) cache after deletion, err: %v", req.Namespace, req.Name, cacheErr)
klog.ErrorS(cacheErr, "Failed to delete workloadSpread cache after deletion", "workloadSpread", req)
}
return reconcile.Result{}, nil
} else if err != nil {

@ -228,9 +228,9 @@ func (r *ReconcileWorkloadSpread) Reconcile(_ context.Context, req reconcile.Req
}

startTime := time.Now()
klog.V(3).Infof("Begin to process WorkloadSpread (%s/%s)", ws.Namespace, ws.Name)
klog.V(3).InfoS("Began to process WorkloadSpread", "workloadSpread", klog.KObj(ws))
err = r.syncWorkloadSpread(ws)
klog.V(3).Infof("Finished syncing WorkloadSpread (%s/%s), cost: %v", ws.Namespace, ws.Name, time.Since(startTime))
klog.V(3).InfoS("Finished syncing WorkloadSpread", "workloadSpread", klog.KObj(ws), "cost", time.Since(startTime))
return reconcile.Result{RequeueAfter: durationStore.Pop(getWorkloadSpreadKey(ws))}, err
}

@ -245,7 +245,7 @@ func (r *ReconcileWorkloadSpread) getPodJob(ref *appsv1alpha1.TargetReference, n
if err != nil {
// when error is NotFound, it is ok here.
if errors.IsNotFound(err) {
klog.V(3).Infof("cannot find Job (%s/%s)", namespace, ref.Name)
klog.V(3).InfoS("Could not find Job", "job", klog.KRef(namespace, ref.Name))
return nil, 0, nil
}
return nil, -1, err

@ -253,7 +253,7 @@ func (r *ReconcileWorkloadSpread) getPodJob(ref *appsv1alpha1.TargetReference, n

labelSelector, err := util.ValidatedLabelSelectorAsSelector(job.Spec.Selector)
if err != nil {
klog.Errorf("gets labelSelector failed: %s", err.Error())
klog.ErrorS(err, "Failed to get labelSelector")
return nil, -1, nil
}

@ -297,7 +297,7 @@ func (r *ReconcileWorkloadSpread) getPodsForWorkloadSpread(ws *appsv1alpha1.Work
}

if err != nil {
klog.Errorf("WorkloadSpread (%s/%s) handles targetReference failed: %s", ws.Namespace, ws.Name, err.Error())
klog.ErrorS(err, "WorkloadSpread handled targetReference failed", "workloadSpread", klog.KObj(ws))
return nil, -1, err
}

@ -314,12 +314,12 @@ func (r *ReconcileWorkloadSpread) syncWorkloadSpread(ws *appsv1alpha1.WorkloadSp
pods, workloadReplicas, err := r.getPodsForWorkloadSpread(ws)
if err != nil || workloadReplicas == -1 {
if err != nil {
klog.Errorf("WorkloadSpread (%s/%s) gets matched pods failed: %v", ws.Namespace, ws.Name, err)
klog.ErrorS(err, "WorkloadSpread got matched pods failed", "workloadSpread", klog.KObj(ws))
}
return err
}
if len(pods) == 0 {
klog.Warningf("WorkloadSpread (%s/%s) has no matched pods, target workload's replicas[%d]", ws.Namespace, ws.Name, workloadReplicas)
klog.InfoS("WorkloadSpread had no matched pods", "workloadSpread", klog.KObj(ws), "targetWorkloadReplicas", workloadReplicas)
}

// group Pods by pod-revision and subset

@ -359,7 +359,7 @@ func getInjectWorkloadSpreadFromPod(pod *corev1.Pod) *wsutil.InjectWorkloadSprea
injectWS := &wsutil.InjectWorkloadSpread{}
err := json.Unmarshal([]byte(injectStr), injectWS)
if err != nil {
klog.Errorf("failed to unmarshal %s from Pod (%s/%s)", injectStr, pod.Namespace, pod.Name)
klog.ErrorS(err, "Failed to unmarshal JSON from Pod", "JSON", injectStr, "pod", klog.KObj(pod))
return nil
}
return injectWS

@ -470,8 +470,8 @@ func (r *ReconcileWorkloadSpread) getAndUpdateSuitableSubsetName(ws *appsv1alpha
if err != nil {
// requiredSelectorTerm field was validated at webhook stage, so this error should not occur
// this error should not be returned, because it is a non-transient error
klog.Errorf("unexpected error occurred when matching pod (%s/%s) with subset, please check requiredSelectorTerm field of subset (%s) in WorkloadSpread (%s/%s), err: %s",
pod.Namespace, pod.Name, subset.Name, ws.Namespace, ws.Name, err.Error())
klog.ErrorS(err, "Unexpected error occurred when matching pod with subset, please check requiredSelectorTerm field of subset in WorkloadSpread",
"pod", klog.KObj(pod), "subsetName", subset.Name, "workloadSpread", klog.KObj(ws))
}
// select the most favorite subsets for the pod by subset.PreferredNodeSelectorTerms
if matched && preferredScore > maxPreferredScore {

@ -523,8 +523,8 @@ func (r *ReconcileWorkloadSpread) patchFavoriteSubsetMetadataToPod(pod *corev1.P
})

if err := r.Patch(context.TODO(), pod, client.RawPatch(types.StrategicMergePatchType, patch)); err != nil {
klog.Errorf(`Failed to patch "matched-workloadspread:{Name: %s, Subset: %s} annotation" for pod (%s/%s), err: %s`,
ws.Name, favoriteSubset.Name, pod.Namespace, pod.Name, err.Error())
klog.ErrorS(err, `Failed to patch "matched-workloadspread" annotation for pod`,
"pod", klog.KObj(pod), "annotationValue", fmt.Sprintf("{Name: %s, Subset: %s}", ws.Name, favoriteSubset.Name))
return err
}

@ -651,8 +651,7 @@ func (r *ReconcileWorkloadSpread) calculateWorkloadSpreadSubsetStatus(ws *appsv1
} else {
subsetMaxReplicas, err = intstr.GetValueFromIntOrPercent(subset.MaxReplicas, int(workloadReplicas), true)
if err != nil || subsetMaxReplicas < 0 {
klog.Errorf("failed to get maxReplicas value from subset (%s) of WorkloadSpread (%s/%s)",
subset.Name, ws.Namespace, ws.Name)
klog.ErrorS(err, "Failed to get maxReplicas value from subset of WorkloadSpread", "subsetName", subset.Name, "workloadSpread", klog.KObj(ws))
return nil
}
}

@ -755,7 +754,7 @@ func (r *ReconcileWorkloadSpread) UpdateWorkloadSpreadStatus(ws *appsv1alpha1.Wo

err := r.writeWorkloadSpreadStatus(clone)
if err == nil {
klog.V(3).Info(makeStatusChangedLog(ws, status))
klog.V(3).InfoS(makeStatusChangedLog(ws, status), "workloadSpread", klog.KObj(ws))
}
return err
}

@ -816,7 +815,7 @@ func (r *ReconcileWorkloadSpread) writeWorkloadSpreadStatus(ws *appsv1alpha1.Wor
err := r.Status().Update(context.TODO(), ws)
if err == nil {
if cacheErr := util.GlobalCache.Add(ws); cacheErr != nil {
klog.Warningf("Failed to update workloadSpread(%s/%s) cache after update status, err: %v", ws.Namespace, ws.Name, cacheErr)
klog.ErrorS(cacheErr, "Failed to update WorkloadSpread cache after update status", "workloadSpread", klog.KObj(ws))
}
}
return err
@ -123,13 +123,13 @@ func podPreferredScore(subset *appsv1alpha1.WorkloadSpreadSubset, pod *corev1.Po
podBytes, _ := json.Marshal(pod)
modified, err := strategicpatch.StrategicMergePatch(podBytes, subset.Patch.Raw, &corev1.Pod{})
if err != nil {
klog.Errorf("failed to merge patch raw for pod %v and subset %v", klog.KObj(pod), subset.Name)
klog.ErrorS(err, "Failed to merge patch raw for pod and subset", "pod", klog.KObj(pod), "subsetName", subset.Name)
return 0
}
patchedPod := &corev1.Pod{}
err = json.Unmarshal(modified, patchedPod)
if err != nil {
klog.Errorf("failed to unmarshal for pod %v and subset %v", klog.KObj(pod), subset.Name)
klog.ErrorS(err, "Failed to unmarshal for pod and subset", "pod", klog.KObj(pod), "subsetName", subset.Name)
return 0
}
// TODO: consider json annotation just like `{"json_key": ["value1", "value2"]}`.
@ -80,12 +80,12 @@ func (p *podEventHandler) handlePod(q workqueue.RateLimitingInterface, obj runti
if value, exist := pod.GetAnnotations()[wsutil.MatchedWorkloadSpreadSubsetAnnotations]; exist {
injectWorkloadSpread := &wsutil.InjectWorkloadSpread{}
if err := json.Unmarshal([]byte(value), injectWorkloadSpread); err != nil {
klog.Errorf("Failed to unmarshal %s to WorkloadSpread", value)
klog.ErrorS(err, "Failed to unmarshal JSON to WorkloadSpread", "JSON", value)
return
}
nsn := types.NamespacedName{Namespace: pod.GetNamespace(), Name: injectWorkloadSpread.Name}
klog.V(5).Infof("%s Pod (%s/%s) and reconcile WorkloadSpread (%s/%s)",
action, pod.Namespace, pod.Name, nsn.Namespace, nsn.Name)
klog.V(5).InfoS("Handle Pod and reconcile WorkloadSpread",
"action", action, "pod", klog.KObj(pod), "workloadSpread", nsn)
q.Add(reconcile.Request{NamespacedName: nsn})
}
}

@ -154,13 +154,13 @@ func (w workloadEventHandler) Update(ctx context.Context, evt event.UpdateEvent,
owner := metav1.GetControllerOfNoCopy(evt.ObjectNew)
ws, err := w.getWorkloadSpreadForWorkload(workloadNsn, gvk, owner)
if err != nil {
klog.Errorf("unable to get WorkloadSpread related with %s (%s/%s), err: %v",
gvk.Kind, workloadNsn.Namespace, workloadNsn.Name, err)
klog.ErrorS(err, "Unable to get WorkloadSpread related with resource kind",
"kind", gvk.Kind, "workload", workloadNsn)
return
}
if ws != nil {
klog.V(3).Infof("%s (%s/%s) changed replicas from %d to %d managed by WorkloadSpread (%s/%s)",
gvk.Kind, workloadNsn.Namespace, workloadNsn.Name, oldReplicas, newReplicas, ws.GetNamespace(), ws.GetName())
klog.V(3).InfoS("Workload changed replicas managed by WorkloadSpread",
"kind", gvk.Kind, "workload", workloadNsn, "oldReplicas", oldReplicas, "newReplicas", newReplicas, "workloadSpread", klog.KObj(ws))
nsn := types.NamespacedName{Namespace: ws.GetNamespace(), Name: ws.GetName()}
q.Add(reconcile.Request{NamespacedName: nsn})
}

@ -201,13 +201,13 @@ func (w *workloadEventHandler) handleWorkload(q workqueue.RateLimitingInterface,
owner := metav1.GetControllerOfNoCopy(obj)
ws, err := w.getWorkloadSpreadForWorkload(workloadNsn, gvk, owner)
if err != nil {
klog.Errorf("unable to get WorkloadSpread related with %s (%s/%s), err: %v",
gvk.Kind, workloadNsn.Namespace, workloadNsn.Name, err)
klog.ErrorS(err, "Unable to get WorkloadSpread related with workload",
"kind", gvk.Kind, "workload", workloadNsn)
return
}
if ws != nil {
klog.V(5).Infof("%s %s (%s/%s) and reconcile WorkloadSpread (%s/%s)",
action, gvk.Kind, workloadNsn.Namespace, workloadNsn.Namespace, ws.Namespace, ws.Name)
klog.V(5).InfoS("Handle workload and reconcile WorkloadSpread",
"action", action, "kind", gvk.Kind, "workload", workloadNsn, "workloadSpread", klog.KObj(ws))
nsn := types.NamespacedName{Namespace: ws.GetNamespace(), Name: ws.GetName()}
q.Add(reconcile.Request{NamespacedName: nsn})
}

@ -219,7 +219,7 @@ func (w *workloadEventHandler) getWorkloadSpreadForWorkload(
wsList := &appsv1alpha1.WorkloadSpreadList{}
listOptions := &client.ListOptions{Namespace: workloadNamespaceName.Namespace}
if err := w.List(context.TODO(), wsList, listOptions); err != nil {
klog.Errorf("List WorkloadSpread failed: %s", err.Error())
klog.ErrorS(err, "Failed to list WorkloadSpread", "namespace", workloadNamespaceName.Namespace)
return nil, err
}