Fix logging around pkg (#1310)

- use more performant functions (mostly removing formatter variants where possible)
- move zap.Error() to the call site rather than creating a new sugared logger with the error field attached (which is not cheap)
- fix *w usages where the key was not provided.
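
A minimal sketch (not part of the commit) of the before/after patterns these bullets describe, using go.uber.org/zap; the example logger, err, and target values are illustrative only:

package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample().Sugar() // stand-in for the *zap.SugaredLogger used throughout pkg
	err := errors.New("connection refused")
	target := "ws://example.invalid"

	// Before: Errorf pays for format-string parsing, and With(zap.Error(err))
	// builds a whole new sugared logger just to attach one field.
	logger.With(zap.Error(err)).Errorf("Connecting to %s failed", target)

	// After: pass the typed field directly to the *w variant at the call site;
	// no intermediate logger is created.
	logger.Errorw("Failed connecting to "+target, zap.Error(err))

	// The *w variants expect key/value pairs or zap.Field values after the
	// message, so a format verb in the message logs the error without a key:
	//   logger.Fatalw("Error creating lock: %v", err)        // value has no key
	//   logger.Fatalw("Error creating lock", zap.Error(err)) // fixed
	// (Fatalw is left in comments here because it would exit the process.)

	// Plain Info concatenates its arguments, avoiding Infof's formatting pass.
	logger.Info("Connected to ", target)
}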
Victor Agababov 2020-05-07 15:00:45 -07:00 committed by GitHub
parent 66c1551b08
commit 66f1d63f10
4 changed files with 19 additions and 19 deletions


@@ -427,20 +427,20 @@ func (c *Impl) processNextWorkItem() bool {
 // Embed the key into the logger and attach that to the context we pass
 // to the Reconciler.
 logger := c.logger.With(zap.String(logkey.TraceId, uuid.New().String()), zap.String(logkey.Key, keyStr))
-ctx := logging.WithLogger(context.TODO(), logger)
+ctx := logging.WithLogger(context.Background(), logger)
 // Run Reconcile, passing it the namespace/name string of the
 // resource to be synced.
 if err = c.Reconciler.Reconcile(ctx, keyStr); err != nil {
 c.handleErr(err, key)
-logger.Infof("Reconcile failed. Time taken: %v.", time.Since(startTime))
+logger.Info("Reconcile failed. Time taken: ", time.Since(startTime))
 return true
 }
 // Finally, if no error occurs we Forget this item so it does not
 // have any delay when another change happens.
 c.WorkQueue.Forget(key)
-logger.Infof("Reconcile succeeded. Time taken: %v.", time.Since(startTime))
+logger.Info("Reconcile succeeded. Time taken: ", time.Since(startTime))
 return true
 }


@@ -189,7 +189,7 @@ func MainWithConfig(ctx context.Context, component string, cfg *rest.Config, cto
 // Set up leader election config
 leaderElectionConfig, err := GetLeaderElectionConfig(ctx)
 if err != nil {
-logger.Fatalf("Error loading leader election configuration: %v", err)
+logger.Fatalw("Error loading leader election configuration", zap.Error(err))
 }
 leConfig := leaderElectionConfig.GetComponentConfig(component)
@@ -343,9 +343,9 @@ func SetupConfigMapWatchOrDie(ctx context.Context, logger *zap.SugaredLogger) *c
 if cmLabel := system.ResourceLabel(); cmLabel != "" {
 req, err := configmap.FilterConfigByLabelExists(cmLabel)
 if err != nil {
-logger.With(zap.Error(err)).Fatalf("Failed to generate requirement for label %q")
+logger.Fatalw("Failed to generate requirement for label "+cmLabel, zap.Error(err))
 }
-logger.Infof("Setting up ConfigMap watcher with label selector %q", req)
+logger.Info("Setting up ConfigMap watcher with label selector: ", req)
 cmLabelReqs = append(cmLabelReqs, *req)
 }
 // TODO(mattmoor): This should itself take a context and be injection-based.
@@ -360,7 +360,7 @@ func WatchLoggingConfigOrDie(ctx context.Context, cmw *configmap.InformedWatcher
 metav1.GetOptions{}); err == nil {
 cmw.Watch(logging.ConfigMapName(), logging.UpdateLevelFromConfigMap(logger, atomicLevel, component))
 } else if !apierrors.IsNotFound(err) {
-logger.With(zap.Error(err)).Fatalf("Error reading ConfigMap %q", logging.ConfigMapName())
+logger.Fatalw("Error reading ConfigMap "+logging.ConfigMapName(), zap.Error(err))
 }
 }
@@ -374,7 +374,7 @@ func WatchObservabilityConfigOrDie(ctx context.Context, cmw *configmap.InformedW
 metrics.ConfigMapWatcher(component, SecretFetcher(ctx), logger),
 profilingHandler.UpdateFromConfigMap)
 } else if !apierrors.IsNotFound(err) {
-logger.With(zap.Error(err)).Fatalf("Error reading ConfigMap %q", metrics.ConfigMapName())
+logger.Fatalw("Error reading ConfigMap "+metrics.ConfigMapName(), zap.Error(err))
 }
 }
@@ -457,7 +457,7 @@ func RunLeaderElected(ctx context.Context, logger *zap.SugaredLogger, run func(c
 EventRecorder: recorder,
 })
 if err != nil {
-logger.Fatalw("Error creating lock: %v", err)
+logger.Fatalw("Error creating lock", zap.Error(err))
 }
 // Execute the `run` function when we have the lock.
@@ -469,7 +469,7 @@ func RunLeaderElected(ctx context.Context, logger *zap.SugaredLogger, run func(c
 Callbacks: leaderelection.LeaderCallbacks{
 OnStartedLeading: run,
 OnStoppedLeading: func() {
-logger.Fatal("leaderelection lost")
+logger.Fatal("Leader election lost")
 },
 },
 ReleaseOnCancel: true,


@@ -52,7 +52,7 @@ var (
 func NewLogger(configJSON string, levelOverride string, opts ...zap.Option) (*zap.SugaredLogger, zap.AtomicLevel) {
 logger, atomicLevel, err := newLoggerFromConfig(configJSON, levelOverride, opts)
 if err == nil {
-return enrichLoggerWithCommitID(logger.Sugar()), atomicLevel
+return enrichLoggerWithCommitID(logger), atomicLevel
 }
 loggingCfg := zap.NewProductionConfig()
@@ -66,18 +66,18 @@ func NewLogger(configJSON string, levelOverride string, opts ...zap.Option) (*za
 if err2 != nil {
 panic(err2)
 }
-return enrichLoggerWithCommitID(logger.Named(fallbackLoggerName).Sugar()), loggingCfg.Level
+return enrichLoggerWithCommitID(logger.Named(fallbackLoggerName)), loggingCfg.Level
 }
-func enrichLoggerWithCommitID(logger *zap.SugaredLogger) *zap.SugaredLogger {
+func enrichLoggerWithCommitID(logger *zap.Logger) *zap.SugaredLogger {
 commmitID, err := changeset.Get()
 if err == nil {
 // Enrich logs with GitHub commit ID.
-return logger.With(zap.String(logkey.GitHubCommitID, commmitID))
+return logger.With(zap.String(logkey.GitHubCommitID, commmitID)).Sugar()
 }
-logger.Infof("Fetch GitHub commit ID from kodata failed: %v", err)
-return logger
+logger.Info("Fetch GitHub commit ID from kodata failed", zap.Error(err))
+return logger.Sugar()
 }
 // NewLoggerFromConfig creates a logger using the provided Config


@@ -129,12 +129,12 @@ func NewDurableConnection(target string, messageChan chan []byte, logger *zap.Su
 for {
 select {
 default:
-logger.Infof("Connecting to %s", target)
+logger.Info("Connecting to ", target)
 if err := c.connect(); err != nil {
-logger.With(zap.Error(err)).Errorf("Connecting to %s failed", target)
+logger.Errorw("Failed connecting to "+target, zap.Error(err))
 continue
 }
-logger.Debugf("Connected to %s", target)
+logger.Debug("Connected to ", target)
 if err := c.keepalive(); err != nil {
-logger.With(zap.Error(err)).Errorf("Connection to %s broke down, reconnecting...", target)
 }