[exporterhelper] make enqueue failures available (#8674)
These metrics were previously only exported either via OpenCensus or via the Prometheus exporter; this change records them via the collector's OpenTelemetry metrics as well. Fixes #8673

Signed-off-by: Alex Boten <aboten@lightstep.com>
Co-authored-by: Dmitrii Anoshin <anoshindx@gmail.com>
This commit is contained in:
parent 3d3fffafea
commit 844b628cf7
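The core of the change, shown in the diffs below, is that exporterhelper's ObsReport now owns per-signal enqueue-failure counters and, depending on its useOtelForMetrics flag, records to either the OpenTelemetry instruments or the existing OpenCensus measures. The following standalone sketch illustrates just the OTel side of that dispatch; the type and function names (enqueueReport, newEnqueueReport) and the flat metric names are illustrative stand-ins, not the collector's actual API.

package main

import (
	"context"
	"fmt"
	"log"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

// enqueueReport is a cut-down stand-in for exporterhelper's ObsReport: one
// counter per signal for items that could not be added to the sending queue.
type enqueueReport struct {
	failedToEnqueueSpans        metric.Int64Counter
	failedToEnqueueMetricPoints metric.Int64Counter
	failedToEnqueueLogRecords   metric.Int64Counter
	otelAttrs                   []attribute.KeyValue
}

func newEnqueueReport(meter metric.Meter, exporterID string) (*enqueueReport, error) {
	newCounter := func(name, desc string) (metric.Int64Counter, error) {
		return meter.Int64Counter(name, metric.WithDescription(desc), metric.WithUnit("1"))
	}
	r := &enqueueReport{otelAttrs: []attribute.KeyValue{attribute.String("exporter", exporterID)}}
	var err error
	if r.failedToEnqueueSpans, err = newCounter("exporter_enqueue_failed_spans",
		"Number of spans failed to be added to the sending queue."); err != nil {
		return nil, err
	}
	if r.failedToEnqueueMetricPoints, err = newCounter("exporter_enqueue_failed_metric_points",
		"Number of metric points failed to be added to the sending queue."); err != nil {
		return nil, err
	}
	if r.failedToEnqueueLogRecords, err = newCounter("exporter_enqueue_failed_log_records",
		"Number of log records failed to be added to the sending queue."); err != nil {
		return nil, err
	}
	return r, nil
}

// recordEnqueueFailure mirrors the dispatch added in this commit: pick the
// counter that matches the signal and add the number of rejected items,
// tagged with the exporter identity.
func (r *enqueueReport) recordEnqueueFailure(ctx context.Context, signal string, failed int64) {
	var c metric.Int64Counter
	switch signal {
	case "traces":
		c = r.failedToEnqueueSpans
	case "metrics":
		c = r.failedToEnqueueMetricPoints
	case "logs":
		c = r.failedToEnqueueLogRecords
	default:
		return
	}
	c.Add(ctx, failed, metric.WithAttributes(r.otelAttrs...))
}

func main() {
	// The collector wires the meter from its own telemetry settings; a plain
	// SDK MeterProvider (no reader attached) is enough for this sketch.
	provider := sdkmetric.NewMeterProvider()
	defer func() { _ = provider.Shutdown(context.Background()) }()

	rep, err := newEnqueueReport(provider.Meter("exporterhelper-sketch"), "otlp")
	if err != nil {
		log.Fatal(err)
	}

	// An exporter helper would call this when send() reports a full queue.
	rep.recordEnqueueFailure(context.Background(), "logs", 15)
	fmt.Println("recorded 15 log records as enqueue failures")
}

With a real reader attached (for example the Prometheus endpoint used for the collector's own telemetry), such counters surface as exporter_enqueue_failed_* series, which is what the updated obsreporttest helpers in the diff below assert against.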
@@ -0,0 +1,25 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: bug_fix
+
+# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver)
+component: exporterhelper
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: make enqueue failures available for otel metrics
+
+# One or more tracking issues or pull requests related to the change
+issues: [8673]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []

@@ -40,7 +40,7 @@ func (b *baseRequestSender) setNextSender(nextSender requestSender) {
 	b.nextSender = nextSender
 }

-type obsrepSenderFactory func(obsrep *obsExporter) requestSender
+type obsrepSenderFactory func(obsrep *ObsReport) requestSender

 // baseRequest is a base implementation for the internal.Request.
 type baseRequest struct {
@@ -143,7 +143,7 @@ type baseExporter struct {
 	signal component.DataType

 	set exporter.CreateSettings
-	obsrep *obsExporter
+	obsrep *ObsReport

 	// Chain of senders that the exporter helper applies before passing the data to the actual exporter.
 	// The data is handled by each sender in the respective order starting from the queueSender.
@@ -163,7 +163,7 @@ type baseExporter struct {
 func newBaseExporter(set exporter.CreateSettings, signal component.DataType, requestExporter bool, marshaler internal.RequestMarshaler,
 	unmarshaler internal.RequestUnmarshaler, osf obsrepSenderFactory, options ...Option) (*baseExporter, error) {

-	obsrep, err := newObsExporter(ObsReportSettings{ExporterID: set.ID, ExporterCreateSettings: set}, globalInstruments)
+	obsReport, err := NewObsReport(ObsReportSettings{ExporterID: set.ID, ExporterCreateSettings: set})
 	if err != nil {
 		return nil, err
 	}
@@ -175,12 +175,12 @@ func newBaseExporter(set exporter.CreateSettings, signal component.DataType, req
 		signal: signal,

 		queueSender: &baseRequestSender{},
-		obsrepSender: osf(obsrep),
+		obsrepSender: osf(obsReport),
 		retrySender: &baseRequestSender{},
 		timeoutSender: &timeoutSender{cfg: NewDefaultTimeoutSettings()},

 		set: set,
-		obsrep: obsrep,
+		obsrep: obsReport,
 	}

 	for _, op := range options {

@@ -32,7 +32,7 @@ var (
 	}
 )

-func newNoopObsrepSender(_ *obsExporter) requestSender {
+func newNoopObsrepSender(_ *ObsReport) requestSender {
 	return &baseRequestSender{}
 }

@@ -99,7 +99,7 @@ func NewLogsExporter(
 		req := newLogsRequest(ctx, ld, pusher)
 		serr := be.send(req)
 		if errors.Is(serr, errSendingQueueIsFull) {
-			be.obsrep.recordLogsEnqueueFailure(req.Context(), int64(req.Count()))
+			be.obsrep.recordEnqueueFailure(req.Context(), component.DataTypeLogs, int64(req.Count()))
 		}
 		return serr
 	}, be.consumerOptions...)
@@ -151,7 +151,7 @@ func NewLogsRequestExporter(
 		r := newRequest(ctx, req)
 		sErr := be.send(r)
 		if errors.Is(sErr, errSendingQueueIsFull) {
-			be.obsrep.recordLogsEnqueueFailure(r.Context(), int64(r.Count()))
+			be.obsrep.recordEnqueueFailure(r.Context(), component.DataTypeLogs, int64(r.Count()))
 		}
 		return sErr
 	}, be.consumerOptions...)
@@ -164,10 +164,10 @@ func NewLogsRequestExporter(

 type logsExporterWithObservability struct {
 	baseRequestSender
-	obsrep *obsExporter
+	obsrep *ObsReport
 }

-func newLogsExporterWithObservability(obsrep *obsExporter) requestSender {
+func newLogsExporterWithObservability(obsrep *ObsReport) requestSender {
 	return &logsExporterWithObservability{obsrep: obsrep}
 }

@@ -248,7 +248,7 @@ func TestLogsExporter_WithRecordEnqueueFailedMetrics(t *testing.T) {
 	}

 	// 2 batched must be in queue, and 5 batches (15 log records) rejected due to queue overflow
-	checkExporterEnqueueFailedLogsStats(t, globalInstruments, fakeLogsExporterName, int64(15))
+	require.NoError(t, tt.CheckExporterEnqueueFailedLogs(int64(15)))
 }

 func TestLogsExporter_WithSpan(t *testing.T) {

@@ -99,7 +99,7 @@ func NewMetricsExporter(
 		req := newMetricsRequest(ctx, md, pusher)
 		serr := be.send(req)
 		if errors.Is(serr, errSendingQueueIsFull) {
-			be.obsrep.recordMetricsEnqueueFailure(req.Context(), int64(req.Count()))
+			be.obsrep.recordEnqueueFailure(req.Context(), component.DataTypeMetrics, int64(req.Count()))
 		}
 		return serr
 	}, be.consumerOptions...)
@@ -151,7 +151,7 @@ func NewMetricsRequestExporter(
 		r := newRequest(ctx, req)
 		sErr := be.send(r)
 		if errors.Is(sErr, errSendingQueueIsFull) {
-			be.obsrep.recordMetricsEnqueueFailure(r.Context(), int64(r.Count()))
+			be.obsrep.recordEnqueueFailure(r.Context(), component.DataTypeMetrics, int64(r.Count()))
 		}
 		return sErr
 	}, be.consumerOptions...)
@@ -164,10 +164,10 @@ func NewMetricsRequestExporter(

 type metricsSenderWithObservability struct {
 	baseRequestSender
-	obsrep *obsExporter
+	obsrep *ObsReport
 }

-func newMetricsSenderWithObservability(obsrep *obsExporter) requestSender {
+func newMetricsSenderWithObservability(obsrep *ObsReport) requestSender {
 	return &metricsSenderWithObservability{obsrep: obsrep}
 }

@@ -248,7 +248,7 @@ func TestMetricsExporter_WithRecordEnqueueFailedMetrics(t *testing.T) {
 	}

 	// 2 batched must be in queue, and 10 metric points rejected due to queue overflow
-	checkExporterEnqueueFailedMetricsStats(t, globalInstruments, fakeMetricsExporterName, int64(10))
+	require.NoError(t, tt.CheckExporterEnqueueFailedMetrics(int64(10)))
 }

 func TestMetricsExporter_WithSpan(t *testing.T) {

@@ -34,14 +34,17 @@ type ObsReport struct {
 	tracer trace.Tracer
 	logger *zap.Logger

-	useOtelForMetrics bool
-	otelAttrs []attribute.KeyValue
-	sentSpans metric.Int64Counter
-	failedToSendSpans metric.Int64Counter
-	sentMetricPoints metric.Int64Counter
-	failedToSendMetricPoints metric.Int64Counter
-	sentLogRecords metric.Int64Counter
-	failedToSendLogRecords metric.Int64Counter
+	useOtelForMetrics bool
+	otelAttrs []attribute.KeyValue
+	sentSpans metric.Int64Counter
+	failedToSendSpans metric.Int64Counter
+	failedToEnqueueSpans metric.Int64Counter
+	sentMetricPoints metric.Int64Counter
+	failedToSendMetricPoints metric.Int64Counter
+	failedToEnqueueMetricPoints metric.Int64Counter
+	sentLogRecords metric.Int64Counter
+	failedToSendLogRecords metric.Int64Counter
+	failedToEnqueueLogRecords metric.Int64Counter
 }

 // ObsReportSettings are settings for creating an ObsReport.
@@ -96,6 +99,12 @@ func (or *ObsReport) createOtelMetrics(cfg ObsReportSettings) error {
 		metric.WithUnit("1"))
 	errors = multierr.Append(errors, err)

+	or.failedToEnqueueSpans, err = meter.Int64Counter(
+		obsmetrics.ExporterPrefix+obsmetrics.FailedToEnqueueSpansKey,
+		metric.WithDescription("Number of spans failed to be added to the sending queue."),
+		metric.WithUnit("1"))
+	errors = multierr.Append(errors, err)
+
 	or.sentMetricPoints, err = meter.Int64Counter(
 		obsmetrics.ExporterPrefix+obsmetrics.SentMetricPointsKey,
 		metric.WithDescription("Number of metric points successfully sent to destination."),
@@ -108,6 +117,12 @@ func (or *ObsReport) createOtelMetrics(cfg ObsReportSettings) error {
 		metric.WithUnit("1"))
 	errors = multierr.Append(errors, err)

+	or.failedToEnqueueMetricPoints, err = meter.Int64Counter(
+		obsmetrics.ExporterPrefix+obsmetrics.FailedToEnqueueMetricPointsKey,
+		metric.WithDescription("Number of metric points failed to be added to the sending queue."),
+		metric.WithUnit("1"))
+	errors = multierr.Append(errors, err)
+
 	or.sentLogRecords, err = meter.Int64Counter(
 		obsmetrics.ExporterPrefix+obsmetrics.SentLogRecordsKey,
 		metric.WithDescription("Number of log record successfully sent to destination."),
@@ -120,6 +135,12 @@ func (or *ObsReport) createOtelMetrics(cfg ObsReportSettings) error {
 		metric.WithUnit("1"))
 	errors = multierr.Append(errors, err)

+	or.failedToEnqueueLogRecords, err = meter.Int64Counter(
+		obsmetrics.ExporterPrefix+obsmetrics.FailedToEnqueueLogRecordsKey,
+		metric.WithDescription("Number of log records failed to be added to the sending queue."),
+		metric.WithUnit("1"))
+	errors = multierr.Append(errors, err)
+
 	return errors
 }

@@ -252,3 +273,43 @@ func toNumItems(numExportedItems int, err error) (int64, int64) {
 	}
 	return int64(numExportedItems), 0
 }
+
+func (or *ObsReport) recordEnqueueFailure(ctx context.Context, dataType component.DataType, failed int64) {
+	if or.useOtelForMetrics {
+		or.recordEnqueueFailureWithOtel(ctx, dataType, failed)
+	} else {
+		or.recordEnqueueFailureWithOC(ctx, dataType, failed)
+	}
+}
+
+func (or *ObsReport) recordEnqueueFailureWithOC(ctx context.Context, dataType component.DataType, failed int64) {
+	var failedMeasure *stats.Int64Measure
+	switch dataType {
+	case component.DataTypeTraces:
+		failedMeasure = obsmetrics.ExporterFailedToEnqueueSpans
+	case component.DataTypeMetrics:
+		failedMeasure = obsmetrics.ExporterFailedToEnqueueMetricPoints
+	case component.DataTypeLogs:
+		failedMeasure = obsmetrics.ExporterFailedToEnqueueLogRecords
+	}
+	if failed > 0 {
+		_ = stats.RecordWithTags(
+			ctx,
+			or.mutators,
+			failedMeasure.M(failed))
+	}
+}
+
+func (or *ObsReport) recordEnqueueFailureWithOtel(ctx context.Context, dataType component.DataType, failed int64) {
+	var enqueueFailedMeasure metric.Int64Counter
+	switch dataType {
+	case component.DataTypeTraces:
+		enqueueFailedMeasure = or.failedToEnqueueSpans
+	case component.DataTypeMetrics:
+		enqueueFailedMeasure = or.failedToEnqueueMetricPoints
+	case component.DataTypeLogs:
+		enqueueFailedMeasure = or.failedToEnqueueLogRecords
+	}
+
+	enqueueFailedMeasure.Add(ctx, failed, metric.WithAttributes(or.otelAttrs...))
+}

@@ -4,8 +4,6 @@
 package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper"

 import (
-	"context"
-
 	"go.opencensus.io/metric"
 	"go.opencensus.io/metric/metricdata"
 	"go.opencensus.io/metric/metricproducer"
@@ -26,12 +24,9 @@ func init() {
 }

 type instruments struct {
-	registry *metric.Registry
-	queueSize *metric.Int64DerivedGauge
-	queueCapacity *metric.Int64DerivedGauge
-	failedToEnqueueTraceSpans *metric.Int64Cumulative
-	failedToEnqueueMetricPoints *metric.Int64Cumulative
-	failedToEnqueueLogRecords *metric.Int64Cumulative
+	registry *metric.Registry
+	queueSize *metric.Int64DerivedGauge
+	queueCapacity *metric.Int64DerivedGauge
 }

 func newInstruments(registry *metric.Registry) *instruments {
@@ -49,67 +44,5 @@ func newInstruments(registry *metric.Registry) *instruments {
 		metric.WithDescription("Fixed capacity of the retry queue (in batches)"),
 		metric.WithLabelKeys(obsmetrics.ExporterKey),
 		metric.WithUnit(metricdata.UnitDimensionless))
-
-	insts.failedToEnqueueTraceSpans, _ = registry.AddInt64Cumulative(
-		obsmetrics.ExporterKey+"/enqueue_failed_spans",
-		metric.WithDescription("Number of spans failed to be added to the sending queue."),
-		metric.WithLabelKeys(obsmetrics.ExporterKey),
-		metric.WithUnit(metricdata.UnitDimensionless))
-
-	insts.failedToEnqueueMetricPoints, _ = registry.AddInt64Cumulative(
-		obsmetrics.ExporterKey+"/enqueue_failed_metric_points",
-		metric.WithDescription("Number of metric points failed to be added to the sending queue."),
-		metric.WithLabelKeys(obsmetrics.ExporterKey),
-		metric.WithUnit(metricdata.UnitDimensionless))
-
-	insts.failedToEnqueueLogRecords, _ = registry.AddInt64Cumulative(
-		obsmetrics.ExporterKey+"/enqueue_failed_log_records",
-		metric.WithDescription("Number of log records failed to be added to the sending queue."),
-		metric.WithLabelKeys(obsmetrics.ExporterKey),
-		metric.WithUnit(metricdata.UnitDimensionless))
-
 	return insts
 }
-
-// obsExporter is a helper to add observability to an exporter.
-type obsExporter struct {
-	*ObsReport
-	failedToEnqueueTraceSpansEntry *metric.Int64CumulativeEntry
-	failedToEnqueueMetricPointsEntry *metric.Int64CumulativeEntry
-	failedToEnqueueLogRecordsEntry *metric.Int64CumulativeEntry
-}
-
-// newObsExporter creates a new observability exporter.
-func newObsExporter(cfg ObsReportSettings, insts *instruments) (*obsExporter, error) {
-	labelValue := metricdata.NewLabelValue(cfg.ExporterID.String())
-	failedToEnqueueTraceSpansEntry, _ := insts.failedToEnqueueTraceSpans.GetEntry(labelValue)
-	failedToEnqueueMetricPointsEntry, _ := insts.failedToEnqueueMetricPoints.GetEntry(labelValue)
-	failedToEnqueueLogRecordsEntry, _ := insts.failedToEnqueueLogRecords.GetEntry(labelValue)
-
-	exp, err := NewObsReport(cfg)
-	if err != nil {
-		return nil, err
-	}
-
-	return &obsExporter{
-		ObsReport: exp,
-		failedToEnqueueTraceSpansEntry: failedToEnqueueTraceSpansEntry,
-		failedToEnqueueMetricPointsEntry: failedToEnqueueMetricPointsEntry,
-		failedToEnqueueLogRecordsEntry: failedToEnqueueLogRecordsEntry,
-	}, nil
-}
-
-// recordTracesEnqueueFailure records number of spans that failed to be added to the sending queue.
-func (eor *obsExporter) recordTracesEnqueueFailure(_ context.Context, numSpans int64) {
-	eor.failedToEnqueueTraceSpansEntry.Inc(numSpans)
-}
-
-// recordMetricsEnqueueFailure records number of metric points that failed to be added to the sending queue.
-func (eor *obsExporter) recordMetricsEnqueueFailure(_ context.Context, numMetricPoints int64) {
-	eor.failedToEnqueueMetricPointsEntry.Inc(numMetricPoints)
-}
-
-// recordLogsEnqueueFailure records number of log records that failed to be added to the sending queue.
-func (eor *obsExporter) recordLogsEnqueueFailure(_ context.Context, numLogRecords int64) {
-	eor.failedToEnqueueLogRecordsEntry.Inc(numLogRecords)
-}

@@ -8,8 +8,6 @@ import (
 	"testing"

 	"github.com/stretchr/testify/require"
-	"go.opencensus.io/metric"
-	"go.opencensus.io/tag"

 	"go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/exporter"
@@ -22,47 +20,23 @@ func TestExportEnqueueFailure(t *testing.T) {
 	require.NoError(t, err)
 	t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) })

-	insts := newInstruments(metric.NewRegistry())
-	obsrep, err := newObsExporter(ObsReportSettings{
+	obsrep, err := NewObsReport(ObsReportSettings{
 		ExporterID: exporterID,
 		ExporterCreateSettings: exporter.CreateSettings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings, BuildInfo: component.NewDefaultBuildInfo()},
-	}, insts)
+	})
 	require.NoError(t, err)

 	logRecords := int64(7)
-	obsrep.recordLogsEnqueueFailure(context.Background(), logRecords)
-	checkExporterEnqueueFailedLogsStats(t, insts, exporterID, logRecords)
+	obsrep.recordEnqueueFailureWithOC(context.Background(), component.DataTypeLogs, logRecords)
+	require.NoError(t, tt.CheckExporterEnqueueFailedLogs(logRecords))

 	spans := int64(12)
-	obsrep.recordTracesEnqueueFailure(context.Background(), spans)
-	checkExporterEnqueueFailedTracesStats(t, insts, exporterID, spans)
+	obsrep.recordEnqueueFailureWithOC(context.Background(), component.DataTypeTraces, spans)
+	require.NoError(t, tt.CheckExporterEnqueueFailedTraces(spans))

 	metricPoints := int64(21)
-	obsrep.recordMetricsEnqueueFailure(context.Background(), metricPoints)
-	checkExporterEnqueueFailedMetricsStats(t, insts, exporterID, metricPoints)
+	obsrep.recordEnqueueFailureWithOC(context.Background(), component.DataTypeMetrics, metricPoints)
+	require.NoError(t, tt.CheckExporterEnqueueFailedMetrics(metricPoints))
 }

-// checkExporterEnqueueFailedTracesStats checks that reported number of spans failed to enqueue match given values.
-// When this function is called it is required to also call SetupTelemetry as first thing.
-func checkExporterEnqueueFailedTracesStats(t *testing.T, insts *instruments, exporter component.ID, spans int64) {
-	checkValueForProducer(t, insts.registry, tagsForExporterView(exporter), spans, "exporter/enqueue_failed_spans")
-}
-
-// checkExporterEnqueueFailedMetricsStats checks that reported number of metric points failed to enqueue match given values.
-// When this function is called it is required to also call SetupTelemetry as first thing.
-func checkExporterEnqueueFailedMetricsStats(t *testing.T, insts *instruments, exporter component.ID, metricPoints int64) {
-	checkValueForProducer(t, insts.registry, tagsForExporterView(exporter), metricPoints, "exporter/enqueue_failed_metric_points")
-}
-
-// checkExporterEnqueueFailedLogsStats checks that reported number of log records failed to enqueue match given values.
-// When this function is called it is required to also call SetupTelemetry as first thing.
-func checkExporterEnqueueFailedLogsStats(t *testing.T, insts *instruments, exporter component.ID, logRecords int64) {
-	checkValueForProducer(t, insts.registry, tagsForExporterView(exporter), logRecords, "exporter/enqueue_failed_log_records")
-}
-
-// tagsForExporterView returns the tags that are needed for the exporter views.
-func tagsForExporterView(exporter component.ID) []tag.Tag {
-	return []tag.Tag{
-		{Key: exporterTag, Value: exporter.String()},
-	}
-}
+// TODO: add test for validating recording enqueue failures for OTel

@@ -315,7 +315,7 @@ type observabilityConsumerSender struct {
 	droppedItemsCount *atomic.Int64
 }

-func newObservabilityConsumerSender(_ *obsExporter) requestSender {
+func newObservabilityConsumerSender(_ *ObsReport) requestSender {
 	return &observabilityConsumerSender{
 		waitGroup: new(sync.WaitGroup),
 		droppedItemsCount: &atomic.Int64{},

@@ -99,7 +99,7 @@ func NewTracesExporter(
 		req := newTracesRequest(ctx, td, pusher)
 		serr := be.send(req)
 		if errors.Is(serr, errSendingQueueIsFull) {
-			be.obsrep.recordTracesEnqueueFailure(req.Context(), int64(req.Count()))
+			be.obsrep.recordEnqueueFailure(req.Context(), component.DataTypeTraces, int64(req.Count()))
 		}
 		return serr
 	}, be.consumerOptions...)
@@ -151,7 +151,7 @@ func NewTracesRequestExporter(
 		r := newRequest(ctx, req)
 		sErr := be.send(r)
 		if errors.Is(sErr, errSendingQueueIsFull) {
-			be.obsrep.recordTracesEnqueueFailure(r.Context(), int64(r.Count()))
+			be.obsrep.recordEnqueueFailure(r.Context(), component.DataTypeTraces, int64(r.Count()))
 		}
 		return sErr
 	}, be.consumerOptions...)
@@ -164,10 +164,10 @@ func NewTracesRequestExporter(

 type tracesExporterWithObservability struct {
 	baseRequestSender
-	obsrep *obsExporter
+	obsrep *ObsReport
 }

-func newTracesExporterWithObservability(obsrep *obsExporter) requestSender {
+func newTracesExporterWithObservability(obsrep *ObsReport) requestSender {
 	return &tracesExporterWithObservability{obsrep: obsrep}
 }

@@ -245,7 +245,7 @@ func TestTracesExporter_WithRecordEnqueueFailedMetrics(t *testing.T) {
 	}

 	// 2 batched must be in queue, and 5 batches (10 spans) rejected due to queue overflow
-	checkExporterEnqueueFailedTracesStats(t, globalInstruments, fakeTracesExporterName, int64(10))
+	require.NoError(t, tt.CheckExporterEnqueueFailedTraces(int64(10)))
 }

 func TestTracesExporter_WithSpan(t *testing.T) {

@@ -16,16 +16,22 @@ const (
 	SentSpansKey = "sent_spans"
 	// FailedToSendSpansKey used to track spans that failed to be sent by exporters.
 	FailedToSendSpansKey = "send_failed_spans"
+	// FailedToEnqueueSpansKey used to track spans that failed to be enqueued by exporters.
+	FailedToEnqueueSpansKey = "enqueue_failed_spans"

 	// SentMetricPointsKey used to track metric points sent by exporters.
 	SentMetricPointsKey = "sent_metric_points"
 	// FailedToSendMetricPointsKey used to track metric points that failed to be sent by exporters.
 	FailedToSendMetricPointsKey = "send_failed_metric_points"
+	// FailedToEnqueueMetricPointsKey used to track metric points that failed to be enqueued by exporters.
+	FailedToEnqueueMetricPointsKey = "enqueue_failed_metric_points"

 	// SentLogRecordsKey used to track logs sent by exporters.
 	SentLogRecordsKey = "sent_log_records"
 	// FailedToSendLogRecordsKey used to track logs that failed to be sent by exporters.
 	FailedToSendLogRecordsKey = "send_failed_log_records"
+	// FailedToEnqueueLogRecordsKey used to track logs that failed to be enqueued by exporters.
+	FailedToEnqueueLogRecordsKey = "enqueue_failed_log_records"
 )

 var (
@@ -49,6 +55,10 @@ var (
 		ExporterPrefix+FailedToSendSpansKey,
 		"Number of spans in failed attempts to send to destination.",
 		stats.UnitDimensionless)
+	ExporterFailedToEnqueueSpans = stats.Int64(
+		ExporterPrefix+FailedToEnqueueSpansKey,
+		"Number of spans failed to be added to the sending queue.",
+		stats.UnitDimensionless)
 	ExporterSentMetricPoints = stats.Int64(
 		ExporterPrefix+SentMetricPointsKey,
 		"Number of metric points successfully sent to destination.",
@@ -57,6 +67,10 @@ var (
 		ExporterPrefix+FailedToSendMetricPointsKey,
 		"Number of metric points in failed attempts to send to destination.",
 		stats.UnitDimensionless)
+	ExporterFailedToEnqueueMetricPoints = stats.Int64(
+		ExporterPrefix+FailedToEnqueueMetricPointsKey,
+		"Number of metric points failed to be added to the sending queue.",
+		stats.UnitDimensionless)
 	ExporterSentLogRecords = stats.Int64(
 		ExporterPrefix+SentLogRecordsKey,
 		"Number of log record successfully sent to destination.",
@@ -65,4 +79,8 @@ var (
 		ExporterPrefix+FailedToSendLogRecordsKey,
 		"Number of log records in failed attempts to send to destination.",
 		stats.UnitDimensionless)
+	ExporterFailedToEnqueueLogRecords = stats.Int64(
+		ExporterPrefix+FailedToEnqueueLogRecordsKey,
+		"Number of log records failed to be added to the sending queue.",
+		stats.UnitDimensionless)
 )

@@ -56,10 +56,13 @@ func AllViews(level configtelemetry.Level) []*view.View {
 		measures = []*stats.Int64Measure{
 			obsmetrics.ExporterSentSpans,
 			obsmetrics.ExporterFailedToSendSpans,
+			obsmetrics.ExporterFailedToEnqueueSpans,
 			obsmetrics.ExporterSentMetricPoints,
 			obsmetrics.ExporterFailedToSendMetricPoints,
+			obsmetrics.ExporterFailedToEnqueueMetricPoints,
 			obsmetrics.ExporterSentLogRecords,
 			obsmetrics.ExporterFailedToSendLogRecords,
+			obsmetrics.ExporterFailedToEnqueueLogRecords,
 		}
 		tagKeys = []tag.Key{obsmetrics.TagKeyExporter}
 		views = append(views, genViews(measures, tagKeys, view.Sum())...)

@@ -24,17 +24,17 @@ func TestConfigure(t *testing.T) {
 		{
 			name: "basic",
 			level: configtelemetry.LevelBasic,
-			wantViewsLen: 24,
+			wantViewsLen: 27,
 		},
 		{
 			name: "normal",
 			level: configtelemetry.LevelNormal,
-			wantViewsLen: 24,
+			wantViewsLen: 27,
 		},
 		{
 			name: "detailed",
 			level: configtelemetry.LevelDetailed,
-			wantViewsLen: 24,
+			wantViewsLen: 27,
 		},
 	}
 	for _, tt := range tests {

@@ -59,6 +59,18 @@ func (tts *TestTelemetry) CheckExporterMetrics(sentMetricsPoints, sendFailedMetr
 	return tts.otelPrometheusChecker.checkExporterMetrics(tts.id, sentMetricsPoints, sendFailedMetricsPoints)
 }

+func (tts *TestTelemetry) CheckExporterEnqueueFailedMetrics(enqueueFailed int64) error {
+	return tts.otelPrometheusChecker.checkExporterEnqueueFailed(tts.id, "metric_points", enqueueFailed)
+}
+
+func (tts *TestTelemetry) CheckExporterEnqueueFailedTraces(enqueueFailed int64) error {
+	return tts.otelPrometheusChecker.checkExporterEnqueueFailed(tts.id, "spans", enqueueFailed)
+}
+
+func (tts *TestTelemetry) CheckExporterEnqueueFailedLogs(enqueueFailed int64) error {
+	return tts.otelPrometheusChecker.checkExporterEnqueueFailed(tts.id, "log_records", enqueueFailed)
+}
+
 // CheckExporterLogs checks that for the current exported values for logs exporter metrics match given values.
 // When this function is called it is required to also call SetupTelemetry as first thing.
 func (tts *TestTelemetry) CheckExporterLogs(sentLogRecords, sendFailedLogRecords int64) error {

@@ -77,8 +77,8 @@ func (pc *prometheusChecker) checkExporterLogs(exporter component.ID, sent, send
 	return pc.checkExporter(exporter, "log_records", sent, sendFailed)
 }

-func (pc *prometheusChecker) checkExporterMetrics(exporter component.ID, sentMetricPoints, sendFailedMetricPoints int64) error {
-	return pc.checkExporter(exporter, "metric_points", sentMetricPoints, sendFailedMetricPoints)
+func (pc *prometheusChecker) checkExporterMetrics(exporter component.ID, sent, sendFailed int64) error {
+	return pc.checkExporter(exporter, "metric_points", sent, sendFailed)
 }

 func (pc *prometheusChecker) checkExporter(exporter component.ID, datatype string, sent, sendFailed int64) error {
@@ -91,6 +91,14 @@ func (pc *prometheusChecker) checkExporter(exporter component.ID, datatype strin
 	return errs
 }

+func (pc *prometheusChecker) checkExporterEnqueueFailed(exporter component.ID, datatype string, enqueueFailed int64) error {
+	if enqueueFailed == 0 {
+		return nil
+	}
+	exporterAttrs := attributesForExporterMetrics(exporter)
+	return pc.checkCounter(fmt.Sprintf("exporter_enqueue_failed_%s", datatype), enqueueFailed, exporterAttrs)
+}
+
 func (pc *prometheusChecker) checkCounter(expectedMetric string, value int64, attrs []attribute.KeyValue) error {
 	// Forces a flush for the opencensus view data.
 	_, _ = view.RetrieveData(expectedMetric)