Change SpansCount to SpanCount everywhere (#3550)

Signed-off-by: Bogdan Drutu <bogdandrutu@gmail.com>
This commit is contained in:
Bogdan Drutu 2021-07-01 16:57:42 -07:00 committed by GitHub
parent cd83279681
commit 9a7bccea3e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 43 additions and 43 deletions

View File

@@ -58,7 +58,7 @@ func verifyTracesProcessorDoesntProduceAfterShutdown(t *testing.T, factory compo
// The Shutdown() is done. It means the processor must have sent everything we
// gave it to the next sink.
assert.EqualValues(t, generatedCount, nextSink.SpansCount())
assert.EqualValues(t, generatedCount, nextSink.SpanCount())
}
// VerifyProcessorShutdown verifies the processor doesn't produce telemetry data after shutdown.

View File

@@ -26,9 +26,9 @@ import (
// stores all traces and allows querying them for testing.
type TracesSink struct {
nonMutatingConsumer
mu sync.Mutex
traces []pdata.Traces
spansCount int
mu sync.Mutex
traces []pdata.Traces
spanCount int
}
var _ consumer.Traces = (*TracesSink)(nil)
@@ -39,7 +39,7 @@ func (ste *TracesSink) ConsumeTraces(_ context.Context, td pdata.Traces) error {
defer ste.mu.Unlock()
ste.traces = append(ste.traces, td)
ste.spansCount += td.SpanCount()
ste.spanCount += td.SpanCount()
return nil
}
@@ -54,11 +54,11 @@ func (ste *TracesSink) AllTraces() []pdata.Traces {
return copyTraces
}
// SpansCount returns the number of spans sent to this sink.
func (ste *TracesSink) SpansCount() int {
// SpanCount returns the number of spans sent to this sink.
func (ste *TracesSink) SpanCount() int {
ste.mu.Lock()
defer ste.mu.Unlock()
return ste.spansCount
return ste.spanCount
}
// Reset deletes any stored data.
@@ -67,7 +67,7 @@ func (ste *TracesSink) Reset() {
defer ste.mu.Unlock()
ste.traces = nil
ste.spansCount = 0
ste.spanCount = 0
}
// MetricsSink is a consumer.Metrics that acts like a sink that

View File

@@ -34,10 +34,10 @@ func TestTracesSink(t *testing.T) {
want = append(want, td)
}
assert.Equal(t, want, sink.AllTraces())
assert.Equal(t, len(want), sink.SpansCount())
assert.Equal(t, len(want), sink.SpanCount())
sink.Reset()
assert.Equal(t, 0, len(sink.AllTraces()))
assert.Equal(t, 0, sink.SpansCount())
assert.Equal(t, 0, sink.SpanCount())
}
func TestMetricsSink(t *testing.T) {

View File

@@ -120,7 +120,7 @@ func TestTraceRoundTrip(t *testing.T) {
td := testdata.GenerateTracesOneSpan()
assert.NoError(t, exp.ConsumeTraces(context.Background(), td))
require.Eventually(t, func() bool {
return sink.SpansCount() > 0
return sink.SpanCount() > 0
}, 1*time.Second, 10*time.Millisecond)
allTraces := sink.AllTraces()
require.Len(t, allTraces, 1)
@@ -177,7 +177,7 @@ func TestCompressionOptions(t *testing.T) {
td := testdata.GenerateTracesOneSpan()
assert.NoError(t, exp.ConsumeTraces(context.Background(), td))
require.Eventually(t, func() bool {
return sink.SpansCount() > 0
return sink.SpanCount() > 0
}, 1*time.Second, 10*time.Millisecond)
allTraces := sink.AllTraces()
require.Len(t, allTraces, 1)

View File

@@ -86,11 +86,11 @@ func GenerateTracesTwoSpansSameResourceOneDifferent() pdata.Traces {
return td
}
func GenerateTracesManySpansSameResource(spansCount int) pdata.Traces {
func GenerateTracesManySpansSameResource(spanCount int) pdata.Traces {
td := GenerateTracesOneEmptyInstrumentationLibrary()
rs0ils0 := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0)
rs0ils0.Spans().Resize(spansCount)
for i := 0; i < spansCount; i++ {
rs0ils0.Spans().Resize(spanCount)
for i := 0; i < spanCount; i++ {
fillSpanOne(rs0ils0.Spans().At(i))
}
return td

View File

@@ -62,7 +62,7 @@ func TestBatchProcessorSpansDelivered(t *testing.T) {
require.NoError(t, batcher.Shutdown(context.Background()))
require.Equal(t, requestCount*spansPerRequest, sink.SpansCount())
require.Equal(t, requestCount*spansPerRequest, sink.SpanCount())
receivedTraces := sink.AllTraces()
spansReceivedByName := spansReceivedByName(receivedTraces)
for requestNum := 0; requestNum < requestCount; requestNum++ {
@@ -102,7 +102,7 @@ func TestBatchProcessorSpansDeliveredEnforceBatchSize(t *testing.T) {
// wait for all spans to be reported
for {
if sink.SpansCount() == requestCount*spansPerRequest {
if sink.SpanCount() == requestCount*spansPerRequest {
break
}
<-time.After(cfg.Timeout)
@@ -110,7 +110,7 @@ func TestBatchProcessorSpansDeliveredEnforceBatchSize(t *testing.T) {
require.NoError(t, batcher.Shutdown(context.Background()))
require.Equal(t, requestCount*spansPerRequest, sink.SpansCount())
require.Equal(t, requestCount*spansPerRequest, sink.SpanCount())
for i := 0; i < len(sink.AllTraces())-1; i++ {
assert.Equal(t, int(cfg.SendBatchMaxSize), sink.AllTraces()[i].SpanCount())
}
@@ -152,7 +152,7 @@ func TestBatchProcessorSentBySize(t *testing.T) {
expectedBatchesNum := requestCount * spansPerRequest / sendBatchSize
expectedBatchingFactor := sendBatchSize / spansPerRequest
require.Equal(t, requestCount*spansPerRequest, sink.SpansCount())
require.Equal(t, requestCount*spansPerRequest, sink.SpanCount())
receivedTraces := sink.AllTraces()
require.EqualValues(t, expectedBatchesNum, len(receivedTraces))
for _, td := range receivedTraces {
@@ -168,7 +168,7 @@ func TestBatchProcessorSentBySize(t *testing.T) {
assert.Equal(t, 1, len(viewData))
distData := viewData[0].Data.(*view.DistributionData)
assert.Equal(t, int64(expectedBatchesNum), distData.Count)
assert.Equal(t, sink.SpansCount(), int(distData.Sum()))
assert.Equal(t, sink.SpanCount(), int(distData.Sum()))
assert.Equal(t, sendBatchSize, int(distData.Min))
assert.Equal(t, sendBatchSize, int(distData.Max))
@@ -203,7 +203,7 @@ func TestBatchProcessorSentByTimeout(t *testing.T) {
// Wait for at least one batch to be sent.
for {
if sink.SpansCount() != 0 {
if sink.SpanCount() != 0 {
break
}
<-time.After(cfg.Timeout)
@@ -218,7 +218,7 @@ func TestBatchProcessorSentByTimeout(t *testing.T) {
expectedBatchesNum := 1
expectedBatchingFactor := 5
require.Equal(t, requestCount*spansPerRequest, sink.SpansCount())
require.Equal(t, requestCount*spansPerRequest, sink.SpanCount())
receivedTraces := sink.AllTraces()
require.EqualValues(t, expectedBatchesNum, len(receivedTraces))
for _, td := range receivedTraces {
@@ -252,7 +252,7 @@ func TestBatchProcessorTraceSendWhenClosing(t *testing.T) {
require.NoError(t, batcher.Shutdown(context.Background()))
require.Equal(t, requestCount*spansPerRequest, sink.SpansCount())
require.Equal(t, requestCount*spansPerRequest, sink.SpanCount())
require.Equal(t, 1, len(sink.AllTraces()))
}

View File

@@ -211,7 +211,7 @@ func Test_tracesamplerprocessor_SamplingPercentageRange_MultipleResourceSpans(t
for _, td := range genRandomTestData(tt.numBatches, tt.numTracesPerBatch, testSvcName, tt.resourceSpanPerTrace) {
assert.NoError(t, tsp.ConsumeTraces(context.Background(), td))
assert.Equal(t, tt.resourceSpanPerTrace*tt.numTracesPerBatch, sink.SpansCount())
assert.Equal(t, tt.resourceSpanPerTrace*tt.numTracesPerBatch, sink.SpanCount())
sink.Reset()
}
@@ -329,10 +329,10 @@ func Test_tracesamplerprocessor_SpanSamplingPriority(t *testing.T) {
sampledData := sink.AllTraces()
if tt.sampled {
require.Equal(t, 1, len(sampledData))
assert.Equal(t, 1, sink.SpansCount())
assert.Equal(t, 1, sink.SpanCount())
} else {
require.Equal(t, 0, len(sampledData))
assert.Equal(t, 0, sink.SpansCount())
assert.Equal(t, 0, sink.SpanCount())
}
})
}

View File

@@ -206,7 +206,7 @@ func testJaegerAgent(t *testing.T, agentEndpoint string, receiverConfig *configu
}
assert.Eventually(t, func() bool {
return sink.SpansCount() > 0
return sink.SpanCount() > 0
}, 10*time.Second, 5*time.Millisecond)
gotTraces := sink.AllTraces()

View File

@@ -704,7 +704,7 @@ func TestShutdown(t *testing.T) {
// Wait until the receiver outputs anything to the sink.
assert.Eventually(t, func() bool {
return nextSink.SpansCount() > 0
return nextSink.SpanCount() > 0
}, time.Second, 10*time.Millisecond)
// Now shutdown the receiver, while continuing sending traces to it.
@@ -716,7 +716,7 @@ func TestShutdown(t *testing.T) {
// Remember how many spans the sink received. This number should not change after this
// point because after Shutdown() returns the component is not allowed to produce
// any more data.
sinkSpanCountAfterShutdown := nextSink.SpansCount()
sinkSpanCountAfterShutdown := nextSink.SpanCount()
// Now signal to generateTraces to exit the main generation loop, then send
// one more trace and stop.
@@ -729,7 +729,7 @@ func TestShutdown(t *testing.T) {
// The last, additional trace should not be received by sink, so the number of spans in
// the sink should not change.
assert.EqualValues(t, sinkSpanCountAfterShutdown, nextSink.SpansCount())
assert.EqualValues(t, sinkSpanCountAfterShutdown, nextSink.SpanCount())
}
func generateTraces(senderFn senderFunc, doneSignal chan bool) {

View File

@@ -40,9 +40,9 @@ func TestTracesProcessorCloningMultiplexing(t *testing.T) {
tfc := NewTracesCloning(processors)
td := testdata.GenerateTracesTwoSpansSameResource()
var wantSpansCount = 0
var wantSpanCount = 0
for i := 0; i < 2; i++ {
wantSpansCount += td.SpanCount()
wantSpanCount += td.SpanCount()
err := tfc.ConsumeTraces(context.Background(), td)
if err != nil {
t.Errorf("Wanted nil got error")
@@ -52,7 +52,7 @@ func TestTracesProcessorCloningMultiplexing(t *testing.T) {
for i, p := range processors {
m := p.(*consumertest.TracesSink)
assert.Equal(t, wantSpansCount, m.SpansCount())
assert.Equal(t, wantSpanCount, m.SpanCount())
spanOrig := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0)
allTraces := m.AllTraces()
spanClone := allTraces[0].ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0)

View File

@@ -41,9 +41,9 @@ func TestTracesProcessorMultiplexing(t *testing.T) {
tfc := NewTraces(processors)
td := testdata.GenerateTracesOneSpan()
var wantSpansCount = 0
var wantSpanCount = 0
for i := 0; i < 2; i++ {
wantSpansCount += td.SpanCount()
wantSpanCount += td.SpanCount()
err := tfc.ConsumeTraces(context.Background(), td)
if err != nil {
t.Errorf("Wanted nil got error")
@@ -53,7 +53,7 @@ func TestTracesProcessorMultiplexing(t *testing.T) {
for _, p := range processors {
m := p.(*consumertest.TracesSink)
assert.Equal(t, wantSpansCount, m.SpansCount())
assert.Equal(t, wantSpanCount, m.SpanCount())
assert.EqualValues(t, td, m.AllTraces()[0])
}
}
@@ -70,14 +70,14 @@ func TestTracesProcessorWhenOneErrors(t *testing.T) {
tfc := NewTraces(processors)
td := testdata.GenerateTracesOneSpan()
var wantSpansCount = 0
var wantSpanCount = 0
for i := 0; i < 2; i++ {
wantSpansCount += td.SpanCount()
wantSpanCount += td.SpanCount()
assert.Error(t, tfc.ConsumeTraces(context.Background(), td))
}
assert.Equal(t, wantSpansCount, processors[0].(*consumertest.TracesSink).SpansCount())
assert.Equal(t, wantSpansCount, processors[2].(*consumertest.TracesSink).SpansCount())
assert.Equal(t, wantSpanCount, processors[0].(*consumertest.TracesSink).SpanCount())
assert.Equal(t, wantSpanCount, processors[2].(*consumertest.TracesSink).SpanCount())
}
func TestMetricsProcessorNotMultiplexing(t *testing.T) {

View File

@@ -223,8 +223,8 @@ func (mc *MockMetricConsumer) ConsumeMetrics(_ context.Context, md pdata.Metrics
return nil
}
func (tc *MockTraceConsumer) MockConsumeTraceData(spansCount int) error {
tc.numSpansReceived.Add(uint64(spansCount))
func (tc *MockTraceConsumer) MockConsumeTraceData(spanCount int) error {
tc.numSpansReceived.Add(uint64(spanCount))
return nil
}