[chore]: enable gofumpt linter in connector, consumer, exporter, extension and featuregate (#11854)
#### Description

[gofumpt](https://golangci-lint.run/usage/linters/#gofumpt) enforces a stricter format than gofmt, while being backwards compatible.

Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
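The diff that follows is mechanical formatting churn. As a rough illustration of the shapes the reformatting converges on, here is a minimal sketch with hypothetical names (`errFoo`, `noopSink`, and `process` are invented for illustration and do not appear in the changed files):

```go
// Illustrative sketch only; the package and identifiers are hypothetical.
package example

import "errors"

// Adjacent single-line declarations are grouped into one block
// instead of repeating `var errFoo = ...` and `var errBar = ...`.
var (
	errFoo = errors.New("foo")
	errBar = errors.New("bar")
)

// Empty struct bodies collapse onto a single line.
type noopSink struct{}

// When a parameter list spans several lines, the closing parenthesis
// moves onto its own line before the function body opens.
func process(items []string,
	maxWorkers int,
) (int, error) {
	// Short variable declarations are preferred over `var limit = maxWorkers`.
	limit := maxWorkers
	if len(items) < limit {
		limit = len(items)
	}
	return limit, nil
}
```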
This commit is contained in:
parent 824c9f7a43
commit 0b978307c2
@@ -228,9 +228,11 @@ type nopConnector struct {
 func createTracesToTraces(context.Context, Settings, component.Config, consumer.Traces) (Traces, error) {
 	return nopInstance, nil
 }
+
 func createTracesToMetrics(context.Context, Settings, component.Config, consumer.Metrics) (Traces, error) {
 	return nopInstance, nil
 }
+
 func createTracesToLogs(context.Context, Settings, component.Config, consumer.Logs) (Traces, error) {
 	return nopInstance, nil
 }
@@ -238,9 +240,11 @@ func createTracesToLogs(context.Context, Settings, component.Config, consumer.Lo
 func createMetricsToTraces(context.Context, Settings, component.Config, consumer.Traces) (Metrics, error) {
 	return nopInstance, nil
 }
+
 func createMetricsToMetrics(context.Context, Settings, component.Config, consumer.Metrics) (Metrics, error) {
 	return nopInstance, nil
 }
+
 func createMetricsToLogs(context.Context, Settings, component.Config, consumer.Logs) (Metrics, error) {
 	return nopInstance, nil
 }
@@ -248,9 +252,11 @@ func createMetricsToLogs(context.Context, Settings, component.Config, consumer.L
 func createLogsToTraces(context.Context, Settings, component.Config, consumer.Traces) (Logs, error) {
 	return nopInstance, nil
 }
+
 func createLogsToMetrics(context.Context, Settings, component.Config, consumer.Metrics) (Logs, error) {
 	return nopInstance, nil
 }
+
 func createLogsToLogs(context.Context, Settings, component.Config, consumer.Logs) (Logs, error) {
 	return nopInstance, nil
 }

@@ -138,9 +138,11 @@ func createProfilesToProfiles(context.Context, connector.Settings, component.Con
 func createProfilesToTraces(context.Context, connector.Settings, component.Config, consumer.Traces) (Profiles, error) {
 	return nopInstance, nil
 }
+
 func createProfilesToMetrics(context.Context, connector.Settings, component.Config, consumer.Metrics) (Profiles, error) {
 	return nopInstance, nil
 }
+
 func createProfilesToLogs(context.Context, connector.Settings, component.Config, consumer.Logs) (Profiles, error) {
 	return nopInstance, nil
 }

@@ -30,7 +30,7 @@ func (mts *mutatingProfilesSink) Capabilities() consumer.Capabilities {
 }
 
 func TestProfilesRouterMultiplexing(t *testing.T) {
-	var num = 20
+	num := 20
 	for numIDs := 1; numIDs < num; numIDs++ {
 		for numCons := 1; numCons < num; numCons++ {
 			for numProfiles := 1; numProfiles < num; numProfiles++ {

@@ -83,6 +83,7 @@ func createMetricsToMetricsConnector(context.Context, connector.Settings, compon
 func createMetricsToLogsConnector(context.Context, connector.Settings, component.Config, consumer.Logs) (connector.Metrics, error) {
 	return &nopConnector{Consumer: consumertest.NewNop()}, nil
 }
+
 func createMetricsToProfilesConnector(context.Context, connector.Settings, component.Config, consumerprofiles.Profiles) (connector.Metrics, error) {
 	return &nopConnector{Consumer: consumertest.NewNop()}, nil
 }
@@ -98,6 +99,7 @@ func createLogsToMetricsConnector(context.Context, connector.Settings, component
 func createLogsToLogsConnector(context.Context, connector.Settings, component.Config, consumer.Logs) (connector.Logs, error) {
 	return &nopConnector{Consumer: consumertest.NewNop()}, nil
 }
+
 func createLogsToProfilesConnector(context.Context, connector.Settings, component.Config, consumerprofiles.Profiles) (connector.Logs, error) {
 	return &nopConnector{Consumer: consumertest.NewNop()}, nil
 }
@@ -113,6 +115,7 @@ func createProfilesToMetricsConnector(context.Context, connector.Settings, compo
 func createProfilesToLogsConnector(context.Context, connector.Settings, component.Config, consumer.Logs) (connectorprofiles.Profiles, error) {
 	return &nopConnector{Consumer: consumertest.NewNop()}, nil
 }
+
 func createProfilesToProfilesConnector(context.Context, connector.Settings, component.Config, consumerprofiles.Profiles) (connectorprofiles.Profiles, error) {
 	return &nopConnector{Consumer: consumertest.NewNop()}, nil
 }

@@ -28,7 +28,7 @@ func (mts *mutatingLogsSink) Capabilities() consumer.Capabilities {
 }
 
 func TestLogsRouterMultiplexing(t *testing.T) {
-	var num = 20
+	num := 20
 	for numIDs := 1; numIDs < num; numIDs++ {
 		for numCons := 1; numCons < num; numCons++ {
 			for numLogs := 1; numLogs < num; numLogs++ {

@@ -28,7 +28,7 @@ func (mts *mutatingMetricsSink) Capabilities() consumer.Capabilities {
 }
 
 func TestMetricsRouterMultiplexing(t *testing.T) {
-	var num = 20
+	num := 20
 	for numIDs := 1; numIDs < num; numIDs++ {
 		for numCons := 1; numCons < num; numCons++ {
 			for numMetrics := 1; numMetrics < num; numMetrics++ {

@@ -28,7 +28,7 @@ func (mts *mutatingTracesSink) Capabilities() consumer.Capabilities {
 }
 
 func TestTracesRouterMultiplexing(t *testing.T) {
-	var num = 20
+	num := 20
 	for numIDs := 1; numIDs < num; numIDs++ {
 		for numCons := 1; numCons < num; numCons++ {
 			for numTraces := 1; numTraces < num; numTraces++ {

@@ -37,10 +37,12 @@ type Consumer interface {
 	unexported()
 }
 
-var _ consumer.Logs = (Consumer)(nil)
-var _ consumer.Metrics = (Consumer)(nil)
-var _ consumer.Traces = (Consumer)(nil)
-var _ consumerprofiles.Profiles = (Consumer)(nil)
+var (
+	_ consumer.Logs = (Consumer)(nil)
+	_ consumer.Metrics = (Consumer)(nil)
+	_ consumer.Traces = (Consumer)(nil)
+	_ consumerprofiles.Profiles = (Consumer)(nil)
+)
 
 type nonMutatingConsumer struct{}
 

@@ -10,15 +10,13 @@ import (
 	"go.opentelemetry.io/collector/config/configtelemetry"
 )
 
-var (
-	// supportedLevels in this exporter's configuration.
-	// configtelemetry.LevelNone and other future values are not supported.
-	supportedLevels map[configtelemetry.Level]struct{} = map[configtelemetry.Level]struct{}{
-		configtelemetry.LevelBasic: {},
-		configtelemetry.LevelNormal: {},
-		configtelemetry.LevelDetailed: {},
-	}
-)
+// supportedLevels in this exporter's configuration.
+// configtelemetry.LevelNone and other future values are not supported.
+var supportedLevels map[configtelemetry.Level]struct{} = map[configtelemetry.Level]struct{}{
+	configtelemetry.LevelBasic: {},
+	configtelemetry.LevelNormal: {},
+	configtelemetry.LevelDetailed: {},
+}
 
 // Config defines configuration for debug exporter.
 type Config struct {

@@ -16,7 +16,7 @@ import (
 )
 
 func TestNewFactory(t *testing.T) {
-	var testType = component.MustNewType("test")
+	testType := component.MustNewType("test")
 	defaultCfg := struct{}{}
 	f := NewFactory(
 		testType,
@@ -32,7 +32,7 @@ func TestNewFactory(t *testing.T) {
 }
 
 func TestNewFactoryWithOptions(t *testing.T) {
-	var testType = component.MustNewType("test")
+	testType := component.MustNewType("test")
 	defaultCfg := struct{}{}
 	f := NewFactory(
 		testType,

@@ -22,8 +22,10 @@ import (
 	"go.opentelemetry.io/collector/pipeline/pipelineprofiles"
 )
 
-var profilesMarshaler = &pprofile.ProtoMarshaler{}
-var profilesUnmarshaler = &pprofile.ProtoUnmarshaler{}
+var (
+	profilesMarshaler = &pprofile.ProtoMarshaler{}
+	profilesUnmarshaler = &pprofile.ProtoUnmarshaler{}
+)
 
 type profilesRequest struct {
 	pd pprofile.Profiles

@@ -144,8 +144,7 @@ func TestExtractProfiles(t *testing.T) {
 }
 
 // dummyRequest implements Request. It is for checking that merging two request types would fail
-type dummyRequest struct {
-}
+type dummyRequest struct{}
 
 func (req *dummyRequest) Export(_ context.Context) error {
 	return nil
@@ -160,6 +159,7 @@ func (req *dummyRequest) Merge(_ context.Context, _ exporterhelper.Request) (exp
 }
 
 func (req *dummyRequest) MergeSplit(_ context.Context, _ exporterbatcher.MaxSizeConfig, _ exporterhelper.Request) (
-	[]exporterhelper.Request, error) {
+	[]exporterhelper.Request, error,
+) {
 	return nil, nil
 }

@@ -40,9 +40,7 @@ const (
 	fakeProfilesParentSpanName = "fake_profiles_parent_span_name"
 )
 
-var (
-	fakeProfilesExporterConfig = struct{}{}
-)
+var fakeProfilesExporterConfig = struct{}{}
 
 func TestProfilesRequest(t *testing.T) {
 	lr := newProfilesRequest(testdata.GenerateProfiles(1), nil)
@@ -301,8 +299,10 @@ func generateProfilesTraffic(t *testing.T, tracer trace.Tracer, le exporterprofi
 	}
 }
 
+// nolint: unparam
 func checkWrapSpanForProfilesExporter(t *testing.T, sr *tracetest.SpanRecorder, tracer trace.Tracer, le exporterprofiles.Profiles,
-	wantError error, numSampleRecords int64) { // nolint: unparam
+	wantError error, numSampleRecords int64,
+) {
 	const numRequests = 5
 	generateProfilesTraffic(t, tracer, le, numRequests, wantError)
 

@@ -49,7 +49,8 @@ func TestBatchSender_Merge(t *testing.T) {
 	runTest := func(testName string, enableQueueBatcher bool, tt struct {
 		name string
 		batcherOption Option
-	}) {
+	},
+	) {
 		t.Run(testName, func(t *testing.T) {
 			resetFeatureGate := setFeatureGateForTest(t, usePullingBasedExporterQueueBatcher, enableQueueBatcher)
 			be := queueBatchExporter(t, tt.batcherOption)
@@ -78,8 +79,10 @@ func TestBatchSender_Merge(t *testing.T) {
 	time.Sleep(50 * time.Millisecond)
 
 	// should be ignored because of the merge error.
-	require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 3, sink: sink,
-		mergeErr: errors.New("merge error")}))
+	require.NoError(t, be.Send(context.Background(), &fakeRequest{
+		items: 3, sink: sink,
+		mergeErr: errors.New("merge error"),
+	}))
 
 	assert.Equal(t, int64(1), sink.requestsCount.Load())
 	assert.Eventually(t, func() bool {
@@ -130,7 +133,8 @@ func TestBatchSender_BatchExportError(t *testing.T) {
 		batcherOption Option
 		expectedRequests int64
 		expectedItems int64
-	}) {
+	},
+	) {
 		t.Run(testName, func(t *testing.T) {
 			resetFeatureGate := setFeatureGateForTest(t, usePullingBasedExporterQueueBatcher, enableQueueBatcher)
 			be := queueBatchExporter(t, tt.batcherOption)
@@ -200,8 +204,10 @@ func TestBatchSender_MergeOrSplit(t *testing.T) {
 	}, 50*time.Millisecond, 10*time.Millisecond)
 
 	// request that cannot be split should be dropped.
-	require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 11, sink: sink,
-		mergeErr: errors.New("split error")}))
+	require.NoError(t, be.Send(context.Background(), &fakeRequest{
+		items: 11, sink: sink,
+		mergeErr: errors.New("split error"),
+	}))
 
 	// big request should be broken down into two requests, both are sent right away.
 	require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 13, sink: sink}))

@@ -86,7 +86,8 @@ func NewQueueSender(
 	numConsumers int,
 	exportFailureMessage string,
 	obsrep *ObsReport,
-	batcherCfg exporterbatcher.Config) *QueueSender {
+	batcherCfg exporterbatcher.Config,
+) *QueueSender {
 	qs := &QueueSender{
 		queue: q,
 		numConsumers: numConsumers,

@@ -192,7 +192,8 @@ func TestQueuedRetryHappyPath(t *testing.T) {
 	runTest := func(testName string, enableQueueBatcher bool, tt struct {
 		name string
 		queueOptions []Option
-	}) {
+	},
+	) {
 		t.Run(testName, func(t *testing.T) {
 			resetFeatureGate := setFeatureGateForTest(t, usePullingBasedExporterQueueBatcher, enableQueueBatcher)
 			tel, err := componenttest.SetupTelemetry(defaultID)
@@ -361,7 +362,8 @@ func TestQueueRetryWithDisabledQueue(t *testing.T) {
 	runTest := func(testName string, enableQueueBatcher bool, tt struct {
 		name string
 		queueOptions []Option
-	}) {
+	},
+	) {
 		t.Run(testName, func(t *testing.T) {
 			defer setFeatureGateForTest(t, usePullingBasedExporterQueueBatcher, enableQueueBatcher)
 			set := exportertest.NewNopSettings()
@@ -432,7 +434,7 @@ func TestQueuedRetryPersistenceEnabled(t *testing.T) {
 		WithRetry(rCfg), WithQueue(qCfg))
 	require.NoError(t, err)
 
-	var extensions = map[component.ID]component.Component{
+	extensions := map[component.ID]component.Component{
 		storageID: queue.NewMockStorageExtension(nil),
 	}
 	host := &MockHost{Ext: extensions}
@@ -465,7 +467,7 @@ func TestQueuedRetryPersistenceEnabledStorageError(t *testing.T) {
 		WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithRetry(rCfg), WithQueue(qCfg))
 	require.NoError(t, err)
 
-	var extensions = map[component.ID]component.Component{
+	extensions := map[component.ID]component.Component{
 		storageID: queue.NewMockStorageExtension(storageError),
 	}
 	host := &MockHost{Ext: extensions}
@@ -497,7 +499,7 @@ func TestQueuedRetryPersistentEnabled_NoDataLossOnShutdown(t *testing.T) {
 		WithUnmarshaler(mockRequestUnmarshaler(mockReq)), WithRetry(rCfg), WithQueue(qCfg))
 	require.NoError(t, err)
 
-	var extensions = map[component.ID]component.Component{
+	extensions := map[component.ID]component.Component{
 		storageID: queue.NewMockStorageExtension(nil),
 	}
 	host := &MockHost{Ext: extensions}

@@ -57,7 +57,8 @@ func (r *fakeRequest) ItemsCount() int {
 }
 
 func (r *fakeRequest) Merge(_ context.Context,
-	r2 internal.Request) (internal.Request, error) {
+	r2 internal.Request,
+) (internal.Request, error) {
 	if r == nil {
 		return r2, nil
 	}
@@ -74,7 +75,8 @@ func (r *fakeRequest) Merge(_ context.Context,
 }
 
 func (r *fakeRequest) MergeSplit(ctx context.Context, cfg exporterbatcher.MaxSizeConfig,
-	r2 internal.Request) ([]internal.Request, error) {
+	r2 internal.Request,
+) ([]internal.Request, error) {
 	if r.mergeErr != nil {
 		return nil, r.mergeErr
 	}

@@ -20,8 +20,10 @@ import (
 	"go.opentelemetry.io/collector/pipeline"
 )
 
-var logsMarshaler = &plog.ProtoMarshaler{}
-var logsUnmarshaler = &plog.ProtoUnmarshaler{}
+var (
+	logsMarshaler = &plog.ProtoMarshaler{}
+	logsUnmarshaler = &plog.ProtoUnmarshaler{}
+)
 
 type logsRequest struct {
 	ld plog.Logs

@@ -413,8 +413,10 @@ func generateLogsTraffic(t *testing.T, tracer trace.Tracer, le exporter.Logs, nu
 	}
 }
 
+// nolint: unparam
 func checkWrapSpanForLogs(t *testing.T, sr *tracetest.SpanRecorder, tracer trace.Tracer, le exporter.Logs,
-	wantError error, numLogRecords int64) { // nolint: unparam
+	wantError error, numLogRecords int64,
+) {
 	const numRequests = 5
 	generateLogsTraffic(t, tracer, le, numRequests, wantError)
 

@@ -20,8 +20,10 @@ import (
 	"go.opentelemetry.io/collector/pipeline"
 )
 
-var metricsMarshaler = &pmetric.ProtoMarshaler{}
-var metricsUnmarshaler = &pmetric.ProtoUnmarshaler{}
+var (
+	metricsMarshaler = &pmetric.ProtoMarshaler{}
+	metricsUnmarshaler = &pmetric.ProtoUnmarshaler{}
+)
 
 type metricsRequest struct {
 	md pmetric.Metrics

@@ -387,6 +387,7 @@ func newPushMetricsData(retError error) consumer.ConsumeMetricsFunc {
 		return retError
 	}
 }
+
 func newPushMetricsDataModifiedDownstream(retError error) consumer.ConsumeMetricsFunc {
 	return func(_ context.Context, metric pmetric.Metrics) error {
 		metric.ResourceMetrics().MoveAndAppendTo(pmetric.NewResourceMetricsSlice())
@@ -419,8 +420,10 @@ func generateMetricsTraffic(t *testing.T, tracer trace.Tracer, me exporter.Metri
 	}
 }
 
+// nolint: unparam
 func checkWrapSpanForMetrics(t *testing.T, sr *tracetest.SpanRecorder, tracer trace.Tracer,
-	me exporter.Metrics, wantError error, numMetricPoints int64) { // nolint: unparam
+	me exporter.Metrics, wantError error, numMetricPoints int64,
+) {
 	const numRequests = 5
 	generateMetricsTraffic(t, tracer, me, numRequests, wantError)
 

@@ -20,8 +20,10 @@ import (
 	"go.opentelemetry.io/collector/pipeline"
 )
 
-var tracesMarshaler = &ptrace.ProtoMarshaler{}
-var tracesUnmarshaler = &ptrace.ProtoUnmarshaler{}
+var (
+	tracesMarshaler = &ptrace.ProtoMarshaler{}
+	tracesUnmarshaler = &ptrace.ProtoUnmarshaler{}
+)
 
 type tracesRequest struct {
 	td ptrace.Traces

@@ -421,8 +421,10 @@ func generateTraceTraffic(t *testing.T, tracer trace.Tracer, te exporter.Traces,
 	}
 }
 
+// nolint: unparam
 func checkWrapSpanForTraces(t *testing.T, sr *tracetest.SpanRecorder, tracer trace.Tracer,
-	te exporter.Traces, wantError error, numSpans int64) { // nolint: unparam
+	te exporter.Traces, wantError error, numSpans int64,
+) {
 	const numRequests = 5
 	generateTraceTraffic(t, tracer, te, numRequests, wantError)
 

@@ -15,7 +15,7 @@ import (
 )
 
 func TestNewFactoryWithProfiles(t *testing.T) {
-	var testType = component.MustNewType("test")
+	testType := component.MustNewType("test")
 	defaultCfg := struct{}{}
 	factory := NewFactory(
 		testType,

@@ -106,7 +106,8 @@ func checkConsumeContractScenario(t *testing.T, params CheckConsumeContractParam
 }
 
 func checkMetrics(t *testing.T, params CheckConsumeContractParams, mockReceiver component.Component,
-	mockConsumer *mockConsumer, checkIfTestPassed func(*testing.T, int, requestCounter)) {
+	mockConsumer *mockConsumer, checkIfTestPassed func(*testing.T, int, requestCounter),
+) {
 	ctx := context.Background()
 	var exp exporter.Metrics
 	var err error

@@ -19,8 +19,10 @@ import (
 	"go.opentelemetry.io/collector/pdata/ptrace"
 )
 
-var errNonPermanent = status.Error(codes.DeadlineExceeded, "non Permanent error")
-var errPermanent = status.Error(codes.Internal, "Permanent error")
+var (
+	errNonPermanent = status.Error(codes.DeadlineExceeded, "non Permanent error")
+	errPermanent = status.Error(codes.Internal, "Permanent error")
+)
 
 // // randomNonPermanentErrorConsumeDecision is a decision function that succeeds approximately
 // // half of the time and fails with a non-permanent error the rest of the time.

@@ -195,6 +195,7 @@ func TestConsumeTracesSuccess(t *testing.T) {
 	assert.Equal(t, 1, mc.reqCounter.success)
 	assert.Equal(t, 1, mc.reqCounter.total)
 }
+
 func TestConsumeMetricsNonPermanent(t *testing.T) {
 	mc := newMockConsumer(returnNonPermanentError)
 	validData := createMetric("metricId")

@@ -35,7 +35,8 @@ type BaseBatcher struct {
 func NewBatcher(batchCfg exporterbatcher.Config,
 	queue Queue[internal.Request],
 	exportFunc func(ctx context.Context, req internal.Request) error,
-	maxWorkers int) (Batcher, error) {
+	maxWorkers int,
+) (Batcher, error) {
 	if !batchCfg.Enabled {
 		return &DisabledBatcher{
 			BaseBatcher{

@@ -67,7 +67,8 @@ func (qb *DefaultBatcher) startReadingFlushingGoroutine() {
 				qb.flushAsync(batch{
 					req: reqList[i],
 					ctx: ctx,
-					idxList: []uint64{idx}})
+					idxList: []uint64{idx},
+				})
 				// TODO: handle partial failure
 			}
 			qb.resetTimer()
@@ -75,7 +76,8 @@ func (qb *DefaultBatcher) startReadingFlushingGoroutine() {
 			qb.currentBatch = &batch{
 				req: reqList[0],
 				ctx: ctx,
-				idxList: []uint64{idx}}
+				idxList: []uint64{idx},
+			}
 			qb.currentBatchMu.Unlock()
 		}
 	} else {
@@ -84,7 +86,8 @@ func (qb *DefaultBatcher) startReadingFlushingGoroutine() {
 			qb.currentBatch = &batch{
 				req: req,
 				ctx: ctx,
-				idxList: []uint64{idx}}
+				idxList: []uint64{idx},
+			}
 		} else {
 			mergedReq, mergeErr := qb.currentBatch.req.Merge(qb.currentBatch.ctx, req)
 			if mergeErr != nil {
@@ -95,7 +98,8 @@ func (qb *DefaultBatcher) startReadingFlushingGoroutine() {
 			qb.currentBatch = &batch{
 				req: mergedReq,
 				ctx: qb.currentBatch.ctx,
-				idxList: append(qb.currentBatch.idxList, idx)}
+				idxList: append(qb.currentBatch.idxList, idx),
+			}
 		}
 
 		if qb.currentBatch.req.ItemsCount() >= qb.batchCfg.MinSizeItems {

@@ -38,7 +38,8 @@ func (qb *DisabledBatcher) Start(_ context.Context, _ component.Host) error {
 			qb.flushAsync(batch{
 				req: req,
 				ctx: context.Background(),
-				idxList: []uint64{idx}})
+				idxList: []uint64{idx},
+			})
 		}
 	}()
 	return nil

@@ -54,7 +54,8 @@ func (r *fakeRequest) ItemsCount() int {
 }
 
 func (r *fakeRequest) Merge(_ context.Context,
-	r2 internal.Request) (internal.Request, error) {
+	r2 internal.Request,
+) (internal.Request, error) {
 	fr2 := r2.(*fakeRequest)
 	if fr2.mergeErr != nil {
 		return nil, fr2.mergeErr
@@ -68,7 +69,8 @@ func (r *fakeRequest) Merge(_ context.Context,
 }
 
 func (r *fakeRequest) MergeSplit(ctx context.Context, cfg exporterbatcher.MaxSizeConfig,
-	r2 internal.Request) ([]internal.Request, error) {
+	r2 internal.Request,
+) ([]internal.Request, error) {
 	if r.mergeErr != nil {
 		return nil, r.mergeErr
 	}

@@ -226,7 +226,8 @@ func (m *fakeStorageClientWithErrors) Reset() {
 
 // createAndStartTestPersistentQueue creates and starts a fake queue with the given capacity and number of consumers.
 func createAndStartTestPersistentQueue(t *testing.T, sizer Sizer[tracesRequest], capacity int64, numConsumers int,
-	consumeFunc func(_ context.Context, item tracesRequest) error) Queue[tracesRequest] {
+	consumeFunc func(_ context.Context, item tracesRequest) error,
+) Queue[tracesRequest] {
 	pq := NewPersistentQueue[tracesRequest](PersistentQueueSettings[tracesRequest]{
 		Sizer: sizer,
 		Capacity: capacity,
@@ -272,7 +273,8 @@ func createTestPersistentQueueWithItemsCapacity(t testing.TB, ext storage.Extens
 }
 
 func createTestPersistentQueueWithCapacityLimiter(t testing.TB, ext storage.Extension, sizer Sizer[tracesRequest],
-	capacity int64) *persistentQueue[tracesRequest] {
+	capacity int64,
+) *persistentQueue[tracesRequest] {
 	pq := NewPersistentQueue[tracesRequest](PersistentQueueSettings[tracesRequest]{
 		Sizer: sizer,
 		Capacity: capacity,
@@ -340,7 +342,8 @@ func TestPersistentQueue_FullCapacity(t *testing.T) {
 
 func TestPersistentQueue_Shutdown(t *testing.T) {
 	pq := createAndStartTestPersistentQueue(t, &RequestSizer[tracesRequest]{}, 1001, 100, func(context.Context,
-		tracesRequest) error {
+		tracesRequest,
+	) error {
 		return nil
 	})
 	req := newTracesRequest(1, 10)
@@ -384,7 +387,8 @@ func TestPersistentQueue_ConsumersProducers(t *testing.T) {
 			numMessagesConsumed := &atomic.Int32{}
 			pq := createAndStartTestPersistentQueue(t, &RequestSizer[tracesRequest]{}, 1000, c.numConsumers,
 				func(context.Context,
-					tracesRequest) error {
+					tracesRequest,
+				) error {
 					numMessagesConsumed.Add(int32(1))
 					return nil
 				})
@@ -468,7 +472,7 @@ func TestToStorageClient(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			storageID := component.MustNewIDWithName("file_storage", strconv.Itoa(tt.storageIndex))
 
-			var extensions = map[component.ID]component.Component{}
+			extensions := map[component.ID]component.Component{}
 			for i := 0; i < tt.numStorages; i++ {
 				extensions[component.MustNewIDWithName("file_storage", strconv.Itoa(i))] = NewMockStorageExtension(tt.getClientError)
 			}
@@ -499,7 +503,7 @@ func TestInvalidStorageExtensionType(t *testing.T) {
 	settings := extensiontest.NewNopSettings()
 	extension, err := factory.Create(context.Background(), settings, extConfig)
 	require.NoError(t, err)
-	var extensions = map[component.ID]component.Component{
+	extensions := map[component.ID]component.Component{
 		storageID: extension,
 	}
 	host := &mockHost{ext: extensions}

@@ -12,10 +12,8 @@ import (
 	"go.opentelemetry.io/collector/component"
 )
 
-var (
-	// ErrQueueIsFull is the error returned when an item is offered to the Queue and the queue is full.
-	ErrQueueIsFull = errors.New("sending queue is full")
-)
+// ErrQueueIsFull is the error returned when an item is offered to the Queue and the queue is full.
+var ErrQueueIsFull = errors.New("sending queue is full")
 
 // Queue defines a producer-consumer exchange which can be backed by e.g. the memory-based ring buffer queue
 // (boundedMemoryQueue) or via a disk-based queue (persistentQueue)

@@ -39,10 +39,12 @@ import (
 	"go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
 )
 
-const tracesTelemetryType = "traces"
-const metricsTelemetryType = "metrics"
-const logsTelemetryType = "logs"
-const profilesTelemetryType = "profiles"
+const (
+	tracesTelemetryType = "traces"
+	metricsTelemetryType = "metrics"
+	logsTelemetryType = "logs"
+	profilesTelemetryType = "profiles"
+)
 
 type responseSerializer interface {
 	MarshalJSON() ([]byte, error)

@@ -35,7 +35,6 @@ type Extension interface {
 //
 // [overwrite | not-found | no-op] from "real" problems
 type Client interface {
-
 	// Get will retrieve data from storage that corresponds to the
 	// specified key. It should return (nil, nil) if not found
 	Get(ctx context.Context, key string) ([]byte, error)

@@ -80,7 +80,8 @@ func NewFactory(
 	cfgType component.Type,
 	createDefaultConfig component.CreateDefaultConfigFunc,
 	createServiceExtension CreateFunc,
-	sl component.StabilityLevel) Factory {
+	sl component.StabilityLevel,
+) Factory {
 	return &factory{
 		cfgType: cfgType,
 		CreateDefaultConfigFunc: createDefaultConfig,

@@ -20,7 +20,7 @@ type nopExtension struct {
 }
 
 func TestNewFactory(t *testing.T) {
-	var testType = component.MustNewType("test")
+	testType := component.MustNewType("test")
 	defaultCfg := struct{}{}
 	nopExtensionInstance := new(nopExtension)
 

@@ -31,8 +31,10 @@ func newZPagesHost() *zpagesHost {
 
 func (*zpagesHost) RegisterZPages(*http.ServeMux, string) {}
 
-var _ registerableTracerProvider = (*registerableProvider)(nil)
-var _ registerableTracerProvider = sdktrace.NewTracerProvider()
+var (
+	_ registerableTracerProvider = (*registerableProvider)(nil)
+	_ registerableTracerProvider = sdktrace.NewTracerProvider()
+)
 
 type registerableProvider struct {
 	trace.TracerProvider

@@ -23,10 +23,8 @@ var (
	idRegexp = regexp.MustCompile(`^[0-9a-zA-Z\.]*$`)
 )
 
-var (
-	// ErrAlreadyRegistered is returned when adding a Gate that is already registered.
-	ErrAlreadyRegistered = errors.New("gate is already registered")
-)
+// ErrAlreadyRegistered is returned when adding a Gate that is already registered.
+var ErrAlreadyRegistered = errors.New("gate is already registered")
 
 // GlobalRegistry returns the global Registry.
 func GlobalRegistry() *Registry {