Implement `messaging.kafka.*` attributes spec (part 1) (#7824)

Part 1 of #7771

It turned out to be a lot more than I initially expected; I'll introduce the `clientId` attribute in part 2.
Mateusz Rzeszutek authored on 2023-02-16 22:22:17 +01:00; committed by GitHub
parent 0e4e696ed4
commit 12ea869855
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
49 changed files with 986 additions and 1202 deletions
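The common thread across the files below: the consumer receive and process instrumenters change their request type from the bare ConsumerRecords / ConsumerRecord to a ConsumerAndRecord carrier that pairs the Kafka Consumer with what it returned, so attribute extractors can read consumer-level metadata (consumer group here, clientId in part 2). The carrier class itself is among the 49 changed files but does not appear in this excerpt; the following is a minimal sketch of its assumed shape, inferred purely from the call sites in this diff (the accessors and nullability are assumptions):

import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.Consumer;

// Hypothetical reconstruction of the ConsumerAndRecord carrier; only the
// create(...) factory is confirmed by the call sites in this diff.
public final class ConsumerAndRecord<REQUEST> {
  @Nullable private final Consumer<?, ?> consumer;
  private final REQUEST record;

  private ConsumerAndRecord(@Nullable Consumer<?, ?> consumer, REQUEST record) {
    this.consumer = consumer;
    this.record = record;
  }

  public static <REQUEST> ConsumerAndRecord<REQUEST> create(
      @Nullable Consumer<?, ?> consumer, REQUEST record) {
    return new ConsumerAndRecord<>(consumer, record);
  }

  @Nullable
  public Consumer<?, ?> consumer() {
    return consumer;
  }

  public REQUEST record() {
    return record;
  }
}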

ConsumerRecordsInstrumentation.java

@@ -21,6 +21,7 @@ import java.util.List;
 import net.bytebuddy.asm.Advice;
 import net.bytebuddy.description.type.TypeDescription;
 import net.bytebuddy.matcher.ElementMatcher;
+import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
@@ -59,6 +60,7 @@ public class ConsumerRecordsInstrumentation implements TypeInstrumentation {
   @SuppressWarnings("unused")
   public static class IterableAdvice {

+    @SuppressWarnings("unchecked")
     @Advice.OnMethodExit(suppress = Throwable.class)
     public static <K, V> void wrap(
         @Advice.This ConsumerRecords<?, ?> records,
@@ -69,13 +71,16 @@ public class ConsumerRecordsInstrumentation implements TypeInstrumentation {
       // case it's important to overwrite the leaked span instead of suppressing the correct span
       // (https://github.com/open-telemetry/opentelemetry-java-instrumentation/issues/1947)
       Context receiveContext = VirtualField.find(ConsumerRecords.class, Context.class).get(records);
-      iterable = TracingIterable.wrap(iterable, receiveContext);
+      Consumer<K, V> consumer =
+          VirtualField.find(ConsumerRecords.class, Consumer.class).get(records);
+      iterable = TracingIterable.wrap(iterable, receiveContext, consumer);
     }
   }

   @SuppressWarnings("unused")
   public static class ListAdvice {

+    @SuppressWarnings("unchecked")
     @Advice.OnMethodExit(suppress = Throwable.class)
     public static <K, V> void wrap(
         @Advice.This ConsumerRecords<?, ?> records,
@@ -86,13 +91,16 @@ public class ConsumerRecordsInstrumentation implements TypeInstrumentation {
       // case it's important to overwrite the leaked span instead of suppressing the correct span
       // (https://github.com/open-telemetry/opentelemetry-java-instrumentation/issues/1947)
       Context receiveContext = VirtualField.find(ConsumerRecords.class, Context.class).get(records);
-      list = TracingList.wrap(list, receiveContext);
+      Consumer<K, V> consumer =
+          VirtualField.find(ConsumerRecords.class, Consumer.class).get(records);
+      list = TracingList.wrap(list, receiveContext, consumer);
     }
   }

   @SuppressWarnings("unused")
   public static class IteratorAdvice {

+    @SuppressWarnings("unchecked")
     @Advice.OnMethodExit(suppress = Throwable.class)
     public static <K, V> void wrap(
         @Advice.This ConsumerRecords<?, ?> records,
@@ -103,7 +111,9 @@ public class ConsumerRecordsInstrumentation implements TypeInstrumentation {
       // case it's important to overwrite the leaked span instead of suppressing the correct span
       // (https://github.com/open-telemetry/opentelemetry-java-instrumentation/issues/1947)
       Context receiveContext = VirtualField.find(ConsumerRecords.class, Context.class).get(records);
-      iterator = TracingIterator.wrap(iterator, receiveContext);
+      Consumer<K, V> consumer =
+          VirtualField.find(ConsumerRecords.class, Consumer.class).get(records);
+      iterator = TracingIterator.wrap(iterator, receiveContext, consumer);
     }
   }
 }
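All three advice methods use the same trick: VirtualField, the instrumentation API's mechanism for attaching extra state to existing objects (the agent injects a synthetic field rather than keeping a global map). The Consumer is stored with wildcard types and read back as Consumer<K, V>, which is what the newly added @SuppressWarnings("unchecked") annotations acknowledge. A condensed sketch of the attach/recover pattern (the VirtualField.find/set/get calls match the API used in the diff; the wrapper class is illustrative):

import io.opentelemetry.instrumentation.api.util.VirtualField;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;

class ConsumerAttachmentSketch {
  static void attach(ConsumerRecords<?, ?> records, Consumer<?, ?> consumer) {
    // Look up (or lazily create) the synthetic Consumer field on ConsumerRecords...
    VirtualField<ConsumerRecords<?, ?>, Consumer<?, ?>> field =
        VirtualField.find(ConsumerRecords.class, Consumer.class);
    // ...and remember which consumer produced this batch (done at poll() exit).
    field.set(records, consumer);
  }

  @SuppressWarnings("unchecked")
  static <K, V> Consumer<K, V> recover(ConsumerRecords<K, V> records) {
    // Read the consumer back when the records are iterated; this unchecked cast
    // is the reason for the @SuppressWarnings added in the advice above.
    return (Consumer<K, V>)
        VirtualField.find(ConsumerRecords.class, Consumer.class).get(records);
  }
}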

KafkaConsumerInstrumentation.java

@@ -18,6 +18,7 @@ import static net.bytebuddy.matcher.ElementMatchers.takesArguments;
 import io.opentelemetry.context.Context;
 import io.opentelemetry.instrumentation.api.internal.InstrumenterUtil;
 import io.opentelemetry.instrumentation.api.util.VirtualField;
+import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
 import io.opentelemetry.instrumentation.kafka.internal.Timer;
 import io.opentelemetry.javaagent.bootstrap.kafka.KafkaClientsConsumerProcessTracing;
 import io.opentelemetry.javaagent.extension.instrumentation.TypeInstrumentation;
@@ -28,6 +29,7 @@ import java.util.Properties;
 import net.bytebuddy.asm.Advice;
 import net.bytebuddy.description.type.TypeDescription;
 import net.bytebuddy.matcher.ElementMatcher;
+import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
@@ -83,6 +85,7 @@ public class KafkaConsumerInstrumentation implements TypeInstrumentation {
     @Advice.OnMethodExit(suppress = Throwable.class, onThrowable = Throwable.class)
     public static void onExit(
         @Advice.Enter Timer timer,
+        @Advice.This Consumer<?, ?> consumer,
         @Advice.Return ConsumerRecords<?, ?> records,
         @Advice.Thrown Throwable error) {
@@ -91,8 +94,17 @@ public class KafkaConsumerInstrumentation implements TypeInstrumentation {
         return;
       }

+      // we're attaching the consumer to the records to be able to retrieve things like consumer
+      // group or clientId later
+      VirtualField<ConsumerRecords<?, ?>, Consumer<?, ?>> consumerRecordsConsumer =
+          VirtualField.find(ConsumerRecords.class, Consumer.class);
+      consumerRecordsConsumer.set(records, consumer);
+
       Context parentContext = currentContext();
-      if (consumerReceiveInstrumenter().shouldStart(parentContext, records)) {
+      ConsumerAndRecord<ConsumerRecords<?, ?>> request =
+          ConsumerAndRecord.create(consumer, records);
+      if (consumerReceiveInstrumenter().shouldStart(parentContext, request)) {
         // disable process tracing and store the receive span for each individual record too
         boolean previousValue = KafkaClientsConsumerProcessTracing.setEnabled(false);
         try {
@@ -100,15 +112,14 @@ public class KafkaConsumerInstrumentation implements TypeInstrumentation {
           InstrumenterUtil.startAndEnd(
               consumerReceiveInstrumenter(),
               parentContext,
-              records,
+              request,
               null,
               error,
               timer.startTime(),
               timer.now());
           // we're storing the context of the receive span so that process spans can use it as
-          // parent
-          // context even though the span has ended
+          // parent context even though the span has ended
           // this is the suggested behavior according to the spec batch receive scenario:
           // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/messaging.md#batch-receiving
           VirtualField<ConsumerRecords<?, ?>, Context> consumerRecordsContext =

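The receive span above is created retroactively: InstrumenterUtil.startAndEnd builds and immediately ends the span using timestamps captured around poll() by the internal Timer entered at method start. Timer is not shown in this excerpt; here is a minimal sketch of its assumed shape, based only on the startTime()/now() calls visible above (the start() factory is an assumption):

import java.time.Instant;

// Assumed shape of io.opentelemetry.instrumentation.kafka.internal.Timer; only
// startTime() and now() are confirmed by the advice code above.
final class Timer {
  private final Instant startTime;

  private Timer(Instant startTime) {
    this.startTime = startTime;
  }

  static Timer start() {
    return new Timer(Instant.now()); // captured on poll() entry
  }

  Instant startTime() {
    return startTime;
  }

  Instant now() {
    return Instant.now(); // captured on poll() exit
  }
}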
KafkaSingletons.java

@@ -7,6 +7,7 @@ package io.opentelemetry.javaagent.instrumentation.kafkaclients.v0_11;
 import io.opentelemetry.api.GlobalOpenTelemetry;
 import io.opentelemetry.instrumentation.api.instrumenter.Instrumenter;
+import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
 import io.opentelemetry.instrumentation.kafka.internal.KafkaInstrumenterFactory;
 import io.opentelemetry.instrumentation.kafka.internal.OpenTelemetryMetricsReporter;
 import io.opentelemetry.instrumentation.kafka.internal.OpenTelemetrySupplier;
@@ -34,8 +35,10 @@ public final class KafkaSingletons {
           .getBoolean("otel.instrumentation.kafka.metric-reporter.enabled", true);

   private static final Instrumenter<ProducerRecord<?, ?>, RecordMetadata> PRODUCER_INSTRUMENTER;
-  private static final Instrumenter<ConsumerRecords<?, ?>, Void> CONSUMER_RECEIVE_INSTRUMENTER;
-  private static final Instrumenter<ConsumerRecord<?, ?>, Void> CONSUMER_PROCESS_INSTRUMENTER;
+  private static final Instrumenter<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void>
+      CONSUMER_RECEIVE_INSTRUMENTER;
+  private static final Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void>
+      CONSUMER_PROCESS_INSTRUMENTER;

   static {
     KafkaInstrumenterFactory instrumenterFactory =
@@ -59,11 +62,13 @@ public final class KafkaSingletons {
     return PRODUCER_INSTRUMENTER;
   }

-  public static Instrumenter<ConsumerRecords<?, ?>, Void> consumerReceiveInstrumenter() {
+  public static Instrumenter<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void>
+      consumerReceiveInstrumenter() {
     return CONSUMER_RECEIVE_INSTRUMENTER;
   }

-  public static Instrumenter<ConsumerRecord<?, ?>, Void> consumerProcessInstrumenter() {
+  public static Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void>
+      consumerProcessInstrumenter() {
     return CONSUMER_PROCESS_INSTRUMENTER;
   }

TracingIterable.java

@@ -9,23 +9,30 @@ import io.opentelemetry.context.Context;
 import io.opentelemetry.javaagent.bootstrap.kafka.KafkaClientsConsumerProcessTracing;
 import java.util.Iterator;
 import javax.annotation.Nullable;
+import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerRecord;

 public class TracingIterable<K, V> implements Iterable<ConsumerRecord<K, V>> {
   private final Iterable<ConsumerRecord<K, V>> delegate;
   @Nullable private final Context receiveContext;
+  private final Consumer<K, V> consumer;
   private boolean firstIterator = true;

   protected TracingIterable(
-      Iterable<ConsumerRecord<K, V>> delegate, @Nullable Context receiveContext) {
+      Iterable<ConsumerRecord<K, V>> delegate,
+      @Nullable Context receiveContext,
+      Consumer<K, V> consumer) {
     this.delegate = delegate;
     this.receiveContext = receiveContext;
+    this.consumer = consumer;
   }

   public static <K, V> Iterable<ConsumerRecord<K, V>> wrap(
-      Iterable<ConsumerRecord<K, V>> delegate, @Nullable Context receiveContext) {
+      Iterable<ConsumerRecord<K, V>> delegate,
+      @Nullable Context receiveContext,
+      Consumer<K, V> consumer) {
     if (KafkaClientsConsumerProcessTracing.wrappingEnabled()) {
-      return new TracingIterable<>(delegate, receiveContext);
+      return new TracingIterable<>(delegate, receiveContext, consumer);
     }
     return delegate;
   }
@@ -37,7 +44,7 @@ public class TracingIterable<K, V> implements Iterable<ConsumerRecord<K, V>> {
     // However, this is not thread-safe, but usually the first (hopefully only) traversal of
     // ConsumerRecords is performed in the same thread that called poll()
     if (firstIterator) {
-      it = TracingIterator.wrap(delegate.iterator(), receiveContext);
+      it = TracingIterator.wrap(delegate.iterator(), receiveContext, consumer);
       firstIterator = false;
     } else {
       it = delegate.iterator();

TracingIterator.java

@@ -9,35 +9,44 @@ import static io.opentelemetry.javaagent.instrumentation.kafkaclients.v0_11.KafkaSingletons.consumerProcessInstrumenter;
 import io.opentelemetry.context.Context;
 import io.opentelemetry.context.Scope;
+import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
 import io.opentelemetry.javaagent.bootstrap.kafka.KafkaClientsConsumerProcessTracing;
 import java.util.Iterator;
 import javax.annotation.Nullable;
+import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerRecord;

 public class TracingIterator<K, V> implements Iterator<ConsumerRecord<K, V>> {
   private final Iterator<ConsumerRecord<K, V>> delegateIterator;
   private final Context parentContext;
+  private final Consumer<K, V> consumer;

   /*
    * Note: this may potentially create problems if this iterator is used from different threads. But
    * at the moment we cannot do much about this.
    */
-  @Nullable private ConsumerRecord<?, ?> currentRequest;
+  @Nullable private ConsumerAndRecord<ConsumerRecord<?, ?>> currentRequest;
   @Nullable private Context currentContext;
   @Nullable private Scope currentScope;

   private TracingIterator(
-      Iterator<ConsumerRecord<K, V>> delegateIterator, @Nullable Context receiveContext) {
+      Iterator<ConsumerRecord<K, V>> delegateIterator,
+      @Nullable Context receiveContext,
+      Consumer<K, V> consumer) {
     this.delegateIterator = delegateIterator;

     // use the receive CONSUMER as parent if it's available
     this.parentContext = receiveContext != null ? receiveContext : Context.current();
+    this.consumer = consumer;
   }

   public static <K, V> Iterator<ConsumerRecord<K, V>> wrap(
-      Iterator<ConsumerRecord<K, V>> delegateIterator, @Nullable Context receiveContext) {
+      Iterator<ConsumerRecord<K, V>> delegateIterator,
+      @Nullable Context receiveContext,
+      Consumer<K, V> consumer) {
     if (KafkaClientsConsumerProcessTracing.wrappingEnabled()) {
-      return new TracingIterator<>(delegateIterator, receiveContext);
+      return new TracingIterator<>(delegateIterator, receiveContext, consumer);
     }
     return delegateIterator;
   }
@@ -60,7 +69,7 @@ public class TracingIterator<K, V> implements Iterator<ConsumerRecord<K, V>> {
       // (https://github.com/open-telemetry/opentelemetry-java-instrumentation/issues/1947)
       ConsumerRecord<K, V> next = delegateIterator.next();
       if (next != null && KafkaClientsConsumerProcessTracing.wrappingEnabled()) {
-        currentRequest = next;
+        currentRequest = ConsumerAndRecord.create(consumer, next);
         currentContext = consumerProcessInstrumenter().start(parentContext, currentRequest);
         currentScope = currentContext.makeCurrent();
       }

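Seen from application code, the wrapping is invisible; the new parameter only changes what the process-span request carries. An illustrative sketch (handle(...) is a placeholder):

import java.time.Duration;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;

class ConsumeLoopSketch {
  static void pollOnce(Consumer<String, String> consumer) {
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
    for (ConsumerRecord<String, String> record : records) {
      // Under the javaagent this iterator is a TracingIterator: next() built
      // ConsumerAndRecord.create(consumer, record), started a CONSUMER
      // "process" span, and made it current for the duration of this body.
      handle(record);
    }
  }

  static void handle(ConsumerRecord<String, String> record) {
    // application logic runs inside the process span
  }
}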
TracingList.java

@@ -11,20 +11,26 @@ import java.util.Collection;
 import java.util.List;
 import java.util.ListIterator;
 import javax.annotation.Nullable;
+import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerRecord;

 public class TracingList<K, V> extends TracingIterable<K, V> implements List<ConsumerRecord<K, V>> {
   private final List<ConsumerRecord<K, V>> delegate;

-  private TracingList(List<ConsumerRecord<K, V>> delegate, @Nullable Context receiveContext) {
-    super(delegate, receiveContext);
+  private TracingList(
+      List<ConsumerRecord<K, V>> delegate,
+      @Nullable Context receiveContext,
+      Consumer<K, V> consumer) {
+    super(delegate, receiveContext, consumer);
     this.delegate = delegate;
   }

   public static <K, V> List<ConsumerRecord<K, V>> wrap(
-      List<ConsumerRecord<K, V>> delegate, @Nullable Context receiveContext) {
+      List<ConsumerRecord<K, V>> delegate,
+      @Nullable Context receiveContext,
+      Consumer<K, V> consumer) {
     if (KafkaClientsConsumerProcessTracing.wrappingEnabled()) {
-      return new TracingList<>(delegate, receiveContext);
+      return new TracingList<>(delegate, receiveContext, consumer);
     }
     return delegate;
   }

KafkaClientDefaultTest.java

@@ -6,11 +6,8 @@
 package io.opentelemetry.javaagent.instrumentation.kafkaclients.v0_11;

 import static io.opentelemetry.instrumentation.testing.util.TelemetryDataUtil.orderByRootSpanKind;
-import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
-import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;
 import static org.assertj.core.api.Assertions.assertThat;

-import io.opentelemetry.api.common.AttributeKey;
 import io.opentelemetry.api.trace.SpanKind;
 import io.opentelemetry.instrumentation.kafka.internal.KafkaClientBaseTest;
 import io.opentelemetry.instrumentation.kafka.internal.KafkaClientPropagationBaseTest;
@@ -18,10 +15,8 @@ import io.opentelemetry.instrumentation.testing.junit.AgentInstrumentationExtension;
 import io.opentelemetry.instrumentation.testing.junit.InstrumentationExtension;
 import io.opentelemetry.sdk.trace.data.LinkData;
 import io.opentelemetry.sdk.trace.data.SpanData;
-import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
 import java.nio.charset.StandardCharsets;
 import java.time.Duration;
-import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
@@ -30,7 +25,6 @@ import java.util.concurrent.atomic.AtomicReference;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.clients.producer.ProducerRecord;
-import org.assertj.core.api.AbstractLongAssert;
 import org.junit.jupiter.api.DisplayName;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.extension.RegisterExtension;
@@ -51,7 +45,7 @@ class KafkaClientDefaultTest extends KafkaClientPropagationBaseTest {
         "parent",
         () -> {
           ProducerRecord<Integer, String> producerRecord =
-              new ProducerRecord<>(SHARED_TOPIC, greeting);
+              new ProducerRecord<>(SHARED_TOPIC, 10, greeting);
           if (testHeaders) {
             producerRecord
                 .headers()
@@ -80,8 +74,8 @@ class KafkaClientDefaultTest extends KafkaClientPropagationBaseTest {
       testing.runWithSpan(
           "processing",
           () -> {
+            assertThat(record.key()).isEqualTo(10);
             assertThat(record.value()).isEqualTo(greeting);
-            assertThat(record.key()).isNull();
           });
     }

     AtomicReference<SpanData> producerSpan = new AtomicReference<>();
@@ -89,85 +83,32 @@ class KafkaClientDefaultTest extends KafkaClientPropagationBaseTest {
         orderByRootSpanKind(SpanKind.INTERNAL, SpanKind.CONSUMER),
         trace -> {
           trace.hasSpansSatisfyingExactly(
-              span -> {
-                span.hasName("parent").hasKind(SpanKind.INTERNAL).hasNoParent();
-              },
-              span -> {
+              span -> span.hasName("parent").hasKind(SpanKind.INTERNAL).hasNoParent(),
+              span ->
                   span.hasName(SHARED_TOPIC + " send")
                       .hasKind(SpanKind.PRODUCER)
                       .hasParent(trace.getSpan(0))
-                      .hasAttributesSatisfying(
-                          equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                          equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
-                          equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                          satisfies(
-                              SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
-                              AbstractLongAssert::isNotNegative),
-                          satisfies(
-                              AttributeKey.longKey("messaging.kafka.message.offset"),
-                              AbstractLongAssert::isNotNegative));
-                if (testHeaders) {
-                  span.hasAttributesSatisfying(
-                      equalTo(
-                          AttributeKey.stringArrayKey("messaging.header.test_message_header"),
-                          Collections.singletonList("test")));
-                }
-              },
-              span -> {
+                      .hasAttributesSatisfyingExactly(sendAttributes("10", greeting, testHeaders)),
+              span ->
                   span.hasName("producer callback")
                       .hasKind(SpanKind.INTERNAL)
-                      .hasParent(trace.getSpan(0));
-              });
+                      .hasParent(trace.getSpan(0)));
           producerSpan.set(trace.getSpan(1));
         },
         trace ->
             trace.hasSpansSatisfyingExactly(
-                span -> {
+                span ->
                     span.hasName(SHARED_TOPIC + " receive")
                         .hasKind(SpanKind.CONSUMER)
                         .hasNoParent()
-                        .hasAttributesSatisfying(
-                            equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                            equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive"));
-                  if (testHeaders) {
-                    span.hasAttributesSatisfying(
-                        equalTo(
-                            AttributeKey.stringArrayKey("messaging.header.test_message_header"),
-                            Collections.singletonList("test")));
-                  }
-                },
-                span -> {
+                        .hasAttributesSatisfyingExactly(receiveAttributes(testHeaders)),
+                span ->
                     span.hasName(SHARED_TOPIC + " process")
                         .hasKind(SpanKind.CONSUMER)
                         .hasLinks(LinkData.create(producerSpan.get().getSpanContext()))
                         .hasParent(trace.getSpan(0))
-                        .hasAttributesSatisfying(
-                            equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                            equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
-                            equalTo(
-                                SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
-                                greeting.getBytes(StandardCharsets.UTF_8).length),
-                            satisfies(
-                                SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
-                                AbstractLongAssert::isNotNegative),
-                            satisfies(
-                                AttributeKey.longKey("messaging.kafka.message.offset"),
-                                AbstractLongAssert::isNotNegative),
-                            satisfies(
-                                AttributeKey.longKey("kafka.record.queue_time_ms"),
-                                AbstractLongAssert::isNotNegative));
-                  if (testHeaders) {
-                    span.hasAttributesSatisfying(
-                        equalTo(
-                            AttributeKey.stringArrayKey("messaging.header.test_message_header"),
-                            Collections.singletonList("test")));
-                  }
-                },
+                        .hasAttributesSatisfyingExactly(
+                            processAttributes("10", greeting, testHeaders)),
                 span -> span.hasName("processing").hasParent(trace.getSpan(1))));
   }
@@ -192,59 +133,26 @@ class KafkaClientDefaultTest extends KafkaClientPropagationBaseTest {
         orderByRootSpanKind(SpanKind.INTERNAL, SpanKind.CONSUMER),
         trace -> {
           trace.hasSpansSatisfyingExactly(
-              span -> {
+              span ->
                   span.hasName(SHARED_TOPIC + " send")
                       .hasKind(SpanKind.PRODUCER)
                       .hasNoParent()
-                      .hasAttributesSatisfying(
-                          equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                          equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
-                          equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_TOMBSTONE, true),
-                          equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                          satisfies(
-                              SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
-                              AbstractLongAssert::isNotNegative),
-                          satisfies(
-                              AttributeKey.longKey("messaging.kafka.message.offset"),
-                              AbstractLongAssert::isNotNegative));
-              });
+                      .hasAttributesSatisfyingExactly(sendAttributes(null, null, false)));
           producerSpan.set(trace.getSpan(0));
         },
-        trace -> {
+        trace ->
             trace.hasSpansSatisfyingExactly(
-                span -> {
+                span ->
                     span.hasName(SHARED_TOPIC + " receive")
                         .hasKind(SpanKind.CONSUMER)
                         .hasNoParent()
-                        .hasAttributesSatisfying(
-                            equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                            equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive"));
-                },
-                span -> {
+                        .hasAttributesSatisfyingExactly(receiveAttributes(false)),
+                span ->
                     span.hasName(SHARED_TOPIC + " process")
                         .hasKind(SpanKind.CONSUMER)
                         .hasLinks(LinkData.create(producerSpan.get().getSpanContext()))
                         .hasParent(trace.getSpan(0))
-                        .hasAttributesSatisfying(
-                            equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                            equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
-                            equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_TOMBSTONE, true),
-                            equalTo(SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES, -1L),
-                            satisfies(
-                                SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
-                                AbstractLongAssert::isNotNegative),
-                            satisfies(
-                                AttributeKey.longKey("messaging.kafka.message.offset"),
-                                AbstractLongAssert::isNotNegative),
-                            satisfies(
-                                AttributeKey.longKey("kafka.record.queue_time_ms"),
-                                AbstractLongAssert::isNotNegative));
-                });
-        });
+                        .hasAttributesSatisfyingExactly(processAttributes(null, null, false))));
   }

   @DisplayName("test records(TopicPartition) kafka consume")
@@ -276,55 +184,25 @@ class KafkaClientDefaultTest extends KafkaClientPropagationBaseTest {
         orderByRootSpanKind(SpanKind.INTERNAL, SpanKind.CONSUMER),
         trace -> {
           trace.hasSpansSatisfyingExactly(
-              span -> {
+              span ->
                   span.hasName(SHARED_TOPIC + " send")
                       .hasKind(SpanKind.PRODUCER)
                       .hasNoParent()
-                      .hasAttributesSatisfying(
-                          equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                          equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
-                          equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                          equalTo(
-                              SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION, partition),
-                          satisfies(
-                              AttributeKey.longKey("messaging.kafka.message.offset"),
-                              AbstractLongAssert::isNotNegative));
-              });
+                      .hasAttributesSatisfyingExactly(sendAttributes(null, greeting, false)));
           producerSpan.set(trace.getSpan(0));
         },
-        trace -> {
+        trace ->
             trace.hasSpansSatisfyingExactly(
-                span -> {
+                span ->
                     span.hasName(SHARED_TOPIC + " receive")
                         .hasKind(SpanKind.CONSUMER)
                         .hasNoParent()
-                        .hasAttributesSatisfying(
-                            equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                            equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive"));
-                },
-                span -> {
+                        .hasAttributesSatisfyingExactly(receiveAttributes(false)),
+                span ->
                     span.hasName(SHARED_TOPIC + " process")
                         .hasKind(SpanKind.CONSUMER)
                         .hasLinks(LinkData.create(producerSpan.get().getSpanContext()))
                         .hasParent(trace.getSpan(0))
-                        .hasAttributesSatisfying(
-                            equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                            equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
-                            equalTo(
-                                SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
-                                greeting.getBytes(StandardCharsets.UTF_8).length),
-                            equalTo(SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION, partition),
-                            satisfies(
-                                AttributeKey.longKey("messaging.kafka.message.offset"),
-                                AbstractLongAssert::isNotNegative),
-                            satisfies(
-                                AttributeKey.longKey("kafka.record.queue_time_ms"),
-                                AbstractLongAssert::isNotNegative));
-                });
-        });
+                        .hasAttributesSatisfyingExactly(processAttributes(null, greeting, false))));
   }
 }
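The producer-side test change swaps between two standard kafka-clients ProducerRecord constructors; a small self-contained illustration (the SHARED_TOPIC value is a placeholder):

import org.apache.kafka.clients.producer.ProducerRecord;

class ProducerRecordConstructorsSketch {
  static final String SHARED_TOPIC = "shared-topic"; // placeholder name

  static void examples(String greeting) {
    // ProducerRecord(String topic, V value): the key is null, which is why the
    // old assertions checked record.key() == null and expected no message-key
    // attribute on the spans.
    ProducerRecord<Integer, String> withoutKey = new ProducerRecord<>(SHARED_TOPIC, greeting);

    // ProducerRecord(String topic, K key, V value): key 10 is what lets the new
    // sendAttributes("10", ...) and processAttributes("10", ...) helpers assert
    // messaging.kafka.message.key = "10".
    ProducerRecord<Integer, String> withKey = new ProducerRecord<>(SHARED_TOPIC, 10, greeting);
  }
}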

KafkaClientPropagationDisabledTest.java

@@ -5,23 +5,17 @@
 package io.opentelemetry.javaagent.instrumentation.kafkaclients.v0_11;

-import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
-import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;
 import static org.assertj.core.api.Assertions.assertThat;

-import io.opentelemetry.api.common.AttributeKey;
 import io.opentelemetry.api.trace.SpanKind;
 import io.opentelemetry.instrumentation.kafka.internal.KafkaClientPropagationBaseTest;
 import io.opentelemetry.instrumentation.testing.junit.AgentInstrumentationExtension;
 import io.opentelemetry.instrumentation.testing.junit.InstrumentationExtension;
-import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
-import java.nio.charset.StandardCharsets;
 import java.time.Duration;
 import java.util.Collections;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.clients.producer.ProducerRecord;
-import org.assertj.core.api.AbstractLongAssert;
 import org.junit.jupiter.api.DisplayName;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.extension.RegisterExtension;
@@ -37,24 +31,13 @@ class KafkaClientPropagationDisabledTest extends KafkaClientPropagationBaseTest
     producer.send(new ProducerRecord<>(SHARED_TOPIC, message));

     testing.waitAndAssertTraces(
-        trace -> {
+        trace ->
             trace.hasSpansSatisfyingExactly(
-                span -> {
+                span ->
                     span.hasName(SHARED_TOPIC + " send")
                         .hasKind(SpanKind.PRODUCER)
                         .hasNoParent()
-                        .hasAttributesSatisfying(
-                            equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                            satisfies(
-                                SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
-                                AbstractLongAssert::isNotNegative),
-                            satisfies(
-                                AttributeKey.longKey("messaging.kafka.message.offset"),
-                                AbstractLongAssert::isNotNegative));
-                });
-        });
+                        .hasAttributesSatisfyingExactly(sendAttributes(null, message, false))));

     awaitUntilConsumerIsReady();
@@ -68,49 +51,20 @@ class KafkaClientPropagationDisabledTest extends KafkaClientPropagationBaseTest
     }

     testing.waitAndAssertTraces(
-        trace -> {
+        trace ->
             trace.hasSpansSatisfyingExactly(
-                span -> {
+                span ->
                     span.hasName(SHARED_TOPIC + " send")
                         .hasKind(SpanKind.PRODUCER)
                         .hasNoParent()
-                        .hasAttributesSatisfying(
-                            equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                            satisfies(
-                                SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
-                                AbstractLongAssert::isNotNegative),
-                            satisfies(
-                                AttributeKey.longKey("messaging.kafka.message.offset"),
-                                AbstractLongAssert::isNotNegative));
-                });
-        },
-        trace -> {
+                        .hasAttributesSatisfyingExactly(sendAttributes(null, message, false))),
+        trace ->
             trace.hasSpansSatisfyingExactly(
-                span -> {
+                span ->
                     span.hasName(SHARED_TOPIC + " process")
                         .hasKind(SpanKind.CONSUMER)
                         .hasLinks(Collections.emptyList())
-                        .hasAttributesSatisfying(
-                            equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                            equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
-                            equalTo(
-                                SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
-                                message.getBytes(StandardCharsets.UTF_8).length),
-                            equalTo(SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION, partition),
-                            satisfies(
-                                AttributeKey.longKey("messaging.kafka.message.offset"),
-                                AbstractLongAssert::isNotNegative),
-                            satisfies(
-                                AttributeKey.longKey("kafka.record.queue_time_ms"),
-                                AbstractLongAssert::isNotNegative));
-                },
-                span -> {
-                  span.hasName("processing").hasParent(trace.getSpan(0));
-                });
-        });
+                        .hasAttributesSatisfyingExactly(processAttributes(null, message, false)),
+                span -> span.hasName("processing").hasParent(trace.getSpan(0))));
   }
 }

KafkaClientSuppressReceiveSpansTest.java

@@ -5,19 +5,13 @@
 package io.opentelemetry.javaagent.instrumentation.kafkaclients.v0_11;

-import static io.opentelemetry.instrumentation.testing.util.TelemetryDataUtil.orderByRootSpanKind;
-import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
-import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;
 import static org.assertj.core.api.Assertions.assertThat;

-import io.opentelemetry.api.common.AttributeKey;
 import io.opentelemetry.api.trace.SpanKind;
 import io.opentelemetry.instrumentation.kafka.internal.KafkaClientBaseTest;
 import io.opentelemetry.instrumentation.kafka.internal.KafkaClientPropagationBaseTest;
 import io.opentelemetry.instrumentation.testing.junit.AgentInstrumentationExtension;
 import io.opentelemetry.instrumentation.testing.junit.InstrumentationExtension;
-import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
-import java.nio.charset.StandardCharsets;
 import java.time.Duration;
 import java.util.List;
 import java.util.concurrent.ExecutionException;
@@ -26,7 +20,6 @@ import java.util.concurrent.TimeoutException;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.clients.producer.ProducerRecord;
-import org.assertj.core.api.AbstractLongAssert;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.extension.RegisterExtension;
@@ -41,7 +34,7 @@ class KafkaClientSuppressReceiveSpansTest extends KafkaClientPropagationBaseTest
         "parent",
         () -> {
           producer.send(
-              new ProducerRecord<>(SHARED_TOPIC, greeting),
+              new ProducerRecord<>(SHARED_TOPIC, 10, greeting),
               (meta, ex) -> {
                 if (ex == null) {
                   testing.runWithSpan("producer callback", () -> {});
@@ -59,63 +52,33 @@ class KafkaClientSuppressReceiveSpansTest extends KafkaClientPropagationBaseTest
       testing.runWithSpan(
           "processing",
           () -> {
+            assertThat(record.key()).isEqualTo(10);
             assertThat(record.value()).isEqualTo(greeting);
-            assertThat(record.key()).isNull();
           });
     }

     testing.waitAndAssertTraces(
-        trace -> {
+        trace ->
             trace.hasSpansSatisfyingExactly(
-                span -> {
-                  span.hasName("parent").hasKind(SpanKind.INTERNAL).hasNoParent();
-                },
-                span -> {
+                span -> span.hasName("parent").hasKind(SpanKind.INTERNAL).hasNoParent(),
+                span ->
                     span.hasName(SHARED_TOPIC + " send")
                         .hasKind(SpanKind.PRODUCER)
                         .hasParent(trace.getSpan(0))
-                        .hasAttributesSatisfying(
-                            equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                            satisfies(
-                                SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
-                                AbstractLongAssert::isNotNegative),
-                            satisfies(
-                                AttributeKey.longKey("messaging.kafka.message.offset"),
-                                AbstractLongAssert::isNotNegative));
-                },
-                span -> {
+                        .hasAttributesSatisfyingExactly(sendAttributes("10", greeting, false)),
+                span ->
                     span.hasName(SHARED_TOPIC + " process")
                         .hasKind(SpanKind.CONSUMER)
                         .hasParent(trace.getSpan(1))
-                        .hasAttributesSatisfying(
-                            equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                            equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
-                            equalTo(
-                                SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
-                                greeting.getBytes(StandardCharsets.UTF_8).length),
-                            satisfies(
-                                SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
-                                AbstractLongAssert::isNotNegative),
-                            satisfies(
-                                AttributeKey.longKey("messaging.kafka.message.offset"),
-                                AbstractLongAssert::isNotNegative),
-                            satisfies(
-                                AttributeKey.longKey("kafka.record.queue_time_ms"),
-                                AbstractLongAssert::isNotNegative));
-                },
-                span -> {
-                  span.hasName("processing").hasKind(SpanKind.INTERNAL).hasParent(trace.getSpan(2));
-                },
-                span -> {
+                        .hasAttributesSatisfyingExactly(processAttributes("10", greeting, false)),
+                span ->
+                    span.hasName("processing")
+                        .hasKind(SpanKind.INTERNAL)
+                        .hasParent(trace.getSpan(2)),
+                span ->
                     span.hasName("producer callback")
                         .hasKind(SpanKind.INTERNAL)
-                        .hasParent(trace.getSpan(0));
-                });
-        });
+                        .hasParent(trace.getSpan(0))));
   }

   @Test
@@ -133,48 +96,19 @@ class KafkaClientSuppressReceiveSpansTest extends KafkaClientPropagationBaseTest
       assertThat(record.key()).isNull();
     }

-    testing.waitAndAssertSortedTraces(
-        orderByRootSpanKind(SpanKind.INTERNAL, SpanKind.CONSUMER),
-        trace -> {
+    testing.waitAndAssertTraces(
+        trace ->
             trace.hasSpansSatisfyingExactly(
-                span -> {
+                span ->
                     span.hasName(SHARED_TOPIC + " send")
                         .hasKind(SpanKind.PRODUCER)
                         .hasNoParent()
-                        .hasAttributesSatisfying(
-                            equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
-                            equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_TOMBSTONE, true),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                            satisfies(
-                                SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
-                                AbstractLongAssert::isNotNegative),
-                            satisfies(
-                                AttributeKey.longKey("messaging.kafka.message.offset"),
-                                AbstractLongAssert::isNotNegative));
-                },
-                span -> {
+                        .hasAttributesSatisfyingExactly(sendAttributes(null, null, false)),
+                span ->
                     span.hasName(SHARED_TOPIC + " process")
                         .hasKind(SpanKind.CONSUMER)
                         .hasParent(trace.getSpan(0))
-                        .hasAttributesSatisfying(
-                            equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                            equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
-                            equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_TOMBSTONE, true),
-                            equalTo(SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES, -1L),
-                            satisfies(
-                                SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
-                                AbstractLongAssert::isNotNegative),
-                            satisfies(
-                                AttributeKey.longKey("messaging.kafka.message.offset"),
-                                AbstractLongAssert::isNotNegative),
-                            satisfies(
-                                AttributeKey.longKey("kafka.record.queue_time_ms"),
-                                AbstractLongAssert::isNotNegative));
-                });
-        });
+                        .hasAttributesSatisfyingExactly(processAttributes(null, null, false))));
   }

   @Test
@@ -200,44 +134,18 @@ class KafkaClientSuppressReceiveSpansTest extends KafkaClientPropagationBaseTest
       assertThat(record.key()).isNull();
     }

-    testing.waitAndAssertSortedTraces(
-        orderByRootSpanKind(SpanKind.INTERNAL, SpanKind.CONSUMER),
-        trace -> {
+    testing.waitAndAssertTraces(
+        trace ->
             trace.hasSpansSatisfyingExactly(
-                span -> {
+                span ->
                     span.hasName(SHARED_TOPIC + " send")
                         .hasKind(SpanKind.PRODUCER)
                         .hasNoParent()
-                        .hasAttributesSatisfying(
-                            equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                            equalTo(
-                                SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION, partition),
-                            satisfies(
-                                AttributeKey.longKey("messaging.kafka.message.offset"),
-                                AbstractLongAssert::isNotNegative));
-                },
-                span -> {
+                        .hasAttributesSatisfyingExactly(sendAttributes(null, greeting, false)),
+                span ->
                     span.hasName(SHARED_TOPIC + " process")
                         .hasKind(SpanKind.CONSUMER)
                         .hasParent(trace.getSpan(0))
-                        .hasAttributesSatisfying(
-                            equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
-                            equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                            equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
-                            equalTo(
-                                SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
-                                greeting.getBytes(StandardCharsets.UTF_8).length),
-                            equalTo(SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION, partition),
-                            satisfies(
-                                AttributeKey.longKey("messaging.kafka.message.offset"),
-                                AbstractLongAssert::isNotNegative),
-                            satisfies(
-                                AttributeKey.longKey("kafka.record.queue_time_ms"),
-                                AbstractLongAssert::isNotNegative));
-                });
-        });
+                        .hasAttributesSatisfyingExactly(processAttributes(null, greeting, false))));
   }
 }

KafkaClientBaseTest.java

@@ -5,10 +5,20 @@
 package io.opentelemetry.instrumentation.kafka.internal;

+import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
+import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;
+
+import io.opentelemetry.api.common.AttributeKey;
+import io.opentelemetry.sdk.testing.assertj.AttributeAssertion;
+import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
+import java.nio.charset.StandardCharsets;
 import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
@@ -25,6 +35,7 @@ import org.apache.kafka.common.serialization.IntegerDeserializer;
 import org.apache.kafka.common.serialization.IntegerSerializer;
 import org.apache.kafka.common.serialization.StringDeserializer;
 import org.apache.kafka.common.serialization.StringSerializer;
+import org.assertj.core.api.AbstractLongAssert;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.TestInstance;
@@ -138,4 +149,98 @@ public abstract class KafkaClientBaseTest {
     }
     consumer.seekToBeginning(Collections.emptyList());
   }
+
+  protected static List<AttributeAssertion> sendAttributes(
+      String messageKey, String messageValue, boolean testHeaders) {
+    List<AttributeAssertion> assertions =
+        new ArrayList<>(
+            Arrays.asList(
+                equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
+                equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
+                equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
+                satisfies(
+                    SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
+                    AbstractLongAssert::isNotNegative),
+                satisfies(
+                    SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
+                    AbstractLongAssert::isNotNegative)));
+    if (messageKey != null) {
+      assertions.add(equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, messageKey));
+    }
+    if (messageValue == null) {
+      assertions.add(equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_TOMBSTONE, true));
+    }
+    if (testHeaders) {
+      assertions.add(
+          equalTo(
+              AttributeKey.stringArrayKey("messaging.header.test_message_header"),
+              Collections.singletonList("test")));
+    }
+    return assertions;
+  }
+
+  protected static List<AttributeAssertion> receiveAttributes(boolean testHeaders) {
+    List<AttributeAssertion> assertions =
+        new ArrayList<>(
+            Arrays.asList(
+                equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
+                equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
+                equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
+                equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive")));
+    // consumer group id is not available in version 0.11
+    if (Boolean.getBoolean("testLatestDeps")) {
+      assertions.add(equalTo(SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP, "test"));
+    }
+    if (testHeaders) {
+      assertions.add(
+          equalTo(
+              AttributeKey.stringArrayKey("messaging.header.test_message_header"),
+              Collections.singletonList("test")));
+    }
+    return assertions;
+  }
+
+  protected static List<AttributeAssertion> processAttributes(
+      String messageKey, String messageValue, boolean testHeaders) {
+    List<AttributeAssertion> assertions =
+        new ArrayList<>(
+            Arrays.asList(
+                equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
+                equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
+                equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
+                equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
+                satisfies(
+                    SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
+                    AbstractLongAssert::isNotNegative),
+                satisfies(
+                    SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
+                    AbstractLongAssert::isNotNegative),
+                satisfies(
+                    AttributeKey.longKey("kafka.record.queue_time_ms"),
+                    AbstractLongAssert::isNotNegative)));
+    // consumer group id is not available in version 0.11
+    if (Boolean.getBoolean("testLatestDeps")) {
+      assertions.add(equalTo(SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP, "test"));
+    }
+    if (messageKey != null) {
+      assertions.add(equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, messageKey));
+    }
+    if (messageValue == null) {
+      assertions.add(equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_TOMBSTONE, true));
+      // TODO shouldn't set -1 in this case
+      assertions.add(equalTo(SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES, -1L));
+    } else {
+      assertions.add(
+          equalTo(
+              SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
+              messageValue.getBytes(StandardCharsets.UTF_8).length));
+    }
+    if (testHeaders) {
+      assertions.add(
+          equalTo(
+              AttributeKey.stringArrayKey("messaging.header.test_message_header"),
+              Collections.singletonList("test")));
+    }
+    return assertions;
+  }
 }

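Two notes on the helpers above. Boolean.getBoolean("testLatestDeps") reads a JVM system property (set by the build when running against the latest kafka-clients), not an environment variable, so the messaging.kafka.consumer.group assertion is only active on those runs. And because each helper returns a List<AttributeAssertion>, the test classes can pass the result straight to hasAttributesSatisfyingExactly; this call shape, repeated throughout the tests above, is the whole integration surface:

trace.hasSpansSatisfyingExactly(
    span ->
        span.hasName(SHARED_TOPIC + " send")
            .hasKind(SpanKind.PRODUCER)
            .hasNoParent()
            .hasAttributesSatisfyingExactly(sendAttributes("10", greeting, testHeaders)));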
KafkaTelemetry.java

@@ -8,14 +8,12 @@ package io.opentelemetry.instrumentation.kafkaclients.v2_6;
 import static java.util.logging.Level.WARNING;
 import io.opentelemetry.api.OpenTelemetry;
-import io.opentelemetry.api.trace.Span;
 import io.opentelemetry.context.Context;
 import io.opentelemetry.context.Scope;
-import io.opentelemetry.context.propagation.TextMapGetter;
 import io.opentelemetry.context.propagation.TextMapPropagator;
 import io.opentelemetry.context.propagation.TextMapSetter;
 import io.opentelemetry.instrumentation.api.instrumenter.Instrumenter;
-import io.opentelemetry.instrumentation.kafka.internal.KafkaConsumerRecordGetter;
+import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
 import io.opentelemetry.instrumentation.kafka.internal.KafkaHeadersSetter;
 import io.opentelemetry.instrumentation.kafka.internal.OpenTelemetryMetricsReporter;
 import io.opentelemetry.instrumentation.kafka.internal.OpenTelemetrySupplier;
@@ -43,20 +41,18 @@ import org.apache.kafka.common.metrics.MetricsReporter;
 public final class KafkaTelemetry {
   private static final Logger logger = Logger.getLogger(KafkaTelemetry.class.getName());
-  private static final TextMapGetter<ConsumerRecord<?, ?>> GETTER =
-      KafkaConsumerRecordGetter.INSTANCE;
   private static final TextMapSetter<Headers> SETTER = KafkaHeadersSetter.INSTANCE;
   private final OpenTelemetry openTelemetry;
   private final Instrumenter<ProducerRecord<?, ?>, RecordMetadata> producerInstrumenter;
-  private final Instrumenter<ConsumerRecord<?, ?>, Void> consumerProcessInstrumenter;
+  private final Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void>
+      consumerProcessInstrumenter;
   private final boolean producerPropagationEnabled;
   KafkaTelemetry(
       OpenTelemetry openTelemetry,
       Instrumenter<ProducerRecord<?, ?>, RecordMetadata> producerInstrumenter,
-      Instrumenter<ConsumerRecord<?, ?>, Void> consumerProcessInstrumenter,
+      Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> consumerProcessInstrumenter,
       boolean producerPropagationEnabled) {
     this.openTelemetry = openTelemetry;
     this.producerInstrumenter = producerInstrumenter;
@@ -126,7 +122,7 @@ public final class KafkaTelemetry {
           // ConsumerRecords<K, V> poll(long timeout)
           // ConsumerRecords<K, V> poll(Duration duration)
           if ("poll".equals(method.getName()) && result instanceof ConsumerRecords) {
-            buildAndFinishSpan((ConsumerRecords) result);
+            buildAndFinishSpan(consumer, (ConsumerRecords) result);
           }
           return result;
         });
@@ -220,18 +216,16 @@ public final class KafkaTelemetry {
     }
   }
-  <K, V> void buildAndFinishSpan(ConsumerRecords<K, V> records) {
-    Context currentContext = Context.current();
+  <K, V> void buildAndFinishSpan(Consumer<K, V> consumer, ConsumerRecords<K, V> records) {
+    Context parentContext = Context.current();
     for (ConsumerRecord<K, V> record : records) {
-      Context linkedContext = propagator().extract(currentContext, record, GETTER);
-      Context newContext = currentContext.with(Span.fromContext(linkedContext));
-      if (!consumerProcessInstrumenter.shouldStart(newContext, record)) {
+      ConsumerAndRecord<ConsumerRecord<?, ?>> request = ConsumerAndRecord.create(consumer, record);
+      if (!consumerProcessInstrumenter.shouldStart(parentContext, request)) {
         continue;
       }
-      Context current = consumerProcessInstrumenter.start(newContext, record);
-      consumerProcessInstrumenter.end(current, record, null, null);
+      Context context = consumerProcessInstrumenter.start(parentContext, request);
+      consumerProcessInstrumenter.end(context, request, null, null);
     }
   }
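
The Consumer handle threaded through buildAndFinishSpan above comes from the poll-intercepting proxy that KafkaTelemetry places around a consumer. A minimal usage sketch of that wrapping, assuming this module's create(...) and wrap(...) entry points; broker address, group id, and topic are placeholders:

import io.opentelemetry.api.GlobalOpenTelemetry;
import io.opentelemetry.instrumentation.kafkaclients.v2_6.KafkaTelemetry;
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

class WrappedConsumerSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group"); // surfaces as messaging.kafka.consumer.group
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

    KafkaTelemetry telemetry = KafkaTelemetry.create(GlobalOpenTelemetry.get());
    // the returned proxy intercepts poll(...) and routes the result through
    // buildAndFinishSpan(consumer, records), one CONSUMER span per record
    try (Consumer<String, String> consumer = telemetry.wrap(new KafkaConsumer<String, String>(props))) {
      consumer.subscribe(Collections.singletonList("example-topic")); // placeholder topic
      consumer.poll(Duration.ofSeconds(1));
    }
  }
}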

View File

@@ -11,6 +11,7 @@ import com.google.errorprone.annotations.CanIgnoreReturnValue;
 import io.opentelemetry.api.OpenTelemetry;
 import io.opentelemetry.instrumentation.api.instrumenter.AttributesExtractor;
 import io.opentelemetry.instrumentation.api.instrumenter.messaging.MessageOperation;
+import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
 import io.opentelemetry.instrumentation.kafka.internal.KafkaInstrumenterFactory;
 import java.util.ArrayList;
 import java.util.List;
@@ -25,8 +26,8 @@ public final class KafkaTelemetryBuilder {
   private final OpenTelemetry openTelemetry;
   private final List<AttributesExtractor<ProducerRecord<?, ?>, RecordMetadata>>
       producerAttributesExtractors = new ArrayList<>();
-  private final List<AttributesExtractor<ConsumerRecord<?, ?>, Void>> consumerAttributesExtractors =
-      new ArrayList<>();
+  private final List<AttributesExtractor<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void>>
+      consumerAttributesExtractors = new ArrayList<>();
   private List<String> capturedHeaders = emptyList();
   private boolean captureExperimentalSpanAttributes = false;
   private boolean propagationEnabled = true;
@@ -44,7 +45,7 @@ public final class KafkaTelemetryBuilder {
   @CanIgnoreReturnValue
   public KafkaTelemetryBuilder addConsumerAttributesExtractors(
-      AttributesExtractor<ConsumerRecord<?, ?>, Void> extractor) {
+      AttributesExtractor<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> extractor) {
     consumerAttributesExtractors.add(extractor);
     return this;
   }

View File

@@ -23,7 +23,7 @@ public class TracingConsumerInterceptor<K, V> implements ConsumerInterceptor<K,
   @Override
   public ConsumerRecords<K, V> onConsume(ConsumerRecords<K, V> records) {
-    telemetry.buildAndFinishSpan(records);
+    telemetry.buildAndFinishSpan(null, records);
     return records;
   }

View File

@@ -0,0 +1,73 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.instrumentation.kafka.internal;
import com.google.auto.value.AutoValue;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.Consumer;
/**
* This class is internal and is hence not for public use. Its APIs are unstable and can change at
* any time.
*/
@AutoValue
public abstract class ConsumerAndRecord<R> {
public static <R> ConsumerAndRecord<R> create(@Nullable Consumer<?, ?> consumer, R record) {
return new AutoValue_ConsumerAndRecord<>(consumer, record);
}
@Nullable
public abstract Consumer<?, ?> consumer();
public abstract R record();
private static final MethodHandle GET_GROUP_METADATA;
private static final MethodHandle GET_GROUP_ID;
static {
MethodHandle getGroupMetadata;
MethodHandle getGroupId;
try {
Class<?> consumerGroupMetadata =
Class.forName("org.apache.kafka.clients.consumer.ConsumerGroupMetadata");
MethodHandles.Lookup lookup = MethodHandles.publicLookup();
getGroupMetadata =
lookup.findVirtual(
Consumer.class, "groupMetadata", MethodType.methodType(consumerGroupMetadata));
getGroupId =
lookup.findVirtual(consumerGroupMetadata, "groupId", MethodType.methodType(String.class));
} catch (ClassNotFoundException | IllegalAccessException | NoSuchMethodException ignored) {
getGroupMetadata = null;
getGroupId = null;
}
GET_GROUP_METADATA = getGroupMetadata;
GET_GROUP_ID = getGroupId;
}
@Nullable
String consumerGroup() {
if (GET_GROUP_METADATA == null || GET_GROUP_ID == null) {
return null;
}
Consumer<?, ?> consumer = consumer();
if (consumer == null) {
return null;
}
try {
Object metadata = GET_GROUP_METADATA.invoke(consumer);
return (String) GET_GROUP_ID.invoke(metadata);
} catch (Throwable e) {
return null;
}
}
}
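
The MethodHandle indirection above exists because Consumer#groupMetadata() only appeared in kafka-clients 2.5. A short sketch (not part of the instrumentation) of the direct equivalent, showing what the reflective lookup avoids:

import org.apache.kafka.clients.consumer.Consumer;

final class DirectGroupLookupSketch {
  private DirectGroupLookupSketch() {}

  // compiles against kafka-clients >= 2.5; with an older client jar on the
  // classpath the call site throws NoSuchMethodError at runtime, which is why
  // ConsumerAndRecord resolves the method lazily and degrades to null instead
  static String groupIdOrNull(Consumer<?, ?> consumer) {
    try {
      return consumer.groupMetadata().groupId();
    } catch (NoSuchMethodError e) {
      return null;
    }
  }
}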

View File

@@ -1,98 +0,0 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.instrumentation.kafka.internal;
import io.opentelemetry.instrumentation.api.instrumenter.messaging.MessagingAttributesGetter;
import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;
enum KafkaBatchProcessAttributesGetter
implements MessagingAttributesGetter<ConsumerRecords<?, ?>, Void> {
INSTANCE;
@Override
public String getSystem(ConsumerRecords<?, ?> records) {
return "kafka";
}
@Override
public String getDestinationKind(ConsumerRecords<?, ?> records) {
return SemanticAttributes.MessagingDestinationKindValues.TOPIC;
}
@Nullable
@Override
public String getDestination(ConsumerRecords<?, ?> records) {
Set<String> topics =
records.partitions().stream().map(TopicPartition::topic).collect(Collectors.toSet());
// only return topic when there's exactly one in the batch
return topics.size() == 1 ? topics.iterator().next() : null;
}
@Override
public boolean isTemporaryDestination(ConsumerRecords<?, ?> records) {
return false;
}
@Nullable
@Override
public String getProtocol(ConsumerRecords<?, ?> records) {
return null;
}
@Nullable
@Override
public String getProtocolVersion(ConsumerRecords<?, ?> records) {
return null;
}
@Nullable
@Override
public String getUrl(ConsumerRecords<?, ?> records) {
return null;
}
@Nullable
@Override
public String getConversationId(ConsumerRecords<?, ?> records) {
return null;
}
@Nullable
@Override
public Long getMessagePayloadSize(ConsumerRecords<?, ?> records) {
return null;
}
@Nullable
@Override
public Long getMessagePayloadCompressedSize(ConsumerRecords<?, ?> records) {
return null;
}
@Nullable
@Override
public String getMessageId(ConsumerRecords<?, ?> records, @Nullable Void unused) {
return null;
}
@Override
public List<String> getMessageHeader(ConsumerRecords<?, ?> records, String name) {
return StreamSupport.stream(records.spliterator(), false)
.flatMap(
consumerRecord ->
StreamSupport.stream(consumerRecord.headers().headers(name).spliterator(), false))
.map(header -> new String(header.value(), StandardCharsets.UTF_8))
.collect(Collectors.toList());
}
}

View File

@@ -14,9 +14,10 @@ import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
 final class KafkaBatchProcessSpanLinksExtractor
-    implements SpanLinksExtractor<ConsumerRecords<?, ?>> {
+    implements SpanLinksExtractor<ConsumerAndRecord<ConsumerRecords<?, ?>>> {
-  private final SpanLinksExtractor<ConsumerRecord<?, ?>> singleRecordLinkExtractor;
+  private final SpanLinksExtractor<ConsumerAndRecord<ConsumerRecord<?, ?>>>
+      singleRecordLinkExtractor;
   KafkaBatchProcessSpanLinksExtractor(TextMapPropagator propagator) {
     this.singleRecordLinkExtractor =
@@ -25,12 +26,17 @@ final class KafkaBatchProcessSpanLinksExtractor
   @Override
   public void extract(
-      SpanLinksBuilder spanLinks, Context parentContext, ConsumerRecords<?, ?> records) {
+      SpanLinksBuilder spanLinks,
+      Context parentContext,
+      ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
-    for (ConsumerRecord<?, ?> record : records) {
+    for (ConsumerRecord<?, ?> record : consumerAndRecords.record()) {
       // explicitly passing root to avoid situation where context propagation is turned off and the
       // parent (CONSUMER receive) span is linked
-      singleRecordLinkExtractor.extract(spanLinks, Context.root(), record);
+      singleRecordLinkExtractor.extract(
+          spanLinks,
+          Context.root(),
+          ConsumerAndRecord.create(consumerAndRecords.consumer(), record));
     }
   }
 }

View File

@@ -1,47 +0,0 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.instrumentation.kafka.internal;
import static io.opentelemetry.api.common.AttributeKey.longKey;
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.common.AttributesBuilder;
import io.opentelemetry.context.Context;
import io.opentelemetry.instrumentation.api.instrumenter.AttributesExtractor;
import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.ConsumerRecord;
/**
* This class is internal and is hence not for public use. Its APIs are unstable and can change at
* any time.
*/
public final class KafkaConsumerAdditionalAttributesExtractor
implements AttributesExtractor<ConsumerRecord<?, ?>, Void> {
// TODO: remove this constant when this attribute appears in SemanticAttributes
private static final AttributeKey<Long> MESSAGING_KAFKA_MESSAGE_OFFSET =
longKey("messaging.kafka.message.offset");
@Override
public void onStart(
AttributesBuilder attributes, Context parentContext, ConsumerRecord<?, ?> consumerRecord) {
attributes.put(
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION, (long) consumerRecord.partition());
attributes.put(MESSAGING_KAFKA_MESSAGE_OFFSET, consumerRecord.offset());
if (consumerRecord.value() == null) {
attributes.put(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_TOMBSTONE, true);
}
}
@Override
public void onEnd(
AttributesBuilder attributes,
Context context,
ConsumerRecord<?, ?> consumerRecord,
@Nullable Void unused,
@Nullable Throwable error) {}
}

View File

@@ -0,0 +1,57 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.instrumentation.kafka.internal;
import io.opentelemetry.api.common.AttributesBuilder;
import io.opentelemetry.context.Context;
import io.opentelemetry.instrumentation.api.instrumenter.AttributesExtractor;
import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import java.nio.ByteBuffer;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.ConsumerRecord;
final class KafkaConsumerAttributesExtractor
implements AttributesExtractor<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> {
@Override
public void onStart(
AttributesBuilder attributes,
Context parentContext,
ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
ConsumerRecord<?, ?> record = consumerAndRecord.record();
attributes.put(SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION, (long) record.partition());
attributes.put(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET, record.offset());
Object key = record.key();
if (key != null && canSerialize(key.getClass())) {
attributes.put(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, key.toString());
}
if (record.value() == null) {
attributes.put(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_TOMBSTONE, true);
}
String consumerGroup = consumerAndRecord.consumerGroup();
if (consumerGroup != null) {
attributes.put(SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP, consumerGroup);
}
}
private static boolean canSerialize(Class<?> keyClass) {
// we make a simple assumption here that we can serialize keys by simply calling toString()
// and that does not work for byte[] or ByteBuffer
return !(keyClass.isArray() || keyClass == ByteBuffer.class);
}
@Override
public void onEnd(
AttributesBuilder attributes,
Context context,
ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord,
@Nullable Void unused,
@Nullable Throwable error) {}
}
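
The canSerialize guard encodes a deliberate assumption: a key is only recorded when its toString() yields something readable, so arrays and ByteBuffers are skipped rather than recorded as object-identity strings. A tiny self-contained illustration of the predicate:

import java.nio.ByteBuffer;

class KeySerializationSketch {
  // same predicate as the extractor above
  static boolean canSerialize(Class<?> keyClass) {
    return !(keyClass.isArray() || keyClass == ByteBuffer.class);
  }

  public static void main(String[] args) {
    System.out.println(canSerialize(Integer.class));    // true  -> key recorded, e.g. "10"
    System.out.println(canSerialize(String.class));     // true
    System.out.println(canSerialize(byte[].class));     // false -> attribute omitted
    System.out.println(canSerialize(ByteBuffer.class)); // false -> attribute omitted
  }
}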

View File

@@ -14,78 +14,78 @@ import java.util.stream.StreamSupport;
 import javax.annotation.Nullable;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
-/**
- * This class is internal and is hence not for public use. Its APIs are unstable and can change at
- * any time.
- */
-public enum KafkaConsumerAttributesGetter
-    implements MessagingAttributesGetter<ConsumerRecord<?, ?>, Void> {
+enum KafkaConsumerAttributesGetter
+    implements MessagingAttributesGetter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> {
   INSTANCE;
   @Override
-  public String getSystem(ConsumerRecord<?, ?> consumerRecord) {
+  public String getSystem(ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
     return "kafka";
   }
   @Override
-  public String getDestinationKind(ConsumerRecord<?, ?> consumerRecord) {
+  public String getDestinationKind(ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
     return SemanticAttributes.MessagingDestinationKindValues.TOPIC;
   }
   @Override
-  public String getDestination(ConsumerRecord<?, ?> consumerRecord) {
-    return consumerRecord.topic();
+  public String getDestination(ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
+    return consumerAndRecord.record().topic();
   }
   @Override
-  public boolean isTemporaryDestination(ConsumerRecord<?, ?> consumerRecord) {
+  public boolean isTemporaryDestination(ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
     return false;
   }
   @Override
   @Nullable
-  public String getProtocol(ConsumerRecord<?, ?> consumerRecord) {
+  public String getProtocol(ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
     return null;
   }
   @Override
   @Nullable
-  public String getProtocolVersion(ConsumerRecord<?, ?> consumerRecord) {
+  public String getProtocolVersion(ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
     return null;
   }
   @Override
   @Nullable
-  public String getUrl(ConsumerRecord<?, ?> consumerRecord) {
+  public String getUrl(ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
     return null;
   }
   @Override
   @Nullable
-  public String getConversationId(ConsumerRecord<?, ?> consumerRecord) {
+  public String getConversationId(ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
     return null;
   }
   @Override
-  public Long getMessagePayloadSize(ConsumerRecord<?, ?> consumerRecord) {
-    return (long) consumerRecord.serializedValueSize();
+  public Long getMessagePayloadSize(ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
+    return (long) consumerAndRecord.record().serializedValueSize();
   }
   @Override
   @Nullable
-  public Long getMessagePayloadCompressedSize(ConsumerRecord<?, ?> consumerRecord) {
+  public Long getMessagePayloadCompressedSize(
+      ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
     return null;
   }
   @Override
   @Nullable
-  public String getMessageId(ConsumerRecord<?, ?> consumerRecord, @Nullable Void unused) {
+  public String getMessageId(
+      ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord, @Nullable Void unused) {
     return null;
   }
   @Override
-  public List<String> getMessageHeader(ConsumerRecord<?, ?> consumerRecord, String name) {
-    return StreamSupport.stream(consumerRecord.headers().headers(name).spliterator(), false)
+  public List<String> getMessageHeader(
+      ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord, String name) {
+    return StreamSupport.stream(
+            consumerAndRecord.record().headers().headers(name).spliterator(), false)
         .map(header -> new String(header.value(), StandardCharsets.UTF_8))
         .collect(Collectors.toList());
   }

View File

@@ -15,23 +15,21 @@ import javax.annotation.Nullable;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.common.record.TimestampType;
-/**
- * This class is internal and is hence not for public use. Its APIs are unstable and can change at
- * any time.
- */
-public final class KafkaConsumerExperimentalAttributesExtractor
-    implements AttributesExtractor<ConsumerRecord<?, ?>, Void> {
+final class KafkaConsumerExperimentalAttributesExtractor
+    implements AttributesExtractor<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> {
   private static final AttributeKey<Long> KAFKA_RECORD_QUEUE_TIME_MS =
       longKey("kafka.record.queue_time_ms");
   @Override
   public void onStart(
-      AttributesBuilder attributes, Context parentContext, ConsumerRecord<?, ?> consumerRecord) {
+      AttributesBuilder attributes,
+      Context parentContext,
+      ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
     // don't record a duration if the message was sent from an old Kafka client
-    if (consumerRecord.timestampType() != TimestampType.NO_TIMESTAMP_TYPE) {
-      long produceTime = consumerRecord.timestamp();
+    if (consumerAndRecord.record().timestampType() != TimestampType.NO_TIMESTAMP_TYPE) {
+      long produceTime = consumerAndRecord.record().timestamp();
       // this attribute shows how much time elapsed between the producer and the consumer of this
       // message, which can be helpful for identifying queue bottlenecks
       attributes.put(
@@ -43,7 +41,7 @@ public final class KafkaConsumerExperimentalAttributesExtractor
   public void onEnd(
       AttributesBuilder attributes,
       Context context,
-      ConsumerRecord<?, ?> consumerRecord,
+      ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord,
       @Nullable Void unused,
       @Nullable Throwable error) {}
 }

View File

@@ -13,24 +13,20 @@ import javax.annotation.Nullable;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.common.header.Header;
-/**
- * This class is internal and is hence not for public use. Its APIs are unstable and can change at
- * any time.
- */
-public enum KafkaConsumerRecordGetter implements TextMapGetter<ConsumerRecord<?, ?>> {
+enum KafkaConsumerRecordGetter implements TextMapGetter<ConsumerAndRecord<ConsumerRecord<?, ?>>> {
   INSTANCE;
   @Override
-  public Iterable<String> keys(ConsumerRecord<?, ?> carrier) {
-    return StreamSupport.stream(carrier.headers().spliterator(), false)
+  public Iterable<String> keys(ConsumerAndRecord<ConsumerRecord<?, ?>> carrier) {
+    return StreamSupport.stream(carrier.record().headers().spliterator(), false)
         .map(Header::key)
         .collect(Collectors.toList());
   }
   @Nullable
   @Override
-  public String get(@Nullable ConsumerRecord<?, ?> carrier, String key) {
-    Header header = carrier.headers().lastHeader(key);
+  public String get(@Nullable ConsumerAndRecord<ConsumerRecord<?, ?>> carrier, String key) {
+    Header header = carrier.record().headers().lastHeader(key);
     if (header == null) {
       return null;
     }
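
The getter above is consumed by propagators rather than called directly. A minimal sketch of the extraction it enables, shown here with the public W3C propagator and an inline getter over a bare ConsumerRecord (KafkaConsumerRecordGetter itself is package-private):

import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator;
import io.opentelemetry.context.Context;
import io.opentelemetry.context.propagation.TextMapGetter;
import java.nio.charset.StandardCharsets;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.header.Header;

final class HeaderContextExtractionSketch {
  private static final TextMapGetter<ConsumerRecord<?, ?>> GETTER =
      new TextMapGetter<ConsumerRecord<?, ?>>() {
        @Override
        public Iterable<String> keys(ConsumerRecord<?, ?> carrier) {
          return StreamSupport.stream(carrier.headers().spliterator(), false)
              .map(Header::key)
              .collect(Collectors.toList());
        }

        @Nullable
        @Override
        public String get(@Nullable ConsumerRecord<?, ?> carrier, String key) {
          Header header = carrier.headers().lastHeader(key);
          return header == null || header.value() == null
              ? null
              : new String(header.value(), StandardCharsets.UTF_8);
        }
      };

  // returns the upstream trace context carried in the record's headers
  static Context extract(ConsumerRecord<?, ?> record) {
    return W3CTraceContextPropagator.getInstance().extract(Context.current(), record, GETTER);
  }
}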

View File

@@ -1,41 +0,0 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.instrumentation.kafka.internal;
import io.opentelemetry.context.propagation.TextMapGetter;
import java.nio.charset.StandardCharsets;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import javax.annotation.Nullable;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
/**
* This class is internal and is hence not for public use. Its APIs are unstable and can change at
* any time.
*/
public final class KafkaHeadersGetter implements TextMapGetter<Headers> {
@Override
public Iterable<String> keys(Headers carrier) {
return StreamSupport.stream(carrier.spliterator(), false)
.map(Header::key)
.collect(Collectors.toList());
}
@Nullable
@Override
public String get(@Nullable Headers carrier, String key) {
Header header = carrier.lastHeader(key);
if (header == null) {
return null;
}
byte[] value = header.value();
if (value == null) {
return null;
}
return new String(value, StandardCharsets.UTF_8);
}
}

View File

@@ -98,44 +98,48 @@ public final class KafkaInstrumenterFactory {
         .addAttributesExtractor(
             buildMessagingAttributesExtractor(getter, operation, capturedHeaders))
         .addAttributesExtractors(extractors)
-        .addAttributesExtractor(new KafkaProducerAdditionalAttributesExtractor())
+        .addAttributesExtractor(new KafkaProducerAttributesExtractor())
         .setErrorCauseExtractor(errorCauseExtractor)
         .buildInstrumenter(SpanKindExtractor.alwaysProducer());
   }
-  public Instrumenter<ConsumerRecords<?, ?>, Void> createConsumerReceiveInstrumenter() {
+  public Instrumenter<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void>
+      createConsumerReceiveInstrumenter() {
     KafkaReceiveAttributesGetter getter = KafkaReceiveAttributesGetter.INSTANCE;
     MessageOperation operation = MessageOperation.RECEIVE;
-    return Instrumenter.<ConsumerRecords<?, ?>, Void>builder(
+    return Instrumenter.<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void>builder(
             openTelemetry,
             instrumentationName,
             MessagingSpanNameExtractor.create(getter, operation))
         .addAttributesExtractor(
             buildMessagingAttributesExtractor(getter, operation, capturedHeaders))
+        .addAttributesExtractor(KafkaReceiveAttributesExtractor.INSTANCE)
         .setErrorCauseExtractor(errorCauseExtractor)
         .setEnabled(messagingReceiveInstrumentationEnabled)
         .buildInstrumenter(SpanKindExtractor.alwaysConsumer());
   }
-  public Instrumenter<ConsumerRecord<?, ?>, Void> createConsumerProcessInstrumenter() {
+  public Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void>
+      createConsumerProcessInstrumenter() {
     return createConsumerOperationInstrumenter(MessageOperation.PROCESS, Collections.emptyList());
   }
-  public Instrumenter<ConsumerRecord<?, ?>, Void> createConsumerOperationInstrumenter(
+  public Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void>
+      createConsumerOperationInstrumenter(
       MessageOperation operation,
-      Iterable<AttributesExtractor<ConsumerRecord<?, ?>, Void>> extractors) {
+      Iterable<AttributesExtractor<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void>> extractors) {
     KafkaConsumerAttributesGetter getter = KafkaConsumerAttributesGetter.INSTANCE;
-    InstrumenterBuilder<ConsumerRecord<?, ?>, Void> builder =
-        Instrumenter.<ConsumerRecord<?, ?>, Void>builder(
+    InstrumenterBuilder<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> builder =
+        Instrumenter.<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void>builder(
                 openTelemetry,
                 instrumentationName,
                 MessagingSpanNameExtractor.create(getter, operation))
            .addAttributesExtractor(
                buildMessagingAttributesExtractor(getter, operation, capturedHeaders))
-            .addAttributesExtractor(new KafkaConsumerAdditionalAttributesExtractor())
+            .addAttributesExtractor(new KafkaConsumerAttributesExtractor())
            .addAttributesExtractors(extractors)
            .setErrorCauseExtractor(errorCauseExtractor);
     if (captureExperimentalSpanAttributes) {
@@ -144,7 +148,7 @@ public final class KafkaInstrumenterFactory {
     if (messagingReceiveInstrumentationEnabled) {
       builder.addSpanLinksExtractor(
-          new PropagatorBasedSpanLinksExtractor<ConsumerRecord<?, ?>>(
+          new PropagatorBasedSpanLinksExtractor<>(
              openTelemetry.getPropagators().getTextMapPropagator(),
              KafkaConsumerRecordGetter.INSTANCE));
       return builder.buildInstrumenter(SpanKindExtractor.alwaysConsumer());
@@ -153,16 +157,18 @@ public final class KafkaInstrumenterFactory {
     }
   }
-  public Instrumenter<ConsumerRecords<?, ?>, Void> createBatchProcessInstrumenter() {
-    KafkaBatchProcessAttributesGetter getter = KafkaBatchProcessAttributesGetter.INSTANCE;
+  public Instrumenter<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void>
+      createBatchProcessInstrumenter() {
+    KafkaReceiveAttributesGetter getter = KafkaReceiveAttributesGetter.INSTANCE;
     MessageOperation operation = MessageOperation.PROCESS;
-    return Instrumenter.<ConsumerRecords<?, ?>, Void>builder(
+    return Instrumenter.<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void>builder(
            openTelemetry,
            instrumentationName,
            MessagingSpanNameExtractor.create(getter, operation))
        .addAttributesExtractor(
            buildMessagingAttributesExtractor(getter, operation, capturedHeaders))
+        .addAttributesExtractor(KafkaReceiveAttributesExtractor.INSTANCE)
        .addSpanLinksExtractor(
            new KafkaBatchProcessSpanLinksExtractor(
                openTelemetry.getPropagators().getTextMapPropagator()))
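
Every call site touched by this commit drives these instrumenters through the same shouldStart/start/end lifecycle. A hedged sketch using the factory above; the instrumentation name is a hypothetical placeholder and the classes are internal APIs as defined in this commit:

import io.opentelemetry.api.GlobalOpenTelemetry;
import io.opentelemetry.context.Context;
import io.opentelemetry.instrumentation.api.instrumenter.Instrumenter;
import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
import io.opentelemetry.instrumentation.kafka.internal.KafkaInstrumenterFactory;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;

class InstrumenterLifecycleSketch {
  private static final Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> INSTRUMENTER =
      new KafkaInstrumenterFactory(GlobalOpenTelemetry.get(), "io.opentelemetry.example") // hypothetical name
          .createConsumerProcessInstrumenter();

  static void process(Consumer<?, ?> consumer, ConsumerRecord<?, ?> record) {
    ConsumerAndRecord<ConsumerRecord<?, ?>> request = ConsumerAndRecord.create(consumer, record);
    Context parentContext = Context.current();
    if (!INSTRUMENTER.shouldStart(parentContext, request)) {
      return;
    }
    Context context = INSTRUMENTER.start(parentContext, request);
    try {
      // ... handle the record ...
    } finally {
      INSTRUMENTER.end(context, request, null, null);
    }
  }
}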

View File

@@ -5,36 +5,37 @@
 package io.opentelemetry.instrumentation.kafka.internal;
-import static io.opentelemetry.api.common.AttributeKey.longKey;
-import io.opentelemetry.api.common.AttributeKey;
 import io.opentelemetry.api.common.AttributesBuilder;
 import io.opentelemetry.context.Context;
 import io.opentelemetry.instrumentation.api.instrumenter.AttributesExtractor;
 import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
+import java.nio.ByteBuffer;
 import javax.annotation.Nullable;
 import org.apache.kafka.clients.producer.ProducerRecord;
 import org.apache.kafka.clients.producer.RecordMetadata;
-/**
- * This class is internal and is hence not for public use. Its APIs are unstable and can change at
- * any time.
- */
-final class KafkaProducerAdditionalAttributesExtractor
+final class KafkaProducerAttributesExtractor
     implements AttributesExtractor<ProducerRecord<?, ?>, RecordMetadata> {
-  // TODO: remove this constant when this attribute appears in SemanticAttributes
-  private static final AttributeKey<Long> MESSAGING_KAFKA_MESSAGE_OFFSET =
-      longKey("messaging.kafka.message.offset");
   @Override
   public void onStart(
-      AttributesBuilder attributes, Context parentContext, ProducerRecord<?, ?> producerRecord) {
-    if (producerRecord.value() == null) {
+      AttributesBuilder attributes, Context parentContext, ProducerRecord<?, ?> record) {
+    Object key = record.key();
+    if (key != null && canSerialize(key.getClass())) {
+      attributes.put(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, key.toString());
+    }
+    if (record.value() == null) {
       attributes.put(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_TOMBSTONE, true);
     }
   }
+  private static boolean canSerialize(Class<?> keyClass) {
+    // we make a simple assumption here that we can serialize keys by simply calling toString()
+    // and that does not work for byte[] or ByteBuffer
+    return !(keyClass.isArray() || keyClass == ByteBuffer.class);
+  }
   @Override
   public void onEnd(
       AttributesBuilder attributes,
@@ -46,7 +47,7 @@ final class KafkaProducerAdditionalAttributesExtractor
     if (recordMetadata != null) {
       attributes.put(
           SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION, recordMetadata.partition());
-      attributes.put(MESSAGING_KAFKA_MESSAGE_OFFSET, recordMetadata.offset());
+      attributes.put(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET, recordMetadata.offset());
     }
   }
 }

View File

@@ -0,0 +1,38 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.instrumentation.kafka.internal;
import io.opentelemetry.api.common.AttributesBuilder;
import io.opentelemetry.context.Context;
import io.opentelemetry.instrumentation.api.instrumenter.AttributesExtractor;
import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.ConsumerRecords;
enum KafkaReceiveAttributesExtractor
implements AttributesExtractor<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void> {
INSTANCE;
@Override
public void onStart(
AttributesBuilder attributes,
Context parentContext,
ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
String consumerGroup = consumerAndRecords.consumerGroup();
if (consumerGroup != null) {
attributes.put(SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP, consumerGroup);
}
}
@Override
public void onEnd(
AttributesBuilder attributes,
Context context,
ConsumerAndRecord<ConsumerRecords<?, ?>> request,
@Nullable Void unused,
@Nullable Throwable error) {}
}

View File

@@ -16,29 +16,25 @@ import javax.annotation.Nullable;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.common.TopicPartition;
-/**
- * This class is internal and is hence not for public use. Its APIs are unstable and can change at
- * any time.
- */
-public enum KafkaReceiveAttributesGetter
-    implements MessagingAttributesGetter<ConsumerRecords<?, ?>, Void> {
+enum KafkaReceiveAttributesGetter
+    implements MessagingAttributesGetter<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void> {
   INSTANCE;
   @Override
-  public String getSystem(ConsumerRecords<?, ?> consumerRecords) {
+  public String getSystem(ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
     return "kafka";
   }
   @Override
-  public String getDestinationKind(ConsumerRecords<?, ?> consumerRecords) {
+  public String getDestinationKind(ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
     return SemanticAttributes.MessagingDestinationKindValues.TOPIC;
   }
   @Override
   @Nullable
-  public String getDestination(ConsumerRecords<?, ?> consumerRecords) {
+  public String getDestination(ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
     Set<String> topics =
-        consumerRecords.partitions().stream()
+        consumerAndRecords.record().partitions().stream()
             .map(TopicPartition::topic)
             .collect(Collectors.toSet());
     // only return topic when there's exactly one in the batch
@@ -46,55 +42,59 @@ public enum KafkaReceiveAttributesGetter
   }
   @Override
-  public boolean isTemporaryDestination(ConsumerRecords<?, ?> consumerRecords) {
+  public boolean isTemporaryDestination(
+      ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
     return false;
   }
   @Override
   @Nullable
-  public String getProtocol(ConsumerRecords<?, ?> consumerRecords) {
+  public String getProtocol(ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
     return null;
   }
   @Override
   @Nullable
-  public String getProtocolVersion(ConsumerRecords<?, ?> consumerRecords) {
+  public String getProtocolVersion(ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
     return null;
   }
   @Override
   @Nullable
-  public String getUrl(ConsumerRecords<?, ?> consumerRecords) {
+  public String getUrl(ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
     return null;
   }
   @Override
   @Nullable
-  public String getConversationId(ConsumerRecords<?, ?> consumerRecords) {
+  public String getConversationId(ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
     return null;
   }
   @Override
   @Nullable
-  public Long getMessagePayloadSize(ConsumerRecords<?, ?> consumerRecords) {
+  public Long getMessagePayloadSize(ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
     return null;
   }
   @Override
   @Nullable
-  public Long getMessagePayloadCompressedSize(ConsumerRecords<?, ?> consumerRecords) {
+  public Long getMessagePayloadCompressedSize(
+      ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
     return null;
   }
   @Override
   @Nullable
-  public String getMessageId(ConsumerRecords<?, ?> consumerRecords, @Nullable Void unused) {
+  public String getMessageId(
+      ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords, @Nullable Void unused) {
     return null;
   }
   @Override
-  public List<String> getMessageHeader(ConsumerRecords<?, ?> records, String name) {
-    return StreamSupport.stream(records.spliterator(), false)
+  public List<String> getMessageHeader(
+      ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords, String name) {
+    return StreamSupport.stream(consumerAndRecords.record().spliterator(), false)
        .flatMap(
            consumerRecord ->
                StreamSupport.stream(consumerRecord.headers().headers(name).spliterator(), false))

View File

@@ -27,6 +27,8 @@ tasks {
   withType<Test>().configureEach {
     usesService(gradle.sharedServices.registrations["testcontainersBuildService"].service)
+    systemProperty("testLatestDeps", findProperty("testLatestDeps") as Boolean)
     // TODO run tests both with and without experimental span attributes
     jvmArgs("-Dotel.instrumentation.kafka.experimental-span-attributes=true")
   }
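
The forwarded system property is what the Spock specs further down branch on; a minimal sketch of the read side, using the standard JDK helper:

class TestLatestDepsSketch {
  public static void main(String[] args) {
    // set by the systemProperty(...) call above, equivalent to -DtestLatestDeps=true
    boolean latestDeps = Boolean.getBoolean("testLatestDeps");
    System.out.println(latestDeps);
  }
}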

View File

@@ -7,6 +7,7 @@ package io.opentelemetry.javaagent.instrumentation.kafkastreams;
 import io.opentelemetry.api.GlobalOpenTelemetry;
 import io.opentelemetry.instrumentation.api.instrumenter.Instrumenter;
+import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
 import io.opentelemetry.instrumentation.kafka.internal.KafkaInstrumenterFactory;
 import io.opentelemetry.javaagent.bootstrap.internal.ExperimentalConfig;
 import io.opentelemetry.javaagent.bootstrap.internal.InstrumentationConfig;
@@ -16,7 +17,7 @@ public final class KafkaStreamsSingletons {
   private static final String INSTRUMENTATION_NAME = "io.opentelemetry.kafka-streams-0.11";
-  private static final Instrumenter<ConsumerRecord<?, ?>, Void> INSTRUMENTER =
+  private static final Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> INSTRUMENTER =
       new KafkaInstrumenterFactory(GlobalOpenTelemetry.get(), INSTRUMENTATION_NAME)
           .setCapturedHeaders(ExperimentalConfig.get().getMessagingHeaders())
           .setCaptureExperimentalSpanAttributes(
@@ -26,7 +27,7 @@ public final class KafkaStreamsSingletons {
               ExperimentalConfig.get().messagingReceiveInstrumentationEnabled())
           .createConsumerProcessInstrumenter();
-  public static Instrumenter<ConsumerRecord<?, ?>, Void> instrumenter() {
+  public static Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> instrumenter() {
     return INSTRUMENTER;
   }

View File

@@ -15,6 +15,7 @@ import static net.bytebuddy.matcher.ElementMatchers.returns;
 import io.opentelemetry.context.Context;
 import io.opentelemetry.instrumentation.api.util.VirtualField;
+import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
 import io.opentelemetry.javaagent.extension.instrumentation.TypeInstrumentation;
 import io.opentelemetry.javaagent.extension.instrumentation.TypeTransformer;
 import net.bytebuddy.asm.Advice;
@@ -61,11 +62,13 @@ public class PartitionGroupInstrumentation implements TypeInstrumentation {
       // use the receive CONSUMER span as parent if it's available
       Context parentContext = receiveContext != null ? receiveContext : currentContext();
-      if (!instrumenter().shouldStart(parentContext, record.value)) {
+      ConsumerAndRecord<ConsumerRecord<?, ?>> request =
+          ConsumerAndRecord.create(null, record.value);
+      if (!instrumenter().shouldStart(parentContext, request)) {
         return;
       }
-      Context context = instrumenter().start(parentContext, record.value);
+      Context context = instrumenter().start(parentContext, request);
       holder.set(record.value, context, context.makeCurrent());
     }
   }

View File

@@ -11,6 +11,7 @@ import static net.bytebuddy.matcher.ElementMatchers.isPublic;
 import static net.bytebuddy.matcher.ElementMatchers.named;
 import io.opentelemetry.context.Context;
+import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
 import io.opentelemetry.javaagent.extension.instrumentation.TypeInstrumentation;
 import io.opentelemetry.javaagent.extension.instrumentation.TypeTransformer;
 import net.bytebuddy.asm.Advice;
@@ -51,7 +52,8 @@ public class StreamTaskInstrumentation implements TypeInstrumentation {
       Context context = holder.getContext();
       if (context != null) {
         holder.closeScope();
-        instrumenter().end(context, holder.getRecord(), null, throwable);
+        instrumenter()
+            .end(context, ConsumerAndRecord.create(null, holder.getRecord()), null, throwable);
       }
     }
   }

View File

@@ -40,7 +40,7 @@ class KafkaStreamsDefaultTest extends KafkaStreamsBaseTest {
     } catch (ClassNotFoundException | NoClassDefFoundError e) {
       builder = Class.forName("org.apache.kafka.streams.StreamsBuilder").newInstance()
     }
-    KStream<String, String> textLines = builder.stream(STREAM_PENDING)
+    KStream<Integer, String> textLines = builder.stream(STREAM_PENDING)
     def values = textLines
       .mapValues(new ValueMapper<String, String>() {
         @Override
@@ -53,11 +53,11 @@ class KafkaStreamsDefaultTest extends KafkaStreamsBaseTest {
     KafkaStreams streams
     try {
       // Different api for test and latestDepTest.
-      values.to(Serdes.String(), Serdes.String(), STREAM_PROCESSED)
+      values.to(Serdes.Integer(), Serdes.String(), STREAM_PROCESSED)
       streams = new KafkaStreams(builder, config)
     } catch (MissingMethodException e) {
       def producer = Class.forName("org.apache.kafka.streams.kstream.Produced")
-        .with(Serdes.String(), Serdes.String())
+        .with(Serdes.Integer(), Serdes.String())
       values.to(STREAM_PROCESSED, producer)
       streams = new KafkaStreams(builder.build(), config)
     }
@@ -65,7 +65,7 @@ class KafkaStreamsDefaultTest extends KafkaStreamsBaseTest {
     when:
     String greeting = "TESTING TESTING 123!"
-    KafkaStreamsBaseTest.producer.send(new ProducerRecord<>(STREAM_PENDING, greeting))
+    KafkaStreamsBaseTest.producer.send(new ProducerRecord<>(STREAM_PENDING, 10, greeting))
     then:
     awaitUntilConsumerIsReady()
@@ -74,8 +74,8 @@ class KafkaStreamsDefaultTest extends KafkaStreamsBaseTest {
     for (record in records) {
       Span.current().setAttribute("testing", 123)
+      assert record.key() == 10
       assert record.value() == greeting.toLowerCase()
-      assert record.key() == null
       if (receivedHeaders == null) {
         receivedHeaders = record.headers()
@@ -101,7 +101,8 @@ class KafkaStreamsDefaultTest extends KafkaStreamsBaseTest {
             "$SemanticAttributes.MESSAGING_DESTINATION_NAME" STREAM_PENDING
             "$SemanticAttributes.MESSAGING_DESTINATION_KIND" "topic"
             "$SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION" { it >= 0 }
-            "messaging.kafka.message.offset" 0
+            "$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET" 0
+            "$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY" "10"
           }
         }
@@ -118,6 +119,9 @@ class KafkaStreamsDefaultTest extends KafkaStreamsBaseTest {
             "$SemanticAttributes.MESSAGING_DESTINATION_NAME" STREAM_PENDING
             "$SemanticAttributes.MESSAGING_DESTINATION_KIND" "topic"
             "$SemanticAttributes.MESSAGING_OPERATION" "receive"
+            if (Boolean.getBoolean("testLatestDeps")) {
+              "$SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP" "test-application"
+            }
           }
         }
         // kafka-stream CONSUMER
@@ -133,7 +137,8 @@ class KafkaStreamsDefaultTest extends KafkaStreamsBaseTest {
             "$SemanticAttributes.MESSAGING_OPERATION" "process"
             "$SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES" Long
             "$SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION" { it >= 0 }
-            "messaging.kafka.message.offset" 0
+            "$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET" 0
+            "$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY" "10"
             "kafka.record.queue_time_ms" { it >= 0 }
             "asdf" "testing"
           }
@@ -148,7 +153,7 @@ class KafkaStreamsDefaultTest extends KafkaStreamsBaseTest {
            "$SemanticAttributes.MESSAGING_DESTINATION_NAME" STREAM_PROCESSED
            "$SemanticAttributes.MESSAGING_DESTINATION_KIND" "topic"
            "$SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION" { it >= 0 }
-            "messaging.kafka.message.offset" 0
+            "$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET" 0
          }
        }
@@ -165,6 +170,9 @@ class KafkaStreamsDefaultTest extends KafkaStreamsBaseTest {
            "$SemanticAttributes.MESSAGING_DESTINATION_NAME" STREAM_PROCESSED
            "$SemanticAttributes.MESSAGING_DESTINATION_KIND" "topic"
            "$SemanticAttributes.MESSAGING_OPERATION" "receive"
+            if (Boolean.getBoolean("testLatestDeps")) {
+              "$SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP" "test"
+            }
          }
        }
        // kafka-clients CONSUMER process
@@ -180,7 +188,11 @@ class KafkaStreamsDefaultTest extends KafkaStreamsBaseTest {
            "$SemanticAttributes.MESSAGING_OPERATION" "process"
            "$SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES" Long
            "$SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION" { it >= 0 }
-            "messaging.kafka.message.offset" 0
+            "$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET" 0
+            "$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY" "10"
+            if (Boolean.getBoolean("testLatestDeps")) {
+              "$SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP" "test"
+            }
            "kafka.record.queue_time_ms" { it >= 0 }
            "testing" 123
          }

View File

@@ -40,7 +40,7 @@ class KafkaStreamsSuppressReceiveSpansTest extends KafkaStreamsBaseTest {
     } catch (ClassNotFoundException | NoClassDefFoundError e) {
       builder = Class.forName("org.apache.kafka.streams.StreamsBuilder").newInstance()
     }
-    KStream<String, String> textLines = builder.stream(STREAM_PENDING)
+    KStream<Integer, String> textLines = builder.stream(STREAM_PENDING)
     def values = textLines
       .mapValues(new ValueMapper<String, String>() {
         @Override
@@ -53,11 +53,11 @@ class KafkaStreamsSuppressReceiveSpansTest extends KafkaStreamsBaseTest {
     KafkaStreams streams
     try {
       // Different api for test and latestDepTest.
-      values.to(Serdes.String(), Serdes.String(), STREAM_PROCESSED)
+      values.to(Serdes.Integer(), Serdes.String(), STREAM_PROCESSED)
       streams = new KafkaStreams(builder, config)
     } catch (MissingMethodException e) {
       def producer = Class.forName("org.apache.kafka.streams.kstream.Produced")
-        .with(Serdes.String(), Serdes.String())
+        .with(Serdes.Integer(), Serdes.String())
       values.to(STREAM_PROCESSED, producer)
       streams = new KafkaStreams(builder.build(), config)
     }
@@ -65,7 +65,7 @@ class KafkaStreamsSuppressReceiveSpansTest extends KafkaStreamsBaseTest {
     when:
     String greeting = "TESTING TESTING 123!"
-    KafkaStreamsBaseTest.producer.send(new ProducerRecord<>(STREAM_PENDING, greeting))
+    KafkaStreamsBaseTest.producer.send(new ProducerRecord<>(STREAM_PENDING, 10, greeting))
     then:
     // check that the message was received
@@ -74,8 +74,8 @@ class KafkaStreamsSuppressReceiveSpansTest extends KafkaStreamsBaseTest {
     for (record in records) {
       Span.current().setAttribute("testing", 123)
+      assert record.key() == 10
       assert record.value() == greeting.toLowerCase()
-      assert record.key() == null
       if (receivedHeaders == null) {
         receivedHeaders = record.headers()
@@ -96,7 +96,8 @@ class KafkaStreamsSuppressReceiveSpansTest extends KafkaStreamsBaseTest {
             "$SemanticAttributes.MESSAGING_DESTINATION_NAME" STREAM_PENDING
             "$SemanticAttributes.MESSAGING_DESTINATION_KIND" "topic"
             "$SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION" { it >= 0 }
-            "messaging.kafka.message.offset" 0
+            "$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET" 0
+            "$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY" "10"
           }
         }
         // kafka-stream CONSUMER
@@ -111,7 +112,8 @@ class KafkaStreamsSuppressReceiveSpansTest extends KafkaStreamsBaseTest {
             "$SemanticAttributes.MESSAGING_OPERATION" "process"
             "$SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES" Long
             "$SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION" { it >= 0 }
-            "messaging.kafka.message.offset" 0
+            "$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET" 0
+            "$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY" "10"
             "kafka.record.queue_time_ms" { it >= 0 }
             "asdf" "testing"
           }
@@ -129,7 +131,7 @@ class KafkaStreamsSuppressReceiveSpansTest extends KafkaStreamsBaseTest {
             "$SemanticAttributes.MESSAGING_DESTINATION_NAME" STREAM_PROCESSED
             "$SemanticAttributes.MESSAGING_DESTINATION_KIND" "topic"
             "$SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION" { it >= 0 }
-            "messaging.kafka.message.offset" 0
+            "$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET" 0
           }
         }
         // kafka-clients CONSUMER process
@@ -144,7 +146,11 @@ class KafkaStreamsSuppressReceiveSpansTest extends KafkaStreamsBaseTest {
             "$SemanticAttributes.MESSAGING_OPERATION" "process"
             "$SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES" Long
             "$SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION" { it >= 0 }
-            "messaging.kafka.message.offset" 0
+            "$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET" 0
+            "$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY" "10"
+            if (Boolean.getBoolean("testLatestDeps")) {
+              "$SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP" "test"
+            }
             "kafka.record.queue_time_ms" { it >= 0 }
             "testing" 123
           }

View File

@ -5,7 +5,6 @@
package io.opentelemetry.instrumentation.spring.autoconfigure.kafka; package io.opentelemetry.instrumentation.spring.autoconfigure.kafka;
import static io.opentelemetry.api.common.AttributeKey.longKey;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo; import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies; import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;
@ -47,7 +46,7 @@ class KafkaIntegrationTest {
@BeforeAll @BeforeAll
static void setUpKafka() { static void setUpKafka() {
kafka = kafka =
new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:5.4.3")) new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:6.1.9"))
.waitingFor(Wait.forLogMessage(".*started \\(kafka.server.KafkaServer\\).*", 1)) .waitingFor(Wait.forLogMessage(".*started \\(kafka.server.KafkaServer\\).*", 1))
.withStartupTimeout(Duration.ofMinutes(1)); .withStartupTimeout(Duration.ofMinutes(1));
kafka.start(); kafka.start();
@ -112,8 +111,9 @@ class KafkaIntegrationTest {
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION, SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative), AbstractLongAssert::isNotNegative),
satisfies( satisfies(
longKey("messaging.kafka.message.offset"), SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative)), AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10")),
span -> span ->
span.hasName("testTopic process") span.hasName("testTopic process")
.hasKind(SpanKind.CONSUMER) .hasKind(SpanKind.CONSUMER)
@ -130,8 +130,11 @@ class KafkaIntegrationTest {
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION, SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative), AbstractLongAssert::isNotNegative),
satisfies( satisfies(
longKey("messaging.kafka.message.offset"), SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative)), AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP, "testListener")),
span -> span.hasName("consumer").hasParent(trace.getSpan(2)))); span -> span.hasName("consumer").hasParent(trace.getSpan(2))));
} }
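
In these assertions, equalTo pins an exact attribute value while satisfies only constrains it; the latter is used for values that vary between runs, such as partition and offset. Both forms return an AttributeAssertion, so they compose freely (class and method names in this sketch are illustrative):

import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;

import io.opentelemetry.sdk.testing.assertj.AttributeAssertion;
import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import org.assertj.core.api.AbstractLongAssert;

final class AttributeAssertionStyleSketch {
  private AttributeAssertionStyleSketch() {}

  // exact match: the test controls the message key, so it can be pinned
  static AttributeAssertion exactMessageKey() {
    return equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10");
  }

  // constraint only: partition and offset vary between runs
  static AttributeAssertion nonNegativeOffset() {
    return satisfies(
        SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET, AbstractLongAssert::isNotNegative);
  }
}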

View File

@ -73,8 +73,9 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION, SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative), AbstractLongAssert::isNotNegative),
satisfies( satisfies(
longKey("messaging.kafka.message.offset"), SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative))); AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10")));
producer.set(trace.getSpan(1)); producer.set(trace.getSpan(1));
}, },
@ -89,7 +90,10 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
equalTo( equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"), SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"), equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive")), equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testSingleListener")),
span -> span ->
span.hasName("testSingleTopic process") span.hasName("testSingleTopic process")
.hasKind(SpanKind.CONSUMER) .hasKind(SpanKind.CONSUMER)
@ -108,8 +112,12 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION, SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative), AbstractLongAssert::isNotNegative),
satisfies( satisfies(
longKey("messaging.kafka.message.offset"), SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative), AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testSingleListener"),
satisfies( satisfies(
longKey("kafka.record.queue_time_ms"), longKey("kafka.record.queue_time_ms"),
AbstractLongAssert::isNotNegative)), AbstractLongAssert::isNotNegative)),
@ -147,8 +155,9 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION, SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative), AbstractLongAssert::isNotNegative),
satisfies( satisfies(
longKey("messaging.kafka.message.offset"), SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative))); AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10")));
producer.set(trace.getSpan(1)); producer.set(trace.getSpan(1));
}, },
@ -163,7 +172,10 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
equalTo( equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"), SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"), equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive")), equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testSingleListener")),
span -> span ->
span.hasName("testSingleTopic process") span.hasName("testSingleTopic process")
.hasKind(SpanKind.CONSUMER) .hasKind(SpanKind.CONSUMER)
@ -184,8 +196,12 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION, SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative), AbstractLongAssert::isNotNegative),
satisfies( satisfies(
longKey("messaging.kafka.message.offset"), SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative), AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testSingleListener"),
satisfies( satisfies(
longKey("kafka.record.queue_time_ms"), longKey("kafka.record.queue_time_ms"),
AbstractLongAssert::isNotNegative)), AbstractLongAssert::isNotNegative)),
@ -205,7 +221,7 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
testing.waitAndAssertSortedTraces( testing.waitAndAssertSortedTraces(
orderByRootSpanKind(SpanKind.INTERNAL, SpanKind.CONSUMER), orderByRootSpanKind(SpanKind.INTERNAL, SpanKind.CONSUMER),
trace -> { trace -> {
trace.hasSpansSatisfyingExactly( trace.hasSpansSatisfyingExactlyInAnyOrder(
span -> span.hasName("producer"), span -> span.hasName("producer"),
span -> span ->
span.hasName("testBatchTopic send") span.hasName("testBatchTopic send")
@ -219,8 +235,9 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION, SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative), AbstractLongAssert::isNotNegative),
satisfies( satisfies(
longKey("messaging.kafka.message.offset"), SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative)), AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10")),
span -> span ->
span.hasName("testBatchTopic send") span.hasName("testBatchTopic send")
.hasKind(SpanKind.PRODUCER) .hasKind(SpanKind.PRODUCER)
@ -233,8 +250,9 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION, SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative), AbstractLongAssert::isNotNegative),
satisfies( satisfies(
longKey("messaging.kafka.message.offset"), SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative))); AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "20")));
producer1.set(trace.getSpan(1)); producer1.set(trace.getSpan(1));
producer2.set(trace.getSpan(2)); producer2.set(trace.getSpan(2));
@ -250,7 +268,10 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
equalTo( equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"), SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"), equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive")), equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testBatchListener")),
span -> span ->
span.hasName("testBatchTopic process") span.hasName("testBatchTopic process")
.hasKind(SpanKind.CONSUMER) .hasKind(SpanKind.CONSUMER)
@ -263,7 +284,10 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
equalTo( equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"), SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"), equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process")), equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testBatchListener")),
span -> span.hasName("consumer").hasParent(trace.getSpan(1)))); span -> span.hasName("consumer").hasParent(trace.getSpan(1))));
} }
@ -298,8 +322,9 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION, SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative), AbstractLongAssert::isNotNegative),
satisfies( satisfies(
longKey("messaging.kafka.message.offset"), SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative))); AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10")));
producer.set(trace.getSpan(1)); producer.set(trace.getSpan(1));
}, },
@ -314,7 +339,10 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
equalTo( equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"), SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"), equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive")), equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testBatchListener")),
span -> span ->
span.hasName("testBatchTopic process") span.hasName("testBatchTopic process")
.hasKind(SpanKind.CONSUMER) .hasKind(SpanKind.CONSUMER)
@ -327,7 +355,10 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
equalTo( equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"), SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"), equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process")), equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testBatchListener")),
span -> span.hasName("consumer").hasParent(trace.getSpan(1)))); span -> span.hasName("consumer").hasParent(trace.getSpan(1))));
} }
} }
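
The switch to hasSpansSatisfyingExactlyInAnyOrder is needed because the two asynchronous sends to testBatchTopic can complete in either order, so the relative position of the PRODUCER spans is not deterministic. A rough plain-AssertJ sketch of the same idea (names hypothetical):

import static org.assertj.core.api.Assertions.assertThat;

import io.opentelemetry.sdk.trace.data.SpanData;
import java.util.List;

final class OrderInsensitiveAssertionSketch {
  private OrderInsensitiveAssertionSketch() {}

  // the producer callbacks race, so only the multiset of span names is
  // stable, not their positions in the exported list
  static void assertBatchSendSpans(List<SpanData> spans) {
    assertThat(spans)
        .extracting(SpanData::getName)
        .containsExactlyInAnyOrder("producer", "testBatchTopic send", "testBatchTopic send");
  }
}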

View File

@ -9,6 +9,7 @@ import io.opentelemetry.context.Context;
import io.opentelemetry.context.Scope; import io.opentelemetry.context.Scope;
import io.opentelemetry.instrumentation.api.instrumenter.Instrumenter; import io.opentelemetry.instrumentation.api.instrumenter.Instrumenter;
import io.opentelemetry.instrumentation.api.util.VirtualField; import io.opentelemetry.instrumentation.api.util.VirtualField;
import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
import javax.annotation.Nullable; import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.ConsumerRecords;
@ -18,14 +19,15 @@ final class InstrumentedBatchInterceptor<K, V> implements BatchInterceptor<K, V>
private static final VirtualField<ConsumerRecords<?, ?>, Context> receiveContextField = private static final VirtualField<ConsumerRecords<?, ?>, Context> receiveContextField =
VirtualField.find(ConsumerRecords.class, Context.class); VirtualField.find(ConsumerRecords.class, Context.class);
private static final VirtualField<ConsumerRecords<?, ?>, State<ConsumerRecords<?, ?>>> private static final VirtualField<ConsumerRecords<?, ?>, State> stateField =
stateField = VirtualField.find(ConsumerRecords.class, State.class); VirtualField.find(ConsumerRecords.class, State.class);
private final Instrumenter<ConsumerRecords<?, ?>, Void> batchProcessInstrumenter; private final Instrumenter<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void>
batchProcessInstrumenter;
@Nullable private final BatchInterceptor<K, V> decorated; @Nullable private final BatchInterceptor<K, V> decorated;
InstrumentedBatchInterceptor( InstrumentedBatchInterceptor(
Instrumenter<ConsumerRecords<?, ?>, Void> batchProcessInstrumenter, Instrumenter<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void> batchProcessInstrumenter,
@Nullable BatchInterceptor<K, V> decorated) { @Nullable BatchInterceptor<K, V> decorated) {
this.batchProcessInstrumenter = batchProcessInstrumenter; this.batchProcessInstrumenter = batchProcessInstrumenter;
this.decorated = decorated; this.decorated = decorated;
@ -35,16 +37,17 @@ final class InstrumentedBatchInterceptor<K, V> implements BatchInterceptor<K, V>
public ConsumerRecords<K, V> intercept(ConsumerRecords<K, V> records, Consumer<K, V> consumer) { public ConsumerRecords<K, V> intercept(ConsumerRecords<K, V> records, Consumer<K, V> consumer) {
Context parentContext = getParentContext(records); Context parentContext = getParentContext(records);
if (batchProcessInstrumenter.shouldStart(parentContext, records)) { ConsumerAndRecord<ConsumerRecords<?, ?>> request = ConsumerAndRecord.create(consumer, records);
Context context = batchProcessInstrumenter.start(parentContext, records); if (batchProcessInstrumenter.shouldStart(parentContext, request)) {
Context context = batchProcessInstrumenter.start(parentContext, request);
Scope scope = context.makeCurrent(); Scope scope = context.makeCurrent();
stateField.set(records, State.create(records, context, scope)); stateField.set(records, State.create(context, scope));
} }
return decorated == null ? records : decorated.intercept(records, consumer); return decorated == null ? records : decorated.intercept(records, consumer);
} }
private Context getParentContext(ConsumerRecords<K, V> records) { private static Context getParentContext(ConsumerRecords<?, ?> records) {
Context receiveContext = receiveContextField.get(records); Context receiveContext = receiveContextField.get(records);
// use the receive CONSUMER span as parent if it's available // use the receive CONSUMER span as parent if it's available
@ -53,7 +56,7 @@ final class InstrumentedBatchInterceptor<K, V> implements BatchInterceptor<K, V>
@Override @Override
public void success(ConsumerRecords<K, V> records, Consumer<K, V> consumer) { public void success(ConsumerRecords<K, V> records, Consumer<K, V> consumer) {
end(records, null); end(ConsumerAndRecord.create(consumer, records), null);
if (decorated != null) { if (decorated != null) {
decorated.success(records, consumer); decorated.success(records, consumer);
} }
@ -61,18 +64,20 @@ final class InstrumentedBatchInterceptor<K, V> implements BatchInterceptor<K, V>
@Override @Override
public void failure(ConsumerRecords<K, V> records, Exception exception, Consumer<K, V> consumer) { public void failure(ConsumerRecords<K, V> records, Exception exception, Consumer<K, V> consumer) {
end(records, exception); end(ConsumerAndRecord.create(consumer, records), exception);
if (decorated != null) { if (decorated != null) {
decorated.failure(records, exception, consumer); decorated.failure(records, exception, consumer);
} }
} }
private void end(ConsumerRecords<K, V> records, @Nullable Throwable error) { private void end(
State<ConsumerRecords<?, ?>> state = stateField.get(records); ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecord, @Nullable Throwable error) {
ConsumerRecords<?, ?> records = consumerAndRecord.record();
State state = stateField.get(records);
stateField.set(records, null); stateField.set(records, null);
if (state != null) { if (state != null) {
state.scope().close(); state.scope().close();
batchProcessInstrumenter.end(state.context(), state.request(), null, error); batchProcessInstrumenter.end(state.context(), consumerAndRecord, null, error);
} }
} }
} }
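
The new request type pairs the Kafka Consumer with the record or batch being processed, which is what lets the attribute extractors report messaging.kafka.consumer.group. Judging by its usage here, ConsumerAndRecord exposes create(), consumer() and record(), with a nullable consumer; a hand-rolled sketch of that shape:

import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.Consumer;

final class ConsumerAndRecordSketch<REC> {
  @Nullable private final Consumer<?, ?> consumer;
  private final REC record;

  private ConsumerAndRecordSketch(@Nullable Consumer<?, ?> consumer, REC record) {
    this.consumer = consumer;
    this.record = record;
  }

  // the consumer may be null, e.g. in the deprecated single-argument
  // RecordInterceptor.intercept(record) path shown in the next file
  static <REC> ConsumerAndRecordSketch<REC> create(@Nullable Consumer<?, ?> consumer, REC record) {
    return new ConsumerAndRecordSketch<>(consumer, record);
  }

  @Nullable
  Consumer<?, ?> consumer() {
    return consumer;
  }

  REC record() {
    return record;
  }
}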

View File

@ -9,6 +9,7 @@ import io.opentelemetry.context.Context;
import io.opentelemetry.context.Scope; import io.opentelemetry.context.Scope;
import io.opentelemetry.instrumentation.api.instrumenter.Instrumenter; import io.opentelemetry.instrumentation.api.instrumenter.Instrumenter;
import io.opentelemetry.instrumentation.api.util.VirtualField; import io.opentelemetry.instrumentation.api.util.VirtualField;
import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
import io.opentelemetry.javaagent.tooling.muzzle.NoMuzzle; import io.opentelemetry.javaagent.tooling.muzzle.NoMuzzle;
import javax.annotation.Nullable; import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.Consumer;
@ -19,14 +20,14 @@ final class InstrumentedRecordInterceptor<K, V> implements RecordInterceptor<K,
private static final VirtualField<ConsumerRecord<?, ?>, Context> receiveContextField = private static final VirtualField<ConsumerRecord<?, ?>, Context> receiveContextField =
VirtualField.find(ConsumerRecord.class, Context.class); VirtualField.find(ConsumerRecord.class, Context.class);
private static final VirtualField<ConsumerRecord<?, ?>, State<ConsumerRecord<?, ?>>> stateField = private static final VirtualField<ConsumerRecord<?, ?>, State> stateField =
VirtualField.find(ConsumerRecord.class, State.class); VirtualField.find(ConsumerRecord.class, State.class);
private final Instrumenter<ConsumerRecord<?, ?>, Void> processInstrumenter; private final Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> processInstrumenter;
@Nullable private final RecordInterceptor<K, V> decorated; @Nullable private final RecordInterceptor<K, V> decorated;
InstrumentedRecordInterceptor( InstrumentedRecordInterceptor(
Instrumenter<ConsumerRecord<?, ?>, Void> processInstrumenter, Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> processInstrumenter,
@Nullable RecordInterceptor<K, V> decorated) { @Nullable RecordInterceptor<K, V> decorated) {
this.processInstrumenter = processInstrumenter; this.processInstrumenter = processInstrumenter;
this.decorated = decorated; this.decorated = decorated;
@ -37,27 +38,28 @@ final class InstrumentedRecordInterceptor<K, V> implements RecordInterceptor<K,
"deprecation") // implementing deprecated method (removed in 3.0) for better compatibility "deprecation") // implementing deprecated method (removed in 3.0) for better compatibility
@Override @Override
public ConsumerRecord<K, V> intercept(ConsumerRecord<K, V> record) { public ConsumerRecord<K, V> intercept(ConsumerRecord<K, V> record) {
start(record); start(ConsumerAndRecord.create(null, record));
return decorated == null ? record : decorated.intercept(record); return decorated == null ? record : decorated.intercept(record);
} }
@Override @Override
public ConsumerRecord<K, V> intercept(ConsumerRecord<K, V> record, Consumer<K, V> consumer) { public ConsumerRecord<K, V> intercept(ConsumerRecord<K, V> record, Consumer<K, V> consumer) {
start(record); start(ConsumerAndRecord.create(consumer, record));
return decorated == null ? record : decorated.intercept(record, consumer); return decorated == null ? record : decorated.intercept(record, consumer);
} }
private void start(ConsumerRecord<K, V> record) { private void start(ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
ConsumerRecord<?, ?> record = consumerAndRecord.record();
Context parentContext = getParentContext(record); Context parentContext = getParentContext(record);
if (processInstrumenter.shouldStart(parentContext, record)) { if (processInstrumenter.shouldStart(parentContext, consumerAndRecord)) {
Context context = processInstrumenter.start(parentContext, record); Context context = processInstrumenter.start(parentContext, consumerAndRecord);
Scope scope = context.makeCurrent(); Scope scope = context.makeCurrent();
stateField.set(record, State.create(record, context, scope)); stateField.set(record, State.create(context, scope));
} }
} }
private Context getParentContext(ConsumerRecord<K, V> records) { private static Context getParentContext(ConsumerRecord<?, ?> records) {
Context receiveContext = receiveContextField.get(records); Context receiveContext = receiveContextField.get(records);
// use the receive CONSUMER span as parent if it's available // use the receive CONSUMER span as parent if it's available
@ -66,7 +68,7 @@ final class InstrumentedRecordInterceptor<K, V> implements RecordInterceptor<K,
@Override @Override
public void success(ConsumerRecord<K, V> record, Consumer<K, V> consumer) { public void success(ConsumerRecord<K, V> record, Consumer<K, V> consumer) {
end(record, null); end(ConsumerAndRecord.create(consumer, record), null);
if (decorated != null) { if (decorated != null) {
decorated.success(record, consumer); decorated.success(record, consumer);
} }
@ -74,18 +76,20 @@ final class InstrumentedRecordInterceptor<K, V> implements RecordInterceptor<K,
@Override @Override
public void failure(ConsumerRecord<K, V> record, Exception exception, Consumer<K, V> consumer) { public void failure(ConsumerRecord<K, V> record, Exception exception, Consumer<K, V> consumer) {
end(record, exception); end(ConsumerAndRecord.create(consumer, record), exception);
if (decorated != null) { if (decorated != null) {
decorated.failure(record, exception, consumer); decorated.failure(record, exception, consumer);
} }
} }
private void end(ConsumerRecord<K, V> record, @Nullable Throwable error) { private void end(
State<ConsumerRecord<?, ?>> state = stateField.get(record); ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord, @Nullable Throwable error) {
ConsumerRecord<?, ?> record = consumerAndRecord.record();
State state = stateField.get(record);
stateField.set(record, null); stateField.set(record, null);
if (state != null) { if (state != null) {
state.scope().close(); state.scope().close();
processInstrumenter.end(state.context(), state.request(), null, error); processInstrumenter.end(state.context(), consumerAndRecord, null, error);
} }
} }
} }

View File

@ -8,6 +8,7 @@ package io.opentelemetry.instrumentation.spring.kafka.v2_7;
import io.opentelemetry.api.OpenTelemetry; import io.opentelemetry.api.OpenTelemetry;
import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.api.trace.SpanKind;
import io.opentelemetry.instrumentation.api.instrumenter.Instrumenter; import io.opentelemetry.instrumentation.api.instrumenter.Instrumenter;
import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.kafka.listener.AbstractMessageListenerContainer; import org.springframework.kafka.listener.AbstractMessageListenerContainer;
@ -30,12 +31,13 @@ public final class SpringKafkaTelemetry {
return new SpringKafkaTelemetryBuilder(openTelemetry); return new SpringKafkaTelemetryBuilder(openTelemetry);
} }
private final Instrumenter<ConsumerRecord<?, ?>, Void> processInstrumenter; private final Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> processInstrumenter;
private final Instrumenter<ConsumerRecords<?, ?>, Void> batchProcessInstrumenter; private final Instrumenter<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void>
batchProcessInstrumenter;
SpringKafkaTelemetry( SpringKafkaTelemetry(
Instrumenter<ConsumerRecord<?, ?>, Void> processInstrumenter, Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> processInstrumenter,
Instrumenter<ConsumerRecords<?, ?>, Void> batchProcessInstrumenter) { Instrumenter<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void> batchProcessInstrumenter) {
this.processInstrumenter = processInstrumenter; this.processInstrumenter = processInstrumenter;
this.batchProcessInstrumenter = batchProcessInstrumenter; this.batchProcessInstrumenter = batchProcessInstrumenter;
} }
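
For reference, a sketch of wiring this telemetry into a Spring Kafka listener container factory. The createRecordInterceptor()/createBatchInterceptor() factory method names are assumptions here; they are meant to hand out the instrumented interceptors shown in the previous files:

import io.opentelemetry.api.OpenTelemetry;
import io.opentelemetry.instrumentation.spring.kafka.v2_7.SpringKafkaTelemetry;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;

final class SpringKafkaWiringSketch {
  private SpringKafkaWiringSketch() {}

  static void addTracing(
      OpenTelemetry openTelemetry,
      ConcurrentKafkaListenerContainerFactory<String, String> factory) {
    SpringKafkaTelemetry telemetry = SpringKafkaTelemetry.create(openTelemetry);
    // both interceptors build ConsumerAndRecord requests internally
    factory.setRecordInterceptor(telemetry.createRecordInterceptor());
    factory.setBatchInterceptor(telemetry.createBatchInterceptor());
  }
}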

View File

@ -10,14 +10,12 @@ import io.opentelemetry.context.Context;
import io.opentelemetry.context.Scope; import io.opentelemetry.context.Scope;
@AutoValue @AutoValue
abstract class State<REQUEST> { abstract class State {
static <REQUEST> State<REQUEST> create(REQUEST request, Context context, Scope scope) { static State create(Context context, Scope scope) {
return new AutoValue_State<>(request, context, scope); return new AutoValue_State(context, scope);
} }
abstract REQUEST request();
abstract Context context(); abstract Context context();
abstract Scope scope(); abstract Scope scope();
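
For comparison, a hand-written equivalent of what @AutoValue now generates for State. The request() accessor could be dropped because end() rebuilds the ConsumerAndRecord request from the Consumer and records that success()/failure() receive, so only the Context and Scope need to survive between start and end:

import io.opentelemetry.context.Context;
import io.opentelemetry.context.Scope;

final class StateSketch {
  private final Context context;
  private final Scope scope;

  private StateSketch(Context context, Scope scope) {
    this.context = context;
    this.scope = scope;
  }

  static StateSketch create(Context context, Scope scope) {
    return new StateSketch(context, scope);
  }

  Context context() {
    return context;
  }

  Scope scope() {
    return scope;
  }
}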

View File

@ -9,7 +9,6 @@ import static io.opentelemetry.instrumentation.testing.util.TelemetryDataUtil.or
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo; import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies; import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.api.trace.SpanKind;
import io.opentelemetry.sdk.trace.data.SpanData; import io.opentelemetry.sdk.trace.data.SpanData;
import io.opentelemetry.sdk.trace.data.StatusData; import io.opentelemetry.sdk.trace.data.StatusData;
@ -54,8 +53,9 @@ public abstract class AbstractSpringKafkaNoReceiveTelemetryTest extends Abstract
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION, SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative), AbstractLongAssert::isNotNegative),
satisfies( satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"), SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative)), AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10")),
span -> span ->
span.hasName("testSingleTopic process") span.hasName("testSingleTopic process")
.hasKind(SpanKind.CONSUMER) .hasKind(SpanKind.CONSUMER)
@ -74,8 +74,12 @@ public abstract class AbstractSpringKafkaNoReceiveTelemetryTest extends Abstract
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION, SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative), AbstractLongAssert::isNotNegative),
satisfies( satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"), SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative)), AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testSingleListener")),
span -> span.hasName("consumer").hasParent(trace.getSpan(2)))); span -> span.hasName("consumer").hasParent(trace.getSpan(2))));
} }
@ -111,8 +115,9 @@ public abstract class AbstractSpringKafkaNoReceiveTelemetryTest extends Abstract
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION, SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative), AbstractLongAssert::isNotNegative),
satisfies( satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"), SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative)), AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10")),
span -> span ->
span.hasName("testSingleTopic process") span.hasName("testSingleTopic process")
.hasKind(SpanKind.CONSUMER) .hasKind(SpanKind.CONSUMER)
@ -133,8 +138,12 @@ public abstract class AbstractSpringKafkaNoReceiveTelemetryTest extends Abstract
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION, SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative), AbstractLongAssert::isNotNegative),
satisfies( satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"), SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative)), AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testSingleListener")),
span -> span.hasName("consumer").hasParent(trace.getSpan(2)))); span -> span.hasName("consumer").hasParent(trace.getSpan(2))));
} }
@ -152,7 +161,7 @@ public abstract class AbstractSpringKafkaNoReceiveTelemetryTest extends Abstract
.waitAndAssertSortedTraces( .waitAndAssertSortedTraces(
orderByRootSpanKind(SpanKind.INTERNAL, SpanKind.CONSUMER), orderByRootSpanKind(SpanKind.INTERNAL, SpanKind.CONSUMER),
trace -> { trace -> {
trace.hasSpansSatisfyingExactly( trace.hasSpansSatisfyingExactlyInAnyOrder(
span -> span.hasName("producer"), span -> span.hasName("producer"),
span -> span ->
span.hasName("testBatchTopic send") span.hasName("testBatchTopic send")
@ -167,8 +176,9 @@ public abstract class AbstractSpringKafkaNoReceiveTelemetryTest extends Abstract
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION, SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative), AbstractLongAssert::isNotNegative),
satisfies( satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"), SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative)), AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10")),
span -> span ->
span.hasName("testBatchTopic send") span.hasName("testBatchTopic send")
.hasKind(SpanKind.PRODUCER) .hasKind(SpanKind.PRODUCER)
@ -182,8 +192,9 @@ public abstract class AbstractSpringKafkaNoReceiveTelemetryTest extends Abstract
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION, SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative), AbstractLongAssert::isNotNegative),
satisfies( satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"), SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative))); AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "20")));
producer1.set(trace.getSpan(1)); producer1.set(trace.getSpan(1));
producer2.set(trace.getSpan(2)); producer2.set(trace.getSpan(2));
@ -204,7 +215,10 @@ public abstract class AbstractSpringKafkaNoReceiveTelemetryTest extends Abstract
SemanticAttributes.MESSAGING_DESTINATION_NAME, SemanticAttributes.MESSAGING_DESTINATION_NAME,
"testBatchTopic"), "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"), equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process")), equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testBatchListener")),
span -> span.hasName("consumer").hasParent(trace.getSpan(0)))); span -> span.hasName("consumer").hasParent(trace.getSpan(0))));
} }
@ -242,8 +256,9 @@ public abstract class AbstractSpringKafkaNoReceiveTelemetryTest extends Abstract
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION, SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative), AbstractLongAssert::isNotNegative),
satisfies( satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"), SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative))); AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10")));
producer.set(trace.getSpan(1)); producer.set(trace.getSpan(1));
}, },
@ -262,7 +277,10 @@ public abstract class AbstractSpringKafkaNoReceiveTelemetryTest extends Abstract
SemanticAttributes.MESSAGING_DESTINATION_NAME, SemanticAttributes.MESSAGING_DESTINATION_NAME,
"testBatchTopic"), "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"), equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process")), equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testBatchListener")),
span -> span.hasName("consumer").hasParent(trace.getSpan(0)))); span -> span.hasName("consumer").hasParent(trace.getSpan(0))));
} }
} }

View File

@ -24,13 +24,22 @@ dependencies {
testInstrumentation(project(":instrumentation:kafka:kafka-clients:kafka-clients-0.11:javaagent")) testInstrumentation(project(":instrumentation:kafka:kafka-clients:kafka-clients-0.11:javaagent"))
} }
val latestDepTest = findProperty("testLatestDeps") as Boolean
testing { testing {
suites { suites {
val testNoReceiveTelemetry by registering(JvmTestSuite::class) { val testNoReceiveTelemetry by registering(JvmTestSuite::class) {
dependencies { dependencies {
implementation(project(":instrumentation:vertx:vertx-kafka-client-3.6:testing"))
// the "library" configuration is not recognized by the test suite plugin
if (latestDepTest) {
implementation("io.vertx:vertx-kafka-client:+")
implementation("io.vertx:vertx-codegen:+")
} else {
implementation("io.vertx:vertx-kafka-client:3.6.0") implementation("io.vertx:vertx-kafka-client:3.6.0")
implementation("io.vertx:vertx-codegen:3.6.0") implementation("io.vertx:vertx-codegen:3.6.0")
implementation(project(":instrumentation:vertx:vertx-kafka-client-3.6:testing")) }
} }
targets { targets {
@ -38,6 +47,8 @@ testing {
testTask.configure { testTask.configure {
usesService(gradle.sharedServices.registrations["testcontainersBuildService"].service) usesService(gradle.sharedServices.registrations["testcontainersBuildService"].service)
systemProperty("testLatestDeps", findProperty("testLatestDeps") as Boolean)
jvmArgs("-Dotel.instrumentation.kafka.experimental-span-attributes=false") jvmArgs("-Dotel.instrumentation.kafka.experimental-span-attributes=false")
jvmArgs("-Dotel.instrumentation.messaging.experimental.receive-telemetry.enabled=false") jvmArgs("-Dotel.instrumentation.messaging.experimental.receive-telemetry.enabled=false")
} }
@ -51,6 +62,8 @@ tasks {
test { test {
usesService(gradle.sharedServices.registrations["testcontainersBuildService"].service) usesService(gradle.sharedServices.registrations["testcontainersBuildService"].service)
systemProperty("testLatestDeps", latestDepTest)
jvmArgs("-Dotel.instrumentation.kafka.experimental-span-attributes=true") jvmArgs("-Dotel.instrumentation.kafka.experimental-span-attributes=true")
jvmArgs("-Dotel.instrumentation.messaging.experimental.receive-telemetry.enabled=true") jvmArgs("-Dotel.instrumentation.messaging.experimental.receive-telemetry.enabled=true")
} }

View File

@ -10,28 +10,35 @@ import static io.opentelemetry.javaagent.instrumentation.vertx.kafka.v3_6.VertxK
import io.opentelemetry.context.Context; import io.opentelemetry.context.Context;
import io.opentelemetry.context.Scope; import io.opentelemetry.context.Scope;
import io.opentelemetry.instrumentation.api.util.VirtualField; import io.opentelemetry.instrumentation.api.util.VirtualField;
import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
import io.opentelemetry.javaagent.bootstrap.kafka.KafkaClientsConsumerProcessTracing; import io.opentelemetry.javaagent.bootstrap.kafka.KafkaClientsConsumerProcessTracing;
import io.vertx.core.Handler; import io.vertx.core.Handler;
import javax.annotation.Nullable; import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.ConsumerRecords;
public final class InstrumentedBatchRecordsHandler<K, V> implements Handler<ConsumerRecords<K, V>> { public final class InstrumentedBatchRecordsHandler<K, V> implements Handler<ConsumerRecords<K, V>> {
private final VirtualField<ConsumerRecords<K, V>, Context> receiveContextField; private final VirtualField<ConsumerRecords<K, V>, Context> receiveContextField;
private final Consumer<K, V> kafkaConsumer;
@Nullable private final Handler<ConsumerRecords<K, V>> delegate; @Nullable private final Handler<ConsumerRecords<K, V>> delegate;
public InstrumentedBatchRecordsHandler( public InstrumentedBatchRecordsHandler(
VirtualField<ConsumerRecords<K, V>, Context> receiveContextField, VirtualField<ConsumerRecords<K, V>, Context> receiveContextField,
Consumer<K, V> kafkaConsumer,
@Nullable Handler<ConsumerRecords<K, V>> delegate) { @Nullable Handler<ConsumerRecords<K, V>> delegate) {
this.receiveContextField = receiveContextField; this.receiveContextField = receiveContextField;
this.kafkaConsumer = kafkaConsumer;
this.delegate = delegate; this.delegate = delegate;
} }
@Override @Override
public void handle(ConsumerRecords<K, V> records) { public void handle(ConsumerRecords<K, V> records) {
Context parentContext = getParentContext(records); Context parentContext = getParentContext(records);
ConsumerAndRecord<ConsumerRecords<?, ?>> request =
ConsumerAndRecord.create(kafkaConsumer, records);
if (!batchProcessInstrumenter().shouldStart(parentContext, records)) { if (!batchProcessInstrumenter().shouldStart(parentContext, request)) {
callDelegateHandler(records); callDelegateHandler(records);
return; return;
} }
@ -39,7 +46,7 @@ public final class InstrumentedBatchRecordsHandler<K, V> implements Handler<Cons
// the instrumenter iterates over records when adding links, so we need to suppress that // the instrumenter iterates over records when adding links, so we need to suppress that
boolean previousWrappingEnabled = KafkaClientsConsumerProcessTracing.setEnabled(false); boolean previousWrappingEnabled = KafkaClientsConsumerProcessTracing.setEnabled(false);
try { try {
Context context = batchProcessInstrumenter().start(parentContext, records); Context context = batchProcessInstrumenter().start(parentContext, request);
Throwable error = null; Throwable error = null;
try (Scope ignored = context.makeCurrent()) { try (Scope ignored = context.makeCurrent()) {
callDelegateHandler(records); callDelegateHandler(records);
@ -47,7 +54,7 @@ public final class InstrumentedBatchRecordsHandler<K, V> implements Handler<Cons
error = t; error = t;
throw t; throw t;
} finally { } finally {
batchProcessInstrumenter().end(context, records, null, error); batchProcessInstrumenter().end(context, request, null, error);
} }
} finally { } finally {
KafkaClientsConsumerProcessTracing.setEnabled(previousWrappingEnabled); KafkaClientsConsumerProcessTracing.setEnabled(previousWrappingEnabled);
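
Both this batch handler and the single-record handler in the next file follow the standard Instrumenter lifecycle: shouldStart, start, makeCurrent, run the delegate, end. A generic sketch of that pattern, with REQUEST standing in for the ConsumerAndRecord types used above:

import io.opentelemetry.context.Context;
import io.opentelemetry.context.Scope;
import io.opentelemetry.instrumentation.api.instrumenter.Instrumenter;

final class InstrumenterLifecycleSketch {
  private InstrumenterLifecycleSketch() {}

  static <REQUEST> void traced(
      Instrumenter<REQUEST, Void> instrumenter, REQUEST request, Runnable delegate) {
    Context parentContext = Context.current();
    if (!instrumenter.shouldStart(parentContext, request)) {
      delegate.run(); // tracing suppressed: just run the wrapped handler
      return;
    }
    Context context = instrumenter.start(parentContext, request);
    Throwable error = null;
    try (Scope ignored = context.makeCurrent()) {
      delegate.run();
    } catch (Throwable t) {
      error = t;
      throw t;
    } finally {
      instrumenter.end(context, request, null, error);
    }
  }
}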

View File

@ -10,32 +10,39 @@ import static io.opentelemetry.javaagent.instrumentation.vertx.kafka.v3_6.VertxK
import io.opentelemetry.context.Context; import io.opentelemetry.context.Context;
import io.opentelemetry.context.Scope; import io.opentelemetry.context.Scope;
import io.opentelemetry.instrumentation.api.util.VirtualField; import io.opentelemetry.instrumentation.api.util.VirtualField;
import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
import io.vertx.core.Handler; import io.vertx.core.Handler;
import javax.annotation.Nullable; import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecord;
public final class InstrumentedSingleRecordHandler<K, V> implements Handler<ConsumerRecord<K, V>> { public final class InstrumentedSingleRecordHandler<K, V> implements Handler<ConsumerRecord<K, V>> {
private final VirtualField<ConsumerRecord<K, V>, Context> receiveContextField; private final VirtualField<ConsumerRecord<K, V>, Context> receiveContextField;
private final Consumer<K, V> kafkaConsumer;
@Nullable private final Handler<ConsumerRecord<K, V>> delegate; @Nullable private final Handler<ConsumerRecord<K, V>> delegate;
public InstrumentedSingleRecordHandler( public InstrumentedSingleRecordHandler(
VirtualField<ConsumerRecord<K, V>, Context> receiveContextField, VirtualField<ConsumerRecord<K, V>, Context> receiveContextField,
Consumer<K, V> kafkaConsumer,
@Nullable Handler<ConsumerRecord<K, V>> delegate) { @Nullable Handler<ConsumerRecord<K, V>> delegate) {
this.receiveContextField = receiveContextField; this.receiveContextField = receiveContextField;
this.kafkaConsumer = kafkaConsumer;
this.delegate = delegate; this.delegate = delegate;
} }
@Override @Override
public void handle(ConsumerRecord<K, V> record) { public void handle(ConsumerRecord<K, V> record) {
Context parentContext = getParentContext(record); Context parentContext = getParentContext(record);
ConsumerAndRecord<ConsumerRecord<?, ?>> request =
ConsumerAndRecord.create(kafkaConsumer, record);
if (!processInstrumenter().shouldStart(parentContext, record)) { if (!processInstrumenter().shouldStart(parentContext, request)) {
callDelegateHandler(record); callDelegateHandler(record);
return; return;
} }
Context context = processInstrumenter().start(parentContext, record); Context context = processInstrumenter().start(parentContext, request);
Throwable error = null; Throwable error = null;
try (Scope ignored = context.makeCurrent()) { try (Scope ignored = context.makeCurrent()) {
callDelegateHandler(record); callDelegateHandler(record);
@ -43,7 +50,7 @@ public final class InstrumentedSingleRecordHandler<K, V> implements Handler<Cons
error = t; error = t;
throw t; throw t;
} finally { } finally {
processInstrumenter().end(context, record, null, error); processInstrumenter().end(context, request, null, error);
} }
} }

View File

@ -16,9 +16,11 @@ import io.opentelemetry.javaagent.bootstrap.kafka.KafkaClientsConsumerProcessTra
import io.opentelemetry.javaagent.extension.instrumentation.TypeInstrumentation; import io.opentelemetry.javaagent.extension.instrumentation.TypeInstrumentation;
import io.opentelemetry.javaagent.extension.instrumentation.TypeTransformer; import io.opentelemetry.javaagent.extension.instrumentation.TypeTransformer;
import io.vertx.core.Handler; import io.vertx.core.Handler;
import io.vertx.kafka.client.consumer.impl.KafkaReadStreamImpl;
import net.bytebuddy.asm.Advice; import net.bytebuddy.asm.Advice;
import net.bytebuddy.description.type.TypeDescription; import net.bytebuddy.description.type.TypeDescription;
import net.bytebuddy.matcher.ElementMatcher; import net.bytebuddy.matcher.ElementMatcher;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.ConsumerRecords;
@ -50,11 +52,13 @@ public class KafkaReadStreamImplInstrumentation implements TypeInstrumentation {
@Advice.OnMethodEnter(suppress = Throwable.class) @Advice.OnMethodEnter(suppress = Throwable.class)
public static <K, V> void onEnter( public static <K, V> void onEnter(
@Advice.This KafkaReadStreamImpl<K, V> readStream,
@Advice.Argument(value = 0, readOnly = false) Handler<ConsumerRecord<K, V>> handler) { @Advice.Argument(value = 0, readOnly = false) Handler<ConsumerRecord<K, V>> handler) {
Consumer<K, V> consumer = readStream.unwrap();
VirtualField<ConsumerRecord<K, V>, Context> receiveContextField = VirtualField<ConsumerRecord<K, V>, Context> receiveContextField =
VirtualField.find(ConsumerRecord.class, Context.class); VirtualField.find(ConsumerRecord.class, Context.class);
handler = new InstrumentedSingleRecordHandler<>(receiveContextField, handler); handler = new InstrumentedSingleRecordHandler<>(receiveContextField, consumer, handler);
} }
} }
@ -63,11 +67,13 @@ public class KafkaReadStreamImplInstrumentation implements TypeInstrumentation {
@Advice.OnMethodEnter(suppress = Throwable.class) @Advice.OnMethodEnter(suppress = Throwable.class)
public static <K, V> void onEnter( public static <K, V> void onEnter(
@Advice.This KafkaReadStreamImpl<K, V> readStream,
@Advice.Argument(value = 0, readOnly = false) Handler<ConsumerRecords<K, V>> handler) { @Advice.Argument(value = 0, readOnly = false) Handler<ConsumerRecords<K, V>> handler) {
Consumer<K, V> consumer = readStream.unwrap();
VirtualField<ConsumerRecords<K, V>, Context> receiveContextField = VirtualField<ConsumerRecords<K, V>, Context> receiveContextField =
VirtualField.find(ConsumerRecords.class, Context.class); VirtualField.find(ConsumerRecords.class, Context.class);
handler = new InstrumentedBatchRecordsHandler<>(receiveContextField, handler); handler = new InstrumentedBatchRecordsHandler<>(receiveContextField, consumer, handler);
} }
} }
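
The advice now also binds @Advice.This so it can reach the underlying Kafka Consumer via KafkaReadStreamImpl.unwrap(). As a minimal illustration of the ByteBuddy mechanics: declaring an @Advice.Argument with readOnly = false means an assignment to that parameter replaces the argument the instrumented method actually sees (Runnable stands in for the Vert.x Handler in this sketch):

import net.bytebuddy.asm.Advice;

public class WrapHandlerAdviceSketch {

  public static final class WrappedRunnable implements Runnable {
    private final Runnable delegate;

    public WrappedRunnable(Runnable delegate) {
      this.delegate = delegate;
    }

    @Override
    public void run() {
      // extra logic around the original handler would go here
      delegate.run();
    }
  }

  @Advice.OnMethodEnter(suppress = Throwable.class)
  public static void onEnter(
      @Advice.This Object self, // the instrumented instance, unused in this sketch
      @Advice.Argument(value = 0, readOnly = false) Runnable handler) {
    // assigning the parameter replaces the argument the instrumented method sees
    handler = new WrappedRunnable(handler);
  }
}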

View File

@ -7,6 +7,7 @@ package io.opentelemetry.javaagent.instrumentation.vertx.kafka.v3_6;
import io.opentelemetry.api.GlobalOpenTelemetry; import io.opentelemetry.api.GlobalOpenTelemetry;
import io.opentelemetry.instrumentation.api.instrumenter.Instrumenter; import io.opentelemetry.instrumentation.api.instrumenter.Instrumenter;
import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
import io.opentelemetry.instrumentation.kafka.internal.KafkaInstrumenterFactory; import io.opentelemetry.instrumentation.kafka.internal.KafkaInstrumenterFactory;
import io.opentelemetry.javaagent.bootstrap.internal.ExperimentalConfig; import io.opentelemetry.javaagent.bootstrap.internal.ExperimentalConfig;
import io.opentelemetry.javaagent.bootstrap.internal.InstrumentationConfig; import io.opentelemetry.javaagent.bootstrap.internal.InstrumentationConfig;
@ -17,8 +18,10 @@ public final class VertxKafkaSingletons {
private static final String INSTRUMENTATION_NAME = "io.opentelemetry.vertx-kafka-client-3.6"; private static final String INSTRUMENTATION_NAME = "io.opentelemetry.vertx-kafka-client-3.6";
private static final Instrumenter<ConsumerRecords<?, ?>, Void> BATCH_PROCESS_INSTRUMENTER; private static final Instrumenter<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void>
private static final Instrumenter<ConsumerRecord<?, ?>, Void> PROCESS_INSTRUMENTER; BATCH_PROCESS_INSTRUMENTER;
private static final Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void>
PROCESS_INSTRUMENTER;
static { static {
KafkaInstrumenterFactory factory = KafkaInstrumenterFactory factory =
@ -33,11 +36,12 @@ public final class VertxKafkaSingletons {
PROCESS_INSTRUMENTER = factory.createConsumerProcessInstrumenter(); PROCESS_INSTRUMENTER = factory.createConsumerProcessInstrumenter();
} }
public static Instrumenter<ConsumerRecords<?, ?>, Void> batchProcessInstrumenter() { public static Instrumenter<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void>
batchProcessInstrumenter() {
return BATCH_PROCESS_INSTRUMENTER; return BATCH_PROCESS_INSTRUMENTER;
} }
public static Instrumenter<ConsumerRecord<?, ?>, Void> processInstrumenter() { public static Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> processInstrumenter() {
return PROCESS_INSTRUMENTER; return PROCESS_INSTRUMENTER;
} }

View File

@ -5,23 +5,17 @@
package io.opentelemetry.javaagent.instrumentation.vertx.kafka.v3_6; package io.opentelemetry.javaagent.instrumentation.vertx.kafka.v3_6;
import static io.opentelemetry.api.common.AttributeKey.longKey;
import static io.opentelemetry.instrumentation.testing.util.TelemetryDataUtil.orderByRootSpanKind; import static io.opentelemetry.instrumentation.testing.util.TelemetryDataUtil.orderByRootSpanKind;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;
import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertTrue;
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.api.trace.SpanKind;
import io.opentelemetry.sdk.trace.data.LinkData; import io.opentelemetry.sdk.trace.data.LinkData;
import io.opentelemetry.sdk.trace.data.SpanData; import io.opentelemetry.sdk.trace.data.SpanData;
import io.opentelemetry.sdk.trace.data.StatusData; import io.opentelemetry.sdk.trace.data.StatusData;
import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import io.vertx.kafka.client.producer.KafkaProducerRecord; import io.vertx.kafka.client.producer.KafkaProducerRecord;
import java.util.concurrent.CountDownLatch; import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.atomic.AtomicReference;
import org.assertj.core.api.AbstractLongAssert;
import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.MethodOrderer.OrderAnnotation; import org.junit.jupiter.api.MethodOrderer.OrderAnnotation;
import org.junit.jupiter.api.Order; import org.junit.jupiter.api.Order;
@ -51,9 +45,11 @@ class BatchRecordsVertxKafkaTest extends AbstractVertxKafkaTest {
void shouldCreateSpansForBatchReceiveAndProcess() throws InterruptedException { void shouldCreateSpansForBatchReceiveAndProcess() throws InterruptedException {
assertTrue(consumerReady.await(30, TimeUnit.SECONDS)); assertTrue(consumerReady.await(30, TimeUnit.SECONDS));
sendBatchMessages( KafkaProducerRecord<String, String> record1 =
KafkaProducerRecord.create("testBatchTopic", "10", "testSpan1"), KafkaProducerRecord.create("testBatchTopic", "10", "testSpan1");
KafkaProducerRecord.create("testBatchTopic", "20", "testSpan2")); KafkaProducerRecord<String, String> record2 =
KafkaProducerRecord.create("testBatchTopic", "20", "testSpan2");
sendBatchMessages(record1, record2);
AtomicReference<SpanData> producer1 = new AtomicReference<>(); AtomicReference<SpanData> producer1 = new AtomicReference<>();
AtomicReference<SpanData> producer2 = new AtomicReference<>(); AtomicReference<SpanData> producer2 = new AtomicReference<>();
@ -61,52 +57,29 @@ class BatchRecordsVertxKafkaTest extends AbstractVertxKafkaTest {
testing.waitAndAssertSortedTraces( testing.waitAndAssertSortedTraces(
orderByRootSpanKind(SpanKind.INTERNAL, SpanKind.CONSUMER), orderByRootSpanKind(SpanKind.INTERNAL, SpanKind.CONSUMER),
trace -> { trace -> {
trace.hasSpansSatisfyingExactly( trace.hasSpansSatisfyingExactlyInAnyOrder(
span -> span.hasName("producer"), span -> span.hasName("producer"),
span -> span ->
span.hasName("testBatchTopic send") span.hasName("testBatchTopic send")
.hasKind(SpanKind.PRODUCER) .hasKind(SpanKind.PRODUCER)
.hasParent(trace.getSpan(0)) .hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly( .hasAttributesSatisfyingExactly(sendAttributes(record1)),
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)),
span -> span ->
span.hasName("testBatchTopic send") span.hasName("testBatchTopic send")
.hasKind(SpanKind.PRODUCER) .hasKind(SpanKind.PRODUCER)
.hasParent(trace.getSpan(0)) .hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly( .hasAttributesSatisfyingExactly(sendAttributes(record2)));
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)));
producer1.set(trace.getSpan(1)); producer1.set(trace.getSpan(1));
producer2.set(trace.getSpan(2)); producer2.set(trace.getSpan(2));
}, },
trace -> trace ->
trace.hasSpansSatisfyingExactly( trace.hasSpansSatisfyingExactlyInAnyOrder(
span -> span ->
span.hasName("testBatchTopic receive") span.hasName("testBatchTopic receive")
.hasKind(SpanKind.CONSUMER) .hasKind(SpanKind.CONSUMER)
.hasNoParent() .hasNoParent()
.hasAttributesSatisfyingExactly( .hasAttributesSatisfyingExactly(receiveAttributes("testBatchTopic")),
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive")),
// batch consumer // batch consumer
span -> span ->
@ -116,12 +89,7 @@ class BatchRecordsVertxKafkaTest extends AbstractVertxKafkaTest {
.hasLinks( .hasLinks(
LinkData.create(producer1.get().getSpanContext()), LinkData.create(producer1.get().getSpanContext()),
LinkData.create(producer2.get().getSpanContext())) LinkData.create(producer2.get().getSpanContext()))
.hasAttributesSatisfyingExactly( .hasAttributesSatisfyingExactly(batchProcessAttributes("testBatchTopic")),
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process")),
span -> span.hasName("batch consumer").hasParent(trace.getSpan(1)), span -> span.hasName("batch consumer").hasParent(trace.getSpan(1)),
// single consumer 1 // single consumer 1
@ -130,24 +98,7 @@ class BatchRecordsVertxKafkaTest extends AbstractVertxKafkaTest {
.hasKind(SpanKind.CONSUMER) .hasKind(SpanKind.CONSUMER)
.hasParent(trace.getSpan(0)) .hasParent(trace.getSpan(0))
.hasLinks(LinkData.create(producer1.get().getSpanContext())) .hasLinks(LinkData.create(producer1.get().getSpanContext()))
.hasAttributesSatisfyingExactly( .hasAttributesSatisfyingExactly(processAttributes(record1)),
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
satisfies(
SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
AbstractLongAssert::isNotNegative),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative),
satisfies(
longKey("kafka.record.queue_time_ms"),
AbstractLongAssert::isNotNegative)),
span -> span.hasName("process testSpan1").hasParent(trace.getSpan(3)), span -> span.hasName("process testSpan1").hasParent(trace.getSpan(3)),
// single consumer 2 // single consumer 2
@ -156,24 +107,7 @@ class BatchRecordsVertxKafkaTest extends AbstractVertxKafkaTest {
.hasKind(SpanKind.CONSUMER) .hasKind(SpanKind.CONSUMER)
.hasParent(trace.getSpan(0)) .hasParent(trace.getSpan(0))
.hasLinks(LinkData.create(producer2.get().getSpanContext())) .hasLinks(LinkData.create(producer2.get().getSpanContext()))
.hasAttributesSatisfyingExactly( .hasAttributesSatisfyingExactly(processAttributes(record2)),
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
satisfies(
SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
AbstractLongAssert::isNotNegative),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative),
satisfies(
longKey("kafka.record.queue_time_ms"),
AbstractLongAssert::isNotNegative)),
span -> span.hasName("process testSpan2").hasParent(trace.getSpan(5)))); span -> span.hasName("process testSpan2").hasParent(trace.getSpan(5))));
} }
@ -182,7 +116,9 @@ class BatchRecordsVertxKafkaTest extends AbstractVertxKafkaTest {
void shouldHandleFailureInKafkaBatchListener() throws InterruptedException { void shouldHandleFailureInKafkaBatchListener() throws InterruptedException {
assertTrue(consumerReady.await(30, TimeUnit.SECONDS)); assertTrue(consumerReady.await(30, TimeUnit.SECONDS));
sendBatchMessages(KafkaProducerRecord.create("testBatchTopic", "10", "error")); KafkaProducerRecord<String, String> record =
KafkaProducerRecord.create("testBatchTopic", "10", "error");
sendBatchMessages(record);
// make sure that the consumer eats up any leftover records // make sure that the consumer eats up any leftover records
kafkaConsumer.resume(); kafkaConsumer.resume();
@ -198,16 +134,7 @@ class BatchRecordsVertxKafkaTest extends AbstractVertxKafkaTest {
span.hasName("testBatchTopic send") span.hasName("testBatchTopic send")
.hasKind(SpanKind.PRODUCER) .hasKind(SpanKind.PRODUCER)
.hasParent(trace.getSpan(0)) .hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly( .hasAttributesSatisfyingExactly(sendAttributes(record)));
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)));
producer.set(trace.getSpan(1)); producer.set(trace.getSpan(1));
}, },
@@ -217,12 +144,7 @@ class BatchRecordsVertxKafkaTest extends AbstractVertxKafkaTest {
                span.hasName("testBatchTopic receive")
                    .hasKind(SpanKind.CONSUMER)
                    .hasNoParent()
-                   .hasAttributesSatisfyingExactly(
-                       equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                       equalTo(
-                           SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
-                       equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                       equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive")),
+                   .hasAttributesSatisfyingExactly(receiveAttributes("testBatchTopic")),
            // batch consumer
            span ->
@@ -232,12 +154,7 @@ class BatchRecordsVertxKafkaTest extends AbstractVertxKafkaTest {
                    .hasLinks(LinkData.create(producer.get().getSpanContext()))
                    .hasStatus(StatusData.error())
                    .hasException(new IllegalArgumentException("boom"))
-                   .hasAttributesSatisfyingExactly(
-                       equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                       equalTo(
-                           SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
-                       equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                       equalTo(SemanticAttributes.MESSAGING_OPERATION, "process")),
+                   .hasAttributesSatisfyingExactly(batchProcessAttributes("testBatchTopic")),
            span -> span.hasName("batch consumer").hasParent(trace.getSpan(1)),
            // single consumer
@@ -245,24 +162,7 @@ class BatchRecordsVertxKafkaTest extends AbstractVertxKafkaTest {
                span.hasName("testBatchTopic process")
                    .hasKind(SpanKind.CONSUMER)
                    .hasParent(trace.getSpan(0))
-                   .hasAttributesSatisfyingExactly(
-                       equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                       equalTo(
-                           SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
-                       equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                       equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
-                       satisfies(
-                           SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
-                           AbstractLongAssert::isNotNegative),
-                       satisfies(
-                           SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
-                           AbstractLongAssert::isNotNegative),
-                       satisfies(
-                           AttributeKey.longKey("messaging.kafka.message.offset"),
-                           AbstractLongAssert::isNotNegative),
-                       satisfies(
-                           longKey("kafka.record.queue_time_ms"),
-                           AbstractLongAssert::isNotNegative)),
+                   .hasAttributesSatisfyingExactly(processAttributes(record)),
            span -> span.hasName("process error").hasParent(trace.getSpan(3))));
  }
}
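Every rewritten assertion in the file above follows the same recipe: a hand-maintained list of equalTo/satisfies checks becomes a call to a shared helper returning a List<AttributeAssertion>, which hasAttributesSatisfyingExactly accepts directly, as the hunks show. A minimal self-contained sketch of that recipe, using only APIs visible in this diff (the class and method names here are illustrative, not part of the commit):

import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;

import io.opentelemetry.sdk.testing.assertj.AttributeAssertion;
import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import java.util.Arrays;
import java.util.List;
import org.assertj.core.api.AbstractLongAssert;

class KafkaAttributeAssertionsSketch {
  // Fixed values are asserted with equalTo; values that vary per run
  // (partition, offset) only need to be non-negative, so they use satisfies.
  static List<AttributeAssertion> exampleSendAttributes(String topic) {
    return Arrays.asList(
        equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
        equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, topic),
        equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
        satisfies(
            SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
            AbstractLongAssert::isNotNegative),
        satisfies(
            SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
            AbstractLongAssert::isNotNegative));
  }
}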
View File
@@ -5,23 +5,17 @@
package io.opentelemetry.javaagent.instrumentation.vertx.kafka.v3_6;
-import static io.opentelemetry.api.common.AttributeKey.longKey;
import static io.opentelemetry.instrumentation.testing.util.TelemetryDataUtil.orderByRootSpanKind;
-import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
-import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.trace.SpanKind;
import io.opentelemetry.sdk.trace.data.LinkData;
import io.opentelemetry.sdk.trace.data.SpanData;
import io.opentelemetry.sdk.trace.data.StatusData;
-import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import io.vertx.kafka.client.producer.KafkaProducerRecord;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
-import org.assertj.core.api.AbstractLongAssert;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
@@ -47,13 +41,10 @@ class SingleRecordVertxKafkaTest extends AbstractVertxKafkaTest {
  void shouldCreateSpansForSingleRecordProcess() throws InterruptedException {
    assertTrue(consumerReady.await(30, TimeUnit.SECONDS));
+   KafkaProducerRecord<String, String> record =
+       KafkaProducerRecord.create("testSingleTopic", "10", "testSpan");
    CountDownLatch sent = new CountDownLatch(1);
-   testing.runWithSpan(
-       "producer",
-       () ->
-           sendRecord(
-               KafkaProducerRecord.create("testSingleTopic", "10", "testSpan"),
-               result -> sent.countDown()));
+   testing.runWithSpan("producer", () -> sendRecord(record, result -> sent.countDown()));
    assertTrue(sent.await(30, TimeUnit.SECONDS));
    AtomicReference<SpanData> producer = new AtomicReference<>();
@@ -67,16 +58,7 @@ class SingleRecordVertxKafkaTest extends AbstractVertxKafkaTest {
                span.hasName("testSingleTopic send")
                    .hasKind(SpanKind.PRODUCER)
                    .hasParent(trace.getSpan(0))
-                   .hasAttributesSatisfyingExactly(
-                       equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                       equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
-                       equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                       satisfies(
-                           SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
-                           AbstractLongAssert::isNotNegative),
-                       satisfies(
-                           AttributeKey.longKey("messaging.kafka.message.offset"),
-                           AbstractLongAssert::isNotNegative)));
+                   .hasAttributesSatisfyingExactly(sendAttributes(record)));
            producer.set(trace.getSpan(1));
          },
@@ -86,35 +68,13 @@ class SingleRecordVertxKafkaTest extends AbstractVertxKafkaTest {
                span.hasName("testSingleTopic receive")
                    .hasKind(SpanKind.CONSUMER)
                    .hasNoParent()
-                   .hasAttributesSatisfyingExactly(
-                       equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                       equalTo(
-                           SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
-                       equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                       equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive")),
+                   .hasAttributesSatisfyingExactly(receiveAttributes("testSingleTopic")),
            span ->
                span.hasName("testSingleTopic process")
                    .hasKind(SpanKind.CONSUMER)
                    .hasParent(trace.getSpan(0))
                    .hasLinks(LinkData.create(producer.get().getSpanContext()))
-                   .hasAttributesSatisfyingExactly(
-                       equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                       equalTo(
-                           SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
-                       equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                       equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
-                       satisfies(
-                           SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
-                           AbstractLongAssert::isNotNegative),
-                       satisfies(
-                           SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
-                           AbstractLongAssert::isNotNegative),
-                       satisfies(
-                           AttributeKey.longKey("messaging.kafka.message.offset"),
-                           AbstractLongAssert::isNotNegative),
-                       satisfies(
-                           longKey("kafka.record.queue_time_ms"),
-                           AbstractLongAssert::isNotNegative)),
+                   .hasAttributesSatisfyingExactly(processAttributes(record)),
            span -> span.hasName("consumer").hasParent(trace.getSpan(1))));
  }
@@ -122,13 +82,10 @@ class SingleRecordVertxKafkaTest extends AbstractVertxKafkaTest {
  void shouldHandleFailureInSingleRecordHandler() throws InterruptedException {
    assertTrue(consumerReady.await(30, TimeUnit.SECONDS));
+   KafkaProducerRecord<String, String> record =
+       KafkaProducerRecord.create("testSingleTopic", "10", "error");
    CountDownLatch sent = new CountDownLatch(1);
-   testing.runWithSpan(
-       "producer",
-       () ->
-           sendRecord(
-               KafkaProducerRecord.create("testSingleTopic", "10", "error"),
-               result -> sent.countDown()));
+   testing.runWithSpan("producer", () -> sendRecord(record, result -> sent.countDown()));
    assertTrue(sent.await(30, TimeUnit.SECONDS));
    AtomicReference<SpanData> producer = new AtomicReference<>();
@@ -142,16 +99,7 @@ class SingleRecordVertxKafkaTest extends AbstractVertxKafkaTest {
                span.hasName("testSingleTopic send")
                    .hasKind(SpanKind.PRODUCER)
                    .hasParent(trace.getSpan(0))
-                   .hasAttributesSatisfyingExactly(
-                       equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                       equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
-                       equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                       satisfies(
-                           SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
-                           AbstractLongAssert::isNotNegative),
-                       satisfies(
-                           AttributeKey.longKey("messaging.kafka.message.offset"),
-                           AbstractLongAssert::isNotNegative)));
+                   .hasAttributesSatisfyingExactly(sendAttributes(record)));
            producer.set(trace.getSpan(1));
          },
@@ -161,12 +109,7 @@ class SingleRecordVertxKafkaTest extends AbstractVertxKafkaTest {
                span.hasName("testSingleTopic receive")
                    .hasKind(SpanKind.CONSUMER)
                    .hasNoParent()
-                   .hasAttributesSatisfyingExactly(
-                       equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                       equalTo(
-                           SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
-                       equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                       equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive")),
+                   .hasAttributesSatisfyingExactly(receiveAttributes("testSingleTopic")),
            span ->
                span.hasName("testSingleTopic process")
                    .hasKind(SpanKind.CONSUMER)
@@ -174,24 +117,7 @@ class SingleRecordVertxKafkaTest extends AbstractVertxKafkaTest {
                    .hasLinks(LinkData.create(producer.get().getSpanContext()))
                    .hasStatus(StatusData.error())
                    .hasException(new IllegalArgumentException("boom"))
-                   .hasAttributesSatisfyingExactly(
-                       equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                       equalTo(
-                           SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
-                       equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                       equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
-                       satisfies(
-                           SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
-                           AbstractLongAssert::isNotNegative),
-                       satisfies(
-                           SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
-                           AbstractLongAssert::isNotNegative),
-                       satisfies(
-                           AttributeKey.longKey("messaging.kafka.message.offset"),
-                           AbstractLongAssert::isNotNegative),
-                       satisfies(
-                           longKey("kafka.record.queue_time_ms"),
-                           AbstractLongAssert::isNotNegative)),
+                   .hasAttributesSatisfyingExactly(processAttributes(record)),
            span -> span.hasName("consumer").hasParent(trace.getSpan(1))));
  }
}
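The producer-side edits in this file share one move: the KafkaProducerRecord is hoisted into a local variable so that the very record that was sent can feed the expected attribute values. A standalone sketch of the idea, using only the Vert.x API already imported here:

import io.vertx.kafka.client.producer.KafkaProducerRecord;

class RecordReuseSketch {
  public static void main(String[] args) {
    KafkaProducerRecord<String, String> record =
        KafkaProducerRecord.create("testSingleTopic", "10", "testSpan");
    // the assertion helpers read these accessors instead of repeating literals
    System.out.println(record.topic()); // testSingleTopic
    System.out.println(record.key()); // 10
    System.out.println(record.value()); // testSpan
  }
}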
View File
@@ -6,21 +6,16 @@
package io.opentelemetry.javaagent.instrumentation.vertx.kafka.v3_6;
import static io.opentelemetry.instrumentation.testing.util.TelemetryDataUtil.orderByRootSpanKind;
-import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
-import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.trace.SpanKind;
import io.opentelemetry.sdk.trace.data.LinkData;
import io.opentelemetry.sdk.trace.data.SpanData;
import io.opentelemetry.sdk.trace.data.StatusData;
-import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import io.vertx.kafka.client.producer.KafkaProducerRecord;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
-import org.assertj.core.api.AbstractLongAssert;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.MethodOrderer;
import org.junit.jupiter.api.Order;
@@ -50,9 +45,11 @@ class NoReceiveTelemetryBatchRecordsVertxKafkaTes
  void shouldCreateSpansForBatchReceiveAndProcess() throws InterruptedException {
    assertTrue(consumerReady.await(30, TimeUnit.SECONDS));
-   sendBatchMessages(
-       KafkaProducerRecord.create("testBatchTopic", "10", "testSpan1"),
-       KafkaProducerRecord.create("testBatchTopic", "20", "testSpan2"));
+   KafkaProducerRecord<String, String> record1 =
+       KafkaProducerRecord.create("testBatchTopic", "10", "testSpan1");
+   KafkaProducerRecord<String, String> record2 =
+       KafkaProducerRecord.create("testBatchTopic", "20", "testSpan2");
+   sendBatchMessages(record1, record2);
    AtomicReference<SpanData> producer1 = new AtomicReference<>();
    AtomicReference<SpanData> producer2 = new AtomicReference<>();
@@ -60,7 +57,7 @@ class NoReceiveTelemetryBatchRecordsVertxKafkaTes
    testing.waitAndAssertSortedTraces(
        orderByRootSpanKind(SpanKind.INTERNAL, SpanKind.CONSUMER),
        trace -> {
-         trace.hasSpansSatisfyingExactly(
+         trace.hasSpansSatisfyingExactlyInAnyOrder(
              span -> span.hasName("producer"),
              // first record
@@ -68,34 +65,12 @@ class NoReceiveTelemetryBatchRecordsVertxKafkaTes
              span ->
                  span.hasName("testBatchTopic send")
                      .hasKind(SpanKind.PRODUCER)
                      .hasParent(trace.getSpan(0))
-                     .hasAttributesSatisfyingExactly(
-                         equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                         equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
-                         equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                         satisfies(
-                             SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
-                             AbstractLongAssert::isNotNegative),
-                         satisfies(
-                             AttributeKey.longKey("messaging.kafka.message.offset"),
-                             AbstractLongAssert::isNotNegative)),
+                     .hasAttributesSatisfyingExactly(sendAttributes(record1)),
              span ->
                  span.hasName("testBatchTopic process")
                      .hasKind(SpanKind.CONSUMER)
                      .hasParent(trace.getSpan(1))
-                     .hasAttributesSatisfyingExactly(
-                         equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                         equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
-                         equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                         equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
-                         satisfies(
-                             SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
-                             AbstractLongAssert::isNotNegative),
-                         satisfies(
-                             SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
-                             AbstractLongAssert::isNotNegative),
-                         satisfies(
-                             AttributeKey.longKey("messaging.kafka.message.offset"),
-                             AbstractLongAssert::isNotNegative)),
+                     .hasAttributesSatisfyingExactly(processAttributes(record1)),
              span -> span.hasName("process testSpan1").hasParent(trace.getSpan(2)),
              // second record
@@ -103,34 +78,12 @@ class NoReceiveTelemetryBatchRecordsVertxKafkaTes
                  span.hasName("testBatchTopic send")
                      .hasKind(SpanKind.PRODUCER)
                      .hasParent(trace.getSpan(0))
-                     .hasAttributesSatisfyingExactly(
-                         equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                         equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
-                         equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                         satisfies(
-                             SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
-                             AbstractLongAssert::isNotNegative),
-                         satisfies(
-                             AttributeKey.longKey("messaging.kafka.message.offset"),
-                             AbstractLongAssert::isNotNegative)),
+                     .hasAttributesSatisfyingExactly(sendAttributes(record2)),
              span ->
                  span.hasName("testBatchTopic process")
                      .hasKind(SpanKind.CONSUMER)
                      .hasParent(trace.getSpan(4))
-                     .hasAttributesSatisfyingExactly(
-                         equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                         equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
-                         equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                         equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
-                         satisfies(
-                             SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
-                             AbstractLongAssert::isNotNegative),
-                         satisfies(
-                             SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
-                             AbstractLongAssert::isNotNegative),
-                         satisfies(
-                             AttributeKey.longKey("messaging.kafka.message.offset"),
-                             AbstractLongAssert::isNotNegative)),
+                     .hasAttributesSatisfyingExactly(processAttributes(record2)),
              span -> span.hasName("process testSpan2").hasParent(trace.getSpan(5)));
          producer1.set(trace.getSpan(1));
@@ -146,12 +99,7 @@ class NoReceiveTelemetryBatchRecordsVertxKafkaTes
                    .hasLinks(
                        LinkData.create(producer1.get().getSpanContext()),
                        LinkData.create(producer2.get().getSpanContext()))
-                   .hasAttributesSatisfyingExactly(
-                       equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                       equalTo(
-                           SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
-                       equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                       equalTo(SemanticAttributes.MESSAGING_OPERATION, "process")),
+                   .hasAttributesSatisfyingExactly(batchProcessAttributes("testBatchTopic")),
            span -> span.hasName("batch consumer").hasParent(trace.getSpan(0))));
  }
@@ -160,7 +108,9 @@ class NoReceiveTelemetryBatchRecordsVertxKafkaTes
  void shouldHandleFailureInKafkaBatchListener() throws InterruptedException {
    assertTrue(consumerReady.await(30, TimeUnit.SECONDS));
-   sendBatchMessages(KafkaProducerRecord.create("testBatchTopic", "10", "error"));
+   KafkaProducerRecord<String, String> record =
+       KafkaProducerRecord.create("testBatchTopic", "10", "error");
+   sendBatchMessages(record);
    // make sure that the consumer eats up any leftover records
    kafkaConsumer.resume();
@@ -176,34 +126,12 @@ class NoReceiveTelemetryBatchRecordsVertxKafkaTes
                  span.hasName("testBatchTopic send")
                      .hasKind(SpanKind.PRODUCER)
                      .hasParent(trace.getSpan(0))
-                     .hasAttributesSatisfyingExactly(
-                         equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                         equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
-                         equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                         satisfies(
-                             SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
-                             AbstractLongAssert::isNotNegative),
-                         satisfies(
-                             AttributeKey.longKey("messaging.kafka.message.offset"),
-                             AbstractLongAssert::isNotNegative)),
+                     .hasAttributesSatisfyingExactly(sendAttributes(record)),
              span ->
                  span.hasName("testBatchTopic process")
                      .hasKind(SpanKind.CONSUMER)
                      .hasParent(trace.getSpan(1))
-                     .hasAttributesSatisfyingExactly(
-                         equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                         equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
-                         equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                         equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
-                         satisfies(
-                             SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
-                             AbstractLongAssert::isNotNegative),
-                         satisfies(
-                             SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
-                             AbstractLongAssert::isNotNegative),
-                         satisfies(
-                             AttributeKey.longKey("messaging.kafka.message.offset"),
-                             AbstractLongAssert::isNotNegative)),
+                     .hasAttributesSatisfyingExactly(processAttributes(record)),
              span -> span.hasName("process error").hasParent(trace.getSpan(2)));
          producer.set(trace.getSpan(1));
@@ -217,12 +145,7 @@ class NoReceiveTelemetryBatchRecordsVertxKafkaTes
                    .hasLinks(LinkData.create(producer.get().getSpanContext()))
                    .hasStatus(StatusData.error())
                    .hasException(new IllegalArgumentException("boom"))
-                   .hasAttributesSatisfyingExactly(
-                       equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                       equalTo(
-                           SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
-                       equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                       equalTo(SemanticAttributes.MESSAGING_OPERATION, "process")),
+                   .hasAttributesSatisfyingExactly(batchProcessAttributes("testBatchTopic")),
            span -> span.hasName("batch consumer").hasParent(trace.getSpan(0))));
  }
}
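Note the one non-mechanical change in this file: hasSpansSatisfyingExactly became hasSpansSatisfyingExactlyInAnyOrder, presumably because without receive telemetry the two records' send/process subtrees can complete in either order. The semantics, in a standalone analogue: an exact, ordered match is relaxed to an exact match over an unordered set.

import java.util.Arrays;
import java.util.HashSet;

class InAnyOrderAnalogy {
  public static void main(String[] args) {
    // ordered, exact: fails when positions differ
    System.out.println(
        Arrays.asList("send record1", "send record2")
            .equals(Arrays.asList("send record2", "send record1"))); // false
    // any order, exact: same elements, same count, positions free
    System.out.println(
        new HashSet<>(Arrays.asList("send record1", "send record2"))
            .equals(new HashSet<>(Arrays.asList("send record2", "send record1")))); // true
  }
}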
View File
@@ -5,18 +5,13 @@
package io.opentelemetry.javaagent.instrumentation.vertx.kafka.v3_6;
-import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
-import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.trace.SpanKind;
import io.opentelemetry.sdk.trace.data.StatusData;
-import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import io.vertx.kafka.client.producer.KafkaProducerRecord;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
-import org.assertj.core.api.AbstractLongAssert;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
@@ -42,13 +37,10 @@ class NoReceiveTelemetrySingleRecordVertxKafkaTes
  void shouldCreateSpansForSingleRecordProcess() throws InterruptedException {
    assertTrue(consumerReady.await(30, TimeUnit.SECONDS));
+   KafkaProducerRecord<String, String> record =
+       KafkaProducerRecord.create("testSingleTopic", "10", "testSpan");
    CountDownLatch sent = new CountDownLatch(1);
-   testing.runWithSpan(
-       "producer",
-       () ->
-           sendRecord(
-               KafkaProducerRecord.create("testSingleTopic", "10", "testSpan"),
-               result -> sent.countDown()));
+   testing.runWithSpan("producer", () -> sendRecord(record, result -> sent.countDown()));
    assertTrue(sent.await(30, TimeUnit.SECONDS));
    testing.waitAndAssertTraces(
@@ -59,36 +51,12 @@ class NoReceiveTelemetrySingleRecordVertxKafkaTes
                span.hasName("testSingleTopic send")
                    .hasKind(SpanKind.PRODUCER)
                    .hasParent(trace.getSpan(0))
-                   .hasAttributesSatisfyingExactly(
-                       equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                       equalTo(
-                           SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
-                       equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                       satisfies(
-                           SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
-                           AbstractLongAssert::isNotNegative),
-                       satisfies(
-                           AttributeKey.longKey("messaging.kafka.message.offset"),
-                           AbstractLongAssert::isNotNegative)),
+                   .hasAttributesSatisfyingExactly(sendAttributes(record)),
            span ->
                span.hasName("testSingleTopic process")
                    .hasKind(SpanKind.CONSUMER)
                    .hasParent(trace.getSpan(1))
-                   .hasAttributesSatisfyingExactly(
-                       equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                       equalTo(
-                           SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
-                       equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                       equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
-                       satisfies(
-                           SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
-                           AbstractLongAssert::isNotNegative),
-                       satisfies(
-                           SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
-                           AbstractLongAssert::isNotNegative),
-                       satisfies(
-                           AttributeKey.longKey("messaging.kafka.message.offset"),
-                           AbstractLongAssert::isNotNegative)),
+                   .hasAttributesSatisfyingExactly(processAttributes(record)),
            span -> span.hasName("consumer").hasParent(trace.getSpan(2))));
  }
@@ -96,13 +64,10 @@ class NoReceiveTelemetrySingleRecordVertxKafkaTes
  void shouldHandleFailureInSingleRecordHandler() throws InterruptedException {
    assertTrue(consumerReady.await(30, TimeUnit.SECONDS));
+   KafkaProducerRecord<String, String> record =
+       KafkaProducerRecord.create("testSingleTopic", "10", "error");
    CountDownLatch sent = new CountDownLatch(1);
-   testing.runWithSpan(
-       "producer",
-       () ->
-           sendRecord(
-               KafkaProducerRecord.create("testSingleTopic", "10", "error"),
-               result -> sent.countDown()));
+   testing.runWithSpan("producer", () -> sendRecord(record, result -> sent.countDown()));
    assertTrue(sent.await(30, TimeUnit.SECONDS));
    testing.waitAndAssertTraces(
@@ -113,38 +78,14 @@ class NoReceiveTelemetrySingleRecordVertxKafkaTes
                span.hasName("testSingleTopic send")
                    .hasKind(SpanKind.PRODUCER)
                    .hasParent(trace.getSpan(0))
-                   .hasAttributesSatisfyingExactly(
-                       equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                       equalTo(
-                           SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
-                       equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                       satisfies(
-                           SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
-                           AbstractLongAssert::isNotNegative),
-                       satisfies(
-                           AttributeKey.longKey("messaging.kafka.message.offset"),
-                           AbstractLongAssert::isNotNegative)),
+                   .hasAttributesSatisfyingExactly(sendAttributes(record)),
            span ->
                span.hasName("testSingleTopic process")
                    .hasKind(SpanKind.CONSUMER)
                    .hasParent(trace.getSpan(1))
                    .hasStatus(StatusData.error())
                    .hasException(new IllegalArgumentException("boom"))
-                   .hasAttributesSatisfyingExactly(
-                       equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
-                       equalTo(
-                           SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
-                       equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
-                       equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
-                       satisfies(
-                           SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
-                           AbstractLongAssert::isNotNegative),
-                       satisfies(
-                           SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
-                           AbstractLongAssert::isNotNegative),
-                       satisfies(
-                           AttributeKey.longKey("messaging.kafka.message.offset"),
-                           AbstractLongAssert::isNotNegative)),
+                   .hasAttributesSatisfyingExactly(processAttributes(record)),
            span -> span.hasName("consumer").hasParent(trace.getSpan(2))));
  }
}
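Worth noting while reading these hunks: the payload-size check that was previously only isNotNegative becomes exact in the shared helper further down, computed as the UTF-8 byte length of the record value. A one-liner shows the expected number for the record used in this file:

import java.nio.charset.StandardCharsets;

class PayloadSizeSketch {
  public static void main(String[] args) {
    // matches MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES as asserted by processAttributes(record)
    System.out.println("testSpan".getBytes(StandardCharsets.UTF_8).length); // 8
  }
}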
View File
@@ -5,10 +5,15 @@
package io.opentelemetry.javaagent.instrumentation.vertx.kafka.v3_6;
+import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
+import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;
import static org.junit.jupiter.api.Assertions.assertTrue;
+import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.instrumentation.testing.junit.AgentInstrumentationExtension;
import io.opentelemetry.instrumentation.testing.junit.InstrumentationExtension;
+import io.opentelemetry.sdk.testing.assertj.AttributeAssertion;
+import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import io.vertx.core.AsyncResult;
import io.vertx.core.Handler;
import io.vertx.core.Vertx;
@@ -19,12 +24,17 @@ import io.vertx.kafka.client.producer.RecordMetadata;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
+import java.nio.charset.StandardCharsets;
import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
+import org.assertj.core.api.AbstractLongAssert;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.extension.RegisterExtension;
@@ -182,4 +192,87 @@ public abstract class AbstractVertxKafkaTest {
      throw new AssertionError("Failed producer send/write invocation", e);
    }
  }
+  protected static List<AttributeAssertion> sendAttributes(
+      KafkaProducerRecord<String, String> record) {
+    List<AttributeAssertion> assertions =
+        new ArrayList<>(
+            Arrays.asList(
+                equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
+                equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, record.topic()),
+                equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
+                satisfies(
+                    SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
+                    AbstractLongAssert::isNotNegative),
+                satisfies(
+                    SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
+                    AbstractLongAssert::isNotNegative)));
+    String messageKey = record.key();
+    if (messageKey != null) {
+      assertions.add(equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, messageKey));
+    }
+    return assertions;
+  }
+
+  protected static List<AttributeAssertion> receiveAttributes(String topic) {
+    return batchConsumerAttributes(topic, "receive");
+  }
+
+  protected static List<AttributeAssertion> batchProcessAttributes(String topic) {
+    return batchConsumerAttributes(topic, "process");
+  }
+
+  private static List<AttributeAssertion> batchConsumerAttributes(String topic, String operation) {
+    List<AttributeAssertion> assertions =
+        new ArrayList<>(
+            Arrays.asList(
+                equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
+                equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, topic),
+                equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
+                equalTo(SemanticAttributes.MESSAGING_OPERATION, operation)));
+    // consumer group id is not available in version 0.11
+    if (Boolean.getBoolean("testLatestDeps")) {
+      assertions.add(equalTo(SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP, "test"));
+    }
+    return assertions;
+  }
+
+  protected static List<AttributeAssertion> processAttributes(
+      KafkaProducerRecord<String, String> record) {
+    List<AttributeAssertion> assertions =
+        new ArrayList<>(
+            Arrays.asList(
+                equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
+                equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, record.topic()),
+                equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
+                equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
+                satisfies(
+                    SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
+                    AbstractLongAssert::isNotNegative),
+                satisfies(
+                    SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
+                    AbstractLongAssert::isNotNegative)));
+    if (Boolean.getBoolean("otel.instrumentation.kafka.experimental-span-attributes")) {
+      assertions.add(
+          satisfies(
+              AttributeKey.longKey("kafka.record.queue_time_ms"),
+              AbstractLongAssert::isNotNegative));
+    }
+    // consumer group id is not available in version 0.11
+    if (Boolean.getBoolean("testLatestDeps")) {
+      assertions.add(equalTo(SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP, "test"));
+    }
+    String messageKey = record.key();
+    if (messageKey != null) {
+      assertions.add(equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, messageKey));
+    }
+    String messageValue = record.value();
+    if (messageValue != null) {
+      assertions.add(
+          equalTo(
+              SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
+              messageValue.getBytes(StandardCharsets.UTF_8).length));
+    }
+    return assertions;
+  }
}
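Two of the new assertions are gated on plain JVM system properties read via Boolean.getBoolean: the consumer-group check only runs against latest deps (per the comment above, the group id attribute is not available on kafka-clients 0.11), and the queue-time check only runs when what is presumably the agent's experimental Kafka span attributes flag is set. A standalone sketch of how those gates behave:

class AssertionGates {
  public static void main(String[] args) {
    // true only when the JVM was started with -DtestLatestDeps=true
    System.out.println(Boolean.getBoolean("testLatestDeps"));
    // true only with -Dotel.instrumentation.kafka.experimental-span-attributes=true
    System.out.println(
        Boolean.getBoolean("otel.instrumentation.kafka.experimental-span-attributes"));
  }
}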