Implement `messaging.kafka.*` attributes spec (part 1) (#7824)

Part 1 of #7771

It turned out to be a lot more than I initially expected; I'll introduce the
clientId attribute in part 2.
Mateusz Rzeszutek 2023-02-16 22:22:17 +01:00 committed by GitHub
parent 0e4e696ed4
commit 12ea869855
49 changed files with 986 additions and 1202 deletions

View File

@@ -21,6 +21,7 @@ import java.util.List;
import net.bytebuddy.asm.Advice;
import net.bytebuddy.description.type.TypeDescription;
import net.bytebuddy.matcher.ElementMatcher;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
@@ -59,6 +60,7 @@ public class ConsumerRecordsInstrumentation implements TypeInstrumentation {
@SuppressWarnings("unused")
public static class IterableAdvice {
@SuppressWarnings("unchecked")
@Advice.OnMethodExit(suppress = Throwable.class)
public static <K, V> void wrap(
@Advice.This ConsumerRecords<?, ?> records,
@@ -69,13 +71,16 @@ public class ConsumerRecordsInstrumentation implements TypeInstrumentation {
// case it's important to overwrite the leaked span instead of suppressing the correct span
// (https://github.com/open-telemetry/opentelemetry-java-instrumentation/issues/1947)
Context receiveContext = VirtualField.find(ConsumerRecords.class, Context.class).get(records);
iterable = TracingIterable.wrap(iterable, receiveContext);
Consumer<K, V> consumer =
VirtualField.find(ConsumerRecords.class, Consumer.class).get(records);
iterable = TracingIterable.wrap(iterable, receiveContext, consumer);
}
}
@SuppressWarnings("unused")
public static class ListAdvice {
@SuppressWarnings("unchecked")
@Advice.OnMethodExit(suppress = Throwable.class)
public static <K, V> void wrap(
@Advice.This ConsumerRecords<?, ?> records,
@@ -86,13 +91,16 @@ public class ConsumerRecordsInstrumentation implements TypeInstrumentation {
// case it's important to overwrite the leaked span instead of suppressing the correct span
// (https://github.com/open-telemetry/opentelemetry-java-instrumentation/issues/1947)
Context receiveContext = VirtualField.find(ConsumerRecords.class, Context.class).get(records);
list = TracingList.wrap(list, receiveContext);
Consumer<K, V> consumer =
VirtualField.find(ConsumerRecords.class, Consumer.class).get(records);
list = TracingList.wrap(list, receiveContext, consumer);
}
}
@SuppressWarnings("unused")
public static class IteratorAdvice {
@SuppressWarnings("unchecked")
@Advice.OnMethodExit(suppress = Throwable.class)
public static <K, V> void wrap(
@Advice.This ConsumerRecords<?, ?> records,
@@ -103,7 +111,9 @@ public class ConsumerRecordsInstrumentation implements TypeInstrumentation {
// case it's important to overwrite the leaked span instead of suppressing the correct span
// (https://github.com/open-telemetry/opentelemetry-java-instrumentation/issues/1947)
Context receiveContext = VirtualField.find(ConsumerRecords.class, Context.class).get(records);
iterator = TracingIterator.wrap(iterator, receiveContext);
Consumer<K, V> consumer =
VirtualField.find(ConsumerRecords.class, Consumer.class).get(records);
iterator = TracingIterator.wrap(iterator, receiveContext, consumer);
}
}
}
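Editor's sketch (not part of the diff; only names from this commit are used): the three advices above all rely on the same VirtualField pattern. The Consumer is attached to the batch when poll() returns and is read back when the batch is iterated, so no Kafka method signatures change:

VirtualField<ConsumerRecords<?, ?>, Consumer<?, ?>> field =
    VirtualField.find(ConsumerRecords.class, Consumer.class);
field.set(records, consumer);               // done at poll() exit (see the next file)
Consumer<?, ?> stored = field.get(records); // done in the iterable/list/iterator advice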

View File

@@ -18,6 +18,7 @@ import static net.bytebuddy.matcher.ElementMatchers.takesArguments;
import io.opentelemetry.context.Context;
import io.opentelemetry.instrumentation.api.internal.InstrumenterUtil;
import io.opentelemetry.instrumentation.api.util.VirtualField;
import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
import io.opentelemetry.instrumentation.kafka.internal.Timer;
import io.opentelemetry.javaagent.bootstrap.kafka.KafkaClientsConsumerProcessTracing;
import io.opentelemetry.javaagent.extension.instrumentation.TypeInstrumentation;
@@ -28,6 +29,7 @@ import java.util.Properties;
import net.bytebuddy.asm.Advice;
import net.bytebuddy.description.type.TypeDescription;
import net.bytebuddy.matcher.ElementMatcher;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
@@ -83,6 +85,7 @@ public class KafkaConsumerInstrumentation implements TypeInstrumentation {
@Advice.OnMethodExit(suppress = Throwable.class, onThrowable = Throwable.class)
public static void onExit(
@Advice.Enter Timer timer,
@Advice.This Consumer<?, ?> consumer,
@Advice.Return ConsumerRecords<?, ?> records,
@Advice.Thrown Throwable error) {
@@ -91,8 +94,17 @@ public class KafkaConsumerInstrumentation implements TypeInstrumentation {
return;
}
// we're attaching the consumer to the records to be able to retrieve things like consumer
// group or clientId later
VirtualField<ConsumerRecords<?, ?>, Consumer<?, ?>> consumerRecordsConsumer =
VirtualField.find(ConsumerRecords.class, Consumer.class);
consumerRecordsConsumer.set(records, consumer);
Context parentContext = currentContext();
if (consumerReceiveInstrumenter().shouldStart(parentContext, records)) {
ConsumerAndRecord<ConsumerRecords<?, ?>> request =
ConsumerAndRecord.create(consumer, records);
if (consumerReceiveInstrumenter().shouldStart(parentContext, request)) {
// disable process tracing and store the receive span for each individual record too
boolean previousValue = KafkaClientsConsumerProcessTracing.setEnabled(false);
try {
@@ -100,15 +112,14 @@ public class KafkaConsumerInstrumentation implements TypeInstrumentation {
InstrumenterUtil.startAndEnd(
consumerReceiveInstrumenter(),
parentContext,
records,
request,
null,
error,
timer.startTime(),
timer.now());
// we're storing the context of the receive span so that process spans can use it as
// parent
// context even though the span has ended
// parent context even though the span has ended
// this is the suggested behavior according to the spec batch receive scenario:
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/messaging.md#batch-receiving
VirtualField<ConsumerRecords<?, ?>, Context> consumerRecordsContext =
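A sketch of the likely continuation of this pattern, consistent with the advices earlier in this commit that read the field back: the receive span's Context is attached to the batch so that per-record process spans can use it as a parent even though the receive span has already ended.

// receiveContext: the Context returned by InstrumenterUtil.startAndEnd(...) above
VirtualField<ConsumerRecords<?, ?>, Context> consumerRecordsContext =
    VirtualField.find(ConsumerRecords.class, Context.class);
consumerRecordsContext.set(records, receiveContext);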

View File

@@ -7,6 +7,7 @@ package io.opentelemetry.javaagent.instrumentation.kafkaclients.v0_11;
import io.opentelemetry.api.GlobalOpenTelemetry;
import io.opentelemetry.instrumentation.api.instrumenter.Instrumenter;
import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
import io.opentelemetry.instrumentation.kafka.internal.KafkaInstrumenterFactory;
import io.opentelemetry.instrumentation.kafka.internal.OpenTelemetryMetricsReporter;
import io.opentelemetry.instrumentation.kafka.internal.OpenTelemetrySupplier;
@@ -34,8 +35,10 @@ public final class KafkaSingletons {
.getBoolean("otel.instrumentation.kafka.metric-reporter.enabled", true);
private static final Instrumenter<ProducerRecord<?, ?>, RecordMetadata> PRODUCER_INSTRUMENTER;
private static final Instrumenter<ConsumerRecords<?, ?>, Void> CONSUMER_RECEIVE_INSTRUMENTER;
private static final Instrumenter<ConsumerRecord<?, ?>, Void> CONSUMER_PROCESS_INSTRUMENTER;
private static final Instrumenter<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void>
CONSUMER_RECEIVE_INSTRUMENTER;
private static final Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void>
CONSUMER_PROCESS_INSTRUMENTER;
static {
KafkaInstrumenterFactory instrumenterFactory =
@@ -59,11 +62,13 @@ public final class KafkaSingletons {
return PRODUCER_INSTRUMENTER;
}
public static Instrumenter<ConsumerRecords<?, ?>, Void> consumerReceiveInstrumenter() {
public static Instrumenter<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void>
consumerReceiveInstrumenter() {
return CONSUMER_RECEIVE_INSTRUMENTER;
}
public static Instrumenter<ConsumerRecord<?, ?>, Void> consumerProcessInstrumenter() {
public static Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void>
consumerProcessInstrumenter() {
return CONSUMER_PROCESS_INSTRUMENTER;
}
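A sketch of how callers drive the process instrumenter with the new request type (Context and Scope from io.opentelemetry.context; the processing loop itself is assumed):

ConsumerAndRecord<ConsumerRecord<?, ?>> request = ConsumerAndRecord.create(consumer, record);
if (consumerProcessInstrumenter().shouldStart(parentContext, request)) {
  Context context = consumerProcessInstrumenter().start(parentContext, request);
  try (Scope ignored = context.makeCurrent()) {
    // process the record
  } finally {
    consumerProcessInstrumenter().end(context, request, null, null);
  }
}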

View File

@@ -9,23 +9,30 @@ import io.opentelemetry.context.Context;
import io.opentelemetry.javaagent.bootstrap.kafka.KafkaClientsConsumerProcessTracing;
import java.util.Iterator;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
public class TracingIterable<K, V> implements Iterable<ConsumerRecord<K, V>> {
private final Iterable<ConsumerRecord<K, V>> delegate;
@Nullable private final Context receiveContext;
private final Consumer<K, V> consumer;
private boolean firstIterator = true;
protected TracingIterable(
Iterable<ConsumerRecord<K, V>> delegate, @Nullable Context receiveContext) {
Iterable<ConsumerRecord<K, V>> delegate,
@Nullable Context receiveContext,
Consumer<K, V> consumer) {
this.delegate = delegate;
this.receiveContext = receiveContext;
this.consumer = consumer;
}
public static <K, V> Iterable<ConsumerRecord<K, V>> wrap(
Iterable<ConsumerRecord<K, V>> delegate, @Nullable Context receiveContext) {
Iterable<ConsumerRecord<K, V>> delegate,
@Nullable Context receiveContext,
Consumer<K, V> consumer) {
if (KafkaClientsConsumerProcessTracing.wrappingEnabled()) {
return new TracingIterable<>(delegate, receiveContext);
return new TracingIterable<>(delegate, receiveContext, consumer);
}
return delegate;
}
@@ -37,7 +44,7 @@ public class TracingIterable<K, V> implements Iterable<ConsumerRecord<K, V>> {
// However, this is not thread-safe; usually the first (hopefully only) traversal of
// ConsumerRecords is performed in the same thread that called poll()
if (firstIterator) {
it = TracingIterator.wrap(delegate.iterator(), receiveContext);
it = TracingIterator.wrap(delegate.iterator(), receiveContext, consumer);
firstIterator = false;
} else {
it = delegate.iterator();

View File

@@ -9,35 +9,44 @@ import static io.opentelemetry.javaagent.instrumentation.kafkaclients.v0_11.Kafk
import io.opentelemetry.context.Context;
import io.opentelemetry.context.Scope;
import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
import io.opentelemetry.javaagent.bootstrap.kafka.KafkaClientsConsumerProcessTracing;
import java.util.Iterator;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
public class TracingIterator<K, V> implements Iterator<ConsumerRecord<K, V>> {
private final Iterator<ConsumerRecord<K, V>> delegateIterator;
private final Context parentContext;
private final Consumer<K, V> consumer;
/*
* Note: this may create problems if this iterator is used from different threads. But
* at the moment we cannot do much about this.
*/
@Nullable private ConsumerRecord<?, ?> currentRequest;
@Nullable private ConsumerAndRecord<ConsumerRecord<?, ?>> currentRequest;
@Nullable private Context currentContext;
@Nullable private Scope currentScope;
private TracingIterator(
Iterator<ConsumerRecord<K, V>> delegateIterator, @Nullable Context receiveContext) {
Iterator<ConsumerRecord<K, V>> delegateIterator,
@Nullable Context receiveContext,
Consumer<K, V> consumer) {
this.delegateIterator = delegateIterator;
// use the receive CONSUMER span as parent if it's available
this.parentContext = receiveContext != null ? receiveContext : Context.current();
this.consumer = consumer;
}
public static <K, V> Iterator<ConsumerRecord<K, V>> wrap(
Iterator<ConsumerRecord<K, V>> delegateIterator, @Nullable Context receiveContext) {
Iterator<ConsumerRecord<K, V>> delegateIterator,
@Nullable Context receiveContext,
Consumer<K, V> consumer) {
if (KafkaClientsConsumerProcessTracing.wrappingEnabled()) {
return new TracingIterator<>(delegateIterator, receiveContext);
return new TracingIterator<>(delegateIterator, receiveContext, consumer);
}
return delegateIterator;
}
@@ -60,7 +69,7 @@ public class TracingIterator<K, V> implements Iterator<ConsumerRecord<K, V>> {
// (https://github.com/open-telemetry/opentelemetry-java-instrumentation/issues/1947)
ConsumerRecord<K, V> next = delegateIterator.next();
if (next != null && KafkaClientsConsumerProcessTracing.wrappingEnabled()) {
currentRequest = next;
currentRequest = ConsumerAndRecord.create(consumer, next);
currentContext = consumerProcessInstrumenter().start(parentContext, currentRequest);
currentScope = currentContext.makeCurrent();
}
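For context, a simplified sketch of the full next() lifecycle; the step that ends the previous record's span is outside this hunk, so the helper name used here is assumed:

@Override
public ConsumerRecord<K, V> next() {
  closeScopeAndEndSpan(); // assumed helper: ends the previous record's span and scope, if any
  ConsumerRecord<K, V> next = delegateIterator.next();
  if (next != null && KafkaClientsConsumerProcessTracing.wrappingEnabled()) {
    currentRequest = ConsumerAndRecord.create(consumer, next);
    currentContext = consumerProcessInstrumenter().start(parentContext, currentRequest);
    currentScope = currentContext.makeCurrent();
  }
  return next;
}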

View File

@@ -11,20 +11,26 @@ import java.util.Collection;
import java.util.List;
import java.util.ListIterator;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
public class TracingList<K, V> extends TracingIterable<K, V> implements List<ConsumerRecord<K, V>> {
private final List<ConsumerRecord<K, V>> delegate;
private TracingList(List<ConsumerRecord<K, V>> delegate, @Nullable Context receiveContext) {
super(delegate, receiveContext);
private TracingList(
List<ConsumerRecord<K, V>> delegate,
@Nullable Context receiveContext,
Consumer<K, V> consumer) {
super(delegate, receiveContext, consumer);
this.delegate = delegate;
}
public static <K, V> List<ConsumerRecord<K, V>> wrap(
List<ConsumerRecord<K, V>> delegate, @Nullable Context receiveContext) {
List<ConsumerRecord<K, V>> delegate,
@Nullable Context receiveContext,
Consumer<K, V> consumer) {
if (KafkaClientsConsumerProcessTracing.wrappingEnabled()) {
return new TracingList<>(delegate, receiveContext);
return new TracingList<>(delegate, receiveContext, consumer);
}
return delegate;
}
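From the application's point of view the wrapping is invisible; a hypothetical processing loop showing what triggers the per-record spans under the javaagent:

ConsumerRecords<Integer, String> records = consumer.poll(Duration.ofSeconds(5));
for (ConsumerRecord<Integer, String> record : records) {
  // a "process" span is current here; it ends when the wrapped iterator
  // advances to the next record
}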

View File

@@ -6,11 +6,8 @@
package io.opentelemetry.javaagent.instrumentation.kafkaclients.v0_11;
import static io.opentelemetry.instrumentation.testing.util.TelemetryDataUtil.orderByRootSpanKind;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;
import static org.assertj.core.api.Assertions.assertThat;
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.trace.SpanKind;
import io.opentelemetry.instrumentation.kafka.internal.KafkaClientBaseTest;
import io.opentelemetry.instrumentation.kafka.internal.KafkaClientPropagationBaseTest;
@@ -18,10 +15,8 @@ import io.opentelemetry.instrumentation.testing.junit.AgentInstrumentationExtens
import io.opentelemetry.instrumentation.testing.junit.InstrumentationExtension;
import io.opentelemetry.sdk.trace.data.LinkData;
import io.opentelemetry.sdk.trace.data.SpanData;
import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
@@ -30,7 +25,6 @@ import java.util.concurrent.atomic.AtomicReference;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.assertj.core.api.AbstractLongAssert;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;
@@ -51,7 +45,7 @@ class KafkaClientDefaultTest extends KafkaClientPropagationBaseTest {
"parent",
() -> {
ProducerRecord<Integer, String> producerRecord =
new ProducerRecord<>(SHARED_TOPIC, greeting);
new ProducerRecord<>(SHARED_TOPIC, 10, greeting);
if (testHeaders) {
producerRecord
.headers()
@@ -80,8 +74,8 @@ class KafkaClientDefaultTest extends KafkaClientPropagationBaseTest {
testing.runWithSpan(
"processing",
() -> {
assertThat(record.key()).isEqualTo(10);
assertThat(record.value()).isEqualTo(greeting);
assertThat(record.key()).isNull();
});
}
AtomicReference<SpanData> producerSpan = new AtomicReference<>();
@@ -89,85 +83,32 @@ class KafkaClientDefaultTest extends KafkaClientPropagationBaseTest {
orderByRootSpanKind(SpanKind.INTERNAL, SpanKind.CONSUMER),
trace -> {
trace.hasSpansSatisfyingExactly(
span -> {
span.hasName("parent").hasKind(SpanKind.INTERNAL).hasNoParent();
},
span -> {
span.hasName(SHARED_TOPIC + " send")
.hasKind(SpanKind.PRODUCER)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfying(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative));
if (testHeaders) {
span.hasAttributesSatisfying(
equalTo(
AttributeKey.stringArrayKey("messaging.header.test_message_header"),
Collections.singletonList("test")));
}
},
span -> {
span.hasName("producer callback")
.hasKind(SpanKind.INTERNAL)
.hasParent(trace.getSpan(0));
});
span -> span.hasName("parent").hasKind(SpanKind.INTERNAL).hasNoParent(),
span ->
span.hasName(SHARED_TOPIC + " send")
.hasKind(SpanKind.PRODUCER)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(sendAttributes("10", greeting, testHeaders)),
span ->
span.hasName("producer callback")
.hasKind(SpanKind.INTERNAL)
.hasParent(trace.getSpan(0)));
producerSpan.set(trace.getSpan(1));
},
trace ->
trace.hasSpansSatisfyingExactly(
span -> {
span.hasName(SHARED_TOPIC + " receive")
.hasKind(SpanKind.CONSUMER)
.hasNoParent()
.hasAttributesSatisfying(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive"));
if (testHeaders) {
span.hasAttributesSatisfying(
equalTo(
AttributeKey.stringArrayKey("messaging.header.test_message_header"),
Collections.singletonList("test")));
}
},
span -> {
span.hasName(SHARED_TOPIC + " process")
.hasKind(SpanKind.CONSUMER)
.hasLinks(LinkData.create(producerSpan.get().getSpanContext()))
.hasParent(trace.getSpan(0))
.hasAttributesSatisfying(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
equalTo(
SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
greeting.getBytes(StandardCharsets.UTF_8).length),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("kafka.record.queue_time_ms"),
AbstractLongAssert::isNotNegative));
if (testHeaders) {
span.hasAttributesSatisfying(
equalTo(
AttributeKey.stringArrayKey("messaging.header.test_message_header"),
Collections.singletonList("test")));
}
},
span ->
span.hasName(SHARED_TOPIC + " receive")
.hasKind(SpanKind.CONSUMER)
.hasNoParent()
.hasAttributesSatisfyingExactly(receiveAttributes(testHeaders)),
span ->
span.hasName(SHARED_TOPIC + " process")
.hasKind(SpanKind.CONSUMER)
.hasLinks(LinkData.create(producerSpan.get().getSpanContext()))
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
processAttributes("10", greeting, testHeaders)),
span -> span.hasName("processing").hasParent(trace.getSpan(1))));
}
@@ -192,59 +133,26 @@ class KafkaClientDefaultTest extends KafkaClientPropagationBaseTest {
orderByRootSpanKind(SpanKind.INTERNAL, SpanKind.CONSUMER),
trace -> {
trace.hasSpansSatisfyingExactly(
span -> {
span.hasName(SHARED_TOPIC + " send")
.hasKind(SpanKind.PRODUCER)
.hasNoParent()
.hasAttributesSatisfying(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_TOMBSTONE, true),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative));
});
span ->
span.hasName(SHARED_TOPIC + " send")
.hasKind(SpanKind.PRODUCER)
.hasNoParent()
.hasAttributesSatisfyingExactly(sendAttributes(null, null, false)));
producerSpan.set(trace.getSpan(0));
},
trace -> {
trace.hasSpansSatisfyingExactly(
span -> {
span.hasName(SHARED_TOPIC + " receive")
.hasKind(SpanKind.CONSUMER)
.hasNoParent()
.hasAttributesSatisfying(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive"));
},
span -> {
span.hasName(SHARED_TOPIC + " process")
.hasKind(SpanKind.CONSUMER)
.hasLinks(LinkData.create(producerSpan.get().getSpanContext()))
.hasParent(trace.getSpan(0))
.hasAttributesSatisfying(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_TOMBSTONE, true),
equalTo(SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES, -1L),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("kafka.record.queue_time_ms"),
AbstractLongAssert::isNotNegative));
});
});
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName(SHARED_TOPIC + " receive")
.hasKind(SpanKind.CONSUMER)
.hasNoParent()
.hasAttributesSatisfyingExactly(receiveAttributes(false)),
span ->
span.hasName(SHARED_TOPIC + " process")
.hasKind(SpanKind.CONSUMER)
.hasLinks(LinkData.create(producerSpan.get().getSpanContext()))
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(processAttributes(null, null, false))));
}
@DisplayName("test records(TopicPartition) kafka consume")
@@ -276,55 +184,25 @@ class KafkaClientDefaultTest extends KafkaClientPropagationBaseTest {
orderByRootSpanKind(SpanKind.INTERNAL, SpanKind.CONSUMER),
trace -> {
trace.hasSpansSatisfyingExactly(
span -> {
span.hasName(SHARED_TOPIC + " send")
.hasKind(SpanKind.PRODUCER)
.hasNoParent()
.hasAttributesSatisfying(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION, partition),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative));
});
span ->
span.hasName(SHARED_TOPIC + " send")
.hasKind(SpanKind.PRODUCER)
.hasNoParent()
.hasAttributesSatisfyingExactly(sendAttributes(null, greeting, false)));
producerSpan.set(trace.getSpan(0));
},
trace -> {
trace.hasSpansSatisfyingExactly(
span -> {
span.hasName(SHARED_TOPIC + " receive")
.hasKind(SpanKind.CONSUMER)
.hasNoParent()
.hasAttributesSatisfying(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive"));
},
span -> {
span.hasName(SHARED_TOPIC + " process")
.hasKind(SpanKind.CONSUMER)
.hasLinks(LinkData.create(producerSpan.get().getSpanContext()))
.hasParent(trace.getSpan(0))
.hasAttributesSatisfying(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
equalTo(
SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
greeting.getBytes(StandardCharsets.UTF_8).length),
equalTo(SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION, partition),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("kafka.record.queue_time_ms"),
AbstractLongAssert::isNotNegative));
});
});
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName(SHARED_TOPIC + " receive")
.hasKind(SpanKind.CONSUMER)
.hasNoParent()
.hasAttributesSatisfyingExactly(receiveAttributes(false)),
span ->
span.hasName(SHARED_TOPIC + " process")
.hasKind(SpanKind.CONSUMER)
.hasLinks(LinkData.create(producerSpan.get().getSpanContext()))
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(processAttributes(null, greeting, false))));
}
}

View File

@@ -5,23 +5,17 @@
package io.opentelemetry.javaagent.instrumentation.kafkaclients.v0_11;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;
import static org.assertj.core.api.Assertions.assertThat;
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.trace.SpanKind;
import io.opentelemetry.instrumentation.kafka.internal.KafkaClientPropagationBaseTest;
import io.opentelemetry.instrumentation.testing.junit.AgentInstrumentationExtension;
import io.opentelemetry.instrumentation.testing.junit.InstrumentationExtension;
import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.assertj.core.api.AbstractLongAssert;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;
@@ -37,24 +31,13 @@ class KafkaClientPropagationDisabledTest extends KafkaClientPropagationBaseTest
producer.send(new ProducerRecord<>(SHARED_TOPIC, message));
testing.waitAndAssertTraces(
trace -> {
trace.hasSpansSatisfyingExactly(
span -> {
span.hasName(SHARED_TOPIC + " send")
.hasKind(SpanKind.PRODUCER)
.hasNoParent()
.hasAttributesSatisfying(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative));
});
});
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName(SHARED_TOPIC + " send")
.hasKind(SpanKind.PRODUCER)
.hasNoParent()
.hasAttributesSatisfyingExactly(sendAttributes(null, message, false))));
awaitUntilConsumerIsReady();
@@ -68,49 +51,20 @@ class KafkaClientPropagationDisabledTest extends KafkaClientPropagationBaseTest
}
testing.waitAndAssertTraces(
trace -> {
trace.hasSpansSatisfyingExactly(
span -> {
span.hasName(SHARED_TOPIC + " send")
.hasKind(SpanKind.PRODUCER)
.hasNoParent()
.hasAttributesSatisfying(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative));
});
},
trace -> {
trace.hasSpansSatisfyingExactly(
span -> {
span.hasName(SHARED_TOPIC + " process")
.hasKind(SpanKind.CONSUMER)
.hasLinks(Collections.emptyList())
.hasAttributesSatisfying(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
equalTo(
SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
message.getBytes(StandardCharsets.UTF_8).length),
equalTo(SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION, partition),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("kafka.record.queue_time_ms"),
AbstractLongAssert::isNotNegative));
},
span -> {
span.hasName("processing").hasParent(trace.getSpan(0));
});
});
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName(SHARED_TOPIC + " send")
.hasKind(SpanKind.PRODUCER)
.hasNoParent()
.hasAttributesSatisfyingExactly(sendAttributes(null, message, false))),
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName(SHARED_TOPIC + " process")
.hasKind(SpanKind.CONSUMER)
.hasLinks(Collections.emptyList())
.hasAttributesSatisfyingExactly(processAttributes(null, message, false)),
span -> span.hasName("processing").hasParent(trace.getSpan(0))));
}
}

View File

@@ -5,19 +5,13 @@
package io.opentelemetry.javaagent.instrumentation.kafkaclients.v0_11;
import static io.opentelemetry.instrumentation.testing.util.TelemetryDataUtil.orderByRootSpanKind;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;
import static org.assertj.core.api.Assertions.assertThat;
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.trace.SpanKind;
import io.opentelemetry.instrumentation.kafka.internal.KafkaClientBaseTest;
import io.opentelemetry.instrumentation.kafka.internal.KafkaClientPropagationBaseTest;
import io.opentelemetry.instrumentation.testing.junit.AgentInstrumentationExtension;
import io.opentelemetry.instrumentation.testing.junit.InstrumentationExtension;
import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.List;
import java.util.concurrent.ExecutionException;
@@ -26,7 +20,6 @@ import java.util.concurrent.TimeoutException;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.assertj.core.api.AbstractLongAssert;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;
@@ -41,7 +34,7 @@ class KafkaClientSuppressReceiveSpansTest extends KafkaClientPropagationBaseTest
"parent",
() -> {
producer.send(
new ProducerRecord<>(SHARED_TOPIC, greeting),
new ProducerRecord<>(SHARED_TOPIC, 10, greeting),
(meta, ex) -> {
if (ex == null) {
testing.runWithSpan("producer callback", () -> {});
@@ -59,63 +52,33 @@ class KafkaClientSuppressReceiveSpansTest extends KafkaClientPropagationBaseTest
testing.runWithSpan(
"processing",
() -> {
assertThat(record.key()).isEqualTo(10);
assertThat(record.value()).isEqualTo(greeting);
assertThat(record.key()).isNull();
});
}
testing.waitAndAssertTraces(
trace -> {
trace.hasSpansSatisfyingExactly(
span -> {
span.hasName("parent").hasKind(SpanKind.INTERNAL).hasNoParent();
},
span -> {
span.hasName(SHARED_TOPIC + " send")
.hasKind(SpanKind.PRODUCER)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfying(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative));
},
span -> {
span.hasName(SHARED_TOPIC + " process")
.hasKind(SpanKind.CONSUMER)
.hasParent(trace.getSpan(1))
.hasAttributesSatisfying(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
equalTo(
SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
greeting.getBytes(StandardCharsets.UTF_8).length),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("kafka.record.queue_time_ms"),
AbstractLongAssert::isNotNegative));
},
span -> {
span.hasName("processing").hasKind(SpanKind.INTERNAL).hasParent(trace.getSpan(2));
},
span -> {
span.hasName("producer callback")
.hasKind(SpanKind.INTERNAL)
.hasParent(trace.getSpan(0));
});
});
trace ->
trace.hasSpansSatisfyingExactly(
span -> span.hasName("parent").hasKind(SpanKind.INTERNAL).hasNoParent(),
span ->
span.hasName(SHARED_TOPIC + " send")
.hasKind(SpanKind.PRODUCER)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(sendAttributes("10", greeting, false)),
span ->
span.hasName(SHARED_TOPIC + " process")
.hasKind(SpanKind.CONSUMER)
.hasParent(trace.getSpan(1))
.hasAttributesSatisfyingExactly(processAttributes("10", greeting, false)),
span ->
span.hasName("processing")
.hasKind(SpanKind.INTERNAL)
.hasParent(trace.getSpan(2)),
span ->
span.hasName("producer callback")
.hasKind(SpanKind.INTERNAL)
.hasParent(trace.getSpan(0))));
}
@Test
@@ -133,48 +96,19 @@ class KafkaClientSuppressReceiveSpansTest extends KafkaClientPropagationBaseTest
assertThat(record.key()).isNull();
}
testing.waitAndAssertSortedTraces(
orderByRootSpanKind(SpanKind.INTERNAL, SpanKind.CONSUMER),
trace -> {
trace.hasSpansSatisfyingExactly(
span -> {
span.hasName(SHARED_TOPIC + " send")
.hasKind(SpanKind.PRODUCER)
.hasNoParent()
.hasAttributesSatisfying(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_TOMBSTONE, true),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative));
},
span -> {
span.hasName(SHARED_TOPIC + " process")
.hasKind(SpanKind.CONSUMER)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfying(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_TOMBSTONE, true),
equalTo(SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES, -1L),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("kafka.record.queue_time_ms"),
AbstractLongAssert::isNotNegative));
});
});
testing.waitAndAssertTraces(
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName(SHARED_TOPIC + " send")
.hasKind(SpanKind.PRODUCER)
.hasNoParent()
.hasAttributesSatisfyingExactly(sendAttributes(null, null, false)),
span ->
span.hasName(SHARED_TOPIC + " process")
.hasKind(SpanKind.CONSUMER)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(processAttributes(null, null, false))));
}
@Test
@@ -200,44 +134,18 @@ class KafkaClientSuppressReceiveSpansTest extends KafkaClientPropagationBaseTest
assertThat(record.key()).isNull();
}
testing.waitAndAssertSortedTraces(
orderByRootSpanKind(SpanKind.INTERNAL, SpanKind.CONSUMER),
trace -> {
trace.hasSpansSatisfyingExactly(
span -> {
span.hasName(SHARED_TOPIC + " send")
.hasKind(SpanKind.PRODUCER)
.hasNoParent()
.hasAttributesSatisfying(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION, partition),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative));
},
span -> {
span.hasName(SHARED_TOPIC + " process")
.hasKind(SpanKind.CONSUMER)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfying(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
equalTo(
SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
greeting.getBytes(StandardCharsets.UTF_8).length),
equalTo(SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION, partition),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("kafka.record.queue_time_ms"),
AbstractLongAssert::isNotNegative));
});
});
testing.waitAndAssertTraces(
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName(SHARED_TOPIC + " send")
.hasKind(SpanKind.PRODUCER)
.hasNoParent()
.hasAttributesSatisfyingExactly(sendAttributes(null, greeting, false)),
span ->
span.hasName(SHARED_TOPIC + " process")
.hasKind(SpanKind.CONSUMER)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(processAttributes(null, greeting, false))));
}
}

View File

@@ -5,10 +5,20 @@
package io.opentelemetry.instrumentation.kafka.internal;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.sdk.testing.assertj.AttributeAssertion;
import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
@@ -25,6 +35,7 @@ import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.assertj.core.api.AbstractLongAssert;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.TestInstance;
@@ -138,4 +149,98 @@ public abstract class KafkaClientBaseTest {
}
consumer.seekToBeginning(Collections.emptyList());
}
protected static List<AttributeAssertion> sendAttributes(
String messageKey, String messageValue, boolean testHeaders) {
List<AttributeAssertion> assertions =
new ArrayList<>(
Arrays.asList(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative)));
if (messageKey != null) {
assertions.add(equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, messageKey));
}
if (messageValue == null) {
assertions.add(equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_TOMBSTONE, true));
}
if (testHeaders) {
assertions.add(
equalTo(
AttributeKey.stringArrayKey("messaging.header.test_message_header"),
Collections.singletonList("test")));
}
return assertions;
}
protected static List<AttributeAssertion> receiveAttributes(boolean testHeaders) {
List<AttributeAssertion> assertions =
new ArrayList<>(
Arrays.asList(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive")));
// consumer group id is not available in version 0.11
if (Boolean.getBoolean("testLatestDeps")) {
assertions.add(equalTo(SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP, "test"));
}
if (testHeaders) {
assertions.add(
equalTo(
AttributeKey.stringArrayKey("messaging.header.test_message_header"),
Collections.singletonList("test")));
}
return assertions;
}
protected static List<AttributeAssertion> processAttributes(
String messageKey, String messageValue, boolean testHeaders) {
List<AttributeAssertion> assertions =
new ArrayList<>(
Arrays.asList(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, SHARED_TOPIC),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("kafka.record.queue_time_ms"),
AbstractLongAssert::isNotNegative)));
// consumer group id is not available in version 0.11
if (Boolean.getBoolean("testLatestDeps")) {
assertions.add(equalTo(SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP, "test"));
}
if (messageKey != null) {
assertions.add(equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, messageKey));
}
if (messageValue == null) {
assertions.add(equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_TOMBSTONE, true));
// TODO shouldn't set -1 in this case
assertions.add(equalTo(SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES, -1L));
} else {
assertions.add(
equalTo(
SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
messageValue.getBytes(StandardCharsets.UTF_8).length));
}
if (testHeaders) {
assertions.add(
equalTo(
AttributeKey.stringArrayKey("messaging.header.test_message_header"),
Collections.singletonList("test")));
}
return assertions;
}
}
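For reference, this is how the helpers plug into the span assertions in the tests above (sketch, inside a hasSpansSatisfyingExactly lambda):

span.hasName(SHARED_TOPIC + " process")
    .hasKind(SpanKind.CONSUMER)
    .hasAttributesSatisfyingExactly(processAttributes("10", greeting, false));

Because hasAttributesSatisfyingExactly is exhaustive, any attribute added later (e.g. clientId in part 2) will also have to be added to these lists.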

View File

@@ -8,14 +8,12 @@ package io.opentelemetry.instrumentation.kafkaclients.v2_6;
import static java.util.logging.Level.WARNING;
import io.opentelemetry.api.OpenTelemetry;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.context.Context;
import io.opentelemetry.context.Scope;
import io.opentelemetry.context.propagation.TextMapGetter;
import io.opentelemetry.context.propagation.TextMapPropagator;
import io.opentelemetry.context.propagation.TextMapSetter;
import io.opentelemetry.instrumentation.api.instrumenter.Instrumenter;
import io.opentelemetry.instrumentation.kafka.internal.KafkaConsumerRecordGetter;
import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
import io.opentelemetry.instrumentation.kafka.internal.KafkaHeadersSetter;
import io.opentelemetry.instrumentation.kafka.internal.OpenTelemetryMetricsReporter;
import io.opentelemetry.instrumentation.kafka.internal.OpenTelemetrySupplier;
@@ -43,20 +41,18 @@ import org.apache.kafka.common.metrics.MetricsReporter;
public final class KafkaTelemetry {
private static final Logger logger = Logger.getLogger(KafkaTelemetry.class.getName());
private static final TextMapGetter<ConsumerRecord<?, ?>> GETTER =
KafkaConsumerRecordGetter.INSTANCE;
private static final TextMapSetter<Headers> SETTER = KafkaHeadersSetter.INSTANCE;
private final OpenTelemetry openTelemetry;
private final Instrumenter<ProducerRecord<?, ?>, RecordMetadata> producerInstrumenter;
private final Instrumenter<ConsumerRecord<?, ?>, Void> consumerProcessInstrumenter;
private final Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void>
consumerProcessInstrumenter;
private final boolean producerPropagationEnabled;
KafkaTelemetry(
OpenTelemetry openTelemetry,
Instrumenter<ProducerRecord<?, ?>, RecordMetadata> producerInstrumenter,
Instrumenter<ConsumerRecord<?, ?>, Void> consumerProcessInstrumenter,
Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> consumerProcessInstrumenter,
boolean producerPropagationEnabled) {
this.openTelemetry = openTelemetry;
this.producerInstrumenter = producerInstrumenter;
@@ -126,7 +122,7 @@ public final class KafkaTelemetry {
// ConsumerRecords<K, V> poll(long timeout)
// ConsumerRecords<K, V> poll(Duration duration)
if ("poll".equals(method.getName()) && result instanceof ConsumerRecords) {
buildAndFinishSpan((ConsumerRecords) result);
buildAndFinishSpan(consumer, (ConsumerRecords) result);
}
return result;
});
@@ -220,18 +216,16 @@ public final class KafkaTelemetry {
}
}
<K, V> void buildAndFinishSpan(ConsumerRecords<K, V> records) {
Context currentContext = Context.current();
<K, V> void buildAndFinishSpan(Consumer<K, V> consumer, ConsumerRecords<K, V> records) {
Context parentContext = Context.current();
for (ConsumerRecord<K, V> record : records) {
Context linkedContext = propagator().extract(currentContext, record, GETTER);
Context newContext = currentContext.with(Span.fromContext(linkedContext));
if (!consumerProcessInstrumenter.shouldStart(newContext, record)) {
ConsumerAndRecord<ConsumerRecord<?, ?>> request = ConsumerAndRecord.create(consumer, record);
if (!consumerProcessInstrumenter.shouldStart(parentContext, request)) {
continue;
}
Context current = consumerProcessInstrumenter.start(newContext, record);
consumerProcessInstrumenter.end(current, record, null, null);
Context context = consumerProcessInstrumenter.start(parentContext, request);
consumerProcessInstrumenter.end(context, request, null, null);
}
}
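Usage sketch, assuming the existing KafkaTelemetry.create(...) and wrap(Consumer) APIs:

KafkaTelemetry telemetry = KafkaTelemetry.create(openTelemetry);
Consumer<Integer, String> tracedConsumer = telemetry.wrap(consumer);
// the proxied poll() above calls buildAndFinishSpan(consumer, records),
// creating one "process" span per polled record
ConsumerRecords<Integer, String> records = tracedConsumer.poll(Duration.ofSeconds(5));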

View File

@@ -11,6 +11,7 @@ import com.google.errorprone.annotations.CanIgnoreReturnValue;
import io.opentelemetry.api.OpenTelemetry;
import io.opentelemetry.instrumentation.api.instrumenter.AttributesExtractor;
import io.opentelemetry.instrumentation.api.instrumenter.messaging.MessageOperation;
import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
import io.opentelemetry.instrumentation.kafka.internal.KafkaInstrumenterFactory;
import java.util.ArrayList;
import java.util.List;
@@ -25,8 +26,8 @@ public final class KafkaTelemetryBuilder {
private final OpenTelemetry openTelemetry;
private final List<AttributesExtractor<ProducerRecord<?, ?>, RecordMetadata>>
producerAttributesExtractors = new ArrayList<>();
private final List<AttributesExtractor<ConsumerRecord<?, ?>, Void>> consumerAttributesExtractors =
new ArrayList<>();
private final List<AttributesExtractor<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void>>
consumerAttributesExtractors = new ArrayList<>();
private List<String> capturedHeaders = emptyList();
private boolean captureExperimentalSpanAttributes = false;
private boolean propagationEnabled = true;
@@ -44,7 +45,7 @@ public final class KafkaTelemetryBuilder {
@CanIgnoreReturnValue
public KafkaTelemetryBuilder addConsumerAttributesExtractors(
AttributesExtractor<ConsumerRecord<?, ?>, Void> extractor) {
AttributesExtractor<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> extractor) {
consumerAttributesExtractors.add(extractor);
return this;
}
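A sketch of a custom extractor against the new generic signature ("my.kafka.offset" is a hypothetical attribute key, not part of this commit):

KafkaTelemetry telemetry =
    KafkaTelemetry.builder(openTelemetry)
        .addConsumerAttributesExtractors(
            new AttributesExtractor<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void>() {
              @Override
              public void onStart(
                  AttributesBuilder attributes,
                  Context parentContext,
                  ConsumerAndRecord<ConsumerRecord<?, ?>> request) {
                attributes.put("my.kafka.offset", request.record().offset()); // hypothetical key
              }

              @Override
              public void onEnd(
                  AttributesBuilder attributes,
                  Context context,
                  ConsumerAndRecord<ConsumerRecord<?, ?>> request,
                  Void unused,
                  Throwable error) {}
            })
        .build();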

View File

@@ -23,7 +23,7 @@ public class TracingConsumerInterceptor<K, V> implements ConsumerInterceptor<K,
@Override
public ConsumerRecords<K, V> onConsume(ConsumerRecords<K, V> records) {
telemetry.buildAndFinishSpan(records);
telemetry.buildAndFinishSpan(null, records);
return records;
}
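Note: the interceptor has no Consumer reference, so it passes null; ConsumerAndRecord.create accepts a @Nullable consumer (see the new class below), and consumer-scoped attributes such as the consumer group are simply omitted for interceptor-created spans. Sketch:

ConsumerAndRecord<ConsumerRecord<?, ?>> request = ConsumerAndRecord.create(null, record);
// request.consumerGroup() returns null, so no messaging.kafka.consumer.group attribute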

View File

@@ -0,0 +1,73 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.instrumentation.kafka.internal;
import com.google.auto.value.AutoValue;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.Consumer;
/**
* This class is internal and is hence not for public use. Its APIs are unstable and can change at
* any time.
*/
@AutoValue
public abstract class ConsumerAndRecord<R> {
public static <R> ConsumerAndRecord<R> create(@Nullable Consumer<?, ?> consumer, R record) {
return new AutoValue_ConsumerAndRecord<>(consumer, record);
}
@Nullable
public abstract Consumer<?, ?> consumer();
public abstract R record();
private static final MethodHandle GET_GROUP_METADATA;
private static final MethodHandle GET_GROUP_ID;
static {
MethodHandle getGroupMetadata;
MethodHandle getGroupId;
try {
Class<?> consumerGroupMetadata =
Class.forName("org.apache.kafka.clients.consumer.ConsumerGroupMetadata");
MethodHandles.Lookup lookup = MethodHandles.publicLookup();
getGroupMetadata =
lookup.findVirtual(
Consumer.class, "groupMetadata", MethodType.methodType(consumerGroupMetadata));
getGroupId =
lookup.findVirtual(consumerGroupMetadata, "groupId", MethodType.methodType(String.class));
} catch (ClassNotFoundException | IllegalAccessException | NoSuchMethodException ignored) {
getGroupMetadata = null;
getGroupId = null;
}
GET_GROUP_METADATA = getGroupMetadata;
GET_GROUP_ID = getGroupId;
}
@Nullable
String consumerGroup() {
if (GET_GROUP_METADATA == null || GET_GROUP_ID == null) {
return null;
}
Consumer<?, ?> consumer = consumer();
if (consumer == null) {
return null;
}
try {
Object metadata = GET_GROUP_METADATA.invoke(consumer);
return (String) GET_GROUP_ID.invoke(metadata);
} catch (Throwable e) {
return null;
}
}
}
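On kafka-clients 2.5+, where ConsumerGroupMetadata exists, the two MethodHandle invocations above are equivalent to this direct call (sketch); on older versions the static initializer leaves the handles null and consumerGroup() simply returns null:

String groupId = consumer.groupMetadata().groupId();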

View File

@@ -1,98 +0,0 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.instrumentation.kafka.internal;
import io.opentelemetry.instrumentation.api.instrumenter.messaging.MessagingAttributesGetter;
import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;
enum KafkaBatchProcessAttributesGetter
implements MessagingAttributesGetter<ConsumerRecords<?, ?>, Void> {
INSTANCE;
@Override
public String getSystem(ConsumerRecords<?, ?> records) {
return "kafka";
}
@Override
public String getDestinationKind(ConsumerRecords<?, ?> records) {
return SemanticAttributes.MessagingDestinationKindValues.TOPIC;
}
@Nullable
@Override
public String getDestination(ConsumerRecords<?, ?> records) {
Set<String> topics =
records.partitions().stream().map(TopicPartition::topic).collect(Collectors.toSet());
// only return topic when there's exactly one in the batch
return topics.size() == 1 ? topics.iterator().next() : null;
}
@Override
public boolean isTemporaryDestination(ConsumerRecords<?, ?> records) {
return false;
}
@Nullable
@Override
public String getProtocol(ConsumerRecords<?, ?> records) {
return null;
}
@Nullable
@Override
public String getProtocolVersion(ConsumerRecords<?, ?> records) {
return null;
}
@Nullable
@Override
public String getUrl(ConsumerRecords<?, ?> records) {
return null;
}
@Nullable
@Override
public String getConversationId(ConsumerRecords<?, ?> records) {
return null;
}
@Nullable
@Override
public Long getMessagePayloadSize(ConsumerRecords<?, ?> records) {
return null;
}
@Nullable
@Override
public Long getMessagePayloadCompressedSize(ConsumerRecords<?, ?> records) {
return null;
}
@Nullable
@Override
public String getMessageId(ConsumerRecords<?, ?> records, @Nullable Void unused) {
return null;
}
@Override
public List<String> getMessageHeader(ConsumerRecords<?, ?> records, String name) {
return StreamSupport.stream(records.spliterator(), false)
.flatMap(
consumerRecord ->
StreamSupport.stream(consumerRecord.headers().headers(name).spliterator(), false))
.map(header -> new String(header.value(), StandardCharsets.UTF_8))
.collect(Collectors.toList());
}
}

View File

@@ -14,9 +14,10 @@ import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
final class KafkaBatchProcessSpanLinksExtractor
implements SpanLinksExtractor<ConsumerRecords<?, ?>> {
implements SpanLinksExtractor<ConsumerAndRecord<ConsumerRecords<?, ?>>> {
private final SpanLinksExtractor<ConsumerRecord<?, ?>> singleRecordLinkExtractor;
private final SpanLinksExtractor<ConsumerAndRecord<ConsumerRecord<?, ?>>>
singleRecordLinkExtractor;
KafkaBatchProcessSpanLinksExtractor(TextMapPropagator propagator) {
this.singleRecordLinkExtractor =
@@ -25,12 +26,17 @@ final class KafkaBatchProcessSpanLinksExtractor
@Override
public void extract(
SpanLinksBuilder spanLinks, Context parentContext, ConsumerRecords<?, ?> records) {
SpanLinksBuilder spanLinks,
Context parentContext,
ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
for (ConsumerRecord<?, ?> record : records) {
for (ConsumerRecord<?, ?> record : consumerAndRecords.record()) {
// explicitly passing root to avoid a situation where context propagation is turned off and
// the parent (CONSUMER receive) span gets linked
singleRecordLinkExtractor.extract(spanLinks, Context.root(), record);
singleRecordLinkExtractor.extract(
spanLinks,
Context.root(),
ConsumerAndRecord.create(consumerAndRecords.consumer(), record));
}
}
}

View File

@@ -1,47 +0,0 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.instrumentation.kafka.internal;
import static io.opentelemetry.api.common.AttributeKey.longKey;
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.common.AttributesBuilder;
import io.opentelemetry.context.Context;
import io.opentelemetry.instrumentation.api.instrumenter.AttributesExtractor;
import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.ConsumerRecord;
/**
* This class is internal and is hence not for public use. Its APIs are unstable and can change at
* any time.
*/
public final class KafkaConsumerAdditionalAttributesExtractor
implements AttributesExtractor<ConsumerRecord<?, ?>, Void> {
// TODO: remove this constant when this attribute appears in SemanticAttributes
private static final AttributeKey<Long> MESSAGING_KAFKA_MESSAGE_OFFSET =
longKey("messaging.kafka.message.offset");
@Override
public void onStart(
AttributesBuilder attributes, Context parentContext, ConsumerRecord<?, ?> consumerRecord) {
attributes.put(
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION, (long) consumerRecord.partition());
attributes.put(MESSAGING_KAFKA_MESSAGE_OFFSET, consumerRecord.offset());
if (consumerRecord.value() == null) {
attributes.put(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_TOMBSTONE, true);
}
}
@Override
public void onEnd(
AttributesBuilder attributes,
Context context,
ConsumerRecord<?, ?> consumerRecord,
@Nullable Void unused,
@Nullable Throwable error) {}
}

View File

@@ -0,0 +1,57 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.instrumentation.kafka.internal;
import io.opentelemetry.api.common.AttributesBuilder;
import io.opentelemetry.context.Context;
import io.opentelemetry.instrumentation.api.instrumenter.AttributesExtractor;
import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import java.nio.ByteBuffer;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.ConsumerRecord;
final class KafkaConsumerAttributesExtractor
implements AttributesExtractor<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> {
@Override
public void onStart(
AttributesBuilder attributes,
Context parentContext,
ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
ConsumerRecord<?, ?> record = consumerAndRecord.record();
attributes.put(SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION, (long) record.partition());
attributes.put(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET, record.offset());
Object key = record.key();
if (key != null && canSerialize(key.getClass())) {
attributes.put(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, key.toString());
}
if (record.value() == null) {
attributes.put(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_TOMBSTONE, true);
}
String consumerGroup = consumerAndRecord.consumerGroup();
if (consumerGroup != null) {
attributes.put(SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP, consumerGroup);
}
}
private static boolean canSerialize(Class<?> keyClass) {
// we assume here that keys can be serialized by simply calling toString(),
// which does not work for byte[] or ByteBuffer
return !(keyClass.isArray() || keyClass == ByteBuffer.class);
}
@Override
public void onEnd(
AttributesBuilder attributes,
Context context,
ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord,
@Nullable Void unused,
@Nullable Throwable error) {}
}
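A short sketch of what canSerialize admits (illustrative calls; the method is private to this class):

canSerialize(String.class);     // true  -> key recorded as messaging.kafka.message.key
canSerialize(Integer.class);    // true  -> toString() yields e.g. "10"
canSerialize(byte[].class);     // false -> binary key skipped
canSerialize(ByteBuffer.class); // false -> binary key skipped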

View File

@@ -14,78 +14,78 @@ import java.util.stream.StreamSupport;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.ConsumerRecord;
/**
* This class is internal and is hence not for public use. Its APIs are unstable and can change at
* any time.
*/
public enum KafkaConsumerAttributesGetter
implements MessagingAttributesGetter<ConsumerRecord<?, ?>, Void> {
enum KafkaConsumerAttributesGetter
implements MessagingAttributesGetter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> {
INSTANCE;
@Override
public String getSystem(ConsumerRecord<?, ?> consumerRecord) {
public String getSystem(ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
return "kafka";
}
@Override
public String getDestinationKind(ConsumerRecord<?, ?> consumerRecord) {
public String getDestinationKind(ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
return SemanticAttributes.MessagingDestinationKindValues.TOPIC;
}
@Override
public String getDestination(ConsumerRecord<?, ?> consumerRecord) {
return consumerRecord.topic();
public String getDestination(ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
return consumerAndRecord.record().topic();
}
@Override
public boolean isTemporaryDestination(ConsumerRecord<?, ?> consumerRecord) {
public boolean isTemporaryDestination(ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
return false;
}
@Override
@Nullable
public String getProtocol(ConsumerRecord<?, ?> consumerRecord) {
public String getProtocol(ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
return null;
}
@Override
@Nullable
public String getProtocolVersion(ConsumerRecord<?, ?> consumerRecord) {
public String getProtocolVersion(ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
return null;
}
@Override
@Nullable
public String getUrl(ConsumerRecord<?, ?> consumerRecord) {
public String getUrl(ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
return null;
}
@Override
@Nullable
public String getConversationId(ConsumerRecord<?, ?> consumerRecord) {
public String getConversationId(ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
return null;
}
@Override
public Long getMessagePayloadSize(ConsumerRecord<?, ?> consumerRecord) {
return (long) consumerRecord.serializedValueSize();
public Long getMessagePayloadSize(ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
return (long) consumerAndRecord.record().serializedValueSize();
}
@Override
@Nullable
public Long getMessagePayloadCompressedSize(ConsumerRecord<?, ?> consumerRecord) {
public Long getMessagePayloadCompressedSize(
ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
return null;
}
@Override
@Nullable
public String getMessageId(ConsumerRecord<?, ?> consumerRecord, @Nullable Void unused) {
public String getMessageId(
ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord, @Nullable Void unused) {
return null;
}
@Override
public List<String> getMessageHeader(ConsumerRecord<?, ?> consumerRecord, String name) {
return StreamSupport.stream(consumerRecord.headers().headers(name).spliterator(), false)
public List<String> getMessageHeader(
ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord, String name) {
return StreamSupport.stream(
consumerAndRecord.record().headers().headers(name).spliterator(), false)
.map(header -> new String(header.value(), StandardCharsets.UTF_8))
.collect(Collectors.toList());
}
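
Every getter above now receives a ConsumerAndRecord<ConsumerRecord<?, ?>> pair rather than the bare record. The carrier class itself is added elsewhere in this commit; the following is a hedged sketch of its shape, inferred from the accessors used here (create(), record(), consumerGroup()) — the real implementation may resolve the group id differently:

import com.google.auto.value.AutoValue;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.Consumer;

@AutoValue
abstract class ConsumerAndRecordSketch<R> {

  static <R> ConsumerAndRecordSketch<R> create(@Nullable Consumer<?, ?> consumer, R record) {
    return new AutoValue_ConsumerAndRecordSketch<>(consumer, record);
  }

  @Nullable
  abstract Consumer<?, ?> consumer();

  abstract R record();

  @Nullable
  String consumerGroup() {
    Consumer<?, ?> consumer = consumer();
    // Consumer.groupMetadata() only exists in kafka-clients 2.5+, which is likely why
    // the tests later in this commit assert the consumer group attribute only under
    // testLatestDeps
    return consumer == null ? null : consumer.groupMetadata().groupId();
  }
}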

View File

@ -15,23 +15,21 @@ import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.record.TimestampType;
/**
* This class is internal and is hence not for public use. Its APIs are unstable and can change at
* any time.
*/
public final class KafkaConsumerExperimentalAttributesExtractor
implements AttributesExtractor<ConsumerRecord<?, ?>, Void> {
final class KafkaConsumerExperimentalAttributesExtractor
implements AttributesExtractor<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> {
private static final AttributeKey<Long> KAFKA_RECORD_QUEUE_TIME_MS =
longKey("kafka.record.queue_time_ms");
@Override
public void onStart(
AttributesBuilder attributes, Context parentContext, ConsumerRecord<?, ?> consumerRecord) {
AttributesBuilder attributes,
Context parentContext,
ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
// don't record a duration if the message was sent from an old Kafka client
if (consumerRecord.timestampType() != TimestampType.NO_TIMESTAMP_TYPE) {
long produceTime = consumerRecord.timestamp();
if (consumerAndRecord.record().timestampType() != TimestampType.NO_TIMESTAMP_TYPE) {
long produceTime = consumerAndRecord.record().timestamp();
// this attribute measures how much time elapsed between the message being produced
// and it being consumed, which can be helpful for identifying queue bottlenecks
attributes.put(
@ -43,7 +41,7 @@ public final class KafkaConsumerExperimentalAttributesExtractor
public void onEnd(
AttributesBuilder attributes,
Context context,
ConsumerRecord<?, ?> consumerRecord,
ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord,
@Nullable Void unused,
@Nullable Throwable error) {}
}
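
The hunk above is cut just before the attribute is written; a minimal sketch of the elided computation, assuming wall-clock milliseconds as "now" (the agent may use a different clock source):

// continues onStart() above
long queueTimeMs = Math.max(0, System.currentTimeMillis() - produceTime);
attributes.put(KAFKA_RECORD_QUEUE_TIME_MS, queueTimeMs);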

View File

@ -13,24 +13,20 @@ import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.header.Header;
/**
* This class is internal and is hence not for public use. Its APIs are unstable and can change at
* any time.
*/
public enum KafkaConsumerRecordGetter implements TextMapGetter<ConsumerRecord<?, ?>> {
enum KafkaConsumerRecordGetter implements TextMapGetter<ConsumerAndRecord<ConsumerRecord<?, ?>>> {
INSTANCE;
@Override
public Iterable<String> keys(ConsumerRecord<?, ?> carrier) {
return StreamSupport.stream(carrier.headers().spliterator(), false)
public Iterable<String> keys(ConsumerAndRecord<ConsumerRecord<?, ?>> carrier) {
return StreamSupport.stream(carrier.record().headers().spliterator(), false)
.map(Header::key)
.collect(Collectors.toList());
}
@Nullable
@Override
public String get(@Nullable ConsumerRecord<?, ?> carrier, String key) {
Header header = carrier.headers().lastHeader(key);
public String get(@Nullable ConsumerAndRecord<ConsumerRecord<?, ?>> carrier, String key) {
Header header = carrier.record().headers().lastHeader(key);
if (header == null) {
return null;
}
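
For context, a hedged sketch of how this getter is typically consumed — extracting the upstream trace context from the record headers. The propagator wiring shown here is illustrative, not part of this diff:

ConsumerAndRecord<ConsumerRecord<?, ?>> carrier = ConsumerAndRecord.create(consumer, record);
Context extracted =
    openTelemetry
        .getPropagators()
        .getTextMapPropagator()
        .extract(Context.root(), carrier, KafkaConsumerRecordGetter.INSTANCE);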

View File

@ -1,41 +0,0 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.instrumentation.kafka.internal;
import io.opentelemetry.context.propagation.TextMapGetter;
import java.nio.charset.StandardCharsets;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import javax.annotation.Nullable;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
/**
* This class is internal and is hence not for public use. Its APIs are unstable and can change at
* any time.
*/
public final class KafkaHeadersGetter implements TextMapGetter<Headers> {
@Override
public Iterable<String> keys(Headers carrier) {
return StreamSupport.stream(carrier.spliterator(), false)
.map(Header::key)
.collect(Collectors.toList());
}
@Nullable
@Override
public String get(@Nullable Headers carrier, String key) {
Header header = carrier.lastHeader(key);
if (header == null) {
return null;
}
byte[] value = header.value();
if (value == null) {
return null;
}
return new String(value, StandardCharsets.UTF_8);
}
}

View File

@ -98,44 +98,48 @@ public final class KafkaInstrumenterFactory {
.addAttributesExtractor(
buildMessagingAttributesExtractor(getter, operation, capturedHeaders))
.addAttributesExtractors(extractors)
.addAttributesExtractor(new KafkaProducerAdditionalAttributesExtractor())
.addAttributesExtractor(new KafkaProducerAttributesExtractor())
.setErrorCauseExtractor(errorCauseExtractor)
.buildInstrumenter(SpanKindExtractor.alwaysProducer());
}
public Instrumenter<ConsumerRecords<?, ?>, Void> createConsumerReceiveInstrumenter() {
public Instrumenter<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void>
createConsumerReceiveInstrumenter() {
KafkaReceiveAttributesGetter getter = KafkaReceiveAttributesGetter.INSTANCE;
MessageOperation operation = MessageOperation.RECEIVE;
return Instrumenter.<ConsumerRecords<?, ?>, Void>builder(
return Instrumenter.<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void>builder(
openTelemetry,
instrumentationName,
MessagingSpanNameExtractor.create(getter, operation))
.addAttributesExtractor(
buildMessagingAttributesExtractor(getter, operation, capturedHeaders))
.addAttributesExtractor(KafkaReceiveAttributesExtractor.INSTANCE)
.setErrorCauseExtractor(errorCauseExtractor)
.setEnabled(messagingReceiveInstrumentationEnabled)
.buildInstrumenter(SpanKindExtractor.alwaysConsumer());
}
public Instrumenter<ConsumerRecord<?, ?>, Void> createConsumerProcessInstrumenter() {
public Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void>
createConsumerProcessInstrumenter() {
return createConsumerOperationInstrumenter(MessageOperation.PROCESS, Collections.emptyList());
}
public Instrumenter<ConsumerRecord<?, ?>, Void> createConsumerOperationInstrumenter(
MessageOperation operation,
Iterable<AttributesExtractor<ConsumerRecord<?, ?>, Void>> extractors) {
public Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void>
createConsumerOperationInstrumenter(
MessageOperation operation,
Iterable<AttributesExtractor<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void>> extractors) {
KafkaConsumerAttributesGetter getter = KafkaConsumerAttributesGetter.INSTANCE;
InstrumenterBuilder<ConsumerRecord<?, ?>, Void> builder =
Instrumenter.<ConsumerRecord<?, ?>, Void>builder(
InstrumenterBuilder<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> builder =
Instrumenter.<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void>builder(
openTelemetry,
instrumentationName,
MessagingSpanNameExtractor.create(getter, operation))
.addAttributesExtractor(
buildMessagingAttributesExtractor(getter, operation, capturedHeaders))
.addAttributesExtractor(new KafkaConsumerAdditionalAttributesExtractor())
.addAttributesExtractor(new KafkaConsumerAttributesExtractor())
.addAttributesExtractors(extractors)
.setErrorCauseExtractor(errorCauseExtractor);
if (captureExperimentalSpanAttributes) {
@ -144,7 +148,7 @@ public final class KafkaInstrumenterFactory {
if (messagingReceiveInstrumentationEnabled) {
builder.addSpanLinksExtractor(
new PropagatorBasedSpanLinksExtractor<ConsumerRecord<?, ?>>(
new PropagatorBasedSpanLinksExtractor<>(
openTelemetry.getPropagators().getTextMapPropagator(),
KafkaConsumerRecordGetter.INSTANCE));
return builder.buildInstrumenter(SpanKindExtractor.alwaysConsumer());
@ -153,16 +157,18 @@ public final class KafkaInstrumenterFactory {
}
}
public Instrumenter<ConsumerRecords<?, ?>, Void> createBatchProcessInstrumenter() {
KafkaBatchProcessAttributesGetter getter = KafkaBatchProcessAttributesGetter.INSTANCE;
public Instrumenter<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void>
createBatchProcessInstrumenter() {
KafkaReceiveAttributesGetter getter = KafkaReceiveAttributesGetter.INSTANCE;
MessageOperation operation = MessageOperation.PROCESS;
return Instrumenter.<ConsumerRecords<?, ?>, Void>builder(
return Instrumenter.<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void>builder(
openTelemetry,
instrumentationName,
MessagingSpanNameExtractor.create(getter, operation))
.addAttributesExtractor(
buildMessagingAttributesExtractor(getter, operation, capturedHeaders))
.addAttributesExtractor(KafkaReceiveAttributesExtractor.INSTANCE)
.addSpanLinksExtractor(
new KafkaBatchProcessSpanLinksExtractor(
openTelemetry.getPropagators().getTextMapPropagator()))
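
A short usage sketch of the reshaped factory API (generic types and method names as introduced in this commit; the instrumentation name and the omitted setters are illustrative):

KafkaInstrumenterFactory factory =
    new KafkaInstrumenterFactory(GlobalOpenTelemetry.get(), "io.opentelemetry.kafka-clients-0.11");
Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> processInstrumenter =
    factory.createConsumerProcessInstrumenter();
Instrumenter<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void> receiveInstrumenter =
    factory.createConsumerReceiveInstrumenter();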

View File

@ -5,36 +5,37 @@
package io.opentelemetry.instrumentation.kafka.internal;
import static io.opentelemetry.api.common.AttributeKey.longKey;
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.common.AttributesBuilder;
import io.opentelemetry.context.Context;
import io.opentelemetry.instrumentation.api.instrumenter.AttributesExtractor;
import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import java.nio.ByteBuffer;
import javax.annotation.Nullable;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
/**
* This class is internal and is hence not for public use. Its APIs are unstable and can change at
* any time.
*/
final class KafkaProducerAdditionalAttributesExtractor
final class KafkaProducerAttributesExtractor
implements AttributesExtractor<ProducerRecord<?, ?>, RecordMetadata> {
// TODO: remove this constant when this attribute appears in SemanticAttributes
private static final AttributeKey<Long> MESSAGING_KAFKA_MESSAGE_OFFSET =
longKey("messaging.kafka.message.offset");
@Override
public void onStart(
AttributesBuilder attributes, Context parentContext, ProducerRecord<?, ?> producerRecord) {
if (producerRecord.value() == null) {
AttributesBuilder attributes, Context parentContext, ProducerRecord<?, ?> record) {
Object key = record.key();
if (key != null && canSerialize(key.getClass())) {
attributes.put(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, key.toString());
}
if (record.value() == null) {
attributes.put(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_TOMBSTONE, true);
}
}
private static boolean canSerialize(Class<?> keyClass) {
// we assume that a key can be serialized simply by calling toString();
// that assumption does not hold for byte[] or ByteBuffer keys
return !(keyClass.isArray() || keyClass == ByteBuffer.class);
}
@Override
public void onEnd(
AttributesBuilder attributes,
@ -46,7 +47,7 @@ final class KafkaProducerAdditionalAttributesExtractor
if (recordMetadata != null) {
attributes.put(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION, recordMetadata.partition());
attributes.put(MESSAGING_KAFKA_MESSAGE_OFFSET, recordMetadata.offset());
attributes.put(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET, recordMetadata.offset());
}
}
}
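
The partition and offset are recorded in onEnd() rather than onStart() because they only exist once the broker acknowledges the send. A sketch of where RecordMetadata becomes available in the plain Kafka producer API (the callback body is illustrative):

producer.send(
    record,
    (metadata, exception) -> {
      if (exception == null) {
        // only at this point are partition and offset known, matching what
        // onEnd() receives as the RecordMetadata response
        int partition = metadata.partition();
        long offset = metadata.offset();
      }
    });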

View File

@ -0,0 +1,38 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.instrumentation.kafka.internal;
import io.opentelemetry.api.common.AttributesBuilder;
import io.opentelemetry.context.Context;
import io.opentelemetry.instrumentation.api.instrumenter.AttributesExtractor;
import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.ConsumerRecords;
enum KafkaReceiveAttributesExtractor
implements AttributesExtractor<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void> {
INSTANCE;
@Override
public void onStart(
AttributesBuilder attributes,
Context parentContext,
ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
String consumerGroup = consumerAndRecords.consumerGroup();
if (consumerGroup != null) {
attributes.put(SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP, consumerGroup);
}
}
@Override
public void onEnd(
AttributesBuilder attributes,
Context context,
ConsumerAndRecord<ConsumerRecords<?, ?>> request,
@Nullable Void unused,
@Nullable Throwable error) {}
}

View File

@ -16,29 +16,25 @@ import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;
/**
* This class is internal and is hence not for public use. Its APIs are unstable and can change at
* any time.
*/
public enum KafkaReceiveAttributesGetter
implements MessagingAttributesGetter<ConsumerRecords<?, ?>, Void> {
enum KafkaReceiveAttributesGetter
implements MessagingAttributesGetter<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void> {
INSTANCE;
@Override
public String getSystem(ConsumerRecords<?, ?> consumerRecords) {
public String getSystem(ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
return "kafka";
}
@Override
public String getDestinationKind(ConsumerRecords<?, ?> consumerRecords) {
public String getDestinationKind(ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
return SemanticAttributes.MessagingDestinationKindValues.TOPIC;
}
@Override
@Nullable
public String getDestination(ConsumerRecords<?, ?> consumerRecords) {
public String getDestination(ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
Set<String> topics =
consumerRecords.partitions().stream()
consumerAndRecords.record().partitions().stream()
.map(TopicPartition::topic)
.collect(Collectors.toSet());
// only return topic when there's exactly one in the batch
@ -46,55 +42,59 @@ public enum KafkaReceiveAttributesGetter
}
@Override
public boolean isTemporaryDestination(ConsumerRecords<?, ?> consumerRecords) {
public boolean isTemporaryDestination(
ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
return false;
}
@Override
@Nullable
public String getProtocol(ConsumerRecords<?, ?> consumerRecords) {
public String getProtocol(ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
return null;
}
@Override
@Nullable
public String getProtocolVersion(ConsumerRecords<?, ?> consumerRecords) {
public String getProtocolVersion(ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
return null;
}
@Override
@Nullable
public String getUrl(ConsumerRecords<?, ?> consumerRecords) {
public String getUrl(ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
return null;
}
@Override
@Nullable
public String getConversationId(ConsumerRecords<?, ?> consumerRecords) {
public String getConversationId(ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
return null;
}
@Override
@Nullable
public Long getMessagePayloadSize(ConsumerRecords<?, ?> consumerRecords) {
public Long getMessagePayloadSize(ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
return null;
}
@Override
@Nullable
public Long getMessagePayloadCompressedSize(ConsumerRecords<?, ?> consumerRecords) {
public Long getMessagePayloadCompressedSize(
ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords) {
return null;
}
@Override
@Nullable
public String getMessageId(ConsumerRecords<?, ?> consumerRecords, @Nullable Void unused) {
public String getMessageId(
ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords, @Nullable Void unused) {
return null;
}
@Override
public List<String> getMessageHeader(ConsumerRecords<?, ?> records, String name) {
return StreamSupport.stream(records.spliterator(), false)
public List<String> getMessageHeader(
ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecords, String name) {
return StreamSupport.stream(consumerAndRecords.record().spliterator(), false)
.flatMap(
consumerRecord ->
StreamSupport.stream(consumerRecord.headers().headers(name).spliterator(), false))
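
The hunk cuts off the tail of getDestination(); per the comment above it, the elided return is presumably the single-topic rule, sketched here:

// return the topic only when the whole batch comes from exactly one topic
return topics.size() == 1 ? topics.iterator().next() : null;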

View File

@ -27,6 +27,8 @@ tasks {
withType<Test>().configureEach {
usesService(gradle.sharedServices.registrations["testcontainersBuildService"].service)
systemProperty("testLatestDeps", findProperty("testLatestDeps") as Boolean)
// TODO run tests both with and without experimental span attributes
jvmArgs("-Dotel.instrumentation.kafka.experimental-span-attributes=true")
}

View File

@ -7,6 +7,7 @@ package io.opentelemetry.javaagent.instrumentation.kafkastreams;
import io.opentelemetry.api.GlobalOpenTelemetry;
import io.opentelemetry.instrumentation.api.instrumenter.Instrumenter;
import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
import io.opentelemetry.instrumentation.kafka.internal.KafkaInstrumenterFactory;
import io.opentelemetry.javaagent.bootstrap.internal.ExperimentalConfig;
import io.opentelemetry.javaagent.bootstrap.internal.InstrumentationConfig;
@ -16,7 +17,7 @@ public final class KafkaStreamsSingletons {
private static final String INSTRUMENTATION_NAME = "io.opentelemetry.kafka-streams-0.11";
private static final Instrumenter<ConsumerRecord<?, ?>, Void> INSTRUMENTER =
private static final Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> INSTRUMENTER =
new KafkaInstrumenterFactory(GlobalOpenTelemetry.get(), INSTRUMENTATION_NAME)
.setCapturedHeaders(ExperimentalConfig.get().getMessagingHeaders())
.setCaptureExperimentalSpanAttributes(
@ -26,7 +27,7 @@ public final class KafkaStreamsSingletons {
ExperimentalConfig.get().messagingReceiveInstrumentationEnabled())
.createConsumerProcessInstrumenter();
public static Instrumenter<ConsumerRecord<?, ?>, Void> instrumenter() {
public static Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> instrumenter() {
return INSTRUMENTER;
}

View File

@ -15,6 +15,7 @@ import static net.bytebuddy.matcher.ElementMatchers.returns;
import io.opentelemetry.context.Context;
import io.opentelemetry.instrumentation.api.util.VirtualField;
import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
import io.opentelemetry.javaagent.extension.instrumentation.TypeInstrumentation;
import io.opentelemetry.javaagent.extension.instrumentation.TypeTransformer;
import net.bytebuddy.asm.Advice;
@ -61,11 +62,13 @@ public class PartitionGroupInstrumentation implements TypeInstrumentation {
// use the receive CONSUMER span as parent if it's available
Context parentContext = receiveContext != null ? receiveContext : currentContext();
ConsumerAndRecord<ConsumerRecord<?, ?>> request =
ConsumerAndRecord.create(null, record.value);
if (!instrumenter().shouldStart(parentContext, record.value)) {
if (!instrumenter().shouldStart(parentContext, request)) {
return;
}
Context context = instrumenter().start(parentContext, record.value);
Context context = instrumenter().start(parentContext, request);
holder.set(record.value, context, context.makeCurrent());
}
}

View File

@ -11,6 +11,7 @@ import static net.bytebuddy.matcher.ElementMatchers.isPublic;
import static net.bytebuddy.matcher.ElementMatchers.named;
import io.opentelemetry.context.Context;
import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
import io.opentelemetry.javaagent.extension.instrumentation.TypeInstrumentation;
import io.opentelemetry.javaagent.extension.instrumentation.TypeTransformer;
import net.bytebuddy.asm.Advice;
@ -51,7 +52,8 @@ public class StreamTaskInstrumentation implements TypeInstrumentation {
Context context = holder.getContext();
if (context != null) {
holder.closeScope();
instrumenter().end(context, holder.getRecord(), null, throwable);
instrumenter()
.end(context, ConsumerAndRecord.create(null, holder.getRecord()), null, throwable);
}
}
}

View File

@ -40,7 +40,7 @@ class KafkaStreamsDefaultTest extends KafkaStreamsBaseTest {
} catch (ClassNotFoundException | NoClassDefFoundError e) {
builder = Class.forName("org.apache.kafka.streams.StreamsBuilder").newInstance()
}
KStream<String, String> textLines = builder.stream(STREAM_PENDING)
KStream<Integer, String> textLines = builder.stream(STREAM_PENDING)
def values = textLines
.mapValues(new ValueMapper<String, String>() {
@Override
@ -53,11 +53,11 @@ class KafkaStreamsDefaultTest extends KafkaStreamsBaseTest {
KafkaStreams streams
try {
// Different API for test and latestDepTest.
values.to(Serdes.String(), Serdes.String(), STREAM_PROCESSED)
values.to(Serdes.Integer(), Serdes.String(), STREAM_PROCESSED)
streams = new KafkaStreams(builder, config)
} catch (MissingMethodException e) {
def producer = Class.forName("org.apache.kafka.streams.kstream.Produced")
.with(Serdes.String(), Serdes.String())
.with(Serdes.Integer(), Serdes.String())
values.to(STREAM_PROCESSED, producer)
streams = new KafkaStreams(builder.build(), config)
}
@ -65,7 +65,7 @@ class KafkaStreamsDefaultTest extends KafkaStreamsBaseTest {
when:
String greeting = "TESTING TESTING 123!"
KafkaStreamsBaseTest.producer.send(new ProducerRecord<>(STREAM_PENDING, greeting))
KafkaStreamsBaseTest.producer.send(new ProducerRecord<>(STREAM_PENDING, 10, greeting))
then:
awaitUntilConsumerIsReady()
@ -74,8 +74,8 @@ class KafkaStreamsDefaultTest extends KafkaStreamsBaseTest {
for (record in records) {
Span.current().setAttribute("testing", 123)
assert record.key() == 10
assert record.value() == greeting.toLowerCase()
assert record.key() == null
if (receivedHeaders == null) {
receivedHeaders = record.headers()
@ -101,7 +101,8 @@ class KafkaStreamsDefaultTest extends KafkaStreamsBaseTest {
"$SemanticAttributes.MESSAGING_DESTINATION_NAME" STREAM_PENDING
"$SemanticAttributes.MESSAGING_DESTINATION_KIND" "topic"
"$SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION" { it >= 0 }
"messaging.kafka.message.offset" 0
"$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET" 0
"$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY" "10"
}
}
@ -118,6 +119,9 @@ class KafkaStreamsDefaultTest extends KafkaStreamsBaseTest {
"$SemanticAttributes.MESSAGING_DESTINATION_NAME" STREAM_PENDING
"$SemanticAttributes.MESSAGING_DESTINATION_KIND" "topic"
"$SemanticAttributes.MESSAGING_OPERATION" "receive"
if (Boolean.getBoolean("testLatestDeps")) {
"$SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP" "test-application"
}
}
}
// kafka-stream CONSUMER
@ -133,7 +137,8 @@ class KafkaStreamsDefaultTest extends KafkaStreamsBaseTest {
"$SemanticAttributes.MESSAGING_OPERATION" "process"
"$SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES" Long
"$SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION" { it >= 0 }
"messaging.kafka.message.offset" 0
"$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET" 0
"$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY" "10"
"kafka.record.queue_time_ms" { it >= 0 }
"asdf" "testing"
}
@ -148,7 +153,7 @@ class KafkaStreamsDefaultTest extends KafkaStreamsBaseTest {
"$SemanticAttributes.MESSAGING_DESTINATION_NAME" STREAM_PROCESSED
"$SemanticAttributes.MESSAGING_DESTINATION_KIND" "topic"
"$SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION" { it >= 0 }
"messaging.kafka.message.offset" 0
"$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET" 0
}
}
@ -165,6 +170,9 @@ class KafkaStreamsDefaultTest extends KafkaStreamsBaseTest {
"$SemanticAttributes.MESSAGING_DESTINATION_NAME" STREAM_PROCESSED
"$SemanticAttributes.MESSAGING_DESTINATION_KIND" "topic"
"$SemanticAttributes.MESSAGING_OPERATION" "receive"
if (Boolean.getBoolean("testLatestDeps")) {
"$SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP" "test"
}
}
}
// kafka-clients CONSUMER process
@ -180,7 +188,11 @@ class KafkaStreamsDefaultTest extends KafkaStreamsBaseTest {
"$SemanticAttributes.MESSAGING_OPERATION" "process"
"$SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES" Long
"$SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION" { it >= 0 }
"messaging.kafka.message.offset" 0
"$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET" 0
"$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY" "10"
if (Boolean.getBoolean("testLatestDeps")) {
"$SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP" "test"
}
"kafka.record.queue_time_ms" { it >= 0 }
"testing" 123
}

View File

@ -40,7 +40,7 @@ class KafkaStreamsSuppressReceiveSpansTest extends KafkaStreamsBaseTest {
} catch (ClassNotFoundException | NoClassDefFoundError e) {
builder = Class.forName("org.apache.kafka.streams.StreamsBuilder").newInstance()
}
KStream<String, String> textLines = builder.stream(STREAM_PENDING)
KStream<Integer, String> textLines = builder.stream(STREAM_PENDING)
def values = textLines
.mapValues(new ValueMapper<String, String>() {
@Override
@ -53,11 +53,11 @@ class KafkaStreamsSuppressReceiveSpansTest extends KafkaStreamsBaseTest {
KafkaStreams streams
try {
// Different api for test and latestDepTest.
values.to(Serdes.String(), Serdes.String(), STREAM_PROCESSED)
values.to(Serdes.Integer(), Serdes.String(), STREAM_PROCESSED)
streams = new KafkaStreams(builder, config)
} catch (MissingMethodException e) {
def producer = Class.forName("org.apache.kafka.streams.kstream.Produced")
.with(Serdes.String(), Serdes.String())
.with(Serdes.Integer(), Serdes.String())
values.to(STREAM_PROCESSED, producer)
streams = new KafkaStreams(builder.build(), config)
}
@ -65,7 +65,7 @@ class KafkaStreamsSuppressReceiveSpansTest extends KafkaStreamsBaseTest {
when:
String greeting = "TESTING TESTING 123!"
KafkaStreamsBaseTest.producer.send(new ProducerRecord<>(STREAM_PENDING, greeting))
KafkaStreamsBaseTest.producer.send(new ProducerRecord<>(STREAM_PENDING, 10, greeting))
then:
// check that the message was received
@ -74,8 +74,8 @@ class KafkaStreamsSuppressReceiveSpansTest extends KafkaStreamsBaseTest {
for (record in records) {
Span.current().setAttribute("testing", 123)
assert record.key() == 10
assert record.value() == greeting.toLowerCase()
assert record.key() == null
if (receivedHeaders == null) {
receivedHeaders = record.headers()
@ -96,7 +96,8 @@ class KafkaStreamsSuppressReceiveSpansTest extends KafkaStreamsBaseTest {
"$SemanticAttributes.MESSAGING_DESTINATION_NAME" STREAM_PENDING
"$SemanticAttributes.MESSAGING_DESTINATION_KIND" "topic"
"$SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION" { it >= 0 }
"messaging.kafka.message.offset" 0
"$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET" 0
"$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY" "10"
}
}
// kafka-stream CONSUMER
@ -111,7 +112,8 @@ class KafkaStreamsSuppressReceiveSpansTest extends KafkaStreamsBaseTest {
"$SemanticAttributes.MESSAGING_OPERATION" "process"
"$SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES" Long
"$SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION" { it >= 0 }
"messaging.kafka.message.offset" 0
"$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET" 0
"$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY" "10"
"kafka.record.queue_time_ms" { it >= 0 }
"asdf" "testing"
}
@ -129,7 +131,7 @@ class KafkaStreamsSuppressReceiveSpansTest extends KafkaStreamsBaseTest {
"$SemanticAttributes.MESSAGING_DESTINATION_NAME" STREAM_PROCESSED
"$SemanticAttributes.MESSAGING_DESTINATION_KIND" "topic"
"$SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION" { it >= 0 }
"messaging.kafka.message.offset" 0
"$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET" 0
}
}
// kafka-clients CONSUMER process
@ -144,7 +146,11 @@ class KafkaStreamsSuppressReceiveSpansTest extends KafkaStreamsBaseTest {
"$SemanticAttributes.MESSAGING_OPERATION" "process"
"$SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES" Long
"$SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION" { it >= 0 }
"messaging.kafka.message.offset" 0
"$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET" 0
"$SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY" "10"
if (Boolean.getBoolean("testLatestDeps")) {
"$SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP" "test"
}
"kafka.record.queue_time_ms" { it >= 0 }
"testing" 123
}

View File

@ -5,7 +5,6 @@
package io.opentelemetry.instrumentation.spring.autoconfigure.kafka;
import static io.opentelemetry.api.common.AttributeKey.longKey;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;
@ -47,7 +46,7 @@ class KafkaIntegrationTest {
@BeforeAll
static void setUpKafka() {
kafka =
new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:5.4.3"))
new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:6.1.9"))
.waitingFor(Wait.forLogMessage(".*started \\(kafka.server.KafkaServer\\).*", 1))
.withStartupTimeout(Duration.ofMinutes(1));
kafka.start();
@ -112,8 +111,9 @@ class KafkaIntegrationTest {
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)),
SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10")),
span ->
span.hasName("testTopic process")
.hasKind(SpanKind.CONSUMER)
@ -130,8 +130,11 @@ class KafkaIntegrationTest {
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)),
SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP, "testListener")),
span -> span.hasName("consumer").hasParent(trace.getSpan(2))));
}

View File

@ -73,8 +73,9 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)));
SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10")));
producer.set(trace.getSpan(1));
},
@ -89,7 +90,10 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive")),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testSingleListener")),
span ->
span.hasName("testSingleTopic process")
.hasKind(SpanKind.CONSUMER)
@ -108,8 +112,12 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
longKey("messaging.kafka.message.offset"),
SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testSingleListener"),
satisfies(
longKey("kafka.record.queue_time_ms"),
AbstractLongAssert::isNotNegative)),
@ -147,8 +155,9 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)));
SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10")));
producer.set(trace.getSpan(1));
},
@ -163,7 +172,10 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive")),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testSingleListener")),
span ->
span.hasName("testSingleTopic process")
.hasKind(SpanKind.CONSUMER)
@ -184,8 +196,12 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
longKey("messaging.kafka.message.offset"),
SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testSingleListener"),
satisfies(
longKey("kafka.record.queue_time_ms"),
AbstractLongAssert::isNotNegative)),
@ -205,7 +221,7 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
testing.waitAndAssertSortedTraces(
orderByRootSpanKind(SpanKind.INTERNAL, SpanKind.CONSUMER),
trace -> {
trace.hasSpansSatisfyingExactly(
trace.hasSpansSatisfyingExactlyInAnyOrder(
span -> span.hasName("producer"),
span ->
span.hasName("testBatchTopic send")
@ -219,8 +235,9 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)),
SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10")),
span ->
span.hasName("testBatchTopic send")
.hasKind(SpanKind.PRODUCER)
@ -233,8 +250,9 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)));
SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "20")));
producer1.set(trace.getSpan(1));
producer2.set(trace.getSpan(2));
@ -250,7 +268,10 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive")),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testBatchListener")),
span ->
span.hasName("testBatchTopic process")
.hasKind(SpanKind.CONSUMER)
@ -263,7 +284,10 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process")),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testBatchListener")),
span -> span.hasName("consumer").hasParent(trace.getSpan(1))));
}
@ -298,8 +322,9 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)));
SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10")));
producer.set(trace.getSpan(1));
},
@ -314,7 +339,10 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive")),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testBatchListener")),
span ->
span.hasName("testBatchTopic process")
.hasKind(SpanKind.CONSUMER)
@ -327,7 +355,10 @@ class SpringKafkaTest extends AbstractSpringKafkaTest {
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process")),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testBatchListener")),
span -> span.hasName("consumer").hasParent(trace.getSpan(1))));
}
}

View File

@ -9,6 +9,7 @@ import io.opentelemetry.context.Context;
import io.opentelemetry.context.Scope;
import io.opentelemetry.instrumentation.api.instrumenter.Instrumenter;
import io.opentelemetry.instrumentation.api.util.VirtualField;
import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
@ -18,14 +19,15 @@ final class InstrumentedBatchInterceptor<K, V> implements BatchInterceptor<K, V>
private static final VirtualField<ConsumerRecords<?, ?>, Context> receiveContextField =
VirtualField.find(ConsumerRecords.class, Context.class);
private static final VirtualField<ConsumerRecords<?, ?>, State<ConsumerRecords<?, ?>>>
stateField = VirtualField.find(ConsumerRecords.class, State.class);
private static final VirtualField<ConsumerRecords<?, ?>, State> stateField =
VirtualField.find(ConsumerRecords.class, State.class);
private final Instrumenter<ConsumerRecords<?, ?>, Void> batchProcessInstrumenter;
private final Instrumenter<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void>
batchProcessInstrumenter;
@Nullable private final BatchInterceptor<K, V> decorated;
InstrumentedBatchInterceptor(
Instrumenter<ConsumerRecords<?, ?>, Void> batchProcessInstrumenter,
Instrumenter<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void> batchProcessInstrumenter,
@Nullable BatchInterceptor<K, V> decorated) {
this.batchProcessInstrumenter = batchProcessInstrumenter;
this.decorated = decorated;
@ -35,16 +37,17 @@ final class InstrumentedBatchInterceptor<K, V> implements BatchInterceptor<K, V>
public ConsumerRecords<K, V> intercept(ConsumerRecords<K, V> records, Consumer<K, V> consumer) {
Context parentContext = getParentContext(records);
if (batchProcessInstrumenter.shouldStart(parentContext, records)) {
Context context = batchProcessInstrumenter.start(parentContext, records);
ConsumerAndRecord<ConsumerRecords<?, ?>> request = ConsumerAndRecord.create(consumer, records);
if (batchProcessInstrumenter.shouldStart(parentContext, request)) {
Context context = batchProcessInstrumenter.start(parentContext, request);
Scope scope = context.makeCurrent();
stateField.set(records, State.create(records, context, scope));
stateField.set(records, State.create(context, scope));
}
return decorated == null ? records : decorated.intercept(records, consumer);
}
private Context getParentContext(ConsumerRecords<K, V> records) {
private static Context getParentContext(ConsumerRecords<?, ?> records) {
Context receiveContext = receiveContextField.get(records);
// use the receive CONSUMER span as parent if it's available
@ -53,7 +56,7 @@ final class InstrumentedBatchInterceptor<K, V> implements BatchInterceptor<K, V>
@Override
public void success(ConsumerRecords<K, V> records, Consumer<K, V> consumer) {
end(records, null);
end(ConsumerAndRecord.create(consumer, records), null);
if (decorated != null) {
decorated.success(records, consumer);
}
@ -61,18 +64,20 @@ final class InstrumentedBatchInterceptor<K, V> implements BatchInterceptor<K, V>
@Override
public void failure(ConsumerRecords<K, V> records, Exception exception, Consumer<K, V> consumer) {
end(records, exception);
end(ConsumerAndRecord.create(consumer, records), exception);
if (decorated != null) {
decorated.failure(records, exception, consumer);
}
}
private void end(ConsumerRecords<K, V> records, @Nullable Throwable error) {
State<ConsumerRecords<?, ?>> state = stateField.get(records);
private void end(
ConsumerAndRecord<ConsumerRecords<?, ?>> consumerAndRecord, @Nullable Throwable error) {
ConsumerRecords<?, ?> records = consumerAndRecord.record();
State state = stateField.get(records);
stateField.set(records, null);
if (state != null) {
state.scope().close();
batchProcessInstrumenter.end(state.context(), state.request(), null, error);
batchProcessInstrumenter.end(state.context(), consumerAndRecord, null, error);
}
}
}

View File

@ -9,6 +9,7 @@ import io.opentelemetry.context.Context;
import io.opentelemetry.context.Scope;
import io.opentelemetry.instrumentation.api.instrumenter.Instrumenter;
import io.opentelemetry.instrumentation.api.util.VirtualField;
import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
import io.opentelemetry.javaagent.tooling.muzzle.NoMuzzle;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.Consumer;
@ -19,14 +20,14 @@ final class InstrumentedRecordInterceptor<K, V> implements RecordInterceptor<K,
private static final VirtualField<ConsumerRecord<?, ?>, Context> receiveContextField =
VirtualField.find(ConsumerRecord.class, Context.class);
private static final VirtualField<ConsumerRecord<?, ?>, State<ConsumerRecord<?, ?>>> stateField =
private static final VirtualField<ConsumerRecord<?, ?>, State> stateField =
VirtualField.find(ConsumerRecord.class, State.class);
private final Instrumenter<ConsumerRecord<?, ?>, Void> processInstrumenter;
private final Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> processInstrumenter;
@Nullable private final RecordInterceptor<K, V> decorated;
InstrumentedRecordInterceptor(
Instrumenter<ConsumerRecord<?, ?>, Void> processInstrumenter,
Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> processInstrumenter,
@Nullable RecordInterceptor<K, V> decorated) {
this.processInstrumenter = processInstrumenter;
this.decorated = decorated;
@ -37,27 +38,28 @@ final class InstrumentedRecordInterceptor<K, V> implements RecordInterceptor<K,
"deprecation") // implementing deprecated method (removed in 3.0) for better compatibility
@Override
public ConsumerRecord<K, V> intercept(ConsumerRecord<K, V> record) {
start(record);
start(ConsumerAndRecord.create(null, record));
return decorated == null ? record : decorated.intercept(record);
}
@Override
public ConsumerRecord<K, V> intercept(ConsumerRecord<K, V> record, Consumer<K, V> consumer) {
start(record);
start(ConsumerAndRecord.create(consumer, record));
return decorated == null ? record : decorated.intercept(record, consumer);
}
private void start(ConsumerRecord<K, V> record) {
private void start(ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord) {
ConsumerRecord<?, ?> record = consumerAndRecord.record();
Context parentContext = getParentContext(record);
if (processInstrumenter.shouldStart(parentContext, record)) {
Context context = processInstrumenter.start(parentContext, record);
if (processInstrumenter.shouldStart(parentContext, consumerAndRecord)) {
Context context = processInstrumenter.start(parentContext, consumerAndRecord);
Scope scope = context.makeCurrent();
stateField.set(record, State.create(record, context, scope));
stateField.set(record, State.create(context, scope));
}
}
private Context getParentContext(ConsumerRecord<K, V> records) {
private static Context getParentContext(ConsumerRecord<?, ?> record) {
Context receiveContext = receiveContextField.get(record);
// use the receive CONSUMER span as parent if it's available
@ -66,7 +68,7 @@ final class InstrumentedRecordInterceptor<K, V> implements RecordInterceptor<K,
@Override
public void success(ConsumerRecord<K, V> record, Consumer<K, V> consumer) {
end(record, null);
end(ConsumerAndRecord.create(consumer, record), null);
if (decorated != null) {
decorated.success(record, consumer);
}
@ -74,18 +76,20 @@ final class InstrumentedRecordInterceptor<K, V> implements RecordInterceptor<K,
@Override
public void failure(ConsumerRecord<K, V> record, Exception exception, Consumer<K, V> consumer) {
end(record, exception);
end(ConsumerAndRecord.create(consumer, record), exception);
if (decorated != null) {
decorated.failure(record, exception, consumer);
}
}
private void end(ConsumerRecord<K, V> record, @Nullable Throwable error) {
State<ConsumerRecord<?, ?>> state = stateField.get(record);
private void end(
ConsumerAndRecord<ConsumerRecord<?, ?>> consumerAndRecord, @Nullable Throwable error) {
ConsumerRecord<?, ?> record = consumerAndRecord.record();
State state = stateField.get(record);
stateField.set(record, null);
if (state != null) {
state.scope().close();
processInstrumenter.end(state.context(), state.request(), null, error);
processInstrumenter.end(state.context(), consumerAndRecord, null, error);
}
}
}

View File

@ -8,6 +8,7 @@ package io.opentelemetry.instrumentation.spring.kafka.v2_7;
import io.opentelemetry.api.OpenTelemetry;
import io.opentelemetry.api.trace.SpanKind;
import io.opentelemetry.instrumentation.api.instrumenter.Instrumenter;
import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.kafka.listener.AbstractMessageListenerContainer;
@ -30,12 +31,13 @@ public final class SpringKafkaTelemetry {
return new SpringKafkaTelemetryBuilder(openTelemetry);
}
private final Instrumenter<ConsumerRecord<?, ?>, Void> processInstrumenter;
private final Instrumenter<ConsumerRecords<?, ?>, Void> batchProcessInstrumenter;
private final Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> processInstrumenter;
private final Instrumenter<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void>
batchProcessInstrumenter;
SpringKafkaTelemetry(
Instrumenter<ConsumerRecord<?, ?>, Void> processInstrumenter,
Instrumenter<ConsumerRecords<?, ?>, Void> batchProcessInstrumenter) {
Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> processInstrumenter,
Instrumenter<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void> batchProcessInstrumenter) {
this.processInstrumenter = processInstrumenter;
this.batchProcessInstrumenter = batchProcessInstrumenter;
}

View File

@ -10,14 +10,12 @@ import io.opentelemetry.context.Context;
import io.opentelemetry.context.Scope;
@AutoValue
abstract class State<REQUEST> {
abstract class State {
static <REQUEST> State<REQUEST> create(REQUEST request, Context context, Scope scope) {
return new AutoValue_State<>(request, context, scope);
static State create(Context context, Scope scope) {
return new AutoValue_State(context, scope);
}
abstract REQUEST request();
abstract Context context();
abstract Scope scope();
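
Dropping the request field works because both interceptors now rebuild the ConsumerAndRecord at end() time from the consumer passed into success()/failure(). A condensed lifecycle sketch, assuming the stateField and instrumenter shown earlier:

// in start():
Context context = processInstrumenter.start(parentContext, consumerAndRecord);
stateField.set(record, State.create(context, context.makeCurrent())); // no request stored

// later, in end():
State state = stateField.get(record);
if (state != null) {
  state.scope().close();
  processInstrumenter.end(state.context(), consumerAndRecord, null, error); // rebuilt request
}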

View File

@ -9,7 +9,6 @@ import static io.opentelemetry.instrumentation.testing.util.TelemetryDataUtil.or
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.trace.SpanKind;
import io.opentelemetry.sdk.trace.data.SpanData;
import io.opentelemetry.sdk.trace.data.StatusData;
@ -54,8 +53,9 @@ public abstract class AbstractSpringKafkaNoReceiveTelemetryTest extends Abstract
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)),
SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10")),
span ->
span.hasName("testSingleTopic process")
.hasKind(SpanKind.CONSUMER)
@ -74,8 +74,12 @@ public abstract class AbstractSpringKafkaNoReceiveTelemetryTest extends Abstract
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)),
SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testSingleListener")),
span -> span.hasName("consumer").hasParent(trace.getSpan(2))));
}
@ -111,8 +115,9 @@ public abstract class AbstractSpringKafkaNoReceiveTelemetryTest extends Abstract
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)),
SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10")),
span ->
span.hasName("testSingleTopic process")
.hasKind(SpanKind.CONSUMER)
@ -133,8 +138,12 @@ public abstract class AbstractSpringKafkaNoReceiveTelemetryTest extends Abstract
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)),
SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testSingleListener")),
span -> span.hasName("consumer").hasParent(trace.getSpan(2))));
}
@ -152,7 +161,7 @@ public abstract class AbstractSpringKafkaNoReceiveTelemetryTest extends Abstract
.waitAndAssertSortedTraces(
orderByRootSpanKind(SpanKind.INTERNAL, SpanKind.CONSUMER),
trace -> {
trace.hasSpansSatisfyingExactly(
trace.hasSpansSatisfyingExactlyInAnyOrder(
span -> span.hasName("producer"),
span ->
span.hasName("testBatchTopic send")
@ -167,8 +176,9 @@ public abstract class AbstractSpringKafkaNoReceiveTelemetryTest extends Abstract
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)),
SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10")),
span ->
span.hasName("testBatchTopic send")
.hasKind(SpanKind.PRODUCER)
@ -182,8 +192,9 @@ public abstract class AbstractSpringKafkaNoReceiveTelemetryTest extends Abstract
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)));
SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "20")));
producer1.set(trace.getSpan(1));
producer2.set(trace.getSpan(2));
@ -204,7 +215,10 @@ public abstract class AbstractSpringKafkaNoReceiveTelemetryTest extends Abstract
SemanticAttributes.MESSAGING_DESTINATION_NAME,
"testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process")),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testBatchListener")),
span -> span.hasName("consumer").hasParent(trace.getSpan(0))));
}
@ -242,8 +256,9 @@ public abstract class AbstractSpringKafkaNoReceiveTelemetryTest extends Abstract
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)));
SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative),
equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, "10")));
producer.set(trace.getSpan(1));
},
@ -262,7 +277,10 @@ public abstract class AbstractSpringKafkaNoReceiveTelemetryTest extends Abstract
SemanticAttributes.MESSAGING_DESTINATION_NAME,
"testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process")),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
equalTo(
SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP,
"testBatchListener")),
span -> span.hasName("consumer").hasParent(trace.getSpan(0))));
}
}

View File

@ -24,13 +24,22 @@ dependencies {
testInstrumentation(project(":instrumentation:kafka:kafka-clients:kafka-clients-0.11:javaagent"))
}
val latestDepTest = findProperty("testLatestDeps") as Boolean
testing {
suites {
val testNoReceiveTelemetry by registering(JvmTestSuite::class) {
dependencies {
implementation("io.vertx:vertx-kafka-client:3.6.0")
implementation("io.vertx:vertx-codegen:3.6.0")
implementation(project(":instrumentation:vertx:vertx-kafka-client-3.6:testing"))
// the "library" configuration is not recognized by the test suite plugin
if (latestDepTest) {
implementation("io.vertx:vertx-kafka-client:+")
implementation("io.vertx:vertx-codegen:+")
} else {
implementation("io.vertx:vertx-kafka-client:3.6.0")
implementation("io.vertx:vertx-codegen:3.6.0")
}
}
targets {
@ -38,6 +47,8 @@ testing {
testTask.configure {
usesService(gradle.sharedServices.registrations["testcontainersBuildService"].service)
systemProperty("testLatestDeps", findProperty("testLatestDeps") as Boolean)
jvmArgs("-Dotel.instrumentation.kafka.experimental-span-attributes=false")
jvmArgs("-Dotel.instrumentation.messaging.experimental.receive-telemetry.enabled=false")
}
@ -51,6 +62,8 @@ tasks {
test {
usesService(gradle.sharedServices.registrations["testcontainersBuildService"].service)
systemProperty("testLatestDeps", latestDepTest)
jvmArgs("-Dotel.instrumentation.kafka.experimental-span-attributes=true")
jvmArgs("-Dotel.instrumentation.messaging.experimental.receive-telemetry.enabled=true")
}

View File

@ -10,28 +10,35 @@ import static io.opentelemetry.javaagent.instrumentation.vertx.kafka.v3_6.VertxK
import io.opentelemetry.context.Context;
import io.opentelemetry.context.Scope;
import io.opentelemetry.instrumentation.api.util.VirtualField;
import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
import io.opentelemetry.javaagent.bootstrap.kafka.KafkaClientsConsumerProcessTracing;
import io.vertx.core.Handler;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
public final class InstrumentedBatchRecordsHandler<K, V> implements Handler<ConsumerRecords<K, V>> {
private final VirtualField<ConsumerRecords<K, V>, Context> receiveContextField;
private final Consumer<K, V> kafkaConsumer;
@Nullable private final Handler<ConsumerRecords<K, V>> delegate;
public InstrumentedBatchRecordsHandler(
VirtualField<ConsumerRecords<K, V>, Context> receiveContextField,
Consumer<K, V> kafkaConsumer,
@Nullable Handler<ConsumerRecords<K, V>> delegate) {
this.receiveContextField = receiveContextField;
this.kafkaConsumer = kafkaConsumer;
this.delegate = delegate;
}
@Override
public void handle(ConsumerRecords<K, V> records) {
Context parentContext = getParentContext(records);
ConsumerAndRecord<ConsumerRecords<?, ?>> request =
ConsumerAndRecord.create(kafkaConsumer, records);
if (!batchProcessInstrumenter().shouldStart(parentContext, records)) {
if (!batchProcessInstrumenter().shouldStart(parentContext, request)) {
callDelegateHandler(records);
return;
}
@ -39,7 +46,7 @@ public final class InstrumentedBatchRecordsHandler<K, V> implements Handler<Cons
// the instrumenter iterates over records when adding links, we need to suppress that
boolean previousWrappingEnabled = KafkaClientsConsumerProcessTracing.setEnabled(false);
try {
Context context = batchProcessInstrumenter().start(parentContext, records);
Context context = batchProcessInstrumenter().start(parentContext, request);
Throwable error = null;
try (Scope ignored = context.makeCurrent()) {
callDelegateHandler(records);
@ -47,7 +54,7 @@ public final class InstrumentedBatchRecordsHandler<K, V> implements Handler<Cons
error = t;
throw t;
} finally {
batchProcessInstrumenter().end(context, records, null, error);
batchProcessInstrumenter().end(context, request, null, error);
}
} finally {
KafkaClientsConsumerProcessTracing.setEnabled(previousWrappingEnabled);
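
Both Vert.x handlers drive the same Instrumenter lifecycle, now keyed on the consumer-plus-records pair rather than on the records alone. A minimal sketch of that lifecycle, assuming only the public Instrumenter<REQUEST, Void> API (the helper class and method names here are illustrative):

import io.opentelemetry.context.Context;
import io.opentelemetry.context.Scope;
import io.opentelemetry.instrumentation.api.instrumenter.Instrumenter;

final class LifecycleSketch<REQUEST> {
  private final Instrumenter<REQUEST, Void> instrumenter;

  LifecycleSketch(Instrumenter<REQUEST, Void> instrumenter) {
    this.instrumenter = instrumenter;
  }

  void handle(Context parentContext, REQUEST request, Runnable delegate) {
    // shouldStart returning false means the span is suppressed; just call through
    if (!instrumenter.shouldStart(parentContext, request)) {
      delegate.run();
      return;
    }
    Context context = instrumenter.start(parentContext, request);
    Throwable error = null;
    try (Scope ignored = context.makeCurrent()) {
      delegate.run();
    } catch (Throwable t) {
      error = t;
      throw t;
    } finally {
      // the request is passed again on end() so attribute extractors can reuse it
      instrumenter.end(context, request, null, error);
    }
  }
}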

View File

@ -10,32 +10,39 @@ import static io.opentelemetry.javaagent.instrumentation.vertx.kafka.v3_6.VertxK
import io.opentelemetry.context.Context;
import io.opentelemetry.context.Scope;
import io.opentelemetry.instrumentation.api.util.VirtualField;
import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
import io.vertx.core.Handler;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
public final class InstrumentedSingleRecordHandler<K, V> implements Handler<ConsumerRecord<K, V>> {
private final VirtualField<ConsumerRecord<K, V>, Context> receiveContextField;
private final Consumer<K, V> kafkaConsumer;
@Nullable private final Handler<ConsumerRecord<K, V>> delegate;
public InstrumentedSingleRecordHandler(
VirtualField<ConsumerRecord<K, V>, Context> receiveContextField,
Consumer<K, V> kafkaConsumer,
@Nullable Handler<ConsumerRecord<K, V>> delegate) {
this.receiveContextField = receiveContextField;
this.kafkaConsumer = kafkaConsumer;
this.delegate = delegate;
}
@Override
public void handle(ConsumerRecord<K, V> record) {
Context parentContext = getParentContext(record);
ConsumerAndRecord<ConsumerRecord<?, ?>> request =
ConsumerAndRecord.create(kafkaConsumer, record);
if (!processInstrumenter().shouldStart(parentContext, record)) {
if (!processInstrumenter().shouldStart(parentContext, request)) {
callDelegateHandler(record);
return;
}
Context context = processInstrumenter().start(parentContext, record);
Context context = processInstrumenter().start(parentContext, request);
Throwable error = null;
try (Scope ignored = context.makeCurrent()) {
callDelegateHandler(record);
@ -43,7 +50,7 @@ public final class InstrumentedSingleRecordHandler<K, V> implements Handler<Cons
error = t;
throw t;
} finally {
processInstrumenter().end(context, record, null, error);
processInstrumenter().end(context, request, null, error);
}
}

View File

@ -16,9 +16,11 @@ import io.opentelemetry.javaagent.bootstrap.kafka.KafkaClientsConsumerProcessTra
import io.opentelemetry.javaagent.extension.instrumentation.TypeInstrumentation;
import io.opentelemetry.javaagent.extension.instrumentation.TypeTransformer;
import io.vertx.core.Handler;
import io.vertx.kafka.client.consumer.impl.KafkaReadStreamImpl;
import net.bytebuddy.asm.Advice;
import net.bytebuddy.description.type.TypeDescription;
import net.bytebuddy.matcher.ElementMatcher;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
@ -50,11 +52,13 @@ public class KafkaReadStreamImplInstrumentation implements TypeInstrumentation {
@Advice.OnMethodEnter(suppress = Throwable.class)
public static <K, V> void onEnter(
@Advice.This KafkaReadStreamImpl<K, V> readStream,
@Advice.Argument(value = 0, readOnly = false) Handler<ConsumerRecord<K, V>> handler) {
Consumer<K, V> consumer = readStream.unwrap();
VirtualField<ConsumerRecord<K, V>, Context> receiveContextField =
VirtualField.find(ConsumerRecord.class, Context.class);
handler = new InstrumentedSingleRecordHandler<>(receiveContextField, handler);
handler = new InstrumentedSingleRecordHandler<>(receiveContextField, consumer, handler);
}
}
@ -63,11 +67,13 @@ public class KafkaReadStreamImplInstrumentation implements TypeInstrumentation {
@Advice.OnMethodEnter(suppress = Throwable.class)
public static <K, V> void onEnter(
@Advice.This KafkaReadStreamImpl<K, V> readStream,
@Advice.Argument(value = 0, readOnly = false) Handler<ConsumerRecords<K, V>> handler) {
Consumer<K, V> consumer = readStream.unwrap();
VirtualField<ConsumerRecords<K, V>, Context> receiveContextField =
VirtualField.find(ConsumerRecords.class, Context.class);
handler = new InstrumentedBatchRecordsHandler<>(receiveContextField, handler);
handler = new InstrumentedBatchRecordsHandler<>(receiveContextField, consumer, handler);
}
}
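
The receive-side Context, and now also the Kafka Consumer, travel between instrumentation points through VirtualField, which attaches extra state to existing Kafka objects without modifying their classes. A minimal sketch of the lookup pattern used above (illustrative only):

import io.opentelemetry.context.Context;
import io.opentelemetry.instrumentation.api.util.VirtualField;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.ConsumerRecord;

class ReceiveContextLookup {
  @Nullable
  static Context receiveContextOf(ConsumerRecord<?, ?> record) {
    VirtualField<ConsumerRecord<?, ?>, Context> field =
        VirtualField.find(ConsumerRecord.class, Context.class);
    // null when no "receive" span was recorded for this record
    return field.get(record);
  }
}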

View File

@ -7,6 +7,7 @@ package io.opentelemetry.javaagent.instrumentation.vertx.kafka.v3_6;
import io.opentelemetry.api.GlobalOpenTelemetry;
import io.opentelemetry.instrumentation.api.instrumenter.Instrumenter;
import io.opentelemetry.instrumentation.kafka.internal.ConsumerAndRecord;
import io.opentelemetry.instrumentation.kafka.internal.KafkaInstrumenterFactory;
import io.opentelemetry.javaagent.bootstrap.internal.ExperimentalConfig;
import io.opentelemetry.javaagent.bootstrap.internal.InstrumentationConfig;
@ -17,8 +18,10 @@ public final class VertxKafkaSingletons {
private static final String INSTRUMENTATION_NAME = "io.opentelemetry.vertx-kafka-client-3.6";
private static final Instrumenter<ConsumerRecords<?, ?>, Void> BATCH_PROCESS_INSTRUMENTER;
private static final Instrumenter<ConsumerRecord<?, ?>, Void> PROCESS_INSTRUMENTER;
private static final Instrumenter<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void>
BATCH_PROCESS_INSTRUMENTER;
private static final Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void>
PROCESS_INSTRUMENTER;
static {
KafkaInstrumenterFactory factory =
@ -33,11 +36,12 @@ public final class VertxKafkaSingletons {
PROCESS_INSTRUMENTER = factory.createConsumerProcessInstrumenter();
}
public static Instrumenter<ConsumerRecords<?, ?>, Void> batchProcessInstrumenter() {
public static Instrumenter<ConsumerAndRecord<ConsumerRecords<?, ?>>, Void>
batchProcessInstrumenter() {
return BATCH_PROCESS_INSTRUMENTER;
}
public static Instrumenter<ConsumerRecord<?, ?>, Void> processInstrumenter() {
public static Instrumenter<ConsumerAndRecord<ConsumerRecord<?, ?>>, Void> processInstrumenter() {
return PROCESS_INSTRUMENTER;
}
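
ConsumerAndRecord (io.opentelemetry.instrumentation.kafka.internal) is the new request type: it pairs the Kafka Consumer, from which attributes such as the consumer group can be derived, with the record or batch being processed. Its definition is not part of this section; a hypothetical minimal equivalent, only to show the shape the instrumenters consume:

import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.Consumer;

// Hypothetical stand-in for the internal ConsumerAndRecord class; the real
// implementation may differ.
final class ConsumerAndRecordSketch<R> {
  @Nullable private final Consumer<?, ?> consumer;
  private final R record;

  private ConsumerAndRecordSketch(@Nullable Consumer<?, ?> consumer, R record) {
    this.consumer = consumer;
    this.record = record;
  }

  static <R> ConsumerAndRecordSketch<R> create(@Nullable Consumer<?, ?> consumer, R record) {
    return new ConsumerAndRecordSketch<>(consumer, record);
  }

  @Nullable
  Consumer<?, ?> consumer() {
    return consumer;
  }

  R record() {
    return record;
  }
}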

View File

@ -5,23 +5,17 @@
package io.opentelemetry.javaagent.instrumentation.vertx.kafka.v3_6;
import static io.opentelemetry.api.common.AttributeKey.longKey;
import static io.opentelemetry.instrumentation.testing.util.TelemetryDataUtil.orderByRootSpanKind;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;
import static org.junit.jupiter.api.Assertions.assertTrue;
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.trace.SpanKind;
import io.opentelemetry.sdk.trace.data.LinkData;
import io.opentelemetry.sdk.trace.data.SpanData;
import io.opentelemetry.sdk.trace.data.StatusData;
import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import io.vertx.kafka.client.producer.KafkaProducerRecord;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.assertj.core.api.AbstractLongAssert;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.MethodOrderer.OrderAnnotation;
import org.junit.jupiter.api.Order;
@ -51,9 +45,11 @@ class BatchRecordsVertxKafkaTest extends AbstractVertxKafkaTest {
void shouldCreateSpansForBatchReceiveAndProcess() throws InterruptedException {
assertTrue(consumerReady.await(30, TimeUnit.SECONDS));
sendBatchMessages(
KafkaProducerRecord.create("testBatchTopic", "10", "testSpan1"),
KafkaProducerRecord.create("testBatchTopic", "20", "testSpan2"));
KafkaProducerRecord<String, String> record1 =
KafkaProducerRecord.create("testBatchTopic", "10", "testSpan1");
KafkaProducerRecord<String, String> record2 =
KafkaProducerRecord.create("testBatchTopic", "20", "testSpan2");
sendBatchMessages(record1, record2);
AtomicReference<SpanData> producer1 = new AtomicReference<>();
AtomicReference<SpanData> producer2 = new AtomicReference<>();
@ -61,52 +57,29 @@ class BatchRecordsVertxKafkaTest extends AbstractVertxKafkaTest {
testing.waitAndAssertSortedTraces(
orderByRootSpanKind(SpanKind.INTERNAL, SpanKind.CONSUMER),
trace -> {
trace.hasSpansSatisfyingExactly(
trace.hasSpansSatisfyingExactlyInAnyOrder(
span -> span.hasName("producer"),
span ->
span.hasName("testBatchTopic send")
.hasKind(SpanKind.PRODUCER)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)),
.hasAttributesSatisfyingExactly(sendAttributes(record1)),
span ->
span.hasName("testBatchTopic send")
.hasKind(SpanKind.PRODUCER)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)));
.hasAttributesSatisfyingExactly(sendAttributes(record2)));
producer1.set(trace.getSpan(1));
producer2.set(trace.getSpan(2));
},
trace ->
trace.hasSpansSatisfyingExactly(
trace.hasSpansSatisfyingExactlyInAnyOrder(
span ->
span.hasName("testBatchTopic receive")
.hasKind(SpanKind.CONSUMER)
.hasNoParent()
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive")),
.hasAttributesSatisfyingExactly(receiveAttributes("testBatchTopic")),
// batch consumer
span ->
@ -116,12 +89,7 @@ class BatchRecordsVertxKafkaTest extends AbstractVertxKafkaTest {
.hasLinks(
LinkData.create(producer1.get().getSpanContext()),
LinkData.create(producer2.get().getSpanContext()))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process")),
.hasAttributesSatisfyingExactly(batchProcessAttributes("testBatchTopic")),
span -> span.hasName("batch consumer").hasParent(trace.getSpan(1)),
// single consumer 1
@ -130,24 +98,7 @@ class BatchRecordsVertxKafkaTest extends AbstractVertxKafkaTest {
.hasKind(SpanKind.CONSUMER)
.hasParent(trace.getSpan(0))
.hasLinks(LinkData.create(producer1.get().getSpanContext()))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
satisfies(
SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
AbstractLongAssert::isNotNegative),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative),
satisfies(
longKey("kafka.record.queue_time_ms"),
AbstractLongAssert::isNotNegative)),
.hasAttributesSatisfyingExactly(processAttributes(record1)),
span -> span.hasName("process testSpan1").hasParent(trace.getSpan(3)),
// single consumer 2
@ -156,24 +107,7 @@ class BatchRecordsVertxKafkaTest extends AbstractVertxKafkaTest {
.hasKind(SpanKind.CONSUMER)
.hasParent(trace.getSpan(0))
.hasLinks(LinkData.create(producer2.get().getSpanContext()))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
satisfies(
SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
AbstractLongAssert::isNotNegative),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative),
satisfies(
longKey("kafka.record.queue_time_ms"),
AbstractLongAssert::isNotNegative)),
.hasAttributesSatisfyingExactly(processAttributes(record2)),
span -> span.hasName("process testSpan2").hasParent(trace.getSpan(5))));
}
@ -182,7 +116,9 @@ class BatchRecordsVertxKafkaTest extends AbstractVertxKafkaTest {
void shouldHandleFailureInKafkaBatchListener() throws InterruptedException {
assertTrue(consumerReady.await(30, TimeUnit.SECONDS));
sendBatchMessages(KafkaProducerRecord.create("testBatchTopic", "10", "error"));
KafkaProducerRecord<String, String> record =
KafkaProducerRecord.create("testBatchTopic", "10", "error");
sendBatchMessages(record);
// make sure that the consumer eats up any leftover records
kafkaConsumer.resume();
@ -198,16 +134,7 @@ class BatchRecordsVertxKafkaTest extends AbstractVertxKafkaTest {
span.hasName("testBatchTopic send")
.hasKind(SpanKind.PRODUCER)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)));
.hasAttributesSatisfyingExactly(sendAttributes(record)));
producer.set(trace.getSpan(1));
},
@ -217,12 +144,7 @@ class BatchRecordsVertxKafkaTest extends AbstractVertxKafkaTest {
span.hasName("testBatchTopic receive")
.hasKind(SpanKind.CONSUMER)
.hasNoParent()
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive")),
.hasAttributesSatisfyingExactly(receiveAttributes("testBatchTopic")),
// batch consumer
span ->
@ -232,12 +154,7 @@ class BatchRecordsVertxKafkaTest extends AbstractVertxKafkaTest {
.hasLinks(LinkData.create(producer.get().getSpanContext()))
.hasStatus(StatusData.error())
.hasException(new IllegalArgumentException("boom"))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process")),
.hasAttributesSatisfyingExactly(batchProcessAttributes("testBatchTopic")),
span -> span.hasName("batch consumer").hasParent(trace.getSpan(1)),
// single consumer
@ -245,24 +162,7 @@ class BatchRecordsVertxKafkaTest extends AbstractVertxKafkaTest {
span.hasName("testBatchTopic process")
.hasKind(SpanKind.CONSUMER)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
satisfies(
SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
AbstractLongAssert::isNotNegative),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative),
satisfies(
longKey("kafka.record.queue_time_ms"),
AbstractLongAssert::isNotNegative)),
.hasAttributesSatisfyingExactly(processAttributes(record)),
span -> span.hasName("process error").hasParent(trace.getSpan(3))));
}
}

View File

@ -5,23 +5,17 @@
package io.opentelemetry.javaagent.instrumentation.vertx.kafka.v3_6;
import static io.opentelemetry.api.common.AttributeKey.longKey;
import static io.opentelemetry.instrumentation.testing.util.TelemetryDataUtil.orderByRootSpanKind;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;
import static org.junit.jupiter.api.Assertions.assertTrue;
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.trace.SpanKind;
import io.opentelemetry.sdk.trace.data.LinkData;
import io.opentelemetry.sdk.trace.data.SpanData;
import io.opentelemetry.sdk.trace.data.StatusData;
import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import io.vertx.kafka.client.producer.KafkaProducerRecord;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.assertj.core.api.AbstractLongAssert;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
@ -47,13 +41,10 @@ class SingleRecordVertxKafkaTest extends AbstractVertxKafkaTest {
void shouldCreateSpansForSingleRecordProcess() throws InterruptedException {
assertTrue(consumerReady.await(30, TimeUnit.SECONDS));
KafkaProducerRecord<String, String> record =
KafkaProducerRecord.create("testSingleTopic", "10", "testSpan");
CountDownLatch sent = new CountDownLatch(1);
testing.runWithSpan(
"producer",
() ->
sendRecord(
KafkaProducerRecord.create("testSingleTopic", "10", "testSpan"),
result -> sent.countDown()));
testing.runWithSpan("producer", () -> sendRecord(record, result -> sent.countDown()));
assertTrue(sent.await(30, TimeUnit.SECONDS));
AtomicReference<SpanData> producer = new AtomicReference<>();
@ -67,16 +58,7 @@ class SingleRecordVertxKafkaTest extends AbstractVertxKafkaTest {
span.hasName("testSingleTopic send")
.hasKind(SpanKind.PRODUCER)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)));
.hasAttributesSatisfyingExactly(sendAttributes(record)));
producer.set(trace.getSpan(1));
},
@ -86,35 +68,13 @@ class SingleRecordVertxKafkaTest extends AbstractVertxKafkaTest {
span.hasName("testSingleTopic receive")
.hasKind(SpanKind.CONSUMER)
.hasNoParent()
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive")),
.hasAttributesSatisfyingExactly(receiveAttributes("testSingleTopic")),
span ->
span.hasName("testSingleTopic process")
.hasKind(SpanKind.CONSUMER)
.hasParent(trace.getSpan(0))
.hasLinks(LinkData.create(producer.get().getSpanContext()))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
satisfies(
SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
AbstractLongAssert::isNotNegative),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative),
satisfies(
longKey("kafka.record.queue_time_ms"),
AbstractLongAssert::isNotNegative)),
.hasAttributesSatisfyingExactly(processAttributes(record)),
span -> span.hasName("consumer").hasParent(trace.getSpan(1))));
}
@ -122,13 +82,10 @@ class SingleRecordVertxKafkaTest extends AbstractVertxKafkaTest {
void shouldHandleFailureInSingleRecordHandler() throws InterruptedException {
assertTrue(consumerReady.await(30, TimeUnit.SECONDS));
KafkaProducerRecord<String, String> record =
KafkaProducerRecord.create("testSingleTopic", "10", "error");
CountDownLatch sent = new CountDownLatch(1);
testing.runWithSpan(
"producer",
() ->
sendRecord(
KafkaProducerRecord.create("testSingleTopic", "10", "error"),
result -> sent.countDown()));
testing.runWithSpan("producer", () -> sendRecord(record, result -> sent.countDown()));
assertTrue(sent.await(30, TimeUnit.SECONDS));
AtomicReference<SpanData> producer = new AtomicReference<>();
@ -142,16 +99,7 @@ class SingleRecordVertxKafkaTest extends AbstractVertxKafkaTest {
span.hasName("testSingleTopic send")
.hasKind(SpanKind.PRODUCER)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)));
.hasAttributesSatisfyingExactly(sendAttributes(record)));
producer.set(trace.getSpan(1));
},
@ -161,12 +109,7 @@ class SingleRecordVertxKafkaTest extends AbstractVertxKafkaTest {
span.hasName("testSingleTopic receive")
.hasKind(SpanKind.CONSUMER)
.hasNoParent()
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "receive")),
.hasAttributesSatisfyingExactly(receiveAttributes("testSingleTopic")),
span ->
span.hasName("testSingleTopic process")
.hasKind(SpanKind.CONSUMER)
@ -174,24 +117,7 @@ class SingleRecordVertxKafkaTest extends AbstractVertxKafkaTest {
.hasLinks(LinkData.create(producer.get().getSpanContext()))
.hasStatus(StatusData.error())
.hasException(new IllegalArgumentException("boom"))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
satisfies(
SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
AbstractLongAssert::isNotNegative),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative),
satisfies(
longKey("kafka.record.queue_time_ms"),
AbstractLongAssert::isNotNegative)),
.hasAttributesSatisfyingExactly(processAttributes(record)),
span -> span.hasName("consumer").hasParent(trace.getSpan(1))));
}
}

View File

@ -6,21 +6,16 @@
package io.opentelemetry.javaagent.instrumentation.vertx.kafka.v3_6;
import static io.opentelemetry.instrumentation.testing.util.TelemetryDataUtil.orderByRootSpanKind;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;
import static org.junit.jupiter.api.Assertions.assertTrue;
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.trace.SpanKind;
import io.opentelemetry.sdk.trace.data.LinkData;
import io.opentelemetry.sdk.trace.data.SpanData;
import io.opentelemetry.sdk.trace.data.StatusData;
import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import io.vertx.kafka.client.producer.KafkaProducerRecord;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.assertj.core.api.AbstractLongAssert;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.MethodOrderer;
import org.junit.jupiter.api.Order;
@ -50,9 +45,11 @@ class NoReceiveTelemetryBatchRecordsVertxKafkaTest extends AbstractVertxKafkaTes
void shouldCreateSpansForBatchReceiveAndProcess() throws InterruptedException {
assertTrue(consumerReady.await(30, TimeUnit.SECONDS));
sendBatchMessages(
KafkaProducerRecord.create("testBatchTopic", "10", "testSpan1"),
KafkaProducerRecord.create("testBatchTopic", "20", "testSpan2"));
KafkaProducerRecord<String, String> record1 =
KafkaProducerRecord.create("testBatchTopic", "10", "testSpan1");
KafkaProducerRecord<String, String> record2 =
KafkaProducerRecord.create("testBatchTopic", "20", "testSpan2");
sendBatchMessages(record1, record2);
AtomicReference<SpanData> producer1 = new AtomicReference<>();
AtomicReference<SpanData> producer2 = new AtomicReference<>();
@ -60,7 +57,7 @@ class NoReceiveTelemetryBatchRecordsVertxKafkaTest extends AbstractVertxKafkaTes
testing.waitAndAssertSortedTraces(
orderByRootSpanKind(SpanKind.INTERNAL, SpanKind.CONSUMER),
trace -> {
trace.hasSpansSatisfyingExactly(
trace.hasSpansSatisfyingExactlyInAnyOrder(
span -> span.hasName("producer"),
// first record
@ -68,34 +65,12 @@ class NoReceiveTelemetryBatchRecordsVertxKafkaTest extends AbstractVertxKafkaTes
span.hasName("testBatchTopic send")
.hasKind(SpanKind.PRODUCER)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)),
.hasAttributesSatisfyingExactly(sendAttributes(record1)),
span ->
span.hasName("testBatchTopic process")
.hasKind(SpanKind.CONSUMER)
.hasParent(trace.getSpan(1))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
satisfies(
SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
AbstractLongAssert::isNotNegative),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)),
.hasAttributesSatisfyingExactly(processAttributes(record1)),
span -> span.hasName("process testSpan1").hasParent(trace.getSpan(2)),
// second record
@ -103,34 +78,12 @@ class NoReceiveTelemetryBatchRecordsVertxKafkaTest extends AbstractVertxKafkaTes
span.hasName("testBatchTopic send")
.hasKind(SpanKind.PRODUCER)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)),
.hasAttributesSatisfyingExactly(sendAttributes(record2)),
span ->
span.hasName("testBatchTopic process")
.hasKind(SpanKind.CONSUMER)
.hasParent(trace.getSpan(4))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
satisfies(
SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
AbstractLongAssert::isNotNegative),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)),
.hasAttributesSatisfyingExactly(processAttributes(record2)),
span -> span.hasName("process testSpan2").hasParent(trace.getSpan(5)));
producer1.set(trace.getSpan(1));
@ -146,12 +99,7 @@ class NoReceiveTelemetryBatchRecordsVertxKafkaTest extends AbstractVertxKafkaTes
.hasLinks(
LinkData.create(producer1.get().getSpanContext()),
LinkData.create(producer2.get().getSpanContext()))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process")),
.hasAttributesSatisfyingExactly(batchProcessAttributes("testBatchTopic")),
span -> span.hasName("batch consumer").hasParent(trace.getSpan(0))));
}
@ -160,7 +108,9 @@ class NoReceiveTelemetryBatchRecordsVertxKafkaTest extends AbstractVertxKafkaTes
void shouldHandleFailureInKafkaBatchListener() throws InterruptedException {
assertTrue(consumerReady.await(30, TimeUnit.SECONDS));
sendBatchMessages(KafkaProducerRecord.create("testBatchTopic", "10", "error"));
KafkaProducerRecord<String, String> record =
KafkaProducerRecord.create("testBatchTopic", "10", "error");
sendBatchMessages(record);
// make sure that the consumer eats up any leftover records
kafkaConsumer.resume();
@ -176,34 +126,12 @@ class NoReceiveTelemetryBatchRecordsVertxKafkaTest extends AbstractVertxKafkaTes
span.hasName("testBatchTopic send")
.hasKind(SpanKind.PRODUCER)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)),
.hasAttributesSatisfyingExactly(sendAttributes(record)),
span ->
span.hasName("testBatchTopic process")
.hasKind(SpanKind.CONSUMER)
.hasParent(trace.getSpan(1))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
satisfies(
SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
AbstractLongAssert::isNotNegative),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)),
.hasAttributesSatisfyingExactly(processAttributes(record)),
span -> span.hasName("process error").hasParent(trace.getSpan(2)));
producer.set(trace.getSpan(1));
@ -217,12 +145,7 @@ class NoReceiveTelemetryBatchRecordsVertxKafkaTest extends AbstractVertxKafkaTes
.hasLinks(LinkData.create(producer.get().getSpanContext()))
.hasStatus(StatusData.error())
.hasException(new IllegalArgumentException("boom"))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testBatchTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process")),
.hasAttributesSatisfyingExactly(batchProcessAttributes("testBatchTopic")),
span -> span.hasName("batch consumer").hasParent(trace.getSpan(0))));
}
}

View File

@ -5,18 +5,13 @@
package io.opentelemetry.javaagent.instrumentation.vertx.kafka.v3_6;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;
import static org.junit.jupiter.api.Assertions.assertTrue;
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.trace.SpanKind;
import io.opentelemetry.sdk.trace.data.StatusData;
import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import io.vertx.kafka.client.producer.KafkaProducerRecord;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.assertj.core.api.AbstractLongAssert;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
@ -42,13 +37,10 @@ class NoReceiveTelemetrySingleRecordVertxKafkaTest extends AbstractVertxKafkaTes
void shouldCreateSpansForSingleRecordProcess() throws InterruptedException {
assertTrue(consumerReady.await(30, TimeUnit.SECONDS));
KafkaProducerRecord<String, String> record =
KafkaProducerRecord.create("testSingleTopic", "10", "testSpan");
CountDownLatch sent = new CountDownLatch(1);
testing.runWithSpan(
"producer",
() ->
sendRecord(
KafkaProducerRecord.create("testSingleTopic", "10", "testSpan"),
result -> sent.countDown()));
testing.runWithSpan("producer", () -> sendRecord(record, result -> sent.countDown()));
assertTrue(sent.await(30, TimeUnit.SECONDS));
testing.waitAndAssertTraces(
@ -59,36 +51,12 @@ class NoReceiveTelemetrySingleRecordVertxKafkaTest extends AbstractVertxKafkaTes
span.hasName("testSingleTopic send")
.hasKind(SpanKind.PRODUCER)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)),
.hasAttributesSatisfyingExactly(sendAttributes(record)),
span ->
span.hasName("testSingleTopic process")
.hasKind(SpanKind.CONSUMER)
.hasParent(trace.getSpan(1))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
satisfies(
SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
AbstractLongAssert::isNotNegative),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)),
.hasAttributesSatisfyingExactly(processAttributes(record)),
span -> span.hasName("consumer").hasParent(trace.getSpan(2))));
}
@ -96,13 +64,10 @@ class NoReceiveTelemetrySingleRecordVertxKafkaTest extends AbstractVertxKafkaTes
void shouldHandleFailureInSingleRecordHandler() throws InterruptedException {
assertTrue(consumerReady.await(30, TimeUnit.SECONDS));
KafkaProducerRecord<String, String> record =
KafkaProducerRecord.create("testSingleTopic", "10", "error");
CountDownLatch sent = new CountDownLatch(1);
testing.runWithSpan(
"producer",
() ->
sendRecord(
KafkaProducerRecord.create("testSingleTopic", "10", "error"),
result -> sent.countDown()));
testing.runWithSpan("producer", () -> sendRecord(record, result -> sent.countDown()));
assertTrue(sent.await(30, TimeUnit.SECONDS));
testing.waitAndAssertTraces(
@ -113,38 +78,14 @@ class NoReceiveTelemetrySingleRecordVertxKafkaTest extends AbstractVertxKafkaTes
span.hasName("testSingleTopic send")
.hasKind(SpanKind.PRODUCER)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)),
.hasAttributesSatisfyingExactly(sendAttributes(record)),
span ->
span.hasName("testSingleTopic process")
.hasKind(SpanKind.CONSUMER)
.hasParent(trace.getSpan(1))
.hasStatus(StatusData.error())
.hasException(new IllegalArgumentException("boom"))
.hasAttributesSatisfyingExactly(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(
SemanticAttributes.MESSAGING_DESTINATION_NAME, "testSingleTopic"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
satisfies(
SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
AbstractLongAssert::isNotNegative),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
AttributeKey.longKey("messaging.kafka.message.offset"),
AbstractLongAssert::isNotNegative)),
.hasAttributesSatisfyingExactly(processAttributes(record)),
span -> span.hasName("consumer").hasParent(trace.getSpan(2))));
}
}

View File

@ -5,10 +5,15 @@
package io.opentelemetry.javaagent.instrumentation.vertx.kafka.v3_6;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;
import static org.junit.jupiter.api.Assertions.assertTrue;
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.instrumentation.testing.junit.AgentInstrumentationExtension;
import io.opentelemetry.instrumentation.testing.junit.InstrumentationExtension;
import io.opentelemetry.sdk.testing.assertj.AttributeAssertion;
import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
import io.vertx.core.AsyncResult;
import io.vertx.core.Handler;
import io.vertx.core.Vertx;
@ -19,12 +24,17 @@ import io.vertx.kafka.client.producer.RecordMetadata;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.assertj.core.api.AbstractLongAssert;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.extension.RegisterExtension;
@ -182,4 +192,87 @@ public abstract class AbstractVertxKafkaTest {
throw new AssertionError("Failed producer send/write invocation", e);
}
}
protected static List<AttributeAssertion> sendAttributes(
KafkaProducerRecord<String, String> record) {
List<AttributeAssertion> assertions =
new ArrayList<>(
Arrays.asList(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, record.topic()),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_DESTINATION_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative)));
String messageKey = record.key();
if (messageKey != null) {
assertions.add(equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, messageKey));
}
return assertions;
}
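
With this helper, every producer span assertion in the Vert.x tests collapses to a single call, as at the call sites earlier in this diff:

span.hasName(record.topic() + " send")
    .hasKind(SpanKind.PRODUCER)
    .hasAttributesSatisfyingExactly(sendAttributes(record));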
protected static List<AttributeAssertion> receiveAttributes(String topic) {
return batchConsumerAttributes(topic, "receive");
}
protected static List<AttributeAssertion> batchProcessAttributes(String topic) {
return batchConsumerAttributes(topic, "process");
}
private static List<AttributeAssertion> batchConsumerAttributes(String topic, String operation) {
List<AttributeAssertion> assertions =
new ArrayList<>(
Arrays.asList(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, topic),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, operation)));
// consumer group id is not available in version 0.11
if (Boolean.getBoolean("testLatestDeps")) {
assertions.add(equalTo(SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP, "test"));
}
return assertions;
}
protected static List<AttributeAssertion> processAttributes(
KafkaProducerRecord<String, String> record) {
List<AttributeAssertion> assertions =
new ArrayList<>(
Arrays.asList(
equalTo(SemanticAttributes.MESSAGING_SYSTEM, "kafka"),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_NAME, record.topic()),
equalTo(SemanticAttributes.MESSAGING_DESTINATION_KIND, "topic"),
equalTo(SemanticAttributes.MESSAGING_OPERATION, "process"),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_SOURCE_PARTITION,
AbstractLongAssert::isNotNegative),
satisfies(
SemanticAttributes.MESSAGING_KAFKA_MESSAGE_OFFSET,
AbstractLongAssert::isNotNegative)));
if (Boolean.getBoolean("otel.instrumentation.kafka.experimental-span-attributes")) {
assertions.add(
satisfies(
AttributeKey.longKey("kafka.record.queue_time_ms"),
AbstractLongAssert::isNotNegative));
}
// consumer group id is not available in version 0.11
if (Boolean.getBoolean("testLatestDeps")) {
assertions.add(equalTo(SemanticAttributes.MESSAGING_KAFKA_CONSUMER_GROUP, "test"));
}
String messageKey = record.key();
if (messageKey != null) {
assertions.add(equalTo(SemanticAttributes.MESSAGING_KAFKA_MESSAGE_KEY, messageKey));
}
String messageValue = record.value();
if (messageValue != null) {
assertions.add(
equalTo(
SemanticAttributes.MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES,
messageValue.getBytes(StandardCharsets.UTF_8).length));
}
return assertions;
}
}
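
Usage of the consumer-side helpers mirrors the assertions rewritten throughout this diff: receiveAttributes covers the "receive" span, batchProcessAttributes the batch "process" span, and processAttributes the per-record "process" spans. A representative assertion, following the call sites above:

trace.hasSpansSatisfyingExactly(
    span ->
        span.hasName("testSingleTopic receive")
            .hasKind(SpanKind.CONSUMER)
            .hasAttributesSatisfyingExactly(receiveAttributes("testSingleTopic")),
    span ->
        span.hasName("testSingleTopic process")
            .hasKind(SpanKind.CONSUMER)
            .hasAttributesSatisfyingExactly(processAttributes(record)));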