Merge branch 'master' of github.com:DataDog/dd-trace-java into labbati/hostname

Luca Abbati 2019-05-17 19:16:50 +02:00
commit 88b6a2ce51
4 changed files with 48 additions and 26 deletions


@@ -25,7 +25,7 @@ jobs:
       - run:
           name: Build Project
-          command: GRADLE_OPTS="-Dorg.gradle.jvmargs='-Xmx1G -Xms64M' -Ddatadog.forkedMaxHeapSize=1G -Ddatadog.forkedMinHeapSize=64M" ./gradlew clean :dd-java-agent:shadowJar compileTestGroovy compileTestScala compileTestJava --build-cache --parallel --stacktrace --no-daemon --max-workers=8
+          command: GRADLE_OPTS="-Dorg.gradle.jvmargs='-Xmx1G -Xms64M' -Ddatadog.forkedMaxHeapSize=1G -Ddatadog.forkedMinHeapSize=64M" ./gradlew clean :dd-java-agent:shadowJar compileTestGroovy compileLatestDepTestGroovy compileTestScala compileLatestDepTestScala compileTestJava compileLatestDepTestJava --build-cache --parallel --stacktrace --no-daemon --max-workers=8
       - run:
           name: Collect Libs


@@ -1,5 +1,6 @@
 import datadog.trace.agent.test.base.HttpClientTest
 import datadog.trace.instrumentation.apachehttpasyncclient.ApacheHttpAsyncClientDecorator
+import io.opentracing.util.GlobalTracer
 import org.apache.http.client.methods.HttpGet
 import org.apache.http.impl.nio.client.HttpAsyncClients
 import org.apache.http.message.BasicHeader
@@ -33,6 +34,10 @@ class ApacheHttpAsyncClientNullCallbackTest extends HttpClientTest<ApacheHttpAsyncClientDecorator> {
     Future future = client.execute(request, null)
     future.get()
     if (callback != null) {
+      // The request span is closed asynchronously, even relative to the returned future, so we have to wait here.
+      if (GlobalTracer.get().activeSpan() != null) {
+        blockUntilChildSpansFinished(1)
+      }
       callback()
     }
     return 200
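
A note on the guard added above: the request span is finished from the async client's I/O thread, so it can still be open when future.get() returns. A minimal sketch of what a polling helper in the spirit of blockUntilChildSpansFinished might look like, assuming a hypothetical test tracer that exposes a finished-span count (the names here are illustrative, not the project's actual helper):

    import java.util.concurrent.TimeUnit;
    import java.util.function.IntSupplier;

    final class SpanWaiter {
      // Polls until `expected` spans have finished or the timeout elapses.
      // The supplier is assumed to read the test tracer's finished-span count.
      static void blockUntilChildSpansFinished(
          final IntSupplier finishedSpanCount, final int expected, final long timeoutMs)
          throws InterruptedException {
        final long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs);
        while (finishedSpanCount.getAsInt() < expected) {
          if (System.nanoTime() > deadline) {
            throw new AssertionError("timed out waiting for " + expected + " finished spans");
          }
          Thread.sleep(10); // brief back-off; the client finishes the span on its own thread
        }
      }
    }

A test would pass something like () -> testTracer.finishedSpans().size() as the supplier, where testTracer stands for whatever in-memory tracer the test harness installs.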


@@ -17,8 +17,15 @@ muzzle {
   pass {
     group = 'com.couchbase.client'
     module = 'java-client'
-    versions = "[2.0.0,)"
-    assertInverse = true
+    // Looks like 2.7.5 was just released and didn't sync up with mirrors properly, causing build failures.
+    // TODO: remove this in a few days.
+    versions = "[2.0.0,2.7.5)"
+    // assertInverse = true
   }
   fail {
     group = 'com.couchbase.client'
     module = 'java-client'
     versions = "(,2.0.0)"
   }
   fail {
     group = 'com.couchbase.client'
@@ -46,6 +53,8 @@ dependencies {
   testCompile group: 'com.couchbase.client', name: 'java-client', version: '2.5.0'
   latestDepTestCompile group: 'org.springframework.data', name: 'spring-data-couchbase', version: '3.+'
-  latestDepTestCompile group: 'com.couchbase.client', name: 'java-client', version: '2.6+'
-  latestDepTestCompile group: 'com.couchbase.client', name: 'encryption', version: '+'
+  // Looks like 2.7.5 was just released and didn't sync up with mirrors properly, causing build failures.
+  // TODO: remove this in a few days.
+  latestDepTestCompile group: 'com.couchbase.client', name: 'java-client', version: '2.7.4'
+  latestDepTestCompile group: 'com.couchbase.client', name: 'encryption', version: '1.0.0'
 }
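
For context on the ranges above: muzzle uses Maven-style version ranges, where a square bracket is an inclusive bound and a parenthesis an exclusive one, so "[2.0.0,2.7.5)" covers 2.0.0 up to but not including the broken 2.7.5, and "(,2.0.0)" is everything below 2.0.0. A small illustrative sketch of that boundary logic, simplified to integer version components (not muzzle's actual resolver):

    import java.util.Arrays;

    final class VersionRangeDemo {
      // "[2.0.0,2.7.5)": lower bound inclusive, upper bound exclusive.
      static boolean inRange(final int[] version, final int[] lowerIncl, final int[] upperExcl) {
        return Arrays.compare(version, lowerIncl) >= 0 && Arrays.compare(version, upperExcl) < 0;
      }

      public static void main(final String[] args) {
        final int[] lower = {2, 0, 0};
        final int[] upper = {2, 7, 5};
        System.out.println(inRange(new int[] {2, 7, 4}, lower, upper)); // true: still tested
        System.out.println(inRange(new int[] {2, 7, 5}, lower, upper)); // false: broken release excluded
      }
    }

Pinning the latestDepTestCompile coordinates to 2.7.4 and 1.0.0 serves the same purpose on the dependency side: the open-ended '2.6+' and '+' wildcards would otherwise resolve to the artifacts affected by the mirror sync.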


@@ -17,9 +17,11 @@ import net.bytebuddy.asm.Advice;
 import net.bytebuddy.description.method.MethodDescription;
 import net.bytebuddy.description.type.TypeDescription;
 import net.bytebuddy.matcher.ElementMatcher;
+import org.apache.kafka.clients.ApiVersions;
 import org.apache.kafka.clients.producer.Callback;
 import org.apache.kafka.clients.producer.ProducerRecord;
 import org.apache.kafka.clients.producer.RecordMetadata;
+import org.apache.kafka.common.record.RecordBatch;

 @AutoService(Instrumenter.class)
 public final class KafkaProducerInstrumentation extends Instrumenter.Default {
@@ -61,6 +63,7 @@ public final class KafkaProducerInstrumentation extends Instrumenter.Default {
   @Advice.OnMethodEnter(suppress = Throwable.class)
   public static Scope startSpan(
+      @Advice.FieldValue("apiVersions") final ApiVersions apiVersions,
       @Advice.Argument(value = 0, readOnly = false) ProducerRecord record,
       @Advice.Argument(value = 1, readOnly = false) Callback callback) {
     final Scope scope = GlobalTracer.get().buildSpan("kafka.produce").startActive(false);
@@ -69,28 +72,33 @@ public final class KafkaProducerInstrumentation extends Instrumenter.Default {
     callback = new ProducerCallback(callback, scope);
-    try {
-      GlobalTracer.get()
-          .inject(
-              scope.span().context(),
-              Format.Builtin.TEXT_MAP,
-              new TextMapInjectAdapter(record.headers()));
-    } catch (final IllegalStateException e) {
-      // Headers must be read-only from a reused record; try again with a new one.
-      record =
-          new ProducerRecord<>(
-              record.topic(),
-              record.partition(),
-              record.timestamp(),
-              record.key(),
-              record.value(),
-              record.headers());
-      GlobalTracer.get()
-          .inject(
-              scope.span().context(),
-              Format.Builtin.TEXT_MAP,
-              new TextMapInjectAdapter(record.headers()));
-    }
+    // Do not inject headers for batch versions below 2.
+    // This is how a similar check is done in the Kafka client itself:
+    // https://github.com/apache/kafka/blob/05fcfde8f69b0349216553f711fdfc3f0259c601/clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java#L411-L412
+    if (apiVersions.maxUsableProduceMagic() >= RecordBatch.MAGIC_VALUE_V2) {
+      try {
+        GlobalTracer.get()
+            .inject(
+                scope.span().context(),
+                Format.Builtin.TEXT_MAP,
+                new TextMapInjectAdapter(record.headers()));
+      } catch (final IllegalStateException e) {
+        // Headers must be read-only from a reused record; try again with a new one.
+        record =
+            new ProducerRecord<>(
+                record.topic(),
+                record.partition(),
+                record.timestamp(),
+                record.key(),
+                record.value(),
+                record.headers());
+        GlobalTracer.get()
+            .inject(
+                scope.span().context(),
+                Format.Builtin.TEXT_MAP,
+                new TextMapInjectAdapter(record.headers()));
+      }
+    }
     return scope;
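
Why the new magic-value guard matters: record headers only exist in Kafka's message format v2, and the linked MemoryRecordsBuilder check shows the client rejecting headers when building batches with an older magic, so injecting tracing headers while the producer negotiates a magic below RecordBatch.MAGIC_VALUE_V2 would break the send. The inject calls write the span context through a TextMap carrier over the record's headers; a rough, simplified sketch of such a carrier (assumed shape, not necessarily the project's TextMapInjectAdapter):

    import java.nio.charset.StandardCharsets;
    import java.util.Iterator;
    import java.util.Map;

    import io.opentracing.propagation.TextMap;
    import org.apache.kafka.common.header.Headers;

    final class HeadersInjectAdapter implements TextMap {
      private final Headers headers;

      HeadersInjectAdapter(final Headers headers) {
        this.headers = headers;
      }

      @Override
      public void put(final String key, final String value) {
        // Headers.add throws IllegalStateException when the headers are read-only,
        // which is exactly the case the catch block above recovers from.
        headers.add(key, value.getBytes(StandardCharsets.UTF_8));
      }

      @Override
      public Iterator<Map.Entry<String, String>> iterator() {
        throw new UnsupportedOperationException("carrier is inject-only");
      }
    }

The catch clause rebuilds the ProducerRecord because a reused record's headers may already have been sealed (made read-only) by a previous send; copying the fields into a fresh record restores writable headers for the second inject attempt.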