Merge pull request #464 from DataDog/tyler/couchbase

Couchbase 2.0+ instrumentation
This commit is contained in:
Tyler Benson 2018-09-05 15:10:12 +10:00 committed by GitHub
commit e508d9232b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
26 changed files with 1248 additions and 203 deletions

View File

@ -4,219 +4,219 @@ defaults: &defaults
working_directory: ~/dd-trace-java
resource_class: xlarge
docker:
- image: &default_container circleci/openjdk:8
- image: &default_container circleci/openjdk:8
cache_keys: &cache_keys
# Reset the cache approx every release
keys:
- dd-trace-java-{{ checksum "dd-trace-java.gradle" }}-{{ .Branch }}-{{ .Revision }}
- dd-trace-java-{{ checksum "dd-trace-java.gradle" }}-{{ .Branch }}
- dd-trace-java-{{ checksum "dd-trace-java.gradle" }}
- dd-trace-java-{{ checksum "dd-trace-java.gradle" }}-{{ .Branch }}-{{ .Revision }}
- dd-trace-java-{{ checksum "dd-trace-java.gradle" }}-{{ .Branch }}
- dd-trace-java-{{ checksum "dd-trace-java.gradle" }}
jobs:
build:
<<: *defaults
docker:
- image: circleci/openjdk:8-jdk
- image: circleci/openjdk:8-jdk
steps:
- checkout
- checkout
- restore_cache:
<<: *cache_keys
- restore_cache:
<<: *cache_keys
- run:
name: Build Project
command: GRADLE_OPTS="-Dorg.gradle.jvmargs=-Xmx1G -Xms64M" ./gradlew clean :dd-java-agent:shadowJar compileTestGroovy compileTestScala compileTestJava check -x test -x latestDepTest -x traceAgentTest --build-cache --parallel --stacktrace --no-daemon --max-workers=4
- run:
name: Build Project
command: GRADLE_OPTS="-Dorg.gradle.jvmargs=-Xmx1G -Xms64M" ./gradlew clean :dd-java-agent:shadowJar compileTestGroovy compileTestScala compileTestJava check -x test -x latestDepTest -x traceAgentTest --build-cache --parallel --stacktrace --no-daemon --max-workers=4
- run:
name: Collect Libs
when: always
command: .circleci/collect_libs.sh
- run:
name: Collect Libs
when: always
command: .circleci/collect_libs.sh
- store_artifacts:
path: ./libs
- store_artifacts:
path: ./libs
- run:
name: Collect Reports
when: always
command: .circleci/collect_reports.sh
- run:
name: Collect Reports
when: always
command: .circleci/collect_reports.sh
- store_artifacts:
path: ./reports
- store_artifacts:
path: ./reports
- persist_to_workspace:
root: .
paths:
- workspace
- persist_to_workspace:
root: .
paths:
- workspace
- save_cache:
key: dd-trace-java-{{ checksum "dd-trace-java.gradle" }}-{{ .Branch }}-{{ .Revision }}
paths: ~/.gradle
background: true
- save_cache:
key: dd-trace-java-{{ checksum "dd-trace-java.gradle" }}-{{ .Branch }}-{{ .Revision }}
paths: ~/.gradle
background: true
default_test_job: &default_test_job
<<: *defaults
docker:
- image: *default_container
# This is used by spymemcached instrumentation tests
- image: memcached
- image: *default_container
# This is used by spymemcached instrumentation tests
- image: memcached
steps:
- checkout
- checkout
- run:
name: Install Additional JVM
command: |
if [ "${INSTALL_ZULU}" != "" ]; then
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 0x219BD9C9
echo 'deb http://repos.azulsystems.com/debian stable main' | sudo tee -a /etc/apt/sources.list.d/zulu.list
sudo apt-get update
sudo apt-get install $INSTALL_ZULU
fi
- run:
name: Install Additional JVM
command: |
if [ "${INSTALL_ZULU}" != "" ]; then
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 0x219BD9C9
echo 'deb http://repos.azulsystems.com/debian stable main' | sudo tee -a /etc/apt/sources.list.d/zulu.list
sudo apt-get update
sudo apt-get install $INSTALL_ZULU
fi
- attach_workspace:
at: .
- attach_workspace:
at: .
- restore_cache:
<<: *cache_keys
- restore_cache:
<<: *cache_keys
- run:
name: Run Tests
command: GRADLE_OPTS="-Dorg.gradle.jvmargs=-Xmx2G -Xms512M" ./gradlew $TEST_TASK --build-cache --parallel --stacktrace --no-daemon --max-workers=3
- run:
name: Run Tests
command: GRADLE_OPTS="-Dorg.gradle.jvmargs=-Xmx2G -Xms512M" ./gradlew $TEST_TASK --build-cache --parallel --stacktrace --no-daemon --max-workers=3
- run:
name: Collect Reports
when: on_fail
command: .circleci/collect_reports.sh
- run:
name: Collect Reports
when: on_fail
command: .circleci/collect_reports.sh
- store_artifacts:
path: ./reports
- store_artifacts:
path: ./reports
- run:
name: Collect Test Results
when: always
command: .circleci/collect_results.sh
- run:
name: Collect Test Results
when: always
command: .circleci/collect_results.sh
- store_test_results:
path: ./results
- store_test_results:
path: ./results
test_7:
<<: *default_test_job
environment:
- JAVA7_HOME: /usr/lib/jvm/zulu-7-amd64
- TEST_TASK: testJava7
- INSTALL_ZULU: zulu-7
- JAVA7_HOME: /usr/lib/jvm/zulu-7-amd64
- TEST_TASK: testJava7
- INSTALL_ZULU: zulu-7
test_8:
<<: *default_test_job
environment:
# We are building on Java8, this is our default JVM so no need to set more homes
- TEST_TASK: test latestDepTest jacocoTestReport jacocoTestCoverageVerification
# We are building on Java8, this is our default JVM so no need to set more homes
- TEST_TASK: test latestDepTest jacocoTestReport jacocoTestCoverageVerification
test_9:
<<: *default_test_job
environment:
- JAVA9_HOME: /usr/lib/jvm/zulu-9-amd64
- TEST_TASK: testJava9 latestDepTestJava9
- INSTALL_ZULU: zulu-9
- JAVA9_HOME: /usr/lib/jvm/zulu-9-amd64
- TEST_TASK: testJava9 latestDepTestJava9
- INSTALL_ZULU: zulu-9
test_10:
<<: *default_test_job
environment:
- JAVA10_HOME: /usr/lib/jvm/zulu-10-amd64
- TEST_TASK: testJava10 latestDepTestJava10
- INSTALL_ZULU: zulu-10
- JAVA10_HOME: /usr/lib/jvm/zulu-10-amd64
- TEST_TASK: testJava10 latestDepTestJava10
- INSTALL_ZULU: zulu-10
agent_integration_tests:
<<: *defaults
docker:
- image: circleci/openjdk:8-jdk
- image: datadog/docker-dd-agent
environment:
- DD_APM_ENABLED=true
- DD_BIND_HOST=0.0.0.0
- DD_API_KEY=invalid_key_but_this_is_fine
- image: circleci/openjdk:8-jdk
- image: datadog/docker-dd-agent
environment:
- DD_APM_ENABLED=true
- DD_BIND_HOST=0.0.0.0
- DD_API_KEY=invalid_key_but_this_is_fine
steps:
- checkout
- checkout
- attach_workspace:
at: .
- attach_workspace:
at: .
- restore_cache:
# Reset the cache approx every release
keys:
- dd-trace-java-{{ checksum "dd-trace-java.gradle" }}-{{ .Branch }}-{{ .Revision }}
- dd-trace-java-{{ checksum "dd-trace-java.gradle" }}-{{ .Branch }}
- dd-trace-java-{{ checksum "dd-trace-java.gradle" }}
- restore_cache:
# Reset the cache approx every release
keys:
- dd-trace-java-{{ checksum "dd-trace-java.gradle" }}-{{ .Branch }}-{{ .Revision }}
- dd-trace-java-{{ checksum "dd-trace-java.gradle" }}-{{ .Branch }}
- dd-trace-java-{{ checksum "dd-trace-java.gradle" }}
- run:
name: Run Trace Agent Tests
command: ./gradlew traceAgentTest --build-cache --parallel --stacktrace --no-daemon --max-workers=6
- run:
name: Run Trace Agent Tests
command: ./gradlew traceAgentTest --build-cache --parallel --stacktrace --no-daemon --max-workers=6
- run:
name: Collect Reports
when: on_fail
command: .circleci/collect_reports.sh
- run:
name: Collect Reports
when: on_fail
command: .circleci/collect_reports.sh
- store_artifacts:
path: ./reports
- store_artifacts:
path: ./reports
- run:
name: Collect Test Results
when: always
command: .circleci/collect_results.sh
- run:
name: Collect Test Results
when: always
command: .circleci/collect_results.sh
- store_test_results:
path: ./results
- store_test_results:
path: ./results
scan_versions:
<<: *defaults
steps:
- checkout
- checkout
- restore_cache:
# Reset the cache approx every release
keys:
- dd-trace-java-version-scan-{{ checksum "dd-trace-java.gradle" }}
- dd-trace-java-version-scan
- restore_cache:
# Reset the cache approx every release
keys:
- dd-trace-java-version-scan-{{ checksum "dd-trace-java.gradle" }}
- dd-trace-java-version-scan
- run:
name: Verify Version Scan and Muzzle
command: ./gradlew verifyVersionScan muzzle --parallel --stacktrace --no-daemon --max-workers=6
- run:
name: Verify Version Scan and Muzzle
command: ./gradlew verifyVersionScan muzzle --parallel --stacktrace --no-daemon --max-workers=6
- save_cache:
key: dd-trace-java-version-scan-{{ checksum "dd-trace-java.gradle" }}
paths: ~/.gradle
- save_cache:
key: dd-trace-java-version-scan-{{ checksum "dd-trace-java.gradle" }}
paths: ~/.gradle
publish: &publish
<<: *defaults
steps:
- checkout
- checkout
- attach_workspace:
at: .
- attach_workspace:
at: .
- restore_cache:
<<: *cache_keys
- restore_cache:
<<: *cache_keys
- run:
name: Decode Signing Key
command: echo $PGP_KEY_FILE | base64 --decode > /home/circleci/dd-trace-java/.circleci/secring.gpg
- run:
name: Decode Signing Key
command: echo $PGP_KEY_FILE | base64 --decode > /home/circleci/dd-trace-java/.circleci/secring.gpg
- deploy:
name: Publish master to Artifactory
command: |
./gradlew -Psigning.keyId=${PGP_KEY_ID} \
-Psigning.password=${PGP_KEY_PASS} \
-Psigning.secretKeyRingFile=/home/circleci/dd-trace-java/.circleci/secring.gpg \
-PbintrayUser=${BINTRAY_USER} \
-PbintrayApiKey=${BINTRAY_API_KEY} \
-PbuildInfo.build.number=${CIRCLE_BUILD_NUM} \
artifactoryPublish --max-workers=1 --build-cache --stacktrace --no-daemon
- deploy:
name: Publish master to Artifactory
command: |
./gradlew -Psigning.keyId=${PGP_KEY_ID} \
-Psigning.password=${PGP_KEY_PASS} \
-Psigning.secretKeyRingFile=/home/circleci/dd-trace-java/.circleci/secring.gpg \
-PbintrayUser=${BINTRAY_USER} \
-PbintrayApiKey=${BINTRAY_API_KEY} \
-PbuildInfo.build.number=${CIRCLE_BUILD_NUM} \
artifactoryPublish --max-workers=1 --build-cache --stacktrace --no-daemon
publish_master:
<<: *publish
@ -227,72 +227,72 @@ workflows:
version: 2
build_test_deploy:
jobs:
- build:
filters:
tags:
only: /.*/
- build:
filters:
tags:
only: /.*/
- test_7:
requires:
- build
filters:
tags:
only: /.*/
- test_8:
requires:
- build
filters:
tags:
only: /.*/
- test_9:
requires:
- build
filters:
tags:
only: /.*/
- test_10:
requires:
- build
filters:
tags:
only: /.*/
- test_7:
requires:
- build
filters:
tags:
only: /.*/
- test_8:
requires:
- build
filters:
tags:
only: /.*/
- test_9:
requires:
- build
filters:
tags:
only: /.*/
- test_10:
requires:
- build
filters:
tags:
only: /.*/
- agent_integration_tests:
requires:
- build
filters:
tags:
only: /.*/
- agent_integration_tests:
requires:
- build
filters:
tags:
only: /.*/
- scan_versions:
requires:
- build
filters:
branches:
ignore: master
- scan_versions:
requires:
- build
filters:
branches:
ignore: master
- publish_master:
requires:
- test_7
- test_8
- test_9
- test_10
- agent_integration_tests
filters:
branches:
only: master
tags:
ignore: /.*/
- publish_master:
requires:
- test_7
- test_8
- test_9
- test_10
- agent_integration_tests
filters:
branches:
only: master
tags:
ignore: /.*/
- publish_tag:
requires:
- test_7
- test_8
- test_9
- test_10
- agent_integration_tests
filters:
branches:
ignore: /.*/
tags:
only: /^v.*/
- publish_tag:
requires:
- test_7
- test_8
- test_9
- test_10
- agent_integration_tests
filters:
branches:
ignore: /.*/
tags:
only: /^v.*/

View File

@ -0,0 +1,48 @@
apply from: "${rootDir}/gradle/java.gradle"
apply plugin: 'org.unbroken-dome.test-sets'

// latestDepTest reuses the sources in 'test' but runs them against the newer
// dependency versions declared in the latestDepTestCompile configuration below.
testSets {
  latestDepTest {
    dirName = 'test'
  }
}

muzzle {
  // The instrumentation must apply cleanly to java-client 2.0.0 and later;
  // assertInverse also requires that it does NOT apply outside that range.
  pass {
    group = 'com.couchbase.client'
    module = 'java-client'
    versions = "[2.0.0,)"
    assertInverse = true
  }
  // The legacy 1.x 'couchbase-client' artifact must never match, for any version.
  fail {
    group = 'com.couchbase.client'
    module = 'couchbase-client'
    versions = "(,)"
  }
}

dependencies {
  // Compile against the oldest supported client so newer APIs are not used accidentally.
  compileOnly group: 'com.couchbase.client', name: 'java-client', version: '2.0.0'

  compile project(':dd-java-agent:agent-tooling')

  compile deps.bytebuddy
  compile deps.opentracing
  annotationProcessor deps.autoservice
  implementation deps.autoservice

  testCompile project(':dd-java-agent:testing')
  testCompile group: 'com.couchbase.mock', name: 'CouchbaseMock', version: '1.5.19'
  testCompile group: 'org.springframework.data', name: 'spring-data-couchbase', version: '2.0.0.RELEASE'
  // Earliest version that seems to allow queries with CouchbaseMock:
  testCompile group: 'com.couchbase.client', name: 'java-client', version: '2.5.0'

  latestDepTestCompile group: 'org.springframework.data', name: 'spring-data-couchbase', version: '3.+'
  latestDepTestCompile group: 'com.couchbase.client', name: 'java-client', version: '2.6+'
  latestDepTestCompile group: 'com.couchbase.client', name: 'encryption', version: '+'
}

// Every class in this module requires at least Java 8 at runtime.
testJava8Minimum += "**/*.class"

View File

@ -0,0 +1,157 @@
package datadog.trace.instrumentation.couchbase.client;
import static io.opentracing.log.Fields.ERROR_OBJECT;
import static net.bytebuddy.matcher.ElementMatchers.isInterface;
import static net.bytebuddy.matcher.ElementMatchers.isMethod;
import static net.bytebuddy.matcher.ElementMatchers.isPublic;
import static net.bytebuddy.matcher.ElementMatchers.named;
import static net.bytebuddy.matcher.ElementMatchers.not;
import static net.bytebuddy.matcher.ElementMatchers.returns;
import com.couchbase.client.java.CouchbaseCluster;
import com.google.auto.service.AutoService;
import datadog.trace.agent.tooling.Instrumenter;
import datadog.trace.api.DDTags;
import datadog.trace.bootstrap.CallDepthThreadLocalMap;
import io.opentracing.Span;
import io.opentracing.noop.NoopSpan;
import io.opentracing.tag.Tags;
import io.opentracing.util.GlobalTracer;
import java.lang.reflect.Method;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
import net.bytebuddy.asm.Advice;
import net.bytebuddy.description.type.TypeDescription;
import net.bytebuddy.matcher.ElementMatcher;
import rx.Observable;
import rx.functions.Action0;
import rx.functions.Action1;
/**
 * Instruments the async Couchbase bucket classes: every public method that returns an
 * {@code rx.Observable} is wrapped so a "couchbase.call" span is started on subscribe
 * and finished on completion or error.
 */
@AutoService(Instrumenter.class)
public class CouchbaseBucketInstrumentation extends Instrumenter.Default {

  public CouchbaseBucketInstrumentation() {
    // Integration name used to enable/disable this instrumentation.
    super("couchbase");
  }

  @Override
  public ElementMatcher<TypeDescription> typeMatcher() {
    // Match the concrete async bucket/bucket-manager implementations, never interfaces.
    return not(isInterface())
        .and(
            named("com.couchbase.client.java.bucket.DefaultAsyncBucketManager")
                .or(named("com.couchbase.client.java.CouchbaseAsyncBucket")));
  }

  @Override
  public String[] helperClassNames() {
    // The inner Action classes below are injected into the application classloader so
    // the advice can reference them from within the instrumented Couchbase classes.
    return new String[] {
      getClass().getName() + "$TraceSpanStart",
      getClass().getName() + "$TraceSpanFinish",
      getClass().getName() + "$TraceSpanError",
    };
  }

  @Override
  public Map<ElementMatcher, String> transformers() {
    // Apply the advice to every public method returning rx.Observable.
    return Collections.<ElementMatcher, String>singletonMap(
        isMethod().and(isPublic()).and(returns(named("rx.Observable"))),
        CouchbaseClientAdvice.class.getName());
  }

  public static class CouchbaseClientAdvice {

    /**
     * Tracks nesting so only the outermost Observable-returning call is traced. The depth
     * is keyed on CouchbaseCluster.class, shared with the cluster instrumentation.
     */
    @Advice.OnMethodEnter
    public static int trackCallDepth() {
      return CallDepthThreadLocalMap.incrementCallDepth(CouchbaseCluster.class);
    }

    // NOTE(review): no onThrowable on this exit advice — if the instrumented method
    // throws, this does not run and the call depth is not reset here; confirm whether
    // that can leak the thread-local counter.
    @Advice.OnMethodExit
    public static void subscribeResult(
        @Advice.Enter final int callDepth,
        @Advice.Origin final Method method,
        @Advice.FieldValue("bucket") final String bucket,
        @Advice.AllArguments final Object[] args, // captured but currently unused
        @Advice.Return(readOnly = false) Observable result) {
      // Non-zero depth means we are inside another instrumented call; skip.
      if (callDepth > 0) {
        return;
      }
      CallDepthThreadLocalMap.reset(CouchbaseCluster.class);
      // Replace the returned Observable with one that manages a span around the work.
      // The span is shared between the three callbacks via this AtomicReference.
      final AtomicReference<Span> spanRef = new AtomicReference<>();
      result =
          result
              .doOnSubscribe(new TraceSpanStart(method, bucket, spanRef))
              .doOnCompleted(new TraceSpanFinish(spanRef))
              .doOnError(new TraceSpanError(spanRef));
    }
  }

  /** Starts the span when the Observable is first subscribed to. */
  public static class TraceSpanStart implements Action0 {
    private final Method method;
    private final String bucket;
    private final AtomicReference<Span> spanRef;

    public TraceSpanStart(
        final Method method, final String bucket, final AtomicReference<Span> spanRef) {
      this.method = method;
      this.bucket = bucket;
      this.spanRef = spanRef;
    }

    @Override
    public void call() {
      // This is called each time an observer has a new subscriber, but we should only time it once.
      // The CAS against null claims the slot with a placeholder no-op span.
      if (!spanRef.compareAndSet(null, NoopSpan.INSTANCE)) {
        return;
      }
      final Class<?> declaringClass = method.getDeclaringClass();
      // Strip the async-implementation prefixes so the resource reads like the sync API,
      // e.g. "CouchbaseAsyncBucket" -> "Bucket".
      final String className =
          declaringClass.getSimpleName().replace("CouchbaseAsync", "").replace("DefaultAsync", "");
      final String resourceName = className + "." + method.getName() + "(" + bucket + ")";
      // just replace the no-op span.
      spanRef.set(
          GlobalTracer.get()
              .buildSpan("couchbase.call")
              .withTag(DDTags.SERVICE_NAME, "couchbase")
              .withTag(DDTags.RESOURCE_NAME, resourceName)
              .withTag("bucket", bucket)
              .start());
    }
  }

  /** Finishes the span on successful completion; getAndSet(null) makes this one-shot. */
  public static class TraceSpanFinish implements Action0 {
    private final AtomicReference<Span> spanRef;

    public TraceSpanFinish(final AtomicReference<Span> spanRef) {
      this.spanRef = spanRef;
    }

    @Override
    public void call() {
      final Span span = spanRef.getAndSet(null);
      if (span != null) {
        span.finish();
      }
    }
  }

  /** Marks the span as errored, logs the throwable, and finishes it. */
  public static class TraceSpanError implements Action1<Throwable> {
    private final AtomicReference<Span> spanRef;

    public TraceSpanError(final AtomicReference<Span> spanRef) {
      this.spanRef = spanRef;
    }

    @Override
    public void call(final Throwable throwable) {
      final Span span = spanRef.getAndSet(null);
      if (span != null) {
        Tags.ERROR.set(span, true);
        span.log(Collections.singletonMap(ERROR_OBJECT, throwable));
        span.finish();
      }
    }
  }
}

View File

@ -0,0 +1,151 @@
package datadog.trace.instrumentation.couchbase.client;
import static io.opentracing.log.Fields.ERROR_OBJECT;
import static net.bytebuddy.matcher.ElementMatchers.isInterface;
import static net.bytebuddy.matcher.ElementMatchers.isMethod;
import static net.bytebuddy.matcher.ElementMatchers.isPublic;
import static net.bytebuddy.matcher.ElementMatchers.named;
import static net.bytebuddy.matcher.ElementMatchers.not;
import static net.bytebuddy.matcher.ElementMatchers.returns;
import com.couchbase.client.java.CouchbaseCluster;
import com.google.auto.service.AutoService;
import datadog.trace.agent.tooling.Instrumenter;
import datadog.trace.api.DDTags;
import datadog.trace.bootstrap.CallDepthThreadLocalMap;
import io.opentracing.Span;
import io.opentracing.noop.NoopSpan;
import io.opentracing.tag.Tags;
import io.opentracing.util.GlobalTracer;
import java.lang.reflect.Method;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
import net.bytebuddy.asm.Advice;
import net.bytebuddy.description.type.TypeDescription;
import net.bytebuddy.matcher.ElementMatcher;
import rx.Observable;
import rx.functions.Action0;
import rx.functions.Action1;
/**
 * Instruments the async Couchbase cluster classes: every public method that returns an
 * {@code rx.Observable} is wrapped so a "couchbase.call" span is started on subscribe
 * and finished on completion or error. Mirrors CouchbaseBucketInstrumentation but
 * cluster-level calls have no bucket, so no "bucket" tag is set.
 */
@AutoService(Instrumenter.class)
public class CouchbaseClusterInstrumentation extends Instrumenter.Default {

  public CouchbaseClusterInstrumentation() {
    // Integration name used to enable/disable this instrumentation.
    super("couchbase");
  }

  @Override
  public ElementMatcher<TypeDescription> typeMatcher() {
    // Match the concrete async cluster/cluster-manager implementations, never interfaces.
    return not(isInterface())
        .and(
            named("com.couchbase.client.java.cluster.DefaultAsyncClusterManager")
                .or(named("com.couchbase.client.java.CouchbaseAsyncCluster")));
  }

  @Override
  public String[] helperClassNames() {
    // The inner Action classes below are injected into the application classloader so
    // the advice can reference them from within the instrumented Couchbase classes.
    return new String[] {
      getClass().getName() + "$TraceSpanStart",
      getClass().getName() + "$TraceSpanFinish",
      getClass().getName() + "$TraceSpanError",
    };
  }

  @Override
  public Map<ElementMatcher, String> transformers() {
    // Apply the advice to every public method returning rx.Observable.
    return Collections.<ElementMatcher, String>singletonMap(
        isMethod().and(isPublic()).and(returns(named("rx.Observable"))),
        CouchbaseClientAdvice.class.getName());
  }

  public static class CouchbaseClientAdvice {

    /**
     * Tracks nesting so only the outermost Observable-returning call is traced. The depth
     * is keyed on CouchbaseCluster.class, shared with the bucket instrumentation.
     */
    @Advice.OnMethodEnter
    public static int trackCallDepth() {
      return CallDepthThreadLocalMap.incrementCallDepth(CouchbaseCluster.class);
    }

    // NOTE(review): no onThrowable on this exit advice — if the instrumented method
    // throws, this does not run and the call depth is not reset here; confirm whether
    // that can leak the thread-local counter.
    @Advice.OnMethodExit
    public static void subscribeResult(
        @Advice.Enter final int callDepth,
        @Advice.Origin final Method method,
        @Advice.Return(readOnly = false) Observable result) {
      // Non-zero depth means we are inside another instrumented call; skip.
      if (callDepth > 0) {
        return;
      }
      CallDepthThreadLocalMap.reset(CouchbaseCluster.class);
      // Replace the returned Observable with one that manages a span around the work.
      // The span is shared between the three callbacks via this AtomicReference.
      final AtomicReference<Span> spanRef = new AtomicReference<>();
      result =
          result
              .doOnSubscribe(new TraceSpanStart(method, spanRef))
              .doOnCompleted(new TraceSpanFinish(spanRef))
              .doOnError(new TraceSpanError(spanRef));
    }
  }

  /** Starts the span when the Observable is first subscribed to. */
  public static class TraceSpanStart implements Action0 {
    private final Method method;
    private final AtomicReference<Span> spanRef;

    public TraceSpanStart(final Method method, final AtomicReference<Span> spanRef) {
      this.method = method;
      this.spanRef = spanRef;
    }

    @Override
    public void call() {
      // This is called each time an observer has a new subscriber, but we should only time it once.
      // The CAS against null claims the slot with a placeholder no-op span.
      if (!spanRef.compareAndSet(null, NoopSpan.INSTANCE)) {
        return;
      }
      final Class<?> declaringClass = method.getDeclaringClass();
      // Strip the async-implementation prefixes so the resource reads like the sync API,
      // e.g. "DefaultAsyncClusterManager" -> "ClusterManager".
      final String className =
          declaringClass.getSimpleName().replace("CouchbaseAsync", "").replace("DefaultAsync", "");
      final String resourceName = className + "." + method.getName();
      // just replace the no-op span.
      spanRef.set(
          GlobalTracer.get()
              .buildSpan("couchbase.call")
              .withTag(DDTags.SERVICE_NAME, "couchbase")
              .withTag(DDTags.RESOURCE_NAME, resourceName)
              .start());
    }
  }

  /** Finishes the span on successful completion; getAndSet(null) makes this one-shot. */
  public static class TraceSpanFinish implements Action0 {
    private final AtomicReference<Span> spanRef;

    public TraceSpanFinish(final AtomicReference<Span> spanRef) {
      this.spanRef = spanRef;
    }

    @Override
    public void call() {
      final Span span = spanRef.getAndSet(null);
      if (span != null) {
        span.finish();
      }
    }
  }

  /** Marks the span as errored, logs the throwable, and finishes it. */
  public static class TraceSpanError implements Action1<Throwable> {
    private final AtomicReference<Span> spanRef;

    public TraceSpanError(final AtomicReference<Span> spanRef) {
      this.spanRef = spanRef;
    }

    @Override
    public void call(final Throwable throwable) {
      final Span span = spanRef.getAndSet(null);
      if (span != null) {
        Tags.ERROR.set(span, true);
        span.log(Collections.singletonMap(ERROR_OBJECT, throwable));
        span.finish();
      }
    }
  }
}

View File

@ -0,0 +1,130 @@
import com.couchbase.client.java.Bucket
import com.couchbase.client.java.document.JsonDocument
import com.couchbase.client.java.document.json.JsonObject
import com.couchbase.client.java.query.N1qlQuery
import util.AbstractCouchbaseTest
import static datadog.trace.agent.test.asserts.ListWriterAssert.assertTraces
// Integration test for the couchbase instrumentation using direct java-client calls
// against the mock buckets provided by AbstractCouchbaseTest.
class CouchbaseClientTest extends AbstractCouchbaseTest {

  def "test client #type"() {
    when:
    // Cluster-level call: expected to be traced without a "bucket" tag.
    manager.hasBucket(bucketSettings.name())

    then:
    assertTraces(TEST_WRITER, 1) {
      trace(0, 1) {
        span(0) {
          serviceName "couchbase"
          resourceName "ClusterManager.hasBucket"
          operationName "couchbase.call"
          errored false
          parent()
          tags {
            defaultTags()
          }
        }
      }
    }
    TEST_WRITER.clear()

    when:
    // Connect to the bucket and open it
    Bucket bkt = cluster.openBucket(bucketSettings.name(), bucketSettings.password())

    // Create a JSON document and store it with the ID "helloworld"
    JsonObject content = JsonObject.create().put("hello", "world")
    def inserted = bkt.upsert(JsonDocument.create("helloworld", content))

    then:
    // Bucket-level call: resource name includes the bucket and the "bucket" tag is set.
    assertTraces(TEST_WRITER, 1) {
      trace(0, 1) {
        span(0) {
          serviceName "couchbase"
          resourceName "Bucket.upsert(${bkt.name()})"
          operationName "couchbase.call"
          errored false
          parent()
          tags {
            "bucket" bkt.name()
            defaultTags()
          }
        }
      }
    }
    TEST_WRITER.clear()

    when:
    def found = bkt.get("helloworld")

    then:
    found == inserted
    found.content().getString("hello") == "world"

    and:
    assertTraces(TEST_WRITER, 1) {
      trace(0, 1) {
        span(0) {
          serviceName "couchbase"
          resourceName "Bucket.get(${bkt.name()})"
          operationName "couchbase.call"
          errored false
          parent()
          tags {
            "bucket" bkt.name()
            defaultTags()
          }
        }
      }
    }
    TEST_WRITER.clear()

    where:
    // Runs once per bucket type; manager/cluster/settings come from the base class.
    manager          | cluster          | bucketSettings
    couchbaseManager | couchbaseCluster | bucketCouchbase
    memcacheManager  | memcacheCluster  | bucketMemcache

    type = bucketSettings.type().name()
  }

  def "test query"() {
    setup:
    Bucket bkt = cluster.openBucket(bucketSettings.name(), bucketSettings.password())

    when:
    // Mock expects this specific query.
    // See com.couchbase.mock.http.query.QueryServer.handleString.
    def result = bkt.query(N1qlQuery.simple("SELECT mockrow"))

    then:
    result.parseSuccess()
    result.finalSuccess()
    result.first().value().get("row") == "value"

    and:
    assertTraces(TEST_WRITER, 1) {
      trace(0, 1) {
        span(0) {
          serviceName "couchbase"
          resourceName "Bucket.query(${bkt.name()})"
          operationName "couchbase.call"
          errored false
          parent()
          tags {
            "bucket" bkt.name()
            defaultTags()
          }
        }
      }
    }

    where:
    manager          | cluster          | bucketSettings
    couchbaseManager | couchbaseCluster | bucketCouchbase
    // Only couchbase buckets support queries.

    type = bucketSettings.type().name()
  }
}

View File

@ -0,0 +1,41 @@
package springdata
import com.couchbase.client.java.cluster.BucketSettings
import com.couchbase.client.java.env.CouchbaseEnvironment
import org.springframework.context.annotation.ComponentScan
import org.springframework.context.annotation.Configuration
import org.springframework.data.couchbase.config.AbstractCouchbaseConfiguration
import org.springframework.data.couchbase.repository.config.EnableCouchbaseRepositories
import static com.google.common.base.Preconditions.checkNotNull
// Spring configuration that wires the mock test bucket into Spring Data Couchbase.
// The static fields must be assigned before Spring instantiates this class.
@Configuration
@EnableCouchbaseRepositories(basePackages = "springdata")
@ComponentScan(basePackages = "springdata")
class CouchbaseConfig extends AbstractCouchbaseConfiguration {

  // Assigned by the test's setupSpec before the application context is created.
  static CouchbaseEnvironment environment
  static BucketSettings bucketSettings

  @Override
  protected CouchbaseEnvironment getEnvironment() {
    // Fail fast if the test never assigned an environment.
    checkNotNull(environment)
  }

  @Override
  protected List<String> getBootstrapHosts() {
    Collections.singletonList("127.0.0.1")
  }

  @Override
  protected String getBucketName() {
    bucketSettings.name()
  }

  @Override
  protected String getBucketPassword() {
    bucketSettings.password()
  }
}

View File

@ -0,0 +1,228 @@
package springdata
import com.couchbase.client.java.view.DefaultView
import com.couchbase.client.java.view.DesignDocument
import org.springframework.context.ConfigurableApplicationContext
import org.springframework.context.annotation.AnnotationConfigApplicationContext
import org.springframework.data.repository.CrudRepository
import spock.lang.Shared
import util.AbstractCouchbaseTest
import static datadog.trace.agent.test.asserts.ListWriterAssert.assertTraces
// Exercises the couchbase instrumentation through a Spring Data CrudRepository,
// covering findAll (backed by a view query) and a full CRUD round trip.
class CouchbaseSpringRepositoryTest extends AbstractCouchbaseTest {
  // Lookup closure resolved once: Spring Data 1.x exposes findOne(Serializable),
  // Spring Data 2+ replaced it with findById(id) returning an Optional.
  private static final Closure<Doc> FIND
  static {
    // This method is different in Spring Data 2+
    try {
      CrudRepository.getMethod("findOne", Serializable)
      FIND = { DocRepository repo, String id ->
        repo.findOne(id)
      }
    } catch (NoSuchMethodException e) {
      FIND = { DocRepository repo, String id ->
        repo.findById(id).get()
      }
    }
  }

  @Shared
  ConfigurableApplicationContext applicationContext
  @Shared
  DocRepository repo

  def setupSpec() {
    // Create view for SpringRepository's findAll()
    couchbaseCluster.openBucket(bucketCouchbase.name(), bucketCouchbase.password()).bucketManager()
      .insertDesignDocument(
        DesignDocument.create("doc", Collections.singletonList(DefaultView.create("all",
          '''
          function (doc, meta) {
            if (doc._class == "springdata.Doc") {
              emit(meta.id, null);
            }
          }
          '''.stripIndent()
        )))
      )
    // Hand the shared environment/bucket settings to the Spring config class before
    // the application context is built.
    CouchbaseConfig.setEnvironment(couchbaseEnvironment)
    CouchbaseConfig.setBucketSettings(bucketCouchbase)

    // Close all buckets and disconnect
    couchbaseCluster.disconnect()

    applicationContext = new AnnotationConfigApplicationContext(CouchbaseConfig)
    repo = applicationContext.getBean(DocRepository)
  }

  def cleanupSpec() {
    applicationContext.close()
  }

  def setup() {
    // Start each feature from an empty bucket and an empty trace writer.
    repo.deleteAll()
    TEST_WRITER.waitForTraces(1) // There might be more if there were documents to delete
    TEST_WRITER.clear()
  }

  def "test empty repo"() {
    when:
    def result = repo.findAll()

    then:
    !result.iterator().hasNext()

    and:
    // findAll is implemented via a bucket query against the "all" view.
    assertTraces(TEST_WRITER, 1) {
      trace(0, 1) {
        span(0) {
          serviceName "couchbase"
          resourceName "Bucket.query(${bucketCouchbase.name()})"
          operationName "couchbase.call"
          errored false
          parent()
          tags {
            "bucket" bucketCouchbase.name()
            defaultTags()
          }
        }
      }
    }

    where:
    indexName = "test-index"
  }

  def "test CRUD"() {
    when:
    def doc = new Doc()

    then: // CREATE
    repo.save(doc) == doc

    and:
    assertTraces(TEST_WRITER, 1) {
      trace(0, 1) {
        span(0) {
          serviceName "couchbase"
          resourceName "Bucket.upsert(${bucketCouchbase.name()})"
          operationName "couchbase.call"
          errored false
          parent()
          tags {
            "bucket" bucketCouchbase.name()
            defaultTags()
          }
        }
      }
    }
    TEST_WRITER.clear()

    and: // RETRIEVE
    FIND(repo, "1") == doc

    and:
    assertTraces(TEST_WRITER, 1) {
      trace(0, 1) {
        span(0) {
          serviceName "couchbase"
          resourceName "Bucket.get(${bucketCouchbase.name()})"
          operationName "couchbase.call"
          errored false
          parent()
          tags {
            "bucket" bucketCouchbase.name()
            defaultTags()
          }
        }
      }
    }
    TEST_WRITER.clear()

    when:
    doc.data = "other data"

    then: // UPDATE
    repo.save(doc) == doc
    repo.findAll().asList() == [doc]
    // save + findAll produce three spans: upsert, the view query, and the get for the row.
    assertTraces(TEST_WRITER, 3) {
      trace(0, 1) {
        span(0) {
          serviceName "couchbase"
          resourceName "Bucket.upsert(${bucketCouchbase.name()})"
          operationName "couchbase.call"
          errored false
          parent()
          tags {
            "bucket" bucketCouchbase.name()
            defaultTags()
          }
        }
      }
      trace(1, 1) {
        span(0) {
          serviceName "couchbase"
          resourceName "Bucket.query(${bucketCouchbase.name()})"
          operationName "couchbase.call"
          errored false
          parent()
          tags {
            "bucket" bucketCouchbase.name()
            defaultTags()
          }
        }
      }
      trace(2, 1) {
        span(0) {
          serviceName "couchbase"
          resourceName "Bucket.get(${bucketCouchbase.name()})"
          operationName "couchbase.call"
          errored false
          parent()
          tags {
            "bucket" bucketCouchbase.name()
            defaultTags()
          }
        }
      }
    }
    TEST_WRITER.clear()

    when: // DELETE
    repo.delete("1")

    then:
    !repo.findAll().iterator().hasNext()

    and:
    // delete + findAll: a remove span followed by the view query span.
    assertTraces(TEST_WRITER, 2) {
      trace(0, 1) {
        span(0) {
          serviceName "couchbase"
          resourceName "Bucket.remove(${bucketCouchbase.name()})"
          operationName "couchbase.call"
          errored false
          parent()
          tags {
            "bucket" bucketCouchbase.name()
            defaultTags()
          }
        }
      }
      trace(1, 1) {
        span(0) {
          serviceName "couchbase"
          resourceName "Bucket.query(${bucketCouchbase.name()})"
          operationName "couchbase.call"
          errored false
          parent()
          tags {
            "bucket" bucketCouchbase.name()
            defaultTags()
          }
        }
      }
    }
  }
}

View File

@ -0,0 +1,101 @@
package springdata
import com.couchbase.client.java.Bucket
import org.springframework.data.couchbase.core.CouchbaseTemplate
import spock.lang.Shared
import util.AbstractCouchbaseTest
import static datadog.trace.agent.test.asserts.ListWriterAssert.assertTraces
// Exercises the couchbase instrumentation through Spring's CouchbaseTemplate for
// both a couchbase-type and a memcached-type bucket.
class CouchbaseSpringTemplateTest extends AbstractCouchbaseTest {

  @Shared
  List<CouchbaseTemplate> templates

  def setupSpec() {
    // NOTE(review): these locals shadow the inherited bucketCouchbase/bucketMemcache
    // settings properties from AbstractCouchbaseTest; the initializer RHS appears to
    // resolve to the inherited properties rather than the uninitialized locals — confirm.
    Bucket bucketCouchbase = couchbaseCluster.openBucket(bucketCouchbase.name(), bucketCouchbase.password())
    Bucket bucketMemcache = memcacheCluster.openBucket(bucketMemcache.name(), bucketMemcache.password())

    // One template per bucket type; the feature below iterates over both.
    templates = [new CouchbaseTemplate(couchbaseManager.info(), bucketCouchbase),
                 new CouchbaseTemplate(memcacheManager.info(), bucketMemcache)]
  }

  def "test write/read #name"() {
    setup:
    def doc = new Doc()

    when:
    template.save(doc)

    then:
    template.findById("1", Doc) != null

    when:
    template.remove(doc)

    then:
    template.findById("1", Doc) == null

    and:
    // Expected call sequence: upsert (save), get (findById), remove, get (second findById).
    assertTraces(TEST_WRITER, 4) {
      trace(0, 1) {
        span(0) {
          serviceName "couchbase"
          resourceName "Bucket.upsert($name)"
          operationName "couchbase.call"
          errored false
          parent()
          tags {
            "bucket" name
            defaultTags()
          }
        }
      }
      trace(1, 1) {
        span(0) {
          serviceName "couchbase"
          resourceName "Bucket.get($name)"
          operationName "couchbase.call"
          errored false
          parent()
          tags {
            "bucket" name
            defaultTags()
          }
        }
      }
      trace(2, 1) {
        span(0) {
          serviceName "couchbase"
          resourceName "Bucket.remove($name)"
          operationName "couchbase.call"
          errored false
          parent()
          tags {
            "bucket" name
            defaultTags()
          }
        }
      }
      trace(3, 1) {
        span(0) {
          serviceName "couchbase"
          resourceName "Bucket.get($name)"
          operationName "couchbase.call"
          errored false
          parent()
          tags {
            "bucket" name
            defaultTags()
          }
        }
      }
    }

    where:
    template << templates
    name = template.couchbaseBucket.name()
  }
}

View File

@ -0,0 +1,29 @@
package springdata
import groovy.transform.EqualsAndHashCode
import org.springframework.data.annotation.Id
import org.springframework.data.couchbase.core.mapping.Document
// Minimal persistent entity used by the Spring Data Couchbase tests.
@Document
@EqualsAndHashCode
class Doc {
  // Fixed id so tests can always look the document up as "1".
  @Id
  private String id = "1"
  private String data = "some data"

  String getId() {
    return id
  }

  void setId(String id) {
    this.id = id
  }

  String getData() {
    return data
  }

  void setData(String data) {
    this.data = data
  }
}

View File

@ -0,0 +1,5 @@
package springdata
import org.springframework.data.couchbase.repository.CouchbaseRepository
// Spring Data repository for Doc entities keyed by String id; Spring generates the implementation.
interface DocRepository extends CouchbaseRepository<Doc, String> {}

View File

@ -0,0 +1,132 @@
package util
import com.couchbase.client.core.metrics.DefaultLatencyMetricsCollectorConfig
import com.couchbase.client.core.metrics.DefaultMetricsCollectorConfig
import com.couchbase.client.java.CouchbaseCluster
import com.couchbase.client.java.bucket.BucketType
import com.couchbase.client.java.cluster.BucketSettings
import com.couchbase.client.java.cluster.ClusterManager
import com.couchbase.client.java.cluster.DefaultBucketSettings
import com.couchbase.client.java.env.CouchbaseEnvironment
import com.couchbase.client.java.env.DefaultCouchbaseEnvironment
import com.couchbase.mock.Bucket
import com.couchbase.mock.BucketConfiguration
import com.couchbase.mock.CouchbaseMock
import com.couchbase.mock.http.query.QueryServer
import datadog.trace.agent.test.AgentTestRunner
import datadog.trace.agent.test.TestUtils
import spock.lang.Shared
import java.util.concurrent.RejectedExecutionException
import java.util.concurrent.TimeUnit
/**
 * Shared base class for Couchbase instrumentation tests. Boots an in-process
 * CouchbaseMock server and connects two clusters to it: one backed by a
 * COUCHBASE-type bucket and one backed by a MEMCACHED-type bucket. Subclasses
 * use the shared clusters/managers to exercise the traced client API.
 */
abstract class AbstractCouchbaseTest extends AgentTestRunner {

  // Default administrator credentials recognized by CouchbaseMock.
  private static final USERNAME = "Administrator"
  private static final PASSWORD = "password"

  // Random free port so concurrent test JVMs don't collide.
  @Shared
  private int port = TestUtils.randomOpenPort()

  // Bucket names derive from the concrete spec's class name, keeping buckets
  // from different specs isolated from each other.
  @Shared
  private String testBucketName = this.getClass().simpleName

  // Settings for the COUCHBASE-type test bucket.
  @Shared
  protected bucketCouchbase = DefaultBucketSettings.builder()
    .enableFlush(true)
    .name("$testBucketName-cb")
    .password("test-pass")
    .type(BucketType.COUCHBASE)
    .quota(100)
    .build()

  // Settings for the MEMCACHED-type test bucket.
  @Shared
  protected bucketMemcache = DefaultBucketSettings.builder()
    .enableFlush(true)
    .name("$testBucketName-mem")
    .password("test-pass")
    .type(BucketType.MEMCACHED)
    .quota(100)
    .build()

  @Shared
  CouchbaseMock mock

  @Shared
  protected CouchbaseCluster couchbaseCluster

  @Shared
  protected CouchbaseCluster memcacheCluster

  @Shared
  protected CouchbaseEnvironment couchbaseEnvironment

  @Shared
  protected CouchbaseEnvironment memcacheEnvironment

  @Shared
  protected ClusterManager couchbaseManager

  @Shared
  protected ClusterManager memcacheManager

  /**
   * Starts the mock server, creates both buckets, and connects a cluster +
   * manager pair for each. The mock must be started (and each bucket created)
   * before the corresponding cluster connects to it.
   */
  def setupSpec() {
    mock = new CouchbaseMock("127.0.0.1", port, 1, 1)
    // Register a stub N1QL endpoint so query calls don't 404.
    mock.httpServer.register("/query", new QueryServer())
    mock.start()
    println "CouchbaseMock listening on localhost:$port"

    mock.createBucket(convert(bucketCouchbase))
    couchbaseEnvironment = envBuilder(bucketCouchbase).build()
    couchbaseCluster = CouchbaseCluster.create(couchbaseEnvironment, Arrays.asList("127.0.0.1"))
    couchbaseManager = couchbaseCluster.clusterManager(USERNAME, PASSWORD)

    mock.createBucket(convert(bucketMemcache))
    memcacheEnvironment = envBuilder(bucketMemcache).build()
    memcacheCluster = CouchbaseCluster.create(memcacheEnvironment, Arrays.asList("127.0.0.1"))
    memcacheManager = memcacheCluster.clusterManager(USERNAME, PASSWORD)

    // Cache buckets: open both eagerly so individual tests don't pay (or
    // trace) the first-open handshake.
    couchbaseCluster.openBucket(bucketCouchbase.name(), bucketCouchbase.password())
    memcacheCluster.openBucket(bucketMemcache.name(), bucketMemcache.password())
  }

  /**
   * Translates client-side BucketSettings into the mock server's
   * BucketConfiguration: same name/password/type, single node, no replicas.
   */
  private static BucketConfiguration convert(BucketSettings bucketSettings) {
    def configuration = new BucketConfiguration()
    configuration.name = bucketSettings.name()
    configuration.password = bucketSettings.password()
    configuration.type = Bucket.BucketType.valueOf(bucketSettings.type().name())
    configuration.numNodes = 1
    configuration.numReplicas = 0
    return configuration
  }

  /**
   * Best-effort teardown: disconnect both clusters (ignoring
   * RejectedExecutionException if a test already shut one down), then stop
   * the mock server.
   */
  def cleanupSpec() {
    try {
      couchbaseCluster?.disconnect()
    } catch (RejectedExecutionException e) {
      // already closed by a test?
    }
    try {
      memcacheCluster?.disconnect()
    } catch (RejectedExecutionException e) {
      // already closed by a test?
    }
    mock?.stop()
  }

  /**
   * Builds a client environment pointed at the mock's ports for the given
   * bucket, with metrics collection disabled and a single computation thread
   * to reduce variability (extra background spans / scheduling noise) in the
   * traces the tests assert on.
   */
  private DefaultCouchbaseEnvironment.Builder envBuilder(BucketSettings bucketSettings) {
    def timeout = TimeUnit.SECONDS.toMillis(5)
    return DefaultCouchbaseEnvironment.builder()
      .bootstrapCarrierDirectPort(mock.getCarrierPort(bucketSettings.name()))
      .bootstrapHttpDirectPort(port)
      // settings to try to reduce variability in the tests:
      .runtimeMetricsCollectorConfig(DefaultMetricsCollectorConfig.create(0, TimeUnit.DAYS))
      .networkLatencyMetricsCollectorConfig(DefaultLatencyMetricsCollectorConfig.create(0, TimeUnit.DAYS))
      .computationPoolSize(1)
      .connectTimeout(timeout)
      .kvTimeout(timeout)
      .managementTimeout(timeout)
      .queryTimeout(timeout)
      .viewTimeout(timeout)
  }
}

View File

@ -1,3 +1,4 @@
import com.anotherchrisberry.spock.extensions.retry.RetryOnFailure
import datadog.trace.agent.test.AgentTestRunner
import datadog.trace.agent.test.TestUtils
import datadog.trace.api.DDSpanTypes
@ -19,6 +20,7 @@ import spock.lang.Shared
import static datadog.trace.agent.test.asserts.ListWriterAssert.assertTraces
@RetryOnFailure
class Elasticsearch6RestClientTest extends AgentTestRunner {
@Shared
int httpPort

View File

@ -1,3 +1,4 @@
import com.anotherchrisberry.spock.extensions.retry.RetryOnFailure
import datadog.trace.agent.test.AgentTestRunner
import datadog.trace.agent.test.TestUtils
import datadog.trace.api.DDSpanTypes
@ -21,6 +22,7 @@ import spock.lang.Shared
import static datadog.trace.agent.test.asserts.ListWriterAssert.assertTraces
import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
@RetryOnFailure
class Elasticsearch5RestClientTest extends AgentTestRunner {
@Shared
int httpPort

View File

@ -1,3 +1,4 @@
import com.anotherchrisberry.spock.extensions.retry.RetryOnFailure
import datadog.trace.agent.test.AgentTestRunner
import datadog.trace.agent.test.TestUtils
import datadog.trace.api.DDSpanTypes
@ -14,6 +15,7 @@ import spock.lang.Shared
import static datadog.trace.agent.test.TestUtils.runUnderTrace
import static datadog.trace.agent.test.asserts.ListWriterAssert.assertTraces
@RetryOnFailure
class Elasticsearch2NodeClientTest extends AgentTestRunner {
public static final long TIMEOUT = 10000; // 10 seconds

View File

@ -1,3 +1,4 @@
import com.anotherchrisberry.spock.extensions.retry.RetryOnFailure
import datadog.trace.agent.test.AgentTestRunner
import datadog.trace.agent.test.TestUtils
import datadog.trace.api.DDSpanTypes
@ -17,6 +18,7 @@ import spock.lang.Shared
import static datadog.trace.agent.test.TestUtils.runUnderTrace
import static datadog.trace.agent.test.asserts.ListWriterAssert.assertTraces
@RetryOnFailure
class Elasticsearch2TransportClientTest extends AgentTestRunner {
public static final long TIMEOUT = 10000; // 10 seconds

View File

@ -30,7 +30,7 @@ class Config {
System.addShutdownHook {
if (tmpDir != null) {
FileSystemUtils.deleteSubDirectories(esWorkingDir.toPath())
FileSystemUtils.deleteSubDirectories(tmpDir.toPath())
tmpDir.delete()
}
}

View File

@ -1,5 +1,6 @@
package springdata
import com.anotherchrisberry.spock.extensions.retry.RetryOnFailure
import datadog.trace.agent.test.AgentTestRunner
import datadog.trace.api.DDSpanTypes
import datadog.trace.api.DDTags
@ -10,6 +11,7 @@ import spock.lang.Shared
import static datadog.trace.agent.test.asserts.ListWriterAssert.assertTraces
@RetryOnFailure
class Elasticsearch2SpringRepositoryTest extends AgentTestRunner {
@Shared
ApplicationContext applicationContext = new AnnotationConfigApplicationContext(Config)
@ -156,7 +158,7 @@ class Elasticsearch2SpringRepositoryTest extends AgentTestRunner {
TEST_WRITER.clear()
when:
doc.data == "other data"
doc.data = "other data"
then:
repo.index(doc) == doc

View File

@ -1,3 +1,6 @@
package springdata
import com.anotherchrisberry.spock.extensions.retry.RetryOnFailure
import datadog.trace.agent.test.AgentTestRunner
import datadog.trace.agent.test.TestUtils
import datadog.trace.api.DDSpanTypes
@ -17,12 +20,12 @@ import org.springframework.data.elasticsearch.core.query.IndexQueryBuilder
import org.springframework.data.elasticsearch.core.query.NativeSearchQuery
import org.springframework.data.elasticsearch.core.query.NativeSearchQueryBuilder
import spock.lang.Shared
import springdata.Doc
import java.util.concurrent.atomic.AtomicLong
import static datadog.trace.agent.test.asserts.ListWriterAssert.assertTraces
@RetryOnFailure
class Elasticsearch2SpringTemplateTest extends AgentTestRunner {
public static final long TIMEOUT = 10000; // 10 seconds

View File

@ -1,3 +1,4 @@
import com.anotherchrisberry.spock.extensions.retry.RetryOnFailure
import datadog.trace.agent.test.AgentTestRunner
import datadog.trace.agent.test.TestUtils
import datadog.trace.api.DDSpanTypes
@ -17,6 +18,7 @@ import static datadog.trace.agent.test.TestUtils.runUnderTrace
import static datadog.trace.agent.test.asserts.ListWriterAssert.assertTraces
import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
@RetryOnFailure
class Elasticsearch5NodeClientTest extends AgentTestRunner {
public static final long TIMEOUT = 10000; // 10 seconds

View File

@ -1,3 +1,4 @@
import com.anotherchrisberry.spock.extensions.retry.RetryOnFailure
import datadog.trace.agent.test.AgentTestRunner
import datadog.trace.agent.test.TestUtils
import datadog.trace.api.DDSpanTypes
@ -21,6 +22,7 @@ import static datadog.trace.agent.test.TestUtils.runUnderTrace
import static datadog.trace.agent.test.asserts.ListWriterAssert.assertTraces
import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
@RetryOnFailure
class Elasticsearch5TransportClientTest extends AgentTestRunner {
public static final long TIMEOUT = 10000; // 10 seconds

View File

@ -1,3 +1,4 @@
import com.anotherchrisberry.spock.extensions.retry.RetryOnFailure
import datadog.trace.agent.test.AgentTestRunner
import datadog.trace.agent.test.TestUtils
import datadog.trace.api.DDSpanTypes
@ -16,6 +17,7 @@ import static datadog.trace.agent.test.TestUtils.runUnderTrace
import static datadog.trace.agent.test.asserts.ListWriterAssert.assertTraces
import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
@RetryOnFailure
class Elasticsearch6NodeClientTest extends AgentTestRunner {
public static final long TIMEOUT = 10000; // 10 seconds

View File

@ -1,3 +1,4 @@
import com.anotherchrisberry.spock.extensions.retry.RetryOnFailure
import datadog.trace.agent.test.AgentTestRunner
import datadog.trace.agent.test.TestUtils
import datadog.trace.api.DDSpanTypes
@ -20,6 +21,7 @@ import static datadog.trace.agent.test.TestUtils.runUnderTrace
import static datadog.trace.agent.test.asserts.ListWriterAssert.assertTraces
import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
@RetryOnFailure
class Elasticsearch6TransportClientTest extends AgentTestRunner {
public static final long TIMEOUT = 10000; // 10 seconds

View File

@ -33,7 +33,6 @@ dependencies {
testCompile project(':dd-java-agent:testing')
testCompile group: 'net.spy', name: 'spymemcached', version: '2.12.0'
testCompile group: 'org.spockframework', name: 'spock-core', version: '1.1-groovy-2.4'
testCompile group: 'org.testcontainers', name: 'testcontainers', version: '1.7.3'
}

View File

@ -19,7 +19,7 @@ dependencies {
compile group: 'org.eclipse.jetty', name: 'jetty-server', version: '8.0.0.v20110901'
compile group: 'com.squareup.okhttp3', name: 'okhttp', version: '3.10.0'
compile group: 'com.squareup.okhttp3', name: 'okhttp', version: '3.11.0'
compile project(':dd-trace-ot')
compile project(':dd-java-agent:agent-tooling')

View File

@ -69,6 +69,8 @@ dependencies {
testCompile deps.testLogging
testCompile 'info.solidsoft.spock:spock-global-unroll:0.5.1'
testCompile group: 'com.github.stefanbirkner', name: 'system-rules', version: '1.17.1'
testCompile group: 'com.anotherchrisberry', name: 'spock-retry', version: '0.6.4'
}
tasks.withType(Javadoc) {

View File

@ -13,6 +13,7 @@ include ':dd-java-agent:instrumentation:akka-http-10.0'
include ':dd-java-agent:instrumentation:apache-httpclient-4.3'
include ':dd-java-agent:instrumentation:aws-java-sdk-1.11.0'
include ':dd-java-agent:instrumentation:aws-java-sdk-1.11.106'
include ':dd-java-agent:instrumentation:couchbase-2.0'
include ':dd-java-agent:instrumentation:datastax-cassandra-3.2'
include ':dd-java-agent:instrumentation:elasticsearch-rest-5'
include ':dd-java-agent:instrumentation:elasticsearch-transport-2'