Convert elasticsearch tests to java (#12300)

Lauri Tulmin 2024-09-24 18:51:45 +03:00 committed by GitHub
parent 6d94506b79
commit e4752abc53
44 changed files with 2575 additions and 2906 deletions
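The commit replaces the Spock (Groovy) specifications shown below with JUnit 5 tests; the new files rely on a shared `testing` extension and abstract base test classes that are not part of this excerpt. Purely as a hypothetical sketch (class name, request wiring, and the exact assertion helpers are assumptions based on the repository's usual JUnit test style), a ClusterHealthAction span check of the kind the Groovy specs assert would look roughly like this:

import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;

import io.opentelemetry.api.trace.SpanKind;
import io.opentelemetry.instrumentation.testing.junit.AgentInstrumentationExtension;
import io.opentelemetry.semconv.incubating.DbIncubatingAttributes;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;

class ClusterHealthAssertionSketch {
  // Shared extension that collects exported spans; assumed to correspond to the
  // `testing` field used by the converted tests below.
  @RegisterExtension
  static final AgentInstrumentationExtension testing = AgentInstrumentationExtension.create();

  @Test
  void clusterHealthSpan() {
    // ... issue a cluster-health request against the test node here ...
    testing.waitAndAssertTraces(
        trace ->
            trace.hasSpansSatisfyingExactly(
                span ->
                    span.hasName("ClusterHealthAction")
                        .hasKind(SpanKind.CLIENT)
                        .hasAttributesSatisfying(
                            equalTo(DbIncubatingAttributes.DB_SYSTEM, "elasticsearch"),
                            equalTo(
                                DbIncubatingAttributes.DB_OPERATION, "ClusterHealthAction"))));
  }
}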


@@ -5,6 +5,7 @@
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v5_0;
+import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.common.AttributesBuilder;
import io.opentelemetry.context.Context;
import io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.ElasticTransportRequest;
@@ -14,6 +15,11 @@ import org.elasticsearch.action.DocumentRequest;
public class Elasticsearch5TransportExperimentalAttributesExtractor
extends ElasticsearchTransportExperimentalAttributesExtractor {
+private static final AttributeKey<String> ELASTICSEARCH_REQUEST_WRITE_TYPE =
+    AttributeKey.stringKey("elasticsearch.request.write.type");
+private static final AttributeKey<String> ELASTICSEARCH_REQUEST_WRITE_ROUTING =
+    AttributeKey.stringKey("elasticsearch.request.write.routing");
@Override
public void onStart(
AttributesBuilder attributes,
@@ -24,8 +30,8 @@ public class Elasticsearch5TransportExperimentalAttributesExtractor
Object request = transportRequest.getRequest();
if (request instanceof DocumentRequest) {
DocumentRequest<?> req = (DocumentRequest<?>) request;
attributes.put("elasticsearch.request.write.type", req.type());
attributes.put("elasticsearch.request.write.routing", req.routing());
attributes.put(ELASTICSEARCH_REQUEST_WRITE_TYPE, req.type());
attributes.put(ELASTICSEARCH_REQUEST_WRITE_ROUTING, req.routing());
}
}
}
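The hunk above swaps raw string attribute names for pre-allocated typed keys. As a small illustration of the pattern, not tied to this instrumentation (the key name below is made up), the stable OpenTelemetry attributes API works like this:

import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.common.Attributes;

class AttributeKeyPattern {
  // Creating the key once avoids rebuilding it on every request and gives
  // compile-time type checking: put(AttributeKey<String>, ...) only accepts a String.
  private static final AttributeKey<String> EXAMPLE_KEY =
      AttributeKey.stringKey("example.attribute"); // hypothetical key name

  static Attributes describe(String value) {
    return Attributes.builder().put(EXAMPLE_KEY, value).build();
  }
}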


@@ -1,309 +0,0 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
import io.opentelemetry.semconv.incubating.DbIncubatingAttributes
import org.elasticsearch.client.Client
import org.elasticsearch.common.io.FileSystemUtils
import org.elasticsearch.common.settings.Settings
import org.elasticsearch.env.Environment
import org.elasticsearch.http.BindHttpException
import org.elasticsearch.index.IndexNotFoundException
import org.elasticsearch.node.Node
import org.elasticsearch.node.internal.InternalSettingsPreparer
import org.elasticsearch.transport.BindTransportException
import org.elasticsearch.transport.Netty3Plugin
import spock.lang.Shared
import spock.lang.Unroll
import java.util.concurrent.TimeUnit
import static io.opentelemetry.api.trace.SpanKind.CLIENT
import static io.opentelemetry.api.trace.SpanKind.INTERNAL
import static io.opentelemetry.api.trace.StatusCode.ERROR
import static org.awaitility.Awaitility.await
import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
class Elasticsearch5NodeClientTest extends AbstractElasticsearchNodeClientTest {
public static final long TIMEOUT = 10000 // 10 seconds
@Shared
Node testNode
@Shared
File esWorkingDir
@Shared
String clusterName = UUID.randomUUID().toString()
@Shared
Client client
def setupSpec() {
esWorkingDir = File.createTempDir("test-es-working-dir-", "")
esWorkingDir.deleteOnExit()
println "ES work dir: $esWorkingDir"
def settings = Settings.builder()
.put("path.home", esWorkingDir.path)
// Since we use listeners to close spans this should make our span closing deterministic which is good for tests
.put("thread_pool.listener.size", 1)
.put("transport.type", "netty3")
.put("http.type", "netty3")
.put(CLUSTER_NAME_SETTING.getKey(), clusterName)
.put("discovery.type", "local")
.build()
testNode = new Node(new Environment(InternalSettingsPreparer.prepareSettings(settings)), [Netty3Plugin])
// retry when starting elasticsearch fails with
// org.elasticsearch.http.BindHttpException: Failed to resolve host [[]]
// Caused by: java.net.SocketException: No such device (getFlags() failed)
// or
// org.elasticsearch.transport.BindTransportException: Failed to resolve host null
// Caused by: java.net.SocketException: No such device (getFlags() failed)
await()
.atMost(10, TimeUnit.SECONDS)
.ignoreExceptionsMatching({
BindHttpException.isInstance(it) || BindTransportException.isInstance(it)
})
.until({
testNode.start()
true
})
client = testNode.client()
runWithSpan("setup") {
// this may potentially create multiple requests and therefore multiple spans, so we wrap this call
// into a top level trace to get exactly one trace in the result.
client.admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet(TIMEOUT)
}
ignoreTracesAndClear(1)
}
def cleanupSpec() {
testNode?.close()
if (esWorkingDir != null) {
FileSystemUtils.deleteSubDirectories(esWorkingDir.toPath())
esWorkingDir.delete()
}
}
@Override
Client client() {
client
}
@Unroll
def "test elasticsearch status #callKind"() {
setup:
def clusterHealthStatus = runWithSpan("parent") {
call.call()
}
expect:
clusterHealthStatus.name() == "GREEN"
assertTraces(1) {
trace(0, 3) {
span(0) {
name "parent"
kind INTERNAL
hasNoParent()
}
span(1) {
name "ClusterHealthAction"
kind CLIENT
childOf(span(0))
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "ClusterHealthAction"
"elasticsearch.action" "ClusterHealthAction"
"elasticsearch.request" "ClusterHealthRequest"
}
}
span(2) {
name "callback"
kind INTERNAL
childOf(span(0))
}
}
}
where:
callKind | call
"sync" | { clusterHealthSync() }
"async" | { clusterHealthAsync() }
}
@Unroll
def "test elasticsearch error #callKind"() {
when:
runWithSpan("parent") {
call.call(indexName, indexType, id)
}
then:
thrown IndexNotFoundException
and:
assertTraces(1) {
trace(0, 3) {
span(0) {
name "parent"
status ERROR
errorEvent IndexNotFoundException, "no such index"
kind INTERNAL
hasNoParent()
}
span(1) {
name "GetAction"
status ERROR
errorEvent IndexNotFoundException, "no such index"
kind CLIENT
childOf(span(0))
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "GetAction"
"elasticsearch.action" "GetAction"
"elasticsearch.request" "GetRequest"
"elasticsearch.request.indices" indexName
}
}
span(2) {
name "callback"
kind INTERNAL
childOf(span(0))
}
}
}
where:
indexName = "invalid-index"
indexType = "test-type"
id = "1"
callKind | call
"sync" | { indexName, indexType, id -> prepareGetSync(indexName, indexType, id) }
"async" | { indexName, indexType, id -> prepareGetAsync(indexName, indexType, id) }
}
def "test elasticsearch get"() {
setup:
def indexResult = client.admin().indices().prepareCreate(indexName).get()
expect:
indexResult.acknowledged
when:
client.admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet(TIMEOUT)
def emptyResult = client.prepareGet(indexName, indexType, id).get()
then:
!emptyResult.isExists()
emptyResult.id == id
emptyResult.type == indexType
emptyResult.index == indexName
when:
def createResult = client.prepareIndex(indexName, indexType, id).setSource([:]).get()
then:
createResult.id == id
createResult.type == indexType
createResult.index == indexName
createResult.status().status == 201
when:
def result = client.prepareGet(indexName, indexType, id).get()
then:
result.isExists()
result.id == id
result.type == indexType
result.index == indexName
and:
assertTraces(5) {
trace(0, 1) {
span(0) {
name "CreateIndexAction"
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "CreateIndexAction"
"elasticsearch.action" "CreateIndexAction"
"elasticsearch.request" "CreateIndexRequest"
"elasticsearch.request.indices" indexName
}
}
}
trace(1, 1) {
span(0) {
name "ClusterHealthAction"
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "ClusterHealthAction"
"elasticsearch.action" "ClusterHealthAction"
"elasticsearch.request" "ClusterHealthRequest"
}
}
}
trace(2, 1) {
span(0) {
name "GetAction"
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "GetAction"
"elasticsearch.action" "GetAction"
"elasticsearch.request" "GetRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.type" indexType
"elasticsearch.id" "1"
"elasticsearch.version"(-1)
}
}
}
trace(3, 1) {
span(0) {
name "IndexAction"
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "IndexAction"
"elasticsearch.action" "IndexAction"
"elasticsearch.request" "IndexRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.request.write.type" indexType
"elasticsearch.response.status" 201
"elasticsearch.shard.replication.total" 2
"elasticsearch.shard.replication.successful" 1
"elasticsearch.shard.replication.failed" 0
}
}
}
trace(4, 1) {
span(0) {
name "GetAction"
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "GetAction"
"elasticsearch.action" "GetAction"
"elasticsearch.request" "GetRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.type" indexType
"elasticsearch.id" "1"
"elasticsearch.version" 1
}
}
}
}
cleanup:
client.admin().indices().prepareDelete(indexName).get()
where:
indexName = "test-index"
indexType = "test-type"
id = "1"
}
}


@@ -1,333 +0,0 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
import io.opentelemetry.semconv.NetworkAttributes
import io.opentelemetry.semconv.incubating.DbIncubatingAttributes
import org.elasticsearch.client.transport.TransportClient
import org.elasticsearch.common.io.FileSystemUtils
import org.elasticsearch.common.settings.Settings
import org.elasticsearch.common.transport.TransportAddress
import org.elasticsearch.env.Environment
import org.elasticsearch.http.BindHttpException
import org.elasticsearch.index.IndexNotFoundException
import org.elasticsearch.node.Node
import org.elasticsearch.node.internal.InternalSettingsPreparer
import org.elasticsearch.transport.BindTransportException
import org.elasticsearch.transport.Netty3Plugin
import org.elasticsearch.transport.RemoteTransportException
import org.elasticsearch.transport.TransportService
import org.elasticsearch.transport.client.PreBuiltTransportClient
import spock.lang.Shared
import spock.lang.Unroll
import java.util.concurrent.TimeUnit
import static io.opentelemetry.api.trace.SpanKind.CLIENT
import static io.opentelemetry.api.trace.SpanKind.INTERNAL
import static io.opentelemetry.api.trace.StatusCode.ERROR
import static org.awaitility.Awaitility.await
import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
class Elasticsearch5TransportClientTest extends AbstractElasticsearchTransportClientTest {
public static final long TIMEOUT = 10000 // 10 seconds
@Shared
TransportAddress tcpPublishAddress
@Shared
Node testNode
@Shared
File esWorkingDir
@Shared
String clusterName = UUID.randomUUID().toString()
@Shared
TransportClient client
def setupSpec() {
esWorkingDir = File.createTempDir("test-es-working-dir-", "")
esWorkingDir.deleteOnExit()
println "ES work dir: $esWorkingDir"
def settings = Settings.builder()
.put("path.home", esWorkingDir.path)
.put("transport.type", "netty3")
.put("http.type", "netty3")
.put(CLUSTER_NAME_SETTING.getKey(), clusterName)
.put("discovery.type", "local")
.build()
testNode = new Node(new Environment(InternalSettingsPreparer.prepareSettings(settings)), [Netty3Plugin])
// retry when starting elasticsearch fails with
// org.elasticsearch.http.BindHttpException: Failed to resolve host [[]]
// Caused by: java.net.SocketException: No such device (getFlags() failed)
// or
// org.elasticsearch.transport.BindTransportException: Failed to resolve host null
// Caused by: java.net.SocketException: No such device (getFlags() failed)
await()
.atMost(10, TimeUnit.SECONDS)
.ignoreExceptionsMatching({
BindHttpException.isInstance(it) || BindTransportException.isInstance(it)
})
.until({
testNode.start()
true
})
tcpPublishAddress = testNode.injector().getInstance(TransportService).boundAddress().publishAddress()
client = new PreBuiltTransportClient(
Settings.builder()
// Since we use listeners to close spans this should make our span closing deterministic which is good for tests
.put("thread_pool.listener.size", 1)
.put(CLUSTER_NAME_SETTING.getKey(), clusterName)
.build()
)
client.addTransportAddress(tcpPublishAddress)
runWithSpan("setup") {
// this may potentially create multiple requests and therefore multiple spans, so we wrap this call
// into a top level trace to get exactly one trace in the result.
client.admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet(TIMEOUT)
}
ignoreTracesAndClear(1)
}
def cleanupSpec() {
testNode?.close()
if (esWorkingDir != null) {
FileSystemUtils.deleteSubDirectories(esWorkingDir.toPath())
esWorkingDir.delete()
}
}
@Override
TransportClient client() {
client
}
@Unroll
def "test elasticsearch status #callKind"() {
setup:
def clusterHealthStatus = runWithSpan("parent") {
call.call()
}
expect:
clusterHealthStatus.name() == "GREEN"
assertTraces(1) {
trace(0, 3) {
span(0) {
name "parent"
kind INTERNAL
hasNoParent()
}
span(1) {
name "ClusterHealthAction"
kind CLIENT
attributes {
"$NetworkAttributes.NETWORK_PEER_ADDRESS" tcpPublishAddress.address
"$NetworkAttributes.NETWORK_PEER_PORT" tcpPublishAddress.port
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "ClusterHealthAction"
"elasticsearch.action" "ClusterHealthAction"
"elasticsearch.request" "ClusterHealthRequest"
}
}
span(2) {
name "callback"
kind INTERNAL
childOf(span(0))
}
}
}
where:
callKind | call
"sync" | { clusterHealthSync() }
"async" | { clusterHealthAsync() }
}
def "test elasticsearch error #callKind"() {
when:
runWithSpan("parent") {
call.call(indexName, indexType, id)
}
then:
thrown IndexNotFoundException
and:
assertTraces(1) {
trace(0, 3) {
span(0) {
name "parent"
status ERROR
errorEvent IndexNotFoundException, "no such index"
kind INTERNAL
hasNoParent()
}
span(1) {
name "GetAction"
kind CLIENT
status ERROR
childOf(span(0))
errorEvent RemoteTransportException, String
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "GetAction"
"elasticsearch.action" "GetAction"
"elasticsearch.request" "GetRequest"
"elasticsearch.request.indices" indexName
}
}
span(2) {
name "callback"
kind INTERNAL
childOf(span(0))
}
}
}
where:
indexName = "invalid-index"
indexType = "test-type"
id = "1"
callKind | call
"sync" | { indexName, indexType, id -> prepareGetSync(indexName, indexType, id) }
"async" | { indexName, indexType, id -> prepareGetAsync(indexName, indexType, id) }
}
def "test elasticsearch get"() {
setup:
def indexResult = client.admin().indices().prepareCreate(indexName).get()
expect:
indexResult.acknowledged
when:
def emptyResult = client.prepareGet(indexName, indexType, id).get()
then:
!emptyResult.isExists()
emptyResult.id == id
emptyResult.type == indexType
emptyResult.index == indexName
when:
def createResult = client.prepareIndex(indexName, indexType, id).setSource([:]).get()
then:
createResult.id == id
createResult.type == indexType
createResult.index == indexName
createResult.status().status == 201
when:
def result = client.prepareGet(indexName, indexType, id).get()
then:
result.isExists()
result.id == id
result.type == indexType
result.index == indexName
and:
assertTraces(5) {
// PutMappingAction and IndexAction run in separate threads so their order can vary
traces.subList(2, 4).sort(orderByRootSpanName("PutMappingAction", "IndexAction"))
trace(0, 1) {
span(0) {
name "CreateIndexAction"
kind CLIENT
attributes {
"$NetworkAttributes.NETWORK_PEER_ADDRESS" tcpPublishAddress.address
"$NetworkAttributes.NETWORK_PEER_PORT" tcpPublishAddress.port
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "CreateIndexAction"
"elasticsearch.action" "CreateIndexAction"
"elasticsearch.request" "CreateIndexRequest"
"elasticsearch.request.indices" indexName
}
}
}
trace(1, 1) {
span(0) {
name "GetAction"
kind CLIENT
attributes {
"$NetworkAttributes.NETWORK_PEER_ADDRESS" tcpPublishAddress.address
"$NetworkAttributes.NETWORK_PEER_PORT" tcpPublishAddress.port
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "GetAction"
"elasticsearch.action" "GetAction"
"elasticsearch.request" "GetRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.type" indexType
"elasticsearch.id" "1"
"elasticsearch.version"(-1)
}
}
}
trace(2, 1) {
span(0) {
name "PutMappingAction"
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "PutMappingAction"
"elasticsearch.action" "PutMappingAction"
"elasticsearch.request" "PutMappingRequest"
}
}
}
trace(3, 1) {
span(0) {
name "IndexAction"
kind CLIENT
attributes {
"$NetworkAttributes.NETWORK_PEER_ADDRESS" tcpPublishAddress.address
"$NetworkAttributes.NETWORK_PEER_PORT" tcpPublishAddress.port
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "IndexAction"
"elasticsearch.action" "IndexAction"
"elasticsearch.request" "IndexRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.request.write.type" indexType
"elasticsearch.response.status" 201
"elasticsearch.shard.replication.total" 2
"elasticsearch.shard.replication.successful" 1
"elasticsearch.shard.replication.failed" 0
}
}
}
trace(4, 1) {
span(0) {
name "GetAction"
kind CLIENT
attributes {
"$NetworkAttributes.NETWORK_PEER_ADDRESS" tcpPublishAddress.address
"$NetworkAttributes.NETWORK_PEER_PORT" tcpPublishAddress.port
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "GetAction"
"elasticsearch.action" "GetAction"
"elasticsearch.request" "GetRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.type" indexType
"elasticsearch.id" "1"
"elasticsearch.version" 1
}
}
}
}
cleanup:
client.admin().indices().prepareDelete(indexName).get()
where:
indexName = "test-index"
indexType = "test-type"
id = "1"
}
}


@@ -0,0 +1,86 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v5_0;
import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING;
import io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.AbstractElasticsearchNodeClientTest;
import java.io.File;
import java.util.Collections;
import java.util.UUID;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.transport.Netty3Plugin;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.io.TempDir;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
class Elasticsearch5NodeClientTest extends AbstractElasticsearchNodeClientTest {
private static final Logger logger = LoggerFactory.getLogger(Elasticsearch5NodeClientTest.class);
private static final String clusterName = UUID.randomUUID().toString();
private static Node testNode;
private static Client client;
@BeforeAll
static void setUp(@TempDir File esWorkingDir) {
logger.info("ES work dir: {}", esWorkingDir);
Settings settings =
Settings.builder()
.put("path.home", esWorkingDir.getPath())
// Since we use listeners to close spans this should make our span closing deterministic
// which is good for tests
.put("thread_pool.listener.size", 1)
.put("transport.type", "netty3")
.put("http.type", "netty3")
.put(CLUSTER_NAME_SETTING.getKey(), clusterName)
.put("discovery.type", "local")
.build();
testNode =
new Node(
new Environment(InternalSettingsPreparer.prepareSettings(settings)),
Collections.singletonList(Netty3Plugin.class)) {};
startNode(testNode);
client = testNode.client();
testing.runWithSpan(
"setup",
() ->
// this may potentially create multiple requests and therefore multiple spans, so we
// wrap this call
// into a top level trace to get exactly one trace in the result.
client
.admin()
.cluster()
.prepareHealth()
.setWaitForYellowStatus()
.execute()
.actionGet(TIMEOUT));
testing.waitForTraces(1);
testing.clearData();
}
@AfterAll
static void cleanUp() throws Exception {
testNode.close();
}
@Override
public Client client() {
return client;
}
@Override
protected boolean hasWriteVersion() {
return false;
}
}
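Both converted tests call `startNode(testNode)` and `TIMEOUT`, which live in the shared abstract test classes outside this diff. Judging from the retry loop in the removed Groovy `setupSpec()` blocks, that helper presumably looks roughly like the following sketch (class name and placement are assumed; only the Awaitility retry is taken from the Groovy code above):

import static org.awaitility.Awaitility.await;

import java.util.concurrent.TimeUnit;
import org.elasticsearch.http.BindHttpException;
import org.elasticsearch.node.Node;
import org.elasticsearch.transport.BindTransportException;

final class NodeStartRetry {
  private NodeStartRetry() {}

  // Node.start() can intermittently fail with BindHttpException or BindTransportException
  // ("Failed to resolve host ..."), so retry for up to 10 seconds, as the Groovy tests did.
  static void startNode(Node node) {
    await()
        .atMost(10, TimeUnit.SECONDS)
        .ignoreExceptionsMatching(
            t -> t instanceof BindHttpException || t instanceof BindTransportException)
        .until(
            () -> {
              node.start();
              return true;
            });
  }
}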


@@ -0,0 +1,109 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v5_0;
import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING;
import io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.AbstractElasticsearchTransportClientTest;
import java.io.File;
import java.util.Collections;
import java.util.UUID;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.transport.Netty3Plugin;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.io.TempDir;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
class Elasticsearch5TransportClientTest extends AbstractElasticsearchTransportClientTest {
private static final Logger logger =
LoggerFactory.getLogger(Elasticsearch5TransportClientTest.class);
private static final String clusterName = UUID.randomUUID().toString();
private static Node testNode;
private static TransportAddress tcpPublishAddress;
private static TransportClient client;
@BeforeAll
static void setUp(@TempDir File esWorkingDir) {
logger.info("ES work dir: {}", esWorkingDir);
Settings settings =
Settings.builder()
.put("path.home", esWorkingDir.getPath())
.put("transport.type", "netty3")
.put("http.type", "netty3")
.put(CLUSTER_NAME_SETTING.getKey(), clusterName)
.put("discovery.type", "local")
.build();
testNode =
new Node(
new Environment(InternalSettingsPreparer.prepareSettings(settings)),
Collections.singletonList(Netty3Plugin.class)) {};
startNode(testNode);
tcpPublishAddress =
testNode.injector().getInstance(TransportService.class).boundAddress().publishAddress();
client =
new PreBuiltTransportClient(
Settings.builder()
// Since we use listeners to close spans this should make our span closing
// deterministic which is good for tests
.put("thread_pool.listener.size", 1)
.put(CLUSTER_NAME_SETTING.getKey(), clusterName)
.build());
client.addTransportAddress(tcpPublishAddress);
testing.runWithSpan(
"setup",
() ->
// this may potentially create multiple requests and therefore multiple spans, so we
// wrap this call
// into a top level trace to get exactly one trace in the result.
client
.admin()
.cluster()
.prepareHealth()
.setWaitForYellowStatus()
.execute()
.actionGet(TIMEOUT));
testing.waitForTraces(1);
testing.clearData();
}
@AfterAll
static void cleanUp() throws Exception {
testNode.close();
}
@Override
protected TransportClient client() {
return client;
}
@Override
protected String getAddress() {
return tcpPublishAddress.getAddress();
}
@Override
protected int getPort() {
return tcpPublishAddress.getPort();
}
@Override
protected boolean hasWriteVersion() {
return false;
}
}


@@ -1,313 +0,0 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
import io.opentelemetry.semconv.incubating.DbIncubatingAttributes
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest
import org.elasticsearch.client.Client
import org.elasticsearch.common.io.FileSystemUtils
import org.elasticsearch.common.settings.Settings
import org.elasticsearch.env.Environment
import org.elasticsearch.http.BindHttpException
import org.elasticsearch.index.IndexNotFoundException
import org.elasticsearch.node.InternalSettingsPreparer
import org.elasticsearch.node.Node
import org.elasticsearch.transport.BindTransportException
import org.elasticsearch.transport.Netty3Plugin
import spock.lang.Shared
import spock.lang.Unroll
import java.util.concurrent.TimeUnit
import static io.opentelemetry.api.trace.SpanKind.CLIENT
import static io.opentelemetry.api.trace.SpanKind.INTERNAL
import static io.opentelemetry.api.trace.StatusCode.ERROR
import static org.awaitility.Awaitility.await
import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
class Elasticsearch53NodeClientTest extends AbstractElasticsearchNodeClientTest {
public static final long TIMEOUT = 10000 // 10 seconds
@Shared
Node testNode
@Shared
File esWorkingDir
@Shared
String clusterName = UUID.randomUUID().toString()
@Shared
Client client
def setupSpec() {
esWorkingDir = File.createTempDir("test-es-working-dir-", "")
esWorkingDir.deleteOnExit()
println "ES work dir: $esWorkingDir"
def settings = Settings.builder()
.put("path.home", esWorkingDir.path)
// Since we use listeners to close spans this should make our span closing deterministic which is good for tests
.put("thread_pool.listener.size", 1)
.put("transport.type", "netty3")
.put("http.type", "netty3")
.put(CLUSTER_NAME_SETTING.getKey(), clusterName)
.put("discovery.type", "single-node")
.build()
testNode = new Node(new Environment(InternalSettingsPreparer.prepareSettings(settings)), [Netty3Plugin])
// retry when starting elasticsearch fails with
// org.elasticsearch.http.BindHttpException: Failed to resolve host [[]]
// Caused by: java.net.SocketException: No such device (getFlags() failed)
// or
// org.elasticsearch.transport.BindTransportException: Failed to resolve host null
// Caused by: java.net.SocketException: No such device (getFlags() failed)
await()
.atMost(10, TimeUnit.SECONDS)
.ignoreExceptionsMatching({
BindHttpException.isInstance(it) || BindTransportException.isInstance(it)
})
.until({
testNode.start()
true
})
client = testNode.client()
runWithSpan("setup") {
// this may potentially create multiple requests and therefore multiple spans, so we wrap this call
// into a top level trace to get exactly one trace in the result.
client.admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet(TIMEOUT)
// disable periodic refresh in InternalClusterInfoService as it creates spans that tests don't expect
client.admin().cluster().updateSettings(new ClusterUpdateSettingsRequest().transientSettings(["cluster.routing.allocation.disk.threshold_enabled": false]))
}
ignoreTracesAndClear(1)
}
def cleanupSpec() {
testNode?.close()
if (esWorkingDir != null) {
FileSystemUtils.deleteSubDirectories(esWorkingDir.toPath())
esWorkingDir.delete()
}
}
@Override
Client client() {
client
}
@Unroll
def "test elasticsearch status #callKind"() {
setup:
def clusterHealthStatus = runWithSpan("parent") {
call.call()
}
expect:
clusterHealthStatus.name() == "GREEN"
assertTraces(1) {
trace(0, 3) {
span(0) {
name "parent"
kind INTERNAL
hasNoParent()
}
span(1) {
name "ClusterHealthAction"
kind CLIENT
childOf(span(0))
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "ClusterHealthAction"
"elasticsearch.action" "ClusterHealthAction"
"elasticsearch.request" "ClusterHealthRequest"
}
}
span(2) {
name "callback"
kind INTERNAL
childOf(span(0))
}
}
}
where:
callKind | call
"sync" | { clusterHealthSync() }
"async" | { clusterHealthAsync() }
}
@Unroll
def "test elasticsearch error #callKind"() {
when:
runWithSpan("parent") {
call.call(indexName, indexType, id)
}
then:
thrown IndexNotFoundException
and:
assertTraces(1) {
trace(0, 3) {
span(0) {
name "parent"
status ERROR
errorEvent IndexNotFoundException, "no such index"
kind INTERNAL
hasNoParent()
}
span(1) {
name "GetAction"
kind CLIENT
status ERROR
childOf(span(0))
errorEvent IndexNotFoundException, "no such index"
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "GetAction"
"elasticsearch.action" "GetAction"
"elasticsearch.request" "GetRequest"
"elasticsearch.request.indices" indexName
}
}
span(2) {
name "callback"
kind INTERNAL
childOf(span(0))
}
}
}
where:
indexName = "invalid-index"
indexType = "test-type"
id = "1"
callKind | call
"sync" | { indexName, indexType, id -> prepareGetSync(indexName, indexType, id) }
"async" | { indexName, indexType, id -> prepareGetAsync(indexName, indexType, id) }
}
def "test elasticsearch get"() {
setup:
def indexResult = client.admin().indices().prepareCreate(indexName).get()
expect:
indexResult.acknowledged
when:
client.admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet(TIMEOUT)
def emptyResult = client.prepareGet(indexName, indexType, id).get()
then:
!emptyResult.isExists()
emptyResult.id == id
emptyResult.type == indexType
emptyResult.index == indexName
when:
def createResult = client.prepareIndex(indexName, indexType, id).setSource([:]).get()
then:
createResult.id == id
createResult.type == indexType
createResult.index == indexName
createResult.status().status == 201
when:
def result = client.prepareGet(indexName, indexType, id).get()
then:
result.isExists()
result.id == id
result.type == indexType
result.index == indexName
and:
assertTraces(5) {
trace(0, 1) {
span(0) {
name "CreateIndexAction"
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "CreateIndexAction"
"elasticsearch.action" "CreateIndexAction"
"elasticsearch.request" "CreateIndexRequest"
"elasticsearch.request.indices" indexName
}
}
}
trace(1, 1) {
span(0) {
name "ClusterHealthAction"
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "ClusterHealthAction"
"elasticsearch.action" "ClusterHealthAction"
"elasticsearch.request" "ClusterHealthRequest"
}
}
}
trace(2, 1) {
span(0) {
name "GetAction"
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "GetAction"
"elasticsearch.action" "GetAction"
"elasticsearch.request" "GetRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.type" indexType
"elasticsearch.id" "1"
"elasticsearch.version"(-1)
}
}
}
trace(3, 1) {
span(0) {
name "IndexAction"
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "IndexAction"
"elasticsearch.action" "IndexAction"
"elasticsearch.request" "IndexRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.request.write.type" indexType
"elasticsearch.request.write.version"(-3)
"elasticsearch.response.status" 201
"elasticsearch.shard.replication.total" 2
"elasticsearch.shard.replication.successful" 1
"elasticsearch.shard.replication.failed" 0
}
}
}
trace(4, 1) {
span(0) {
name "GetAction"
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "GetAction"
"elasticsearch.action" "GetAction"
"elasticsearch.request" "GetRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.type" indexType
"elasticsearch.id" "1"
"elasticsearch.version" 1
}
}
}
}
cleanup:
client.admin().indices().prepareDelete(indexName).get()
where:
indexName = "test-index"
indexType = "test-type"
id = "1"
}
}


@@ -1,339 +0,0 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
import io.opentelemetry.semconv.NetworkAttributes
import io.opentelemetry.semconv.incubating.DbIncubatingAttributes
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest
import org.elasticsearch.client.transport.TransportClient
import org.elasticsearch.common.io.FileSystemUtils
import org.elasticsearch.common.settings.Settings
import org.elasticsearch.common.transport.TransportAddress
import org.elasticsearch.env.Environment
import org.elasticsearch.http.BindHttpException
import org.elasticsearch.index.IndexNotFoundException
import org.elasticsearch.node.InternalSettingsPreparer
import org.elasticsearch.node.Node
import org.elasticsearch.transport.BindTransportException
import org.elasticsearch.transport.Netty3Plugin
import org.elasticsearch.transport.RemoteTransportException
import org.elasticsearch.transport.TransportService
import org.elasticsearch.transport.client.PreBuiltTransportClient
import spock.lang.Shared
import spock.lang.Unroll
import java.util.concurrent.TimeUnit
import static io.opentelemetry.api.trace.SpanKind.CLIENT
import static io.opentelemetry.api.trace.SpanKind.INTERNAL
import static io.opentelemetry.api.trace.StatusCode.ERROR
import static org.awaitility.Awaitility.await
import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
class Elasticsearch53TransportClientTest extends AbstractElasticsearchTransportClientTest {
public static final long TIMEOUT = 10000 // 10 seconds
@Shared
TransportAddress tcpPublishAddress
@Shared
Node testNode
@Shared
File esWorkingDir
@Shared
TransportClient client
@Shared
String clusterName = UUID.randomUUID().toString()
def setupSpec() {
esWorkingDir = File.createTempDir("test-es-working-dir-", "")
esWorkingDir.deleteOnExit()
println "ES work dir: $esWorkingDir"
def settings = Settings.builder()
.put("path.home", esWorkingDir.path)
.put("transport.type", "netty3")
.put("http.type", "netty3")
.put(CLUSTER_NAME_SETTING.getKey(), clusterName)
.put("discovery.type", "single-node")
.build()
testNode = new Node(new Environment(InternalSettingsPreparer.prepareSettings(settings)), [Netty3Plugin])
// retry when starting elasticsearch fails with
// org.elasticsearch.http.BindHttpException: Failed to resolve host [[]]
// Caused by: java.net.SocketException: No such device (getFlags() failed)
// or
// org.elasticsearch.transport.BindTransportException: Failed to resolve host null
// Caused by: java.net.SocketException: No such device (getFlags() failed)
await()
.atMost(10, TimeUnit.SECONDS)
.ignoreExceptionsMatching({
BindHttpException.isInstance(it) || BindTransportException.isInstance(it)
})
.until({
testNode.start()
true
})
tcpPublishAddress = testNode.injector().getInstance(TransportService).boundAddress().publishAddress()
client = new PreBuiltTransportClient(
Settings.builder()
// Since we use listeners to close spans this should make our span closing deterministic which is good for tests
.put("thread_pool.listener.size", 1)
.put(CLUSTER_NAME_SETTING.getKey(), clusterName)
.build()
)
client.addTransportAddress(tcpPublishAddress)
runWithSpan("setup") {
// this may potentially create multiple requests and therefore multiple spans, so we wrap this call
// into a top level trace to get exactly one trace in the result.
client.admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet(TIMEOUT)
// disable periodic refresh in InternalClusterInfoService as it creates spans that tests don't expect
client.admin().cluster().updateSettings(new ClusterUpdateSettingsRequest().transientSettings(["cluster.routing.allocation.disk.threshold_enabled": false]))
}
ignoreTracesAndClear(1)
}
def cleanupSpec() {
client?.close()
testNode?.close()
if (esWorkingDir != null) {
FileSystemUtils.deleteSubDirectories(esWorkingDir.toPath())
esWorkingDir.delete()
}
}
@Override
TransportClient client() {
client
}
@Unroll
def "test elasticsearch status #callKind"() {
setup:
def clusterHealthStatus = runWithSpan("parent") {
call.call()
}
expect:
clusterHealthStatus.name() == "GREEN"
assertTraces(1) {
trace(0, 3) {
span(0) {
name "parent"
kind INTERNAL
hasNoParent()
}
span(1) {
name "ClusterHealthAction"
kind CLIENT
childOf(span(0))
attributes {
"$NetworkAttributes.NETWORK_PEER_ADDRESS" tcpPublishAddress.address
"$NetworkAttributes.NETWORK_PEER_PORT" tcpPublishAddress.port
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "ClusterHealthAction"
"elasticsearch.action" "ClusterHealthAction"
"elasticsearch.request" "ClusterHealthRequest"
}
}
span(2) {
name "callback"
kind INTERNAL
childOf(span(0))
}
}
}
where:
callKind | call
"sync" | { clusterHealthSync() }
"async" | { clusterHealthAsync() }
}
def "test elasticsearch error #callKind"() {
when:
runWithSpan("parent") {
call.call(indexName, indexType, id)
}
then:
thrown IndexNotFoundException
and:
assertTraces(1) {
trace(0, 3) {
span(0) {
name "parent"
status ERROR
errorEvent IndexNotFoundException, "no such index"
kind INTERNAL
hasNoParent()
}
span(1) {
name "GetAction"
kind CLIENT
status ERROR
errorEvent RemoteTransportException, String
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "GetAction"
"elasticsearch.action" "GetAction"
"elasticsearch.request" "GetRequest"
"elasticsearch.request.indices" indexName
}
}
span(2) {
name "callback"
kind INTERNAL
childOf(span(0))
}
}
}
where:
indexName = "invalid-index"
indexType = "test-type"
id = "1"
callKind | call
"sync" | { indexName, indexType, id -> prepareGetSync(indexName, indexType, id) }
"async" | { indexName, indexType, id -> prepareGetAsync(indexName, indexType, id) }
}
def "test elasticsearch get"() {
setup:
def indexResult = client.admin().indices().prepareCreate(indexName).get()
expect:
indexResult.acknowledged
when:
def emptyResult = client.prepareGet(indexName, indexType, id).get()
then:
!emptyResult.isExists()
emptyResult.id == id
emptyResult.type == indexType
emptyResult.index == indexName
when:
def createResult = client.prepareIndex(indexName, indexType, id).setSource([:]).get()
then:
createResult.id == id
createResult.type == indexType
createResult.index == indexName
createResult.status().status == 201
when:
def result = client.prepareGet(indexName, indexType, id).get()
then:
result.isExists()
result.id == id
result.type == indexType
result.index == indexName
and:
assertTraces(5) {
// PutMappingAction and IndexAction run in separate threads so their order can vary
traces.subList(2, 4).sort(orderByRootSpanName("PutMappingAction", "IndexAction"))
trace(0, 1) {
span(0) {
name "CreateIndexAction"
kind CLIENT
attributes {
"$NetworkAttributes.NETWORK_PEER_ADDRESS" tcpPublishAddress.address
"$NetworkAttributes.NETWORK_PEER_PORT" tcpPublishAddress.port
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "CreateIndexAction"
"elasticsearch.action" "CreateIndexAction"
"elasticsearch.request" "CreateIndexRequest"
"elasticsearch.request.indices" indexName
}
}
}
trace(1, 1) {
span(0) {
name "GetAction"
kind CLIENT
attributes {
"$NetworkAttributes.NETWORK_PEER_ADDRESS" tcpPublishAddress.address
"$NetworkAttributes.NETWORK_PEER_PORT" tcpPublishAddress.port
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "GetAction"
"elasticsearch.action" "GetAction"
"elasticsearch.request" "GetRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.type" indexType
"elasticsearch.id" "1"
"elasticsearch.version"(-1)
}
}
}
trace(2, 1) {
span(0) {
name "PutMappingAction"
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "PutMappingAction"
"elasticsearch.action" "PutMappingAction"
"elasticsearch.request" "PutMappingRequest"
}
}
}
trace(3, 1) {
span(0) {
name "IndexAction"
kind CLIENT
attributes {
"$NetworkAttributes.NETWORK_PEER_ADDRESS" tcpPublishAddress.address
"$NetworkAttributes.NETWORK_PEER_PORT" tcpPublishAddress.port
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "IndexAction"
"elasticsearch.action" "IndexAction"
"elasticsearch.request" "IndexRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.request.write.type" indexType
"elasticsearch.request.write.version"(-3)
"elasticsearch.response.status" 201
"elasticsearch.shard.replication.total" 2
"elasticsearch.shard.replication.successful" 1
"elasticsearch.shard.replication.failed" 0
}
}
}
trace(4, 1) {
span(0) {
name "GetAction"
kind CLIENT
attributes {
"$NetworkAttributes.NETWORK_PEER_ADDRESS" tcpPublishAddress.address
"$NetworkAttributes.NETWORK_PEER_PORT" tcpPublishAddress.port
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "GetAction"
"elasticsearch.action" "GetAction"
"elasticsearch.request" "GetRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.type" indexType
"elasticsearch.id" "1"
"elasticsearch.version" 1
}
}
}
}
cleanup:
client.admin().indices().prepareDelete(indexName).get()
where:
indexName = "test-index"
indexType = "test-type"
id = "1"
}
}


@@ -1,70 +0,0 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package springdata
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest
import org.elasticsearch.common.io.FileSystemUtils
import org.elasticsearch.common.settings.Settings
import org.elasticsearch.env.Environment
import org.elasticsearch.node.InternalSettingsPreparer
import org.elasticsearch.node.Node
import org.elasticsearch.transport.Netty3Plugin
import org.springframework.context.annotation.Bean
import org.springframework.context.annotation.ComponentScan
import org.springframework.context.annotation.Configuration
import org.springframework.data.elasticsearch.core.ElasticsearchOperations
import org.springframework.data.elasticsearch.core.ElasticsearchTemplate
import org.springframework.data.elasticsearch.repository.config.EnableElasticsearchRepositories
@Configuration
@EnableElasticsearchRepositories(basePackages = "springdata")
@ComponentScan(basePackages = "springdata")
class Config {
@Bean
NodeBuilder nodeBuilder() {
return new NodeBuilder()
}
@Bean
Node elasticSearchNode() {
def tmpDir = File.createTempFile("test-es-working-dir-", "")
tmpDir.delete()
tmpDir.mkdir()
tmpDir.deleteOnExit()
System.addShutdownHook {
if (tmpDir != null) {
FileSystemUtils.deleteSubDirectories(tmpDir.toPath())
tmpDir.delete()
}
}
def settings = Settings.builder()
.put("http.enabled", "false")
.put("path.data", tmpDir.toString())
.put("path.home", tmpDir.toString())
.put("thread_pool.listener.size", 1)
.put("transport.type", "netty3")
.put("http.type", "netty3")
.put("discovery.type", "single-node")
.build()
println "ES work dir: $tmpDir"
def testNode = new Node(new Environment(InternalSettingsPreparer.prepareSettings(settings)), [Netty3Plugin])
testNode.start()
// disable periodic refresh in InternalClusterInfoService as it creates spans that tests don't expect
testNode.client().admin().cluster().updateSettings(new ClusterUpdateSettingsRequest().transientSettings(["cluster.routing.allocation.disk.threshold_enabled": false]))
return testNode
}
@Bean
ElasticsearchOperations elasticsearchTemplate(Node node) {
return new ElasticsearchTemplate(node.client())
}
}


@@ -1,34 +0,0 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package springdata
import groovy.transform.EqualsAndHashCode
import org.springframework.data.annotation.Id
import org.springframework.data.elasticsearch.annotations.Document
@Document(indexName = "test-index")
@EqualsAndHashCode
class Doc {
@Id
private String id = "1"
private String data = "some data"
String getId() {
return id
}
void setId(String id) {
this.id = id
}
String getData() {
return data
}
void setData(String data) {
this.data = data
}
}


@@ -1,364 +0,0 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package springdata
import io.opentelemetry.instrumentation.test.AgentInstrumentationSpecification
import io.opentelemetry.semconv.incubating.CodeIncubatingAttributes
import io.opentelemetry.semconv.incubating.DbIncubatingAttributes
import org.junit.jupiter.api.Assumptions
import org.springframework.context.annotation.AnnotationConfigApplicationContext
import spock.lang.Shared
import spock.util.environment.Jvm
import java.lang.reflect.InvocationHandler
import java.lang.reflect.Method
import java.lang.reflect.Proxy
import static io.opentelemetry.api.trace.SpanKind.CLIENT
import static io.opentelemetry.api.trace.SpanKind.INTERNAL
class Elasticsearch53SpringRepositoryTest extends AgentInstrumentationSpecification {
// Setting up appContext & repo with @Shared doesn't allow
// spring-data instrumentation to be applied.
// To change the timing without adding ugly checks everywhere -
// use a dynamic proxy. There's probably a more "groovy" way to do this.
@Shared
LazyProxyInvoker lazyProxyInvoker = new LazyProxyInvoker()
@Shared
DocRepository repo = Proxy.newProxyInstance(
getClass().getClassLoader(),
[DocRepository] as Class[],
lazyProxyInvoker)
static class LazyProxyInvoker implements InvocationHandler {
def repo
def applicationContext
DocRepository getOrCreateRepository() {
if (repo != null) {
return repo
}
applicationContext = new AnnotationConfigApplicationContext(Config)
repo = applicationContext.getBean(DocRepository)
return repo
}
void close() {
applicationContext?.close()
}
@Override
Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
return method.invoke(getOrCreateRepository(), args)
}
}
def setup() {
// when running on jdk 21 this test occasionally fails with timeout
Assumptions.assumeTrue(Boolean.getBoolean("testLatestDeps") || !Jvm.getCurrent().isJava21Compatible())
repo.refresh()
clearExportedData()
runWithSpan("delete") {
repo.deleteAll()
}
ignoreTracesAndClear(1)
}
def cleanupSpec() {
lazyProxyInvoker.close()
}
def "test empty repo"() {
when:
def result = repo.findAll()
then:
!result.iterator().hasNext()
and:
assertTraces(1) {
trace(0, 2) {
span(0) {
name "DocRepository.findAll"
kind INTERNAL
attributes {
"$CodeIncubatingAttributes.CODE_NAMESPACE" DocRepository.name
"$CodeIncubatingAttributes.CODE_FUNCTION" "findAll"
}
}
span(1) {
name "SearchAction"
kind CLIENT
childOf span(0)
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "SearchAction"
"elasticsearch.action" "SearchAction"
"elasticsearch.request" "SearchRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.request.search.types" "doc"
}
}
}
}
where:
indexName = "test-index"
}
def "test CRUD"() {
when:
def doc = new Doc()
then:
repo.index(doc) == doc
and:
assertTraces(1) {
trace(0, 3) {
span(0) {
name "DocRepository.index"
kind INTERNAL
attributes {
"$CodeIncubatingAttributes.CODE_NAMESPACE" DocRepository.name
"$CodeIncubatingAttributes.CODE_FUNCTION" "index"
}
}
span(1) {
name "IndexAction"
kind CLIENT
childOf span(0)
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "IndexAction"
"elasticsearch.action" "IndexAction"
"elasticsearch.request" "IndexRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.request.write.type" "doc"
"elasticsearch.request.write.version"(-3)
"elasticsearch.response.status" 201
"elasticsearch.shard.replication.failed" 0
"elasticsearch.shard.replication.successful" 1
"elasticsearch.shard.replication.total" 2
}
}
span(2) {
name "RefreshAction"
kind CLIENT
childOf span(0)
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "RefreshAction"
"elasticsearch.action" "RefreshAction"
"elasticsearch.request" "RefreshRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.shard.broadcast.failed" 0
"elasticsearch.shard.broadcast.successful" 5
"elasticsearch.shard.broadcast.total" 10
}
}
}
}
clearExportedData()
and:
repo.findById("1").get() == doc
and:
assertTraces(1) {
trace(0, 2) {
span(0) {
name "DocRepository.findById"
kind INTERNAL
attributes {
"$CodeIncubatingAttributes.CODE_NAMESPACE" DocRepository.name
"$CodeIncubatingAttributes.CODE_FUNCTION" "findById"
}
}
span(1) {
name "GetAction"
kind CLIENT
childOf span(0)
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "GetAction"
"elasticsearch.action" "GetAction"
"elasticsearch.request" "GetRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.type" "doc"
"elasticsearch.id" "1"
"elasticsearch.version" Number
}
}
}
}
clearExportedData()
when:
doc.data = "other data"
then:
repo.index(doc) == doc
repo.findById("1").get() == doc
and:
assertTraces(2) {
trace(0, 3) {
span(0) {
name "DocRepository.index"
kind INTERNAL
attributes {
"$CodeIncubatingAttributes.CODE_NAMESPACE" DocRepository.name
"$CodeIncubatingAttributes.CODE_FUNCTION" "index"
}
}
span(1) {
name "IndexAction"
kind CLIENT
childOf span(0)
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "IndexAction"
"elasticsearch.action" "IndexAction"
"elasticsearch.request" "IndexRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.request.write.type" "doc"
"elasticsearch.request.write.version"(-3)
"elasticsearch.response.status" 200
"elasticsearch.shard.replication.failed" 0
"elasticsearch.shard.replication.successful" 1
"elasticsearch.shard.replication.total" 2
}
}
span(2) {
name "RefreshAction"
kind CLIENT
childOf span(0)
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "RefreshAction"
"elasticsearch.action" "RefreshAction"
"elasticsearch.request" "RefreshRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.shard.broadcast.failed" 0
"elasticsearch.shard.broadcast.successful" 5
"elasticsearch.shard.broadcast.total" 10
}
}
}
trace(1, 2) {
span(0) {
name "DocRepository.findById"
kind INTERNAL
attributes {
"$CodeIncubatingAttributes.CODE_NAMESPACE" DocRepository.name
"$CodeIncubatingAttributes.CODE_FUNCTION" "findById"
}
}
span(1) {
name "GetAction"
kind CLIENT
childOf span(0)
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "GetAction"
"elasticsearch.action" "GetAction"
"elasticsearch.request" "GetRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.type" "doc"
"elasticsearch.id" "1"
"elasticsearch.version" Number
}
}
}
}
clearExportedData()
when:
repo.deleteById("1")
then:
!repo.findAll().iterator().hasNext()
and:
assertTraces(2) {
trace(0, 3) {
span(0) {
name "DocRepository.deleteById"
kind INTERNAL
attributes {
"$CodeIncubatingAttributes.CODE_NAMESPACE" DocRepository.name
"$CodeIncubatingAttributes.CODE_FUNCTION" "deleteById"
}
}
span(1) {
name "DeleteAction"
kind CLIENT
childOf span(0)
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "DeleteAction"
"elasticsearch.action" "DeleteAction"
"elasticsearch.request" "DeleteRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.request.write.type" "doc"
"elasticsearch.request.write.version"(-3)
"elasticsearch.shard.replication.failed" 0
"elasticsearch.shard.replication.successful" 1
"elasticsearch.shard.replication.total" 2
}
}
span(2) {
name "RefreshAction"
kind CLIENT
childOf span(0)
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "RefreshAction"
"elasticsearch.action" "RefreshAction"
"elasticsearch.request" "RefreshRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.shard.broadcast.failed" 0
"elasticsearch.shard.broadcast.successful" 5
"elasticsearch.shard.broadcast.total" 10
}
}
}
trace(1, 2) {
span(0) {
name "DocRepository.findAll"
kind INTERNAL
attributes {
"$CodeIncubatingAttributes.CODE_NAMESPACE" DocRepository.name
"$CodeIncubatingAttributes.CODE_FUNCTION" "findAll"
}
}
span(1) {
name "SearchAction"
kind CLIENT
childOf span(0)
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "SearchAction"
"elasticsearch.action" "SearchAction"
"elasticsearch.request" "SearchRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.request.search.types" "doc"
}
}
}
}
where:
indexName = "test-index"
}
}


@@ -1,322 +0,0 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package springdata
import io.opentelemetry.instrumentation.test.AgentInstrumentationSpecification
import io.opentelemetry.semconv.incubating.DbIncubatingAttributes
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest
import org.elasticsearch.action.search.SearchResponse
import org.elasticsearch.common.io.FileSystemUtils
import org.elasticsearch.common.settings.Settings
import org.elasticsearch.env.Environment
import org.elasticsearch.index.IndexNotFoundException
import org.elasticsearch.node.InternalSettingsPreparer
import org.elasticsearch.node.Node
import org.elasticsearch.search.aggregations.bucket.nested.InternalNested
import org.elasticsearch.search.aggregations.bucket.terms.Terms
import org.elasticsearch.transport.Netty3Plugin
import org.junit.jupiter.api.Assumptions
import org.springframework.data.elasticsearch.core.ElasticsearchTemplate
import org.springframework.data.elasticsearch.core.ResultsExtractor
import org.springframework.data.elasticsearch.core.query.IndexQueryBuilder
import org.springframework.data.elasticsearch.core.query.NativeSearchQuery
import org.springframework.data.elasticsearch.core.query.NativeSearchQueryBuilder
import spock.lang.Shared
import spock.util.environment.Jvm
import java.util.concurrent.atomic.AtomicLong
import static io.opentelemetry.api.trace.SpanKind.CLIENT
import static io.opentelemetry.api.trace.StatusCode.ERROR
import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
class Elasticsearch53SpringTemplateTest extends AgentInstrumentationSpecification {
public static final long TIMEOUT = 10000 // 10 seconds
@Shared
Node testNode
@Shared
File esWorkingDir
@Shared
String clusterName = UUID.randomUUID().toString()
@Shared
ElasticsearchTemplate template
def setupSpec() {
esWorkingDir = File.createTempDir("test-es-working-dir-", "")
esWorkingDir.deleteOnExit()
println "ES work dir: $esWorkingDir"
def settings = Settings.builder()
.put("path.home", esWorkingDir.path)
// Since we use listeners to close spans this should make our span closing deterministic which is good for tests
.put("thread_pool.listener.size", 1)
.put("transport.type", "netty3")
.put("http.type", "netty3")
.put(CLUSTER_NAME_SETTING.getKey(), clusterName)
.put("discovery.type", "single-node")
.build()
testNode = new Node(new Environment(InternalSettingsPreparer.prepareSettings(settings)), [Netty3Plugin])
testNode.start()
runWithSpan("setup") {
// this may potentially create multiple requests and therefore multiple spans, so we wrap this call
// into a top level trace to get exactly one trace in the result.
testNode.client().admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet(TIMEOUT)
// disable periodic refresh in InternalClusterInfoService as it creates spans that tests don't expect
testNode.client().admin().cluster().updateSettings(new ClusterUpdateSettingsRequest().transientSettings(["cluster.routing.allocation.disk.threshold_enabled": false]))
}
waitForTraces(1)
template = new ElasticsearchTemplate(testNode.client())
}
def cleanupSpec() {
testNode?.close()
if (esWorkingDir != null) {
FileSystemUtils.deleteSubDirectories(esWorkingDir.toPath())
esWorkingDir.delete()
}
}
def setup() {
// when running on jdk 21 this test occasionally fails with timeout
Assumptions.assumeTrue(Boolean.getBoolean("testLatestDeps") || !Jvm.getCurrent().isJava21Compatible())
}
def "test elasticsearch error"() {
when:
template.refresh(indexName)
then:
thrown IndexNotFoundException
and:
assertTraces(1) {
trace(0, 1) {
span(0) {
name "RefreshAction"
kind CLIENT
status ERROR
errorEvent IndexNotFoundException, "no such index"
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "RefreshAction"
"elasticsearch.action" "RefreshAction"
"elasticsearch.request" "RefreshRequest"
"elasticsearch.request.indices" indexName
}
}
}
}
where:
indexName = "invalid-index"
}
def "test elasticsearch get"() {
expect:
template.createIndex(indexName)
template.getClient().admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet(TIMEOUT)
when:
NativeSearchQuery query = new NativeSearchQueryBuilder()
.withIndices(indexName)
.withTypes(indexType)
.withIds([id])
.build()
then:
template.queryForIds(query) == []
when:
def result = template.index(IndexQueryBuilder.newInstance()
.withObject(new Doc())
.withIndexName(indexName)
.withType(indexType)
.withId(id)
.build())
template.refresh(Doc)
then:
result == id
template.queryForList(query, Doc) == [new Doc()]
and:
assertTraces(6) {
trace(0, 1) {
span(0) {
name "CreateIndexAction"
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "CreateIndexAction"
"elasticsearch.action" "CreateIndexAction"
"elasticsearch.request" "CreateIndexRequest"
"elasticsearch.request.indices" indexName
}
}
}
trace(1, 1) {
span(0) {
name "ClusterHealthAction"
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "ClusterHealthAction"
"elasticsearch.action" "ClusterHealthAction"
"elasticsearch.request" "ClusterHealthRequest"
}
}
}
trace(2, 1) {
span(0) {
name "SearchAction"
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "SearchAction"
"elasticsearch.action" "SearchAction"
"elasticsearch.request" "SearchRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.request.search.types" indexType
}
}
}
trace(3, 1) {
span(0) {
name "IndexAction"
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "IndexAction"
"elasticsearch.action" "IndexAction"
"elasticsearch.request" "IndexRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.request.write.type" indexType
"elasticsearch.request.write.version"(-3)
"elasticsearch.response.status" 201
"elasticsearch.shard.replication.failed" 0
"elasticsearch.shard.replication.successful" 1
"elasticsearch.shard.replication.total" 2
}
}
}
trace(4, 1) {
span(0) {
name "RefreshAction"
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "RefreshAction"
"elasticsearch.action" "RefreshAction"
"elasticsearch.request" "RefreshRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.shard.broadcast.failed" 0
"elasticsearch.shard.broadcast.successful" 5
"elasticsearch.shard.broadcast.total" 10
}
}
}
trace(5, 1) {
span(0) {
name "SearchAction"
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "SearchAction"
"elasticsearch.action" "SearchAction"
"elasticsearch.request" "SearchRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.request.search.types" indexType
}
}
}
}
cleanup:
template.deleteIndex(indexName)
where:
indexName = "test-index"
indexType = "test-type"
id = "1"
}
def "test results extractor"() {
setup:
template.createIndex(indexName)
testNode.client().admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet(TIMEOUT)
template.index(IndexQueryBuilder.newInstance()
.withObject(new Doc(id: 1, data: "doc a"))
.withIndexName(indexName)
.withId("a")
.build())
template.index(IndexQueryBuilder.newInstance()
.withObject(new Doc(id: 2, data: "doc b"))
.withIndexName(indexName)
.withId("b")
.build())
template.refresh(indexName)
ignoreTracesAndClear(5)
and:
def query = new NativeSearchQueryBuilder().withIndices(indexName).build()
def hits = new AtomicLong()
List<Map<String, Object>> results = []
def bucketTags = [:]
when:
template.query(query, new ResultsExtractor<Doc>() {
@Override
Doc extract(SearchResponse response) {
hits.addAndGet(response.getHits().totalHits())
results.addAll(response.hits.collect { it.source })
if (response.getAggregations() != null) {
InternalNested internalNested = response.getAggregations().get("tag")
if (internalNested != null) {
Terms terms = internalNested.getAggregations().get("count_agg")
Collection<Terms.Bucket> buckets = terms.getBuckets()
for (Terms.Bucket bucket : buckets) {
bucketTags.put(Integer.valueOf(bucket.getKeyAsString()), bucket.getDocCount())
}
}
}
return null
}
})
then:
hits.get() == 2
results[0] == [id: "2", data: "doc b"]
results[1] == [id: "1", data: "doc a"]
bucketTags == [:]
assertTraces(1) {
trace(0, 1) {
span(0) {
name "SearchAction"
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "SearchAction"
"elasticsearch.action" "SearchAction"
"elasticsearch.request" "SearchRequest"
"elasticsearch.request.indices" indexName
}
}
}
}
cleanup:
template.deleteIndex(indexName)
where:
indexName = "test-index-extract"
}
}


@ -0,0 +1,92 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v5_3;
import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING;
import io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.AbstractElasticsearchNodeClientTest;
import java.io.File;
import java.util.Collections;
import java.util.UUID;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.InternalSettingsPreparer;
import org.elasticsearch.node.Node;
import org.elasticsearch.transport.Netty3Plugin;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.io.TempDir;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
class Elasticsearch53NodeClientTest extends AbstractElasticsearchNodeClientTest {
private static final Logger logger = LoggerFactory.getLogger(Elasticsearch53NodeClientTest.class);
private static final String clusterName = UUID.randomUUID().toString();
private static Node testNode;
private static Client client;
@BeforeAll
static void setUp(@TempDir File esWorkingDir) {
logger.info("ES work dir: {}", esWorkingDir);
Settings settings =
Settings.builder()
.put("path.home", esWorkingDir.getPath())
// Since we use listeners to close spans this should make our span closing deterministic
// which is good for tests
.put("thread_pool.listener.size", 1)
.put("transport.type", "netty3")
.put("http.type", "netty3")
.put(CLUSTER_NAME_SETTING.getKey(), clusterName)
.put("discovery.type", "single-node")
.build();
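// the trailing {} creates an anonymous Node subclass, needed because the plugin-accepting Node constructor is protected in this Elasticsearch version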
testNode =
new Node(
new Environment(InternalSettingsPreparer.prepareSettings(settings)),
Collections.singletonList(Netty3Plugin.class)) {};
startNode(testNode);
client = testNode.client();
testing.runWithSpan(
"setup",
() -> {
// this may potentially create multiple requests and therefore multiple spans, so we wrap
// this call into a top level trace to get exactly one trace in the result.
client
.admin()
.cluster()
.prepareHealth()
.setWaitForYellowStatus()
.execute()
.actionGet(TIMEOUT);
// disable periodic refresh in InternalClusterInfoService as it creates spans that tests
// don't expect
client
.admin()
.cluster()
.updateSettings(
new ClusterUpdateSettingsRequest()
.transientSettings(
Collections.singletonMap(
"cluster.routing.allocation.disk.threshold_enabled", Boolean.FALSE)));
});
testing.waitForTraces(1);
testing.clearData();
}
@AfterAll
static void cleanUp() throws Exception {
testNode.close();
}
@Override
public Client client() {
return client;
}
}


@ -0,0 +1,115 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v5_3;
import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING;
import io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.AbstractElasticsearchTransportClientTest;
import java.io.File;
import java.util.Collections;
import java.util.UUID;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.InternalSettingsPreparer;
import org.elasticsearch.node.Node;
import org.elasticsearch.transport.Netty3Plugin;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.io.TempDir;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
class Elasticsearch53TransportClientTest extends AbstractElasticsearchTransportClientTest {
private static final Logger logger =
LoggerFactory.getLogger(Elasticsearch53TransportClientTest.class);
private static final String clusterName = UUID.randomUUID().toString();
private static Node testNode;
private static TransportAddress tcpPublishAddress;
private static TransportClient client;
@BeforeAll
static void setUp(@TempDir File esWorkingDir) {
logger.info("ES work dir: {}", esWorkingDir);
Settings settings =
Settings.builder()
.put("path.home", esWorkingDir.getPath())
.put("transport.type", "netty3")
.put("http.type", "netty3")
.put(CLUSTER_NAME_SETTING.getKey(), clusterName)
.put("discovery.type", "single-node")
.build();
testNode =
new Node(
new Environment(InternalSettingsPreparer.prepareSettings(settings)),
Collections.singletonList(Netty3Plugin.class)) {};
startNode(testNode);
tcpPublishAddress =
testNode.injector().getInstance(TransportService.class).boundAddress().publishAddress();
client =
new PreBuiltTransportClient(
Settings.builder()
// Since we use listeners to close spans this should make our span closing
// deterministic which is good for tests
.put("thread_pool.listener.size", 1)
.put(CLUSTER_NAME_SETTING.getKey(), clusterName)
.build());
client.addTransportAddress(tcpPublishAddress);
testing.runWithSpan(
"setup",
() -> {
// this may potentially create multiple requests and therefore multiple spans, so we wrap
// this call into a top level trace to get exactly one trace in the result.
client
.admin()
.cluster()
.prepareHealth()
.setWaitForYellowStatus()
.execute()
.actionGet(TIMEOUT);
// disable periodic refresh in InternalClusterInfoService as it creates spans that tests
// don't expect
client
.admin()
.cluster()
.updateSettings(
new ClusterUpdateSettingsRequest()
.transientSettings(
Collections.singletonMap(
"cluster.routing.allocation.disk.threshold_enabled", Boolean.FALSE)));
});
testing.waitForTraces(1);
testing.clearData();
}
@AfterAll
static void cleanUp() throws Exception {
testNode.close();
}
@Override
protected TransportClient client() {
return client;
}
@Override
protected String getAddress() {
return tcpPublishAddress.getAddress();
}
@Override
protected int getPort() {
return tcpPublishAddress.getPort();
}
}


@ -0,0 +1,83 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v5_3.springdata;
import java.io.File;
import java.util.Collections;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.InternalSettingsPreparer;
import org.elasticsearch.node.Node;
import org.elasticsearch.transport.Netty3Plugin;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.elasticsearch.core.ElasticsearchOperations;
import org.springframework.data.elasticsearch.core.ElasticsearchTemplate;
import org.springframework.data.elasticsearch.repository.config.EnableElasticsearchRepositories;
@Configuration
@EnableElasticsearchRepositories(
basePackages =
"io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v5_3.springdata")
@ComponentScan(
basePackages =
"io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v5_3.springdata")
class Config {
private static final Logger logger = LoggerFactory.getLogger(Config.class);
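// populated by the test's @BeforeAll (from a JUnit @TempDir) before the Spring context is created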
static File esWorkingDir;
@Bean
Node elasticSearchNode() throws Exception {
if (esWorkingDir == null) {
throw new IllegalStateException("elasticsearch working directory not set");
}
if (!esWorkingDir.exists()) {
throw new IllegalStateException("elasticsearch working directory does not exist");
}
Settings settings =
Settings.builder()
.put("http.enabled", "false")
.put("path.data", esWorkingDir.toString())
.put("path.home", esWorkingDir.toString())
.put("thread_pool.listener.size", 1)
.put("transport.type", "netty3")
.put("http.type", "netty3")
.put("discovery.type", "single-node")
.build();
logger.info("ES work dir: {}", esWorkingDir);
Node testNode =
new Node(
new Environment(InternalSettingsPreparer.prepareSettings(settings)),
Collections.singletonList(Netty3Plugin.class)) {};
testNode.start();
// disable periodic refresh in InternalClusterInfoService as it creates spans that tests don't
// expect
testNode
.client()
.admin()
.cluster()
.updateSettings(
new ClusterUpdateSettingsRequest()
.transientSettings(
Collections.singletonMap(
"cluster.routing.allocation.disk.threshold_enabled", Boolean.FALSE)));
return testNode;
}
@Bean
ElasticsearchOperations elasticsearchTemplate(Node node) {
return new ElasticsearchTemplate(node.client());
}
}


@ -0,0 +1,60 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v5_3.springdata;
import java.util.Objects;
import org.springframework.data.annotation.Id;
import org.springframework.data.elasticsearch.annotations.Document;
@Document(indexName = "test-index")
class Doc {
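// the default field values below are what the tests index and later assert equality against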
@Id private String id = "1";
private String data = "some data";
public Doc() {}
public Doc(int id, String data) {
this(String.valueOf(id), data);
}
public Doc(String id, String data) {
this.id = id;
this.data = data;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getData() {
return data;
}
public void setData(String data) {
this.data = data;
}
@Override
public boolean equals(Object object) {
if (this == object) {
return true;
}
if (!(object instanceof Doc)) {
return false;
}
Doc doc = (Doc) object;
return Objects.equals(id, doc.id) && Objects.equals(data, doc.data);
}
@Override
public int hashCode() {
return Objects.hash(id, data);
}
}


@ -3,8 +3,8 @@
* SPDX-License-Identifier: Apache-2.0
*/
package springdata
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v5_3.springdata;
import org.springframework.data.elasticsearch.repository.ElasticsearchRepository
import org.springframework.data.elasticsearch.repository.ElasticsearchRepository;
interface DocRepository extends ElasticsearchRepository<Doc, String> {}


@ -0,0 +1,333 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v5_3.springdata;
import static io.opentelemetry.api.common.AttributeKey.longKey;
import static io.opentelemetry.api.common.AttributeKey.stringKey;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.assertThat;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
import io.opentelemetry.api.trace.SpanKind;
import io.opentelemetry.instrumentation.testing.internal.AutoCleanupExtension;
import io.opentelemetry.instrumentation.testing.junit.AgentInstrumentationExtension;
import io.opentelemetry.instrumentation.testing.junit.InstrumentationExtension;
import io.opentelemetry.semconv.incubating.CodeIncubatingAttributes;
import io.opentelemetry.semconv.incubating.DbIncubatingAttributes;
import java.io.File;
import org.junit.jupiter.api.Assumptions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.junit.jupiter.api.io.TempDir;
import org.springframework.context.annotation.AnnotationConfigApplicationContext;
import spock.util.environment.Jvm;
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
class Elasticsearch53SpringRepositoryTest {
@RegisterExtension
private static final InstrumentationExtension testing = AgentInstrumentationExtension.create();
@RegisterExtension static final AutoCleanupExtension autoCleanup = AutoCleanupExtension.create();
@BeforeAll
void setUp(@TempDir File esWorkingDir) {
Config.esWorkingDir = esWorkingDir;
}
private static DocRepository repository() {
// when running on jdk 21 this test occasionally fails with timeout
Assumptions.assumeTrue(
Boolean.getBoolean("testLatestDeps") || !Jvm.getCurrent().isJava21Compatible());
DocRepository result =
testing.runWithSpan(
"setup",
() -> {
AnnotationConfigApplicationContext context =
new AnnotationConfigApplicationContext(Config.class);
autoCleanup.deferCleanup(context);
DocRepository repo = context.getBean(DocRepository.class);
repo.deleteAll();
return repo;
});
testing.waitForTraces(1);
testing.clearData();
return result;
}
@Test
void emptyRepository() {
Iterable<Doc> result = repository().findAll();
assertThat(result.iterator().hasNext()).isFalse();
testing.waitAndAssertTraces(
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("DocRepository.findAll")
.hasKind(SpanKind.INTERNAL)
.hasNoParent()
.hasAttributesSatisfyingExactly(
equalTo(
CodeIncubatingAttributes.CODE_NAMESPACE,
DocRepository.class.getName()),
equalTo(CodeIncubatingAttributes.CODE_FUNCTION, "findAll")),
span ->
span.hasName("SearchAction")
.hasKind(SpanKind.CLIENT)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "SearchAction"),
equalTo(stringKey("elasticsearch.action"), "SearchAction"),
equalTo(stringKey("elasticsearch.request"), "SearchRequest"),
equalTo(stringKey("elasticsearch.request.indices"), "test-index"),
equalTo(stringKey("elasticsearch.request.search.types"), "doc"))));
}
@Test
void crud() {
DocRepository repository = repository();
Doc doc = new Doc();
assertThat(repository.index(doc)).isEqualTo(doc);
testing.waitAndAssertTraces(
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("DocRepository.index")
.hasKind(SpanKind.INTERNAL)
.hasNoParent()
.hasAttributesSatisfyingExactly(
equalTo(
CodeIncubatingAttributes.CODE_NAMESPACE,
DocRepository.class.getName()),
equalTo(CodeIncubatingAttributes.CODE_FUNCTION, "index")),
span ->
span.hasName("IndexAction")
.hasKind(SpanKind.CLIENT)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "IndexAction"),
equalTo(stringKey("elasticsearch.action"), "IndexAction"),
equalTo(stringKey("elasticsearch.request"), "IndexRequest"),
equalTo(stringKey("elasticsearch.request.indices"), "test-index"),
equalTo(stringKey("elasticsearch.request.write.type"), "doc"),
equalTo(longKey("elasticsearch.request.write.version"), -3),
equalTo(longKey("elasticsearch.response.status"), 201),
equalTo(longKey("elasticsearch.shard.replication.failed"), 0),
equalTo(longKey("elasticsearch.shard.replication.successful"), 1),
equalTo(longKey("elasticsearch.shard.replication.total"), 2)),
span ->
span.hasName("RefreshAction")
.hasKind(SpanKind.CLIENT)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "RefreshAction"),
equalTo(stringKey("elasticsearch.action"), "RefreshAction"),
equalTo(stringKey("elasticsearch.request"), "RefreshRequest"),
equalTo(stringKey("elasticsearch.request.indices"), "test-index"),
equalTo(longKey("elasticsearch.shard.broadcast.failed"), 0),
equalTo(longKey("elasticsearch.shard.broadcast.successful"), 5),
equalTo(longKey("elasticsearch.shard.broadcast.total"), 10))));
testing.clearData();
assertThat(repository.findById("1").get()).isEqualTo(doc);
testing.waitAndAssertTraces(
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("DocRepository.findById")
.hasKind(SpanKind.INTERNAL)
.hasNoParent()
.hasAttributesSatisfyingExactly(
equalTo(
CodeIncubatingAttributes.CODE_NAMESPACE,
DocRepository.class.getName()),
equalTo(CodeIncubatingAttributes.CODE_FUNCTION, "findById")),
span ->
span.hasName("GetAction")
.hasKind(SpanKind.CLIENT)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "GetAction"),
equalTo(stringKey("elasticsearch.action"), "GetAction"),
equalTo(stringKey("elasticsearch.request"), "GetRequest"),
equalTo(stringKey("elasticsearch.request.indices"), "test-index"),
equalTo(stringKey("elasticsearch.type"), "doc"),
equalTo(stringKey("elasticsearch.id"), "1"),
equalTo(longKey("elasticsearch.version"), 1))));
testing.clearData();
doc.setData("other data");
assertThat(repository.index(doc)).isEqualTo(doc);
assertThat(repository.findById("1").get()).isEqualTo(doc);
testing.waitAndAssertTraces(
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("DocRepository.index")
.hasKind(SpanKind.INTERNAL)
.hasNoParent()
.hasAttributesSatisfyingExactly(
equalTo(
CodeIncubatingAttributes.CODE_NAMESPACE,
DocRepository.class.getName()),
equalTo(CodeIncubatingAttributes.CODE_FUNCTION, "index")),
span ->
span.hasName("IndexAction")
.hasKind(SpanKind.CLIENT)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "IndexAction"),
equalTo(stringKey("elasticsearch.action"), "IndexAction"),
equalTo(stringKey("elasticsearch.request"), "IndexRequest"),
equalTo(stringKey("elasticsearch.request.indices"), "test-index"),
equalTo(stringKey("elasticsearch.request.write.type"), "doc"),
equalTo(longKey("elasticsearch.request.write.version"), -3),
equalTo(longKey("elasticsearch.response.status"), 200),
equalTo(longKey("elasticsearch.shard.replication.failed"), 0),
equalTo(longKey("elasticsearch.shard.replication.successful"), 1),
equalTo(longKey("elasticsearch.shard.replication.total"), 2)),
span ->
span.hasName("RefreshAction")
.hasKind(SpanKind.CLIENT)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "RefreshAction"),
equalTo(stringKey("elasticsearch.action"), "RefreshAction"),
equalTo(stringKey("elasticsearch.request"), "RefreshRequest"),
equalTo(stringKey("elasticsearch.request.indices"), "test-index"),
equalTo(longKey("elasticsearch.shard.broadcast.failed"), 0),
equalTo(longKey("elasticsearch.shard.broadcast.successful"), 5),
equalTo(longKey("elasticsearch.shard.broadcast.total"), 10))),
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("DocRepository.findById")
.hasKind(SpanKind.INTERNAL)
.hasNoParent()
.hasAttributesSatisfyingExactly(
equalTo(
CodeIncubatingAttributes.CODE_NAMESPACE,
DocRepository.class.getName()),
equalTo(CodeIncubatingAttributes.CODE_FUNCTION, "findById")),
span ->
span.hasName("GetAction")
.hasKind(SpanKind.CLIENT)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "GetAction"),
equalTo(stringKey("elasticsearch.action"), "GetAction"),
equalTo(stringKey("elasticsearch.request"), "GetRequest"),
equalTo(stringKey("elasticsearch.request.indices"), "test-index"),
equalTo(stringKey("elasticsearch.type"), "doc"),
equalTo(stringKey("elasticsearch.id"), "1"),
equalTo(longKey("elasticsearch.version"), 2))));
testing.clearData();
repository.deleteById("1");
assertThat(repository.findAll().iterator().hasNext()).isFalse();
testing.waitAndAssertTraces(
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("DocRepository.deleteById")
.hasKind(SpanKind.INTERNAL)
.hasNoParent()
.hasAttributesSatisfyingExactly(
equalTo(
CodeIncubatingAttributes.CODE_NAMESPACE,
DocRepository.class.getName()),
equalTo(CodeIncubatingAttributes.CODE_FUNCTION, "deleteById")),
span ->
span.hasName("DeleteAction")
.hasKind(SpanKind.CLIENT)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "DeleteAction"),
equalTo(stringKey("elasticsearch.action"), "DeleteAction"),
equalTo(stringKey("elasticsearch.request"), "DeleteRequest"),
equalTo(stringKey("elasticsearch.request.indices"), "test-index"),
equalTo(stringKey("elasticsearch.request.write.type"), "doc"),
equalTo(longKey("elasticsearch.request.write.version"), -3),
equalTo(longKey("elasticsearch.shard.replication.failed"), 0),
equalTo(longKey("elasticsearch.shard.replication.successful"), 1),
equalTo(longKey("elasticsearch.shard.replication.total"), 2)),
span ->
span.hasName("RefreshAction")
.hasKind(SpanKind.CLIENT)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "RefreshAction"),
equalTo(stringKey("elasticsearch.action"), "RefreshAction"),
equalTo(stringKey("elasticsearch.request"), "RefreshRequest"),
equalTo(stringKey("elasticsearch.request.indices"), "test-index"),
equalTo(longKey("elasticsearch.shard.broadcast.failed"), 0),
equalTo(longKey("elasticsearch.shard.broadcast.successful"), 5),
equalTo(longKey("elasticsearch.shard.broadcast.total"), 10))),
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("DocRepository.findAll")
.hasKind(SpanKind.INTERNAL)
.hasNoParent()
.hasAttributesSatisfyingExactly(
equalTo(
CodeIncubatingAttributes.CODE_NAMESPACE,
DocRepository.class.getName()),
equalTo(CodeIncubatingAttributes.CODE_FUNCTION, "findAll")),
span ->
span.hasName("SearchAction")
.hasKind(SpanKind.CLIENT)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "SearchAction"),
equalTo(stringKey("elasticsearch.action"), "SearchAction"),
equalTo(stringKey("elasticsearch.request"), "SearchRequest"),
equalTo(stringKey("elasticsearch.request.indices"), "test-index"),
equalTo(stringKey("elasticsearch.request.search.types"), "doc"))));
}
}


@ -0,0 +1,398 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v5_3.springdata;
import static io.opentelemetry.api.common.AttributeKey.longKey;
import static io.opentelemetry.api.common.AttributeKey.stringKey;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.awaitility.Awaitility.await;
import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING;
import io.opentelemetry.api.trace.SpanKind;
import io.opentelemetry.instrumentation.testing.internal.AutoCleanupExtension;
import io.opentelemetry.instrumentation.testing.junit.AgentInstrumentationExtension;
import io.opentelemetry.instrumentation.testing.junit.InstrumentationExtension;
import io.opentelemetry.sdk.trace.data.StatusData;
import io.opentelemetry.semconv.incubating.DbIncubatingAttributes;
import java.io.File;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.http.BindHttpException;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.node.InternalSettingsPreparer;
import org.elasticsearch.node.Node;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.aggregations.bucket.nested.InternalNested;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.transport.BindTransportException;
import org.elasticsearch.transport.Netty3Plugin;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Assumptions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.junit.jupiter.api.io.TempDir;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.data.elasticsearch.core.ElasticsearchTemplate;
import org.springframework.data.elasticsearch.core.ResultsExtractor;
import org.springframework.data.elasticsearch.core.query.IndexQueryBuilder;
import org.springframework.data.elasticsearch.core.query.NativeSearchQuery;
import org.springframework.data.elasticsearch.core.query.NativeSearchQueryBuilder;
import org.testcontainers.shaded.com.google.common.collect.ImmutableMap;
import spock.util.environment.Jvm;
class Elasticsearch53SpringTemplateTest {
private static final Logger logger =
LoggerFactory.getLogger(Elasticsearch53SpringTemplateTest.class);
private static final long TIMEOUT = TimeUnit.SECONDS.toMillis(10);
@RegisterExtension
private static final InstrumentationExtension testing = AgentInstrumentationExtension.create();
@RegisterExtension static final AutoCleanupExtension autoCleanup = AutoCleanupExtension.create();
private static final String clusterName = UUID.randomUUID().toString();
private static Node testNode;
private static ElasticsearchTemplate template;
@BeforeAll
static void setUp(@TempDir File esWorkingDir) {
logger.info("ES work dir: {}", esWorkingDir);
Settings settings =
Settings.builder()
.put("path.home", esWorkingDir.getPath())
// Since we use listeners to close spans this should make our span closing deterministic
// which is good for tests
.put("thread_pool.listener.size", 1)
.put("transport.type", "netty3")
.put("http.type", "netty3")
.put(CLUSTER_NAME_SETTING.getKey(), clusterName)
.put("discovery.type", "single-node")
.build();
testNode =
new Node(
new Environment(InternalSettingsPreparer.prepareSettings(settings)),
Collections.singletonList(Netty3Plugin.class)) {};
// retry when starting elasticsearch fails with
// org.elasticsearch.http.BindHttpException: Failed to resolve host [[]]
// Caused by: java.net.SocketException: No such device (getFlags() failed)
// or
// org.elasticsearch.transport.BindTransportException: Failed to resolve host null
// Caused by: java.net.SocketException: No such device (getFlags() failed)
await()
.atMost(Duration.ofSeconds(10))
.ignoreExceptionsMatching(
it -> it instanceof BindHttpException || it instanceof BindTransportException)
.until(
() -> {
testNode.start();
return true;
});
Client client = testNode.client();
testing.runWithSpan(
"setup",
() -> {
// this may potentially create multiple requests and therefore multiple spans, so we wrap
// this call into a top level trace to get exactly one trace in the result.
client
.admin()
.cluster()
.prepareHealth()
.setWaitForYellowStatus()
.execute()
.actionGet(TIMEOUT);
// disable periodic refresh in InternalClusterInfoService as it creates spans that tests
// don't expect
client
.admin()
.cluster()
.updateSettings(
new ClusterUpdateSettingsRequest()
.transientSettings(
Collections.singletonMap(
"cluster.routing.allocation.disk.threshold_enabled", Boolean.FALSE)));
});
testing.waitForTraces(1);
testing.clearData();
template = new ElasticsearchTemplate(client);
}
@AfterAll
static void cleanUp() throws Exception {
testNode.close();
}
@BeforeEach
void prepareTest() {
// when running on jdk 21 this test occasionally fails with timeout
Assumptions.assumeTrue(
Boolean.getBoolean("testLatestDeps") || !Jvm.getCurrent().isJava21Compatible());
}
@Test
void elasticsearchError() {
String indexName = "invalid-index";
assertThatThrownBy(() -> template.refresh(indexName))
.isInstanceOf(IndexNotFoundException.class);
IndexNotFoundException expectedException = new IndexNotFoundException("no such index");
testing.waitAndAssertTraces(
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("RefreshAction")
.hasKind(SpanKind.CLIENT)
.hasNoParent()
.hasStatus(StatusData.error())
.hasException(expectedException)
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "RefreshAction"),
equalTo(stringKey("elasticsearch.action"), "RefreshAction"),
equalTo(stringKey("elasticsearch.request"), "RefreshRequest"),
equalTo(stringKey("elasticsearch.request.indices"), indexName))));
}
@Test
void elasticsearchGet() {
String indexName = "test-index";
String indexType = "test-type";
String id = "1";
template.createIndex(indexName);
autoCleanup.deferCleanup(() -> template.deleteIndex(indexName));
template
.getClient()
.admin()
.cluster()
.prepareHealth()
.setWaitForYellowStatus()
.execute()
.actionGet(TIMEOUT);
NativeSearchQuery query =
new NativeSearchQueryBuilder()
.withIndices(indexName)
.withTypes(indexType)
.withIds(Collections.singleton(id))
.build();
assertThat(template.queryForIds(query)).isEmpty();
String result =
template.index(
new IndexQueryBuilder()
.withObject(new Doc())
.withIndexName(indexName)
.withType(indexType)
.withId(id)
.build());
template.refresh(Doc.class);
assertThat(result).isEqualTo(id);
assertThat(template.queryForList(query, Doc.class))
.satisfiesExactly(doc -> assertThat(doc).isEqualTo(new Doc()));
testing.waitAndAssertTraces(
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("CreateIndexAction")
.hasKind(SpanKind.CLIENT)
.hasNoParent()
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "CreateIndexAction"),
equalTo(stringKey("elasticsearch.action"), "CreateIndexAction"),
equalTo(stringKey("elasticsearch.request"), "CreateIndexRequest"),
equalTo(stringKey("elasticsearch.request.indices"), indexName))),
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("ClusterHealthAction")
.hasKind(SpanKind.CLIENT)
.hasNoParent()
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "ClusterHealthAction"),
equalTo(stringKey("elasticsearch.action"), "ClusterHealthAction"),
equalTo(stringKey("elasticsearch.request"), "ClusterHealthRequest"))),
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("SearchAction")
.hasKind(SpanKind.CLIENT)
.hasNoParent()
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "SearchAction"),
equalTo(stringKey("elasticsearch.action"), "SearchAction"),
equalTo(stringKey("elasticsearch.request"), "SearchRequest"),
equalTo(stringKey("elasticsearch.request.indices"), indexName),
equalTo(stringKey("elasticsearch.request.search.types"), indexType))),
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("IndexAction")
.hasKind(SpanKind.CLIENT)
.hasNoParent()
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "IndexAction"),
equalTo(stringKey("elasticsearch.action"), "IndexAction"),
equalTo(stringKey("elasticsearch.request"), "IndexRequest"),
equalTo(stringKey("elasticsearch.request.indices"), indexName),
equalTo(stringKey("elasticsearch.request.write.type"), indexType),
equalTo(longKey("elasticsearch.request.write.version"), -3),
equalTo(longKey("elasticsearch.response.status"), 201),
equalTo(longKey("elasticsearch.shard.replication.failed"), 0),
equalTo(longKey("elasticsearch.shard.replication.successful"), 1),
equalTo(longKey("elasticsearch.shard.replication.total"), 2))),
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("RefreshAction")
.hasKind(SpanKind.CLIENT)
.hasNoParent()
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "RefreshAction"),
equalTo(stringKey("elasticsearch.action"), "RefreshAction"),
equalTo(stringKey("elasticsearch.request"), "RefreshRequest"),
equalTo(stringKey("elasticsearch.request.indices"), indexName),
equalTo(longKey("elasticsearch.shard.broadcast.failed"), 0),
equalTo(longKey("elasticsearch.shard.broadcast.successful"), 5),
equalTo(longKey("elasticsearch.shard.broadcast.total"), 10))),
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("SearchAction")
.hasKind(SpanKind.CLIENT)
.hasNoParent()
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "SearchAction"),
equalTo(stringKey("elasticsearch.action"), "SearchAction"),
equalTo(stringKey("elasticsearch.request"), "SearchRequest"),
equalTo(stringKey("elasticsearch.request.indices"), indexName),
equalTo(stringKey("elasticsearch.request.search.types"), indexType))));
}
@Test
void resultsExtractor() {
String indexName = "test-index-extract";
testing.runWithSpan(
"setup",
() -> {
template.createIndex(indexName);
autoCleanup.deferCleanup(() -> template.deleteIndex(indexName));
testNode
.client()
.admin()
.cluster()
.prepareHealth()
.setWaitForYellowStatus()
.execute()
.actionGet(TIMEOUT);
template.index(
new IndexQueryBuilder()
.withObject(new Doc(1, "doc a"))
.withIndexName(indexName)
.withId("a")
.build());
template.index(
new IndexQueryBuilder()
.withObject(new Doc(2, "doc b"))
.withIndexName(indexName)
.withId("b")
.build());
template.refresh(indexName);
});
testing.waitForTraces(1);
testing.clearData();
NativeSearchQuery query = new NativeSearchQueryBuilder().withIndices(indexName).build();
AtomicLong hits = new AtomicLong();
List<Map<String, Object>> results = new ArrayList<>();
Map<Integer, Long> bucketTags = new HashMap<>();
template.query(
query,
(ResultsExtractor<Doc>)
response -> {
hits.addAndGet(response.getHits().getTotalHits());
results.addAll(
StreamSupport.stream(response.getHits().spliterator(), false)
.map(SearchHit::getSource)
.collect(Collectors.toList()));
if (response.getAggregations() != null) {
InternalNested internalNested = response.getAggregations().get("tag");
if (internalNested != null) {
Terms terms = internalNested.getAggregations().get("count_agg");
List<? extends Terms.Bucket> buckets = terms.getBuckets();
for (Terms.Bucket bucket : buckets) {
bucketTags.put(Integer.valueOf(bucket.getKeyAsString()), bucket.getDocCount());
}
}
}
return null;
});
assertThat(hits.get()).isEqualTo(2);
assertThat(results.get(0)).isEqualTo(ImmutableMap.of("id", "2", "data", "doc b"));
assertThat(results.get(1)).isEqualTo(ImmutableMap.of("id", "1", "data", "doc a"));
assertThat(bucketTags).isEmpty();
testing.waitAndAssertTraces(
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("SearchAction")
.hasKind(SpanKind.CLIENT)
.hasNoParent()
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "SearchAction"),
equalTo(stringKey("elasticsearch.action"), "SearchAction"),
equalTo(stringKey("elasticsearch.request"), "SearchRequest"),
equalTo(stringKey("elasticsearch.request.indices"), indexName))));
}
}


@ -40,12 +40,68 @@ dependencies {
testLibrary("org.elasticsearch.plugin:transport-netty4-client:6.0.0")
testImplementation(project(":instrumentation:elasticsearch:elasticsearch-transport-6.0:testing"))
testImplementation(project(":instrumentation:elasticsearch:elasticsearch-transport-common:testing"))
testImplementation("org.apache.logging.log4j:log4j-core:2.11.0")
testImplementation("org.apache.logging.log4j:log4j-api:2.11.0")
}
tasks.withType<Test>().configureEach {
// TODO run tests both with and without experimental span attributes
jvmArgs("-Dotel.instrumentation.elasticsearch.experimental-span-attributes=true")
val latestDepTest = findProperty("testLatestDeps") as Boolean
testing {
suites {
val elasticsearch6Test by registering(JvmTestSuite::class) {
dependencies {
if (latestDepTest) {
implementation("org.elasticsearch.client:transport:6.4.+")
implementation("org.elasticsearch.plugin:transport-netty4-client:6.4.+")
} else {
implementation("org.elasticsearch.client:transport:6.0.0")
implementation("org.elasticsearch.plugin:transport-netty4-client:6.0.0")
}
implementation(project(":instrumentation:elasticsearch:elasticsearch-transport-6.0:testing"))
implementation(project(":instrumentation:elasticsearch:elasticsearch-transport-common:testing"))
}
}
val elasticsearch65Test by registering(JvmTestSuite::class) {
dependencies {
if (latestDepTest) {
implementation("org.elasticsearch.client:transport:6.+")
implementation("org.elasticsearch.plugin:transport-netty4-client:6.+")
} else {
implementation("org.elasticsearch.client:transport:6.5.0")
implementation("org.elasticsearch.plugin:transport-netty4-client:6.5.0")
}
implementation(project(":instrumentation:elasticsearch:elasticsearch-transport-6.0:testing"))
implementation(project(":instrumentation:elasticsearch:elasticsearch-transport-common:testing"))
}
}
val elasticsearch7Test by registering(JvmTestSuite::class) {
dependencies {
if (latestDepTest) {
implementation("org.elasticsearch.client:transport:+")
implementation("org.elasticsearch.plugin:transport-netty4-client:+")
} else {
implementation("org.elasticsearch.client:transport:7.0.0")
implementation("org.elasticsearch.plugin:transport-netty4-client:7.0.0")
}
implementation(project(":instrumentation:elasticsearch:elasticsearch-transport-6.0:testing"))
implementation(project(":instrumentation:elasticsearch:elasticsearch-transport-common:testing"))
}
}
}
}
tasks {
withType<Test>().configureEach {
systemProperty("testLatestDeps", findProperty("testLatestDeps") as Boolean)
// TODO run tests both with and without experimental span attributes
jvmArgs("-Dotel.instrumentation.elasticsearch.experimental-span-attributes=true")
}
check {
dependsOn(testing.suites)
}
}


@ -0,0 +1,17 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v6_5;
import io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v6_0.AbstractElasticsearch6NodeClientTest;
import io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v6_0.NodeFactory;
class Elasticsearch65NodeClientTest extends AbstractElasticsearch6NodeClientTest {
@Override
protected NodeFactory getNodeFactory() {
return new Elasticsearch65NodeFactory();
}
}


@ -0,0 +1,27 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v6_5;
import io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v6_0.NodeFactory;
import java.util.Collections;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.InternalSettingsPreparer;
import org.elasticsearch.node.Node;
import org.elasticsearch.transport.Netty4Plugin;
class Elasticsearch65NodeFactory implements NodeFactory {
@Override
public Node newNode(Settings settings) {
return new Node(
InternalSettingsPreparer.prepareEnvironment(settings, null),
Collections.singleton(Netty4Plugin.class),
true) {
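// no-op: the embedded test node skips registering its derived node name with the logging configuration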
@Override
protected void registerDerivedNodeNameWithLogger(String s) {}
};
}
}


@ -0,0 +1,17 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v6_5;
import io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v6_0.AbstractElasticsearch6TransportClientTest;
import io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v6_0.NodeFactory;
class Elasticsearch65TransportClientTest extends AbstractElasticsearch6TransportClientTest {
@Override
protected NodeFactory getNodeFactory() {
return new Elasticsearch65NodeFactory();
}
}


@ -0,0 +1,14 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v6_0;
class Elasticsearch6NodeClientTest extends AbstractElasticsearch6NodeClientTest {
@Override
protected NodeFactory getNodeFactory() {
return new Elasticsearch6NodeFactory();
}
}


@ -0,0 +1,24 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v6_0;
import java.util.Collections;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.InternalSettingsPreparer;
import org.elasticsearch.node.Node;
import org.elasticsearch.transport.Netty4Plugin;
class Elasticsearch6NodeFactory implements NodeFactory {
@Override
public Node newNode(Settings settings) {
return new Node(
InternalSettingsPreparer.prepareEnvironment(settings, null),
Collections.singleton(Netty4Plugin.class)) {
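// no-op hook: skips registering the derived node name with the logging configuration on versions that call it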
protected void registerDerivedNodeNameWithLogger(String s) {}
};
}
}


@ -0,0 +1,14 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v6_0;
class Elasticsearch6TransportClientTest extends AbstractElasticsearch6TransportClientTest {
@Override
protected NodeFactory getNodeFactory() {
return new Elasticsearch6NodeFactory();
}
}


@ -0,0 +1,22 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v7_0;
import io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v6_0.AbstractElasticsearch6NodeClientTest;
import io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v6_0.NodeFactory;
class Elasticsearch7NodeClientTest extends AbstractElasticsearch6NodeClientTest {
@Override
protected NodeFactory getNodeFactory() {
return new Elasticsearch7NodeFactory();
}
@Override
protected String getIndexNotFoundMessage() {
return "invalid-index";
}
}


@ -0,0 +1,24 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v7_0;
import io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v6_0.NodeFactory;
import java.util.Collections;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.InternalSettingsPreparer;
import org.elasticsearch.node.Node;
import org.elasticsearch.transport.Netty4Plugin;
class Elasticsearch7NodeFactory implements NodeFactory {
@Override
public Node newNode(Settings settings) {
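// the ES 7 prepareEnvironment overload also takes extra properties, a config path, and a default node name supplier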
return new Node(
InternalSettingsPreparer.prepareEnvironment(
settings, Collections.emptyMap(), null, () -> "default node name"),
Collections.singleton(Netty4Plugin.class),
true) {};
}
}


@ -0,0 +1,27 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v7_0;
import io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v6_0.AbstractElasticsearch6TransportClientTest;
import io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v6_0.NodeFactory;
class Elasticsearch7TransportClientTest extends AbstractElasticsearch6TransportClientTest {
@Override
protected NodeFactory getNodeFactory() {
return new Elasticsearch7NodeFactory();
}
@Override
protected String getIndexNotFoundMessage() {
return "invalid-index";
}
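// with the latest 7.x dependencies this operation is reported as AutoPutMappingAction rather than PutMappingAction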
@Override
protected String getPutMappingActionName() {
return Boolean.getBoolean("testLatestDeps") ? "AutoPutMappingAction" : "PutMappingAction";
}
}


@ -5,6 +5,7 @@
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v6_0;
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.common.AttributesBuilder;
import io.opentelemetry.context.Context;
import io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.ElasticTransportRequest;
@ -14,6 +15,13 @@ import org.elasticsearch.action.DocWriteRequest;
public class Elasticsearch6TransportExperimentalAttributesExtractor
extends ElasticsearchTransportExperimentalAttributesExtractor {
private static final AttributeKey<String> ELASTICSEARCH_REQUEST_WRITE_TYPE =
AttributeKey.stringKey("elasticsearch.request.write.type");
private static final AttributeKey<String> ELASTICSEARCH_REQUEST_WRITE_ROUTING =
AttributeKey.stringKey("elasticsearch.request.write.routing");
private static final AttributeKey<Long> ELASTICSEARCH_REQUEST_WRITE_VERSION =
AttributeKey.longKey("elasticsearch.request.write.version");
@Override
public void onStart(
AttributesBuilder attributes,
@ -24,9 +32,9 @@ public class Elasticsearch6TransportExperimentalAttributesExtractor
Object request = transportRequest.getRequest();
if (request instanceof DocWriteRequest) {
DocWriteRequest<?> req = (DocWriteRequest<?>) request;
attributes.put("elasticsearch.request.write.type", req.type());
attributes.put("elasticsearch.request.write.routing", req.routing());
attributes.put("elasticsearch.request.write.version", req.version());
attributes.put(ELASTICSEARCH_REQUEST_WRITE_TYPE, req.type());
attributes.put(ELASTICSEARCH_REQUEST_WRITE_ROUTING, req.routing());
attributes.put(ELASTICSEARCH_REQUEST_WRITE_VERSION, req.version());
}
}
}
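The extractor above now declares its experimental attribute keys once as typed AttributeKey constants instead of passing raw key strings on every request. For reference, a minimal, self-contained sketch of that pattern (the class and method names below are illustrative only, not part of the instrumentation):

import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.common.Attributes;

class TypedAttributeKeySketch {
  // keys are declared once and reused for every request
  private static final AttributeKey<String> WRITE_TYPE =
      AttributeKey.stringKey("elasticsearch.request.write.type");
  private static final AttributeKey<Long> WRITE_VERSION =
      AttributeKey.longKey("elasticsearch.request.write.version");

  static Attributes describeWrite(String type, long version) {
    // the typed put() overloads catch key/value type mismatches at compile time
    return Attributes.builder().put(WRITE_TYPE, type).put(WRITE_VERSION, version).build();
  }
}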


@ -1,275 +0,0 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
import io.opentelemetry.semconv.incubating.DbIncubatingAttributes
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest
import org.elasticsearch.client.Client
import org.elasticsearch.common.io.FileSystemUtils
import org.elasticsearch.common.settings.Settings
import org.elasticsearch.index.IndexNotFoundException
import org.elasticsearch.node.Node
import spock.lang.Shared
import spock.lang.Unroll
import static io.opentelemetry.api.trace.SpanKind.CLIENT
import static io.opentelemetry.api.trace.SpanKind.INTERNAL
import static io.opentelemetry.api.trace.StatusCode.ERROR
import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
class Elasticsearch6NodeClientTest extends AbstractElasticsearchNodeClientTest {
public static final long TIMEOUT = 10000 // 10 seconds
@Shared
Node testNode
@Shared
File esWorkingDir
@Shared
String clusterName = UUID.randomUUID().toString()
@Shared
Client client
def setupSpec() {
esWorkingDir = File.createTempDir("test-es-working-dir-", "")
esWorkingDir.deleteOnExit()
println "ES work dir: $esWorkingDir"
def settings = Settings.builder()
.put("path.home", esWorkingDir.path)
// Since we use listeners to close spans this should make our span closing deterministic which is good for tests
.put("thread_pool.listener.size", 1)
.put(CLUSTER_NAME_SETTING.getKey(), clusterName)
.put("discovery.type", "single-node")
.build()
testNode = NodeFactory.newNode(settings)
testNode.start()
client = testNode.client()
runWithSpan("setup") {
// this may potentially create multiple requests and therefore multiple spans, so we wrap this call
// into a top level trace to get exactly one trace in the result.
client.admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet(TIMEOUT)
// disable periodic refresh in InternalClusterInfoService as it creates spans that tests don't expect
client.admin().cluster().updateSettings(new ClusterUpdateSettingsRequest().transientSettings(["cluster.routing.allocation.disk.threshold_enabled": false]))
}
waitForTraces(1)
}
def cleanupSpec() {
testNode?.close()
if (esWorkingDir != null) {
FileSystemUtils.deleteSubDirectories(esWorkingDir.toPath())
esWorkingDir.delete()
}
}
@Override
Client client() {
client
}
@Unroll
def "test elasticsearch status #callKind"() {
setup:
def clusterHealthStatus = runWithSpan("parent") {
call.call()
}
expect:
clusterHealthStatus.name() == "GREEN"
assertTraces(1) {
trace(0, 3) {
span(0) {
name "parent"
kind INTERNAL
hasNoParent()
}
span(1) {
name "ClusterHealthAction"
kind CLIENT
childOf(span(0))
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "ClusterHealthAction"
"elasticsearch.action" "ClusterHealthAction"
"elasticsearch.request" "ClusterHealthRequest"
}
}
span(2) {
name "callback"
kind INTERNAL
childOf(span(0))
}
}
}
where:
callKind | call
"sync" | { clusterHealthSync() }
"async" | { clusterHealthAsync() }
}
def "test elasticsearch error #callKind"() {
when:
runWithSpan("parent") {
call.call(indexName, indexType, id)
}
then:
thrown IndexNotFoundException
and:
assertTraces(1) {
trace(0, 3) {
span(0) {
name "parent"
status ERROR
errorEvent IndexNotFoundException, ~/no such index( \[invalid-index])?/
kind INTERNAL
hasNoParent()
}
span(1) {
name "GetAction"
kind CLIENT
status ERROR
childOf(span(0))
errorEvent IndexNotFoundException, ~/no such index( \[invalid-index])?/
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "GetAction"
"elasticsearch.action" "GetAction"
"elasticsearch.request" "GetRequest"
"elasticsearch.request.indices" indexName
}
}
span(2) {
name "callback"
kind INTERNAL
childOf(span(0))
}
}
}
where:
indexName = "invalid-index"
indexType = "test-type"
id = "1"
callKind | call
"sync" | { indexName, indexType, id -> prepareGetSync(indexName, indexType, id) }
"async" | { indexName, indexType, id -> prepareGetAsync(indexName, indexType, id) }
}
def "test elasticsearch get"() {
setup:
def indexResult = client.admin().indices().prepareCreate(indexName).get()
expect:
indexResult.index() == indexName
when:
def emptyResult = client.prepareGet(indexName, indexType, id).get()
then:
!emptyResult.isExists()
emptyResult.id == id
emptyResult.type == indexType
emptyResult.index == indexName
when:
def createResult = client.prepareIndex(indexName, indexType, id).setSource([:]).get()
then:
createResult.id == id
createResult.type == indexType
createResult.index == indexName
createResult.status().status == 201
when:
def result = client.prepareGet(indexName, indexType, id).get()
then:
result.isExists()
result.id == id
result.type == indexType
result.index == indexName
and:
assertTraces(4) {
trace(0, 1) {
span(0) {
name "CreateIndexAction"
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "CreateIndexAction"
"elasticsearch.action" "CreateIndexAction"
"elasticsearch.request" "CreateIndexRequest"
"elasticsearch.request.indices" indexName
}
}
}
trace(1, 1) {
span(0) {
name "GetAction"
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "GetAction"
"elasticsearch.action" "GetAction"
"elasticsearch.request" "GetRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.type" indexType
"elasticsearch.id" "1"
"elasticsearch.version"(-1)
}
}
}
trace(2, 1) {
span(0) {
name "IndexAction"
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "IndexAction"
"elasticsearch.action" "IndexAction"
"elasticsearch.request" "IndexRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.request.write.type" indexType
"elasticsearch.request.write.version"(-3)
"elasticsearch.response.status" 201
"elasticsearch.shard.replication.total" 2
"elasticsearch.shard.replication.successful" 1
"elasticsearch.shard.replication.failed" 0
}
}
}
trace(3, 1) {
span(0) {
name "GetAction"
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "GetAction"
"elasticsearch.action" "GetAction"
"elasticsearch.request" "GetRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.type" indexType
"elasticsearch.id" "1"
"elasticsearch.version" 1
}
}
}
}
cleanup:
client.admin().indices().prepareDelete(indexName).get()
where:
indexName = "test-index"
indexType = "test-type"
id = "1"
}
}


@ -1,321 +0,0 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
import io.opentelemetry.semconv.incubating.DbIncubatingAttributes
import io.opentelemetry.semconv.NetworkAttributes
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest
import org.elasticsearch.client.transport.TransportClient
import org.elasticsearch.common.io.FileSystemUtils
import org.elasticsearch.common.settings.Settings
import org.elasticsearch.common.transport.TransportAddress
import org.elasticsearch.index.IndexNotFoundException
import org.elasticsearch.node.Node
import org.elasticsearch.transport.RemoteTransportException
import org.elasticsearch.transport.TransportService
import org.elasticsearch.transport.client.PreBuiltTransportClient
import spock.lang.Shared
import spock.lang.Unroll
import static io.opentelemetry.api.trace.SpanKind.CLIENT
import static io.opentelemetry.api.trace.SpanKind.INTERNAL
import static io.opentelemetry.api.trace.StatusCode.ERROR
import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
class Elasticsearch6TransportClientTest extends AbstractElasticsearchTransportClientTest {
public static final long TIMEOUT = 10000 // 10 seconds
@Shared
TransportAddress tcpPublishAddress
@Shared
Node testNode
@Shared
File esWorkingDir
@Shared
String clusterName = UUID.randomUUID().toString()
@Shared
TransportClient client
def setupSpec() {
esWorkingDir = File.createTempDir("test-es-working-dir-", "")
esWorkingDir.deleteOnExit()
println "ES work dir: $esWorkingDir"
def settings = Settings.builder()
.put("path.home", esWorkingDir.path)
.put(CLUSTER_NAME_SETTING.getKey(), clusterName)
.put("discovery.type", "single-node")
.build()
testNode = NodeFactory.newNode(settings)
testNode.start()
tcpPublishAddress = testNode.injector().getInstance(TransportService).boundAddress().publishAddress()
client = new PreBuiltTransportClient(
Settings.builder()
// Since we use listeners to close spans this should make our span closing deterministic which is good for tests
.put("thread_pool.listener.size", 1)
.put(CLUSTER_NAME_SETTING.getKey(), clusterName)
.build()
)
client.addTransportAddress(tcpPublishAddress)
runWithSpan("setup") {
// this may potentially create multiple requests and therefore multiple spans, so we wrap this call
// into a top level trace to get exactly one trace in the result.
client.admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet(TIMEOUT)
// disable periodic refresh in InternalClusterInfoService as it creates spans that tests don't expect
client.admin().cluster().updateSettings(new ClusterUpdateSettingsRequest().transientSettings(["cluster.routing.allocation.disk.threshold_enabled": false]))
}
waitForTraces(1)
}
def cleanupSpec() {
client?.close()
testNode?.close()
if (esWorkingDir != null) {
FileSystemUtils.deleteSubDirectories(esWorkingDir.toPath())
esWorkingDir.delete()
}
}
@Override
TransportClient client() {
client
}
@Unroll
def "test elasticsearch status #callKind"() {
setup:
def clusterHealthStatus = runWithSpan("parent") {
call.call()
}
expect:
clusterHealthStatus.name() == "GREEN"
assertTraces(1) {
trace(0, 3) {
span(0) {
name "parent"
kind INTERNAL
hasNoParent()
}
span(1) {
name "ClusterHealthAction"
kind CLIENT
childOf(span(0))
attributes {
"$NetworkAttributes.NETWORK_TYPE" { it == "ipv4" || it == "ipv6" }
"$NetworkAttributes.NETWORK_PEER_ADDRESS" tcpPublishAddress.address
"$NetworkAttributes.NETWORK_PEER_PORT" tcpPublishAddress.port
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "ClusterHealthAction"
"elasticsearch.action" "ClusterHealthAction"
"elasticsearch.request" "ClusterHealthRequest"
}
}
span(2) {
name "callback"
kind INTERNAL
childOf(span(0))
}
}
}
where:
callKind | call
"sync" | { clusterHealthSync() }
"async" | { clusterHealthAsync() }
}
def "test elasticsearch error #callKind"() {
when:
runWithSpan("parent") {
call.call(indexName, indexType, id)
}
then:
thrown IndexNotFoundException
and:
assertTraces(1) {
trace(0, 3) {
span(0) {
name "parent"
status ERROR
errorEvent IndexNotFoundException, ~/no such index( \[invalid-index])?/
kind INTERNAL
hasNoParent()
}
span(1) {
name "GetAction"
kind CLIENT
status ERROR
errorEvent RemoteTransportException, String
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "GetAction"
"elasticsearch.action" "GetAction"
"elasticsearch.request" "GetRequest"
"elasticsearch.request.indices" indexName
}
}
span(2) {
name "callback"
kind INTERNAL
childOf(span(0))
}
}
}
where:
indexName = "invalid-index"
indexType = "test-type"
id = "1"
callKind | call
"sync" | { indexName, indexType, id -> prepareGetSync(indexName, indexType, id) }
"async" | { indexName, indexType, id -> prepareGetAsync(indexName, indexType, id) }
}
def "test elasticsearch get"() {
setup:
def indexResult = client.admin().indices().prepareCreate(indexName).get()
expect:
indexResult.index() == indexName
when:
def emptyResult = client.prepareGet(indexName, indexType, id).get()
then:
!emptyResult.isExists()
emptyResult.id == id
emptyResult.type == indexType
emptyResult.index == indexName
when:
def createResult = client.prepareIndex(indexName, indexType, id).setSource([:]).get()
then:
createResult.id == id
createResult.type == indexType
createResult.index == indexName
createResult.status().status == 201
when:
def result = client.prepareGet(indexName, indexType, id).get()
then:
result.isExists()
result.id == id
result.type == indexType
result.index == indexName
and:
assertTraces(5) {
// PutMappingAction and IndexAction run in separate threads so their order can vary
traces.subList(2, 4).sort(orderByRootSpanName(
"PutMappingAction", // elasticsearch < 7
"AutoPutMappingAction", // elasticsearch >= 7
"IndexAction"))
trace(0, 1) {
span(0) {
name "CreateIndexAction"
kind CLIENT
attributes {
"$NetworkAttributes.NETWORK_TYPE" { it == "ipv4" || it == "ipv6" }
"$NetworkAttributes.NETWORK_PEER_ADDRESS" tcpPublishAddress.address
"$NetworkAttributes.NETWORK_PEER_PORT" tcpPublishAddress.port
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "CreateIndexAction"
"elasticsearch.action" "CreateIndexAction"
"elasticsearch.request" "CreateIndexRequest"
"elasticsearch.request.indices" indexName
}
}
}
trace(1, 1) {
span(0) {
name "GetAction"
kind CLIENT
attributes {
"$NetworkAttributes.NETWORK_TYPE" { it == "ipv4" || it == "ipv6" }
"$NetworkAttributes.NETWORK_PEER_ADDRESS" tcpPublishAddress.address
"$NetworkAttributes.NETWORK_PEER_PORT" tcpPublishAddress.port
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "GetAction"
"elasticsearch.action" "GetAction"
"elasticsearch.request" "GetRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.type" indexType
"elasticsearch.id" "1"
"elasticsearch.version"(-1)
}
}
}
trace(2, 1) {
span(0) {
name ~/(Auto)?PutMappingAction/
kind CLIENT
attributes {
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" ~/(Auto)?PutMappingAction/
"elasticsearch.action" ~/(Auto)?PutMappingAction/
"elasticsearch.request" "PutMappingRequest"
}
}
}
trace(3, 1) {
span(0) {
name "IndexAction"
kind CLIENT
attributes {
"$NetworkAttributes.NETWORK_TYPE" { it == "ipv4" || it == "ipv6" }
"$NetworkAttributes.NETWORK_PEER_ADDRESS" tcpPublishAddress.address
"$NetworkAttributes.NETWORK_PEER_PORT" tcpPublishAddress.port
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "IndexAction"
"elasticsearch.action" "IndexAction"
"elasticsearch.request" "IndexRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.request.write.type" indexType
"elasticsearch.request.write.version"(-3)
"elasticsearch.response.status" 201
"elasticsearch.shard.replication.total" 2
"elasticsearch.shard.replication.successful" 1
"elasticsearch.shard.replication.failed" 0
}
}
}
trace(4, 1) {
span(0) {
name "GetAction"
kind CLIENT
attributes {
"$NetworkAttributes.NETWORK_TYPE" { it == "ipv4" || it == "ipv6" }
"$NetworkAttributes.NETWORK_PEER_ADDRESS" tcpPublishAddress.address
"$NetworkAttributes.NETWORK_PEER_PORT" tcpPublishAddress.port
"$DbIncubatingAttributes.DB_SYSTEM" "elasticsearch"
"$DbIncubatingAttributes.DB_OPERATION" "GetAction"
"elasticsearch.action" "GetAction"
"elasticsearch.request" "GetRequest"
"elasticsearch.request.indices" indexName
"elasticsearch.type" indexType
"elasticsearch.id" "1"
"elasticsearch.version" 1
}
}
}
}
cleanup:
client.admin().indices().prepareDelete(indexName).get()
where:
indexName = "test-index"
indexType = "test-type"
id = "1"
}
}


@ -1,48 +0,0 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
import org.elasticsearch.common.settings.Settings
import org.elasticsearch.node.InternalSettingsPreparer
import org.elasticsearch.node.Node
import org.elasticsearch.transport.Netty4Plugin
class NodeFactory {
static Node newNode(Settings settings) {
def version = org.elasticsearch.Version.CURRENT
if (version.major >= 7) {
return new NodeV7(settings)
} else if (version.major == 6 && version.minor >= 5) {
return new NodeV65(settings)
}
return new NodeV6(settings)
}
static class NodeV6 extends Node {
NodeV6(Settings settings) {
super(InternalSettingsPreparer.prepareEnvironment(settings, null), [Netty4Plugin])
}
protected void registerDerivedNodeNameWithLogger(String s) {
}
}
static class NodeV65 extends Node {
NodeV65(Settings settings) {
super(InternalSettingsPreparer.prepareEnvironment(settings, null), [Netty4Plugin], true)
}
protected void registerDerivedNodeNameWithLogger(String s) {
}
}
static class NodeV7 extends Node {
NodeV7(Settings settings) {
super(InternalSettingsPreparer.prepareEnvironment(settings, Collections.emptyMap(), null, { "default node name" }), [Netty4Plugin], true)
}
protected void registerDerivedNodeNameWithLogger(String s) {
}
}
}


@ -0,0 +1,12 @@
plugins {
id("otel.java-conventions")
}
dependencies {
api(project(":testing-common"))
implementation("org.elasticsearch.client:transport:6.0.0")
implementation(project(":instrumentation:elasticsearch:elasticsearch-transport-common:testing"))
implementation("org.apache.logging.log4j:log4j-core:2.11.0")
implementation("org.apache.logging.log4j:log4j-api:2.11.0")
}


@ -0,0 +1,101 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v6_0;
import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING;
import io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.AbstractElasticsearchNodeClientTest;
import java.io.File;
import java.util.Collections;
import java.util.UUID;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.Node;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.io.TempDir;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class AbstractElasticsearch6NodeClientTest
extends AbstractElasticsearchNodeClientTest {
private static final Logger logger =
LoggerFactory.getLogger(AbstractElasticsearch6NodeClientTest.class);
private static final String clusterName = UUID.randomUUID().toString();
private Node testNode;
private Client client;
@BeforeAll
void setUp(@TempDir File esWorkingDir) {
logger.info("ES work dir: {}", esWorkingDir);
Settings settings =
Settings.builder()
.put("path.home", esWorkingDir.getPath())
// Since we use listeners to close spans this should make our span closing deterministic
// which is good for tests
.put("thread_pool.listener.size", 1)
.put(CLUSTER_NAME_SETTING.getKey(), clusterName)
.put("discovery.type", "single-node")
.build();
testNode = getNodeFactory().newNode(settings);
startNode(testNode);
client = testNode.client();
testing.runWithSpan(
"setup",
() -> {
// this may potentially create multiple requests and therefore multiple spans, so we wrap
// this call into a top level trace to get exactly one trace in the result.
client
.admin()
.cluster()
.prepareHealth()
.setWaitForYellowStatus()
.execute()
.actionGet(TIMEOUT);
// disable periodic refresh in InternalClusterInfoService as it creates spans that tests
// don't expect
client
.admin()
.cluster()
.updateSettings(
new ClusterUpdateSettingsRequest()
.transientSettings(
Collections.singletonMap(
"cluster.routing.allocation.disk.threshold_enabled", Boolean.FALSE)));
});
testing.waitForTraces(1);
testing.clearData();
}
@AfterAll
void cleanUp() throws Exception {
testNode.close();
}
protected abstract NodeFactory getNodeFactory();
@Override
protected Client client() {
return client;
}
@Override
protected void waitYellowStatus() {
// although the body of this method looks exactly the same as the superclass method, we need to
// have this here because the return type of the execute() method has changed
client()
.admin()
.cluster()
.prepareHealth()
.setWaitForYellowStatus()
.execute()
.actionGet(TIMEOUT);
}
}
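A concrete, version-specific test class then only needs to supply a NodeFactory. The following is a minimal illustrative sketch, not code from this change: the class name Elasticsearch6NodeClientTest and the factory Elasticsearch60NodeFactory (sketched after the NodeFactory interface below) are assumptions about how a module might wire this up.
// Hypothetical subclass shown only to illustrate how the abstract base is extended;
// the names are assumptions, not code taken from this change.
class Elasticsearch6NodeClientTest extends AbstractElasticsearch6NodeClientTest {
  @Override
  protected NodeFactory getNodeFactory() {
    // see the Elasticsearch60NodeFactory sketch after the NodeFactory interface below
    return new Elasticsearch60NodeFactory();
  }
}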


@ -0,0 +1,116 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v6_0;
import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING;
import io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.AbstractElasticsearchTransportClientTest;
import java.io.File;
import java.util.Collections;
import java.util.UUID;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.node.Node;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.io.TempDir;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class AbstractElasticsearch6TransportClientTest
extends AbstractElasticsearchTransportClientTest {
private static final Logger logger =
LoggerFactory.getLogger(AbstractElasticsearch6TransportClientTest.class);
private static final String clusterName = UUID.randomUUID().toString();
private Node testNode;
private TransportAddress tcpPublishAddress;
private TransportClient client;
@BeforeAll
void setUp(@TempDir File esWorkingDir) {
logger.info("ES work dir: {}", esWorkingDir);
Settings settings =
Settings.builder()
.put("path.home", esWorkingDir.getPath())
.put(CLUSTER_NAME_SETTING.getKey(), clusterName)
.put("discovery.type", "single-node")
.build();
testNode = getNodeFactory().newNode(settings);
startNode(testNode);
tcpPublishAddress =
testNode.injector().getInstance(TransportService.class).boundAddress().publishAddress();
client =
new PreBuiltTransportClient(
Settings.builder()
// Since we use listeners to close spans this should make our span closing
// deterministic which is good for tests
.put("thread_pool.listener.size", 1)
.put(CLUSTER_NAME_SETTING.getKey(), clusterName)
.build());
client.addTransportAddress(tcpPublishAddress);
testing.runWithSpan(
"setup",
() -> {
// this may potentially create multiple requests and therefore multiple spans, so we wrap
// this call into a top level trace to get exactly one trace in the result.
client
.admin()
.cluster()
.prepareHealth()
.setWaitForYellowStatus()
.execute()
.actionGet(TIMEOUT);
// disable periodic refresh in InternalClusterInfoService as it creates spans that tests
// don't expect
client
.admin()
.cluster()
.updateSettings(
new ClusterUpdateSettingsRequest()
.transientSettings(
Collections.singletonMap(
"cluster.routing.allocation.disk.threshold_enabled", Boolean.FALSE)));
});
testing.waitForTraces(1);
testing.clearData();
}
@AfterAll
void cleanUp() throws Exception {
testNode.close();
}
protected abstract NodeFactory getNodeFactory();
@Override
protected TransportClient client() {
return client;
}
@Override
protected String getAddress() {
return tcpPublishAddress.getAddress();
}
@Override
protected int getPort() {
return tcpPublishAddress.getPort();
}
@Override
protected boolean hasNetworkType() {
return true;
}
}


@ -0,0 +1,14 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v6_0;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.Node;
public interface NodeFactory {
Node newNode(Settings settings);
}
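For reference, an implementation of this interface compiled against Elasticsearch 6.0 could mirror the NodeV6 variant of the removed Groovy NodeFactory above. The sketch below is illustrative only; the class name Elasticsearch60NodeFactory is an assumption, and the per-version factories in this change may be organized differently.
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport.v6_0;
import java.util.Collections;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.InternalSettingsPreparer;
import org.elasticsearch.node.Node;
import org.elasticsearch.transport.Netty4Plugin;
// Hypothetical factory sketched from the removed Groovy NodeV6 class above.
class Elasticsearch60NodeFactory implements NodeFactory {
  @Override
  public Node newNode(Settings settings) {
    // Node's Environment-based constructor is protected, so an anonymous subclass is created,
    // just as the Groovy NodeV6 subclassed Node.
    return new Node(
        InternalSettingsPreparer.prepareEnvironment(settings, null),
        Collections.singletonList(Netty4Plugin.class)) {};
  }
}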


@ -5,6 +5,10 @@
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport;
import static io.opentelemetry.api.common.AttributeKey.longKey;
import static io.opentelemetry.api.common.AttributeKey.stringKey;
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.common.AttributesBuilder;
import io.opentelemetry.context.Context;
import io.opentelemetry.instrumentation.api.instrumenter.AttributesExtractor;
@ -21,27 +25,62 @@ import org.elasticsearch.action.support.replication.ReplicationResponse;
public class ElasticsearchTransportExperimentalAttributesExtractor
implements AttributesExtractor<ElasticTransportRequest, ActionResponse> {
private static final AttributeKey<String> ELASTICSEARCH_ACTION =
stringKey("elasticsearch.action");
private static final AttributeKey<String> ELASTICSEARCH_REQUEST =
stringKey("elasticsearch.request");
private static final AttributeKey<String> ELASTICSEARCH_REQUEST_INDICES =
stringKey("elasticsearch.request.indices");
private static final AttributeKey<String> ELASTICSEARCH_REQUEST_SEARCH_TYPES =
stringKey("elasticsearch.request.search.types");
private static final AttributeKey<String> ELASTICSEARCH_TYPE = stringKey("elasticsearch.type");
private static final AttributeKey<String> ELASTICSEARCH_ID = stringKey("elasticsearch.id");
private static final AttributeKey<Long> ELASTICSEARCH_VERSION = longKey("elasticsearch.version");
private static final AttributeKey<Long> ELASTICSEARCH_SHARD_BROADCAST_TOTAL =
longKey("elasticsearch.shard.broadcast.total");
private static final AttributeKey<Long> ELASTICSEARCH_SHARD_BROADCAST_SUCCESSFUL =
longKey("elasticsearch.shard.broadcast.successful");
private static final AttributeKey<Long> ELASTICSEARCH_SHARD_BROADCAST_FAILED =
longKey("elasticsearch.shard.broadcast.failed");
private static final AttributeKey<Long> ELASTICSEARCH_SHARD_REPLICATION_TOTAL =
longKey("elasticsearch.shard.replication.total");
private static final AttributeKey<Long> ELASTICSEARCH_SHARD_REPLICATION_SUCCESSFUL =
longKey("elasticsearch.shard.replication.successful");
private static final AttributeKey<Long> ELASTICSEARCH_SHARD_REPLICATION_FAILED =
longKey("elasticsearch.shard.replication.failed");
private static final AttributeKey<Long> ELASTICSEARCH_RESPONSE_STATUS =
longKey("elasticsearch.response.status");
private static final AttributeKey<Long> ELASTICSEARCH_SHARD_BULK_ID =
longKey("elasticsearch.shard.bulk.id");
private static final AttributeKey<String> ELASTICSEARCH_SHARD_BULK_INDEX =
stringKey("elasticsearch.shard.bulk.index");
private static final AttributeKey<Long> ELASTICSEARCH_NODE_FAILURES =
longKey("elasticsearch.node.failures");
private static final AttributeKey<String> ELASTICSEARCH_NODE_CLUSTER_NAME =
stringKey("elasticsearch.node.cluster.name");
@Override
public void onStart(
AttributesBuilder attributes,
Context parentContext,
ElasticTransportRequest transportRequest) {
Object request = transportRequest.getRequest();
attributes.put("elasticsearch.action", transportRequest.getAction().getClass().getSimpleName());
attributes.put("elasticsearch.request", request.getClass().getSimpleName());
attributes.put(ELASTICSEARCH_ACTION, transportRequest.getAction().getClass().getSimpleName());
attributes.put(ELASTICSEARCH_REQUEST, request.getClass().getSimpleName());
if (request instanceof IndicesRequest) {
IndicesRequest req = (IndicesRequest) request;
String[] indices = req.indices();
if (indices != null && indices.length > 0) {
attributes.put("elasticsearch.request.indices", String.join(",", indices));
attributes.put(ELASTICSEARCH_REQUEST_INDICES, String.join(",", indices));
}
}
if (request instanceof SearchRequest) {
SearchRequest req = (SearchRequest) request;
String[] types = req.types();
if (types != null && types.length > 0) {
attributes.put("elasticsearch.request.search.types", String.join(",", types));
attributes.put(ELASTICSEARCH_REQUEST_SEARCH_TYPES, String.join(",", types));
}
}
}
@ -55,43 +94,43 @@ public class ElasticsearchTransportExperimentalAttributesExtractor
@Nullable Throwable error) {
if (response instanceof GetResponse) {
GetResponse resp = (GetResponse) response;
attributes.put("elasticsearch.type", resp.getType());
attributes.put("elasticsearch.id", resp.getId());
attributes.put("elasticsearch.version", resp.getVersion());
attributes.put(ELASTICSEARCH_TYPE, resp.getType());
attributes.put(ELASTICSEARCH_ID, resp.getId());
attributes.put(ELASTICSEARCH_VERSION, resp.getVersion());
}
if (response instanceof BroadcastResponse) {
BroadcastResponse resp = (BroadcastResponse) response;
attributes.put("elasticsearch.shard.broadcast.total", resp.getTotalShards());
attributes.put("elasticsearch.shard.broadcast.successful", resp.getSuccessfulShards());
attributes.put("elasticsearch.shard.broadcast.failed", resp.getFailedShards());
attributes.put(ELASTICSEARCH_SHARD_BROADCAST_TOTAL, resp.getTotalShards());
attributes.put(ELASTICSEARCH_SHARD_BROADCAST_SUCCESSFUL, resp.getSuccessfulShards());
attributes.put(ELASTICSEARCH_SHARD_BROADCAST_FAILED, resp.getFailedShards());
}
if (response instanceof ReplicationResponse) {
ReplicationResponse resp = (ReplicationResponse) response;
attributes.put("elasticsearch.shard.replication.total", resp.getShardInfo().getTotal());
attributes.put(ELASTICSEARCH_SHARD_REPLICATION_TOTAL, resp.getShardInfo().getTotal());
attributes.put(
"elasticsearch.shard.replication.successful", resp.getShardInfo().getSuccessful());
attributes.put("elasticsearch.shard.replication.failed", resp.getShardInfo().getFailed());
ELASTICSEARCH_SHARD_REPLICATION_SUCCESSFUL, resp.getShardInfo().getSuccessful());
attributes.put(ELASTICSEARCH_SHARD_REPLICATION_FAILED, resp.getShardInfo().getFailed());
}
if (response instanceof IndexResponse) {
attributes.put(
"elasticsearch.response.status", ((IndexResponse) response).status().getStatus());
ELASTICSEARCH_RESPONSE_STATUS, ((IndexResponse) response).status().getStatus());
}
if (response instanceof BulkShardResponse) {
BulkShardResponse resp = (BulkShardResponse) response;
attributes.put("elasticsearch.shard.bulk.id", resp.getShardId().getId());
attributes.put("elasticsearch.shard.bulk.index", resp.getShardId().getIndexName());
attributes.put(ELASTICSEARCH_SHARD_BULK_ID, resp.getShardId().getId());
attributes.put(ELASTICSEARCH_SHARD_BULK_INDEX, resp.getShardId().getIndexName());
}
if (response instanceof BaseNodesResponse) {
BaseNodesResponse<?> resp = (BaseNodesResponse<?>) response;
if (resp.hasFailures()) {
attributes.put("elasticsearch.node.failures", resp.failures().size());
attributes.put(ELASTICSEARCH_NODE_FAILURES, resp.failures().size());
}
attributes.put("elasticsearch.node.cluster.name", resp.getClusterName().value());
attributes.put(ELASTICSEARCH_NODE_CLUSTER_NAME, resp.getClusterName().value());
}
}
}
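The diff above replaces ad-hoc string keys with AttributeKey constants that are created once and reused, which avoids rebuilding the key on every call and makes the value type explicit. A tiny standalone illustration of the same pattern follows; the key name and class are invented for the example.
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.common.Attributes;
class AttributeKeyExample {
  // built once and reused; put() is type-checked against the key's value type
  private static final AttributeKey<String> EXAMPLE_ATTRIBUTE =
      AttributeKey.stringKey("example.attribute");
  static Attributes describe(String value) {
    return Attributes.builder().put(EXAMPLE_ATTRIBUTE, value).build();
  }
}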


@ -1,66 +0,0 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
import io.opentelemetry.instrumentation.test.AgentInstrumentationSpecification
import io.opentelemetry.instrumentation.test.InstrumentationSpecification
import org.elasticsearch.action.ActionListener
import org.elasticsearch.transport.RemoteTransportException
import java.util.concurrent.CountDownLatch
import java.util.concurrent.TimeUnit
abstract class AbstractElasticsearchClientTest extends AgentInstrumentationSpecification {
static class Result<RESPONSE> {
CountDownLatch latch = new CountDownLatch(1)
RESPONSE response
Exception failure
void setResponse(RESPONSE response) {
this.response = response
latch.countDown()
}
void setFailure(Exception failure) {
this.failure = failure
latch.countDown()
}
RESPONSE get() {
latch.await(1, TimeUnit.MINUTES)
if (response != null) {
return response
}
throw failure
}
}
static class ResultListener<T> implements ActionListener<T> {
final Result<T> result
final InstrumentationSpecification spec
ResultListener(InstrumentationSpecification spec, Result<T> result) {
this.spec = spec
this.result = result
}
@Override
void onResponse(T response) {
spec.runWithSpan("callback") {
result.setResponse(response)
}
}
@Override
void onFailure(Exception e) {
if (e instanceof RemoteTransportException) {
e = e.getCause()
}
spec.runWithSpan("callback") {
result.setFailure(e)
}
}
}
}


@ -1,42 +0,0 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse
import org.elasticsearch.action.get.GetResponse
import org.elasticsearch.client.Client
import org.elasticsearch.cluster.health.ClusterHealthStatus
abstract class AbstractElasticsearchNodeClientTest extends AbstractElasticsearchClientTest {
abstract Client client()
ClusterHealthStatus clusterHealthSync() {
def result = client().admin().cluster().health(new ClusterHealthRequest())
return runWithSpan("callback") {
result.get().status
}
}
ClusterHealthStatus clusterHealthAsync() {
def result = new Result<ClusterHealthResponse>()
client().admin().cluster().health(new ClusterHealthRequest(), new ResultListener<ClusterHealthResponse>(this, result))
return result.get().status
}
def prepareGetSync(indexName, indexType, id) {
try {
client().prepareGet(indexName, indexType, id).get()
} finally {
runWithSpan("callback") {}
}
}
def prepareGetAsync(indexName, indexType, id) {
def result = new Result<GetResponse>()
client().prepareGet(indexName, indexType, id).execute(new ResultListener<GetResponse>(this, result))
result.get()
}
}


@ -1,42 +0,0 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse
import org.elasticsearch.action.get.GetResponse
import org.elasticsearch.client.transport.TransportClient
import org.elasticsearch.cluster.health.ClusterHealthStatus
abstract class AbstractElasticsearchTransportClientTest extends AbstractElasticsearchClientTest {
abstract TransportClient client()
ClusterHealthStatus clusterHealthSync() {
def result = client().admin().cluster().health(new ClusterHealthRequest())
return runWithSpan("callback") {
result.get().status
}
}
ClusterHealthStatus clusterHealthAsync() {
def result = new Result<ClusterHealthResponse>()
client().admin().cluster().health(new ClusterHealthRequest(), new ResultListener<ClusterHealthResponse>(this, result))
return result.get().status
}
def prepareGetSync(indexName, indexType, id) {
try {
client().prepareGet(indexName, indexType, id).get()
} finally {
runWithSpan("callback") {}
}
}
def prepareGetAsync(indexName, indexType, id) {
def result = new Result<GetResponse>()
client().prepareGet(indexName, indexType, id).execute(new ResultListener<GetResponse>(this, result))
result.get()
}
}


@ -0,0 +1,152 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport;
import static org.awaitility.Awaitility.await;
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.instrumentation.testing.internal.AutoCleanupExtension;
import io.opentelemetry.instrumentation.testing.junit.AgentInstrumentationExtension;
import io.opentelemetry.instrumentation.testing.junit.InstrumentationExtension;
import java.time.Duration;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.http.BindHttpException;
import org.elasticsearch.node.Node;
import org.elasticsearch.transport.BindTransportException;
import org.elasticsearch.transport.RemoteTransportException;
import org.junit.jupiter.api.extension.RegisterExtension;
abstract class AbstractElasticsearchClientTest {
protected static final long TIMEOUT = TimeUnit.SECONDS.toMillis(10);
protected static final AttributeKey<String> ELASTICSEARCH_ACTION =
AttributeKey.stringKey("elasticsearch.action");
protected static final AttributeKey<String> ELASTICSEARCH_REQUEST =
AttributeKey.stringKey("elasticsearch.request");
protected static final AttributeKey<String> ELASTICSEARCH_REQUEST_INDICES =
AttributeKey.stringKey("elasticsearch.request.indices");
protected static final AttributeKey<String> ELASTICSEARCH_TYPE =
AttributeKey.stringKey("elasticsearch.type");
protected static final AttributeKey<String> ELASTICSEARCH_ID =
AttributeKey.stringKey("elasticsearch.id");
protected static final AttributeKey<Long> ELASTICSEARCH_VERSION =
AttributeKey.longKey("elasticsearch.version");
@RegisterExtension
protected static final InstrumentationExtension testing = AgentInstrumentationExtension.create();
@RegisterExtension
protected static final AutoCleanupExtension cleanup = AutoCleanupExtension.create();
protected abstract Client client();
protected static void startNode(Node node) {
// retry when starting elasticsearch fails with
// org.elasticsearch.http.BindHttpException: Failed to resolve host [[]]
// Caused by: java.net.SocketException: No such device (getFlags() failed)
// or
// org.elasticsearch.transport.BindTransportException: Failed to resolve host null
// Caused by: java.net.SocketException: No such device (getFlags() failed)
await()
.atMost(Duration.ofSeconds(10))
.ignoreExceptionsMatching(
it -> it instanceof BindHttpException || it instanceof BindTransportException)
.until(
() -> {
node.start();
return true;
});
}
protected ClusterHealthStatus clusterHealthSync() throws Exception {
ActionFuture<ClusterHealthResponse> result =
client().admin().cluster().health(new ClusterHealthRequest());
return testing.runWithSpan("callback", () -> result.get().getStatus());
}
protected ClusterHealthStatus clusterHealthAsync() {
Result<ClusterHealthResponse> result = new Result<>();
client().admin().cluster().health(new ClusterHealthRequest(), new ResultListener<>(result));
return result.get().getStatus();
}
protected GetResponse prepareGetSync(String indexName, String indexType, String id) {
try {
return client().prepareGet(indexName, indexType, id).get();
} finally {
testing.runWithSpan("callback", () -> {});
}
}
protected GetResponse prepareGetAsync(String indexName, String indexType, String id) {
Result<GetResponse> result = new Result<>();
client().prepareGet(indexName, indexType, id).execute(new ResultListener<>(result));
return result.get();
}
static class Result<RESPONSE> {
private final CountDownLatch latch = new CountDownLatch(1);
private RESPONSE response;
private Throwable failure;
void setResponse(RESPONSE response) {
this.response = response;
latch.countDown();
}
void setFailure(Throwable failure) {
this.failure = failure;
latch.countDown();
}
RESPONSE get() {
try {
latch.await(1, TimeUnit.MINUTES);
} catch (InterruptedException exception) {
Thread.currentThread().interrupt();
}
if (response != null) {
return response;
}
if (failure instanceof RuntimeException) {
throw (RuntimeException) failure;
}
throw new IllegalStateException(failure);
}
}
static class ResultListener<T> implements ActionListener<T> {
final Result<T> result;
ResultListener(Result<T> result) {
this.result = result;
}
@Override
public void onResponse(T response) {
testing.runWithSpan("callback", () -> result.setResponse(response));
}
@Override
public void onFailure(Exception exception) {
Throwable throwable = exception;
if (throwable instanceof RemoteTransportException) {
throwable = throwable.getCause();
}
Throwable finalThrowable = throwable;
testing.runWithSpan("callback", () -> result.setFailure(finalThrowable));
}
}
}


@ -0,0 +1,267 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport;
import static io.opentelemetry.api.common.AttributeKey.longKey;
import static io.opentelemetry.api.common.AttributeKey.stringKey;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.junit.jupiter.api.Named.named;
import io.opentelemetry.api.trace.SpanKind;
import io.opentelemetry.instrumentation.testing.util.ThrowingSupplier;
import io.opentelemetry.sdk.testing.assertj.AttributeAssertion;
import io.opentelemetry.sdk.trace.data.StatusData;
import io.opentelemetry.semconv.incubating.DbIncubatingAttributes;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.stream.Stream;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.index.IndexNotFoundException;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
public abstract class AbstractElasticsearchNodeClientTest extends AbstractElasticsearchClientTest {
private Stream<Arguments> healthArguments() {
return Stream.of(
Arguments.of(
named(
"sync",
(ThrowingSupplier<ClusterHealthStatus, Exception>) this::clusterHealthSync)),
Arguments.of(
named(
"async",
(ThrowingSupplier<ClusterHealthStatus, Exception>) this::clusterHealthAsync)));
}
@ParameterizedTest
@MethodSource("healthArguments")
void elasticsearchStatus(ThrowingSupplier<ClusterHealthStatus, Exception> supplier)
throws Exception {
ClusterHealthStatus clusterHealthStatus = testing.runWithSpan("parent", supplier);
assertThat(clusterHealthStatus.name()).isEqualTo("GREEN");
testing.waitAndAssertTraces(
trace ->
trace.hasSpansSatisfyingExactly(
span -> span.hasName("parent").hasKind(SpanKind.INTERNAL).hasNoParent(),
span ->
span.hasName("ClusterHealthAction")
.hasKind(SpanKind.CLIENT)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "ClusterHealthAction"),
equalTo(ELASTICSEARCH_ACTION, "ClusterHealthAction"),
equalTo(ELASTICSEARCH_REQUEST, "ClusterHealthRequest")),
span ->
span.hasName("callback")
.hasKind(SpanKind.INTERNAL)
.hasParent(trace.getSpan(0))));
}
private Stream<Arguments> errorArguments() {
return Stream.of(
Arguments.of(
named("sync", (Runnable) () -> prepareGetSync("invalid-index", "test-type", "1"))),
Arguments.of(
named("async", (Runnable) () -> prepareGetAsync("invalid-index", "test-type", "1"))));
}
protected String getIndexNotFoundMessage() {
return "no such index";
}
@ParameterizedTest
@MethodSource("errorArguments")
void elasticsearchError(Runnable action) {
IndexNotFoundException expectedException =
new IndexNotFoundException(getIndexNotFoundMessage());
assertThatThrownBy(() -> testing.runWithSpan("parent", action::run))
.isInstanceOf(IndexNotFoundException.class)
.hasMessage(expectedException.getMessage());
testing.waitAndAssertTraces(
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("parent")
.hasKind(SpanKind.INTERNAL)
.hasNoParent()
.hasStatus(StatusData.error())
.hasException(expectedException),
span ->
span.hasName("GetAction")
.hasKind(SpanKind.CLIENT)
.hasParent(trace.getSpan(0))
.hasStatus(StatusData.error())
.hasException(expectedException)
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "GetAction"),
equalTo(ELASTICSEARCH_ACTION, "GetAction"),
equalTo(ELASTICSEARCH_REQUEST, "GetRequest"),
equalTo(ELASTICSEARCH_REQUEST_INDICES, "invalid-index")),
span ->
span.hasName("callback")
.hasKind(SpanKind.INTERNAL)
.hasParent(trace.getSpan(0))));
}
protected void waitYellowStatus() {
client()
.admin()
.cluster()
.prepareHealth()
.setWaitForYellowStatus()
.execute()
.actionGet(TIMEOUT);
}
@Test
void elasticsearchGet() {
String indexName = "test-index";
String indexType = "test-type";
String id = "1";
Client client = client();
CreateIndexResponse indexResult = client.admin().indices().prepareCreate(indexName).get();
assertThat(indexResult.isAcknowledged()).isTrue();
waitYellowStatus();
GetResponse emptyResult = client.prepareGet(indexName, indexType, id).get();
assertThat(emptyResult.isExists()).isFalse();
assertThat(emptyResult.getId()).isEqualTo(id);
assertThat(emptyResult.getType()).isEqualTo(indexType);
assertThat(emptyResult.getIndex()).isEqualTo(indexName);
IndexResponse createResult =
client.prepareIndex(indexName, indexType, id).setSource(Collections.emptyMap()).get();
assertThat(createResult.getId()).isEqualTo(id);
assertThat(createResult.getType()).isEqualTo(indexType);
assertThat(createResult.getIndex()).isEqualTo(indexName);
assertThat(createResult.status().getStatus()).isEqualTo(201);
cleanup.deferCleanup(() -> client.admin().indices().prepareDelete(indexName).get());
GetResponse result = client.prepareGet(indexName, indexType, id).get();
assertThat(result.isExists()).isTrue();
assertThat(result.getId()).isEqualTo(id);
assertThat(result.getType()).isEqualTo(indexType);
assertThat(result.getIndex()).isEqualTo(indexName);
testing.waitAndAssertTraces(
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("CreateIndexAction")
.hasKind(SpanKind.CLIENT)
.hasNoParent()
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "CreateIndexAction"),
equalTo(ELASTICSEARCH_ACTION, "CreateIndexAction"),
equalTo(ELASTICSEARCH_REQUEST, "CreateIndexRequest"),
equalTo(ELASTICSEARCH_REQUEST_INDICES, indexName))),
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("ClusterHealthAction")
.hasKind(SpanKind.CLIENT)
.hasNoParent()
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "ClusterHealthAction"),
equalTo(ELASTICSEARCH_ACTION, "ClusterHealthAction"),
equalTo(ELASTICSEARCH_REQUEST, "ClusterHealthRequest"))),
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("GetAction")
.hasKind(SpanKind.CLIENT)
.hasNoParent()
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "GetAction"),
equalTo(ELASTICSEARCH_ACTION, "GetAction"),
equalTo(ELASTICSEARCH_REQUEST, "GetRequest"),
equalTo(ELASTICSEARCH_REQUEST_INDICES, indexName),
equalTo(ELASTICSEARCH_TYPE, indexType),
equalTo(ELASTICSEARCH_ID, id),
equalTo(ELASTICSEARCH_VERSION, -1))),
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("IndexAction")
.hasKind(SpanKind.CLIENT)
.hasNoParent()
.hasAttributesSatisfyingExactly(
addIndexActionAttributes(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "IndexAction"),
equalTo(ELASTICSEARCH_ACTION, "IndexAction"),
equalTo(ELASTICSEARCH_REQUEST, "IndexRequest"),
equalTo(ELASTICSEARCH_REQUEST_INDICES, indexName),
equalTo(stringKey("elasticsearch.request.write.type"), indexType),
equalTo(longKey("elasticsearch.response.status"), 201),
equalTo(longKey("elasticsearch.shard.replication.total"), 2),
equalTo(longKey("elasticsearch.shard.replication.successful"), 1),
equalTo(longKey("elasticsearch.shard.replication.failed"), 0)))),
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("GetAction")
.hasKind(SpanKind.CLIENT)
.hasNoParent()
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "GetAction"),
equalTo(ELASTICSEARCH_ACTION, "GetAction"),
equalTo(ELASTICSEARCH_REQUEST, "GetRequest"),
equalTo(ELASTICSEARCH_REQUEST_INDICES, indexName),
equalTo(ELASTICSEARCH_TYPE, indexType),
equalTo(ELASTICSEARCH_ID, id),
equalTo(ELASTICSEARCH_VERSION, 1))));
}
protected boolean hasWriteVersion() {
return true;
}
private List<AttributeAssertion> addIndexActionAttributes(AttributeAssertion... assertions) {
List<AttributeAssertion> result = new ArrayList<>(Arrays.asList(assertions));
if (hasWriteVersion()) {
result.add(equalTo(longKey("elasticsearch.request.write.version"), -3));
}
return result;
}
}


@ -0,0 +1,313 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.elasticsearch.transport;
import static io.opentelemetry.api.common.AttributeKey.longKey;
import static io.opentelemetry.api.common.AttributeKey.stringKey;
import static io.opentelemetry.instrumentation.testing.util.TelemetryDataUtil.orderByRootSpanName;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.equalTo;
import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.satisfies;
import static io.opentelemetry.semconv.ExceptionAttributes.EXCEPTION_TYPE;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.junit.jupiter.api.Named.named;
import io.opentelemetry.api.trace.SpanKind;
import io.opentelemetry.instrumentation.testing.util.ThrowingSupplier;
import io.opentelemetry.sdk.testing.assertj.AttributeAssertion;
import io.opentelemetry.sdk.trace.data.StatusData;
import io.opentelemetry.semconv.NetworkAttributes;
import io.opentelemetry.semconv.incubating.DbIncubatingAttributes;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.stream.Stream;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.transport.RemoteTransportException;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
public abstract class AbstractElasticsearchTransportClientTest
extends AbstractElasticsearchClientTest {
protected abstract String getAddress();
protected abstract int getPort();
private Stream<Arguments> healthArguments() {
return Stream.of(
Arguments.of(
named(
"sync",
(ThrowingSupplier<ClusterHealthStatus, Exception>) this::clusterHealthSync)),
Arguments.of(
named(
"async",
(ThrowingSupplier<ClusterHealthStatus, Exception>) this::clusterHealthAsync)));
}
@ParameterizedTest
@MethodSource("healthArguments")
void elasticsearchStatus(ThrowingSupplier<ClusterHealthStatus, Exception> supplier)
throws Exception {
ClusterHealthStatus clusterHealthStatus = testing.runWithSpan("parent", supplier);
assertThat(clusterHealthStatus.name()).isEqualTo("GREEN");
testing.waitAndAssertTraces(
trace ->
trace.hasSpansSatisfyingExactly(
span -> span.hasName("parent").hasKind(SpanKind.INTERNAL).hasNoParent(),
span ->
span.hasName("ClusterHealthAction")
.hasKind(SpanKind.CLIENT)
.hasParent(trace.getSpan(0))
.hasAttributesSatisfyingExactly(
addNetworkTypeAttribute(
equalTo(NetworkAttributes.NETWORK_PEER_ADDRESS, getAddress()),
equalTo(NetworkAttributes.NETWORK_PEER_PORT, getPort()),
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "ClusterHealthAction"),
equalTo(ELASTICSEARCH_ACTION, "ClusterHealthAction"),
equalTo(ELASTICSEARCH_REQUEST, "ClusterHealthRequest"))),
span ->
span.hasName("callback")
.hasKind(SpanKind.INTERNAL)
.hasParent(trace.getSpan(0))));
}
private Stream<Arguments> errorArguments() {
return Stream.of(
Arguments.of(
named("sync", (Runnable) () -> prepareGetSync("invalid-index", "test-type", "1"))),
Arguments.of(
named("async", (Runnable) () -> prepareGetAsync("invalid-index", "test-type", "1"))));
}
protected String getIndexNotFoundMessage() {
return "no such index";
}
@ParameterizedTest
@MethodSource("errorArguments")
void elasticsearchError(Runnable action) {
IndexNotFoundException expectedException =
new IndexNotFoundException(getIndexNotFoundMessage());
assertThatThrownBy(() -> testing.runWithSpan("parent", action::run))
.isInstanceOf(IndexNotFoundException.class)
.hasMessage(expectedException.getMessage());
testing.waitAndAssertTraces(
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("parent")
.hasKind(SpanKind.INTERNAL)
.hasNoParent()
.hasStatus(StatusData.error())
.hasException(expectedException),
span ->
span.hasName("GetAction")
.hasKind(SpanKind.CLIENT)
.hasParent(trace.getSpan(0))
.hasStatus(StatusData.error())
.hasEventsSatisfyingExactly(
event ->
event
.hasName("exception")
.hasAttributesSatisfying(
equalTo(
EXCEPTION_TYPE,
RemoteTransportException.class.getName())))
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "GetAction"),
equalTo(ELASTICSEARCH_ACTION, "GetAction"),
equalTo(ELASTICSEARCH_REQUEST, "GetRequest"),
equalTo(ELASTICSEARCH_REQUEST_INDICES, "invalid-index")),
span ->
span.hasName("callback")
.hasKind(SpanKind.INTERNAL)
.hasParent(trace.getSpan(0))));
}
protected String getPutMappingActionName() {
return "PutMappingAction";
}
@Test
void elasticsearchGet() {
String indexName = "test-index";
String indexType = "test-type";
String id = "1";
Client client = client();
CreateIndexResponse indexResult = client.admin().indices().prepareCreate(indexName).get();
assertThat(indexResult.isAcknowledged()).isTrue();
GetResponse emptyResult = client.prepareGet(indexName, indexType, id).get();
assertThat(emptyResult.isExists()).isFalse();
assertThat(emptyResult.getId()).isEqualTo(id);
assertThat(emptyResult.getType()).isEqualTo(indexType);
assertThat(emptyResult.getIndex()).isEqualTo(indexName);
IndexResponse createResult =
client.prepareIndex(indexName, indexType, id).setSource(Collections.emptyMap()).get();
assertThat(createResult.getId()).isEqualTo(id);
assertThat(createResult.getType()).isEqualTo(indexType);
assertThat(createResult.getIndex()).isEqualTo(indexName);
assertThat(createResult.status().getStatus()).isEqualTo(201);
cleanup.deferCleanup(() -> client.admin().indices().prepareDelete(indexName).get());
GetResponse result = client.prepareGet(indexName, indexType, id).get();
assertThat(result.isExists()).isTrue();
assertThat(result.getId()).isEqualTo(id);
assertThat(result.getType()).isEqualTo(indexType);
assertThat(result.getIndex()).isEqualTo(indexName);
// PutMappingAction and IndexAction run in separate threads so their order can vary
testing.waitAndAssertSortedTraces(
orderByRootSpanName(
"CreateIndexAction", getPutMappingActionName(), "IndexAction", "GetAction"),
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("CreateIndexAction")
.hasKind(SpanKind.CLIENT)
.hasNoParent()
.hasAttributesSatisfyingExactly(
addNetworkTypeAttribute(
equalTo(NetworkAttributes.NETWORK_PEER_ADDRESS, getAddress()),
equalTo(NetworkAttributes.NETWORK_PEER_PORT, getPort()),
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "CreateIndexAction"),
equalTo(ELASTICSEARCH_ACTION, "CreateIndexAction"),
equalTo(ELASTICSEARCH_REQUEST, "CreateIndexRequest"),
equalTo(ELASTICSEARCH_REQUEST_INDICES, indexName)))),
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName(getPutMappingActionName())
.hasKind(SpanKind.CLIENT)
.hasNoParent()
.hasAttributesSatisfyingExactly(
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, getPutMappingActionName()),
equalTo(ELASTICSEARCH_ACTION, getPutMappingActionName()),
equalTo(ELASTICSEARCH_REQUEST, "PutMappingRequest"))),
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("IndexAction")
.hasKind(SpanKind.CLIENT)
.hasNoParent()
.hasAttributesSatisfyingExactly(
addIndexActionAttributes(
equalTo(NetworkAttributes.NETWORK_PEER_ADDRESS, getAddress()),
equalTo(NetworkAttributes.NETWORK_PEER_PORT, getPort()),
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "IndexAction"),
equalTo(ELASTICSEARCH_ACTION, "IndexAction"),
equalTo(ELASTICSEARCH_REQUEST, "IndexRequest"),
equalTo(ELASTICSEARCH_REQUEST_INDICES, indexName),
equalTo(stringKey("elasticsearch.request.write.type"), indexType),
equalTo(longKey("elasticsearch.response.status"), 201),
equalTo(longKey("elasticsearch.shard.replication.total"), 2),
equalTo(longKey("elasticsearch.shard.replication.successful"), 1),
equalTo(longKey("elasticsearch.shard.replication.failed"), 0)))),
// moved here by sorting, chronologically happens before PutMappingAction
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("GetAction")
.hasKind(SpanKind.CLIENT)
.hasNoParent()
.hasAttributesSatisfyingExactly(
addNetworkTypeAttribute(
equalTo(NetworkAttributes.NETWORK_PEER_ADDRESS, getAddress()),
equalTo(NetworkAttributes.NETWORK_PEER_PORT, getPort()),
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "GetAction"),
equalTo(ELASTICSEARCH_ACTION, "GetAction"),
equalTo(ELASTICSEARCH_REQUEST, "GetRequest"),
equalTo(ELASTICSEARCH_REQUEST_INDICES, indexName),
equalTo(ELASTICSEARCH_TYPE, indexType),
equalTo(ELASTICSEARCH_ID, id),
equalTo(ELASTICSEARCH_VERSION, -1)))),
trace ->
trace.hasSpansSatisfyingExactly(
span ->
span.hasName("GetAction")
.hasKind(SpanKind.CLIENT)
.hasNoParent()
.hasAttributesSatisfyingExactly(
addNetworkTypeAttribute(
equalTo(NetworkAttributes.NETWORK_PEER_ADDRESS, getAddress()),
equalTo(NetworkAttributes.NETWORK_PEER_PORT, getPort()),
equalTo(
DbIncubatingAttributes.DB_SYSTEM,
DbIncubatingAttributes.DbSystemValues.ELASTICSEARCH),
equalTo(DbIncubatingAttributes.DB_OPERATION, "GetAction"),
equalTo(ELASTICSEARCH_ACTION, "GetAction"),
equalTo(ELASTICSEARCH_REQUEST, "GetRequest"),
equalTo(ELASTICSEARCH_REQUEST_INDICES, indexName),
equalTo(ELASTICSEARCH_TYPE, indexType),
equalTo(ELASTICSEARCH_ID, id),
equalTo(ELASTICSEARCH_VERSION, 1)))));
}
protected boolean hasNetworkType() {
return false;
}
private List<AttributeAssertion> addNetworkTypeAttribute(AttributeAssertion... assertions) {
List<AttributeAssertion> result = new ArrayList<>(Arrays.asList(assertions));
if (hasNetworkType()) {
result.add(
satisfies(
NetworkAttributes.NETWORK_TYPE,
k ->
k.satisfiesAnyOf(
val -> assertThat(val).isEqualTo("ipv4"),
val -> assertThat(val).isEqualTo("ipv6"))));
}
return result;
}
protected boolean hasWriteVersion() {
return true;
}
private List<AttributeAssertion> addIndexActionAttributes(AttributeAssertion... assertions) {
List<AttributeAssertion> result = new ArrayList<>(addNetworkTypeAttribute(assertions));
if (hasWriteVersion()) {
result.add(equalTo(longKey("elasticsearch.request.write.version"), -3));
}
return result;
}
}


@ -250,6 +250,7 @@ include(":instrumentation:elasticsearch:elasticsearch-rest-common:library")
include(":instrumentation:elasticsearch:elasticsearch-transport-5.0:javaagent")
include(":instrumentation:elasticsearch:elasticsearch-transport-5.3:javaagent")
include(":instrumentation:elasticsearch:elasticsearch-transport-6.0:javaagent")
include(":instrumentation:elasticsearch:elasticsearch-transport-6.0:testing")
include(":instrumentation:elasticsearch:elasticsearch-transport-common:javaagent")
include(":instrumentation:elasticsearch:elasticsearch-transport-common:testing")
include(":instrumentation:executors:bootstrap")