commit 23d3b75555

Merge pull request #1211 from DataDog/profiling-release

Support JFR profiling
@@ -14,11 +14,13 @@ examples/**/build/
 
 # Eclipse #
 ###########
-bin/
 *.launch
 .settings
 .project
 .classpath
+# Eclipse is odd in assuming it can use bin to put temp files into it
+# This assumes we do not have sub-projects that actually need bin files committed
+*/bin/
 
 # OS generated files #
 ######################
@@ -56,3 +58,6 @@ hs_err_pid*
 replay_pid*
 
 !dd-java-agent/benchmark/releases/*.jar
+
+# Magic for local JMC build
+/vendor/jmc-libs
@@ -42,6 +42,7 @@ public class Agent {
   private static ClassLoader BOOTSTRAP_PROXY = null;
   private static ClassLoader AGENT_CLASSLOADER = null;
   private static ClassLoader JMXFETCH_CLASSLOADER = null;
+  private static ClassLoader PROFILING_CLASSLOADER = null;
 
   public static void start(final Instrumentation inst, final URL bootstrapURL) {
     createParentClassloader(bootstrapURL);
@@ -80,6 +81,18 @@ public class Agent {
     } else {
       installDatadogTracer();
     }
+
+    /*
+     * A similar thing happens with the Profiler on (at least) zulu-8 because it uses OkHttp, which
+     * indirectly loads JFR events, which in turn load LogManager. This is not a problem on newer
+     * JDKs because JFR uses a different logging facility there.
+     */
+    if (isJavaBefore9() && appUsingCustomLogManager) {
+      log.debug("Custom logger detected. Delaying Profiling Agent startup.");
+      registerLogManagerCallback(new StartProfilingAgentCallback(inst, bootstrapURL));
+    } else {
+      startProfilingAgent(bootstrapURL);
+    }
   }
 
   private static void registerLogManagerCallback(final ClassLoadCallBack callback) {
@@ -163,6 +176,22 @@ public class Agent {
     }
   }
 
+  protected static class StartProfilingAgentCallback extends ClassLoadCallBack {
+    StartProfilingAgentCallback(final Instrumentation inst, final URL bootstrapURL) {
+      super(bootstrapURL);
+    }
+
+    @Override
+    public String getName() {
+      return "datadog-profiler";
+    }
+
+    @Override
+    public void execute() {
+      startProfilingAgent(bootstrapURL);
+    }
+  }
+
   private static synchronized void createParentClassloader(final URL bootstrapURL) {
     if (PARENT_CLASSLOADER == null) {
       try {
@@ -246,6 +275,32 @@ public class Agent {
     }
   }
 
+  private static synchronized void startProfilingAgent(final URL bootstrapURL) {
+    if (PROFILING_CLASSLOADER == null) {
+      final ClassLoader contextLoader = Thread.currentThread().getContextClassLoader();
+      try {
+        final ClassLoader profilingClassLoader =
+            createDatadogClassLoader("agent-profiling.isolated", bootstrapURL, PARENT_CLASSLOADER);
+        Thread.currentThread().setContextClassLoader(profilingClassLoader);
+        final Class<?> profilingAgentClass =
+            profilingClassLoader.loadClass("com.datadog.profiling.agent.ProfilingAgent");
+        final Method profilingInstallerMethod = profilingAgentClass.getMethod("run");
+        profilingInstallerMethod.invoke(null);
+        PROFILING_CLASSLOADER = profilingClassLoader;
+      } catch (final ClassFormatError e) {
+        /*
+         * Profiling is compiled for Java8. Loading it on Java7 results in ClassFormatError
+         * (more specifically UnsupportedClassVersionError). Just ignore and continue when this
+         * happens.
+         */
+        log.error("Cannot start profiling agent ", e);
+      } catch (final Throwable ex) {
+        log.error("Throwable thrown while starting profiling agent", ex);
+      } finally {
+        Thread.currentThread().setContextClassLoader(contextLoader);
+      }
+    }
+  }
+
   private static void configureLogger() {
     setSystemPropertyDefault(SIMPLE_LOGGER_SHOW_DATE_TIME_PROPERTY, "true");
     setSystemPropertyDefault(
@@ -0,0 +1,46 @@
plugins {
  id "com.github.johnrengelman.shadow"
}

// Set properties before any plugins get loaded
ext {
  enableJunitPlatform = true
  minJavaVersionForTests = JavaVersion.VERSION_1_8
}

apply from: "${rootDir}/gradle/java.gradle"
// We do not publish a separate jar, but having a version file is useful
apply from: "${rootDir}/gradle/version.gradle"

dependencies {
  compile deps.slf4j
  compile project(':dd-trace-api')

  compile project(':dd-java-agent:agent-profiling:profiling-uploader')
  compile project(':dd-java-agent:agent-profiling:profiling-controller')
  compile project(':dd-java-agent:agent-profiling:profiling-controller-openjdk')
}

sourceCompatibility = JavaVersion.VERSION_1_8
targetCompatibility = JavaVersion.VERSION_1_8

configurations {
  // exclude bootstrap dependencies from shadowJar
  runtime.exclude module: deps.opentracing
  runtime.exclude module: deps.slf4j
  runtime.exclude group: 'org.slf4j'
  runtime.exclude group: 'io.opentracing'
}

shadowJar {
  dependencies deps.sharedInverse
  dependencies {
    exclude(project(':dd-java-agent:agent-bootstrap'))
    exclude(project(':dd-trace-api'))
    exclude(dependency('org.slf4j::'))
  }
}

jar {
  classifier = 'unbundled'
}
@@ -0,0 +1,51 @@
// Set properties before any plugins get loaded
ext {
  minJavaVersionForTests = JavaVersion.VERSION_11
}

apply from: "${rootDir}/gradle/java.gradle"
apply plugin: 'idea'

dependencies {
  compile deps.slf4j
  compile project(':dd-trace-api')
  compile project(':dd-java-agent:agent-profiling:profiling-controller')

  testCompile deps.junit5
  testCompile group: 'org.mockito', name: 'mockito-core', version: '3.1.0'
  testCompile group: 'org.mockito', name: 'mockito-junit-jupiter', version: '3.1.0'
  // The Mockito dependency above pulls in an older version of ByteBuddy that fails to work on
  // java13, so force the correct version here. Note: we can remove this once Mockito upgrades.
  testCompile deps.bytebuddy
  testCompile deps.bytebuddyagent
  testCompile group: 'org.hamcrest', name: 'hamcrest', version: '2.1'
}

/*
Setup here is as follows:
* We compile with the Java11 compiler to get JFR definitions.
* We specify source/target as Java8 to get code that is loadable on Java8 - JFR defs are Java8 compatible.
* We force IDEA to treat this as a Java11 project with the 'idea' plugin below.
* We run tests only on Java11+.
*/
sourceCompatibility = JavaVersion.VERSION_1_8
targetCompatibility = JavaVersion.VERSION_1_8

[JavaCompile, GroovyCompile].each {
  tasks.withType(it) {
    doFirst {
      // Disable '-processing' because some annotations are not claimed.
      // Disable '-options' because we are compiling for java8 without specifying bootstrap - intentionally.
      // Disable '-path' because some of the paths seem to be missing.
      options.compilerArgs.addAll(['-Xlint:all,-processing,-options,-path', '-Werror'])
      options.fork = true
      options.forkOptions.javaHome = file(System.env.JAVA_11_HOME)
    }
  }
}

idea {
  module {
    jdkName = '11'
  }
}
@@ -0,0 +1,69 @@
/*
 * Copyright 2019 Datadog
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.datadog.profiling.controller.openjdk;

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;

/**
 * Toolkit for working with .jfp files. A .jfp file is a .jfc file which has been transformed (using
 * the XSLT in the template-transformer project). It contains the same event settings as the
 * template, but in a format that is easier to handle in the profiling agent, not requiring us to
 * parse XML.
 */
final class JfpUtils {
  private JfpUtils() {
    throw new UnsupportedOperationException("Toolkit!");
  }

  private static Map<String, String> readJfpFile(final InputStream stream) throws IOException {
    if (stream == null) {
      throw new IllegalArgumentException("Cannot read jfp file from empty stream!");
    }
    final Properties props = new Properties();
    props.load(stream);
    final Map<String, String> map = new HashMap<>();
    for (final Entry<Object, Object> o : props.entrySet()) {
      map.put(String.valueOf(o.getKey()), String.valueOf(o.getValue()));
    }
    return map;
  }

  private static InputStream getNamedResource(final String name) {
    return JfpUtils.class.getClassLoader().getResourceAsStream(name);
  }

  public static Map<String, String> readNamedJfpResource(
      final String name, final String overridesFile) throws IOException {
    final Map<String, String> result;
    try (final InputStream stream = getNamedResource(name)) {
      result = readJfpFile(stream);
    }

    if (overridesFile != null) {
      try (final InputStream stream = new FileInputStream(overridesFile)) {
        result.putAll(readJfpFile(stream));
      }
    }
    return Collections.unmodifiableMap(result);
  }
}
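Since a .jfp file is an ordinary Java properties file of event settings, loading the bundled template and layering an optional override file reduces to two Properties loads merged into one map. A minimal usage sketch of the utility above ("jfr/dd.jfp" is the bundled resource named by OpenJdkController.JFP; the override path is made up for illustration):

    // Sketch: merge the bundled template with a local override file (path is hypothetical).
    final Map<String, String> settings =
        JfpUtils.readNamedJfpResource("jfr/dd.jfp", "/tmp/my-overrides.jfp");
    // Entries use the "event#attribute=value" form, e.g.:
    settings.get("jdk.ThreadAllocationStatistics#enabled"); // -> "true"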
@@ -0,0 +1,69 @@
/*
 * Copyright 2019 Datadog
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.datadog.profiling.controller.openjdk;

import com.datadog.profiling.controller.ConfigurationException;
import com.datadog.profiling.controller.Controller;
import datadog.trace.api.Config;
import java.io.IOException;
import java.time.Duration;
import java.util.Map;
import jdk.jfr.Recording;

/**
 * This is the implementation of the controller for OpenJDK. It should work for JDK 11+ today, and
 * unmodified for JDK 8+ once JFR has been back-ported. The Oracle JDK implementation will be far
 * messier... ;)
 */
public final class OpenJdkController implements Controller {
  // Visible for testing
  static final String JFP = "jfr/dd.jfp";
  static final int RECORDING_MAX_SIZE = 64 * 1024 * 1024; // 64 megs
  static final Duration RECORDING_MAX_AGE = Duration.ofMinutes(5);

  private final Map<String, String> recordingSettings;

  /**
   * Main constructor for OpenJDK profiling controller.
   *
   * <p>This has to be public because it is created via reflection
   */
  public OpenJdkController(final Config config)
      throws ConfigurationException, ClassNotFoundException {
    // Make sure we can load JFR classes before declaring that we have successfully created
    // factory and can use it.
    Class.forName("jdk.jfr.Recording");
    Class.forName("jdk.jfr.FlightRecorder");

    try {
      recordingSettings =
          JfpUtils.readNamedJfpResource(JFP, config.getProfilingTemplateOverrideFile());
    } catch (final IOException e) {
      throw new ConfigurationException(e);
    }
  }

  @Override
  public OpenJdkOngoingRecording createRecording(final String recordingName) {
    final Recording recording = new Recording();
    recording.setName(recordingName);
    recording.setSettings(recordingSettings);
    recording.setMaxSize(RECORDING_MAX_SIZE);
    recording.setMaxAge(RECORDING_MAX_AGE);
    recording.start();
    return new OpenJdkOngoingRecording(recording);
  }
}
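For orientation, here is a hedged end-to-end sketch of the controller API as it appears in this change, run on a JFR-capable JVM (JDK 11+). Config.get() is assumed to be available from dd-trace-api, and checked-exception handling is elided:

    // Sketch: record for a moment, then read back the binary JFR stream.
    final Controller controller = new OpenJdkController(Config.get());
    final OngoingRecording ongoing = controller.createRecording("example-recording");
    Thread.sleep(1000); // let some events accumulate
    final RecordingData data = ongoing.stop(); // stops the underlying jdk.jfr.Recording
    try (final InputStream jfrStream = data.getStream()) {
      // jfrStream now contains JFR data for the captured interval
    } finally {
      data.release(); // closes the recording and frees its buffers
    }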
@@ -0,0 +1,42 @@
package com.datadog.profiling.controller.openjdk;

import com.datadog.profiling.controller.OngoingRecording;
import java.time.Instant;
import jdk.jfr.FlightRecorder;
import jdk.jfr.Recording;
import jdk.jfr.RecordingState;

public class OpenJdkOngoingRecording implements OngoingRecording {

  private final Recording recording;

  OpenJdkOngoingRecording(final Recording recording) {
    this.recording = recording;
  }

  @Override
  public OpenJdkRecordingData stop() {
    if (recording.getState() != RecordingState.RUNNING) {
      throw new IllegalStateException("Cannot stop recording that is not running");
    }

    recording.stop();
    return new OpenJdkRecordingData(recording);
  }

  @Override
  public OpenJdkRecordingData snapshot(final Instant start, final Instant end) {
    if (recording.getState() != RecordingState.RUNNING) {
      throw new IllegalStateException("Cannot snapshot recording that is not running");
    }

    final Recording snapshot = FlightRecorder.getFlightRecorder().takeSnapshot();
    snapshot.setName(recording.getName()); // Copy name from original recording
    return new OpenJdkRecordingData(snapshot, start, end);
  }

  @Override
  public void close() {
    recording.close();
  }
}
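One subtlety worth noting: jdk.jfr has no per-recording snapshot, so snapshot() takes a global FlightRecorder.getFlightRecorder().takeSnapshot() and relies on the start/end instants to clip the data when it is read back via Recording.getStream(start, end) in OpenJdkRecordingData. An illustration of that clipping (the 'ongoing' variable and the times are hypothetical):

    // Sketch: the snapshot buffers everything; the interval is applied on read.
    final Instant start = Instant.now().minusSeconds(60);
    final Instant end = Instant.now();
    final OpenJdkRecordingData data = ongoing.snapshot(start, end);
    try (final InputStream stream = data.getStream()) { // delegates to getStream(start, end)
      // only events within [start, end] are streamed
    } finally {
      data.release();
    }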
@@ -0,0 +1,75 @@
/*
 * Copyright 2019 Datadog
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.datadog.profiling.controller.openjdk;

import com.datadog.profiling.controller.RecordingData;
import java.io.IOException;
import java.io.InputStream;
import java.time.Instant;
import jdk.jfr.Recording;

/** Implementation for profiling recordings. */
public class OpenJdkRecordingData implements RecordingData {

  private final Recording recording;
  private final Instant start;
  private final Instant end;

  OpenJdkRecordingData(final Recording recording) {
    this(recording, recording.getStartTime(), recording.getStopTime());
  }

  OpenJdkRecordingData(final Recording recording, final Instant start, final Instant end) {
    this.recording = recording;
    this.start = start;
    this.end = end;
  }

  @Override
  public InputStream getStream() throws IOException {
    return recording.getStream(start, end);
  }

  @Override
  public void release() {
    recording.close();
  }

  @Override
  public String getName() {
    return recording.getName();
  }

  @Override
  public String toString() {
    return "OpenJdkRecording: " + getName();
  }

  @Override
  public Instant getStart() {
    return start;
  }

  @Override
  public Instant getEnd() {
    return end;
  }

  // Visible for testing
  Recording getRecording() {
    return recording;
  }
}
@@ -0,0 +1,243 @@
jdk.ThreadAllocationStatistics#enabled=true
jdk.ThreadAllocationStatistics#period=everyChunk
jdk.ClassLoadingStatistics#enabled=true
jdk.ClassLoadingStatistics#period=1000 ms
jdk.ClassLoaderStatistics#enabled=true
jdk.ClassLoaderStatistics#period=everyChunk
jdk.JavaThreadStatistics#enabled=true
jdk.JavaThreadStatistics#period=1000 ms
jdk.ThreadStart#enabled=true
jdk.ThreadEnd#enabled=true
jdk.ThreadSleep#enabled=true
jdk.ThreadSleep#stackTrace=true
jdk.ThreadSleep#threshold=10 ms
jdk.ThreadPark#enabled=true
jdk.ThreadPark#stackTrace=true
jdk.ThreadPark#threshold=10 ms
jdk.JavaMonitorEnter#enabled=true
jdk.JavaMonitorEnter#stackTrace=true
jdk.JavaMonitorEnter#threshold=10 ms
jdk.JavaMonitorWait#enabled=true
jdk.JavaMonitorWait#stackTrace=true
jdk.JavaMonitorWait#threshold=10 ms
jdk.JavaMonitorInflate#enabled=true
jdk.JavaMonitorInflate#stackTrace=true
jdk.JavaMonitorInflate#threshold=10 ms
jdk.BiasedLockRevocation#enabled=true
jdk.BiasedLockRevocation#stackTrace=true
jdk.BiasedLockRevocation#threshold=0 ms
jdk.BiasedLockSelfRevocation#enabled=true
jdk.BiasedLockSelfRevocation#stackTrace=true
jdk.BiasedLockSelfRevocation#threshold=0 ms
jdk.BiasedLockClassRevocation#enabled=true
jdk.BiasedLockClassRevocation#stackTrace=true
jdk.BiasedLockClassRevocation#threshold=0 ms
jdk.ReservedStackActivation#enabled=true
jdk.ReservedStackActivation#stackTrace=true
jdk.ClassLoad#enabled=false
jdk.ClassLoad#stackTrace=true
jdk.ClassLoad#threshold=0 ms
jdk.ClassDefine#enabled=false
jdk.ClassDefine#stackTrace=true
jdk.ClassUnload#enabled=false
jdk.JVMInformation#enabled=true
jdk.JVMInformation#period=beginChunk
jdk.InitialSystemProperty#enabled=true
jdk.InitialSystemProperty#period=beginChunk
jdk.ExecutionSample#enabled=true
# Note: we use a 9 ms sampling rate in the hope of avoiding 'lockstep' sampling.
# Ideally JFR should provide random jitter in sampling to ensure this doesn't happen.
jdk.ExecutionSample#period=9 ms
jdk.NativeMethodSample#enabled=true
jdk.NativeMethodSample#period=9 ms
jdk.SafepointBegin#enabled=true
jdk.SafepointBegin#threshold=0 ms
jdk.SafepointStateSynchronization#enabled=false
jdk.SafepointStateSynchronization#threshold=0 ms
jdk.SafepointWaitBlocked#enabled=false
jdk.SafepointWaitBlocked#threshold=0 ms
jdk.SafepointCleanup#enabled=false
jdk.SafepointCleanup#threshold=0 ms
jdk.SafepointCleanupTask#enabled=false
jdk.SafepointCleanupTask#threshold=0 ms
jdk.SafepointEnd#enabled=false
jdk.SafepointEnd#threshold=0 ms
jdk.ExecuteVMOperation#enabled=true
jdk.ExecuteVMOperation#threshold=0 ms
jdk.Shutdown#enabled=true
jdk.Shutdown#stackTrace=true
jdk.ThreadDump#enabled=false
jdk.ThreadDump#period=60 s
jdk.IntFlag#enabled=true
jdk.IntFlag#period=beginChunk
jdk.UnsignedIntFlag#enabled=true
jdk.UnsignedIntFlag#period=beginChunk
jdk.LongFlag#enabled=true
jdk.LongFlag#period=beginChunk
jdk.UnsignedLongFlag#enabled=true
jdk.UnsignedLongFlag#period=beginChunk
jdk.DoubleFlag#enabled=true
jdk.DoubleFlag#period=beginChunk
jdk.BooleanFlag#enabled=true
jdk.BooleanFlag#period=beginChunk
jdk.StringFlag#enabled=true
jdk.StringFlag#period=beginChunk
jdk.IntFlagChanged#enabled=true
jdk.UnsignedIntFlagChanged#enabled=true
jdk.LongFlagChanged#enabled=true
jdk.UnsignedLongFlagChanged#enabled=true
jdk.DoubleFlagChanged#enabled=true
jdk.BooleanFlagChanged#enabled=true
jdk.StringFlagChanged#enabled=true
jdk.ObjectCount#enabled=false
jdk.ObjectCount#period=everyChunk
jdk.GCConfiguration#enabled=true
jdk.GCConfiguration#period=everyChunk
jdk.GCHeapConfiguration#enabled=true
jdk.GCHeapConfiguration#period=beginChunk
jdk.YoungGenerationConfiguration#enabled=true
jdk.YoungGenerationConfiguration#period=beginChunk
jdk.GCTLABConfiguration#enabled=true
jdk.GCTLABConfiguration#period=beginChunk
jdk.GCSurvivorConfiguration#enabled=true
jdk.GCSurvivorConfiguration#period=beginChunk
jdk.ObjectCountAfterGC#enabled=false
jdk.GCHeapSummary#enabled=true
jdk.PSHeapSummary#enabled=true
jdk.G1HeapSummary#enabled=true
jdk.MetaspaceSummary#enabled=true
jdk.MetaspaceGCThreshold#enabled=true
jdk.MetaspaceAllocationFailure#enabled=true
jdk.MetaspaceAllocationFailure#stackTrace=true
jdk.MetaspaceOOM#enabled=true
jdk.MetaspaceOOM#stackTrace=true
jdk.MetaspaceChunkFreeListSummary#enabled=true
jdk.GarbageCollection#enabled=true
jdk.GarbageCollection#threshold=0 ms
jdk.ParallelOldGarbageCollection#enabled=true
jdk.ParallelOldGarbageCollection#threshold=0 ms
jdk.YoungGarbageCollection#enabled=true
jdk.YoungGarbageCollection#threshold=0 ms
jdk.OldGarbageCollection#enabled=true
jdk.OldGarbageCollection#threshold=0 ms
jdk.G1GarbageCollection#enabled=true
jdk.G1GarbageCollection#threshold=0 ms
jdk.GCPhasePause#enabled=true
jdk.GCPhasePause#threshold=0 ms
jdk.GCPhasePauseLevel1#enabled=true
jdk.GCPhasePauseLevel1#threshold=0 ms
jdk.GCPhasePauseLevel2#enabled=true
jdk.GCPhasePauseLevel2#threshold=0 ms
jdk.GCPhasePauseLevel3#enabled=false
jdk.GCPhasePauseLevel3#threshold=0 ms
jdk.GCPhasePauseLevel4#enabled=false
jdk.GCPhasePauseLevel4#threshold=0 ms
jdk.GCPhaseConcurrent#enabled=true
jdk.GCPhaseConcurrent#threshold=0 ms
jdk.GCReferenceStatistics#enabled=true
jdk.PromotionFailed#enabled=true
jdk.EvacuationFailed#enabled=true
jdk.EvacuationInformation#enabled=true
jdk.G1MMU#enabled=true
jdk.G1EvacuationYoungStatistics#enabled=true
jdk.G1EvacuationOldStatistics#enabled=true
jdk.G1BasicIHOP#enabled=true
jdk.G1AdaptiveIHOP#enabled=true
jdk.PromoteObjectInNewPLAB#enabled=false
jdk.PromoteObjectOutsidePLAB#enabled=false
jdk.ConcurrentModeFailure#enabled=true
jdk.AllocationRequiringGC#enabled=false
jdk.AllocationRequiringGC#stackTrace=true
jdk.TenuringDistribution#enabled=true
jdk.G1HeapRegionInformation#enabled=false
jdk.G1HeapRegionInformation#period=everyChunk
jdk.G1HeapRegionTypeChange#enabled=false
jdk.OldObjectSample#enabled=false
jdk.OldObjectSample#stackTrace=true
jdk.OldObjectSample#cutoff=900 ms
jdk.CompilerConfiguration#enabled=true
jdk.CompilerConfiguration#period=beginChunk
jdk.CompilerStatistics#enabled=true
jdk.CompilerStatistics#period=everyChunk
jdk.Compilation#enabled=true
jdk.Compilation#threshold=100 ms
jdk.CompilerPhase#enabled=true
jdk.CompilerPhase#threshold=10 s
jdk.CompilationFailure#enabled=true
jdk.CompilerInlining#enabled=false
jdk.CodeSweeperConfiguration#enabled=true
jdk.CodeSweeperConfiguration#period=beginChunk
jdk.CodeSweeperStatistics#enabled=true
jdk.CodeSweeperStatistics#period=everyChunk
jdk.SweepCodeCache#enabled=true
jdk.SweepCodeCache#threshold=100 ms
jdk.CodeCacheConfiguration#enabled=true
jdk.CodeCacheConfiguration#period=beginChunk
jdk.CodeCacheStatistics#enabled=true
jdk.CodeCacheStatistics#period=everyChunk
jdk.CodeCacheFull#enabled=true
jdk.OSInformation#enabled=true
jdk.OSInformation#period=beginChunk
jdk.CPUInformation#enabled=true
jdk.CPUInformation#period=beginChunk
jdk.ThreadContextSwitchRate#enabled=true
jdk.ThreadContextSwitchRate#period=10 s
jdk.CPULoad#enabled=true
jdk.CPULoad#period=1000 ms
jdk.ThreadCPULoad#enabled=true
jdk.ThreadCPULoad#period=10 s
jdk.CPUTimeStampCounter#enabled=true
jdk.CPUTimeStampCounter#period=beginChunk
jdk.SystemProcess#enabled=true
jdk.SystemProcess#period=endChunk
jdk.NetworkUtilization#enabled=true
jdk.NetworkUtilization#period=5 s
jdk.InitialEnvironmentVariable#enabled=false
jdk.InitialEnvironmentVariable#period=beginChunk
jdk.PhysicalMemory#enabled=true
jdk.PhysicalMemory#period=everyChunk
jdk.ObjectAllocationInNewTLAB#enabled=true
jdk.ObjectAllocationInNewTLAB#stackTrace=true
jdk.ObjectAllocationOutsideTLAB#enabled=true
jdk.ObjectAllocationOutsideTLAB#stackTrace=true
jdk.NativeLibrary#enabled=false
jdk.NativeLibrary#period=everyChunk
jdk.ModuleRequire#enabled=false
jdk.ModuleRequire#period=endChunk
jdk.ModuleExport#enabled=false
jdk.ModuleExport#period=endChunk
jdk.FileForce#enabled=true
jdk.FileForce#stackTrace=true
jdk.FileForce#threshold=10 ms
jdk.FileRead#enabled=true
jdk.FileRead#stackTrace=true
jdk.FileRead#threshold=10 ms
jdk.FileWrite#enabled=true
jdk.FileWrite#stackTrace=true
jdk.FileWrite#threshold=10 ms
jdk.SocketRead#enabled=true
jdk.SocketRead#stackTrace=true
jdk.SocketRead#threshold=10 ms
jdk.SocketWrite#enabled=true
jdk.SocketWrite#stackTrace=true
jdk.SocketWrite#threshold=10 ms
jdk.JavaExceptionThrow#enabled=false
jdk.JavaExceptionThrow#stackTrace=true
jdk.JavaErrorThrow#enabled=true
jdk.JavaErrorThrow#stackTrace=true
jdk.ExceptionStatistics#enabled=true
jdk.ExceptionStatistics#period=everyChunk
jdk.ActiveRecording#enabled=true
jdk.ActiveSetting#enabled=true
jdk.DataLoss#enabled=true
jdk.DumpReason#enabled=true
jdk.ZPageAllocation#enabled=true
jdk.ZPageAllocation#threshold=10 ms
jdk.ZThreadPhase#enabled=true
jdk.ZThreadPhase#threshold=0 ms
jdk.ZStatisticsCounter#threshold=10 ms
jdk.ZStatisticsCounter#enabled=true
jdk.ZStatisticsSampler#enabled=true
jdk.ZStatisticsSampler#threshold=10 ms
datadog.Scope#enabled=true
datadog.Scope#threshold=10 ms
@@ -0,0 +1,28 @@
package com.datadog.profiling.controller;

import static org.junit.jupiter.api.Assertions.assertEquals;

import datadog.trace.api.Config;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;

@ExtendWith(MockitoExtension.class)
public class ControllerFactoryTest {

  @Mock private Config config;

  /**
   * We assume that tests for this module are run only on JVMs that support JFR. Ideally we would
   * want to have a conditional annotation for this, but currently that is somewhat hard to do
   * well, partially because JFR is available in some java8 versions and not in others. Currently
   * we just run tests with java11, which is guaranteed to have JFR.
   */
  @Test
  public void testCreateController() throws UnsupportedEnvironmentException {
    assertEquals(
        "com.datadog.profiling.controller.openjdk.OpenJdkController",
        ControllerFactory.createController(config).getClass().getName());
  }
}
@@ -0,0 +1,32 @@
package com.datadog.profiling.controller.openjdk;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;

import java.io.IOException;
import java.util.Map;
import org.junit.jupiter.api.Test;

public class JfpUtilsTest {

  private static final String CONFIG_ENTRY = "jdk.ThreadAllocationStatistics#enabled";
  private static final String CONFIG_OVERRIDE_ENTRY = "test.continuous.override#value";

  static final String OVERRIDES =
      OpenJdkControllerTest.class.getClassLoader().getResource("overrides.jfp").getFile();

  @Test
  public void testLoadingContinuousConfig() throws IOException {
    final Map<String, String> config = JfpUtils.readNamedJfpResource(OpenJdkController.JFP, null);
    assertEquals("true", config.get(CONFIG_ENTRY));
    assertNull(config.get(CONFIG_OVERRIDE_ENTRY));
  }

  @Test
  public void testLoadingContinuousConfigWithOverride() throws IOException {
    final Map<String, String> config =
        JfpUtils.readNamedJfpResource(OpenJdkController.JFP, OVERRIDES);
    assertEquals("true", config.get(CONFIG_ENTRY));
    assertEquals("200", config.get(CONFIG_OVERRIDE_ENTRY));
  }
}
@@ -0,0 +1,41 @@
package com.datadog.profiling.controller.openjdk;

import static com.datadog.profiling.controller.openjdk.JfpUtilsTest.OVERRIDES;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.when;

import com.datadog.profiling.controller.ConfigurationException;
import datadog.trace.api.Config;
import java.io.IOException;
import jdk.jfr.Recording;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;

@ExtendWith(MockitoExtension.class)
public class OpenJdkControllerTest {

  private static final String TEST_NAME = "recording name";

  @Mock private Config config;
  private OpenJdkController controller;

  @BeforeEach
  public void setup() throws ConfigurationException, ClassNotFoundException {
    when(config.getProfilingTemplateOverrideFile()).thenReturn(OVERRIDES);
    controller = new OpenJdkController(config);
  }

  @Test
  public void testCreateContinuousRecording() throws IOException {
    final Recording recording = controller.createRecording(TEST_NAME).stop().getRecording();
    assertEquals(TEST_NAME, recording.getName());
    assertEquals(
        JfpUtils.readNamedJfpResource(OpenJdkController.JFP, OVERRIDES), recording.getSettings());
    assertEquals(OpenJdkController.RECORDING_MAX_SIZE, recording.getMaxSize());
    assertEquals(OpenJdkController.RECORDING_MAX_AGE, recording.getMaxAge());
    recording.close();
  }
}
@@ -0,0 +1,97 @@
package com.datadog.profiling.controller.openjdk;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import java.time.Instant;
import jdk.jfr.Recording;
import jdk.jfr.RecordingState;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.mockito.junit.jupiter.MockitoSettings;
import org.mockito.quality.Strictness;

@ExtendWith(MockitoExtension.class)
// Proper unused stub detection doesn't work in junit5 yet,
// see https://github.com/mockito/mockito/issues/1540
@MockitoSettings(strictness = Strictness.LENIENT)
public class OpenJdkOngoingRecordingTest {

  private static final String TEST_NAME = "recording name";

  @Mock private Instant start;
  @Mock private Instant end;
  @Mock private Recording recording;

  private OpenJdkOngoingRecording ongoingRecording;

  @BeforeEach
  public void setup() {
    when(recording.getState()).thenReturn(RecordingState.RUNNING);
    when(recording.getName()).thenReturn(TEST_NAME);

    ongoingRecording = new OpenJdkOngoingRecording(recording);
  }

  @Test
  public void testStop() {
    assertEquals(recording, ongoingRecording.stop().getRecording());

    verify(recording).stop();
  }

  @Test
  public void testStopOnStopped() {
    when(recording.getState()).thenReturn(RecordingState.STOPPED);

    assertThrows(
        IllegalStateException.class,
        () -> {
          ongoingRecording.stop();
        });

    verify(recording, never()).stop();
  }

  @Test
  public void testSnapshot() {
    final OpenJdkRecordingData recordingData = ongoingRecording.snapshot(start, end);
    assertEquals(TEST_NAME, recordingData.getName());
    assertEquals(start, recordingData.getStart());
    assertEquals(end, recordingData.getEnd());
    assertNotEquals(
        recording, recordingData.getRecording(), "make sure we didn't get our mocked recording");

    // We got a real recording so we should clean it up
    recordingData.release();

    verify(recording, never()).stop();
  }

  @Test
  public void testSnapshotOnStopped() {
    when(recording.getState()).thenReturn(RecordingState.STOPPED);

    assertThrows(
        IllegalStateException.class,
        () -> {
          ongoingRecording.snapshot(start, end);
        });

    verify(recording, never()).stop();
  }

  @Test
  public void testClose() {
    ongoingRecording.close();

    verify(recording).close();
  }
}
@@ -0,0 +1,102 @@
package com.datadog.profiling.controller.openjdk;

import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import java.io.IOException;
import java.io.InputStream;
import java.time.Instant;
import jdk.jfr.Recording;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.mockito.junit.jupiter.MockitoSettings;
import org.mockito.quality.Strictness;

@ExtendWith(MockitoExtension.class)
// Proper unused stub detection doesn't work in junit5 yet,
// see https://github.com/mockito/mockito/issues/1540
@MockitoSettings(strictness = Strictness.LENIENT)
public class OpenJdkRecordingDataTest {

  private static final String TEST_NAME = "recording name";

  @Mock Instant start;
  @Mock Instant end;
  @Mock Instant customStart;
  @Mock Instant customEnd;
  @Mock private InputStream stream;
  @Mock private InputStream customStream;
  @Mock private Recording recording;

  private OpenJdkRecordingData recordingData;
  private OpenJdkRecordingData customRecordingData;

  @BeforeEach
  public void setup() throws IOException {
    when(recording.getStream(start, end)).thenReturn(stream);
    when(recording.getStream(customStart, customEnd)).thenReturn(customStream);
    when(recording.getStartTime()).thenReturn(start);
    when(recording.getStopTime()).thenReturn(end);
    when(recording.getName()).thenReturn(TEST_NAME);

    recordingData = new OpenJdkRecordingData(recording);
    customRecordingData = new OpenJdkRecordingData(recording, customStart, customEnd);
  }

  @Test
  public void testGetStream() throws IOException {
    assertEquals(stream, recordingData.getStream());
  }

  @Test
  public void testRelease() {
    recordingData.release();
    verify(recording).close();
  }

  @Test
  public void testGetName() {
    assertEquals(TEST_NAME, recordingData.getName());
  }

  @Test
  public void testToString() {
    assertThat(recordingData.toString(), containsString(TEST_NAME));
  }

  @Test
  public void testGetStart() {
    assertEquals(start, recordingData.getStart());
  }

  @Test
  public void testGetEnd() {
    assertEquals(end, recordingData.getEnd());
  }

  @Test
  public void testCustomGetStream() throws IOException {
    assertEquals(customStream, customRecordingData.getStream());
  }

  @Test
  public void testCustomGetStart() {
    assertEquals(customStart, customRecordingData.getStart());
  }

  @Test
  public void testCustomGetEnd() {
    assertEquals(customEnd, customRecordingData.getEnd());
  }

  @Test
  public void getRecording() {
    assertEquals(recording, recordingData.getRecording());
  }
}
@@ -0,0 +1 @@
mock-maker-inline
@@ -0,0 +1,2 @@
test.continuous.override#value=200
jdk.SafepointBegin#enabled=continuous-test-value
@@ -0,0 +1,30 @@
apply from: "${rootDir}/gradle/java.gradle"

// We have some general logging paths that are hard to test
minimumInstructionCoverage = 0.8

excludedClassesCoverage += [
  // ControllerFactory gets better tested in actual controller implementations
  'com.datadog.profiling.controller.ControllerFactory',
  // There are some code paths that are impossible/very hard to hit
  'com.datadog.profiling.controller.ProfilingSystem.StartRecording',
  'com.datadog.profiling.controller.ProfilingSystem.StopRecording'
]

dependencies {
  compile deps.slf4j
  compile project(':dd-trace-api')

  testCompile deps.junit5
  testCompile deps.guava
  testCompile group: 'org.mockito', name: 'mockito-core', version: '3.1.0'
  testCompile group: 'org.mockito', name: 'mockito-junit-jupiter', version: '3.1.0'
  // The Mockito dependency above pulls in an older version of ByteBuddy that fails to work on
  // java13, so force the correct version here. Note: we can remove this once Mockito upgrades.
  testCompile deps.bytebuddy
  testCompile deps.bytebuddyagent
  testCompile group: 'org.awaitility', name: 'awaitility', version: '4.0.1'
}

sourceCompatibility = JavaVersion.VERSION_1_8
targetCompatibility = JavaVersion.VERSION_1_8
@@ -0,0 +1,29 @@
/*
 * Copyright 2019 Datadog
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.datadog.profiling.controller;

/** Exception thrown when the profiling system is badly configured. */
public class ConfigurationException extends Exception {
  private static final long serialVersionUID = 1L;

  public ConfigurationException(final Throwable cause) {
    super(cause);
  }

  public ConfigurationException(final String message) {
    super(message);
  }
}
@@ -0,0 +1,30 @@
/*
 * Copyright 2019 Datadog
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.datadog.profiling.controller;

/**
 * Interface for the low-level flight recorder control functionality. Needed since we will likely
 * want to support multiple versions later.
 */
public interface Controller {
  /**
   * Creates a continuous recording using the specified template.
   *
   * @param recordingName the name under which the recording will be known.
   * @return the recording object created.
   */
  OngoingRecording createRecording(String recordingName);
}
@@ -0,0 +1,56 @@
/*
 * Copyright 2019 Datadog
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.datadog.profiling.controller;

import datadog.trace.api.Config;
import java.lang.reflect.InvocationTargetException;
import lombok.extern.slf4j.Slf4j;

/** Factory used to get a {@link Controller}. */
@Slf4j
public final class ControllerFactory {

  /**
   * Returns the created controller.
   *
   * @return the created controller.
   * @throws UnsupportedEnvironmentException if there is no controller available for the platform
   *     we're running on. See the exception message for specifics.
   */
  public static Controller createController(final Config config)
      throws UnsupportedEnvironmentException {
    try {
      Class.forName("com.oracle.jrockit.jfr.Producer");
      throw new UnsupportedEnvironmentException(
          "The JFR controller is currently not supported on the Oracle JDK <= JDK 11!");
    } catch (final ClassNotFoundException e) {
      // Fall through - until we support Oracle JDK 7 & 8, this is a good thing. ;)
    }
    try {
      final Class<? extends Controller> clazz =
          Class.forName("com.datadog.profiling.controller.openjdk.OpenJdkController")
              .asSubclass(Controller.class);
      return clazz.getDeclaredConstructor(Config.class).newInstance(config);
    } catch (final ClassNotFoundException
        | NoSuchMethodException
        | InstantiationException
        | IllegalAccessException
        | InvocationTargetException e) {
      throw new UnsupportedEnvironmentException(
          "The JFR controller could not find a supported JFR API", e);
    }
  }
}
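The factory deliberately probes the classpath instead of parsing version strings: the presence of com.oracle.jrockit.jfr.Producer identifies an old Oracle JDK whose JFR flavor is not supported, and the OpenJDK controller is loaded reflectively so this module needs no compile-time dependency on jdk.jfr. A hedged usage sketch (assuming Config.get() is available):

    // Sketch: obtain whichever Controller fits this JVM, or fail gracefully.
    try {
      final Controller controller = ControllerFactory.createController(Config.get());
      // on OpenJDK 11+ this is an OpenJdkController backed by jdk.jfr
    } catch (final UnsupportedEnvironmentException e) {
      // e.g. no JFR available on this JVM - profiling simply stays disabled
    }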
@@ -0,0 +1,29 @@
package com.datadog.profiling.controller;

import java.io.Closeable;
import java.time.Instant;

/** Interface that represents an ongoing recording in the profiling system */
public interface OngoingRecording extends Closeable {

  /**
   * Stop recording.
   *
   * @return {@link RecordingData} with current recording information
   */
  RecordingData stop();

  /**
   * Create a snapshot from the running recording. Note: the recording continues to run after this
   * method is called.
   *
   * @param start start time of the snapshot
   * @param end end time of the snapshot
   * @return {@link RecordingData} with snapshot information
   */
  RecordingData snapshot(final Instant start, final Instant end);

  /** Close the recording without capturing any data */
  @Override
  void close();
}
@ -0,0 +1,195 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2019 Datadog
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
package com.datadog.profiling.controller;
|
||||||
|
|
||||||
|
import com.datadog.profiling.util.ProfilingThreadFactory;
|
||||||
|
import java.time.Duration;
|
||||||
|
import java.time.Instant;
|
||||||
|
import java.util.concurrent.Executors;
|
||||||
|
import java.util.concurrent.ScheduledExecutorService;
|
||||||
|
import java.util.concurrent.ThreadLocalRandom;
|
||||||
|
import java.util.concurrent.TimeUnit;
|
||||||
|
import lombok.extern.slf4j.Slf4j;
|
||||||
|
|
||||||
|
/** Sets up the profiling strategy and schedules the profiling recordings. */
|
||||||
|
@Slf4j
|
||||||
|
public final class ProfilingSystem {
|
||||||
|
static final String RECORDING_NAME = "dd-profiling";
|
||||||
|
|
||||||
|
private static final long TERMINATION_TIMEOUT = 10;
|
||||||
|
|
||||||
|
private final ScheduledExecutorService executorService;
|
||||||
|
private final Controller controller;
|
||||||
|
// For now only support one callback. Multiplex as needed.
|
||||||
|
private final RecordingDataListener dataListener;
|
||||||
|
|
||||||
|
  private final Duration startupDelay;
  private final Duration uploadPeriod;

  private OngoingRecording recording;
  private boolean started = false;

  /**
   * Constructor.
   *
   * @param controller implementation specific controller of profiling machinery
   * @param dataListener the listener for data being produced
   * @param startupDelay delay before starting jfr
   * @param startupDelayRandomRange randomization range for startup delay
   * @param uploadPeriod how often to upload data
   * @throws ConfigurationException if the configuration information was bad.
   */
  public ProfilingSystem(
      final Controller controller,
      final RecordingDataListener dataListener,
      final Duration startupDelay,
      final Duration startupDelayRandomRange,
      final Duration uploadPeriod)
      throws ConfigurationException {
    this(
        controller,
        dataListener,
        startupDelay,
        startupDelayRandomRange,
        uploadPeriod,
        Executors.newScheduledThreadPool(
            1, new ProfilingThreadFactory("dd-profiler-recording-scheduler")),
        ThreadLocalRandom.current());
  }

  ProfilingSystem(
      final Controller controller,
      final RecordingDataListener dataListener,
      final Duration baseStartupDelay,
      final Duration startupDelayRandomRange,
      final Duration uploadPeriod,
      final ScheduledExecutorService executorService,
      final ThreadLocalRandom threadLocalRandom)
      throws ConfigurationException {
    this.controller = controller;
    this.dataListener = dataListener;
    this.uploadPeriod = uploadPeriod;
    this.executorService = executorService;

    if (baseStartupDelay.isNegative()) {
      throw new ConfigurationException("Startup delay must not be negative.");
    }

    if (startupDelayRandomRange.isNegative()) {
      throw new ConfigurationException("Startup delay random range must not be negative.");
    }

    if (uploadPeriod.isNegative() || uploadPeriod.isZero()) {
      throw new ConfigurationException("Upload period must be positive.");
    }

    // Note: it is important not to keep a reference to the threadLocalRandom beyond the
    // constructor since it is expected to be thread-local.
    startupDelay = randomizeDuration(threadLocalRandom, baseStartupDelay, startupDelayRandomRange);
  }

  public final void start() {
    log.info(
        "Starting profiling system: startupDelay={}ms, uploadPeriod={}ms",
        startupDelay.toMillis(),
        uploadPeriod.toMillis());

    // Delay JFR initialization. This code is run from 'premain' and there is a known bug in the
    // JVM which makes it crash if JFR is run before 'main' starts.
    // See https://bugs.openjdk.java.net/browse/JDK-8227011
    executorService.schedule(
        () -> {
          try {
            final Instant now = Instant.now();
            recording = controller.createRecording(RECORDING_NAME);
            executorService.scheduleAtFixedRate(
                new SnapshotRecording(now),
                uploadPeriod.toMillis(),
                uploadPeriod.toMillis(),
                TimeUnit.MILLISECONDS);
            started = true;
          } catch (final Throwable t) {
            log.error("Fatal exception during profiling startup", t);
            throw t;
          }
        },
        startupDelay.toMillis(),
        TimeUnit.MILLISECONDS);
  }

  /** Shuts down the profiling system. */
  public final void shutdown() {
    executorService.shutdownNow();

    try {
      executorService.awaitTermination(TERMINATION_TIMEOUT, TimeUnit.SECONDS);
    } catch (final InterruptedException e) {
      // Note: this should only happen in the main thread right before exiting, so swallowing the
      // interrupted state should be fine.
      log.error("Wait for executor shutdown interrupted");
    }

    // Here we assume that all other threads have been shut down and we can close the running
    // recording
    if (recording != null) {
      recording.close();
    }

    started = false;
  }

  public boolean isStarted() {
    return started;
  }

  /** VisibleForTesting */
  final Duration getStartupDelay() {
    return startupDelay;
  }

  private static Duration randomizeDuration(
      final ThreadLocalRandom random, final Duration duration, final Duration range) {
    return duration.plus(Duration.ofMillis(random.nextLong(range.toMillis())));
  }

  private final class SnapshotRecording implements Runnable {

    private Instant lastSnapshot;

    SnapshotRecording(final Instant startTime) {
      lastSnapshot = startTime;
    }

    @Override
    public void run() {
      final RecordingType recordingType = RecordingType.CONTINUOUS;
      try {
        final RecordingData recordingData = recording.snapshot(lastSnapshot, Instant.now());
        // The hope here is that the chunk does not get rotated between taking the snapshot and
        // taking this timestamp; otherwise we will start losing data.
        lastSnapshot = Instant.now();
        if (recordingData != null) {
          dataListener.onNewData(recordingType, recordingData);
        }
      } catch (final Exception e) {
        log.error("Exception in profiling thread, continuing", e);
      } catch (final Throwable t) {
        log.error("Fatal exception in profiling thread, exiting", t);
        throw t;
      }
    }
  }
}
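For orientation, here is a minimal sketch of how the pieces above are meant to fit together: a Controller produces an OngoingRecording, the ProfilingSystem snapshots it every upload period, and a RecordingDataListener receives each snapshot. The createPlatformController() call is a hypothetical stand-in for the platform-specific Controller implementation; nothing in this sketch is part of the change itself.

// Minimal wiring sketch (illustration only, not part of this change).
final Controller controller = createPlatformController(); // hypothetical factory
final RecordingDataListener listener =
    (type, data) -> {
      // Hand off quickly; heavy work such as uploading belongs on another thread.
      System.out.println("New recording data: " + data.getName());
      data.release(); // always release, or the underlying file/memory leaks
    };
final ProfilingSystem profiler =
    new ProfilingSystem(
        controller,
        listener,
        Duration.ofSeconds(10), // startup delay
        Duration.ofSeconds(5), // randomization range added to the delay
        Duration.ofSeconds(60)); // upload period
profiler.start();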
@ -0,0 +1,69 @@
/*
 * Copyright 2019 Datadog
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.datadog.profiling.controller;

import java.io.IOException;
import java.io.InputStream;
import java.time.Instant;

/** Platform agnostic API for operations required when retrieving data using the ProfilingSystem. */
public interface RecordingData {

  /**
   * @return the data stream.
   * @throws IOException if an IO-related problem occurred.
   */
  InputStream getStream() throws IOException;

  /**
   * Releases the resources associated with the recording, for example the underlying file.
   *
   * <p>Forgetting to release this when done streaming will lead to one or more of the following:
   *
   * <ul>
   *   <li>Memory leak
   *   <li>File leak
   * </ul>
   *
   * <p>Please don't forget to call release when done streaming...
   */
  void release();

  /**
   * Returns the name of the recording from which the data is originating.
   *
   * @return the name of the recording from which the data is originating.
   */
  String getName();

  /**
   * Returns the requested start time for the recording.
   *
   * <p>Note that this doesn't necessarily have to match the time for the actual data recorded.
   *
   * @return the requested start time.
   */
  Instant getStart();

  /**
   * Returns the requested end time for the recording.
   *
   * <p>Note that this doesn't necessarily have to match the time for the actual data recorded.
   *
   * @return the requested end time.
   */
  Instant getEnd();
}
@ -0,0 +1,29 @@
/*
 * Copyright 2019 Datadog
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.datadog.profiling.controller;

/** Listener for getting notified when new recording data is becoming available. */
public interface RecordingDataListener {
  /**
   * Called when new recording data becomes available. Handle quickly, e.g. typically schedule
   * streaming of the newly available data in another thread. Do not forget to {@link
   * RecordingData#release()} when the data has been uploaded.
   *
   * @param type type of the recording
   * @param data the new data available
   */
  void onNewData(RecordingType type, RecordingData data);
}
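To make the contract above concrete, a listener that copies each snapshot to a temporary file could look like the sketch below. Everything here is hypothetical illustration; the essential points are doing the slow work off the callback thread and always calling release().

// Hypothetical listener honoring the "handle quickly, always release" contract.
final ExecutorService uploadExecutor = Executors.newSingleThreadExecutor();
final RecordingDataListener fileDumpingListener =
    (type, data) ->
        uploadExecutor.submit(
            () -> {
              try (final InputStream in = data.getStream()) {
                Files.copy(
                    in,
                    Files.createTempFile(data.getName(), ".jfr"),
                    StandardCopyOption.REPLACE_EXISTING);
              } catch (final IOException e) {
                e.printStackTrace(); // a real implementation would log this
              } finally {
                data.release(); // release even on failure to avoid file/memory leaks
              }
            });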
@ -0,0 +1,13 @@
package com.datadog.profiling.controller;

import lombok.Getter;

public enum RecordingType {
  CONTINUOUS("continuous");

  @Getter private final String name;

  RecordingType(final String name) {
    this.name = name;
  }
}
@ -0,0 +1,29 @@
/*
 * Copyright 2019 Datadog
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.datadog.profiling.controller;

/** Exception thrown when the environment does not support a {@link Controller}. */
public final class UnsupportedEnvironmentException extends Exception {
  private static final long serialVersionUID = 1L;

  public UnsupportedEnvironmentException(final String message) {
    super(message);
  }

  public UnsupportedEnvironmentException(final String message, final Throwable cause) {
    super(message, cause);
  }
}
@ -0,0 +1,21 @@
package com.datadog.profiling.util;

import java.util.concurrent.ThreadFactory;

// FIXME: we should unify all thread factories in a common library
public final class ProfilingThreadFactory implements ThreadFactory {
  private static final ThreadGroup THREAD_GROUP = new ThreadGroup("Datadog Profiler");

  private final String name;

  public ProfilingThreadFactory(final String name) {
    this.name = name;
  }

  @Override
  public Thread newThread(final Runnable r) {
    final Thread t = new Thread(THREAD_GROUP, r, name);
    t.setDaemon(true);
    return t;
  }
}
@ -0,0 +1,23 @@
package com.datadog.profiling.controller;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertSame;

import org.junit.jupiter.api.Test;

public class ConfigurationExceptionTest {

  private static final String MESSAGE = "message";

  @Test
  public void testMessageConstructor() {
    assertEquals(MESSAGE, new ConfigurationException(MESSAGE).getMessage());
  }

  @Test
  public void testCauseConstructor() {
    final Throwable cause = new RuntimeException();
    final Exception exception = new ConfigurationException(cause);
    assertSame(cause, exception.getCause());
  }
}
@ -0,0 +1,28 @@
package com.datadog.profiling.controller;

import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.condition.JRE.JAVA_8;

import datadog.trace.api.Config;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.condition.EnabledOnJre;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;

/** Note: some additional tests for this class are located in the profiling-controller-openjdk module */
@ExtendWith(MockitoExtension.class)
public class ControllerFactoryTest {

  @Mock private Config config;

  @Test
  @EnabledOnJre({JAVA_8})
  public void testCreateControllerJava8() {
    assertThrows(
        UnsupportedEnvironmentException.class,
        () -> {
          ControllerFactory.createController(config);
        });
  }
}
@ -0,0 +1,350 @@
/*
 * Copyright 2019 Datadog
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.datadog.profiling.controller;

import static com.datadog.profiling.controller.RecordingType.CONTINUOUS;
import static org.awaitility.Awaitility.await;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadLocalRandom;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Mock;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.mockito.junit.jupiter.MockitoSettings;
import org.mockito.quality.Strictness;
import org.mockito.stubbing.Answer;

@ExtendWith(MockitoExtension.class)
// Proper unused stub detection doesn't work in junit5 yet,
// see https://github.com/mockito/mockito/issues/1540
@MockitoSettings(strictness = Strictness.LENIENT)
public class ProfilingSystemTest {

  // Time in milliseconds by which all things should have been done.
  // Should be noticeably bigger than one recording iteration
  private static final long REASONABLE_TIMEOUT = 5000;

  private final ScheduledThreadPoolExecutor pool = new ScheduledThreadPoolExecutor(1);

  @Mock private ThreadLocalRandom threadLocalRandom;
  @Mock private Controller controller;
  @Mock private OngoingRecording recording;
  @Mock private RecordingData recordingData;
  @Mock private RecordingDataListener listener;

  @BeforeEach
  public void setup() {
    when(controller.createRecording(ProfilingSystem.RECORDING_NAME)).thenReturn(recording);
    when(threadLocalRandom.nextInt(eq(1), anyInt())).thenReturn(1);
  }

  @AfterEach
  public void tearDown() {
    pool.shutdown();
  }

  @Test
  public void testShutdown() throws ConfigurationException {
    final ProfilingSystem system =
        new ProfilingSystem(
            controller,
            listener,
            Duration.ofMillis(10),
            Duration.ZERO,
            Duration.ofMillis(300),
            pool,
            threadLocalRandom);
    startProfilingSystem(system);
    verify(controller).createRecording(any());
    system.shutdown();

    verify(recording).close();
    assertTrue(pool.isTerminated());
  }

  @Test
  public void testShutdownWithRunningProfilingRecording() throws ConfigurationException {
    final ProfilingSystem system =
        new ProfilingSystem(
            controller,
            listener,
            Duration.ofMillis(10),
            Duration.ZERO,
            Duration.ofMillis(300),
            pool,
            threadLocalRandom);
    startProfilingSystem(system);
    verify(controller).createRecording(any());
    system.shutdown();

    verify(recording).close();
    assertTrue(pool.isTerminated());
  }

  @Test
  public void testShutdownInterruption() throws ConfigurationException {
    final Thread mainThread = Thread.currentThread();
    doAnswer(
            (InvocationOnMock invocation) -> {
              while (!pool.isShutdown()) {
                try {
                  Thread.sleep(100);
                } catch (InterruptedException e) {
                  // Ignore InterruptedException to make sure this thread lives through executor
                  // shutdown
                }
              }
              // Interrupting the main thread to make sure this is handled properly
              mainThread.interrupt();
              return null;
            })
        .when(listener)
        .onNewData(any(), any());
    final ProfilingSystem system =
        new ProfilingSystem(
            controller,
            listener,
            Duration.ofMillis(10),
            Duration.ofMillis(5),
            Duration.ofMillis(100),
            pool,
            threadLocalRandom);
    startProfilingSystem(system);
    // Make sure we actually started the recording before terminating
    verify(controller, timeout(300)).createRecording(any());
    system.shutdown();
    assertTrue(true, "Shutdown exited cleanly after interruption");
  }

  @Test
  public void testCanShutDownWithoutStarting() throws ConfigurationException {
    final ProfilingSystem system =
        new ProfilingSystem(
            controller,
            listener,
            Duration.ofMillis(10),
            Duration.ofMillis(5),
            Duration.ofMillis(300),
            pool,
            threadLocalRandom);
    system.shutdown();
    assertTrue(pool.isTerminated());
  }

  @Test
  public void testDoesntSendDataIfNotStarted() throws InterruptedException, ConfigurationException {
    final ProfilingSystem system =
        new ProfilingSystem(
            controller,
            listener,
            Duration.ofMillis(10),
            Duration.ofMillis(5),
            Duration.ofMillis(1));
    Thread.sleep(50);
    system.shutdown();
    verify(controller, never()).createRecording(any());
    verify(listener, never()).onNewData(any(), any());
  }

  @Test
  public void testDoesntSendPeriodicRecordingIfPeriodicRecordingIsDisabled()
      throws InterruptedException, ConfigurationException {
    when(recording.snapshot(any(), any())).thenReturn(recordingData);
    final ProfilingSystem system =
        new ProfilingSystem(
            controller,
            listener,
            Duration.ofMillis(10),
            Duration.ofMillis(5),
            Duration.ofMillis(10));
    startProfilingSystem(system);
    Thread.sleep(200);
    system.shutdown();
    verify(listener, atLeastOnce()).onNewData(CONTINUOUS, recordingData);
  }

  @Test
  public void testProfilingSystemNegativeStartupDelay() {
    assertThrows(
        ConfigurationException.class,
        () -> {
          new ProfilingSystem(
              controller, listener, Duration.ofMillis(-10), Duration.ZERO, Duration.ofMillis(200));
        });
  }

  @Test
  public void testProfilingSystemNegativeStartupRandomRangeDelay() {
    assertThrows(
        ConfigurationException.class,
        () -> {
          new ProfilingSystem(
              controller,
              listener,
              Duration.ofMillis(10),
              Duration.ofMillis(-20),
              Duration.ofMillis(200));
        });
  }

  @Test
  public void testProfilingSystemNegativeUploadPeriod() {
    assertThrows(
        ConfigurationException.class,
        () -> {
          new ProfilingSystem(
              controller,
              listener,
              Duration.ofMillis(10),
              Duration.ofMillis(20),
              Duration.ofMillis(-200));
        });
  }

  /** Ensure that we continue recording after one snapshot fails to be created */
  @Test
  public void testRecordingSnapshotError() throws ConfigurationException {
    final Duration uploadPeriod = Duration.ofMillis(300);
    final List<RecordingData> generatedRecordingData = new ArrayList<>();
    when(recording.snapshot(any(), any()))
        .thenThrow(new RuntimeException("Test"))
        .thenAnswer(generateMockRecordingData(generatedRecordingData));

    final ProfilingSystem system =
        new ProfilingSystem(
            controller,
            listener,
            Duration.ofMillis(10),
            Duration.ofMillis(5),
            uploadPeriod,
            pool,
            threadLocalRandom);
    startProfilingSystem(system);

    final ArgumentCaptor<RecordingData> captor = ArgumentCaptor.forClass(RecordingData.class);
    verify(listener, timeout(REASONABLE_TIMEOUT).times(2))
        .onNewData(eq(CONTINUOUS), captor.capture());
    assertEquals(generatedRecordingData, captor.getAllValues());

    system.shutdown();
  }

  @Test
  public void testRecordingSnapshotNoData() throws ConfigurationException {
    final Duration uploadPeriod = Duration.ofMillis(300);
    final List<RecordingData> generatedRecordingData = new ArrayList<>();
    when(recording.snapshot(any(), any()))
        .thenReturn(null)
        .thenAnswer(generateMockRecordingData(generatedRecordingData));

    final ProfilingSystem system =
        new ProfilingSystem(
            controller,
            listener,
            Duration.ofMillis(10),
            Duration.ofMillis(5),
            uploadPeriod,
            pool,
            threadLocalRandom);
    startProfilingSystem(system);

    final ArgumentCaptor<RecordingData> captor = ArgumentCaptor.forClass(RecordingData.class);
    verify(listener, timeout(REASONABLE_TIMEOUT).times(2))
        .onNewData(eq(CONTINUOUS), captor.capture());
    assertEquals(generatedRecordingData, captor.getAllValues());

    system.shutdown();
  }

  @Test
  public void testRandomizedStartupDelay() throws ConfigurationException {
    final Duration startupDelay = Duration.ofMillis(100);
    final Duration startupDelayRandomRange = Duration.ofMillis(500);
    final Duration additionalRandomDelay = Duration.ofMillis(300);

    when(threadLocalRandom.nextLong(startupDelayRandomRange.toMillis()))
        .thenReturn(additionalRandomDelay.toMillis());

    final ProfilingSystem system =
        new ProfilingSystem(
            controller,
            listener,
            startupDelay,
            startupDelayRandomRange,
            Duration.ofMillis(100),
            pool,
            threadLocalRandom);

    final Duration randomizedDelay = system.getStartupDelay();

    assertEquals(startupDelay.plus(additionalRandomDelay), randomizedDelay);
  }

  @Test
  public void testFixedStartupDelay() throws ConfigurationException {
    final Duration startupDelay = Duration.ofMillis(100);

    final ProfilingSystem system =
        new ProfilingSystem(
            controller,
            listener,
            startupDelay,
            Duration.ZERO,
            Duration.ofMillis(100),
            pool,
            threadLocalRandom);

    assertEquals(startupDelay, system.getStartupDelay());
  }

  private Answer<Object> generateMockRecordingData(
      final List<RecordingData> generatedRecordingData) {
    return (InvocationOnMock invocation) -> {
      final RecordingData recordingData = mock(RecordingData.class);
      when(recordingData.getStart()).thenReturn(invocation.getArgument(0, Instant.class));
      when(recordingData.getEnd()).thenReturn(invocation.getArgument(1, Instant.class));
      generatedRecordingData.add(recordingData);
      return recordingData;
    };
  }

  private void startProfilingSystem(final ProfilingSystem system) {
    system.start();
    await().until(system::isStarted);
  }
}
@ -0,0 +1,24 @@
package com.datadog.profiling.controller;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertSame;

import org.junit.jupiter.api.Test;

public class UnsupportedEnvironmentExceptionTest {

  private static final String MESSAGE = "message";

  @Test
  public void testMessageConstructor() {
    assertEquals(MESSAGE, new UnsupportedEnvironmentException(MESSAGE).getMessage());
  }

  @Test
  public void testMessageCauseConstructor() {
    final Throwable cause = new RuntimeException();
    final Exception exception = new UnsupportedEnvironmentException(MESSAGE, cause);
    assertEquals(MESSAGE, exception.getMessage());
    assertSame(cause, exception.getCause());
  }
}
@ -0,0 +1,17 @@
package com.datadog.profiling.util;

import static org.junit.jupiter.api.Assertions.assertEquals;

import java.util.concurrent.ThreadFactory;
import org.junit.jupiter.api.Test;

public class ProfilingThreadFactoryTest {

  private final ThreadFactory factory = new ProfilingThreadFactory("test-name");

  @Test
  public void testThreadName() {
    final Thread thread = factory.newThread(() -> {});
    assertEquals("test-name", thread.getName());
  }
}
@ -0,0 +1 @@
mock-maker-inline
@ -0,0 +1,18 @@
apply from: "${rootDir}/gradle/java.gradle"

excludedClassesCoverage += [
  // These classes are used only for testing
  'com.datadog.profiling.testing.*',
]

dependencies {
  compile deps.guava
  compile deps.okhttp
  compile group: 'org.javadelight', name: 'delight-fileupload', version: '0.0.5'
  compile group: 'javax.servlet', name: 'javax.servlet-api', version: '4.0.1'
  compile group: 'com.squareup.okhttp3', name: 'mockwebserver', version: versions.okhttp
}

/* We use Java8 features, but there is no code needing JFR libraries */
sourceCompatibility = JavaVersion.VERSION_1_8
targetCompatibility = JavaVersion.VERSION_1_8
@ -0,0 +1,38 @@
package com.datadog.profiling.testing;

import com.google.common.collect.ImmutableMultimap;
import com.google.common.collect.Multimap;
import delight.fileupload.FileUpload;
import java.util.Collection;
import java.util.Map;
import java.util.stream.Collectors;
import okhttp3.MediaType;
import okhttp3.mockwebserver.RecordedRequest;

public final class ProfilingTestUtils {

  private static final MediaType OCTET_STREAM = MediaType.parse("application/octet-stream");

  public static Multimap<String, Object> parseProfilingRequestParameters(
      final RecordedRequest request) {
    return FileUpload.parse(request.getBody().readByteArray(), request.getHeader("Content-Type"))
        .stream()
        .collect(
            ImmutableMultimap::<String, Object>builder,
            (builder, value) ->
                builder.put(
                    value.getFieldName(),
                    OCTET_STREAM.toString().equals(value.getContentType())
                        ? value.get()
                        : value.getString()),
            (builder1, builder2) -> builder1.putAll(builder2.build()))
        .build();
  }

  public static Map<String, String> parseTags(final Collection<Object> params) {
    return params
        .stream()
        .map(p -> ((String) p).split(":", 2))
        .collect(Collectors.toMap(p -> p[0], p -> p[1]));
  }
}
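A hypothetical test fragment showing how these helpers are intended to be used with okhttp3.mockwebserver; the `server` instance, the parameter names, and the expected values are assumptions for illustration only.

// Sketch: inspect an upload captured by a MockWebServer in a test.
final RecordedRequest request = server.takeRequest(5, TimeUnit.SECONDS);
final Multimap<String, Object> params =
    ProfilingTestUtils.parseProfilingRequestParameters(request);
final Map<String, String> tags = ProfilingTestUtils.parseTags(params.get("tags[]"));
assertEquals("jfr", params.get("format").iterator().next());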
@ -0,0 +1,38 @@
// Set properties before any plugins get loaded
ext {
  jmcVersion = '8.0.0-SNAPSHOT'
}

apply from: "${rootDir}/gradle/java.gradle"

excludedClassesCoverage += [
  // This is just a static declaration that is hard to test
  'com.datadog.profiling.uploader.VersionInfo',
  // Large parts of this class are jvm specific which makes jacoco really confused since we run it only for 'default' jvm
  'com.datadog.profiling.uploader.util.PidHelper'
]

dependencies {
  compile deps.slf4j
  compile project(':dd-trace-api')

  compile project(':dd-java-agent:agent-profiling:profiling-controller')

  compile "org.openjdk.jmc:common:$jmcVersion"

  compile deps.guava
  compile deps.okhttp
  compile group: 'com.github.jnr', name: 'jnr-posix', version: '3.0.52'

  testCompile deps.junit5
  testCompile project(':dd-java-agent:agent-profiling:profiling-testing')
  testCompile group: 'org.mockito', name: 'mockito-core', version: '3.1.0'
  testCompile group: 'org.mockito', name: 'mockito-junit-jupiter', version: '3.1.0'
  testCompile deps.bytebuddy
  testCompile deps.bytebuddyagent
  testCompile group: 'com.squareup.okhttp3', name: 'mockwebserver', version: versions.okhttp
}

/* We use Java8 features, but there is no code needing JFR libraries */
sourceCompatibility = JavaVersion.VERSION_1_8
targetCompatibility = JavaVersion.VERSION_1_8
@ -0,0 +1,25 @@
package com.datadog.profiling.uploader;

enum CompressionType {
  /** No compression */
  OFF,
  /** Default compression */
  ON,
  /** Unknown compression config value */
  UNKNOWN;

  static CompressionType of(final String type) {
    if (type == null) {
      return UNKNOWN;
    }

    switch (type.toLowerCase()) {
      case "off":
        return OFF;
      case "on":
        return ON;
      default:
        return UNKNOWN;
    }
  }
}
@ -0,0 +1,346 @@
/*
 * Copyright 2019 Datadog
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.datadog.profiling.uploader;

import com.datadog.profiling.controller.RecordingData;
import com.datadog.profiling.controller.RecordingType;
import com.datadog.profiling.uploader.util.PidHelper;
import com.datadog.profiling.uploader.util.StreamUtils;
import com.datadog.profiling.util.ProfilingThreadFactory;
import datadog.trace.api.Config;
import java.io.IOException;
import java.io.InputStream;
import java.net.InetSocketAddress;
import java.net.Proxy;
import java.time.Duration;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import okhttp3.Call;
import okhttp3.Callback;
import okhttp3.ConnectionPool;
import okhttp3.Credentials;
import okhttp3.Dispatcher;
import okhttp3.Headers;
import okhttp3.MediaType;
import okhttp3.MultipartBody;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.RequestBody;
import okhttp3.Response;

/** The class for uploading recordings to the backend. */
@Slf4j
public final class RecordingUploader {
  private static final MediaType OCTET_STREAM = MediaType.parse("application/octet-stream");

  static final String RECORDING_NAME_PARAM = "recording-name";
  static final String FORMAT_PARAM = "format";
  static final String TYPE_PARAM = "type";
  static final String RUNTIME_PARAM = "runtime";

  static final String RECORDING_START_PARAM = "recording-start";
  static final String RECORDING_END_PARAM = "recording-end";

  // TODO: We should rename parameter to just `data`
  static final String DATA_PARAM = "chunk-data";

  static final String TAGS_PARAM = "tags[]";

  static final String HEADER_DD_API_KEY = "DD-API-KEY";

  static final String JAVA_LANG = "java";
  static final String DATADOG_META_LANG = "Datadog-Meta-Lang";

  static final int MAX_RUNNING_REQUESTS = 10;
  static final int MAX_ENQUEUED_REQUESTS = 20;

  static final String RECORDING_FORMAT = "jfr";
  static final String RECORDING_TYPE_PREFIX = "jfr-";
  static final String RECORDING_RUNTIME = "jvm";

  static final int TERMINATION_TIMEOUT = 5;

  private static final Headers DATA_HEADERS =
      Headers.of(
          "Content-Disposition", "form-data; name=\"" + DATA_PARAM + "\"; filename=\"recording\"");

  private static final Callback RESPONSE_CALLBACK =
      new Callback() {
        @Override
        public void onFailure(final Call call, final IOException e) {
          log.error("Failed to upload recording", e);
        }

        @Override
        public void onResponse(final Call call, final Response response) {
          if (response.isSuccessful()) {
            log.debug("Upload done");
          } else {
            log.error(
                "Failed to upload recording: unexpected response code {} {}",
                response.message(),
                response.code());
          }
          response.close();
        }
      };

  static final int SEED_EXPECTED_REQUEST_SIZE = 2 * 1024 * 1024; // 2MB;
  static final int REQUEST_SIZE_HISTORY_SIZE = 10;
  static final double REQUEST_SIZE_COEFFICIENT = 1.2;

  private final ExecutorService okHttpExecutorService;
  private final OkHttpClient client;
  private final String apiKey;
  private final String url;
  private final List<String> tags;
  private final Compression compression;
  private final Deque<Integer> requestSizeHistory;

  public RecordingUploader(final Config config) {
    url = config.getProfilingUrl();
    apiKey = config.getProfilingApiKey();

    /*
    FIXME: currently the `Config` class cannot get access to some pieces of information we need here:
    * PID (see PidHelper for details),
    * Profiler version
    Since Config returns an unmodifiable map we have to make a copy here.
    Ideally we should improve this logic and avoid the copy, but the performance impact is very
    limited since we are doing this once on startup only.
    */
    final Map<String, String> tagsMap = new HashMap<>(config.getMergedProfilingTags());
    tagsMap.put(VersionInfo.PROFILER_VERSION_TAG, VersionInfo.VERSION);
    // PID can be null if we cannot find it out from the system
    if (PidHelper.PID != null) {
      tagsMap.put(PidHelper.PID_TAG, PidHelper.PID.toString());
    }
    tags = tagsToList(tagsMap);

    // This is the same thing the OkHttp Dispatcher is doing, except for thread naming and
    // daemonization
    okHttpExecutorService =
        new ThreadPoolExecutor(
            0,
            Integer.MAX_VALUE,
            60,
            TimeUnit.SECONDS,
            new SynchronousQueue<>(),
            new ProfilingThreadFactory("dd-profiler-http-dispatcher"));
    // Reusing connections causes non-daemon threads to be created, which causes the agent to
    // prevent the app from exiting. See https://github.com/square/okhttp/issues/4029 for details.
    final ConnectionPool connectionPool =
        new ConnectionPool(MAX_RUNNING_REQUESTS, 1, TimeUnit.SECONDS);

    // Use the same timeout everywhere for simplicity
    final Duration requestTimeout = Duration.ofSeconds(config.getProfilingUploadTimeout());
    final OkHttpClient.Builder clientBuilder =
        new OkHttpClient.Builder()
            .connectTimeout(requestTimeout)
            .writeTimeout(requestTimeout)
            .readTimeout(requestTimeout)
            .callTimeout(requestTimeout)
            .dispatcher(new Dispatcher(okHttpExecutorService))
            .connectionPool(connectionPool);

    if (config.getProfilingProxyHost() != null) {
      final Proxy proxy =
          new Proxy(
              Proxy.Type.HTTP,
              new InetSocketAddress(
                  config.getProfilingProxyHost(), config.getProfilingProxyPort()));
      clientBuilder.proxy(proxy);
      if (config.getProfilingProxyUsername() != null) {
        // Empty password by default
        final String password =
            config.getProfilingProxyPassword() == null ? "" : config.getProfilingProxyPassword();
        clientBuilder.proxyAuthenticator(
            (route, response) -> {
              final String credential =
                  Credentials.basic(config.getProfilingProxyUsername(), password);
              return response
                  .request()
                  .newBuilder()
                  .header("Proxy-Authorization", credential)
                  .build();
            });
      }
    }

    client = clientBuilder.build();
    client.dispatcher().setMaxRequests(MAX_RUNNING_REQUESTS);
    // We are mainly talking to the same(ish) host, so we need to raise this limit
    client.dispatcher().setMaxRequestsPerHost(MAX_RUNNING_REQUESTS);

    compression = getCompression(CompressionType.of(config.getProfilingUploadCompression()));

    requestSizeHistory = new ArrayDeque<>(REQUEST_SIZE_HISTORY_SIZE);
    requestSizeHistory.add(SEED_EXPECTED_REQUEST_SIZE);
  }

  public void upload(final RecordingType type, final RecordingData data) {
    try {
      if (canEnqueueMoreRequests()) {
        makeUploadRequest(type, data);
      } else {
        log.error("Cannot upload data: too many enqueued requests!");
      }
    } catch (final IllegalStateException | IOException e) {
      log.error("Problem uploading recording!", e);
    } finally {
      try {
        data.getStream().close();
      } catch (final IllegalStateException | IOException e) {
        log.error("Problem closing recording stream", e);
      }
      data.release();
    }
  }

  public void shutdown() {
    okHttpExecutorService.shutdownNow();
    try {
      okHttpExecutorService.awaitTermination(TERMINATION_TIMEOUT, TimeUnit.SECONDS);
    } catch (final InterruptedException e) {
      // Note: this should only happen in the main thread right before exiting, so swallowing the
      // interrupted state should be fine.
      log.error("Wait for executor shutdown interrupted");
    }

    client.connectionPool().evictAll();
  }

  @FunctionalInterface
  private interface Compression {
    RequestBody compress(InputStream is, int expectedSize) throws IOException;
  }

  private Compression getCompression(final CompressionType type) {
    log.debug("Uploader compression type={}", type);
    final StreamUtils.BytesConsumer<RequestBody> consumer =
        (bytes, offset, length) -> RequestBody.create(OCTET_STREAM, bytes, offset, length);
    final Compression compression;
    // currently only gzip and off are supported
    // this needs to be updated once more compression types are added
    switch (type) {
      case ON:
        {
          compression = (is, expectedSize) -> StreamUtils.gzipStream(is, expectedSize, consumer);
          break;
        }
      case OFF:
        {
          compression = (is, expectedSize) -> StreamUtils.readStream(is, expectedSize, consumer);
          break;
        }
      default:
        {
          log.warn("Unrecognizable compression type: {}. Defaulting to 'on'.", type);
          compression = (is, expectedSize) -> StreamUtils.gzipStream(is, expectedSize, consumer);
        }
    }
    return compression;
  }

  private void makeUploadRequest(final RecordingType type, final RecordingData data)
      throws IOException {
    final int expectedRequestSize = getExpectedRequestSize();
    // TODO: it would be really nice to avoid the copy here, but:
    // * if JFR doesn't write the file to disk we do not seem to be able to get the size of the
    //   recording without reading the whole stream
    // * OkHttp doesn't provide a direct way to send uploads from streams, and workarounds would
    //   require a stream that allows 'repeatable reads' because we may need to resend the data.
    final RequestBody body = compression.compress(data.getStream(), expectedRequestSize);
    log.debug(
        "Uploading recording {} [{}] (Size={}/{} bytes)",
        data.getName(),
        type,
        body.contentLength(),
        expectedRequestSize);

    // The body data is stored in a byte array, so we naturally get a size limit that fits into an
    // int
    updateUploadSizesHistory((int) body.contentLength());

    final MultipartBody.Builder bodyBuilder =
        new MultipartBody.Builder()
            .setType(MultipartBody.FORM)
            .addFormDataPart(RECORDING_NAME_PARAM, data.getName())
            .addFormDataPart(FORMAT_PARAM, RECORDING_FORMAT)
            .addFormDataPart(TYPE_PARAM, RECORDING_TYPE_PREFIX + type.getName())
            .addFormDataPart(RUNTIME_PARAM, RECORDING_RUNTIME)
            // Note that toString is well defined for instants - ISO-8601
            .addFormDataPart(RECORDING_START_PARAM, data.getStart().toString())
            .addFormDataPart(RECORDING_END_PARAM, data.getEnd().toString());
    for (final String tag : tags) {
      bodyBuilder.addFormDataPart(TAGS_PARAM, tag);
    }
    bodyBuilder.addPart(DATA_HEADERS, body);
    final RequestBody requestBody = bodyBuilder.build();

    final Request request =
        new Request.Builder()
            .url(url)
            .addHeader(HEADER_DD_API_KEY, apiKey)
            // Note: this header is used to disable tracing of profiling requests
            .addHeader(DATADOG_META_LANG, JAVA_LANG)
            .post(requestBody)
            .build();

    client.newCall(request).enqueue(RESPONSE_CALLBACK);
  }

  private int getExpectedRequestSize() {
    synchronized (requestSizeHistory) {
      // We have added a seed value, so the history cannot be empty
      int size = 0;
      for (final int s : requestSizeHistory) {
        if (s > size) {
          size = s;
        }
      }
      return (int) (size * REQUEST_SIZE_COEFFICIENT);
    }
  }

  private void updateUploadSizesHistory(final int newSize) {
    synchronized (requestSizeHistory) {
      while (requestSizeHistory.size() >= REQUEST_SIZE_HISTORY_SIZE) {
        requestSizeHistory.removeLast();
      }
      requestSizeHistory.offerFirst(newSize);
    }
  }

  private boolean canEnqueueMoreRequests() {
    return client.dispatcher().queuedCallsCount() < MAX_ENQUEUED_REQUESTS;
  }

  private List<String> tagsToList(final Map<String, String> tags) {
    return tags.entrySet()
        .stream()
        .filter(e -> e.getValue() != null && !e.getValue().isEmpty())
        .map(e -> e.getKey() + ":" + e.getValue())
        .collect(Collectors.toList());
  }
}
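To connect this uploader to the ProfilingSystem from the controller module, the listener can simply forward to upload(); a minimal sketch, assuming a populated Config is available via Config.get():

// Sketch: use the uploader as the ProfilingSystem's data listener.
final RecordingUploader uploader = new RecordingUploader(Config.get());
final RecordingDataListener listener = uploader::upload; // onNewData(type, data) -> upload(type, data)
// Pass `listener` into the ProfilingSystem constructor and call uploader.shutdown() on exit.

Note that upload() releases the RecordingData itself (in its finally block), so the listener contract of always releasing is satisfied without extra code.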
@ -0,0 +1,31 @@
package com.datadog.profiling.uploader;

import java.io.BufferedReader;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;

@Slf4j
public class VersionInfo {

  static final String PROFILER_VERSION_TAG = "profiler_version";
  static final String VERSION;

  static {
    String version = "unknown";
    try {
      final InputStream is =
          VersionInfo.class.getClassLoader().getResourceAsStream("agent-profiling.version");
      if (is != null) {
        final BufferedReader reader = new BufferedReader(new InputStreamReader(is));
        version = reader.lines().collect(Collectors.joining(System.lineSeparator())).trim();
      } else {
        log.error("No version file found");
      }
    } catch (final Exception e) {
      log.error("Cannot read version file", e);
    }
    VERSION = version;
  }
}
@ -0,0 +1,37 @@
package com.datadog.profiling.uploader.util;

import jnr.posix.POSIX;
import jnr.posix.POSIXFactory;
import lombok.extern.slf4j.Slf4j;

/**
 * Gets the PID in a reasonably cross-platform way.
 *
 * <p>FIXME: ideally we would like to be able to send the PID with the root span as well, but
 * currently this ends up causing packaging problems. We should revisit this later.
 */
@Slf4j
public class PidHelper {

  public static final String PID_TAG = "process_id";
  public static final Long PID = getPid();

  private static Long getPid() {
    try {
      final Class<?> processHandler = Class.forName("java.lang.ProcessHandle");
      final Object object = processHandler.getMethod("current").invoke(null);
      return (Long) processHandler.getMethod("pid").invoke(object);
    } catch (final Exception e) {
      log.debug("Cannot get PID through JVM API, trying POSIX instead", e);
    }

    try {
      final POSIX posix = POSIXFactory.getPOSIX();
      return (long) posix.getpid();
    } catch (final Exception e) {
      log.debug("Cannot get PID through POSIX API, giving up", e);
    }

    return null;
  }
}
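The reflective ProcessHandle lookup above exists only because this module compiles against Java 8; on Java 9+ the same value is available directly:

// Java 9+ equivalent of the reflective branch above.
final long pid = ProcessHandle.current().pid();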
@ -0,0 +1,220 @@
|
||||||
|
package com.datadog.profiling.uploader.util;
|
||||||
|
|
||||||
|
import java.io.BufferedInputStream;
|
||||||
|
import java.io.ByteArrayOutputStream;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.InputStream;
|
||||||
|
import java.io.OutputStream;
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.zip.GZIPOutputStream;
|
||||||
|
import org.openjdk.jmc.common.io.IOToolkit;
|
||||||
|
|
||||||
|
/** A collection of I/O stream related helper methods */
|
||||||
|
public final class StreamUtils {
|
||||||
|
|
||||||
|
// JMC's IOToolkit hides this from us...
|
||||||
|
static final int ZIP_MAGIC[] = new int[] {80, 75, 3, 4};
|
||||||
|
static final int GZ_MAGIC[] = new int[] {31, 139};
|
||||||
|
/**
|
||||||
|
* Consumes array or bytes along with offset and length and turns it into something usable.
|
||||||
|
*
|
||||||
|
* <p>Main idea here is that we may end up having array with valuable data siting somehere in the
|
||||||
|
* middle and we can avoid additional copies by allowing user to deal with this directly and
|
||||||
|
* convert it into whatever format it needs in most efficient way.
|
||||||
|
*
|
||||||
|
* @param <T> result type
|
||||||
|
*/
|
||||||
|
@FunctionalInterface
|
||||||
|
public interface BytesConsumer<T> {
|
||||||
|
T consume(byte[] bytes, int offset, int length);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Read a stream into a consumer gzip-compressing content. If the stream is already compressed
|
||||||
|
* (gzip, zip) the original data will be returned.
|
||||||
|
*
|
||||||
|
* @param is the input stream
|
||||||
|
* @return zipped contents of the input stream or the the original content if the stream is
|
||||||
|
* already compressed
|
||||||
|
* @throws IOException
|
||||||
|
*/
|
||||||
|
public static <T> T gzipStream(
|
||||||
|
InputStream is, final int expectedSize, final BytesConsumer<T> consumer) throws IOException {
|
||||||
|
is = ensureMarkSupported(is);
|
||||||
|
if (isCompressed(is)) {
|
||||||
|
return readStream(is, expectedSize, consumer);
|
||||||
|
} else {
|
||||||
|
final FastByteArrayOutputStream baos = new FastByteArrayOutputStream(expectedSize);
|
||||||
|
try (final OutputStream zipped = new GZIPOutputStream(baos)) {
|
||||||
|
copy(is, zipped);
|
||||||
|
}
|
||||||
|
return baos.consume(consumer);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Read a stream into a consumer.
|
||||||
|
*
|
||||||
|
* <p>Note: the idea here comes from Guava's {@link com.google.common.io.ByteStreams}, but we
|
||||||
|
* cannot use that directly because it is not public and is not flexible enough.
|
||||||
|
*
|
||||||
|
* @param is the input stream
|
||||||
|
* @param expectedSize expected result size to preallocate buffers
|
||||||
|
* @param consumer consumer to convert byte array to result
|
||||||
|
* @return the stream data
|
||||||
|
* @throws IOException
|
||||||
|
*/
|
||||||
|
public static <T> T readStream(
|
||||||
|
final InputStream is, final int expectedSize, final BytesConsumer<T> consumer)
|
||||||
|
throws IOException {
|
||||||
|
final byte[] bytes = new byte[expectedSize];
|
||||||
|
int remaining = expectedSize;
|
||||||
|
|
||||||
|
while (remaining > 0) {
|
||||||
|
final int offset = expectedSize - remaining;
|
||||||
|
final int read = is.read(bytes, offset, remaining);
|
||||||
|
if (read == -1) {
|
||||||
|
// end of stream before reading expectedSize bytes just return the bytes read so far
|
||||||
|
// 'offset' here is offset in 'bytes' buffer - which essentially represents length of data
|
||||||
|
// read so far.
|
||||||
|
return consumer.consume(bytes, 0, offset);
|
||||||
|
}
|
||||||
|
remaining -= read;
|
||||||
|
}
|
||||||
|
|
||||||
|
// the stream was longer, so read the rest manually
|
||||||
|
final List<BufferChunk> additionalChunks = new ArrayList<>();
|
||||||
|
int additionalChunksLength = 0;
|
||||||
|
|
||||||
|
while (true) {
|
||||||
|
      final BufferChunk chunk = new BufferChunk(Math.max(32, is.available()));
      final int readLength = chunk.readFrom(is);
      if (readLength < 0) {
        break;
      } else {
        additionalChunks.add(chunk);
        additionalChunksLength += readLength;
      }
    }

    // now assemble resulting array
    final byte[] result = new byte[bytes.length + additionalChunksLength];
    System.arraycopy(bytes, 0, result, 0, bytes.length);
    int offset = bytes.length;
    for (final BufferChunk chunk : additionalChunks) {
      offset += chunk.appendToArray(result, offset);
    }
    return consumer.consume(result, 0, result.length);
  }

  private static class BufferChunk {

    private int size = 0;
    private final byte[] buf;

    public BufferChunk(final int initialSize) {
      buf = new byte[initialSize];
    }

    public int readFrom(final InputStream is) throws IOException {
      size = is.read(buf, 0, buf.length);
      return size;
    }

    public int appendToArray(final byte[] array, final int offset) {
      System.arraycopy(buf, 0, array, offset, size);
      return size;
    }
  }

  // Helper ByteArrayOutputStream that avoids some data copies
  private static final class FastByteArrayOutputStream extends ByteArrayOutputStream {

    public FastByteArrayOutputStream(final int size) {
      super(size);
    }

    /**
     * ByteArrayOutputStream's API doesn't allow us to get data without a copy. We add this method
     * to support this.
     */
    <T> T consume(final BytesConsumer<T> consumer) {
      return consumer.consume(buf, 0, count);
    }
  }

  /**
   * Copy an input stream into an output stream
   *
   * @param is input
   * @param os output
   * @throws IOException
   */
  private static void copy(final InputStream is, final OutputStream os) throws IOException {
    int length;
    final byte[] buffer = new byte[8192];
    while ((length = is.read(buffer)) > 0) {
      os.write(buffer, 0, length);
    }
  }

  /**
   * Check whether the stream is compressed using a supported format
   *
   * @param is input stream; must support {@linkplain InputStream#mark(int)}
   * @return {@literal true} if the stream is compressed in a supported format
   * @throws IOException
   */
  private static boolean isCompressed(final InputStream is) throws IOException {
    checkMarkSupported(is);
    return isGzip(is) || isZip(is);
  }

  /**
   * Check whether the stream represents GZip data
   *
   * @param is input stream; must support {@linkplain InputStream#mark(int)}
   * @return {@literal true} if the stream represents GZip data
   * @throws IOException
   */
  private static boolean isGzip(final InputStream is) throws IOException {
    checkMarkSupported(is);
    is.mark(GZ_MAGIC.length);
    try {
      return IOToolkit.hasMagic(is, GZ_MAGIC);
    } finally {
      is.reset();
    }
  }

  /**
   * Check whether the stream represents Zip data
   *
   * @param is input stream; must support {@linkplain InputStream#mark(int)}
   * @return {@literal true} if the stream represents Zip data
   * @throws IOException
   */
  private static boolean isZip(final InputStream is) throws IOException {
    checkMarkSupported(is);
    is.mark(ZIP_MAGIC.length);
    try {
      return IOToolkit.hasMagic(is, ZIP_MAGIC);
    } finally {
      is.reset();
    }
  }

  private static InputStream ensureMarkSupported(InputStream is) {
    if (!is.markSupported()) {
      is = new BufferedInputStream(is);
    }
    return is;
  }

  private static void checkMarkSupported(final InputStream is) throws IOException {
    if (!is.markSupported()) {
      throw new IOException("Can not check headers on streams not supporting mark() method");
    }
  }
}
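
A minimal usage sketch, assuming only the StreamUtils.readStream and BytesConsumer signatures that are visible in StreamUtilsTest later in this changeset: the consumer receives the internal buffer plus offset and length, so a caller can hand the bytes onward without an extra copy. GZ_MAGIC and ZIP_MAGIC are defined earlier in this file (not shown here); the standard magic bytes are 0x1f 0x8b for gzip and "PK" for zip.

  // Hypothetical caller (java.util.Arrays import assumed); mirrors the
  // CONSUME_TO_BYTES consumer used in StreamUtilsTest below.
  final StreamUtils.BytesConsumer<byte[]> toBytes =
      (bytes, offset, length) -> Arrays.copyOfRange(bytes, offset, offset + length);
  final byte[] recording = StreamUtils.readStream(recordingStream, 8192, toBytes);
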
@ -0,0 +1,48 @@
package com.datadog.profiling.uploader;

import static org.junit.jupiter.api.Assertions.assertEquals;

import java.util.ArrayList;
import java.util.List;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;

class CompressionTypeTest {
  @Test
  void testDefault() {
    assertEquals(CompressionType.UNKNOWN, CompressionType.of(""));
    assertEquals(CompressionType.UNKNOWN, CompressionType.of(null));
  }

  @ParameterizedTest
  @EnumSource(CompressionType.class)
  void testOn(final CompressionType type) {
    for (final String checkType : permutateCase(type.name())) {
      assertEquals(type, CompressionType.of(checkType));
    }
  }

  private static List<String> permutateCase(String input) {
    final List<String> output = new ArrayList<>();
    input = input.toLowerCase();
    // fast track for all-upper and all-lower
    output.add(input);
    output.add(input.toUpperCase());

    // use bit operations to generate permutations
    long mask = 0L;
    for (int i = 0; i < input.length(); i++) {
      final StringBuilder sb = new StringBuilder();
      mask += 1;
      long check = mask;
      for (int j = 0; j < input.length(); j++) {
        sb.append(
            ((check & 0x1) == 0x1) ? Character.toUpperCase(input.charAt(j)) : input.charAt(j));
        check = check >> 1;
      }
      output.add(sb.toString());
    }
    return output;
  }
}
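
A worked expansion, for illustration only, of what permutateCase produces: the outer loop runs input.length() times, so the output is the all-lower and all-upper fast-track pair plus one mixed-case string per mask value, not all 2^n case combinations. For the lower-cased input "gzip":

  // mask 1 -> "Gzip", mask 2 -> "gZip", mask 3 -> "GZip", mask 4 -> "gzIp"
  // full output: [gzip, GZIP, Gzip, gZip, GZip, gzIp]
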
@ -0,0 +1,436 @@
/*
 * Copyright 2019 Datadog
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.datadog.profiling.uploader;

import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.withSettings;

import com.datadog.profiling.controller.RecordingData;
import com.datadog.profiling.controller.RecordingType;
import com.datadog.profiling.testing.ProfilingTestUtils;
import com.datadog.profiling.uploader.util.PidHelper;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Multimap;
import com.google.common.io.ByteStreams;
import datadog.trace.api.Config;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.zip.GZIPInputStream;
import okhttp3.Credentials;
import okhttp3.HttpUrl;
import okhttp3.mockwebserver.MockResponse;
import okhttp3.mockwebserver.MockWebServer;
import okhttp3.mockwebserver.RecordedRequest;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;

/** Unit tests for the recording uploader. */
@ExtendWith(MockitoExtension.class)
public class RecordingUploaderTest {

  private static final String APIKEY_VALUE = "testkey";
  private static final String URL_PATH = "/lalala";
  private static final String RECORDING_RESOURCE = "test-recording.jfr";
  private static final String RECODING_NAME_PREFIX = "test-recording-";
  private static final RecordingType RECORDING_TYPE = RecordingType.CONTINUOUS;

  private static final Map<String, String> TAGS;

  static {
    // Not using Guava's ImmutableMap because we want to test a null value
    final Map<String, String> tags = new HashMap<>();
    tags.put("foo", "bar");
    tags.put("baz", "123");
    tags.put("null", null);
    tags.put("empty", "");
    TAGS = tags;
  }

  // Tags are sorted so that the expected request parameters are deterministic
  private static final Map<String, String> EXPECTED_TAGS =
      ImmutableMap.of(
          "baz",
          "123",
          "foo",
          "bar",
          PidHelper.PID_TAG,
          PidHelper.PID.toString(),
          VersionInfo.PROFILER_VERSION_TAG,
          "Stubbed-Test-Version");

  private static final int SEQUENCE_NUMBER = 123;
  private static final int RECORDING_START = 1000;
  private static final int RECORDING_END = 1100;

  // TODO: Add a test to verify overall request timeout rather than IO timeout
  private final Duration REQUEST_TIMEOUT = Duration.ofSeconds(10);
  private final Duration REQUEST_IO_OPERATION_TIMEOUT = Duration.ofSeconds(5);

  private final Duration FOREVER_REQUEST_TIMEOUT = Duration.ofSeconds(1000);

  @Mock private Config config;

  private final MockWebServer server = new MockWebServer();
  private HttpUrl url;

  private RecordingUploader uploader;

  @BeforeEach
  public void setup() throws IOException {
    server.start();
    url = server.url(URL_PATH);

    when(config.getProfilingUrl()).thenReturn(server.url(URL_PATH).toString());
    when(config.getProfilingApiKey()).thenReturn(APIKEY_VALUE);
    when(config.getMergedProfilingTags()).thenReturn(TAGS);
    when(config.getProfilingUploadTimeout()).thenReturn((int) REQUEST_TIMEOUT.getSeconds());

    uploader = new RecordingUploader(config);
  }

  @AfterEach
  public void tearDown() throws IOException {
    uploader.shutdown();
    try {
      server.shutdown();
    } catch (final IOException e) {
      // Looks like this happens for some unclear reason, but should not affect tests
    }
  }

  @ParameterizedTest
  @ValueSource(strings = {"on", "off", "invalid"})
  public void testRequestParameters(final String compression)
      throws IOException, InterruptedException {
    when(config.getProfilingUploadCompression()).thenReturn(compression);
    uploader = new RecordingUploader(config);

    server.enqueue(new MockResponse().setResponseCode(200));

    uploader.upload(RECORDING_TYPE, mockRecordingData(RECORDING_RESOURCE));

    final RecordedRequest recordedRequest = server.takeRequest(5, TimeUnit.SECONDS);
    assertEquals(url, recordedRequest.getRequestUrl());

    assertEquals(APIKEY_VALUE, recordedRequest.getHeader("DD-API-KEY"));

    final Multimap<String, Object> parameters =
        ProfilingTestUtils.parseProfilingRequestParameters(recordedRequest);
    assertEquals(
        ImmutableList.of(RECODING_NAME_PREFIX + SEQUENCE_NUMBER),
        parameters.get(RecordingUploader.RECORDING_NAME_PARAM));
    assertEquals(
        ImmutableList.of(RecordingUploader.RECORDING_FORMAT),
        parameters.get(RecordingUploader.FORMAT_PARAM));
    assertEquals(
        ImmutableList.of(RecordingUploader.RECORDING_TYPE_PREFIX + RECORDING_TYPE.getName()),
        parameters.get(RecordingUploader.TYPE_PARAM));
    assertEquals(
        ImmutableList.of(RecordingUploader.RECORDING_RUNTIME),
        parameters.get(RecordingUploader.RUNTIME_PARAM));

    assertEquals(
        ImmutableList.of(Instant.ofEpochSecond(RECORDING_START).toString()),
        parameters.get(RecordingUploader.RECORDING_START_PARAM));
    assertEquals(
        ImmutableList.of(Instant.ofEpochSecond(RECORDING_END).toString()),
        parameters.get(RecordingUploader.RECORDING_END_PARAM));

    assertEquals(
        EXPECTED_TAGS, ProfilingTestUtils.parseTags(parameters.get(RecordingUploader.TAGS_PARAM)));

    final byte[] expectedBytes =
        ByteStreams.toByteArray(
            Thread.currentThread().getContextClassLoader().getResourceAsStream(RECORDING_RESOURCE));

    byte[] uploadedBytes =
        (byte[]) Iterables.getFirst(parameters.get(RecordingUploader.DATA_PARAM), new byte[] {});
    if (compression.equals("on") || compression.equals("invalid")) {
      uploadedBytes = unGzip(uploadedBytes);
    }
    assertArrayEquals(expectedBytes, uploadedBytes);
  }

  @Test
  public void testRequestWithProxy() throws IOException, InterruptedException {
    final String backendHost = "intake.profiling.datadoghq.com:1234";
    final String backendUrl = "http://intake.profiling.datadoghq.com:1234" + URL_PATH;
    when(config.getProfilingUrl())
        .thenReturn("http://intake.profiling.datadoghq.com:1234" + URL_PATH);
    when(config.getProfilingProxyHost()).thenReturn(server.url("").host());
    when(config.getProfilingProxyPort()).thenReturn(server.url("").port());
    when(config.getProfilingProxyUsername()).thenReturn("username");
    when(config.getProfilingProxyPassword()).thenReturn("password");

    uploader = new RecordingUploader(config);

    server.enqueue(new MockResponse().setResponseCode(407).addHeader("Proxy-Authenticate: Basic"));
    server.enqueue(new MockResponse().setResponseCode(200));

    uploader.upload(RECORDING_TYPE, mockRecordingData(RECORDING_RESOURCE));

    final RecordedRequest recordedFirstRequest = server.takeRequest(5, TimeUnit.SECONDS);
    assertEquals(server.url(""), recordedFirstRequest.getRequestUrl());
    assertEquals(APIKEY_VALUE, recordedFirstRequest.getHeader("DD-API-KEY"));
    assertNull(recordedFirstRequest.getHeader("Proxy-Authorization"));
    assertEquals(backendHost, recordedFirstRequest.getHeader("Host"));
    assertEquals(
        String.format("POST %s HTTP/1.1", backendUrl), recordedFirstRequest.getRequestLine());

    final RecordedRequest recordedSecondRequest = server.takeRequest(5, TimeUnit.SECONDS);
    assertEquals(server.url(""), recordedSecondRequest.getRequestUrl());
    assertEquals(APIKEY_VALUE, recordedSecondRequest.getHeader("DD-API-KEY"));
    assertEquals(
        Credentials.basic("username", "password"),
        recordedSecondRequest.getHeader("Proxy-Authorization"));
    assertEquals(backendHost, recordedSecondRequest.getHeader("Host"));
    assertEquals(
        String.format("POST %s HTTP/1.1", backendUrl), recordedSecondRequest.getRequestLine());
  }

  @Test
  public void testRequestWithProxyDefaultPassword() throws IOException, InterruptedException {
    final String backendUrl = "http://intake.profiling.datadoghq.com:1234" + URL_PATH;
    when(config.getProfilingUrl())
        .thenReturn("http://intake.profiling.datadoghq.com:1234" + URL_PATH);
    when(config.getProfilingProxyHost()).thenReturn(server.url("").host());
    when(config.getProfilingProxyPort()).thenReturn(server.url("").port());
    when(config.getProfilingProxyUsername()).thenReturn("username");

    uploader = new RecordingUploader(config);

    server.enqueue(new MockResponse().setResponseCode(407).addHeader("Proxy-Authenticate: Basic"));
    server.enqueue(new MockResponse().setResponseCode(200));

    uploader.upload(RECORDING_TYPE, mockRecordingData(RECORDING_RESOURCE));

    final RecordedRequest recordedFirstRequest = server.takeRequest(5, TimeUnit.SECONDS);
    final RecordedRequest recordedSecondRequest = server.takeRequest(5, TimeUnit.SECONDS);
    assertEquals(
        Credentials.basic("username", ""), recordedSecondRequest.getHeader("Proxy-Authorization"));
  }

  @Test
  public void testRecordingClosed() throws IOException {
    server.enqueue(new MockResponse().setResponseCode(200));

    final RecordingData recording = mockRecordingData(RECORDING_RESOURCE);
    uploader.upload(RECORDING_TYPE, recording);

    verify(recording.getStream()).close();
    verify(recording).release();
  }

  @Test
  public void test500Response() throws IOException, InterruptedException {
    server.enqueue(new MockResponse().setResponseCode(500));

    final RecordingData recording = mockRecordingData(RECORDING_RESOURCE);
    uploader.upload(RECORDING_TYPE, recording);

    assertNotNull(server.takeRequest(5, TimeUnit.SECONDS));

    verify(recording.getStream()).close();
    verify(recording).release();
  }

  @Test
  public void testConnectionRefused() throws IOException, InterruptedException {
    server.shutdown();

    final RecordingData recording = mockRecordingData(RECORDING_RESOURCE);
    uploader.upload(RECORDING_TYPE, recording);

    verify(recording.getStream()).close();
    verify(recording).release();
  }

  @Test
  public void testTimeout() throws IOException, InterruptedException {
    server.enqueue(
        new MockResponse()
            .setHeadersDelay(
                REQUEST_IO_OPERATION_TIMEOUT.plus(Duration.ofMillis(1000)).toMillis(),
                TimeUnit.MILLISECONDS));

    final RecordingData recording = mockRecordingData(RECORDING_RESOURCE);
    uploader.upload(RECORDING_TYPE, recording);

    assertNotNull(server.takeRequest(5, TimeUnit.SECONDS));

    verify(recording.getStream()).close();
    verify(recording).release();
  }
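
  // A hypothetical sketch addressing the TODO above (not part of the diff):
  // exercise the overall request timeout rather than the per-operation IO
  // timeout. The response trickles out one byte every 4 seconds, so no single
  // read exceeds REQUEST_IO_OPERATION_TIMEOUT (5s) while the call as a whole
  // exceeds REQUEST_TIMEOUT (10s). throttleBody is a real MockWebServer API;
  // exact timings would need tuning in practice.
  @Test
  public void testOverallRequestTimeout() throws IOException, InterruptedException {
    server.enqueue(
        new MockResponse().setBody("aaaa").throttleBody(1, 4, TimeUnit.SECONDS));

    final RecordingData recording = mockRecordingData(RECORDING_RESOURCE);
    uploader.upload(RECORDING_TYPE, recording);

    assertNotNull(server.takeRequest(5, TimeUnit.SECONDS));

    verify(recording.getStream()).close();
    verify(recording).release();
  }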

  @Test
  public void testUnfinishedRecording() throws IOException {
    final RecordingData recording = mockRecordingData(RECORDING_RESOURCE);
    when(recording.getStream()).thenThrow(new IllegalStateException("test exception"));
    uploader.upload(RECORDING_TYPE, recording);

    verify(recording).release();
    verify(recording, times(2)).getStream();
    verifyNoMoreInteractions(recording);
  }

  @Test
  public void testHeaders() throws IOException, InterruptedException {
    server.enqueue(new MockResponse().setResponseCode(200));

    uploader.upload(RECORDING_TYPE, mockRecordingData(RECORDING_RESOURCE));

    final RecordedRequest recordedRequest = server.takeRequest(5, TimeUnit.SECONDS);
    assertEquals(
        RecordingUploader.JAVA_LANG,
        recordedRequest.getHeader(RecordingUploader.DATADOG_META_LANG));
  }

  @Test
  public void testEnqueuedRequestsExecuted() throws IOException, InterruptedException {
    // We have to block all parallel requests to make sure the queue is kept full
    for (int i = 0; i < RecordingUploader.MAX_RUNNING_REQUESTS; i++) {
      server.enqueue(
          new MockResponse()
              .setHeadersDelay(
                  // 1 second should be enough to schedule all requests and not hit timeout
                  Duration.ofMillis(1000).toMillis(), TimeUnit.MILLISECONDS)
              .setResponseCode(200));
    }
    server.enqueue(new MockResponse().setResponseCode(200));

    for (int i = 0; i < RecordingUploader.MAX_RUNNING_REQUESTS; i++) {
      final RecordingData recording = mockRecordingData(RECORDING_RESOURCE);
      uploader.upload(RECORDING_TYPE, recording);
    }

    final RecordingData additionalRecording = mockRecordingData(RECORDING_RESOURCE);
    uploader.upload(RECORDING_TYPE, additionalRecording);

    // Make sure all expected requests happened
    for (int i = 0; i < RecordingUploader.MAX_RUNNING_REQUESTS; i++) {
      assertNotNull(server.takeRequest(5, TimeUnit.SECONDS));
    }

    assertNotNull(server.takeRequest(2000, TimeUnit.MILLISECONDS), "Got enqueued request");

    verify(additionalRecording.getStream()).close();
    verify(additionalRecording).release();
  }

  @Test
  public void testTooManyRequests() throws IOException, InterruptedException {
    // We need the initial requests that fill up the queue to hang for the duration of the
    // test, so we specify an extremely large timeout here.
    when(config.getProfilingUploadTimeout()).thenReturn((int) FOREVER_REQUEST_TIMEOUT.getSeconds());
    uploader = new RecordingUploader(config);

    // We have to block all parallel requests to make sure the queue is kept full
    for (int i = 0; i < RecordingUploader.MAX_RUNNING_REQUESTS; i++) {
      server.enqueue(
          new MockResponse()
              .setHeadersDelay(FOREVER_REQUEST_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS)
              .setResponseCode(200));
    }
    server.enqueue(new MockResponse().setResponseCode(200));

    for (int i = 0; i < RecordingUploader.MAX_RUNNING_REQUESTS; i++) {
      final RecordingData recording = mockRecordingData(RECORDING_RESOURCE);
      uploader.upload(RECORDING_TYPE, recording);
    }

    final List<RecordingData> hangingRequests = new ArrayList<>();
    // We schedule one additional request to check the case when a request is rejected
    // immediately rather than added to the queue.
    for (int i = 0; i < RecordingUploader.MAX_ENQUEUED_REQUESTS + 1; i++) {
      final RecordingData recording = mockRecordingData(RECORDING_RESOURCE);
      hangingRequests.add(recording);
      uploader.upload(RECORDING_TYPE, recording);
    }

    // Make sure all expected requests happened
    for (int i = 0; i < RecordingUploader.MAX_RUNNING_REQUESTS; i++) {
      assertNotNull(server.takeRequest(5, TimeUnit.SECONDS));
    }
    // Recordings submitted after RecordingUploader.MAX_RUNNING_REQUESTS will not be executed
    // because the number of parallel requests has been reached.
    assertNull(server.takeRequest(100, TimeUnit.MILLISECONDS), "No more requests");

    for (final RecordingData recording : hangingRequests) {
      verify(recording.getStream()).close();
      verify(recording).release();
    }
  }

  @Test
  public void testShutdown() throws IOException, InterruptedException {
    uploader.shutdown();

    final RecordingData recording = mockRecordingData(RECORDING_RESOURCE);
    uploader.upload(RECORDING_TYPE, recording);

    assertNull(server.takeRequest(100, TimeUnit.MILLISECONDS), "No more requests");

    verify(recording.getStream()).close();
    verify(recording).release();
  }

  private RecordingData mockRecordingData(final String recordingResource) throws IOException {
    final RecordingData recordingData = mock(RecordingData.class, withSettings().lenient());
    when(recordingData.getStream())
        .thenReturn(
            spy(
                Thread.currentThread()
                    .getContextClassLoader()
                    .getResourceAsStream(recordingResource)));
    when(recordingData.getName()).thenReturn(RECODING_NAME_PREFIX + SEQUENCE_NUMBER);
    when(recordingData.getStart()).thenReturn(Instant.ofEpochSecond(RECORDING_START));
    when(recordingData.getEnd()).thenReturn(Instant.ofEpochSecond(RECORDING_END));
    return recordingData;
  }

  private byte[] unGzip(final byte[] compressed) throws IOException {
    final InputStream stream = new GZIPInputStream(new ByteArrayInputStream(compressed));
    final ByteArrayOutputStream result = new ByteArrayOutputStream();
    ByteStreams.copy(stream, result);
    return result.toByteArray();
  }
}
@ -0,0 +1,16 @@
package com.datadog.profiling.uploader.util;

import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.IOException;
import org.junit.jupiter.api.Test;

public class PidHelperTest {

  @Test
  public void testPid() throws IOException {
    assertTrue(
        PidHelper.PID > 0,
        "Expect PID to be present since we run tests on systems where we can load it");
  }
}
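
A sketch of the classic pre-Java-9 PID lookup that a helper like PidHelper typically wraps; the actual implementation is not shown in this changeset, and on Java 9+ ProcessHandle.current().pid() is the direct replacement.

  import java.lang.management.ManagementFactory;

  class PidSketch {
    static Long currentPid() {
      // RuntimeMXBean#getName conventionally returns "pid@hostname"
      final String jvmName = ManagementFactory.getRuntimeMXBean().getName();
      try {
        return Long.parseLong(jvmName.split("@")[0]);
      } catch (final NumberFormatException e) {
        return null; // the name format is JVM-specific; callers must handle absence
      }
    }
  }
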
@ -0,0 +1,114 @@
package com.datadog.profiling.uploader.util;

import static org.junit.jupiter.api.Assertions.assertArrayEquals;

import com.google.common.io.ByteStreams;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;

class StreamUtilsTest {

  private final int DEFAULT_EXPECTED_SIZE = 100;
  private final StreamUtils.BytesConsumer<byte[]> CONSUME_TO_BYTES =
      (bytes, offset, length) -> Arrays.copyOfRange(bytes, offset, offset + length);

  private static byte[] testRecordingBytes;
  private static byte[] testRecordingGzippedBytes;
  private static byte[] testRecordingZippedBytes;

  @BeforeAll
  public static void setupClass() throws IOException {
    testRecordingBytes = ByteStreams.toByteArray(testRecordingStream());

    final ByteArrayOutputStream gzippedStream = new ByteArrayOutputStream();
    ByteStreams.copy(testRecordingStream(), new GZIPOutputStream(gzippedStream));
    testRecordingGzippedBytes = gzippedStream.toByteArray();

    final ByteArrayOutputStream zippedStream = new ByteArrayOutputStream();
    ByteStreams.copy(testRecordingStream(), createZipOutputStream(zippedStream));
    testRecordingZippedBytes = zippedStream.toByteArray();
  }

  @Test
  public void readStreamNoCompression() throws IOException {
    final int expectedSize = 1; // Try a very small value to test the 'undershoot' logic
    final byte[] bytes =
        StreamUtils.readStream(testRecordingStream(), expectedSize, CONSUME_TO_BYTES);
    assertArrayEquals(testRecordingBytes, bytes);
  }

  @Test
  public void readStreamNoCompressionLargeExpectedSize() throws IOException {
    final int expectedSize = testRecordingBytes.length * 2; // overshoot the size
    final byte[] bytes =
        StreamUtils.readStream(testRecordingStream(), expectedSize, CONSUME_TO_BYTES);
    assertArrayEquals(testRecordingBytes, bytes);
  }

  @Test
  public void gzipStream() throws IOException {
    final int expectedSize = 1; // Try a very small value to test the 'undershoot' logic
    final byte[] gzippedBytes =
        StreamUtils.gzipStream(testRecordingStream(), expectedSize, CONSUME_TO_BYTES);

    assertArrayEquals(testRecordingBytes, uncompress(gzippedBytes));
  }

  @Test
  public void gzipStreamLargeExpectedSize() throws IOException {
    final int expectedSize = testRecordingBytes.length * 2; // overshoot the size
    final byte[] gzippedBytes =
        StreamUtils.gzipStream(testRecordingStream(), expectedSize, CONSUME_TO_BYTES);

    assertArrayEquals(testRecordingBytes, uncompress(gzippedBytes));
  }

  @Test
  public void alreadyGzipStream() throws IOException {
    final byte[] bytes =
        StreamUtils.gzipStream(
            new ByteArrayInputStream(testRecordingGzippedBytes),
            DEFAULT_EXPECTED_SIZE,
            CONSUME_TO_BYTES);

    assertArrayEquals(testRecordingGzippedBytes, bytes);
  }

  @Test
  public void alreadyZipStream() throws IOException {
    final byte[] bytes =
        StreamUtils.gzipStream(
            new ByteArrayInputStream(testRecordingZippedBytes),
            DEFAULT_EXPECTED_SIZE,
            CONSUME_TO_BYTES);

    assertArrayEquals(testRecordingZippedBytes, bytes);
  }

  private static InputStream testRecordingStream() {
    return StreamUtilsTest.class.getResourceAsStream("/test-recording.jfr");
  }

  private static ZipOutputStream createZipOutputStream(final OutputStream stream)
      throws IOException {
    final ZipOutputStream result = new ZipOutputStream(stream);
    result.putNextEntry(new ZipEntry("test"));
    return result;
  }

  private static byte[] uncompress(final byte[] bytes) throws IOException {
    return ByteStreams.toByteArray(new GZIPInputStream(new ByteArrayInputStream(bytes)));
  }
}
@ -0,0 +1 @@
Stubbed-Test-Version
Binary file not shown.
@ -0,0 +1,89 @@
package com.datadog.profiling.agent;

import com.datadog.profiling.controller.ConfigurationException;
import com.datadog.profiling.controller.Controller;
import com.datadog.profiling.controller.ControllerFactory;
import com.datadog.profiling.controller.ProfilingSystem;
import com.datadog.profiling.controller.UnsupportedEnvironmentException;
import com.datadog.profiling.uploader.RecordingUploader;
import datadog.trace.api.Config;
import java.lang.ref.WeakReference;
import java.time.Duration;
import lombok.extern.slf4j.Slf4j;

/** Profiling agent implementation */
@Slf4j
public class ProfilingAgent {

  private static volatile ProfilingSystem PROFILER;

  public static synchronized void run() throws IllegalArgumentException {
    if (PROFILER == null) {
      final Config config = Config.get();
      if (!config.isProfilingEnabled()) {
        log.info("Profiling: disabled");
        return;
      }
      if (config.getProfilingApiKey() == null) {
        log.info("Profiling: no API key, profiling disabled");
        return;
      }

      try {
        final Controller controller = ControllerFactory.createController(config);

        final RecordingUploader uploader = new RecordingUploader(config);

        final Duration startupDelay = Duration.ofSeconds(config.getProfilingStartupDelay());
        final Duration uploadPeriod = Duration.ofSeconds(config.getProfilingUploadPeriod());

        // Randomize the startup delay by up to one upload period. Consider having a separate
        // setting for this in the future.
        final Duration startupDelayRandomRange = uploadPeriod;

        PROFILER =
            new ProfilingSystem(
                controller, uploader::upload, startupDelay, startupDelayRandomRange, uploadPeriod);
        PROFILER.start();
        log.info("Profiling has started!");

        try {
          /*
           * Note: shutdown hooks are tricky because the JVM holds a reference to them forever,
           * preventing GC of anything reachable from them. This means that if/when we implement
           * functionality to manually shut down the profiler, we must remember to remove this
           * shutdown hook from the JVM.
           */
          Runtime.getRuntime().addShutdownHook(new ShutdownHook(PROFILER, uploader));
        } catch (final IllegalStateException ex) {
          // The JVM is already shutting down.
        }
      } catch (final UnsupportedEnvironmentException | ConfigurationException e) {
        log.warn("Failed to initialize profiling agent!", e);
      }
    }
  }

  private static class ShutdownHook extends Thread {
    private final WeakReference<ProfilingSystem> profilerRef;
    private final WeakReference<RecordingUploader> uploaderRef;

    private ShutdownHook(final ProfilingSystem profiler, final RecordingUploader uploader) {
      profilerRef = new WeakReference<>(profiler);
      uploaderRef = new WeakReference<>(uploader);
    }

    @Override
    public void run() {
      final ProfilingSystem profiler = profilerRef.get();
      if (profiler != null) {
        profiler.shutdown();
      }

      final RecordingUploader uploader = uploaderRef.get();
      if (uploader != null) {
        uploader.shutdown();
      }
    }
  }
}
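
A hypothetical sketch of the manual-shutdown path that the comment inside run() anticipates: the hook must be removed with the standard Runtime.removeShutdownHook API so the JVM stops pinning it. The HOOK field is an assumption; the class above does not currently keep that reference.

  private static ShutdownHook HOOK; // would be assigned where the hook is registered

  public static synchronized void shutdown() {
    if (PROFILER != null) {
      PROFILER.shutdown();
      try {
        Runtime.getRuntime().removeShutdownHook(HOOK); // standard JDK API
      } catch (final IllegalStateException e) {
        // JVM is already shutting down; the hook will run on its own.
      }
    }
  }
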
@ -20,6 +20,7 @@ dependencies {
   implementation deps.autoservice

   compile project(':dd-trace-ot')
+  compile project(':dd-trace-ot:jfr-openjdk')

   testCompile deps.opentracing
   testCompile project(':dd-java-agent:testing')
@ -63,6 +63,9 @@ project(':dd-java-agent:instrumentation').afterEvaluate {
 project(':dd-java-agent:agent-jmxfetch').afterEvaluate {
   includeShadowJar(it.tasks.shadowJar, 'agent-jmxfetch')
 }
+project(':dd-java-agent:agent-profiling').afterEvaluate {
+  includeShadowJar(it.tasks.shadowJar, 'agent-profiling')
+}

 task sharedShadowJar(type: ShadowJar) {
   configurations = [project.configurations.sharedShadowInclude]
@ -12,59 +12,60 @@ import spock.lang.Timeout
 class CustomLogManagerTest extends Specification {

   private static final String DEFAULT_LOG_LEVEL = "debug"
+  private static final String PROFILING_API_KEY = "some-api-key"

   // Run all tests using forked jvm because groovy has already set the global log manager
   def "agent services starts up in premain with no custom log manager set"() {
     expect:
     IntegrationTestUtils.runOnSeparateJvm(LogManagerSetter.getName()
-      , ["-Ddd.jmxfetch.enabled=true", "-Ddd.jmxfetch.refresh-beans-period=1", "-Ddatadog.slf4j.simpleLogger.defaultLogLevel=$DEFAULT_LOG_LEVEL"] as String[]
+      , ["-Ddd.jmxfetch.enabled=true", "-Ddd.jmxfetch.refresh-beans-period=1", "-Ddd.profiling.enabled=true", "-Ddatadog.slf4j.simpleLogger.defaultLogLevel=$DEFAULT_LOG_LEVEL"] as String[]
       , "" as String[]
-      , [:]
+      , ["DD_PROFILING_APIKEY": PROFILING_API_KEY]
       , true) == 0
   }

   def "agent services starts up in premain if configured log manager on system classpath"() {
     expect:
     IntegrationTestUtils.runOnSeparateJvm(LogManagerSetter.getName()
-      , ["-Ddd.jmxfetch.enabled=true", "-Ddd.jmxfetch.refresh-beans-period=1", "-Ddatadog.slf4j.simpleLogger.defaultLogLevel=$DEFAULT_LOG_LEVEL", "-Djava.util.logging.manager=jvmbootstraptest.CustomLogManager"] as String[]
+      , ["-Ddd.jmxfetch.enabled=true", "-Ddd.jmxfetch.refresh-beans-period=1", "-Ddd.profiling.enabled=true", "-Ddatadog.slf4j.simpleLogger.defaultLogLevel=$DEFAULT_LOG_LEVEL", "-Djava.util.logging.manager=jvmbootstraptest.CustomLogManager"] as String[]
       , "" as String[]
-      , [:]
+      , ["DD_PROFILING_APIKEY": PROFILING_API_KEY]
       , true) == 0
   }

   def "agent services startup is delayed with java.util.logging.manager sysprop"() {
     expect:
     IntegrationTestUtils.runOnSeparateJvm(LogManagerSetter.getName()
-      , ["-Ddd.jmxfetch.enabled=true", "-Ddd.jmxfetch.refresh-beans-period=1", "-Ddatadog.slf4j.simpleLogger.defaultLogLevel=$DEFAULT_LOG_LEVEL", "-Djava.util.logging.manager=jvmbootstraptest.MissingLogManager"] as String[]
+      , ["-Ddd.jmxfetch.enabled=true", "-Ddd.jmxfetch.refresh-beans-period=1", "-Ddd.profiling.enabled=true", "-Ddatadog.slf4j.simpleLogger.defaultLogLevel=$DEFAULT_LOG_LEVEL", "-Djava.util.logging.manager=jvmbootstraptest.MissingLogManager"] as String[]
       , "" as String[]
-      , [:]
+      , ["DD_PROFILING_APIKEY": PROFILING_API_KEY]
       , true) == 0
   }

   def "agent services startup delayed with tracer custom log manager setting"() {
     expect:
     IntegrationTestUtils.runOnSeparateJvm(LogManagerSetter.getName()
-      , ["-Ddd.jmxfetch.enabled=true", "-Ddd.jmxfetch.refresh-beans-period=1", "-Ddatadog.slf4j.simpleLogger.defaultLogLevel=$DEFAULT_LOG_LEVEL", "-Ddd.app.customlogmanager=true"] as String[]
+      , ["-Ddd.jmxfetch.enabled=true", "-Ddd.jmxfetch.refresh-beans-period=1", "-Ddd.profiling.enabled=true", "-Ddatadog.slf4j.simpleLogger.defaultLogLevel=$DEFAULT_LOG_LEVEL", "-Ddd.app.customlogmanager=true"] as String[]
       , "" as String[]
-      , [:]
+      , ["DD_PROFILING_APIKEY": PROFILING_API_KEY]
       , true) == 0
   }

   def "agent services startup delayed with JBOSS_HOME environment variable"() {
     expect:
     IntegrationTestUtils.runOnSeparateJvm(LogManagerSetter.getName()
-      , ["-Ddd.jmxfetch.enabled=true", "-Ddd.jmxfetch.refresh-beans-period=1", "-Ddatadog.slf4j.simpleLogger.defaultLogLevel=$DEFAULT_LOG_LEVEL"] as String[]
+      , ["-Ddd.jmxfetch.enabled=true", "-Ddd.jmxfetch.refresh-beans-period=1", "-Ddd.profiling.enabled=true", "-Ddatadog.slf4j.simpleLogger.defaultLogLevel=$DEFAULT_LOG_LEVEL"] as String[]
       , "" as String[]
-      , ["JBOSS_HOME": "/"]
+      , ["JBOSS_HOME": "/", "DD_PROFILING_APIKEY": PROFILING_API_KEY]
       , true) == 0
   }

   def "agent services startup in premain forced by customlogmanager=false"() {
     expect:
     IntegrationTestUtils.runOnSeparateJvm(LogManagerSetter.getName()
-      , ["-Ddd.jmxfetch.enabled=true", "-Ddd.jmxfetch.refresh-beans-period=1", "-Ddatadog.slf4j.simpleLogger.defaultLogLevel=$DEFAULT_LOG_LEVEL", "-Ddd.app.customlogmanager=false", "-Djava.util.logging.manager=jvmbootstraptest.CustomLogManager"] as String[]
+      , ["-Ddd.jmxfetch.enabled=true", "-Ddd.jmxfetch.refresh-beans-period=1", "-Ddd.profiling.enabled=true", "-Ddatadog.slf4j.simpleLogger.defaultLogLevel=$DEFAULT_LOG_LEVEL", "-Ddd.app.customlogmanager=false", "-Djava.util.logging.manager=jvmbootstraptest.CustomLogManager"] as String[]
       , "" as String[]
-      , ["JBOSS_HOME": "/"]
+      , ["JBOSS_HOME": "/", "DD_PROFILING_APIKEY": PROFILING_API_KEY]
       , true) == 0
   }
 }
@ -24,6 +24,12 @@ public class LogManagerSetter {
           isJmxfetchStarted(false),
           true,
           "jmxfetch should start in premain when customlogmanager=false.");
+      if (isJFRSupported()) {
+        customAssert(
+            isProfilingStarted(false),
+            true,
+            "profiling should start in premain when customlogmanager=false.");
+      }
     }
   } else if (System.getProperty("java.util.logging.manager") != null) {
     System.out.println("java.util.logging.manager != null");

@ -37,6 +43,10 @@ public class LogManagerSetter {
           isJmxfetchStarted(false),
           false,
           "jmxfetch startup must be delayed when log manager system property is present.");
+      if (isJFRSupported()) {
+        assertProfilingStartupDelayed(
+            "profiling startup must be delayed when log manager system property is present.");
+      }
       // Change back to a valid LogManager.
       System.setProperty("java.util.logging.manager", CustomLogManager.class.getName());
       customAssert(

@ -49,6 +59,10 @@ public class LogManagerSetter {
           isTracerInstalled(true), true, "tracer should be installed after loading LogManager.");
       customAssert(
           isJmxfetchStarted(true), true, "jmxfetch should start after loading LogManager.");
+      if (isJFRSupported()) {
+        customAssert(
+            isProfilingStarted(true), true, "profiling should start after loading LogManager.");
+      }
     } else {
       customAssert(
           isTracerInstalled(false),

@ -58,6 +72,12 @@ public class LogManagerSetter {
           isJmxfetchStarted(false),
           true,
           "jmxfetch should start in premain when custom log manager found on classpath.");
+      if (isJFRSupported()) {
+        customAssert(
+            isProfilingStarted(false),
+            true,
+            "profiling should start in premain when custom log manager found on classpath.");
+      }
     }
   } else if (System.getenv("JBOSS_HOME") != null) {
     System.out.println("JBOSS_HOME != null");

@ -67,6 +87,10 @@ public class LogManagerSetter {
         isJmxfetchStarted(false),
         false,
         "jmxfetch startup must be delayed when JBOSS_HOME property is present.");
+    if (isJFRSupported()) {
+      assertProfilingStartupDelayed(
+          "profiling startup must be delayed when JBOSS_HOME property is present.");
+    }

     System.setProperty("java.util.logging.manager", CustomLogManager.class.getName());
     customAssert(

@ -83,6 +107,12 @@ public class LogManagerSetter {
         isJmxfetchStarted(true),
         true,
         "jmxfetch should start after loading with JBOSS_HOME set.");
+    if (isJFRSupported()) {
+      customAssert(
+          isProfilingStarted(true),
+          true,
+          "profiling should start after loading with JBOSS_HOME set.");
+    }
   } else {
     System.out.println("No custom log manager");

@ -94,6 +124,12 @@ public class LogManagerSetter {
         isJmxfetchStarted(false),
         true,
         "jmxfetch should start in premain when no custom log manager is set.");
+    if (isJFRSupported()) {
+      customAssert(
+          isProfilingStarted(false),
+          true,
+          "profiling should start in premain when no custom log manager is set.");
+    }
     }
   }

@ -116,11 +152,22 @@ public class LogManagerSetter {
     }
   }

-  private static boolean isJmxfetchStarted(final boolean wait) {
-    // Wait up to 10 seconds for jmxfetch thread to appear
+  private static void assertProfilingStartupDelayed(final String message) {
+    if (isJavaBefore9WithJFR()) {
+      customAssert(isProfilingStarted(false), false, message);
+    } else {
+      customAssert(
+          isProfilingStarted(false),
+          true,
+          "We can safely start profiler on java9+ since it doesn't indirectly trigger log manager init");
+    }
+  }
+
+  private static boolean isThreadStarted(final String name, final boolean wait) {
+    // Wait up to 10 seconds for the thread to appear
     for (int i = 0; i < 20; i++) {
       for (final Thread thread : Thread.getAllStackTraces().keySet()) {
-        if ("dd-jmx-collector".equals(thread.getName())) {
+        if (name.equals(thread.getName())) {
           return true;
         }
       }

@ -136,6 +183,14 @@ public class LogManagerSetter {
     return false;
   }

+  private static boolean isJmxfetchStarted(final boolean wait) {
+    return isThreadStarted("dd-jmx-collector", wait);
+  }
+
+  private static boolean isProfilingStarted(final boolean wait) {
+    return isThreadStarted("dd-profiler-recording-scheduler", wait);
+  }
+
   private static boolean isTracerInstalled(final boolean wait) {
     // Wait up to 10 seconds for tracer to get installed
     for (int i = 0; i < 20; i++) {

@ -159,6 +214,10 @@ public class LogManagerSetter {
       return false;
     }

+    return isJFRSupported();
+  }
+
+  private static boolean isJFRSupported() {
     final String jfrClassResourceName = "jdk.jfr.Recording".replace('.', '/') + ".class";
     return Thread.currentThread().getContextClassLoader().getResourceAsStream(jfrClassResourceName)
         != null;
@ -0,0 +1,39 @@
plugins {
  id "com.github.johnrengelman.shadow" version "4.0.4"
}

ext {
  minJavaVersionForTests = JavaVersion.VERSION_11
  // Zulu has backported profiling support
  forceJdk = ['ZULU8', 'ZULU11', '12']
  jmcVersion = '8.0.0-SNAPSHOT'
}

apply from: "${rootDir}/gradle/java.gradle"

description = 'Profiling Integration Tests.'

jar {
  manifest {
    attributes(
      'Main-Class': 'datadog.smoketest.profiling.ProfilingTestApplication'
    )
  }
}

dependencies {
  compile project(':dd-trace-api')

  testCompile project(':dd-smoke-tests')
  testCompile project(':dd-java-agent:agent-profiling:profiling-testing')
  testCompile "org.openjdk.jmc:common:$jmcVersion"
  testCompile "org.openjdk.jmc:flightrecorder:$jmcVersion"
  testCompile "org.openjdk.jmc:flightrecorder.rules:$jmcVersion"
  testCompile "org.openjdk.jmc:flightrecorder.rules.jdk:$jmcVersion"
}

tasks.withType(Test).configureEach {
  dependsOn shadowJar

  jvmArgs "-Ddatadog.smoketest.profiling.shadowJar.path=${tasks.shadowJar.archivePath}"
}
@ -0,0 +1,28 @@
package datadog.smoketest.profiling;

import datadog.trace.api.Trace;
import java.util.concurrent.TimeUnit;

public class ProfilingTestApplication {

  public static void main(final String[] args) throws InterruptedException {
    long exitDelay = -1;
    if (args.length > 0) {
      exitDelay = TimeUnit.SECONDS.toMillis(Long.parseLong(args[0]));
    }
    final long startTime = System.currentTimeMillis();
    while (true) {
      tracedMethod();
      if (exitDelay > 0 && exitDelay + startTime < System.currentTimeMillis()) {
        break;
      }
    }
    System.out.println("Exiting (" + exitDelay + ")");
  }

  @Trace
  private static void tracedMethod() throws InterruptedException {
    System.out.println("Tracing");
    Thread.sleep(100);
  }
}
@ -0,0 +1,105 @@
package datadog.smoketest

import com.datadog.profiling.testing.ProfilingTestUtils
import com.google.common.collect.Multimap
import okhttp3.mockwebserver.MockResponse
import okhttp3.mockwebserver.MockWebServer
import okhttp3.mockwebserver.RecordedRequest
import org.openjdk.jmc.common.item.IItemCollection
import org.openjdk.jmc.common.item.ItemFilters
import org.openjdk.jmc.flightrecorder.JfrLoaderToolkit

import java.time.Instant
import java.util.concurrent.TimeUnit

class ProfilingIntegrationContinuousProfilesTest extends AbstractSmokeTest {

  // This needs to give enough time for the test app to start up and a recording to happen
  private static final int REQUEST_WAIT_TIMEOUT = 40

  private final MockWebServer server = new MockWebServer()

  @Override
  ProcessBuilder createProcessBuilder() {
    String profilingShadowJar = System.getProperty("datadog.smoketest.profiling.shadowJar.path")

    List<String> command = new ArrayList<>()
    command.add(javaPath())
    command.addAll(defaultJavaProperties)
    command.add("-Ddd.profiling.continuous.to.periodic.upload.ratio=0") // Disable periodic profiles
    command.addAll((String[]) ["-jar", profilingShadowJar])
    ProcessBuilder processBuilder = new ProcessBuilder(command)
    processBuilder.directory(new File(buildDirectory))
    return processBuilder
  }

  def setup() {
    server.start(profilingPort)
  }

  def cleanup() {
    try {
      server.shutdown()
    } catch (final IOException e) {
      // Looks like this happens for some unclear reason, but should not affect tests
    }
  }

  def "test continuous recording"() {
    setup:
    server.enqueue(new MockResponse().setResponseCode(200))

    when:
    RecordedRequest firstRequest = server.takeRequest(REQUEST_WAIT_TIMEOUT, TimeUnit.SECONDS)
    Multimap<String, Object> firstRequestParameters =
      ProfilingTestUtils.parseProfilingRequestParameters(firstRequest)

    then:
    firstRequest.getRequestUrl().toString() == profilingUrl
    firstRequest.getHeader("DD-API-KEY") == PROFILING_API_KEY

    firstRequestParameters.get("recording-name").get(0) == 'dd-profiling'
    firstRequestParameters.get("format").get(0) == "jfr"
    firstRequestParameters.get("type").get(0) == "jfr-continuous"
    firstRequestParameters.get("runtime").get(0) == "jvm"

    def firstStartTime = Instant.parse(firstRequestParameters.get("recording-start").get(0))
    def firstEndTime = Instant.parse(firstRequestParameters.get("recording-end").get(0))
    firstStartTime != null
    firstEndTime != null
    def duration = firstEndTime.toEpochMilli() - firstStartTime.toEpochMilli()
    duration > TimeUnit.SECONDS.toMillis(PROFILING_RECORDING_UPLOAD_PERIOD_SECONDS - 2)
    duration < TimeUnit.SECONDS.toMillis(PROFILING_RECORDING_UPLOAD_PERIOD_SECONDS + 2)

    Map<String, String> requestTags = ProfilingTestUtils.parseTags(firstRequestParameters.get("tags[]"))
    requestTags.get("service") == "smoke-test-java-app"
    requestTags.get("language") == "jvm"
    requestTags.get("runtime-id") != null
    requestTags.get("host") == InetAddress.getLocalHost().getHostName()

    firstRequestParameters.get("chunk-data").get(0) != null

    when:
    RecordedRequest secondRequest = server.takeRequest(REQUEST_WAIT_TIMEOUT, TimeUnit.SECONDS)
    Multimap<String, Object> secondRequestParameters =
      ProfilingTestUtils.parseProfilingRequestParameters(secondRequest)

    then:
    secondRequest.getRequestUrl().toString() == profilingUrl
    secondRequest.getHeader("DD-API-KEY") == PROFILING_API_KEY

    secondRequestParameters.get("recording-name").get(0) == 'dd-profiling'
    def secondStartTime = Instant.parse(secondRequestParameters.get("recording-start").get(0))
    def period = secondStartTime.toEpochMilli() - firstStartTime.toEpochMilli()
    period > TimeUnit.SECONDS.toMillis(PROFILING_RECORDING_UPLOAD_PERIOD_SECONDS - 2)
    period < TimeUnit.SECONDS.toMillis(PROFILING_RECORDING_UPLOAD_PERIOD_SECONDS + 2)

    secondRequestParameters.get("chunk-data").get(0) != null

    IItemCollection events = JfrLoaderToolkit.loadEvents(new ByteArrayInputStream(secondRequestParameters.get("chunk-data").get(0)))
    IItemCollection scopeEvents = events.apply(ItemFilters.type("datadog.Scope"))

    scopeEvents.size() > 0
  }

}
@ -0,0 +1,62 @@
package datadog.smoketest

import okhttp3.mockwebserver.MockResponse
import okhttp3.mockwebserver.MockWebServer
import okhttp3.mockwebserver.RecordedRequest

import java.util.concurrent.TimeUnit

class ProfilingIntegrationShutdownTest extends AbstractSmokeTest {

  // This needs to give the test app enough time to start up and a recording to happen
  private static final int REQUEST_WAIT_TIMEOUT = 40

  // Run the app long enough to get profiles
  private static final int RUN_APP_FOR = PROFILING_START_DELAY_SECONDS + PROFILING_RECORDING_UPLOAD_PERIOD_SECONDS * 2 + 1

  private final MockWebServer server = new MockWebServer()

  @Override
  ProcessBuilder createProcessBuilder() {
    String profilingShadowJar = System.getProperty("datadog.smoketest.profiling.shadowJar.path")

    List<String> command = new ArrayList<>()
    command.add(javaPath())
    command.addAll(defaultJavaProperties)
    command.addAll((String[]) ["-jar", profilingShadowJar])
    command.add(Integer.toString(RUN_APP_FOR))
    ProcessBuilder processBuilder = new ProcessBuilder(command)
    processBuilder.directory(new File(buildDirectory))
    return processBuilder
  }

  def setup() {
    server.start(profilingPort)
  }

  def cleanup() {
    try {
      server.shutdown()
    } catch (final IOException e) {
      // This seems to happen for unclear reasons, but it should not affect the tests
    }
  }

  def "test that profiling agent doesn't prevent app from exiting"() {
    setup:
    server.enqueue(new MockResponse().setResponseCode(200))

    when:
    RecordedRequest request = server.takeRequest(REQUEST_WAIT_TIMEOUT, TimeUnit.SECONDS)

    then:
    request.bodySize > 0

    then:
    // Wait for the app to exit, with some extra time.
    // The expectation is that the agent doesn't prevent the app from exiting.
    serverProcess.waitFor(RUN_APP_FOR + 10, TimeUnit.SECONDS) == true
  }
}
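This test only passes if the profiler's background threads do not keep the JVM alive once main() returns. The usual way to guarantee that is to run the recorder and uploader on daemon threads; the snippet below is a minimal sketch of that pattern (the thread name and factory are illustrative, not taken from this PR).

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

public class DaemonSchedulerSketch {
  // A scheduler whose worker thread is a daemon, so a pending upload task
  // does not prevent the application from exiting.
  public static ScheduledExecutorService newDaemonScheduler() {
    return Executors.newScheduledThreadPool(
        1,
        runnable -> {
          final Thread thread = new Thread(runnable, "dd-profiling-uploader");
          thread.setDaemon(true);
          return thread;
        });
  }
}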
@ -1,11 +1,15 @@
package datadog.smoketest

+import datadog.trace.agent.test.utils.PortUtils
import spock.lang.Shared
import spock.lang.Specification

abstract class AbstractSmokeTest extends Specification {

+  public static final PROFILING_API_KEY = "org2_api_key"
+  public static final PROFILING_START_DELAY_SECONDS = 1
+  public static final int PROFILING_RECORDING_UPLOAD_PERIOD_SECONDS = 5

  @Shared
  protected String workingDirectory = System.getProperty("user.dir")
  @Shared

@ -13,6 +17,10 @@ abstract class AbstractSmokeTest extends Specification {
  @Shared
  protected String shadowJarPath = System.getProperty("datadog.smoketest.agent.shadowJar.path")
  @Shared
+  protected int profilingPort
+  @Shared
+  protected String profilingUrl
+  @Shared
  protected String[] defaultJavaProperties

  @Shared

@ -23,10 +31,17 @@ abstract class AbstractSmokeTest extends Specification {
      throw new AssertionError("Expected system properties not found. Smoke tests have to be run from Gradle. Please make sure that is the case.")
    }

+    profilingPort = PortUtils.randomOpenPort()
+    profilingUrl = "http://localhost:${profilingPort}/"

    defaultJavaProperties = [
      "-javaagent:${shadowJarPath}",
      "-Ddd.writer.type=LoggingWriter",
      "-Ddd.service.name=smoke-test-java-app",
+      "-Ddd.profiling.enabled=true",
+      "-Ddd.profiling.start-delay=${PROFILING_START_DELAY_SECONDS}",
+      "-Ddd.profiling.upload.period=${PROFILING_RECORDING_UPLOAD_PERIOD_SECONDS}",
+      "-Ddd.profiling.url=http://localhost:${profilingPort}",
      "-Ddatadog.slf4j.simpleLogger.defaultLogLevel=debug",
      "-Dorg.slf4j.simpleLogger.defaultLogLevel=debug"
    ]

@ -34,6 +49,7 @@ abstract class AbstractSmokeTest extends Specification {
    ProcessBuilder processBuilder = createProcessBuilder()

    processBuilder.environment().put("JAVA_HOME", System.getProperty("java.home"))
+    processBuilder.environment().put("DD_PROFILING_APIKEY", PROFILING_API_KEY)

    processBuilder.redirectErrorStream(true)
    File log = new File("${buildDirectory}/reports/testProcess.${this.getClass().getName()}.log")
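Each -Ddd.profiling.* flag above corresponds to one of the Config keys introduced later in this commit. The sketch below illustrates the assumed lookup order; the "dd." prefix and system-property-over-environment precedence are consistent with the PREFIX constant and the "sys props override env vars" test further down, but this is an illustration, not the agent's exact code.

public class SettingLookupSketch {
  // Illustrative resolution for "profiling.upload.period": the system property
  // "dd.profiling.upload.period" wins over the environment variable
  // "DD_PROFILING_UPLOAD_PERIOD", which wins over the default.
  public static String lookup(final String key, final String defaultValue) {
    final String fromProperty = System.getProperty("dd." + key);
    if (fromProperty != null) {
      return fromProperty;
    }
    final String envName = "DD_" + key.toUpperCase().replace('.', '_').replace('-', '_');
    final String fromEnv = System.getenv(envName);
    return fromEnv != null ? fromEnv : defaultValue;
  }
}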
@ -8,6 +8,9 @@ import java.io.IOException;
import java.io.InputStreamReader;
import java.net.InetAddress;
import java.net.UnknownHostException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;

@ -54,9 +57,11 @@ public class Config {
  public static final String PRIORITY_SAMPLING = "priority.sampling";
  public static final String TRACE_RESOLVER_ENABLED = "trace.resolver.enabled";
  public static final String SERVICE_MAPPING = "service.mapping";

  public static final String GLOBAL_TAGS = "trace.global.tags";
  public static final String SPAN_TAGS = "trace.span.tags";
  public static final String JMX_TAGS = "trace.jmx.tags";

  public static final String TRACE_ANALYTICS_ENABLED = "trace.analytics.enabled";
  public static final String TRACE_ANNOTATIONS = "trace.annotations";
  public static final String TRACE_EXECUTORS_ALL = "trace.executors.all";

@ -98,9 +103,28 @@ public class Config {

  public static final String LOGS_INJECTION_ENABLED = "logs.injection";

-  public static final String SERVICE_TAG = "service";
-  @Deprecated public static final String SERVICE = SERVICE_TAG; // To be removed in 0.34.0
+  public static final String PROFILING_ENABLED = "profiling.enabled";
+  public static final String PROFILING_URL = "profiling.url";
+  public static final String PROFILING_API_KEY = "profiling.api-key";
+  public static final String PROFILING_API_KEY_FILE = "profiling.api-key-file";
+  public static final String PROFILING_API_KEY_OLD = "profiling.apikey";
+  public static final String PROFILING_API_KEY_FILE_OLD = "profiling.apikey.file";
+  public static final String PROFILING_TAGS = "profiling.tags";
+  public static final String PROFILING_STARTUP_DELAY = "profiling.start-delay";
+  public static final String PROFILING_UPLOAD_PERIOD = "profiling.upload.period";
+  public static final String PROFILING_TEMPLATE_OVERRIDE_FILE =
+      "profiling.jfr-template-override-file";
+  public static final String PROFILING_UPLOAD_TIMEOUT = "profiling.upload.timeout";
+  public static final String PROFILING_UPLOAD_COMPRESSION = "profiling.upload.compression";
+  public static final String PROFILING_PROXY_HOST = "profiling.proxy.host";
+  public static final String PROFILING_PROXY_PORT = "profiling.proxy.port";
+  public static final String PROFILING_PROXY_USERNAME = "profiling.proxy.username";
+  public static final String PROFILING_PROXY_PASSWORD = "profiling.proxy.password";

  public static final String RUNTIME_ID_TAG = "runtime-id";
+  public static final String SERVICE_TAG = "service";
+  public static final String HOST_TAG = "host";
  public static final String LANGUAGE_TAG_KEY = "language";
  public static final String LANGUAGE_TAG_VALUE = "jvm";

@ -138,10 +162,19 @@ public class Config {
  public static final int DEFAULT_JMX_FETCH_STATSD_PORT = 8125;

  public static final boolean DEFAULT_METRICS_ENABLED = false;
-  // No default constants for metrics statsd support -- falls back to jmx fetch values
+  // No default constants for metrics statsd support -- falls back to jmxfetch values

  public static final boolean DEFAULT_LOGS_INJECTION_ENABLED = false;

+  public static final boolean DEFAULT_PROFILING_ENABLED = false;
+  public static final String DEFAULT_PROFILING_URL =
+      "https://beta-intake.profile.datadoghq.com/v1/input";
+  public static final int DEFAULT_PROFILING_STARTUP_DELAY = 10;
+  public static final int DEFAULT_PROFILING_UPLOAD_PERIOD = 60; // 1 min
+  public static final int DEFAULT_PROFILING_UPLOAD_TIMEOUT = 30; // seconds
+  public static final String DEFAULT_PROFILING_UPLOAD_COMPRESSION = "on";
+  public static final int DEFAULT_PROFILING_PROXY_PORT = 8080;

  private static final String SPLIT_BY_SPACE_OR_COMMA_REGEX = "[,\\s]+";

  private static final boolean DEFAULT_TRACE_REPORT_HOSTNAME = false;

@ -211,7 +244,6 @@ public class Config {
  @Getter private final Integer healthMetricsStatsdPort;
  @Getter private final boolean logsInjectionEnabled;
  @Getter private final boolean reportHostName;
  @Getter private final String traceAnnotations;

@ -228,6 +260,20 @@ public class Config {
  @Getter private final Double traceSampleRate;
  @Getter private final Double traceRateLimit;

+  @Getter private final boolean profilingEnabled;
+  @Getter private final String profilingUrl;
+  @Getter private final String profilingApiKey;
+  private final Map<String, String> profilingTags;
+  @Getter private final int profilingStartupDelay;
+  @Getter private final int profilingUploadPeriod;
+  @Getter private final String profilingTemplateOverrideFile;
+  @Getter private final int profilingUploadTimeout;
+  @Getter private final String profilingUploadCompression;
+  @Getter private final String profilingProxyHost;
+  @Getter private final int profilingProxyPort;
+  @Getter private final String profilingProxyUsername;
+  @Getter private final String profilingProxyPassword;

  // Values from an optionally provided properties file
  private static Properties propertiesFromConfigFile;

@ -336,7 +382,6 @@ public class Config {

    logsInjectionEnabled =
        getBooleanSettingFromEnvironment(LOGS_INJECTION_ENABLED, DEFAULT_LOGS_INJECTION_ENABLED);

    reportHostName =
        getBooleanSettingFromEnvironment(TRACE_REPORT_HOSTNAME, DEFAULT_TRACE_REPORT_HOSTNAME);

@ -358,6 +403,61 @@ public class Config {
    traceSampleRate = getDoubleSettingFromEnvironment(TRACE_SAMPLE_RATE, null);
    traceRateLimit = getDoubleSettingFromEnvironment(TRACE_RATE_LIMIT, DEFAULT_TRACE_RATE_LIMIT);

+    profilingEnabled =
+        getBooleanSettingFromEnvironment(PROFILING_ENABLED, DEFAULT_PROFILING_ENABLED);
+    profilingUrl = getSettingFromEnvironment(PROFILING_URL, DEFAULT_PROFILING_URL);
+    // Note: we do not want the API key to be loaded from a property for security reasons
+    // Note: we do not use the defined default here
+    // FIXME: We should use a better authentication mechanism
+    final String profilingApiKeyFile = getSettingFromEnvironment(PROFILING_API_KEY_FILE, null);
+    String tmpProfilingApiKey =
+        System.getenv(propertyNameToEnvironmentVariableName(PROFILING_API_KEY));
+    if (profilingApiKeyFile != null) {
+      try {
+        tmpProfilingApiKey =
+            new String(Files.readAllBytes(Paths.get(profilingApiKeyFile)), StandardCharsets.UTF_8)
+                .trim();
+      } catch (final IOException e) {
+        log.error("Cannot read API key from file {}, skipping", profilingApiKeyFile, e);
+      }
+    }
+    if (tmpProfilingApiKey == null) {
+      final String oldProfilingApiKeyFile =
+          getSettingFromEnvironment(PROFILING_API_KEY_FILE_OLD, null);
+      tmpProfilingApiKey =
+          System.getenv(propertyNameToEnvironmentVariableName(PROFILING_API_KEY_OLD));
+      if (oldProfilingApiKeyFile != null) {
+        try {
+          tmpProfilingApiKey =
+              new String(
+                      Files.readAllBytes(Paths.get(oldProfilingApiKeyFile)), StandardCharsets.UTF_8)
+                  .trim();
+        } catch (final IOException e) {
+          log.error("Cannot read API key from file {}, skipping", oldProfilingApiKeyFile, e);
+        }
+      }
+    }
+    profilingApiKey = tmpProfilingApiKey;
+
+    profilingTags = getMapSettingFromEnvironment(PROFILING_TAGS, null);
+    profilingStartupDelay =
+        getIntegerSettingFromEnvironment(PROFILING_STARTUP_DELAY, DEFAULT_PROFILING_STARTUP_DELAY);
+    profilingUploadPeriod =
+        getIntegerSettingFromEnvironment(PROFILING_UPLOAD_PERIOD, DEFAULT_PROFILING_UPLOAD_PERIOD);
+    profilingTemplateOverrideFile =
+        getSettingFromEnvironment(PROFILING_TEMPLATE_OVERRIDE_FILE, null);
+    profilingUploadTimeout =
+        getIntegerSettingFromEnvironment(
+            PROFILING_UPLOAD_TIMEOUT, DEFAULT_PROFILING_UPLOAD_TIMEOUT);
+    profilingUploadCompression =
+        getSettingFromEnvironment(
+            PROFILING_UPLOAD_COMPRESSION, DEFAULT_PROFILING_UPLOAD_COMPRESSION);
+    profilingProxyHost = getSettingFromEnvironment(PROFILING_PROXY_HOST, null);
+    profilingProxyPort =
+        getIntegerSettingFromEnvironment(PROFILING_PROXY_PORT, DEFAULT_PROFILING_PROXY_PORT);
+    profilingProxyUsername = getSettingFromEnvironment(PROFILING_PROXY_USERNAME, null);
+    profilingProxyPassword = getSettingFromEnvironment(PROFILING_PROXY_PASSWORD, null);

    log.debug("New instance: {}", this);
  }
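propertyNameToEnvironmentVariableName is referenced above but not shown in this diff. Judging by the constants used in ConfigTest below (DD_PROFILING_API_KEY for the key "profiling.api-key" and DD_PROFILING_APIKEY for "profiling.apikey"), it presumably upper-cases the property name, replaces dots and dashes with underscores, and prepends DD_; a sketch under that assumption:

public class EnvNameSketch {
  // Assumed mapping: "profiling.api-key" -> "DD_PROFILING_API_KEY",
  // "profiling.apikey" -> "DD_PROFILING_APIKEY"
  static String propertyNameToEnvironmentVariableName(final String propertyName) {
    return "DD_" + propertyName.toUpperCase().replace('.', '_').replace('-', '_');
  }
}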
@ -470,7 +570,6 @@ public class Config {

    logsInjectionEnabled =
        getBooleanSettingFromEnvironment(LOGS_INJECTION_ENABLED, DEFAULT_LOGS_INJECTION_ENABLED);

    reportHostName =
        getPropertyBooleanValue(properties, TRACE_REPORT_HOSTNAME, parent.reportHostName);

@ -494,6 +593,31 @@ public class Config {
    traceSampleRate = getPropertyDoubleValue(properties, TRACE_SAMPLE_RATE, parent.traceSampleRate);
    traceRateLimit = getPropertyDoubleValue(properties, TRACE_RATE_LIMIT, parent.traceRateLimit);

+    profilingEnabled =
+        getPropertyBooleanValue(properties, PROFILING_ENABLED, parent.profilingEnabled);
+    profilingUrl = properties.getProperty(PROFILING_URL, parent.profilingUrl);
+    profilingApiKey = properties.getProperty(PROFILING_API_KEY, parent.profilingApiKey);
+    profilingTags = getPropertyMapValue(properties, PROFILING_TAGS, parent.profilingTags);
+    profilingStartupDelay =
+        getPropertyIntegerValue(properties, PROFILING_STARTUP_DELAY, parent.profilingStartupDelay);
+    profilingUploadPeriod =
+        getPropertyIntegerValue(properties, PROFILING_UPLOAD_PERIOD, parent.profilingUploadPeriod);
+    profilingTemplateOverrideFile =
+        properties.getProperty(
+            PROFILING_TEMPLATE_OVERRIDE_FILE, parent.profilingTemplateOverrideFile);
+    profilingUploadTimeout =
+        getPropertyIntegerValue(
+            properties, PROFILING_UPLOAD_TIMEOUT, parent.profilingUploadTimeout);
+    profilingUploadCompression =
+        properties.getProperty(PROFILING_UPLOAD_COMPRESSION, parent.profilingUploadCompression);
+    profilingProxyHost = properties.getProperty(PROFILING_PROXY_HOST, parent.profilingProxyHost);
+    profilingProxyPort =
+        getPropertyIntegerValue(properties, PROFILING_PROXY_PORT, parent.profilingProxyPort);
+    profilingProxyUsername =
+        properties.getProperty(PROFILING_PROXY_USERNAME, parent.profilingProxyUsername);
+    profilingProxyPassword =
+        properties.getProperty(PROFILING_PROXY_PASSWORD, parent.profilingProxyPassword);

    log.debug("New instance: {}", this);
  }

@ -536,6 +660,26 @@ public class Config {
    return Collections.unmodifiableMap(result);
  }

+  public Map<String, String> getMergedProfilingTags() {
+    final Map<String, String> runtimeTags = getRuntimeTags();
+    final String host = getHostName();
+    final Map<String, String> result =
+        newHashMap(
+            globalTags.size()
+                + profilingTags.size()
+                + runtimeTags.size()
+                + 3 /* for serviceName, host and language */);
+    result.put(HOST_TAG, host); // Host goes first to allow overriding it
+    result.putAll(globalTags);
+    result.putAll(profilingTags);
+    result.putAll(runtimeTags);
+    // The service name is set here instead of in getRuntimeTags because APM already manages
+    // the service tag and may choose to override it.
+    result.put(SERVICE_TAG, serviceName);
+    result.put(LANGUAGE_TAG_KEY, LANGUAGE_TAG_VALUE);
+    return Collections.unmodifiableMap(result);
+  }
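The insertion order in getMergedProfilingTags is deliberate: later put/putAll calls win, so the detected host name goes in first and can be overridden by a user-supplied "host" tag, which is exactly what the smoke test's host:test-host profiling tag exercises. A tiny, self-contained illustration of that map semantics:

import java.util.HashMap;
import java.util.Map;

public class TagOverrideSketch {
  public static void main(final String[] args) {
    final Map<String, String> result = new HashMap<>();
    result.put("host", "detected-hostname"); // default goes in first
    final Map<String, String> profilingTags = new HashMap<>();
    profilingTags.put("host", "test-host"); // user-supplied tag
    result.putAll(profilingTags); // the later put wins
    System.out.println(result.get("host")); // prints "test-host"
  }
}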
  /**
   * Returns the sample rate for the specified instrumentation or {@link
   * #DEFAULT_ANALYTICS_SAMPLE_RATE} if none specified.
@ -16,6 +16,7 @@ import static datadog.trace.api.Config.HEADER_TAGS
import static datadog.trace.api.Config.HEALTH_METRICS_ENABLED
import static datadog.trace.api.Config.HEALTH_METRICS_STATSD_HOST
import static datadog.trace.api.Config.HEALTH_METRICS_STATSD_PORT
+import static datadog.trace.api.Config.HOST_TAG
import static datadog.trace.api.Config.HTTP_CLIENT_ERROR_STATUSES
import static datadog.trace.api.Config.HTTP_CLIENT_HOST_SPLIT_BY_DOMAIN
import static datadog.trace.api.Config.HTTP_SERVER_ERROR_STATUSES

@ -26,9 +27,26 @@ import static datadog.trace.api.Config.JMX_FETCH_REFRESH_BEANS_PERIOD
import static datadog.trace.api.Config.JMX_FETCH_STATSD_HOST
import static datadog.trace.api.Config.JMX_FETCH_STATSD_PORT
import static datadog.trace.api.Config.JMX_TAGS
+import static datadog.trace.api.Config.LANGUAGE_TAG_KEY
+import static datadog.trace.api.Config.LANGUAGE_TAG_VALUE
import static datadog.trace.api.Config.PARTIAL_FLUSH_MIN_SPANS
import static datadog.trace.api.Config.PREFIX
import static datadog.trace.api.Config.PRIORITY_SAMPLING
+import static datadog.trace.api.Config.PROFILING_API_KEY
+import static datadog.trace.api.Config.PROFILING_API_KEY_FILE
+import static datadog.trace.api.Config.PROFILING_API_KEY_FILE_OLD
+import static datadog.trace.api.Config.PROFILING_ENABLED
+import static datadog.trace.api.Config.PROFILING_PROXY_HOST
+import static datadog.trace.api.Config.PROFILING_PROXY_PASSWORD
+import static datadog.trace.api.Config.PROFILING_PROXY_PORT
+import static datadog.trace.api.Config.PROFILING_PROXY_USERNAME
+import static datadog.trace.api.Config.PROFILING_STARTUP_DELAY
+import static datadog.trace.api.Config.PROFILING_TAGS
+import static datadog.trace.api.Config.PROFILING_TEMPLATE_OVERRIDE_FILE
+import static datadog.trace.api.Config.PROFILING_UPLOAD_COMPRESSION
+import static datadog.trace.api.Config.PROFILING_UPLOAD_PERIOD
+import static datadog.trace.api.Config.PROFILING_UPLOAD_TIMEOUT
+import static datadog.trace.api.Config.PROFILING_URL
import static datadog.trace.api.Config.PROPAGATION_STYLE_EXTRACT
import static datadog.trace.api.Config.PROPAGATION_STYLE_INJECT
import static datadog.trace.api.Config.RUNTIME_CONTEXT_FIELD_INJECTION

@ -67,6 +85,9 @@ class ConfigTest extends DDSpecification {
  private static final DD_AGENT_PORT_LEGACY_ENV = "DD_AGENT_PORT"
  private static final DD_TRACE_REPORT_HOSTNAME = "DD_TRACE_REPORT_HOSTNAME"

+  private static final DD_PROFILING_API_KEY = "DD_PROFILING_API_KEY"
+  private static final DD_PROFILING_API_KEY_OLD = "DD_PROFILING_APIKEY"

  def "verify defaults"() {
    when:
    Config config = provider()

@ -100,9 +121,24 @@ class ConfigTest extends DDSpecification {
    config.jmxFetchRefreshBeansPeriod == null
    config.jmxFetchStatsdHost == null
    config.jmxFetchStatsdPort == DEFAULT_JMX_FETCH_STATSD_PORT

    config.healthMetricsEnabled == false
    config.healthMetricsStatsdHost == null
    config.healthMetricsStatsdPort == null

+    config.profilingEnabled == false
+    config.profilingUrl == Config.DEFAULT_PROFILING_URL
+    config.profilingApiKey == null
+    config.mergedProfilingTags == [(HOST_TAG): config.getHostName(), (RUNTIME_ID_TAG): config.getRuntimeId(), (SERVICE_TAG): config.serviceName, (LANGUAGE_TAG_KEY): LANGUAGE_TAG_VALUE]
+    config.profilingStartupDelay == 10
+    config.profilingUploadPeriod == 60
+    config.profilingTemplateOverrideFile == null
+    config.profilingUploadTimeout == 30
+    config.profilingProxyHost == null
+    config.profilingProxyPort == Config.DEFAULT_PROFILING_PROXY_PORT
+    config.profilingProxyUsername == null
+    config.profilingProxyPassword == null

    config.toString().contains("unnamed-java-app")

    where:

@ -154,6 +190,20 @@ class ConfigTest extends DDSpecification {
    prop.setProperty(TRACE_SAMPLE_RATE, ".5")
    prop.setProperty(TRACE_RATE_LIMIT, "200")

+    prop.setProperty(PROFILING_ENABLED, "true")
+    prop.setProperty(PROFILING_URL, "new url")
+    prop.setProperty(PROFILING_API_KEY, "new api key")
+    prop.setProperty(PROFILING_TAGS, "f:6,host:test-host")
+    prop.setProperty(PROFILING_STARTUP_DELAY, "1111")
+    prop.setProperty(PROFILING_UPLOAD_PERIOD, "1112")
+    prop.setProperty(PROFILING_TEMPLATE_OVERRIDE_FILE, "/path")
+    prop.setProperty(PROFILING_UPLOAD_TIMEOUT, "1116")
+    prop.setProperty(PROFILING_UPLOAD_COMPRESSION, "off")
+    prop.setProperty(PROFILING_PROXY_HOST, "proxy-host")
+    prop.setProperty(PROFILING_PROXY_PORT, "1118")
+    prop.setProperty(PROFILING_PROXY_USERNAME, "proxy-username")
+    prop.setProperty(PROFILING_PROXY_PASSWORD, "proxy-password")

    when:
    Config config = Config.get(prop)

@ -186,6 +236,7 @@ class ConfigTest extends DDSpecification {
    config.jmxFetchRefreshBeansPeriod == 200
    config.jmxFetchStatsdHost == "statsd host"
    config.jmxFetchStatsdPort == 321

    config.healthMetricsEnabled == true
    config.healthMetricsStatsdHost == "metrics statsd host"
    config.healthMetricsStatsdPort == 654

@ -193,6 +244,20 @@ class ConfigTest extends DDSpecification {
    config.traceSamplingOperationRules == [b: "1"]
    config.traceSampleRate == 0.5
    config.traceRateLimit == 200

+    config.profilingEnabled == true
+    config.profilingUrl == "new url"
+    config.profilingApiKey == "new api key" // we can still override via the internal properties object
+    config.mergedProfilingTags == [b: "2", f: "6", (HOST_TAG): "test-host", (RUNTIME_ID_TAG): config.getRuntimeId(), (SERVICE_TAG): config.serviceName, (LANGUAGE_TAG_KEY): LANGUAGE_TAG_VALUE]
+    config.profilingStartupDelay == 1111
+    config.profilingUploadPeriod == 1112
+    config.profilingUploadCompression == "off"
+    config.profilingTemplateOverrideFile == "/path"
+    config.profilingUploadTimeout == 1116
+    config.profilingProxyHost == "proxy-host"
+    config.profilingProxyPort == 1118
+    config.profilingProxyUsername == "proxy-username"
+    config.profilingProxyPassword == "proxy-password"
  }

  def "specify overrides via system properties"() {

@ -235,6 +300,20 @@ class ConfigTest extends DDSpecification {
    System.setProperty(PREFIX + TRACE_SAMPLE_RATE, ".5")
    System.setProperty(PREFIX + TRACE_RATE_LIMIT, "200")

+    System.setProperty(PREFIX + PROFILING_ENABLED, "true")
+    System.setProperty(PREFIX + PROFILING_URL, "new url")
+    System.setProperty(PREFIX + PROFILING_API_KEY, "new api key")
+    System.setProperty(PREFIX + PROFILING_TAGS, "f:6,host:test-host")
+    System.setProperty(PREFIX + PROFILING_STARTUP_DELAY, "1111")
+    System.setProperty(PREFIX + PROFILING_UPLOAD_PERIOD, "1112")
+    System.setProperty(PREFIX + PROFILING_TEMPLATE_OVERRIDE_FILE, "/path")
+    System.setProperty(PREFIX + PROFILING_UPLOAD_TIMEOUT, "1116")
+    System.setProperty(PREFIX + PROFILING_UPLOAD_COMPRESSION, "off")
+    System.setProperty(PREFIX + PROFILING_PROXY_HOST, "proxy-host")
+    System.setProperty(PREFIX + PROFILING_PROXY_PORT, "1118")
+    System.setProperty(PREFIX + PROFILING_PROXY_USERNAME, "proxy-username")
+    System.setProperty(PREFIX + PROFILING_PROXY_PASSWORD, "proxy-password")

    when:
    Config config = new Config()

@ -267,6 +346,7 @@ class ConfigTest extends DDSpecification {
    config.jmxFetchRefreshBeansPeriod == 200
    config.jmxFetchStatsdHost == "statsd host"
    config.jmxFetchStatsdPort == 321

    config.healthMetricsEnabled == true
    config.healthMetricsStatsdHost == "metrics statsd host"
    config.healthMetricsStatsdPort == 654

@ -274,6 +354,20 @@ class ConfigTest extends DDSpecification {
    config.traceSamplingOperationRules == [b: "1"]
    config.traceSampleRate == 0.5
    config.traceRateLimit == 200

+    config.profilingEnabled == true
+    config.profilingUrl == "new url"
+    config.profilingApiKey == null // system properties cannot be used to provide a key
+    config.mergedProfilingTags == [b: "2", f: "6", (HOST_TAG): "test-host", (RUNTIME_ID_TAG): config.getRuntimeId(), (SERVICE_TAG): config.serviceName, (LANGUAGE_TAG_KEY): LANGUAGE_TAG_VALUE]
+    config.profilingStartupDelay == 1111
+    config.profilingUploadPeriod == 1112
+    config.profilingTemplateOverrideFile == "/path"
+    config.profilingUploadTimeout == 1116
+    config.profilingUploadCompression == "off"
+    config.profilingProxyHost == "proxy-host"
+    config.profilingProxyPort == 1118
+    config.profilingProxyUsername == "proxy-username"
+    config.profilingProxyPassword == "proxy-password"
  }

  def "specify overrides via env vars"() {

@ -285,6 +379,7 @@ class ConfigTest extends DDSpecification {
    environmentVariables.set(DD_PROPAGATION_STYLE_INJECT, "Datadog B3")
    environmentVariables.set(DD_JMXFETCH_METRICS_CONFIGS_ENV, "some/file")
    environmentVariables.set(DD_TRACE_REPORT_HOSTNAME, "true")
+    environmentVariables.set(DD_PROFILING_API_KEY, "test-api-key")

    when:
    def config = new Config()

@ -297,6 +392,7 @@ class ConfigTest extends DDSpecification {
    config.propagationStylesToInject.toList() == [Config.PropagationStyle.DATADOG, Config.PropagationStyle.B3]
    config.jmxFetchMetricsConfigs == ["some/file"]
    config.reportHostName == true
+    config.profilingApiKey == "test-api-key"
  }

  def "sys props override env vars"() {

@ -405,6 +501,7 @@ class ConfigTest extends DDSpecification {
    false | false | true | true | 777 // env var gets picked up instead.
  }

+  // FIXME: this seems to be a repeated test
  def "sys props override properties"() {
    setup:
    Properties properties = new Properties()

@ -890,4 +987,50 @@ class ConfigTest extends DDSpecification {
    ["foo", "baz"] | 0.5f
    ["baz", "foo"] | 0.7f
  }

+  def "verify api key loaded from file: #path"() {
+    setup:
+    environmentVariables.set(DD_PROFILING_API_KEY, "default-api-key")
+    System.setProperty(PREFIX + PROFILING_API_KEY_FILE, path)
+
+    when:
+    def config = new Config()
+
+    then:
+    config.profilingApiKey == expectedKey
+
+    where:
+    path                                                        | expectedKey
+    getClass().getClassLoader().getResource("apikey").getFile() | "test-api-key"
+    "/path/that/doesnt/exist"                                   | "default-api-key"
+  }
+
+  def "verify api key loaded from file for old option name: #path"() {
+    setup:
+    environmentVariables.set(DD_PROFILING_API_KEY_OLD, "default-api-key")
+    System.setProperty(PREFIX + PROFILING_API_KEY_FILE_OLD, path)
+
+    when:
+    def config = new Config()
+
+    then:
+    config.profilingApiKey == expectedKey
+
+    where:
+    path                                                            | expectedKey
+    getClass().getClassLoader().getResource("apikey.old").getFile() | "test-api-key-old"
+    "/path/that/doesnt/exist"                                       | "default-api-key"
+  }
+
+  def "verify api key loaded from new option when both new and old are set"() {
+    setup:
+    System.setProperty(PREFIX + PROFILING_API_KEY_FILE_OLD, getClass().getClassLoader().getResource("apikey.old").getFile())
+    System.setProperty(PREFIX + PROFILING_API_KEY_FILE, getClass().getClassLoader().getResource("apikey").getFile())
+
+    when:
+    def config = new Config()
+
+    then:
+    config.profilingApiKey == "test-api-key"
+  }
}
@ -0,0 +1 @@
test-api-key

@ -0,0 +1 @@
test-api-key-old
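The two one-line resource files above hold the keys that the new ConfigTest cases load. Note that the key-file reader in Config trims the contents, so a trailing newline, which most editors append, is harmless; a quick self-contained check of that behavior:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

public class ApiKeyFileSketch {
  public static void main(final String[] args) throws IOException {
    final Path file = Files.createTempFile("apikey", null);
    Files.write(file, "test-api-key\n".getBytes(StandardCharsets.UTF_8));
    // Mirrors the read in Config: bytes -> UTF-8 string -> trim()
    final String key =
        new String(Files.readAllBytes(file), StandardCharsets.UTF_8).trim();
    System.out.println(key.equals("test-api-key")); // true
  }
}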
@ -0,0 +1,41 @@
// Set properties before any plugins get loaded
ext {
  minJavaVersionForTests = JavaVersion.VERSION_11
}

apply from: "${rootDir}/gradle/java.gradle"
apply plugin: 'idea'

dependencies {
  compile deps.slf4j
  compile project(':dd-trace-ot')
}

/*
 * The setup here is as follows:
 * We compile with the Java 11 compiler to get the JFR definitions.
 * We specify source/target as Java 8 to get code that is loadable on Java 8 - the JFR defs are Java 8 compatible.
 * We force IDEA to treat this as a Java 11 project with the 'idea' plugin below.
 * We run tests only on Java 11+.
 */
sourceCompatibility = JavaVersion.VERSION_1_8
targetCompatibility = JavaVersion.VERSION_1_8

[JavaCompile, GroovyCompile].each {
  tasks.withType(it) {
    doFirst {
      // Disable '-processing' because some annotations are not claimed.
      // Disable '-options' because we are compiling for java8 without specifying bootstrap - intentionally.
      // Disable '-path' because some of the paths seem to be missing.
      options.compilerArgs.addAll(['-Xlint:all,-processing,-options,-path', '-Werror'])
      options.fork = true
      options.forkOptions.javaHome = file(System.env.JAVA_11_HOME)
    }
  }
}

idea {
  module {
    jdkName = '11'
  }
}
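Compiling with the Java 11 compiler while emitting Java 8 bytecode means these classes link only where jdk.jfr.* exists at runtime; on older JVMs loading them fails, and the tracer later turns that failure into a silent fallback. A minimal sketch of that guarded load, the same pattern DDTracer uses later in this diff:

public class GuardedJfrLoadSketch {
  // Java 8 compatible bytecode that references jdk.jfr.* links fine on
  // JDK 11+, but loading it on a JVM without JFR throws, so callers probe
  // for the class reflectively and fall back to a noop implementation.
  public static boolean jfrEventsAvailable() {
    try {
      Class.forName("datadog.opentracing.jfr.openjdk.ScopeEventFactory");
      return true;
    } catch (final ClassNotFoundException | NoClassDefFoundError e) {
      return false;
    }
  }
}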
@ -0,0 +1,27 @@
package datadog.opentracing.jfr.openjdk;

import datadog.opentracing.DDTraceOTInfo;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class BlackList {

  private static final String VERSION = DDTraceOTInfo.JAVA_VERSION.split("\\.")[0];
  private static final Set<String> VERSION_BLACK_LIST;

  static {
    final Set<String> blackList = new HashSet<>();
    // Java 9 and 10 throw seg fault on MacOS if events are used in premain.
    // Since these versions are not LTS we just disable profiling events for them.
    blackList.add("9");
    blackList.add("10");
    VERSION_BLACK_LIST = Collections.unmodifiableSet(blackList);
  }

  public static void checkBlackList() throws ClassNotFoundException {
    if (VERSION_BLACK_LIST.contains(VERSION)) {
      throw new ClassNotFoundException("Blacklisted java version: " + DDTraceOTInfo.JAVA_VERSION);
    }
  }
}
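DDTraceOTInfo.JAVA_VERSION presumably carries the java.version system property. Its format changed across releases, which is why taking the segment before the first dot is enough here: on Java 8 it reads "1.8.0_252" (major segment "1", never blacklisted), on the blacklisted releases "9.0.4"/"10.0.2", and on Java 11 "11.0.7". For example:

public class JavaVersionSketch {
  public static void main(final String[] args) {
    // "1.8.0_252" -> "1", "9.0.4" -> "9", "10.0.2" -> "10", "11.0.7" -> "11"
    final String major = System.getProperty("java.version").split("\\.")[0];
    System.out.println(major);
  }
}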
@ -0,0 +1,65 @@
package datadog.opentracing.jfr.openjdk;

import datadog.opentracing.DDSpanContext;
import datadog.opentracing.jfr.DDScopeEvent;
import jdk.jfr.Category;
import jdk.jfr.Description;
import jdk.jfr.Event;
import jdk.jfr.Label;
import jdk.jfr.Name;
import jdk.jfr.StackTrace;

@Name("datadog.Scope")
@Label("Scope")
@Description("Datadog event corresponding to a scope.")
@Category("Datadog")
@StackTrace(false)
public final class ScopeEvent extends Event implements DDScopeEvent {

  private static final int IDS_RADIX = 16;

  private final transient DDSpanContext spanContext;

  @Label("Trace Id")
  private String traceId;

  @Label("Span Id")
  private String spanId;

  @Label("Parent Id")
  private String parentId;

  @Label("Service Name")
  private String serviceName;

  @Label("Resource Name")
  private String resourceName;

  @Label("Operation Name")
  private String operationName;

  ScopeEvent(final DDSpanContext spanContext) {
    this.spanContext = spanContext;
  }

  @Override
  public void start() {
    if (isEnabled()) {
      begin();
    }
  }

  @Override
  public void finish() {
    end();
    if (shouldCommit()) {
      traceId = spanContext.getTraceId().toString(IDS_RADIX);
      spanId = spanContext.getSpanId().toString(IDS_RADIX);
      parentId = spanContext.getParentId().toString(IDS_RADIX);
      serviceName = spanContext.getServiceName();
      resourceName = spanContext.getResourceName();
      operationName = spanContext.getOperationName();
      commit();
    }
  }
}
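isEnabled() and shouldCommit() come from jdk.jfr.Event and only return true while a recording that has this event type enabled is in flight, so the field assignments above are skipped entirely when no one is profiling. To capture these events in a standalone in-process recording (JDK 11+):

import jdk.jfr.Recording;

public class ScopeRecordingSketch {
  public static void main(final String[] args) throws InterruptedException {
    try (Recording recording = new Recording()) {
      recording.enable("datadog.Scope"); // enable the custom event by name
      recording.start();
      // ... run traced code; ScopeEvent.begin()/commit() record data now ...
      Thread.sleep(100);
      recording.stop();
    }
  }
}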
@ -0,0 +1,25 @@
package datadog.opentracing.jfr.openjdk;

import datadog.opentracing.DDSpanContext;
import datadog.opentracing.jfr.DDNoopScopeEvent;
import datadog.opentracing.jfr.DDScopeEvent;
import datadog.opentracing.jfr.DDScopeEventFactory;
import jdk.jfr.EventType;

/** Event factory for {@link ScopeEvent} */
public class ScopeEventFactory implements DDScopeEventFactory {

  private final EventType eventType;

  public ScopeEventFactory() throws ClassNotFoundException {
    BlackList.checkBlackList();
    // Note: Loading ScopeEvent when ScopeEventFactory is loaded is important because it also loads
    // JFR classes - which may not be present on some JVMs
    eventType = EventType.getEventType(ScopeEvent.class);
  }

  @Override
  public DDScopeEvent create(final DDSpanContext context) {
    return eventType.isEnabled() ? new ScopeEvent(context) : DDNoopScopeEvent.INSTANCE;
  }
}
@ -0,0 +1,26 @@
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import jdk.jfr.Recording;
import jdk.jfr.consumer.RecordingFile;

public class JfrHelper {

  public static Object startRecording() {
    final Recording recording = new Recording();
    recording.start();
    return recording;
  }

  public static List<?> stopRecording(final Object object) throws IOException {
    final Recording recording = (Recording) object;
    final Path output = Files.createTempFile("recording", ".jfr");
    output.toFile().deleteOnExit();
    recording.dump(output);
    recording.stop();
    recording.close();

    return RecordingFile.readAllEvents(output);
  }
}
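RecordingFile.readAllEvents returns a List of RecordedEvent; the helper above deliberately erases the types so callers compiled for Java 8 need no jdk.jfr imports. Reading the recorded fields back out looks roughly like this (the file name is illustrative):

import java.io.IOException;
import java.nio.file.Paths;
import jdk.jfr.consumer.RecordedEvent;
import jdk.jfr.consumer.RecordingFile;

public class ReadEventsSketch {
  public static void main(final String[] args) throws IOException {
    for (final RecordedEvent event : RecordingFile.readAllEvents(Paths.get("recording.jfr"))) {
      if ("datadog.Scope".equals(event.getEventType().getName())) {
        System.out.println(event.getString("traceId") + " " + event.getDuration());
      }
    }
  }
}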
@ -0,0 +1,31 @@
import datadog.opentracing.jfr.DDNoopScopeEvent
import datadog.opentracing.jfr.openjdk.ScopeEvent
import datadog.opentracing.jfr.openjdk.ScopeEventFactory
import spock.lang.Requires
import spock.lang.Specification

@Requires({ jvm.java11Compatible })
class ScopeEventFactoryTest extends Specification {

  def factory = new ScopeEventFactory()

  def "Returns noop event if profiling is not running"() {
    when:
    def event = factory.create(null)

    then:
    event == DDNoopScopeEvent.INSTANCE
  }

  def "Returns real event if profiling is running"() {
    setup:
    def recording = JfrHelper.startRecording()

    when:
    def event = factory.create(null)
    JfrHelper.stopRecording(recording)

    then:
    event instanceof ScopeEvent
  }
}
@ -0,0 +1,99 @@
import datadog.opentracing.DDSpanContext
import datadog.opentracing.DDTracer
import datadog.opentracing.PendingTrace
import datadog.trace.api.sampling.PrioritySampling
import datadog.trace.common.sampling.RateByServiceSampler
import datadog.trace.common.writer.ListWriter
import datadog.trace.context.TraceScope
import io.opentracing.Scope
import io.opentracing.Span
import spock.lang.Requires
import spock.lang.Specification

import java.time.Duration

import static datadog.trace.api.Config.DEFAULT_SERVICE_NAME

@Requires({ jvm.java11Compatible })
class ScopeEventTest extends Specification {

  private static final int IDS_RADIX = 16
  private static final Duration SLEEP_DURATION = Duration.ofSeconds(1)

  def writer = new ListWriter()
  def tracer = new DDTracer(DEFAULT_SERVICE_NAME, writer, new RateByServiceSampler(), [:])

  def parentContext =
    new DDSpanContext(
      123,
      432,
      222,
      "fakeService",
      "fakeOperation",
      "fakeResource",
      PrioritySampling.UNSET,
      null,
      [:],
      false,
      "fakeType",
      null,
      new PendingTrace(tracer, 123, [:]),
      tracer)
  def builder = tracer.buildSpan("test operation")
    .asChildOf(parentContext)
    .withServiceName("test service")
    .withResourceName("test resource")

  def "Scope event is written"() {
    setup:
    def recording = JfrHelper.startRecording()

    when:
    Scope scope = builder.startActive(false)
    Span span = scope.span()
    sleep(SLEEP_DURATION.toMillis())
    scope.close()
    def events = JfrHelper.stopRecording(recording)
    span.finish()

    then:
    events.size() == 1
    def event = events[0]
    event.eventType.name == "datadog.Scope"
    event.duration >= SLEEP_DURATION
    event.getString("traceId") == span.context().traceId.toString(IDS_RADIX)
    event.getString("spanId") == span.context().spanId.toString(IDS_RADIX)
    event.getString("parentId") == span.context().parentId.toString(IDS_RADIX)
    event.getString("serviceName") == "test service"
    event.getString("resourceName") == "test resource"
    event.getString("operationName") == "test operation"
  }

  def "Scope event is written after continuation activation"() {
    setup:
    TraceScope parentScope = builder.startActive(false)
    parentScope.setAsyncPropagation(true)
    Span span = parentScope.span()
    TraceScope.Continuation continuation = parentScope.capture()
    def recording = JfrHelper.startRecording()

    when:
    TraceScope scope = continuation.activate()
    sleep(SLEEP_DURATION.toMillis())
    scope.close()
    def events = JfrHelper.stopRecording(recording)
    span.finish()

    then:
    events.size() == 1
    def event = events[0]
    event.eventType.name == "datadog.Scope"
    event.duration >= SLEEP_DURATION
    event.getString("traceId") == span.context().traceId.toString(IDS_RADIX)
    event.getString("spanId") == span.context().spanId.toString(IDS_RADIX)
    event.getString("parentId") == span.context().parentId.toString(IDS_RADIX)
    event.getString("serviceName") == "test service"
    event.getString("resourceName") == "test resource"
    event.getString("operationName") == "test operation"
  }
}
@ -2,6 +2,8 @@ package datadog.opentracing;

import datadog.opentracing.decorators.AbstractDecorator;
import datadog.opentracing.decorators.DDDecoratorsFactory;
+import datadog.opentracing.jfr.DDNoopScopeEventFactory;
+import datadog.opentracing.jfr.DDScopeEventFactory;
import datadog.opentracing.propagation.ExtractedContext;
import datadog.opentracing.propagation.HttpCodec;
import datadog.opentracing.propagation.TagContext;

@ -115,7 +117,8 @@ public class DDTracer implements io.opentracing.Tracer, Closeable, datadog.trace
    sampler(Sampler.Builder.forConfig(config));
    injector(HttpCodec.createInjector(config));
    extractor(HttpCodec.createExtractor(config, config.getHeaderTags()));
-    scopeManager(new ContextualScopeManager(config.getScopeDepthLimit()));
+    scopeManager(
+        new ContextualScopeManager(config.getScopeDepthLimit(), createScopeEventFactory()));
    localRootSpanTags(config.getLocalRootSpanTags());
    defaultSpanTags(config.getMergedSpanTags());
    serviceNameMappings(config.getServiceMapping());

@ -260,7 +263,7 @@ public class DDTracer implements io.opentracing.Tracer, Closeable, datadog.trace
        sampler,
        HttpCodec.createInjector(Config.get()),
        HttpCodec.createExtractor(Config.get(), taggedHeaders),
-        new ContextualScopeManager(Config.get().getScopeDepthLimit()),
+        new ContextualScopeManager(Config.get().getScopeDepthLimit(), createScopeEventFactory()),
        localRootSpanTags,
        defaultSpanTags,
        serviceNameMappings,

@ -551,6 +554,16 @@ public class DDTracer implements io.opentracing.Tracer, Closeable, datadog.trace
    return Collections.unmodifiableMap(runtimeTags);
  }

+  private static DDScopeEventFactory createScopeEventFactory() {
+    try {
+      return (DDScopeEventFactory)
+          Class.forName("datadog.opentracing.jfr.openjdk.ScopeEventFactory").newInstance();
+    } catch (final ClassFormatError | ReflectiveOperationException | NoClassDefFoundError e) {
+      log.debug("Cannot create Openjdk JFR scope event factory", e);
+    }
+    return new DDNoopScopeEventFactory();
+  }

  /** Spans are built using this builder */
  public class DDSpanBuilder implements SpanBuilder {
    private final ScopeManager scopeManager;
@ -0,0 +1,17 @@
package datadog.opentracing.jfr;

/** Scope event implementation that does no reporting */
public final class DDNoopScopeEvent implements DDScopeEvent {

  public static final DDNoopScopeEvent INSTANCE = new DDNoopScopeEvent();

  @Override
  public void start() {
    // Noop
  }

  @Override
  public void finish() {
    // Noop
  }
}
@ -0,0 +1,11 @@
package datadog.opentracing.jfr;

import datadog.opentracing.DDSpanContext;

/** Event factory that returns {@link DDNoopScopeEvent} */
public final class DDNoopScopeEventFactory implements DDScopeEventFactory {
  @Override
  public DDScopeEvent create(final DDSpanContext context) {
    return DDNoopScopeEvent.INSTANCE;
  }
}
@ -0,0 +1,9 @@
package datadog.opentracing.jfr;

/** Scope event */
public interface DDScopeEvent {

  void start();

  void finish();
}
@ -0,0 +1,15 @@
package datadog.opentracing.jfr;

import datadog.opentracing.DDSpanContext;

/** Factory that produces scope events */
public interface DDScopeEventFactory {

  /**
   * Create a new scope event for the given context.
   *
   * @param context span context.
   * @return scope event instance
   */
  DDScopeEvent create(final DDSpanContext context);
}
@ -1,6 +1,7 @@
package datadog.opentracing.scopemanager;

import datadog.opentracing.DDSpan;
+import datadog.opentracing.jfr.DDScopeEventFactory;
import datadog.trace.context.ScopeListener;
import io.opentracing.Scope;
import io.opentracing.ScopeManager;

@ -19,9 +20,11 @@ public class ContextualScopeManager implements ScopeManager {
  final List<ScopeListener> scopeListeners = new CopyOnWriteArrayList<>();

  private final int depthLimit;
+  private final DDScopeEventFactory scopeEventFactory;

-  public ContextualScopeManager(final int depthLimit) {
+  public ContextualScopeManager(final int depthLimit, final DDScopeEventFactory scopeEventFactory) {
    this.depthLimit = depthLimit;
+    this.scopeEventFactory = scopeEventFactory;
  }

  @Override

@ -40,7 +43,7 @@ public class ContextualScopeManager implements ScopeManager {
      }
    }
    if (span instanceof DDSpan) {
-      return new ContinuableScope(this, (DDSpan) span, finishOnClose);
+      return new ContinuableScope(this, (DDSpan) span, finishOnClose, scopeEventFactory);
    } else {
      return new SimpleScope(this, span, finishOnClose);
    }
@@ -3,6 +3,8 @@ package datadog.opentracing.scopemanager;
 import datadog.opentracing.DDSpan;
 import datadog.opentracing.DDSpanContext;
 import datadog.opentracing.PendingTrace;
+import datadog.opentracing.jfr.DDScopeEvent;
+import datadog.opentracing.jfr.DDScopeEventFactory;
 import datadog.trace.context.ScopeListener;
 import datadog.trace.context.TraceScope;
 import java.io.Closeable;
@@ -19,6 +21,10 @@ public class ContinuableScope implements DDScope, TraceScope {
    * Span contained by this scope. Async scopes will hold a reference to the parent scope's span.
    */
   private final DDSpan spanUnderScope;
+
+  private final DDScopeEventFactory eventFactory;
+  /** Event for this scope */
+  private final DDScopeEvent event;
   /** If true, finish the span when openCount hits 0. */
   private final boolean finishOnClose;
   /** Count of open scope and continuations */
@@ -35,8 +41,9 @@ public class ContinuableScope implements DDScope, TraceScope {
   ContinuableScope(
       final ContextualScopeManager scopeManager,
       final DDSpan spanUnderScope,
-      final boolean finishOnClose) {
-    this(scopeManager, new AtomicInteger(1), null, spanUnderScope, finishOnClose);
+      final boolean finishOnClose,
+      final DDScopeEventFactory eventFactory) {
+    this(scopeManager, new AtomicInteger(1), null, spanUnderScope, finishOnClose, eventFactory);
   }

   private ContinuableScope(
@@ -44,13 +51,17 @@ public class ContinuableScope implements DDScope, TraceScope {
       final AtomicInteger openCount,
       final Continuation continuation,
       final DDSpan spanUnderScope,
-      final boolean finishOnClose) {
+      final boolean finishOnClose,
+      final DDScopeEventFactory eventFactory) {
     assert spanUnderScope != null : "span must not be null";
     this.scopeManager = scopeManager;
     this.openCount = openCount;
     this.continuation = continuation;
     this.spanUnderScope = spanUnderScope;
     this.finishOnClose = finishOnClose;
+    this.eventFactory = eventFactory;
+    event = eventFactory.create(spanUnderScope.context());
+    event.start();
     toRestore = scopeManager.tlsScope.get();
     scopeManager.tlsScope.set(this);
     depth = toRestore == null ? 0 : toRestore.depth() + 1;
@@ -61,6 +72,11 @@ public class ContinuableScope implements DDScope, TraceScope {

   @Override
   public void close() {
+    // We have to finish the scope event before we finish the span (which finishes the span event).
+    // The reason is that the event is obtained at construction time, and the span event starts when
+    // the span is created. So, from the JFR perspective, the scope is included in the span.
+    event.finish();
+
     if (null != continuation) {
       spanUnderScope.context().getTrace().cancelContinuation(continuation);
     }
@@ -135,7 +151,7 @@ public class ContinuableScope implements DDScope, TraceScope {

     private Continuation() {
       openCount.incrementAndGet();
-      final DDSpanContext context = (DDSpanContext) spanUnderScope.context();
+      final DDSpanContext context = spanUnderScope.context();
       trace = context.getTrace();
       trace.registerContinuation(this);
     }
@@ -144,14 +160,15 @@ public class ContinuableScope implements DDScope, TraceScope {
     @Override
     public ContinuableScope activate() {
       if (used.compareAndSet(false, true)) {
         final ContinuableScope scope =
-            new ContinuableScope(scopeManager, openCount, this, spanUnderScope, finishOnClose);
+            new ContinuableScope(
+                scopeManager, openCount, this, spanUnderScope, finishOnClose, eventFactory);
         log.debug("Activating continuation {}, scope: {}", this, scope);
         return scope;
       } else {
         log.debug(
             "Failed to activate continuation. Reusing a continuation not allowed. Returning a new scope. Spans will not be linked.");
         return new ContinuableScope(
-            scopeManager, new AtomicInteger(1), null, spanUnderScope, finishOnClose);
+            scopeManager, new AtomicInteger(1), null, spanUnderScope, finishOnClose, eventFactory);
       }
     }
@@ -11,6 +11,7 @@ ext {

     spock          : "1.3-groovy-$spockGroovyVer",
     groovy         : groovyVer,
+    junit5         : "5.5.2",
     logback        : "1.2.3",
     lombok         : "1.18.10",
     bytebuddy      : "1.10.6",
@@ -52,6 +53,7 @@ ext {
       dependencies.create(group: 'org.objenesis', name: 'objenesis', version: '2.6') // Last version to support Java7
     ],
     groovy         : "org.codehaus.groovy:groovy-all:${versions.groovy}",
+    junit5         : "org.junit.jupiter:junit-jupiter:${versions.junit5}",
     testcontainers : "org.testcontainers:testcontainers:1.12.2",
     testLogging    : [
       dependencies.create(group: 'ch.qos.logback', name: 'logback-classic', version: versions.logback),
@@ -123,6 +123,9 @@ repositories {
   mavenLocal()
   jcenter()
   mavenCentral()
+  maven {
+    url "https://adoptopenjdk.jfrog.io/adoptopenjdk/jmc-libs-snapshots"
+  }
 }

 dependencies {
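Note: this extra repository serves snapshot builds of the JDK Mission Control (JMC) libraries; presumably the profiling modules consume them to work with JFR recordings, since JMC release artifacts were not generally available from Maven Central at the time.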
@@ -336,6 +339,10 @@ for (def env : System.getenv().entrySet()) {
 }

 tasks.withType(Test).configureEach {
+  if (project.findProperty("enableJunitPlatform") == true) {
+    useJUnitPlatform()
+  }
+
   // All tests must complete within 3 minutes.
   timeout = Duration.ofMinutes(3)
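Note: the guard compares against Boolean true, while a command-line -PenableJunitPlatform=true sets the String "true", which Groovy's == does not equate with Boolean true. So the switch is effectively flipped programmatically (e.g. a subproject declaring ext.enableJunitPlatform = true); projects that do not opt in keep running on the default JUnit 4/Spock runner.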
@@ -21,6 +21,7 @@ rootProject.name = 'dd-trace-java'
 // external apis
 include ':dd-trace-api'
 include ':dd-trace-ot'
+include ':dd-trace-ot:jfr-openjdk'

 // agent projects
 include ':dd-java-agent'
@@ -29,6 +30,12 @@ include ':dd-java-agent:agent-tooling'
 include ':dd-java-agent:agent-jmxfetch'
 include ':dd-java-agent:load-generator'

+include ':dd-java-agent:agent-profiling'
+include ':dd-java-agent:agent-profiling:profiling-controller'
+include ':dd-java-agent:agent-profiling:profiling-controller-openjdk'
+include ':dd-java-agent:agent-profiling:profiling-uploader'
+include ':dd-java-agent:agent-profiling:profiling-testing'
+
 // misc
 include ':dd-java-agent:testing'
 include ':utils:test-utils'
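Note: judging by the module names alone, the profiling agent is split into a controller layer (recording management, with an OpenJDK/JFR-specific implementation), an uploader that ships finished recordings, and test support; the exact responsibilities are not visible in this excerpt.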
@@ -40,6 +47,7 @@ include ':dd-smoke-tests:java9-modules'
 include ':dd-smoke-tests:play'
 include ':dd-smoke-tests:springboot'
 include ':dd-smoke-tests:wildfly'
+include ':dd-smoke-tests:profiling-integration-tests'

 // instrumentation:
 include ':dd-java-agent:instrumentation:akka-http-10.0'