mirror of https://github.com/grpc/grpc-java.git
interop-testing: Use separate event loops in RetryTest
The RetryTest was flaky, and the flakiness appears to have been caused by the client and server being assigned to the same event loop. Separating the two reduces the flake rate from ~3% to less than 0.1% (no flakes in 1,000 runs). While I was fixing the executors, I also reduced the number of threads created and shut the threads down once they are no longer needed. This had no impact on the flake rate (again, no flakes in 1,000 runs).
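For illustration, here is a minimal standalone sketch of the pattern this commit moves to: the client and the server each get their own single-threaded event loop group over an in-process Netty LocalChannel transport, and both groups are released when the test is done. This is not the test's actual code; the class name, address name, and overall shape are illustrative assumptions based on the diff below.

import io.grpc.ManagedChannel;
import io.grpc.Server;
import io.grpc.netty.NettyChannelBuilder;
import io.grpc.netty.NettyServerBuilder;
import io.netty.channel.DefaultEventLoopGroup;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.local.LocalAddress;
import io.netty.channel.local.LocalChannel;
import io.netty.channel.local.LocalServerChannel;

public class SeparateEventLoopsSketch {
  public static void main(String[] args) throws Exception {
    LocalAddress address = new LocalAddress("separate-event-loops-sketch");

    // One single-threaded event loop group per side. With a shared group,
    // the client and server can land on the same event loop thread.
    EventLoopGroup serverGroup = new DefaultEventLoopGroup(1);
    EventLoopGroup clientGroup = new DefaultEventLoopGroup(1);

    Server server = NettyServerBuilder.forAddress(address)
        .channelType(LocalServerChannel.class)
        .bossEventLoopGroup(serverGroup)
        .workerEventLoopGroup(serverGroup)
        .build()
        .start();

    ManagedChannel channel = NettyChannelBuilder.forAddress(address)
        .channelType(LocalChannel.class, LocalAddress.class)
        .eventLoopGroup(clientGroup)
        .usePlaintext()
        .build();

    // ... issue RPCs on the channel here ...

    channel.shutdownNow();
    server.shutdownNow();
    // Shut the event loop threads down once they are no longer needed.
    clientGroup.shutdownGracefully();
    serverGroup.shutdownGracefully();
  }
}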
parent 7ba0718bb9
commit 95b847e799
@@ -77,6 +77,7 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
+import org.junit.After;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -110,7 +111,7 @@ public class RetryTest {
       mock(ClientCall.Listener.class, delegatesTo(testCallListener));

   private CountDownLatch backoffLatch = new CountDownLatch(1);
-  private final EventLoopGroup group = new DefaultEventLoopGroup() {
+  private final EventLoopGroup clientGroup = new DefaultEventLoopGroup(1) {
     @SuppressWarnings("FutureReturnValueIgnored")
     @Override
     public ScheduledFuture<?> schedule(
@@ -122,7 +123,7 @@ public class RetryTest {
           new Runnable() {
             @Override
             public void run() {
-              group.execute(command);
+              clientGroup.execute(command);
             }
           },
           delay,
@@ -137,6 +138,7 @@ public class RetryTest {
           TimeUnit.NANOSECONDS);
     }
   };
+  private final EventLoopGroup serverGroup = new DefaultEventLoopGroup(1);
   private final FakeStatsRecorder clientStatsRecorder = new FakeStatsRecorder();
   private final ClientInterceptor statsInterceptor =
       InternalCensusStatsAccessor.getClientInterceptor(
@@ -173,11 +175,18 @@ public class RetryTest {
   private Map<String, Object> retryPolicy = null;
   private long bufferLimit = 1L << 20; // 1M

+  @After
+  @SuppressWarnings("FutureReturnValueIgnored")
+  public void tearDown() {
+    clientGroup.shutdownGracefully();
+    serverGroup.shutdownGracefully();
+  }
+
   private void startNewServer() throws Exception {
     localServer = cleanupRule.register(NettyServerBuilder.forAddress(localAddress)
         .channelType(LocalServerChannel.class)
-        .bossEventLoopGroup(group)
-        .workerEventLoopGroup(group)
+        .bossEventLoopGroup(serverGroup)
+        .workerEventLoopGroup(serverGroup)
         .addService(serviceDefinition)
         .build());
     localServer.start();
@@ -196,7 +205,7 @@ public class RetryTest {
     channel = cleanupRule.register(
         NettyChannelBuilder.forAddress(localAddress)
             .channelType(LocalChannel.class, LocalAddress.class)
-            .eventLoopGroup(group)
+            .eventLoopGroup(clientGroup)
             .usePlaintext()
             .enableRetry()
             .perRpcBufferLimit(bufferLimit)