mirror of https://github.com/grpc/grpc-java.git
xds: Unconditionally apply backoff on ADS stream recreation
This would limit ADS stream creation to one per second, even if the old stream was considered good because it received a response. This shouldn't really ever trigger, and if it does, 1 QPS is probably still too high. But 1 QPS is _substantially_ better than a closed loop, and there are very few additional signals we could use to avoid resetting the backoff. b/224833499
This commit is contained in:
parent 72ae95792c
commit 957079194a
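For illustration, here is a minimal, self-contained sketch of the delay computation the diff below introduces. BackoffSketch, its BackoffPolicy interface, and the sample values are stand-ins invented for this example, not grpc-java's actual internals:

import java.util.concurrent.TimeUnit;

final class BackoffSketch {
  // Stand-in for grpc-java's BackoffPolicy: each call yields the next delay.
  interface BackoffPolicy {
    long nextBackoffNanos();
  }

  // The new behavior: always consult the backoff policy, crediting the time
  // the previous stream stayed alive against the required delay. A long-lived
  // stream still retries with no added delay (the result clamps to 0), but a
  // stream that dies right after a response can no longer retry in a closed loop.
  static long retryDelayNanos(BackoffPolicy policy, long streamLifetimeNanos) {
    return Math.max(0, policy.nextBackoffNanos() - streamLifetimeNanos);
  }

  public static void main(String[] args) {
    BackoffPolicy oneSecond = () -> TimeUnit.SECONDS.toNanos(1);
    // Stream died 10 ms after creation: wait the remaining ~990 ms.
    System.out.println(retryDelayNanos(oneSecond, TimeUnit.MILLISECONDS.toNanos(10)));
    // Stream lived 5 s before dying: retry with no added delay.
    System.out.println(retryDelayNanos(oneSecond, TimeUnit.SECONDS.toNanos(5)));
  }
}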
@@ -470,14 +470,9 @@ final class AbstractXdsClient {
         // has never been initialized.
         retryBackoffPolicy = backoffPolicyProvider.get();
       }
-      long delayNanos = 0;
-      if (!responseReceived) {
-        delayNanos =
-            Math.max(
-                0,
-                retryBackoffPolicy.nextBackoffNanos()
-                    - stopwatch.elapsed(TimeUnit.NANOSECONDS));
-      }
+      long delayNanos = Math.max(
+          0,
+          retryBackoffPolicy.nextBackoffNanos() - stopwatch.elapsed(TimeUnit.NANOSECONDS));
       logger.log(XdsLogLevel.INFO, "Retry ADS stream in {0} ns", delayNanos);
       rpcRetryTimer = syncContext.schedule(
           new RpcRetryTask(), delayNanos, TimeUnit.NANOSECONDS, timeService);
@@ -2526,9 +2526,13 @@ public abstract class ClientXdsClientTestBase {
     verify(edsResourceWatcher, times(3)).onError(errorCaptor.capture());
     assertThat(errorCaptor.getValue().getCode()).isEqualTo(Code.DEADLINE_EXCEEDED);
 
-    // Reset backoff sequence and retry immediately.
+    // Reset backoff sequence and retry after backoff.
     inOrder.verify(backoffPolicyProvider).get();
-    fakeClock.runDueTasks();
+    inOrder.verify(backoffPolicy2).nextBackoffNanos();
+    retryTask =
+        Iterables.getOnlyElement(fakeClock.getPendingTasks(RPC_RETRY_TASK_FILTER));
+    assertThat(retryTask.getDelay(TimeUnit.NANOSECONDS)).isEqualTo(20L);
+    fakeClock.forwardNanos(20L);
     call = resourceDiscoveryCalls.poll();
     call.verifyRequest(LDS, LDS_RESOURCE, "63", "", NODE);
     call.verifyRequest(RDS, RDS_RESOURCE, "5", "", NODE);
@@ -2550,8 +2554,8 @@ public abstract class ClientXdsClientTestBase {
     inOrder.verify(backoffPolicy2).nextBackoffNanos();
     retryTask =
         Iterables.getOnlyElement(fakeClock.getPendingTasks(RPC_RETRY_TASK_FILTER));
-    assertThat(retryTask.getDelay(TimeUnit.NANOSECONDS)).isEqualTo(20L);
-    fakeClock.forwardNanos(20L);
+    assertThat(retryTask.getDelay(TimeUnit.NANOSECONDS)).isEqualTo(200L);
+    fakeClock.forwardNanos(200L);
     call = resourceDiscoveryCalls.poll();
     call.verifyRequest(LDS, LDS_RESOURCE, "63", "", NODE);
     call.verifyRequest(RDS, RDS_RESOURCE, "5", "", NODE);
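The test changes above stop running the retry task immediately and instead assert on its scheduled delay before advancing a fake clock. A toy version of that pattern, in case the mechanics are unfamiliar: this FakeClock is a stand-in written for this sketch, not grpc-java's test utility, and the 20 ns delay mirrors the stubbed backoff value in the test above.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

final class FakeClockSketch {
  static final class Task {
    final long dueNanos;
    final Runnable runnable;

    Task(long dueNanos, Runnable runnable) {
      this.dueNanos = dueNanos;
      this.runnable = runnable;
    }
  }

  // Minimal deterministic scheduler: a task fires only when forwardNanos()
  // advances time past its due point, so a test can inspect pending delays.
  static final class FakeClock {
    private long nowNanos;
    final List<Task> pending = new ArrayList<>();

    void schedule(Runnable task, long delayNanos) {
      pending.add(new Task(nowNanos + delayNanos, task));
    }

    void forwardNanos(long nanos) {
      nowNanos += nanos;
      for (Iterator<Task> it = pending.iterator(); it.hasNext(); ) {
        Task task = it.next();
        if (task.dueNanos <= nowNanos) {
          it.remove();
          task.runnable.run();
        }
      }
    }
  }

  public static void main(String[] args) {
    FakeClock fakeClock = new FakeClock();
    fakeClock.schedule(() -> System.out.println("ADS stream recreated"), 20L);
    System.out.println("pending tasks: " + fakeClock.pending.size()); // 1
    fakeClock.forwardNanos(19L); // backoff not yet elapsed; nothing fires
    fakeClock.forwardNanos(1L);  // 20 ns reached; the retry task runs
    System.out.println("pending tasks: " + fakeClock.pending.size()); // 0
  }
}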