remove shadowing of netty && grpc

Signed-off-by: iosmanthus <myosmanthustree@gmail.com>
iosmanthus 2022-11-23 15:02:06 +08:00
parent 5d0ec62e75
commit 60f635ed9c
No known key found for this signature in database
GPG Key ID: DEE5BAABFE092169
13 changed files with 0 additions and 10523 deletions
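The files deleted below are in-tree (shadowed) copies of upstream grpc-java and grpc-netty sources, kept so that each PerfMark task could also be timed with a Prometheus histogram (via org.tikv.common.util.HistogramUtils). A minimal sketch of the recurring pattern, using names from the deleted ClientCallImpl:

    // Pattern repeated throughout the shadowed classes: bracket each PerfMark
    // task with a Prometheus Histogram.Timer, recorded in a finally block.
    PerfMark.startTask("ClientCall.start", tag);
    Histogram.Timer timer =
        perfmarkClientCallImplDuration.labels("ClientCall.start").startTimer();
    try {
      startInternal(observer, headers);
    } finally {
      PerfMark.stopTask("ClientCall.start", tag);
      timer.observeDuration();
    }

Removing the shadowed files drops this extra instrumentation, presumably deferring to the stock upstream artifacts.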

File diff suppressed because it is too large.


@@ -1,854 +0,0 @@
/*
* Copyright 2014 The gRPC Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.grpc.internal;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.util.concurrent.MoreExecutors.directExecutor;
import static io.grpc.Contexts.statusFromCancelled;
import static io.grpc.Status.DEADLINE_EXCEEDED;
import static io.grpc.internal.GrpcUtil.CONTENT_ACCEPT_ENCODING_KEY;
import static io.grpc.internal.GrpcUtil.CONTENT_ENCODING_KEY;
import static io.grpc.internal.GrpcUtil.CONTENT_LENGTH_KEY;
import static io.grpc.internal.GrpcUtil.MESSAGE_ACCEPT_ENCODING_KEY;
import static io.grpc.internal.GrpcUtil.MESSAGE_ENCODING_KEY;
import static java.lang.Math.max;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.MoreObjects;
import io.grpc.Attributes;
import io.grpc.CallOptions;
import io.grpc.ClientCall;
import io.grpc.ClientStreamTracer;
import io.grpc.Codec;
import io.grpc.Compressor;
import io.grpc.CompressorRegistry;
import io.grpc.Context;
import io.grpc.Context.CancellationListener;
import io.grpc.Deadline;
import io.grpc.DecompressorRegistry;
import io.grpc.InternalConfigSelector;
import io.grpc.InternalDecompressorRegistry;
import io.grpc.Metadata;
import io.grpc.MethodDescriptor;
import io.grpc.MethodDescriptor.MethodType;
import io.grpc.Status;
import io.grpc.internal.ManagedChannelServiceConfig.MethodInfo;
import io.perfmark.Link;
import io.perfmark.PerfMark;
import io.perfmark.Tag;
import io.prometheus.client.Histogram;
import java.io.InputStream;
import java.nio.charset.Charset;
import java.util.Locale;
import java.util.concurrent.CancellationException;
import java.util.concurrent.Executor;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.Nullable;
import org.tikv.common.util.HistogramUtils;
/** Implementation of {@link ClientCall}. */
final class ClientCallImpl<ReqT, RespT> extends ClientCall<ReqT, RespT> {
public static final Histogram perfmarkClientCallImplDuration =
HistogramUtils.buildDuration()
.name("perfmark_client_call_impl_duration_seconds")
.help("Perfmark client call impl duration seconds")
.labelNames("type")
.register();
private static final Logger log = Logger.getLogger(ClientCallImpl.class.getName());
private static final byte[] FULL_STREAM_DECOMPRESSION_ENCODINGS =
"gzip".getBytes(Charset.forName("US-ASCII"));
private final MethodDescriptor<ReqT, RespT> method;
private final Tag tag;
private final Executor callExecutor;
private final boolean callExecutorIsDirect;
private final CallTracer channelCallsTracer;
private final Context context;
private volatile ScheduledFuture<?> deadlineCancellationFuture;
private final boolean unaryRequest;
private CallOptions callOptions;
private ClientStream stream;
private volatile boolean cancelListenersShouldBeRemoved;
private boolean cancelCalled;
private boolean halfCloseCalled;
private final ClientStreamProvider clientStreamProvider;
private final ContextCancellationListener cancellationListener =
new ContextCancellationListener();
private final ScheduledExecutorService deadlineCancellationExecutor;
private boolean fullStreamDecompression;
private DecompressorRegistry decompressorRegistry = DecompressorRegistry.getDefaultInstance();
private CompressorRegistry compressorRegistry = CompressorRegistry.getDefaultInstance();
ClientCallImpl(
MethodDescriptor<ReqT, RespT> method,
Executor executor,
CallOptions callOptions,
ClientStreamProvider clientStreamProvider,
ScheduledExecutorService deadlineCancellationExecutor,
CallTracer channelCallsTracer,
// TODO(zdapeng): remove this arg
@Nullable InternalConfigSelector configSelector) {
this.method = method;
// TODO(carl-mastrangelo): consider moving this construction to ManagedChannelImpl.
this.tag = PerfMark.createTag(method.getFullMethodName(), System.identityHashCode(this));
// If we know that the executor is a direct executor, we don't need to wrap it with a
// SerializingExecutor. This is purely for performance reasons.
// See https://github.com/grpc/grpc-java/issues/368
if (executor == directExecutor()) {
this.callExecutor = new SerializeReentrantCallsDirectExecutor();
callExecutorIsDirect = true;
} else {
this.callExecutor = new SerializingExecutor(executor);
callExecutorIsDirect = false;
}
this.channelCallsTracer = channelCallsTracer;
// Propagate the context from the thread which initiated the call to all callbacks.
this.context = Context.current();
this.unaryRequest =
method.getType() == MethodType.UNARY || method.getType() == MethodType.SERVER_STREAMING;
this.callOptions = callOptions;
this.clientStreamProvider = clientStreamProvider;
this.deadlineCancellationExecutor = deadlineCancellationExecutor;
PerfMark.event("ClientCall.<init>", tag);
}
private final class ContextCancellationListener implements CancellationListener {
@Override
public void cancelled(Context context) {
stream.cancel(statusFromCancelled(context));
}
}
/** Provider of {@link ClientStream}s. */
interface ClientStreamProvider {
ClientStream newStream(
MethodDescriptor<?, ?> method, CallOptions callOptions, Metadata headers, Context context);
}
ClientCallImpl<ReqT, RespT> setFullStreamDecompression(boolean fullStreamDecompression) {
this.fullStreamDecompression = fullStreamDecompression;
return this;
}
ClientCallImpl<ReqT, RespT> setDecompressorRegistry(DecompressorRegistry decompressorRegistry) {
this.decompressorRegistry = decompressorRegistry;
return this;
}
ClientCallImpl<ReqT, RespT> setCompressorRegistry(CompressorRegistry compressorRegistry) {
this.compressorRegistry = compressorRegistry;
return this;
}
@VisibleForTesting
static void prepareHeaders(
Metadata headers,
DecompressorRegistry decompressorRegistry,
Compressor compressor,
boolean fullStreamDecompression) {
headers.discardAll(CONTENT_LENGTH_KEY);
headers.discardAll(MESSAGE_ENCODING_KEY);
if (compressor != Codec.Identity.NONE) {
headers.put(MESSAGE_ENCODING_KEY, compressor.getMessageEncoding());
}
headers.discardAll(MESSAGE_ACCEPT_ENCODING_KEY);
byte[] advertisedEncodings =
InternalDecompressorRegistry.getRawAdvertisedMessageEncodings(decompressorRegistry);
if (advertisedEncodings.length != 0) {
headers.put(MESSAGE_ACCEPT_ENCODING_KEY, advertisedEncodings);
}
headers.discardAll(CONTENT_ENCODING_KEY);
headers.discardAll(CONTENT_ACCEPT_ENCODING_KEY);
if (fullStreamDecompression) {
headers.put(CONTENT_ACCEPT_ENCODING_KEY, FULL_STREAM_DECOMPRESSION_ENCODINGS);
}
}
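// Illustrative note (not part of the original source): with a gzip compressor and
// the default decompressor registry, prepareHeaders() leaves the outgoing metadata
// with
//   grpc-encoding: gzip
//   grpc-accept-encoding: gzip
// and, when fullStreamDecompression is enabled, additionally accept-encoding: gzip.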
@Override
public void start(Listener<RespT> observer, Metadata headers) {
PerfMark.startTask("ClientCall.start", tag);
Histogram.Timer start = perfmarkClientCallImplDuration.labels("ClientCall.start").startTimer();
try {
startInternal(observer, headers);
} finally {
PerfMark.stopTask("ClientCall.start", tag);
start.observeDuration();
}
}
private void startInternal(Listener<RespT> observer, Metadata headers) {
checkState(stream == null, "Already started");
checkState(!cancelCalled, "call was cancelled");
checkNotNull(observer, "observer");
checkNotNull(headers, "headers");
if (context.isCancelled()) {
// Context is already cancelled so no need to create a real stream, just notify the observer
// of cancellation via callback on the executor
stream = NoopClientStream.INSTANCE;
final Listener<RespT> finalObserver = observer;
class ClosedByContext extends ContextRunnable {
ClosedByContext() {
super(context);
}
@Override
public void runInContext() {
closeObserver(finalObserver, statusFromCancelled(context), new Metadata());
}
}
callExecutor.execute(new ClosedByContext());
return;
}
applyMethodConfig();
final String compressorName = callOptions.getCompressor();
Compressor compressor;
if (compressorName != null) {
compressor = compressorRegistry.lookupCompressor(compressorName);
if (compressor == null) {
stream = NoopClientStream.INSTANCE;
final Listener<RespT> finalObserver = observer;
class ClosedByNotFoundCompressor extends ContextRunnable {
ClosedByNotFoundCompressor() {
super(context);
}
@Override
public void runInContext() {
closeObserver(
finalObserver,
Status.INTERNAL.withDescription(
String.format("Unable to find compressor by name %s", compressorName)),
new Metadata());
}
}
callExecutor.execute(new ClosedByNotFoundCompressor());
return;
}
} else {
compressor = Codec.Identity.NONE;
}
prepareHeaders(headers, decompressorRegistry, compressor, fullStreamDecompression);
Deadline effectiveDeadline = effectiveDeadline();
boolean deadlineExceeded = effectiveDeadline != null && effectiveDeadline.isExpired();
if (!deadlineExceeded) {
logIfContextNarrowedTimeout(
effectiveDeadline, context.getDeadline(), callOptions.getDeadline());
stream = clientStreamProvider.newStream(method, callOptions, headers, context);
} else {
ClientStreamTracer[] tracers =
GrpcUtil.getClientStreamTracers(callOptions, headers, 0, false);
stream =
new FailingClientStream(
DEADLINE_EXCEEDED.withDescription(
"ClientCall started after deadline exceeded: " + effectiveDeadline),
tracers);
}
if (callExecutorIsDirect) {
stream.optimizeForDirectExecutor();
}
if (callOptions.getAuthority() != null) {
stream.setAuthority(callOptions.getAuthority());
}
if (callOptions.getMaxInboundMessageSize() != null) {
stream.setMaxInboundMessageSize(callOptions.getMaxInboundMessageSize());
}
if (callOptions.getMaxOutboundMessageSize() != null) {
stream.setMaxOutboundMessageSize(callOptions.getMaxOutboundMessageSize());
}
if (effectiveDeadline != null) {
stream.setDeadline(effectiveDeadline);
}
stream.setCompressor(compressor);
if (fullStreamDecompression) {
stream.setFullStreamDecompression(fullStreamDecompression);
}
stream.setDecompressorRegistry(decompressorRegistry);
channelCallsTracer.reportCallStarted();
stream.start(new ClientStreamListenerImpl(observer));
// Delay any sources of cancellation after start(), because most of the transports are broken if
// they receive cancel before start. Issue #1343 has more details
// Propagate later Context cancellation to the remote side.
context.addListener(cancellationListener, directExecutor());
if (effectiveDeadline != null
// If the context has the effective deadline, we don't need to schedule an extra task.
&& !effectiveDeadline.equals(context.getDeadline())
// If the channel has been terminated, we don't need to schedule an extra task.
&& deadlineCancellationExecutor != null) {
deadlineCancellationFuture = startDeadlineTimer(effectiveDeadline);
}
if (cancelListenersShouldBeRemoved) {
// Race detected! ClientStreamListener.closed may have been called before
// deadlineCancellationFuture was set / context listener added, thereby preventing the future
// and listener from being cancelled. Go ahead and cancel again, just to be sure it
// was cancelled.
removeContextListenerAndCancelDeadlineFuture();
}
}
private void applyMethodConfig() {
MethodInfo info = callOptions.getOption(MethodInfo.KEY);
if (info == null) {
return;
}
if (info.timeoutNanos != null) {
Deadline newDeadline = Deadline.after(info.timeoutNanos, TimeUnit.NANOSECONDS);
Deadline existingDeadline = callOptions.getDeadline();
// If the new deadline is sooner than the existing deadline, use the new one.
if (existingDeadline == null || newDeadline.compareTo(existingDeadline) < 0) {
callOptions = callOptions.withDeadline(newDeadline);
}
}
if (info.waitForReady != null) {
callOptions =
info.waitForReady ? callOptions.withWaitForReady() : callOptions.withoutWaitForReady();
}
if (info.maxInboundMessageSize != null) {
Integer existingLimit = callOptions.getMaxInboundMessageSize();
if (existingLimit != null) {
callOptions =
callOptions.withMaxInboundMessageSize(
Math.min(existingLimit, info.maxInboundMessageSize));
} else {
callOptions = callOptions.withMaxInboundMessageSize(info.maxInboundMessageSize);
}
}
if (info.maxOutboundMessageSize != null) {
Integer existingLimit = callOptions.getMaxOutboundMessageSize();
if (existingLimit != null) {
callOptions =
callOptions.withMaxOutboundMessageSize(
Math.min(existingLimit, info.maxOutboundMessageSize));
} else {
callOptions = callOptions.withMaxOutboundMessageSize(info.maxOutboundMessageSize);
}
}
}
private static void logIfContextNarrowedTimeout(
Deadline effectiveDeadline,
@Nullable Deadline outerCallDeadline,
@Nullable Deadline callDeadline) {
if (!log.isLoggable(Level.FINE)
|| effectiveDeadline == null
|| !effectiveDeadline.equals(outerCallDeadline)) {
return;
}
long effectiveTimeout = max(0, effectiveDeadline.timeRemaining(TimeUnit.NANOSECONDS));
StringBuilder builder =
new StringBuilder(
String.format(
Locale.US,
"Call timeout set to '%d' ns, due to context deadline.",
effectiveTimeout));
if (callDeadline == null) {
builder.append(" Explicit call timeout was not set.");
} else {
long callTimeout = callDeadline.timeRemaining(TimeUnit.NANOSECONDS);
builder.append(String.format(Locale.US, " Explicit call timeout was '%d' ns.", callTimeout));
}
log.fine(builder.toString());
}
private void removeContextListenerAndCancelDeadlineFuture() {
context.removeListener(cancellationListener);
ScheduledFuture<?> f = deadlineCancellationFuture;
if (f != null) {
f.cancel(false);
}
}
private class DeadlineTimer implements Runnable {
private final long remainingNanos;
DeadlineTimer(long remainingNanos) {
this.remainingNanos = remainingNanos;
}
@Override
public void run() {
InsightBuilder insight = new InsightBuilder();
stream.appendTimeoutInsight(insight);
// DelayedStream.cancel() is safe to call from a thread that is different from where the
// stream is created.
long seconds = Math.abs(remainingNanos) / TimeUnit.SECONDS.toNanos(1);
long nanos = Math.abs(remainingNanos) % TimeUnit.SECONDS.toNanos(1);
StringBuilder buf = new StringBuilder();
buf.append("deadline exceeded after ");
if (remainingNanos < 0) {
buf.append('-');
}
buf.append(seconds);
buf.append(String.format(Locale.US, ".%09d", nanos));
buf.append("s. ");
buf.append(insight);
stream.cancel(DEADLINE_EXCEEDED.augmentDescription(buf.toString()));
}
}
private ScheduledFuture<?> startDeadlineTimer(Deadline deadline) {
long remainingNanos = deadline.timeRemaining(TimeUnit.NANOSECONDS);
return deadlineCancellationExecutor.schedule(
new LogExceptionRunnable(new DeadlineTimer(remainingNanos)),
remainingNanos,
TimeUnit.NANOSECONDS);
}
@Nullable
private Deadline effectiveDeadline() {
// Call options and context are immutable, so we don't need to cache the deadline.
return min(callOptions.getDeadline(), context.getDeadline());
}
@Nullable
private static Deadline min(@Nullable Deadline deadline0, @Nullable Deadline deadline1) {
if (deadline0 == null) {
return deadline1;
}
if (deadline1 == null) {
return deadline0;
}
return deadline0.minimum(deadline1);
}
@Override
public void request(int numMessages) {
PerfMark.startTask("ClientCall.request", tag);
Histogram.Timer request =
perfmarkClientCallImplDuration.labels("ClientCall.request").startTimer();
try {
checkState(stream != null, "Not started");
checkArgument(numMessages >= 0, "Number requested must be non-negative");
stream.request(numMessages);
} finally {
PerfMark.stopTask("ClientCall.request", tag);
request.observeDuration();
}
}
@Override
public void cancel(@Nullable String message, @Nullable Throwable cause) {
PerfMark.startTask("ClientCall.cancel", tag);
Histogram.Timer cancel =
perfmarkClientCallImplDuration.labels("ClientCall.cancel").startTimer();
try {
cancelInternal(message, cause);
} finally {
PerfMark.stopTask("ClientCall.cancel", tag);
cancel.observeDuration();
}
}
private void cancelInternal(@Nullable String message, @Nullable Throwable cause) {
if (message == null && cause == null) {
cause = new CancellationException("Cancelled without a message or cause");
log.log(Level.WARNING, "Cancelling without a message or cause is suboptimal", cause);
}
if (cancelCalled) {
return;
}
cancelCalled = true;
try {
// Cancel is called in exception handling cases, so it may be the case that the
// stream was never successfully created or start has never been called.
if (stream != null) {
Status status = Status.CANCELLED;
if (message != null) {
status = status.withDescription(message);
} else {
status = status.withDescription("Call cancelled without message");
}
if (cause != null) {
status = status.withCause(cause);
}
stream.cancel(status);
}
} finally {
removeContextListenerAndCancelDeadlineFuture();
}
}
@Override
public void halfClose() {
PerfMark.startTask("ClientCall.halfClose", tag);
Histogram.Timer halfClose =
perfmarkClientCallImplDuration.labels("ClientCall.halfClose").startTimer();
try {
halfCloseInternal();
} finally {
PerfMark.stopTask("ClientCall.halfClose", tag);
halfClose.observeDuration();
}
}
private void halfCloseInternal() {
checkState(stream != null, "Not started");
checkState(!cancelCalled, "call was cancelled");
checkState(!halfCloseCalled, "call already half-closed");
halfCloseCalled = true;
stream.halfClose();
}
@Override
public void sendMessage(ReqT message) {
PerfMark.startTask("ClientCall.sendMessage", tag);
Histogram.Timer sendMessage =
perfmarkClientCallImplDuration.labels("ClientCall.sendMessage").startTimer();
try {
sendMessageInternal(message);
} finally {
PerfMark.stopTask("ClientCall.sendMessage", tag);
sendMessage.observeDuration();
}
}
private void sendMessageInternal(ReqT message) {
checkState(stream != null, "Not started");
checkState(!cancelCalled, "call was cancelled");
checkState(!halfCloseCalled, "call was half-closed");
try {
if (stream instanceof RetriableStream) {
@SuppressWarnings("unchecked")
RetriableStream<ReqT> retriableStream = (RetriableStream<ReqT>) stream;
retriableStream.sendMessage(message);
} else {
stream.writeMessage(method.streamRequest(message));
}
} catch (RuntimeException e) {
stream.cancel(Status.CANCELLED.withCause(e).withDescription("Failed to stream message"));
return;
} catch (Error e) {
stream.cancel(Status.CANCELLED.withDescription("Client sendMessage() failed with Error"));
throw e;
}
// For unary requests, we don't flush since we know that halfClose should be coming soon. This
// allows us to piggy-back the END_STREAM=true on the last message frame without opening the
// possibility of broken applications forgetting to call halfClose without noticing.
if (!unaryRequest) {
stream.flush();
}
}
@Override
public void setMessageCompression(boolean enabled) {
checkState(stream != null, "Not started");
stream.setMessageCompression(enabled);
}
@Override
public boolean isReady() {
if (halfCloseCalled) {
return false;
}
return stream.isReady();
}
@Override
public Attributes getAttributes() {
if (stream != null) {
return stream.getAttributes();
}
return Attributes.EMPTY;
}
private void closeObserver(Listener<RespT> observer, Status status, Metadata trailers) {
observer.onClose(status, trailers);
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this).add("method", method).toString();
}
private class ClientStreamListenerImpl implements ClientStreamListener {
private final Listener<RespT> observer;
private Status exceptionStatus;
public ClientStreamListenerImpl(Listener<RespT> observer) {
this.observer = checkNotNull(observer, "observer");
}
/**
* Cancels call and schedules onClose() notification. May only be called from the application
* thread.
*/
private void exceptionThrown(Status status) {
// Since each RPC can have its own executor, we can only call onClose() when we are sure there
// will be no further callbacks. We set the status here and overwrite the onClose() details
// when it arrives.
exceptionStatus = status;
stream.cancel(status);
}
@Override
public void headersRead(final Metadata headers) {
PerfMark.startTask("ClientStreamListener.headersRead", tag);
Histogram.Timer headersRead =
perfmarkClientCallImplDuration.labels("ClientStreamListener.headersRead").startTimer();
final Link link = PerfMark.linkOut();
final class HeadersRead extends ContextRunnable {
HeadersRead() {
super(context);
}
@Override
public void runInContext() {
PerfMark.startTask("ClientCall$Listener.headersRead", tag);
Histogram.Timer headersRead =
perfmarkClientCallImplDuration.labels("ClientCall$Listener.headersRead").startTimer();
PerfMark.linkIn(link);
try {
runInternal();
} finally {
PerfMark.stopTask("ClientCall$Listener.headersRead", tag);
headersRead.observeDuration();
}
}
private void runInternal() {
if (exceptionStatus != null) {
return;
}
try {
observer.onHeaders(headers);
} catch (Throwable t) {
exceptionThrown(
Status.CANCELLED.withCause(t).withDescription("Failed to read headers"));
}
}
}
try {
callExecutor.execute(new HeadersRead());
} finally {
PerfMark.stopTask("ClientStreamListener.headersRead", tag);
headersRead.observeDuration();
}
}
@Override
public void messagesAvailable(final MessageProducer producer) {
PerfMark.startTask("ClientStreamListener.messagesAvailable", tag);
Histogram.Timer messagesAvailable =
perfmarkClientCallImplDuration
.labels("ClientStreamListener.messagesAvailable")
.startTimer();
final Link link = PerfMark.linkOut();
final class MessagesAvailable extends ContextRunnable {
MessagesAvailable() {
super(context);
}
@Override
public void runInContext() {
PerfMark.startTask("ClientCall$Listener.messagesAvailable", tag);
Histogram.Timer messagesAvailable =
perfmarkClientCallImplDuration
.labels("ClientCall$Listener.messagesAvailable")
.startTimer();
PerfMark.linkIn(link);
try {
runInternal();
} finally {
PerfMark.stopTask("ClientCall$Listener.messagesAvailable", tag);
messagesAvailable.observeDuration();
}
}
private void runInternal() {
if (exceptionStatus != null) {
GrpcUtil.closeQuietly(producer);
return;
}
try {
InputStream message;
while ((message = producer.next()) != null) {
try {
observer.onMessage(method.parseResponse(message));
} catch (Throwable t) {
GrpcUtil.closeQuietly(message);
throw t;
}
message.close();
}
} catch (Throwable t) {
GrpcUtil.closeQuietly(producer);
exceptionThrown(
Status.CANCELLED.withCause(t).withDescription("Failed to read message."));
}
}
}
try {
callExecutor.execute(new MessagesAvailable());
} finally {
PerfMark.stopTask("ClientStreamListener.messagesAvailable", tag);
messagesAvailable.observeDuration();
}
}
@Override
public void closed(Status status, RpcProgress rpcProgress, Metadata trailers) {
PerfMark.startTask("ClientStreamListener.closed", tag);
Histogram.Timer closed =
perfmarkClientCallImplDuration.labels("ClientStreamListener.closed").startTimer();
try {
closedInternal(status, rpcProgress, trailers);
} finally {
PerfMark.stopTask("ClientStreamListener.closed", tag);
closed.observeDuration();
}
}
private void closedInternal(
Status status, @SuppressWarnings("unused") RpcProgress rpcProgress, Metadata trailers) {
Deadline deadline = effectiveDeadline();
if (status.getCode() == Status.Code.CANCELLED && deadline != null) {
// When the server's deadline expires, it can only reset the stream with CANCEL and no
// description. Since our timer may be delayed in firing, we double-check the deadline and
// turn the failure into the likely more helpful DEADLINE_EXCEEDED status.
if (deadline.isExpired()) {
InsightBuilder insight = new InsightBuilder();
stream.appendTimeoutInsight(insight);
status =
DEADLINE_EXCEEDED.augmentDescription(
"ClientCall was cancelled at or after deadline. " + insight);
// Replace trailers to prevent mixing sources of status and trailers.
trailers = new Metadata();
}
}
final Status savedStatus = status;
final Metadata savedTrailers = trailers;
final Link link = PerfMark.linkOut();
final class StreamClosed extends ContextRunnable {
StreamClosed() {
super(context);
}
@Override
public void runInContext() {
PerfMark.startTask("ClientCall$Listener.onClose", tag);
Histogram.Timer onClose =
perfmarkClientCallImplDuration.labels("ClientCall$Listener.onClose").startTimer();
PerfMark.linkIn(link);
try {
runInternal();
} finally {
PerfMark.stopTask("ClientCall$Listener.onClose", tag);
onClose.observeDuration();
}
}
private void runInternal() {
Status status = savedStatus;
Metadata trailers = savedTrailers;
if (exceptionStatus != null) {
// Ideally exceptionStatus == savedStatus, as exceptionStatus was passed to cancel().
// However the cancel is racy and this closed() may have already been queued when the
// cancellation occurred. Since other calls like onMessage() will throw away data if
// exceptionStatus != null, it is semantically essential that we _not_ use a status
// provided by the server.
status = exceptionStatus;
// Replace trailers to prevent mixing sources of status and trailers.
trailers = new Metadata();
}
cancelListenersShouldBeRemoved = true;
try {
closeObserver(observer, status, trailers);
} finally {
removeContextListenerAndCancelDeadlineFuture();
channelCallsTracer.reportCallEnded(status.isOk());
}
}
}
callExecutor.execute(new StreamClosed());
}
@Override
public void onReady() {
if (method.getType().clientSendsOneMessage()) {
return;
}
PerfMark.startTask("ClientStreamListener.onReady", tag);
Histogram.Timer onReady =
perfmarkClientCallImplDuration.labels("ClientStreamListener.onReady").startTimer();
final Link link = PerfMark.linkOut();
final class StreamOnReady extends ContextRunnable {
StreamOnReady() {
super(context);
}
@Override
public void runInContext() {
PerfMark.startTask("ClientCall$Listener.onReady", tag);
Histogram.Timer onReady =
perfmarkClientCallImplDuration.labels("ClientCall$Listener.onReady").startTimer();
PerfMark.linkIn(link);
try {
runInternal();
} finally {
PerfMark.stopTask("ClientCall$Listener.onReady", tag);
onReady.observeDuration();
}
}
private void runInternal() {
if (exceptionStatus != null) {
return;
}
try {
observer.onReady();
} catch (Throwable t) {
exceptionThrown(
Status.CANCELLED.withCause(t).withDescription("Failed to call onReady."));
}
}
}
try {
callExecutor.execute(new StreamOnReady());
} finally {
PerfMark.stopTask("ClientStreamListener.onReady", tag);
onReady.observeDuration();
}
}
}
}
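The deadline logic above boils down to two rules: the effective deadline is the sooner of the CallOptions deadline and the Context deadline, and a cancellation timer is only scheduled when that effective deadline is not the context's own (an expiring context already cancels the call through the registered listener). A minimal standalone sketch of the first rule, mirroring min() above (method name hypothetical):

    import io.grpc.Deadline;

    /** Returns the sooner of two possibly-null deadlines, or null if both are null. */
    static Deadline soonerDeadline(Deadline callDeadline, Deadline contextDeadline) {
      if (callDeadline == null) {
        return contextDeadline;
      }
      if (contextDeadline == null) {
        return callDeadline;
      }
      return callDeadline.minimum(contextDeadline); // io.grpc.Deadline#minimum
    }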


@@ -1,774 +0,0 @@
/*
* Copyright 2014 The gRPC Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.grpc.netty;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import static io.grpc.internal.GrpcUtil.DEFAULT_KEEPALIVE_TIMEOUT_NANOS;
import static io.grpc.internal.GrpcUtil.KEEPALIVE_TIME_NANOS_DISABLED;
import com.google.common.annotations.VisibleForTesting;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import io.grpc.Attributes;
import io.grpc.CallCredentials;
import io.grpc.ChannelCredentials;
import io.grpc.ChannelLogger;
import io.grpc.EquivalentAddressGroup;
import io.grpc.ExperimentalApi;
import io.grpc.HttpConnectProxiedSocketAddress;
import io.grpc.Internal;
import io.grpc.ManagedChannelBuilder;
import io.grpc.internal.AbstractManagedChannelImplBuilder;
import io.grpc.internal.AtomicBackoff;
import io.grpc.internal.ClientTransportFactory;
import io.grpc.internal.ConnectionClientTransport;
import io.grpc.internal.FixedObjectPool;
import io.grpc.internal.GrpcUtil;
import io.grpc.internal.KeepAliveManager;
import io.grpc.internal.ManagedChannelImplBuilder;
import io.grpc.internal.ManagedChannelImplBuilder.ChannelBuilderDefaultPortProvider;
import io.grpc.internal.ManagedChannelImplBuilder.ClientTransportFactoryBuilder;
import io.grpc.internal.ObjectPool;
import io.grpc.internal.SharedResourcePool;
import io.grpc.internal.TransportTracer;
import io.grpc.netty.ProtocolNegotiators.FromChannelCredentialsResult;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFactory;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.ReflectiveChannelFactory;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.handler.ssl.SslContext;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Executor;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import javax.annotation.CheckReturnValue;
import javax.annotation.Nullable;
import javax.net.ssl.SSLException;
/** A builder to help simplify construction of channels using the Netty transport. */
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/1784")
@CanIgnoreReturnValue
public final class NettyChannelBuilder
extends AbstractManagedChannelImplBuilder<NettyChannelBuilder> {
// 1MiB.
public static final int DEFAULT_FLOW_CONTROL_WINDOW = 1024 * 1024;
private static final boolean DEFAULT_AUTO_FLOW_CONTROL;
private static final long AS_LARGE_AS_INFINITE = TimeUnit.DAYS.toNanos(1000L);
private static final ChannelFactory<? extends Channel> DEFAULT_CHANNEL_FACTORY =
new ReflectiveChannelFactory<>(Utils.DEFAULT_CLIENT_CHANNEL_TYPE);
private static final ObjectPool<? extends EventLoopGroup> DEFAULT_EVENT_LOOP_GROUP_POOL =
SharedResourcePool.forResource(Utils.DEFAULT_WORKER_EVENT_LOOP_GROUP);
static {
String autoFlowControl = System.getenv("GRPC_EXPERIMENTAL_AUTOFLOWCONTROL");
if (autoFlowControl == null) {
autoFlowControl = "true";
}
DEFAULT_AUTO_FLOW_CONTROL = Boolean.parseBoolean(autoFlowControl);
}
private final ManagedChannelImplBuilder managedChannelImplBuilder;
private TransportTracer.Factory transportTracerFactory = TransportTracer.getDefaultFactory();
private final Map<ChannelOption<?>, Object> channelOptions = new HashMap<>();
private ChannelFactory<? extends Channel> channelFactory = DEFAULT_CHANNEL_FACTORY;
private ObjectPool<? extends EventLoopGroup> eventLoopGroupPool = DEFAULT_EVENT_LOOP_GROUP_POOL;
private boolean autoFlowControl = DEFAULT_AUTO_FLOW_CONTROL;
private int flowControlWindow = DEFAULT_FLOW_CONTROL_WINDOW;
private int maxInboundMessageSize = GrpcUtil.DEFAULT_MAX_MESSAGE_SIZE;
private int maxHeaderListSize = GrpcUtil.DEFAULT_MAX_HEADER_LIST_SIZE;
private long keepAliveTimeNanos = KEEPALIVE_TIME_NANOS_DISABLED;
private long keepAliveTimeoutNanos = DEFAULT_KEEPALIVE_TIMEOUT_NANOS;
private boolean keepAliveWithoutCalls;
private ProtocolNegotiator.ClientFactory protocolNegotiatorFactory =
new DefaultProtocolNegotiator();
private final boolean freezeProtocolNegotiatorFactory;
private LocalSocketPicker localSocketPicker;
/**
* If true, indicates that the transport may use the GET method for RPCs, and may include the
* request body in the query params.
*/
private final boolean useGetForSafeMethods = false;
/**
* Creates a new builder with the given server address. This factory method is primarily intended
* for using Netty Channel types other than SocketChannel. {@link #forAddress(String, int)} should
* generally be preferred over this method, since that API permits delaying DNS lookups and
* noticing changes to DNS. If an unresolved InetSocketAddress is passed in, then it will remain
* unresolved.
*/
@CheckReturnValue
public static NettyChannelBuilder forAddress(SocketAddress serverAddress) {
return new NettyChannelBuilder(serverAddress);
}
/** Creates a new builder with the given host and port. */
@CheckReturnValue
public static NettyChannelBuilder forAddress(String host, int port) {
return forTarget(GrpcUtil.authorityFromHostAndPort(host, port));
}
/** Creates a new builder with the given host and port. */
@CheckReturnValue
public static NettyChannelBuilder forAddress(String host, int port, ChannelCredentials creds) {
return forTarget(GrpcUtil.authorityFromHostAndPort(host, port), creds);
}
/**
* Creates a new builder with the given target string that will be resolved by {@link
* io.grpc.NameResolver}.
*/
@CheckReturnValue
public static NettyChannelBuilder forTarget(String target) {
return new NettyChannelBuilder(target);
}
/**
* Creates a new builder with the given target string that will be resolved by {@link
* io.grpc.NameResolver}.
*/
@CheckReturnValue
public static NettyChannelBuilder forTarget(String target, ChannelCredentials creds) {
FromChannelCredentialsResult result = ProtocolNegotiators.from(creds);
if (result.error != null) {
throw new IllegalArgumentException(result.error);
}
return new NettyChannelBuilder(target, creds, result.callCredentials, result.negotiator);
}
private final class NettyChannelTransportFactoryBuilder implements ClientTransportFactoryBuilder {
@Override
public ClientTransportFactory buildClientTransportFactory() {
return buildTransportFactory();
}
}
private final class NettyChannelDefaultPortProvider implements ChannelBuilderDefaultPortProvider {
@Override
public int getDefaultPort() {
return protocolNegotiatorFactory.getDefaultPort();
}
}
@CheckReturnValue
NettyChannelBuilder(String target) {
managedChannelImplBuilder =
new ManagedChannelImplBuilder(
target,
new NettyChannelTransportFactoryBuilder(),
new NettyChannelDefaultPortProvider());
this.freezeProtocolNegotiatorFactory = false;
}
NettyChannelBuilder(
String target,
ChannelCredentials channelCreds,
CallCredentials callCreds,
ProtocolNegotiator.ClientFactory negotiator) {
managedChannelImplBuilder =
new ManagedChannelImplBuilder(
target,
channelCreds,
callCreds,
new NettyChannelTransportFactoryBuilder(),
new NettyChannelDefaultPortProvider());
this.protocolNegotiatorFactory = checkNotNull(negotiator, "negotiator");
this.freezeProtocolNegotiatorFactory = true;
}
@CheckReturnValue
NettyChannelBuilder(SocketAddress address) {
managedChannelImplBuilder =
new ManagedChannelImplBuilder(
address,
getAuthorityFromAddress(address),
new NettyChannelTransportFactoryBuilder(),
new NettyChannelDefaultPortProvider());
this.freezeProtocolNegotiatorFactory = false;
}
@Internal
@Override
protected ManagedChannelBuilder<?> delegate() {
return managedChannelImplBuilder;
}
@CheckReturnValue
private static String getAuthorityFromAddress(SocketAddress address) {
if (address instanceof InetSocketAddress) {
InetSocketAddress inetAddress = (InetSocketAddress) address;
return GrpcUtil.authorityFromHostAndPort(inetAddress.getHostString(), inetAddress.getPort());
} else {
return address.toString();
}
}
/**
* Specifies the channel type to use. By default {@code EpollSocketChannel} is used if
* available; otherwise {@link NioSocketChannel} is used.
*
* <p>You either use this or {@link #channelFactory(io.netty.channel.ChannelFactory)} if your
* {@link Channel} implementation has no no-args constructor.
*
* <p>It's an optional parameter. If the user has not provided a Channel type or ChannelFactory
* when the channel is built, the builder will use the static default.
*
* <p>You must also provide corresponding {@link #eventLoopGroup(EventLoopGroup)}. For example,
* {@link NioSocketChannel} must use {@link io.netty.channel.nio.NioEventLoopGroup}, otherwise
* your application won't start.
*/
public NettyChannelBuilder channelType(Class<? extends Channel> channelType) {
checkNotNull(channelType, "channelType");
return channelFactory(new ReflectiveChannelFactory<>(channelType));
}
/**
* Specifies the {@link ChannelFactory} used to create {@link Channel} instances. This method
* is usually only needed when the specific {@code Channel} requires additional information or
* complex logic to construct. Otherwise, prefer {@link #channelType(Class)}.
*
* <p>It's an optional parameter. If the user has not provided a Channel type or ChannelFactory
* when the channel is built, the builder will use the static default.
*
* <p>You must also provide corresponding {@link #eventLoopGroup(EventLoopGroup)}. For example,
* {@link NioSocketChannel} based {@link ChannelFactory} must use {@link
* io.netty.channel.nio.NioEventLoopGroup}, otherwise your application won't start.
*/
public NettyChannelBuilder channelFactory(ChannelFactory<? extends Channel> channelFactory) {
this.channelFactory = checkNotNull(channelFactory, "channelFactory");
return this;
}
/**
* Specifies a channel option. As the underlying channel, as well as the network implementation,
* may ignore this value, applications should consider it a hint.
*/
public <T> NettyChannelBuilder withOption(ChannelOption<T> option, T value) {
channelOptions.put(option, value);
return this;
}
/**
* Sets the negotiation type for the HTTP/2 connection.
*
* <p>Default: <code>TLS</code>
*/
public NettyChannelBuilder negotiationType(NegotiationType type) {
checkState(
!freezeProtocolNegotiatorFactory, "Cannot change security when using ChannelCredentials");
if (!(protocolNegotiatorFactory instanceof DefaultProtocolNegotiator)) {
// Do nothing for compatibility
return this;
}
((DefaultProtocolNegotiator) protocolNegotiatorFactory).negotiationType = type;
return this;
}
/**
* Provides an EventLoopGroup to be used by the Netty transport.
*
* <p>It's an optional parameter. If the user has not provided an EventLoopGroup when the
* channel is built, the builder will use the static default.
*
* <p>You must also provide corresponding {@link #channelType(Class)} or {@link
* #channelFactory(ChannelFactory)} corresponding to the given {@code EventLoopGroup}. For
* example, {@link io.netty.channel.nio.NioEventLoopGroup} requires {@link NioSocketChannel}
*
* <p>The channel won't take ownership of the given EventLoopGroup. It is the caller's
* responsibility to shut it down when desired.
*/
public NettyChannelBuilder eventLoopGroup(@Nullable EventLoopGroup eventLoopGroup) {
if (eventLoopGroup != null) {
return eventLoopGroupPool(new FixedObjectPool<>(eventLoopGroup));
}
return eventLoopGroupPool(DEFAULT_EVENT_LOOP_GROUP_POOL);
}
NettyChannelBuilder eventLoopGroupPool(ObjectPool<? extends EventLoopGroup> eventLoopGroupPool) {
this.eventLoopGroupPool = checkNotNull(eventLoopGroupPool, "eventLoopGroupPool");
return this;
}
/**
* SSL/TLS context to use instead of the system default. It must have been configured with {@link
* GrpcSslContexts}, but options could have been overridden.
*/
public NettyChannelBuilder sslContext(SslContext sslContext) {
checkState(
!freezeProtocolNegotiatorFactory, "Cannot change security when using ChannelCredentials");
if (sslContext != null) {
checkArgument(sslContext.isClient(), "Server SSL context can not be used for client channel");
GrpcSslContexts.ensureAlpnAndH2Enabled(sslContext.applicationProtocolNegotiator());
}
if (!(protocolNegotiatorFactory instanceof DefaultProtocolNegotiator)) {
// Do nothing for compatibility
return this;
}
((DefaultProtocolNegotiator) protocolNegotiatorFactory).sslContext = sslContext;
return this;
}
/**
* Sets the initial flow control window in bytes. Setting initial flow control window enables auto
* flow control tuning using bandwidth-delay product algorithm. To disable auto flow control
* tuning, use {@link #flowControlWindow(int)}. By default, auto flow control is enabled with
* initial flow control window size of {@link #DEFAULT_FLOW_CONTROL_WINDOW}.
*/
public NettyChannelBuilder initialFlowControlWindow(int initialFlowControlWindow) {
checkArgument(initialFlowControlWindow > 0, "initialFlowControlWindow must be positive");
this.flowControlWindow = initialFlowControlWindow;
this.autoFlowControl = true;
return this;
}
/**
* Sets the flow control window in bytes. Setting flowControlWindow disables auto flow control
* tuning; use {@link #initialFlowControlWindow(int)} to enable auto flow control tuning. If not
* called, the default value is {@link #DEFAULT_FLOW_CONTROL_WINDOW} with auto flow control
* tuning.
*/
public NettyChannelBuilder flowControlWindow(int flowControlWindow) {
checkArgument(flowControlWindow > 0, "flowControlWindow must be positive");
this.flowControlWindow = flowControlWindow;
this.autoFlowControl = false;
return this;
}
/**
* Sets the maximum size of header list allowed to be received. This is cumulative size of the
* headers with some overhead, as defined for <a
* href="http://httpwg.org/specs/rfc7540.html#rfc.section.6.5.2">HTTP/2's
* SETTINGS_MAX_HEADER_LIST_SIZE</a>. The default is 8 KiB.
*
* @deprecated Use {@link #maxInboundMetadataSize} instead
*/
@Deprecated
public NettyChannelBuilder maxHeaderListSize(int maxHeaderListSize) {
return maxInboundMetadataSize(maxHeaderListSize);
}
/**
* Sets the maximum size of metadata allowed to be received. This is cumulative size of the
* entries with some overhead, as defined for <a
* href="http://httpwg.org/specs/rfc7540.html#rfc.section.6.5.2">HTTP/2's
* SETTINGS_MAX_HEADER_LIST_SIZE</a>. The default is 8 KiB.
*
* @param bytes the maximum size of received metadata
* @return this
* @throws IllegalArgumentException if bytes is non-positive
* @since 1.17.0
*/
@Override
public NettyChannelBuilder maxInboundMetadataSize(int bytes) {
checkArgument(bytes > 0, "maxInboundMetadataSize must be > 0");
this.maxHeaderListSize = bytes;
return this;
}
/** Equivalent to using {@link #negotiationType(NegotiationType)} with {@code PLAINTEXT}. */
@Override
public NettyChannelBuilder usePlaintext() {
negotiationType(NegotiationType.PLAINTEXT);
return this;
}
/** Equivalent to using {@link #negotiationType(NegotiationType)} with {@code TLS}. */
@Override
public NettyChannelBuilder useTransportSecurity() {
negotiationType(NegotiationType.TLS);
return this;
}
/**
* {@inheritDoc}
*
* @since 1.3.0
*/
@Override
public NettyChannelBuilder keepAliveTime(long keepAliveTime, TimeUnit timeUnit) {
checkArgument(keepAliveTime > 0L, "keepalive time must be positive");
keepAliveTimeNanos = timeUnit.toNanos(keepAliveTime);
keepAliveTimeNanos = KeepAliveManager.clampKeepAliveTimeInNanos(keepAliveTimeNanos);
if (keepAliveTimeNanos >= AS_LARGE_AS_INFINITE) {
// Bump keepalive time to infinite. This disables keepalive.
keepAliveTimeNanos = KEEPALIVE_TIME_NANOS_DISABLED;
}
return this;
}
/**
* {@inheritDoc}
*
* @since 1.3.0
*/
@Override
public NettyChannelBuilder keepAliveTimeout(long keepAliveTimeout, TimeUnit timeUnit) {
checkArgument(keepAliveTimeout > 0L, "keepalive timeout must be positive");
keepAliveTimeoutNanos = timeUnit.toNanos(keepAliveTimeout);
keepAliveTimeoutNanos = KeepAliveManager.clampKeepAliveTimeoutInNanos(keepAliveTimeoutNanos);
return this;
}
/**
* {@inheritDoc}
*
* @since 1.3.0
*/
@Override
public NettyChannelBuilder keepAliveWithoutCalls(boolean enable) {
keepAliveWithoutCalls = enable;
return this;
}
/** If non-{@code null}, attempts to create connections bound to a local port. */
public NettyChannelBuilder localSocketPicker(@Nullable LocalSocketPicker localSocketPicker) {
this.localSocketPicker = localSocketPicker;
return this;
}
/**
* This class is meant to be overridden with a custom implementation of {@link
* #createSocketAddress}. The default implementation is a no-op.
*
* @since 1.16.0
*/
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/4917")
public static class LocalSocketPicker {
/**
* Called by gRPC to pick local socket to bind to. This may be called multiple times. Subclasses
* are expected to override this method.
*
* @param remoteAddress the remote address to connect to.
* @param attrs the Attributes present on the {@link io.grpc.EquivalentAddressGroup} associated
* with the address.
* @return a {@link SocketAddress} suitable for binding, or else {@code null}.
* @since 1.16.0
*/
@Nullable
public SocketAddress createSocketAddress(
SocketAddress remoteAddress, @EquivalentAddressGroup.Attr Attributes attrs) {
return null;
}
}
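// Illustrative subclass (hypothetical, not part of the original source): pick a
// fixed local interface for outbound connections.
//
//   static final class FixedSourcePicker extends LocalSocketPicker {
//     @Override
//     public SocketAddress createSocketAddress(
//         SocketAddress remoteAddress, @EquivalentAddressGroup.Attr Attributes attrs) {
//       // Bind to a specific NIC; port 0 lets the OS choose an ephemeral port.
//       return new java.net.InetSocketAddress("10.0.0.5", 0);
//     }
//   }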
/**
* Sets the maximum message size allowed for a single gRPC frame. If an inbound message larger
* than this limit is received, it will not be processed and the RPC will fail with
* RESOURCE_EXHAUSTED.
*/
@Override
public NettyChannelBuilder maxInboundMessageSize(int max) {
checkArgument(max >= 0, "negative max");
maxInboundMessageSize = max;
return this;
}
@CheckReturnValue
ClientTransportFactory buildTransportFactory() {
assertEventLoopAndChannelType();
ProtocolNegotiator negotiator = protocolNegotiatorFactory.newNegotiator();
return new NettyTransportFactory(
negotiator,
channelFactory,
channelOptions,
eventLoopGroupPool,
autoFlowControl,
flowControlWindow,
maxInboundMessageSize,
maxHeaderListSize,
keepAliveTimeNanos,
keepAliveTimeoutNanos,
keepAliveWithoutCalls,
transportTracerFactory,
localSocketPicker,
useGetForSafeMethods);
}
@VisibleForTesting
void assertEventLoopAndChannelType() {
boolean bothProvided =
channelFactory != DEFAULT_CHANNEL_FACTORY
&& eventLoopGroupPool != DEFAULT_EVENT_LOOP_GROUP_POOL;
boolean nonProvided =
channelFactory == DEFAULT_CHANNEL_FACTORY
&& eventLoopGroupPool == DEFAULT_EVENT_LOOP_GROUP_POOL;
checkState(
bothProvided || nonProvided,
"Both EventLoopGroup and ChannelType should be provided or neither should be");
}
@CheckReturnValue
int getDefaultPort() {
return protocolNegotiatorFactory.getDefaultPort();
}
@VisibleForTesting
@CheckReturnValue
static ProtocolNegotiator createProtocolNegotiatorByType(
NegotiationType negotiationType,
SslContext sslContext,
ObjectPool<? extends Executor> executorPool) {
switch (negotiationType) {
case PLAINTEXT:
return ProtocolNegotiators.plaintext();
case PLAINTEXT_UPGRADE:
return ProtocolNegotiators.plaintextUpgrade();
case TLS:
return ProtocolNegotiators.tls(sslContext, executorPool);
default:
throw new IllegalArgumentException("Unsupported negotiationType: " + negotiationType);
}
}
NettyChannelBuilder disableCheckAuthority() {
this.managedChannelImplBuilder.disableCheckAuthority();
return this;
}
NettyChannelBuilder enableCheckAuthority() {
this.managedChannelImplBuilder.enableCheckAuthority();
return this;
}
void protocolNegotiatorFactory(ProtocolNegotiator.ClientFactory protocolNegotiatorFactory) {
checkState(
!freezeProtocolNegotiatorFactory, "Cannot change security when using ChannelCredentials");
this.protocolNegotiatorFactory =
checkNotNull(protocolNegotiatorFactory, "protocolNegotiatorFactory");
}
void setTracingEnabled(boolean value) {
this.managedChannelImplBuilder.setTracingEnabled(value);
}
void setStatsEnabled(boolean value) {
this.managedChannelImplBuilder.setStatsEnabled(value);
}
void setStatsRecordStartedRpcs(boolean value) {
this.managedChannelImplBuilder.setStatsRecordStartedRpcs(value);
}
void setStatsRecordFinishedRpcs(boolean value) {
this.managedChannelImplBuilder.setStatsRecordFinishedRpcs(value);
}
void setStatsRecordRealTimeMetrics(boolean value) {
this.managedChannelImplBuilder.setStatsRecordRealTimeMetrics(value);
}
@VisibleForTesting
NettyChannelBuilder setTransportTracerFactory(TransportTracer.Factory transportTracerFactory) {
this.transportTracerFactory = transportTracerFactory;
return this;
}
private final class DefaultProtocolNegotiator implements ProtocolNegotiator.ClientFactory {
private NegotiationType negotiationType = NegotiationType.TLS;
private SslContext sslContext;
@Override
public ProtocolNegotiator newNegotiator() {
SslContext localSslContext = sslContext;
if (negotiationType == NegotiationType.TLS && localSslContext == null) {
try {
localSslContext = GrpcSslContexts.forClient().build();
} catch (SSLException ex) {
throw new RuntimeException(ex);
}
}
return createProtocolNegotiatorByType(
negotiationType, localSslContext, managedChannelImplBuilder.getOffloadExecutorPool());
}
@Override
public int getDefaultPort() {
switch (negotiationType) {
case PLAINTEXT:
case PLAINTEXT_UPGRADE:
return GrpcUtil.DEFAULT_PORT_PLAINTEXT;
case TLS:
return GrpcUtil.DEFAULT_PORT_SSL;
default:
throw new AssertionError(negotiationType + " not handled");
}
}
}
/** Creates Netty transports. Exposed for internal use, as it should be private. */
@CheckReturnValue
private static final class NettyTransportFactory implements ClientTransportFactory {
private final ProtocolNegotiator protocolNegotiator;
private final ChannelFactory<? extends Channel> channelFactory;
private final Map<ChannelOption<?>, ?> channelOptions;
private final ObjectPool<? extends EventLoopGroup> groupPool;
private final EventLoopGroup group;
private final boolean autoFlowControl;
private final int flowControlWindow;
private final int maxMessageSize;
private final int maxHeaderListSize;
private final long keepAliveTimeNanos;
private final AtomicBackoff keepAliveBackoff;
private final long keepAliveTimeoutNanos;
private final boolean keepAliveWithoutCalls;
private final TransportTracer.Factory transportTracerFactory;
private final LocalSocketPicker localSocketPicker;
private final boolean useGetForSafeMethods;
private boolean closed;
NettyTransportFactory(
ProtocolNegotiator protocolNegotiator,
ChannelFactory<? extends Channel> channelFactory,
Map<ChannelOption<?>, ?> channelOptions,
ObjectPool<? extends EventLoopGroup> groupPool,
boolean autoFlowControl,
int flowControlWindow,
int maxMessageSize,
int maxHeaderListSize,
long keepAliveTimeNanos,
long keepAliveTimeoutNanos,
boolean keepAliveWithoutCalls,
TransportTracer.Factory transportTracerFactory,
LocalSocketPicker localSocketPicker,
boolean useGetForSafeMethods) {
this.protocolNegotiator = checkNotNull(protocolNegotiator, "protocolNegotiator");
this.channelFactory = channelFactory;
this.channelOptions = new HashMap<ChannelOption<?>, Object>(channelOptions);
this.groupPool = groupPool;
this.group = groupPool.getObject();
this.autoFlowControl = autoFlowControl;
this.flowControlWindow = flowControlWindow;
this.maxMessageSize = maxMessageSize;
this.maxHeaderListSize = maxHeaderListSize;
this.keepAliveTimeNanos = keepAliveTimeNanos;
this.keepAliveBackoff = new AtomicBackoff("keepalive time nanos", keepAliveTimeNanos);
this.keepAliveTimeoutNanos = keepAliveTimeoutNanos;
this.keepAliveWithoutCalls = keepAliveWithoutCalls;
this.transportTracerFactory = transportTracerFactory;
this.localSocketPicker =
localSocketPicker != null ? localSocketPicker : new LocalSocketPicker();
this.useGetForSafeMethods = useGetForSafeMethods;
}
@Override
public ConnectionClientTransport newClientTransport(
SocketAddress serverAddress, ClientTransportOptions options, ChannelLogger channelLogger) {
checkState(!closed, "The transport factory is closed.");
ProtocolNegotiator localNegotiator = protocolNegotiator;
HttpConnectProxiedSocketAddress proxiedAddr = options.getHttpConnectProxiedSocketAddress();
if (proxiedAddr != null) {
serverAddress = proxiedAddr.getTargetAddress();
localNegotiator =
ProtocolNegotiators.httpProxy(
proxiedAddr.getProxyAddress(),
proxiedAddr.getUsername(),
proxiedAddr.getPassword(),
protocolNegotiator);
}
final AtomicBackoff.State keepAliveTimeNanosState = keepAliveBackoff.getState();
Runnable tooManyPingsRunnable =
new Runnable() {
@Override
public void run() {
keepAliveTimeNanosState.backoff();
}
};
// TODO(carl-mastrangelo): Pass channelLogger in.
NettyClientTransport transport =
new NettyClientTransport(
serverAddress,
channelFactory,
channelOptions,
group,
localNegotiator,
autoFlowControl,
flowControlWindow,
maxMessageSize,
maxHeaderListSize,
keepAliveTimeNanosState.get(),
keepAliveTimeoutNanos,
keepAliveWithoutCalls,
options.getAuthority(),
options.getUserAgent(),
tooManyPingsRunnable,
transportTracerFactory.create(),
options.getEagAttributes(),
localSocketPicker,
channelLogger,
useGetForSafeMethods);
return transport;
}
@Override
public ScheduledExecutorService getScheduledExecutorService() {
return group;
}
@Override
public SwapChannelCredentialsResult swapChannelCredentials(ChannelCredentials channelCreds) {
checkNotNull(channelCreds, "channelCreds");
FromChannelCredentialsResult result = ProtocolNegotiators.from(channelCreds);
if (result.error != null) {
return null;
}
ClientTransportFactory factory =
new NettyTransportFactory(
result.negotiator.newNegotiator(),
channelFactory,
channelOptions,
groupPool,
autoFlowControl,
flowControlWindow,
maxMessageSize,
maxHeaderListSize,
keepAliveTimeNanos,
keepAliveTimeoutNanos,
keepAliveWithoutCalls,
transportTracerFactory,
localSocketPicker,
useGetForSafeMethods);
return new SwapChannelCredentialsResult(factory, result.callCredentials);
}
@Override
public void close() {
if (closed) {
return;
}
closed = true;
protocolNegotiator.close();
groupPool.returnObject(group);
}
}
}
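As the javadoc above repeatedly stresses, a non-default channel type must be paired with a matching EventLoopGroup, and the caller retains ownership of that group. A minimal usage sketch under those constraints (host, port, and keepalive values are hypothetical):

    import io.grpc.ManagedChannel;
    import io.netty.channel.nio.NioEventLoopGroup;
    import io.netty.channel.socket.nio.NioSocketChannel;
    import java.util.concurrent.TimeUnit;

    NioEventLoopGroup group = new NioEventLoopGroup(); // caller must shut this down
    ManagedChannel channel =
        NettyChannelBuilder.forAddress("pd.example.internal", 2379)
            .channelType(NioSocketChannel.class) // must match the event loop group
            .eventLoopGroup(group)
            .usePlaintext()                      // NegotiationType.PLAINTEXT
            .keepAliveTime(10, TimeUnit.SECONDS)
            .keepAliveTimeout(3, TimeUnit.SECONDS)
            .build();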

File diff suppressed because it is too large.


@@ -1,375 +0,0 @@
/*
* Copyright 2015 The gRPC Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.grpc.netty;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import static io.netty.buffer.Unpooled.EMPTY_BUFFER;
import com.google.common.base.Preconditions;
import com.google.common.io.BaseEncoding;
import io.grpc.Attributes;
import io.grpc.CallOptions;
import io.grpc.InternalKnownTransport;
import io.grpc.InternalMethodDescriptor;
import io.grpc.Metadata;
import io.grpc.MethodDescriptor;
import io.grpc.Status;
import io.grpc.internal.AbstractClientStream;
import io.grpc.internal.Http2ClientStreamTransportState;
import io.grpc.internal.StatsTraceContext;
import io.grpc.internal.TransportTracer;
import io.grpc.internal.WritableBuffer;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.EventLoop;
import io.netty.handler.codec.http2.Http2Headers;
import io.netty.handler.codec.http2.Http2Stream;
import io.netty.util.AsciiString;
import io.perfmark.PerfMark;
import io.perfmark.Tag;
import io.prometheus.client.Histogram;
import javax.annotation.Nullable;
import org.tikv.common.util.HistogramUtils;
/** Client stream for a Netty transport. Must only be called from the sending application thread. */
class NettyClientStream extends AbstractClientStream {
private static final InternalMethodDescriptor methodDescriptorAccessor =
new InternalMethodDescriptor(
NettyClientTransport.class.getName().contains("grpc.netty.shaded")
? InternalKnownTransport.NETTY_SHADED
: InternalKnownTransport.NETTY);
private final Sink sink = new Sink();
private final TransportState state;
private final WriteQueue writeQueue;
private final MethodDescriptor<?, ?> method;
private AsciiString authority;
private final AsciiString scheme;
private final AsciiString userAgent;
public static final Histogram perfmarkNettyClientStreamDuration =
HistogramUtils.buildDuration()
.name("perfmark_netty_client_stream_duration_seconds")
.help("Perfmark netty client stream duration seconds")
.labelNames("type")
.register();
NettyClientStream(
TransportState state,
MethodDescriptor<?, ?> method,
Metadata headers,
Channel channel,
AsciiString authority,
AsciiString scheme,
AsciiString userAgent,
StatsTraceContext statsTraceCtx,
TransportTracer transportTracer,
CallOptions callOptions,
boolean useGetForSafeMethods) {
super(
new NettyWritableBufferAllocator(channel.alloc()),
statsTraceCtx,
transportTracer,
headers,
callOptions,
useGetForSafeMethods && method.isSafe());
this.state = checkNotNull(state, "transportState");
this.writeQueue = state.handler.getWriteQueue();
this.method = checkNotNull(method, "method");
this.authority = checkNotNull(authority, "authority");
this.scheme = checkNotNull(scheme, "scheme");
this.userAgent = userAgent;
}
@Override
protected TransportState transportState() {
return state;
}
@Override
protected Sink abstractClientStreamSink() {
return sink;
}
@Override
public void setAuthority(String authority) {
this.authority = AsciiString.of(checkNotNull(authority, "authority"));
}
@Override
public Attributes getAttributes() {
return state.handler.getAttributes();
}
private class Sink implements AbstractClientStream.Sink {
@Override
public void writeHeaders(Metadata headers, byte[] requestPayload) {
PerfMark.startTask("NettyClientStream$Sink.writeHeaders");
Histogram.Timer writeHeaders =
perfmarkNettyClientStreamDuration
.labels("NettyClientStream$Sink.writeHeaders")
.startTimer();
try {
writeHeadersInternal(headers, requestPayload);
} finally {
PerfMark.stopTask("NettyClientStream$Sink.writeHeaders");
writeHeaders.observeDuration();
}
}
private void writeHeadersInternal(Metadata headers, byte[] requestPayload) {
// Convert the headers into Netty HTTP/2 headers.
AsciiString defaultPath = (AsciiString) methodDescriptorAccessor.geRawMethodName(method);
if (defaultPath == null) {
defaultPath = new AsciiString("/" + method.getFullMethodName());
methodDescriptorAccessor.setRawMethodName(method, defaultPath);
}
boolean get = (requestPayload != null);
AsciiString httpMethod;
if (get) {
// Forge the query string
// TODO(ericgribkoff) Add the key back to the query string
defaultPath =
new AsciiString(defaultPath + "?" + BaseEncoding.base64().encode(requestPayload));
httpMethod = Utils.HTTP_GET_METHOD;
} else {
httpMethod = Utils.HTTP_METHOD;
}
Http2Headers http2Headers =
Utils.convertClientHeaders(
headers, scheme, defaultPath, authority, httpMethod, userAgent);
ChannelFutureListener failureListener =
new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture future) throws Exception {
if (!future.isSuccess()) {
                // Stream creation failed. Close the stream if not already closed. When the
                // channel is shut down, the lifecycle manager has a better view of the failure,
                // especially before negotiation completes (because the negotiator commonly
                // doesn't receive exceptionCaught, since NettyClientHandler does not
                // propagate it).
Status s = transportState().handler.getLifecycleManager().getShutdownStatus();
if (s == null) {
s = transportState().statusFromFailedFuture(future);
}
transportState().transportReportStatus(s, true, new Metadata());
}
}
};
// Write the command requesting the creation of the stream.
writeQueue
.enqueue(
new CreateStreamCommand(
http2Headers, transportState(), shouldBeCountedForInUse(), get),
!method.getType().clientSendsOneMessage() || get)
.addListener(failureListener);
}
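    // Illustrative example (added note, not from the original source): for a safe
    // method, the forged GET path built above ends up shaped like
    //
    //   /pkg.Service/Method?<base64(serialized request)>
    //
    // so the call can be sent as an HTTP/2 GET with no request body.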
private void writeFrameInternal(
WritableBuffer frame, boolean endOfStream, boolean flush, final int numMessages) {
Preconditions.checkArgument(numMessages >= 0);
ByteBuf bytebuf =
frame == null ? EMPTY_BUFFER : ((NettyWritableBuffer) frame).bytebuf().touch();
final int numBytes = bytebuf.readableBytes();
if (numBytes > 0) {
// Add the bytes to outbound flow control.
onSendingBytes(numBytes);
writeQueue
.enqueue(new SendGrpcFrameCommand(transportState(), bytebuf, endOfStream), flush)
.addListener(
new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture future) throws Exception {
                      // If the future succeeds when http2Stream is null, the stream has been
                      // cancelled before it began and Netty is purging pending writes from the
                      // flow-controller.
if (future.isSuccess() && transportState().http2Stream() != null) {
// Remove the bytes from outbound flow control, optionally notifying
// the client that they can send more bytes.
transportState().onSentBytes(numBytes);
NettyClientStream.this.getTransportTracer().reportMessageSent(numMessages);
}
}
});
} else {
// The frame is empty and will not impact outbound flow control. Just send it.
writeQueue.enqueue(new SendGrpcFrameCommand(transportState(), bytebuf, endOfStream), flush);
}
}
@Override
public void writeFrame(
WritableBuffer frame, boolean endOfStream, boolean flush, int numMessages) {
PerfMark.startTask("NettyClientStream$Sink.writeFrame");
Histogram.Timer writeFrame =
perfmarkNettyClientStreamDuration
.labels("NettyClientStream$Sink.writeFrame")
.startTimer();
try {
writeFrameInternal(frame, endOfStream, flush, numMessages);
} finally {
PerfMark.stopTask("NettyClientStream$Sink.writeFrame");
writeFrame.observeDuration();
}
}
@Override
public void cancel(Status status) {
PerfMark.startTask("NettyClientStream$Sink.cancel");
Histogram.Timer cancel =
perfmarkNettyClientStreamDuration.labels("NettyClientStream$Sink.cancel").startTimer();
try {
writeQueue.enqueue(new CancelClientStreamCommand(transportState(), status), true);
} finally {
PerfMark.stopTask("NettyClientStream$Sink.cancel");
cancel.observeDuration();
}
}
}
  /** This should only be called from the transport thread. */
public abstract static class TransportState extends Http2ClientStreamTransportState
implements StreamIdHolder {
private static final int NON_EXISTENT_ID = -1;
private final String methodName;
private final NettyClientHandler handler;
private final EventLoop eventLoop;
private int id;
private Http2Stream http2Stream;
private Tag tag;
protected TransportState(
NettyClientHandler handler,
EventLoop eventLoop,
int maxMessageSize,
StatsTraceContext statsTraceCtx,
TransportTracer transportTracer,
String methodName) {
super(maxMessageSize, statsTraceCtx, transportTracer);
this.methodName = checkNotNull(methodName, "methodName");
this.handler = checkNotNull(handler, "handler");
this.eventLoop = checkNotNull(eventLoop, "eventLoop");
tag = PerfMark.createTag(methodName);
}
@Override
public int id() {
// id should be positive
return id;
}
public void setId(int id) {
checkArgument(id > 0, "id must be positive %s", id);
checkState(this.id == 0, "id has been previously set: %s", this.id);
this.id = id;
this.tag = PerfMark.createTag(methodName, id);
}
/**
* Marks the stream state as if it had never existed. This can happen if the stream is cancelled
* after it is created, but before it has been started.
*/
void setNonExistent() {
checkState(this.id == 0, "Id has been previously set: %s", this.id);
this.id = NON_EXISTENT_ID;
}
boolean isNonExistent() {
return this.id == NON_EXISTENT_ID;
}
/**
* Sets the underlying Netty {@link Http2Stream} for this stream. This must be called in the
* context of the transport thread.
*/
public void setHttp2Stream(Http2Stream http2Stream) {
checkNotNull(http2Stream, "http2Stream");
checkState(this.http2Stream == null, "Can only set http2Stream once");
this.http2Stream = http2Stream;
// Now that the stream has actually been initialized, call the listener's onReady callback if
// appropriate.
onStreamAllocated();
getTransportTracer().reportLocalStreamStarted();
}
/** Gets the underlying Netty {@link Http2Stream} for this stream. */
@Nullable
public Http2Stream http2Stream() {
return http2Stream;
}
/**
* Intended to be overridden by NettyClientTransport, which has more information about failures.
* May only be called from event loop.
*/
protected abstract Status statusFromFailedFuture(ChannelFuture f);
@Override
protected void http2ProcessingFailed(Status status, boolean stopDelivery, Metadata trailers) {
transportReportStatus(status, stopDelivery, trailers);
handler.getWriteQueue().enqueue(new CancelClientStreamCommand(this, status), true);
}
@Override
public void runOnTransportThread(final Runnable r) {
if (eventLoop.inEventLoop()) {
r.run();
} else {
eventLoop.execute(r);
}
}
@Override
public void bytesRead(int processedBytes) {
handler.returnProcessedBytes(http2Stream, processedBytes);
handler.getWriteQueue().scheduleFlush();
}
@Override
public void deframeFailed(Throwable cause) {
http2ProcessingFailed(Status.fromThrowable(cause), true, new Metadata());
}
void transportHeadersReceived(Http2Headers headers, boolean endOfStream) {
if (endOfStream) {
if (!isOutboundClosed()) {
handler.getWriteQueue().enqueue(new CancelClientStreamCommand(this, null), true);
}
transportTrailersReceived(Utils.convertTrailers(headers));
} else {
transportHeadersReceived(Utils.convertHeaders(headers));
}
}
void transportDataReceived(ByteBuf frame, boolean endOfStream) {
transportDataReceived(new NettyReadableBuffer(frame.retain()), endOfStream);
}
@Override
public final Tag tag() {
return tag;
}
}
}

View File

@ -1,316 +0,0 @@
/*
* Copyright 2015 The gRPC Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.grpc.netty;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelPromise;
import io.perfmark.Link;
import io.perfmark.PerfMark;
import io.prometheus.client.Histogram;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.lang3.tuple.Pair;
import org.tikv.common.util.HistogramUtils;
/** A queue of pending writes to a {@link Channel} that is flushed as a single unit. */
class WriteQueue {
  // Dequeue in chunks, so we don't have to acquire the queue's lock too often.
@VisibleForTesting static final int DEQUE_CHUNK_SIZE = 128;
/** {@link Runnable} used to schedule work onto the tail of the event loop. */
private final Runnable later =
new Runnable() {
@Override
public void run() {
flush();
}
};
private final Channel channel;
private final Queue<Pair<QueuedCommand, Long>> queue;
private final AtomicBoolean scheduled = new AtomicBoolean();
public static final Histogram writeQueuePendingDuration =
HistogramUtils.buildDuration()
.name("grpc_netty_write_queue_pending_duration_ms")
.labelNames("type")
.help("Pending duration of a task in the write queue.")
.register();
public static final Histogram writeQueueWaitBatchDuration =
HistogramUtils.buildDuration()
.name("grpc_netty_write_queue_wait_batch_duration_seconds")
.help("Duration of waiting a batch filled in the write queue.")
.register();
public static final Histogram writeQueueBatchSize =
Histogram.build()
.exponentialBuckets(1, 2, 10)
.name("grpc_netty_write_queue_batch_size")
.help("Number of tasks in a batch in the write queue.")
.register();
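  // Note (added for clarity): with the Prometheus simpleclient,
  // exponentialBuckets(1, 2, 10) yields bucket upper bounds 1, 2, 4, ..., 512,
  // which brackets the DEQUE_CHUNK_SIZE (128) batches measured here.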
public static final Histogram writeQueueCmdRunDuration =
HistogramUtils.buildDuration()
.name("grpc_netty_write_queue_cmd_run_duration_seconds")
.help("Duration of a task execution in the write queue.")
.labelNames("type")
.register();
public static final Histogram writeQueueChannelFlushDuration =
HistogramUtils.buildDuration()
.name("grpc_netty_write_queue_channel_flush_duration_seconds")
.help("Duration of a channel flush in the write queue.")
.labelNames("phase")
.register();
public static final Histogram writeQueueFlushDuration =
HistogramUtils.buildDuration()
.name("grpc_netty_write_queue_flush_duration_seconds")
.help("Duration of a flush of the write queue.")
.register();
public static final Histogram perfmarkWriteQueueDuration =
HistogramUtils.buildDuration()
.name("perfmark_write_queue_duration_seconds")
.help("Perfmark write queue duration seconds")
.labelNames("type")
.register();
public WriteQueue(Channel channel) {
this.channel = Preconditions.checkNotNull(channel, "channel");
queue = new ConcurrentLinkedQueue<>();
}
/** Schedule a flush on the channel. */
void scheduleFlush() {
if (scheduled.compareAndSet(false, true)) {
// Add the queue to the tail of the event loop so writes will be executed immediately
// inside the event loop. Note DO NOT do channel.write outside the event loop as
// it will not wake up immediately without a flush.
channel.eventLoop().execute(later);
}
}
/**
* Enqueue a write command on the channel.
*
* @param command a write to be executed on the channel.
   * @param flush true if a flush of the write should be scheduled, false if a later call to
   *     enqueue
* will schedule the flush.
*/
@CanIgnoreReturnValue
ChannelFuture enqueue(QueuedCommand command, boolean flush) {
// Detect erroneous code that tries to reuse command objects.
Preconditions.checkArgument(command.promise() == null, "promise must not be set on command");
ChannelPromise promise = channel.newPromise();
command.promise(promise);
queue.add(Pair.of(command, System.nanoTime()));
if (flush) {
scheduleFlush();
}
return promise;
}
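  // Usage sketch (hypothetical command names, added for illustration): callers
  // batch several commands and only schedule the flush with the last one:
  //
  //   writeQueue.enqueue(headersCmd, false); // queued, no flush scheduled yet
  //   writeQueue.enqueue(dataCmd, true);     // queued, flush scheduled
  //
  // The returned ChannelFuture completes once the command's write completes.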
/**
   * Enqueue the runnable. It is not safe for another thread to queue a Runnable directly to the
* event loop, because it will be out-of-order with writes. This method allows the Runnable to be
* processed in-order with writes.
*/
void enqueue(Runnable runnable, boolean flush) {
Long now = System.nanoTime();
queue.add(Pair.<QueuedCommand, Long>of(new RunnableCommand(runnable), now));
if (flush) {
scheduleFlush();
}
}
/**
* Executes enqueued work directly on the current thread. This can be used to trigger writes
* before performing additional reads. Must be called from the event loop. This method makes no
* guarantee that the work queue is empty when it returns.
*/
void drainNow() {
Preconditions.checkState(channel.eventLoop().inEventLoop(), "must be on the event loop");
if (queue.peek() == null) {
return;
}
flush();
}
/**
* Process the queue of commands and dispatch them to the stream. This method is only called in
   * the event loop.
*/
private void flush() {
Histogram.Timer flushTimer = writeQueueFlushDuration.startTimer();
PerfMark.startTask("WriteQueue.periodicFlush");
Histogram.Timer periodicFlush =
perfmarkWriteQueueDuration.labels("WriteQueue.periodicFlush").startTimer();
long start = System.nanoTime();
try {
Pair<QueuedCommand, Long> item;
int i = 0;
boolean flushedOnce = false;
Histogram.Timer waitBatchTimer = writeQueueWaitBatchDuration.startTimer();
while ((item = queue.poll()) != null) {
QueuedCommand cmd = item.getLeft();
String cmdName = cmd.getClass().getSimpleName();
writeQueuePendingDuration
.labels(cmdName)
.observe((System.nanoTime() - item.getRight()) / 1_000_000.0);
Histogram.Timer cmdTimer = writeQueueCmdRunDuration.labels(cmdName).startTimer();
// Run the command
cmd.run(channel);
cmdTimer.observeDuration();
if (++i == DEQUE_CHUNK_SIZE) {
waitBatchTimer.observeDuration();
i = 0;
          // Flush each chunk so we release buffers periodically. In theory this loop might
          // never end, as new events are continuously added to the queue; if we never
          // flushed in that case, we would be guaranteed to OOM.
PerfMark.startTask("WriteQueue.flush0");
Histogram.Timer flush0 =
perfmarkWriteQueueDuration.labels("WriteQueue.flush0").startTimer();
Histogram.Timer channelFlushTimer =
writeQueueChannelFlushDuration.labels("flush0").startTimer();
try {
channel.flush();
} finally {
waitBatchTimer = writeQueueWaitBatchDuration.startTimer();
writeQueueBatchSize.observe(DEQUE_CHUNK_SIZE);
channelFlushTimer.observeDuration();
PerfMark.stopTask("WriteQueue.flush0");
flush0.observeDuration();
}
flushedOnce = true;
}
}
// Must flush at least once, even if there were no writes.
if (i != 0 || !flushedOnce) {
waitBatchTimer.observeDuration();
PerfMark.startTask("WriteQueue.flush1");
Histogram.Timer flush1 =
perfmarkWriteQueueDuration.labels("WriteQueue.flush1").startTimer();
Histogram.Timer channelFlushTimer =
writeQueueChannelFlushDuration.labels("flush1").startTimer();
try {
channel.flush();
} finally {
writeQueueBatchSize.observe(i);
channelFlushTimer.observeDuration();
PerfMark.stopTask("WriteQueue.flush1");
flush1.observeDuration();
}
}
} finally {
PerfMark.stopTask("WriteQueue.periodicFlush");
periodicFlush.observeDuration();
flushTimer.observeDuration();
      // Mark the write as done; if the queue is non-empty after marking, trigger a new write.
scheduled.set(false);
if (!queue.isEmpty()) {
scheduleFlush();
}
}
}
private static class RunnableCommand implements QueuedCommand {
private final Runnable runnable;
private final Link link;
public RunnableCommand(Runnable runnable) {
this.link = PerfMark.linkOut();
this.runnable = runnable;
}
@Override
public final void promise(ChannelPromise promise) {
throw new UnsupportedOperationException();
}
@Override
public final ChannelPromise promise() {
throw new UnsupportedOperationException();
}
@Override
public final void run(Channel channel) {
runnable.run();
}
@Override
public Link getLink() {
return link;
}
}
abstract static class AbstractQueuedCommand implements QueuedCommand {
private ChannelPromise promise;
private final Link link;
AbstractQueuedCommand() {
this.link = PerfMark.linkOut();
}
@Override
public final void promise(ChannelPromise promise) {
this.promise = promise;
}
@Override
public final ChannelPromise promise() {
return promise;
}
@Override
public final void run(Channel channel) {
channel.write(this, promise);
}
@Override
public Link getLink() {
return link;
}
}
/** Simple wrapper type around a command and its optional completion listener. */
interface QueuedCommand {
    /** Returns the promise being notified of the success/failure of the write. */
ChannelPromise promise();
/** Sets the promise. */
void promise(ChannelPromise promise);
void run(Channel channel);
Link getLink();
}
}

View File

@ -1,805 +0,0 @@
/*
* Copyright 2014 The gRPC Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.grpc.stub;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import com.google.common.base.MoreObjects;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.AbstractFuture;
import com.google.common.util.concurrent.ListenableFuture;
import io.grpc.CallOptions;
import io.grpc.Channel;
import io.grpc.ClientCall;
import io.grpc.Metadata;
import io.grpc.MethodDescriptor;
import io.grpc.Status;
import io.grpc.StatusException;
import io.grpc.StatusRuntimeException;
import io.prometheus.client.Histogram;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.Future;
import java.util.concurrent.locks.LockSupport;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.Nullable;
import org.tikv.common.util.HistogramUtils;
/**
* Utility functions for processing different call idioms. We have one-to-one correspondence between
* utilities in this class and the potential signatures in a generated stub class so that the
* runtime can vary behavior without requiring regeneration of the stub.
*/
public final class ClientCalls {
private static final Logger logger = Logger.getLogger(ClientCalls.class.getName());
public static final Histogram asyncUnaryRequestCallDuration =
HistogramUtils.buildDuration()
.name("grpc_client_async_unary_request_call_duration_seconds")
.help("Histogram of time spent in asyncUnaryRequestCall")
.labelNames("phase")
.register();
public static final Histogram blockingUnaryRequestWaitDuration =
HistogramUtils.buildDuration()
.name("grpc_client_blocking_unary_request_wait_duration_seconds")
.help("Histogram of time spent waiting for future in blockingUnaryCall")
.register();
// Prevent instantiation
private ClientCalls() {}
/**
* Executes a unary call with a response {@link StreamObserver}. The {@code call} should not be
* already started. After calling this method, {@code call} should no longer be used.
*
* <p>If the provided {@code responseObserver} is an instance of {@link ClientResponseObserver},
* {@code beforeStart()} will be called.
*/
public static <ReqT, RespT> void asyncUnaryCall(
ClientCall<ReqT, RespT> call, ReqT req, StreamObserver<RespT> responseObserver) {
asyncUnaryRequestCall(call, req, responseObserver, false);
}
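  // Usage sketch (hypothetical FOO_METHOD / FooResponse names, added for
  // illustration):
  //
  //   ClientCalls.asyncUnaryCall(
  //       channel.newCall(FOO_METHOD, CallOptions.DEFAULT),
  //       request,
  //       new StreamObserver<FooResponse>() {
  //         @Override public void onNext(FooResponse value) { /* the single response */ }
  //         @Override public void onError(Throwable t) { /* failed Status as exception */ }
  //         @Override public void onCompleted() { /* call finished cleanly */ }
  //       });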
/**
* Executes a server-streaming call with a response {@link StreamObserver}. The {@code call}
* should not be already started. After calling this method, {@code call} should no longer be
* used.
*
* <p>If the provided {@code responseObserver} is an instance of {@link ClientResponseObserver},
* {@code beforeStart()} will be called.
*/
public static <ReqT, RespT> void asyncServerStreamingCall(
ClientCall<ReqT, RespT> call, ReqT req, StreamObserver<RespT> responseObserver) {
asyncUnaryRequestCall(call, req, responseObserver, true);
}
/**
* Executes a client-streaming call returning a {@link StreamObserver} for the request messages.
* The {@code call} should not be already started. After calling this method, {@code call} should
* no longer be used.
*
* <p>If the provided {@code responseObserver} is an instance of {@link ClientResponseObserver},
* {@code beforeStart()} will be called.
*
* @return request stream observer. It will extend {@link ClientCallStreamObserver}
*/
public static <ReqT, RespT> StreamObserver<ReqT> asyncClientStreamingCall(
ClientCall<ReqT, RespT> call, StreamObserver<RespT> responseObserver) {
return asyncStreamingRequestCall(call, responseObserver, false);
}
/**
* Executes a bidirectional-streaming call. The {@code call} should not be already started. After
* calling this method, {@code call} should no longer be used.
*
* <p>If the provided {@code responseObserver} is an instance of {@link ClientResponseObserver},
* {@code beforeStart()} will be called.
*
* @return request stream observer. It will extend {@link ClientCallStreamObserver}
*/
public static <ReqT, RespT> StreamObserver<ReqT> asyncBidiStreamingCall(
ClientCall<ReqT, RespT> call, StreamObserver<RespT> responseObserver) {
return asyncStreamingRequestCall(call, responseObserver, true);
}
/**
* Executes a unary call and blocks on the response. The {@code call} should not be already
* started. After calling this method, {@code call} should no longer be used.
*
* @return the single response message.
* @throws StatusRuntimeException on error
*/
public static <ReqT, RespT> RespT blockingUnaryCall(ClientCall<ReqT, RespT> call, ReqT req) {
try {
return getUnchecked(futureUnaryCall(call, req));
} catch (RuntimeException e) {
throw cancelThrow(call, e);
} catch (Error e) {
throw cancelThrow(call, e);
}
}
/**
* Executes a unary call and blocks on the response. The {@code call} should not be already
* started. After calling this method, {@code call} should no longer be used.
*
* @return the single response message.
* @throws StatusRuntimeException on error
*/
public static <ReqT, RespT> RespT blockingUnaryCall(
Channel channel, MethodDescriptor<ReqT, RespT> method, CallOptions callOptions, ReqT req) {
ThreadlessExecutor executor = new ThreadlessExecutor();
boolean interrupt = false;
ClientCall<ReqT, RespT> call =
channel.newCall(
method,
callOptions
.withOption(ClientCalls.STUB_TYPE_OPTION, StubType.BLOCKING)
.withExecutor(executor));
Histogram.Timer waitTimer = null;
try {
ListenableFuture<RespT> responseFuture = futureUnaryCall(call, req);
waitTimer = blockingUnaryRequestWaitDuration.startTimer();
while (!responseFuture.isDone()) {
try {
executor.waitAndDrain();
} catch (InterruptedException e) {
interrupt = true;
call.cancel("Thread interrupted", e);
// Now wait for onClose() to be called, so interceptors can clean up
}
}
return getUnchecked(responseFuture);
} catch (RuntimeException e) {
// Something very bad happened. All bets are off; it may be dangerous to wait for onClose().
throw cancelThrow(call, e);
} catch (Error e) {
// Something very bad happened. All bets are off; it may be dangerous to wait for onClose().
throw cancelThrow(call, e);
} finally {
if (waitTimer != null) {
waitTimer.observeDuration();
}
if (interrupt) {
Thread.currentThread().interrupt();
}
}
}
/**
* Executes a server-streaming call returning a blocking {@link Iterator} over the response
* stream. The {@code call} should not be already started. After calling this method, {@code call}
* should no longer be used.
*
* <p>The returned iterator may throw {@link StatusRuntimeException} on error.
*
* @return an iterator over the response stream.
*/
// TODO(louiscryan): Not clear if we want to use this idiom for 'simple' stubs.
public static <ReqT, RespT> Iterator<RespT> blockingServerStreamingCall(
ClientCall<ReqT, RespT> call, ReqT req) {
BlockingResponseStream<RespT> result = new BlockingResponseStream<>(call);
asyncUnaryRequestCall(call, req, result.listener());
return result;
}
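  // Usage sketch (hypothetical names, added for illustration): the returned
  // iterator blocks per element and surfaces errors as StatusRuntimeException:
  //
  //   Iterator<FooResponse> it =
  //       ClientCalls.blockingServerStreamingCall(
  //           channel.newCall(LIST_METHOD, CallOptions.DEFAULT), request);
  //   while (it.hasNext()) {
  //     process(it.next());
  //   }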
/**
* Executes a server-streaming call returning a blocking {@link Iterator} over the response
* stream. The {@code call} should not be already started. After calling this method, {@code call}
* should no longer be used.
*
* <p>The returned iterator may throw {@link StatusRuntimeException} on error.
*
* @return an iterator over the response stream.
*/
// TODO(louiscryan): Not clear if we want to use this idiom for 'simple' stubs.
public static <ReqT, RespT> Iterator<RespT> blockingServerStreamingCall(
Channel channel, MethodDescriptor<ReqT, RespT> method, CallOptions callOptions, ReqT req) {
ThreadlessExecutor executor = new ThreadlessExecutor();
ClientCall<ReqT, RespT> call =
channel.newCall(
method,
callOptions
.withOption(ClientCalls.STUB_TYPE_OPTION, StubType.BLOCKING)
.withExecutor(executor));
BlockingResponseStream<RespT> result = new BlockingResponseStream<>(call, executor);
asyncUnaryRequestCall(call, req, result.listener());
return result;
}
/**
* Executes a unary call and returns a {@link ListenableFuture} to the response. The {@code call}
* should not be already started. After calling this method, {@code call} should no longer be
* used.
*
* @return a future for the single response message.
*/
public static <ReqT, RespT> ListenableFuture<RespT> futureUnaryCall(
ClientCall<ReqT, RespT> call, ReqT req) {
GrpcFuture<RespT> responseFuture = new GrpcFuture<>(call);
asyncUnaryRequestCall(call, req, new UnaryStreamToFuture<>(responseFuture));
return responseFuture;
}
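  // Usage sketch (hypothetical names, added for illustration): the future
  // resolves with the single response or fails with a StatusRuntimeException:
  //
  //   ListenableFuture<FooResponse> f =
  //       ClientCalls.futureUnaryCall(
  //           channel.newCall(FOO_METHOD, CallOptions.DEFAULT), request);
  //   FooResponse resp = f.get();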
/**
* Returns the result of calling {@link Future#get()} interruptibly on a task known not to throw a
* checked exception.
*
   * <p>If interrupted, the interrupt is restored before throwing an exception.
*
* @throws java.util.concurrent.CancellationException if {@code get} throws a {@code
* CancellationException}.
* @throws io.grpc.StatusRuntimeException if {@code get} throws an {@link ExecutionException} or
* an {@link InterruptedException}.
*/
private static <V> V getUnchecked(Future<V> future) {
try {
return future.get();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw Status.CANCELLED
.withDescription("Thread interrupted")
.withCause(e)
.asRuntimeException();
} catch (ExecutionException e) {
throw toStatusRuntimeException(e.getCause());
}
}
/**
* Wraps the given {@link Throwable} in a {@link StatusRuntimeException}. If it contains an
* embedded {@link StatusException} or {@link StatusRuntimeException}, the returned exception will
* contain the embedded trailers and status, with the given exception as the cause. Otherwise, an
   * exception will be generated from a {@link Status#UNKNOWN} status.
*/
private static StatusRuntimeException toStatusRuntimeException(Throwable t) {
Throwable cause = checkNotNull(t, "t");
while (cause != null) {
// If we have an embedded status, use it and replace the cause
if (cause instanceof StatusException) {
StatusException se = (StatusException) cause;
return new StatusRuntimeException(se.getStatus(), se.getTrailers());
} else if (cause instanceof StatusRuntimeException) {
StatusRuntimeException se = (StatusRuntimeException) cause;
return new StatusRuntimeException(se.getStatus(), se.getTrailers());
}
cause = cause.getCause();
}
return Status.UNKNOWN.withDescription("unexpected exception").withCause(t).asRuntimeException();
}
/**
* Cancels a call, and throws the exception.
*
* @param t must be a RuntimeException or Error
*/
private static RuntimeException cancelThrow(ClientCall<?, ?> call, Throwable t) {
try {
call.cancel(null, t);
} catch (Throwable e) {
assert e instanceof RuntimeException || e instanceof Error;
logger.log(Level.SEVERE, "RuntimeException encountered while closing call", e);
}
if (t instanceof RuntimeException) {
throw (RuntimeException) t;
} else if (t instanceof Error) {
throw (Error) t;
}
// should be impossible
throw new AssertionError(t);
}
private static <ReqT, RespT> void asyncUnaryRequestCall(
ClientCall<ReqT, RespT> call,
ReqT req,
StreamObserver<RespT> responseObserver,
boolean streamingResponse) {
asyncUnaryRequestCall(
call,
req,
new StreamObserverToCallListenerAdapter<>(
responseObserver, new CallToStreamObserverAdapter<>(call, streamingResponse)));
}
private static <ReqT, RespT> void asyncUnaryRequestCall(
ClientCall<ReqT, RespT> call, ReqT req, StartableListener<RespT> responseListener) {
Histogram.Timer startCallTimer =
asyncUnaryRequestCallDuration.labels("start_call").startTimer();
startCall(call, responseListener);
startCallTimer.observeDuration();
try {
Histogram.Timer sendMessageTimer =
asyncUnaryRequestCallDuration.labels("send_message").startTimer();
call.sendMessage(req);
sendMessageTimer.observeDuration();
Histogram.Timer halfCloseTimer =
asyncUnaryRequestCallDuration.labels("half_close").startTimer();
call.halfClose();
halfCloseTimer.observeDuration();
} catch (RuntimeException e) {
throw cancelThrow(call, e);
} catch (Error e) {
throw cancelThrow(call, e);
}
}
private static <ReqT, RespT> StreamObserver<ReqT> asyncStreamingRequestCall(
ClientCall<ReqT, RespT> call,
StreamObserver<RespT> responseObserver,
boolean streamingResponse) {
CallToStreamObserverAdapter<ReqT> adapter =
new CallToStreamObserverAdapter<>(call, streamingResponse);
startCall(call, new StreamObserverToCallListenerAdapter<>(responseObserver, adapter));
return adapter;
}
private static <ReqT, RespT> void startCall(
ClientCall<ReqT, RespT> call, StartableListener<RespT> responseListener) {
call.start(responseListener, new Metadata());
responseListener.onStart();
}
private abstract static class StartableListener<T> extends ClientCall.Listener<T> {
abstract void onStart();
}
private static final class CallToStreamObserverAdapter<T> extends ClientCallStreamObserver<T> {
private boolean frozen;
private final ClientCall<T, ?> call;
private final boolean streamingResponse;
private Runnable onReadyHandler;
private int initialRequest = 1;
private boolean autoRequestEnabled = true;
private boolean aborted = false;
private boolean completed = false;
// Non private to avoid synthetic class
CallToStreamObserverAdapter(ClientCall<T, ?> call, boolean streamingResponse) {
this.call = call;
this.streamingResponse = streamingResponse;
}
private void freeze() {
this.frozen = true;
}
@Override
public void onNext(T value) {
checkState(!aborted, "Stream was terminated by error, no further calls are allowed");
checkState(!completed, "Stream is already completed, no further calls are allowed");
call.sendMessage(value);
}
@Override
public void onError(Throwable t) {
call.cancel("Cancelled by client with StreamObserver.onError()", t);
aborted = true;
}
@Override
public void onCompleted() {
call.halfClose();
completed = true;
}
@Override
public boolean isReady() {
return call.isReady();
}
@Override
public void setOnReadyHandler(Runnable onReadyHandler) {
if (frozen) {
throw new IllegalStateException(
"Cannot alter onReadyHandler after call started. Use ClientResponseObserver");
}
this.onReadyHandler = onReadyHandler;
}
@Deprecated
@Override
public void disableAutoInboundFlowControl() {
disableAutoRequestWithInitial(1);
}
@Override
public void disableAutoRequestWithInitial(int request) {
if (frozen) {
throw new IllegalStateException(
"Cannot disable auto flow control after call started. Use ClientResponseObserver");
}
Preconditions.checkArgument(request >= 0, "Initial requests must be non-negative");
initialRequest = request;
autoRequestEnabled = false;
}
@Override
public void request(int count) {
if (!streamingResponse && count == 1) {
// Initially ask for two responses from flow-control so that if a misbehaving server
        // sends more than one response, we can catch it and fail it in the listener.
call.request(2);
} else {
call.request(count);
}
}
@Override
public void setMessageCompression(boolean enable) {
call.setMessageCompression(enable);
}
@Override
public void cancel(@Nullable String message, @Nullable Throwable cause) {
call.cancel(message, cause);
}
}
private static final class StreamObserverToCallListenerAdapter<ReqT, RespT>
extends StartableListener<RespT> {
private final StreamObserver<RespT> observer;
private final CallToStreamObserverAdapter<ReqT> adapter;
private boolean firstResponseReceived;
// Non private to avoid synthetic class
StreamObserverToCallListenerAdapter(
StreamObserver<RespT> observer, CallToStreamObserverAdapter<ReqT> adapter) {
this.observer = observer;
this.adapter = adapter;
if (observer instanceof ClientResponseObserver) {
@SuppressWarnings("unchecked")
ClientResponseObserver<ReqT, RespT> clientResponseObserver =
(ClientResponseObserver<ReqT, RespT>) observer;
clientResponseObserver.beforeStart(adapter);
}
adapter.freeze();
}
@Override
public void onHeaders(Metadata headers) {}
@Override
public void onMessage(RespT message) {
if (firstResponseReceived && !adapter.streamingResponse) {
throw Status.INTERNAL
.withDescription("More than one responses received for unary or client-streaming call")
.asRuntimeException();
}
firstResponseReceived = true;
observer.onNext(message);
if (adapter.streamingResponse && adapter.autoRequestEnabled) {
// Request delivery of the next inbound message.
adapter.request(1);
}
}
@Override
public void onClose(Status status, Metadata trailers) {
if (status.isOk()) {
observer.onCompleted();
} else {
observer.onError(status.asRuntimeException(trailers));
}
}
@Override
public void onReady() {
if (adapter.onReadyHandler != null) {
adapter.onReadyHandler.run();
}
}
@Override
void onStart() {
if (adapter.initialRequest > 0) {
adapter.request(adapter.initialRequest);
}
}
}
/** Completes a {@link GrpcFuture} using {@link StreamObserver} events. */
private static final class UnaryStreamToFuture<RespT> extends StartableListener<RespT> {
private final GrpcFuture<RespT> responseFuture;
private RespT value;
// Non private to avoid synthetic class
UnaryStreamToFuture(GrpcFuture<RespT> responseFuture) {
this.responseFuture = responseFuture;
}
@Override
public void onHeaders(Metadata headers) {}
@Override
public void onMessage(RespT value) {
if (this.value != null) {
throw Status.INTERNAL
.withDescription("More than one value received for unary call")
.asRuntimeException();
}
this.value = value;
}
@Override
public void onClose(Status status, Metadata trailers) {
if (status.isOk()) {
if (value == null) {
// No value received so mark the future as an error
responseFuture.setException(
Status.INTERNAL
.withDescription("No value received for unary call")
.asRuntimeException(trailers));
}
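        // Note (added for clarity): if setException(...) ran above, the set(value)
        // below is a no-op, because the AbstractFuture is already completed.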
responseFuture.set(value);
} else {
responseFuture.setException(status.asRuntimeException(trailers));
}
}
@Override
void onStart() {
responseFuture.call.request(2);
}
}
private static final class GrpcFuture<RespT> extends AbstractFuture<RespT> {
private final ClientCall<?, RespT> call;
// Non private to avoid synthetic class
GrpcFuture(ClientCall<?, RespT> call) {
this.call = call;
}
@Override
protected void interruptTask() {
call.cancel("GrpcFuture was cancelled", null);
}
@Override
protected boolean set(@Nullable RespT resp) {
return super.set(resp);
}
@Override
protected boolean setException(Throwable throwable) {
return super.setException(throwable);
}
@SuppressWarnings("MissingOverride") // Add @Override once Java 6 support is dropped
protected String pendingToString() {
return MoreObjects.toStringHelper(this).add("clientCall", call).toString();
}
}
/**
* Convert events on a {@link io.grpc.ClientCall.Listener} into a blocking {@link Iterator}.
*
* <p>The class is not thread-safe, but it does permit {@link ClientCall.Listener} calls in a
* separate thread from {@link Iterator} calls.
*/
// TODO(ejona86): determine how to allow ClientCall.cancel() in case of application error.
private static final class BlockingResponseStream<T> implements Iterator<T> {
// Due to flow control, only needs to hold up to 3 items: 2 for value, 1 for close.
// (2 for value, not 1, because of early request() in next())
private final BlockingQueue<Object> buffer = new ArrayBlockingQueue<>(3);
private final StartableListener<T> listener = new QueuingListener();
private final ClientCall<?, T> call;
/** May be null. */
private final ThreadlessExecutor threadless;
// Only accessed when iterating.
private Object last;
// Non private to avoid synthetic class
BlockingResponseStream(ClientCall<?, T> call) {
this(call, null);
}
// Non private to avoid synthetic class
BlockingResponseStream(ClientCall<?, T> call, ThreadlessExecutor threadless) {
this.call = call;
this.threadless = threadless;
}
StartableListener<T> listener() {
return listener;
}
private Object waitForNext() {
boolean interrupt = false;
try {
if (threadless == null) {
while (true) {
try {
return buffer.take();
} catch (InterruptedException ie) {
interrupt = true;
call.cancel("Thread interrupted", ie);
// Now wait for onClose() to be called, to guarantee BlockingQueue doesn't fill
}
}
} else {
Object next;
while ((next = buffer.poll()) == null) {
try {
threadless.waitAndDrain();
} catch (InterruptedException ie) {
interrupt = true;
call.cancel("Thread interrupted", ie);
// Now wait for onClose() to be called, so interceptors can clean up
}
}
return next;
}
} finally {
if (interrupt) {
Thread.currentThread().interrupt();
}
}
}
@Override
public boolean hasNext() {
while (last == null) {
// Will block here indefinitely waiting for content. RPC timeouts defend against permanent
// hangs here as the call will become closed.
last = waitForNext();
}
if (last instanceof StatusRuntimeException) {
// Rethrow the exception with a new stacktrace.
StatusRuntimeException e = (StatusRuntimeException) last;
throw e.getStatus().asRuntimeException(e.getTrailers());
}
return last != this;
}
@Override
public T next() {
      // Eagerly call request(1) so it can be processing the next message while we wait for the
      // current one, which reduces latency for the next message. With MigratingThreadDeframer,
      // and if the data has already been received, every other message can be delivered
      // instantly. This could be run after hasNext(), but would just be slower.
if (!(last instanceof StatusRuntimeException) && last != this) {
call.request(1);
}
if (!hasNext()) {
throw new NoSuchElementException();
}
@SuppressWarnings("unchecked")
T tmp = (T) last;
last = null;
return tmp;
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
private final class QueuingListener extends StartableListener<T> {
// Non private to avoid synthetic class
QueuingListener() {}
private boolean done = false;
@Override
public void onHeaders(Metadata headers) {}
@Override
public void onMessage(T value) {
Preconditions.checkState(!done, "ClientCall already closed");
buffer.add(value);
}
@Override
public void onClose(Status status, Metadata trailers) {
Preconditions.checkState(!done, "ClientCall already closed");
if (status.isOk()) {
buffer.add(BlockingResponseStream.this);
} else {
buffer.add(status.asRuntimeException(trailers));
}
done = true;
}
@Override
void onStart() {
call.request(1);
}
}
}
@SuppressWarnings("serial")
private static final class ThreadlessExecutor extends ConcurrentLinkedQueue<Runnable>
implements Executor {
private static final Logger log = Logger.getLogger(ThreadlessExecutor.class.getName());
private volatile Thread waiter;
private static final Histogram lockDuration =
HistogramUtils.buildDuration()
.name("grpc_client_executor_lock_duration_seconds")
.help("Histogram of time spent in ThreadlessExecutor lock")
.labelNames("phase")
.register();
// Non private to avoid synthetic class
ThreadlessExecutor() {}
/**
* Waits until there is a Runnable, then executes it and all queued Runnables after it. Must
* only be called by one thread at a time.
*/
public void waitAndDrain() throws InterruptedException {
throwIfInterrupted();
Runnable runnable = poll();
if (runnable == null) {
waiter = Thread.currentThread();
try {
Histogram.Timer parkTimer = lockDuration.labels("park").startTimer();
while ((runnable = poll()) == null) {
LockSupport.park(this);
throwIfInterrupted();
}
parkTimer.observeDuration();
} finally {
waiter = null;
}
}
do {
try {
runnable.run();
} catch (Throwable t) {
log.log(Level.WARNING, "Runnable threw exception", t);
}
} while ((runnable = poll()) != null);
}
private static void throwIfInterrupted() throws InterruptedException {
if (Thread.interrupted()) {
throw new InterruptedException();
}
}
@Override
public void execute(Runnable runnable) {
add(runnable);
Histogram.Timer unparkTimer = lockDuration.labels("unpark").startTimer();
LockSupport.unpark(waiter); // no-op if null
unparkTimer.observeDuration();
}
}
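  // Usage sketch (added for illustration; mirrors blockingUnaryCall above): the
  // application thread owns the call's executor and drains its work inline:
  //
  //   ThreadlessExecutor executor = new ThreadlessExecutor();
  //   ClientCall<ReqT, RespT> call =
  //       channel.newCall(method, callOptions.withExecutor(executor));
  //   ListenableFuture<RespT> future = futureUnaryCall(call, req);
  //   while (!future.isDone()) {
  //     executor.waitAndDrain(); // runs queued callbacks on this thread
  //   }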
enum StubType {
BLOCKING,
FUTURE,
ASYNC
}
/** Internal {@link CallOptions.Key} to indicate stub types. */
static final CallOptions.Key<StubType> STUB_TYPE_OPTION =
CallOptions.Key.create("internal-stub-type");
}

View File

@ -1,752 +0,0 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer;
import static io.netty.buffer.PoolChunk.isSubpage;
import static java.lang.Math.max;
import io.netty.util.internal.LongCounter;
import io.netty.util.internal.PlatformDependent;
import io.netty.util.internal.StringUtil;
import io.prometheus.client.Counter;
import io.prometheus.client.Histogram;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
abstract class PoolArena<T> extends SizeClasses implements PoolArenaMetric {
public static final Counter poolArenaAllocations =
Counter.build()
.name("netty_buffer_pool_arena_allocations")
.help("Number of times a pool arena was allocated")
.labelNames("type")
.register();
public static final Histogram poolArenaAllocationsDuration =
Histogram.build()
.name("netty_buffer_pool_arena_allocations_duration_seconds")
.help("Duration of a pool arena allocation")
.labelNames("type")
.register();
static final boolean HAS_UNSAFE = PlatformDependent.hasUnsafe();
enum SizeClass {
Small,
Normal
}
final PooledByteBufAllocator parent;
final int numSmallSubpagePools;
final int directMemoryCacheAlignment;
private final PoolSubpage<T>[] smallSubpagePools;
private final PoolChunkList<T> q050;
private final PoolChunkList<T> q025;
private final PoolChunkList<T> q000;
private final PoolChunkList<T> qInit;
private final PoolChunkList<T> q075;
private final PoolChunkList<T> q100;
private final List<PoolChunkListMetric> chunkListMetrics;
// Metrics for allocations and deallocations
private long allocationsNormal;
  // We need to use the LongCounter here as this is not guarded via a synchronized block.
private final LongCounter allocationsSmall = PlatformDependent.newLongCounter();
private final LongCounter allocationsHuge = PlatformDependent.newLongCounter();
private final LongCounter activeBytesHuge = PlatformDependent.newLongCounter();
private long deallocationsSmall;
private long deallocationsNormal;
  // We need to use the LongCounter here as this is not guarded via a synchronized block.
private final LongCounter deallocationsHuge = PlatformDependent.newLongCounter();
// Number of thread caches backed by this arena.
final AtomicInteger numThreadCaches = new AtomicInteger();
// TODO: Test if adding padding helps under contention
// private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;
protected PoolArena(
PooledByteBufAllocator parent,
int pageSize,
int pageShifts,
int chunkSize,
int cacheAlignment) {
super(pageSize, pageShifts, chunkSize, cacheAlignment);
this.parent = parent;
directMemoryCacheAlignment = cacheAlignment;
numSmallSubpagePools = nSubpages;
smallSubpagePools = newSubpagePoolArray(numSmallSubpagePools);
for (int i = 0; i < smallSubpagePools.length; i++) {
smallSubpagePools[i] = newSubpagePoolHead();
}
q100 = new PoolChunkList<T>(this, null, 100, Integer.MAX_VALUE, chunkSize);
q075 = new PoolChunkList<T>(this, q100, 75, 100, chunkSize);
q050 = new PoolChunkList<T>(this, q075, 50, 100, chunkSize);
q025 = new PoolChunkList<T>(this, q050, 25, 75, chunkSize);
q000 = new PoolChunkList<T>(this, q025, 1, 50, chunkSize);
qInit = new PoolChunkList<T>(this, q000, Integer.MIN_VALUE, 25, chunkSize);
q100.prevList(q075);
q075.prevList(q050);
q050.prevList(q025);
q025.prevList(q000);
q000.prevList(null);
qInit.prevList(qInit);
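    // Note (added for clarity): a new chunk starts in qInit and migrates along
    // qInit -> q000 -> q025 -> q050 -> q075 -> q100 as its usage rises, and back
    // down as buffers are freed. qInit pointing at itself keeps fresh chunks from
    // being destroyed when their usage drops to zero; chunks in q000 are instead
    // destroyed once fully freed.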
List<PoolChunkListMetric> metrics = new ArrayList<PoolChunkListMetric>(6);
metrics.add(qInit);
metrics.add(q000);
metrics.add(q025);
metrics.add(q050);
metrics.add(q075);
metrics.add(q100);
chunkListMetrics = Collections.unmodifiableList(metrics);
}
private PoolSubpage<T> newSubpagePoolHead() {
PoolSubpage<T> head = new PoolSubpage<T>();
head.prev = head;
head.next = head;
return head;
}
@SuppressWarnings("unchecked")
private PoolSubpage<T>[] newSubpagePoolArray(int size) {
return new PoolSubpage[size];
}
abstract boolean isDirect();
PooledByteBuf<T> allocate(PoolThreadCache cache, int reqCapacity, int maxCapacity) {
PooledByteBuf<T> buf = newByteBuf(maxCapacity);
allocate(cache, buf, reqCapacity);
return buf;
}
private void allocate(PoolThreadCache cache, PooledByteBuf<T> buf, final int reqCapacity) {
final int sizeIdx = size2SizeIdx(reqCapacity);
if (sizeIdx <= smallMaxSizeIdx) {
Histogram.Timer smallAllocationTimer =
poolArenaAllocationsDuration.labels("small").startTimer();
tcacheAllocateSmall(cache, buf, reqCapacity, sizeIdx);
smallAllocationTimer.observeDuration();
poolArenaAllocations.labels("small").inc();
} else if (sizeIdx < nSizes) {
Histogram.Timer normalAllocationTimer =
poolArenaAllocationsDuration.labels("normal").startTimer();
tcacheAllocateNormal(cache, buf, reqCapacity, sizeIdx);
normalAllocationTimer.observeDuration();
poolArenaAllocations.labels("normal").inc();
} else {
Histogram.Timer hugeAllocationTimer =
poolArenaAllocationsDuration.labels("huge").startTimer();
int normCapacity = directMemoryCacheAlignment > 0 ? normalizeSize(reqCapacity) : reqCapacity;
// Huge allocations are never served via the cache so just call allocateHuge
allocateHuge(buf, normCapacity);
hugeAllocationTimer.observeDuration();
poolArenaAllocations.labels("huge").inc();
}
}
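  // Illustrative routing (added note; thresholds come from SizeClasses and the
  // allocator defaults): a small request is rounded up to a size class and served
  // from the subpage pools, a mid-size request goes through the chunk lists
  // ("normal"), and anything past the largest size class is served as an
  // unpooled "huge" allocation, each path feeding the metrics declared above.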
private void tcacheAllocateSmall(
PoolThreadCache cache, PooledByteBuf<T> buf, final int reqCapacity, final int sizeIdx) {
if (cache.allocateSmall(this, buf, reqCapacity, sizeIdx)) {
// was able to allocate out of the cache so move on
return;
}
/*
* Synchronize on the head. This is needed as {@link PoolChunk#allocateSubpage(int)} and
* {@link PoolChunk#free(long)} may modify the doubly linked list as well.
*/
final PoolSubpage<T> head = smallSubpagePools[sizeIdx];
final boolean needsNormalAllocation;
synchronized (head) {
final PoolSubpage<T> s = head.next;
needsNormalAllocation = s == head;
if (!needsNormalAllocation) {
assert s.doNotDestroy && s.elemSize == sizeIdx2size(sizeIdx)
: "doNotDestroy="
+ s.doNotDestroy
+ ", elemSize="
+ s.elemSize
+ ", sizeIdx="
+ sizeIdx;
long handle = s.allocate();
assert handle >= 0;
s.chunk.initBufWithSubpage(buf, null, handle, reqCapacity, cache);
}
}
if (needsNormalAllocation) {
synchronized (this) {
allocateNormal(buf, reqCapacity, sizeIdx, cache);
}
}
incSmallAllocation();
}
private void tcacheAllocateNormal(
PoolThreadCache cache, PooledByteBuf<T> buf, final int reqCapacity, final int sizeIdx) {
if (cache.allocateNormal(this, buf, reqCapacity, sizeIdx)) {
// was able to allocate out of the cache so move on
return;
}
synchronized (this) {
allocateNormal(buf, reqCapacity, sizeIdx, cache);
++allocationsNormal;
}
}
  // Method must be called inside a synchronized(this) { ... } block
private void allocateNormal(
PooledByteBuf<T> buf, int reqCapacity, int sizeIdx, PoolThreadCache threadCache) {
if (q050.allocate(buf, reqCapacity, sizeIdx, threadCache)
|| q025.allocate(buf, reqCapacity, sizeIdx, threadCache)
|| q000.allocate(buf, reqCapacity, sizeIdx, threadCache)
|| qInit.allocate(buf, reqCapacity, sizeIdx, threadCache)
|| q075.allocate(buf, reqCapacity, sizeIdx, threadCache)) {
return;
}
// Add a new chunk.
PoolChunk<T> c = newChunk(pageSize, nPSizes, pageShifts, chunkSize);
boolean success = c.allocate(buf, reqCapacity, sizeIdx, threadCache);
assert success;
qInit.add(c);
}
private void incSmallAllocation() {
allocationsSmall.increment();
}
private void allocateHuge(PooledByteBuf<T> buf, int reqCapacity) {
PoolChunk<T> chunk = newUnpooledChunk(reqCapacity);
activeBytesHuge.add(chunk.chunkSize());
buf.initUnpooled(chunk, reqCapacity);
allocationsHuge.increment();
}
void free(
PoolChunk<T> chunk,
ByteBuffer nioBuffer,
long handle,
int normCapacity,
PoolThreadCache cache) {
if (chunk.unpooled) {
int size = chunk.chunkSize();
destroyChunk(chunk);
activeBytesHuge.add(-size);
deallocationsHuge.increment();
} else {
SizeClass sizeClass = sizeClass(handle);
if (cache != null && cache.add(this, chunk, nioBuffer, handle, normCapacity, sizeClass)) {
        // cached, so do not free it.
return;
}
freeChunk(chunk, handle, normCapacity, sizeClass, nioBuffer, false);
}
}
private static SizeClass sizeClass(long handle) {
return isSubpage(handle) ? SizeClass.Small : SizeClass.Normal;
}
void freeChunk(
PoolChunk<T> chunk,
long handle,
int normCapacity,
SizeClass sizeClass,
ByteBuffer nioBuffer,
boolean finalizer) {
final boolean destroyChunk;
synchronized (this) {
      // We only call this if freeChunk is not called because of the PoolThreadCache finalizer, as
      // otherwise this may fail due to lazy class-loading in, for example, Tomcat.
if (!finalizer) {
switch (sizeClass) {
case Normal:
++deallocationsNormal;
break;
case Small:
++deallocationsSmall;
break;
default:
throw new Error();
}
}
destroyChunk = !chunk.parent.free(chunk, handle, normCapacity, nioBuffer);
}
if (destroyChunk) {
      // destroyChunk does not need to be called while holding the synchronized lock.
destroyChunk(chunk);
}
}
PoolSubpage<T> findSubpagePoolHead(int sizeIdx) {
return smallSubpagePools[sizeIdx];
}
void reallocate(PooledByteBuf<T> buf, int newCapacity, boolean freeOldMemory) {
assert newCapacity >= 0 && newCapacity <= buf.maxCapacity();
int oldCapacity = buf.length;
if (oldCapacity == newCapacity) {
return;
}
PoolChunk<T> oldChunk = buf.chunk;
ByteBuffer oldNioBuffer = buf.tmpNioBuf;
long oldHandle = buf.handle;
T oldMemory = buf.memory;
int oldOffset = buf.offset;
int oldMaxLength = buf.maxLength;
// This does not touch buf's reader/writer indices
allocate(parent.threadCache(), buf, newCapacity);
int bytesToCopy;
if (newCapacity > oldCapacity) {
bytesToCopy = oldCapacity;
} else {
buf.trimIndicesToCapacity(newCapacity);
bytesToCopy = newCapacity;
}
memoryCopy(oldMemory, oldOffset, buf, bytesToCopy);
if (freeOldMemory) {
free(oldChunk, oldNioBuffer, oldHandle, oldMaxLength, buf.cache);
}
}
@Override
public int numThreadCaches() {
return numThreadCaches.get();
}
@Override
public int numTinySubpages() {
return 0;
}
@Override
public int numSmallSubpages() {
return smallSubpagePools.length;
}
@Override
public int numChunkLists() {
return chunkListMetrics.size();
}
@Override
public List<PoolSubpageMetric> tinySubpages() {
return Collections.emptyList();
}
@Override
public List<PoolSubpageMetric> smallSubpages() {
return subPageMetricList(smallSubpagePools);
}
@Override
public List<PoolChunkListMetric> chunkLists() {
return chunkListMetrics;
}
private static List<PoolSubpageMetric> subPageMetricList(PoolSubpage<?>[] pages) {
List<PoolSubpageMetric> metrics = new ArrayList<PoolSubpageMetric>();
for (PoolSubpage<?> head : pages) {
if (head.next == head) {
continue;
}
PoolSubpage<?> s = head.next;
for (; ; ) {
metrics.add(s);
s = s.next;
if (s == head) {
break;
}
}
}
return metrics;
}
@Override
public long numAllocations() {
final long allocsNormal;
synchronized (this) {
allocsNormal = allocationsNormal;
}
return allocationsSmall.value() + allocsNormal + allocationsHuge.value();
}
@Override
public long numTinyAllocations() {
return 0;
}
@Override
public long numSmallAllocations() {
return allocationsSmall.value();
}
@Override
public synchronized long numNormalAllocations() {
return allocationsNormal;
}
@Override
public long numDeallocations() {
final long deallocs;
synchronized (this) {
deallocs = deallocationsSmall + deallocationsNormal;
}
return deallocs + deallocationsHuge.value();
}
@Override
public long numTinyDeallocations() {
return 0;
}
@Override
public synchronized long numSmallDeallocations() {
return deallocationsSmall;
}
@Override
public synchronized long numNormalDeallocations() {
return deallocationsNormal;
}
@Override
public long numHugeAllocations() {
return allocationsHuge.value();
}
@Override
public long numHugeDeallocations() {
return deallocationsHuge.value();
}
@Override
public long numActiveAllocations() {
long val = allocationsSmall.value() + allocationsHuge.value() - deallocationsHuge.value();
synchronized (this) {
val += allocationsNormal - (deallocationsSmall + deallocationsNormal);
}
return max(val, 0);
}
@Override
public long numActiveTinyAllocations() {
return 0;
}
@Override
public long numActiveSmallAllocations() {
return max(numSmallAllocations() - numSmallDeallocations(), 0);
}
@Override
public long numActiveNormalAllocations() {
final long val;
synchronized (this) {
val = allocationsNormal - deallocationsNormal;
}
return max(val, 0);
}
@Override
public long numActiveHugeAllocations() {
return max(numHugeAllocations() - numHugeDeallocations(), 0);
}
@Override
public long numActiveBytes() {
long val = activeBytesHuge.value();
synchronized (this) {
for (int i = 0; i < chunkListMetrics.size(); i++) {
for (PoolChunkMetric m : chunkListMetrics.get(i)) {
val += m.chunkSize();
}
}
}
return max(0, val);
}
/**
   * Return the number of bytes that are currently pinned to buffer instances by the arena. The
   * pinned memory is not accessible for use by any other allocation until the buffers using it
   * have all been released.
*/
public long numPinnedBytes() {
long val =
activeBytesHuge
.value(); // Huge chunks are exact-sized for the buffers they were allocated to.
synchronized (this) {
for (int i = 0; i < chunkListMetrics.size(); i++) {
for (PoolChunkMetric m : chunkListMetrics.get(i)) {
val += ((PoolChunk<?>) m).pinnedBytes();
}
}
}
return max(0, val);
}
protected abstract PoolChunk<T> newChunk(
int pageSize, int maxPageIdx, int pageShifts, int chunkSize);
protected abstract PoolChunk<T> newUnpooledChunk(int capacity);
protected abstract PooledByteBuf<T> newByteBuf(int maxCapacity);
protected abstract void memoryCopy(T src, int srcOffset, PooledByteBuf<T> dst, int length);
protected abstract void destroyChunk(PoolChunk<T> chunk);
@Override
public synchronized String toString() {
StringBuilder buf =
new StringBuilder()
.append("Chunk(s) at 0~25%:")
.append(StringUtil.NEWLINE)
.append(qInit)
.append(StringUtil.NEWLINE)
.append("Chunk(s) at 0~50%:")
.append(StringUtil.NEWLINE)
.append(q000)
.append(StringUtil.NEWLINE)
.append("Chunk(s) at 25~75%:")
.append(StringUtil.NEWLINE)
.append(q025)
.append(StringUtil.NEWLINE)
.append("Chunk(s) at 50~100%:")
.append(StringUtil.NEWLINE)
.append(q050)
.append(StringUtil.NEWLINE)
.append("Chunk(s) at 75~100%:")
.append(StringUtil.NEWLINE)
.append(q075)
.append(StringUtil.NEWLINE)
.append("Chunk(s) at 100%:")
.append(StringUtil.NEWLINE)
.append(q100)
.append(StringUtil.NEWLINE)
.append("small subpages:");
appendPoolSubPages(buf, smallSubpagePools);
buf.append(StringUtil.NEWLINE);
return buf.toString();
}
private static void appendPoolSubPages(StringBuilder buf, PoolSubpage<?>[] subpages) {
for (int i = 0; i < subpages.length; i++) {
PoolSubpage<?> head = subpages[i];
if (head.next == head) {
continue;
}
buf.append(StringUtil.NEWLINE).append(i).append(": ");
PoolSubpage<?> s = head.next;
for (; ; ) {
buf.append(s);
s = s.next;
if (s == head) {
break;
}
}
}
}
@Override
protected final void finalize() throws Throwable {
try {
super.finalize();
} finally {
destroyPoolSubPages(smallSubpagePools);
destroyPoolChunkLists(qInit, q000, q025, q050, q075, q100);
}
}
private static void destroyPoolSubPages(PoolSubpage<?>[] pages) {
for (PoolSubpage<?> page : pages) {
page.destroy();
}
}
private void destroyPoolChunkLists(PoolChunkList<T>... chunkLists) {
for (PoolChunkList<T> chunkList : chunkLists) {
chunkList.destroy(this);
}
}
static final class HeapArena extends PoolArena<byte[]> {
HeapArena(PooledByteBufAllocator parent, int pageSize, int pageShifts, int chunkSize) {
super(parent, pageSize, pageShifts, chunkSize, 0);
}
private static byte[] newByteArray(int size) {
return PlatformDependent.allocateUninitializedArray(size);
}
@Override
boolean isDirect() {
return false;
}
@Override
protected PoolChunk<byte[]> newChunk(
int pageSize, int maxPageIdx, int pageShifts, int chunkSize) {
return new PoolChunk<byte[]>(
this, null, newByteArray(chunkSize), pageSize, pageShifts, chunkSize, maxPageIdx);
}
@Override
protected PoolChunk<byte[]> newUnpooledChunk(int capacity) {
return new PoolChunk<byte[]>(this, null, newByteArray(capacity), capacity);
}
@Override
protected void destroyChunk(PoolChunk<byte[]> chunk) {
// Rely on GC.
}
@Override
protected PooledByteBuf<byte[]> newByteBuf(int maxCapacity) {
return HAS_UNSAFE
? PooledUnsafeHeapByteBuf.newUnsafeInstance(maxCapacity)
: PooledHeapByteBuf.newInstance(maxCapacity);
}
@Override
protected void memoryCopy(byte[] src, int srcOffset, PooledByteBuf<byte[]> dst, int length) {
if (length == 0) {
return;
}
System.arraycopy(src, srcOffset, dst.memory, dst.offset, length);
}
}
static final class DirectArena extends PoolArena<ByteBuffer> {
DirectArena(
PooledByteBufAllocator parent,
int pageSize,
int pageShifts,
int chunkSize,
int directMemoryCacheAlignment) {
super(parent, pageSize, pageShifts, chunkSize, directMemoryCacheAlignment);
}
@Override
boolean isDirect() {
return true;
}
@Override
protected PoolChunk<ByteBuffer> newChunk(
int pageSize, int maxPageIdx, int pageShifts, int chunkSize) {
if (directMemoryCacheAlignment == 0) {
ByteBuffer memory = allocateDirect(chunkSize);
return new PoolChunk<ByteBuffer>(
this, memory, memory, pageSize, pageShifts, chunkSize, maxPageIdx);
}
final ByteBuffer base = allocateDirect(chunkSize + directMemoryCacheAlignment);
final ByteBuffer memory =
PlatformDependent.alignDirectBuffer(base, directMemoryCacheAlignment);
return new PoolChunk<ByteBuffer>(
this, base, memory, pageSize, pageShifts, chunkSize, maxPageIdx);
}
@Override
protected PoolChunk<ByteBuffer> newUnpooledChunk(int capacity) {
if (directMemoryCacheAlignment == 0) {
ByteBuffer memory = allocateDirect(capacity);
return new PoolChunk<ByteBuffer>(this, memory, memory, capacity);
}
final ByteBuffer base = allocateDirect(capacity + directMemoryCacheAlignment);
final ByteBuffer memory =
PlatformDependent.alignDirectBuffer(base, directMemoryCacheAlignment);
return new PoolChunk<ByteBuffer>(this, base, memory, capacity);
}
private static ByteBuffer allocateDirect(int capacity) {
return PlatformDependent.useDirectBufferNoCleaner()
? PlatformDependent.allocateDirectNoCleaner(capacity)
: ByteBuffer.allocateDirect(capacity);
}
@Override
protected void destroyChunk(PoolChunk<ByteBuffer> chunk) {
if (PlatformDependent.useDirectBufferNoCleaner()) {
PlatformDependent.freeDirectNoCleaner((ByteBuffer) chunk.base);
} else {
PlatformDependent.freeDirectBuffer((ByteBuffer) chunk.base);
}
}
@Override
protected PooledByteBuf<ByteBuffer> newByteBuf(int maxCapacity) {
if (HAS_UNSAFE) {
return PooledUnsafeDirectByteBuf.newInstance(maxCapacity);
} else {
return PooledDirectByteBuf.newInstance(maxCapacity);
}
}
@Override
protected void memoryCopy(
ByteBuffer src, int srcOffset, PooledByteBuf<ByteBuffer> dstBuf, int length) {
if (length == 0) {
return;
}
if (HAS_UNSAFE) {
PlatformDependent.copyMemory(
PlatformDependent.directBufferAddress(src) + srcOffset,
PlatformDependent.directBufferAddress(dstBuf.memory) + dstBuf.offset,
length);
} else {
// We must duplicate the NIO buffers because they may be accessed by other Netty buffers.
src = src.duplicate();
ByteBuffer dst = dstBuf.internalNioBuffer();
src.position(srcOffset).limit(srcOffset + length);
dst.position(dstBuf.offset);
dst.put(src);
}
}
}
}

View File

@ -1,602 +0,0 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.channel.socket.nio;
import static io.netty.channel.internal.ChannelUtils.MAX_BYTES_PER_GATHERING_WRITE_ATTEMPTED_LOW_THRESHOLD;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
import io.netty.channel.ChannelException;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelOption;
import io.netty.channel.ChannelOutboundBuffer;
import io.netty.channel.ChannelPromise;
import io.netty.channel.EventLoop;
import io.netty.channel.FileRegion;
import io.netty.channel.RecvByteBufAllocator;
import io.netty.channel.nio.AbstractNioByteChannel;
import io.netty.channel.socket.DefaultSocketChannelConfig;
import io.netty.channel.socket.ServerSocketChannel;
import io.netty.channel.socket.SocketChannelConfig;
import io.netty.util.concurrent.GlobalEventExecutor;
import io.netty.util.internal.PlatformDependent;
import io.netty.util.internal.SocketUtils;
import io.netty.util.internal.SuppressJava6Requirement;
import io.netty.util.internal.UnstableApi;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;
import io.prometheus.client.Histogram;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.SelectionKey;
import java.nio.channels.SocketChannel;
import java.nio.channels.spi.SelectorProvider;
import java.util.Map;
import java.util.concurrent.Executor;
import org.tikv.common.util.HistogramUtils;
/** {@link io.netty.channel.socket.SocketChannel} which uses NIO selector based implementation. */
public class NioSocketChannel extends AbstractNioByteChannel
implements io.netty.channel.socket.SocketChannel {
public static final Histogram socketWriteDuration =
HistogramUtils.buildDuration()
.name("netty_nio_socket_channel_write_duration_seconds")
.help("Time taken to write data to socket")
.register();
public static final Histogram socketWriteBytes =
HistogramUtils.buildBytes()
.name("netty_nio_socket_channel_write_bytes")
.help("number of bytes for each write call")
.register();
public static final Histogram socketWrittenBytes =
HistogramUtils.buildBytes()
.name("netty_nio_socket_channel_written_bytes")
.help("number of bytes actually written for each write call")
.register();
public static final Histogram socketWriteLeftBytes =
HistogramUtils.buildBytes()
.name("netty_nio_socket_channel_write_left_bytes")
.help("number of bytes not written for each write call")
.register();
public static final Histogram socketReadDuration =
HistogramUtils.buildDuration()
.name("netty_nio_socket_channel_read_duration_seconds")
.help("Time taken to read data to socket")
.register();
public static final Histogram socketReadBytes =
HistogramUtils.buildBytes()
.name("netty_nio_socket_channel_read_bytes")
.help("number of bytes for each read call")
.register();
public static final Histogram socketReadLeftBytes =
HistogramUtils.buildBytes()
.name("netty_nio_socket_channel_read_left_bytes")
.help("number of bytes not read for each read call")
.register();
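  // Illustrative sketch (not part of the original file): because the histograms above are
  // registered on the default CollectorRegistry, they can be exposed with the Prometheus
  // simpleclient HTTP exporter; the port below is an arbitrary assumption.
  //
  //   io.prometheus.client.exporter.HTTPServer server =
  //       new io.prometheus.client.exporter.HTTPServer(9091);
  //   // Scraping :9091/metrics then yields series such as
  //   // netty_nio_socket_channel_write_duration_seconds_{bucket,sum,count}.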
private static final InternalLogger logger =
InternalLoggerFactory.getInstance(NioSocketChannel.class);
private static final SelectorProvider DEFAULT_SELECTOR_PROVIDER = SelectorProvider.provider();
private static SocketChannel newSocket(SelectorProvider provider) {
try {
/**
* Use the {@link SelectorProvider} to open {@link SocketChannel} and so remove condition in
* {@link SelectorProvider#provider()} which is called by each SocketChannel.open() otherwise.
*
* <p>See <a href="https://github.com/netty/netty/issues/2308">#2308</a>.
*/
return provider.openSocketChannel();
} catch (IOException e) {
throw new ChannelException("Failed to open a socket.", e);
}
}
private final SocketChannelConfig config;
/** Create a new instance */
public NioSocketChannel() {
this(DEFAULT_SELECTOR_PROVIDER);
}
/** Create a new instance using the given {@link SelectorProvider}. */
public NioSocketChannel(SelectorProvider provider) {
this(newSocket(provider));
}
/** Create a new instance using the given {@link SocketChannel}. */
public NioSocketChannel(SocketChannel socket) {
this(null, socket);
}
/**
* Create a new instance
*
* @param parent the {@link Channel} which created this instance or {@code null} if it was created
* by the user
* @param socket the {@link SocketChannel} which will be used
*/
public NioSocketChannel(Channel parent, SocketChannel socket) {
super(parent, socket);
config = new NioSocketChannelConfig(this, socket.socket());
}
@Override
public ServerSocketChannel parent() {
return (ServerSocketChannel) super.parent();
}
@Override
public SocketChannelConfig config() {
return config;
}
@Override
protected SocketChannel javaChannel() {
return (SocketChannel) super.javaChannel();
}
@Override
public boolean isActive() {
SocketChannel ch = javaChannel();
return ch.isOpen() && ch.isConnected();
}
@Override
public boolean isOutputShutdown() {
return javaChannel().socket().isOutputShutdown() || !isActive();
}
@Override
public boolean isInputShutdown() {
return javaChannel().socket().isInputShutdown() || !isActive();
}
@Override
public boolean isShutdown() {
Socket socket = javaChannel().socket();
return socket.isInputShutdown() && socket.isOutputShutdown() || !isActive();
}
@Override
public InetSocketAddress localAddress() {
return (InetSocketAddress) super.localAddress();
}
@Override
public InetSocketAddress remoteAddress() {
return (InetSocketAddress) super.remoteAddress();
}
@SuppressJava6Requirement(reason = "Usage guarded by java version check")
@UnstableApi
@Override
protected final void doShutdownOutput() throws Exception {
if (PlatformDependent.javaVersion() >= 7) {
javaChannel().shutdownOutput();
} else {
javaChannel().socket().shutdownOutput();
}
}
@Override
public ChannelFuture shutdownOutput() {
return shutdownOutput(newPromise());
}
@Override
public ChannelFuture shutdownOutput(final ChannelPromise promise) {
final EventLoop loop = eventLoop();
if (loop.inEventLoop()) {
((AbstractUnsafe) unsafe()).shutdownOutput(promise);
} else {
loop.execute(
new Runnable() {
@Override
public void run() {
((AbstractUnsafe) unsafe()).shutdownOutput(promise);
}
});
}
return promise;
}
@Override
public ChannelFuture shutdownInput() {
return shutdownInput(newPromise());
}
@Override
protected boolean isInputShutdown0() {
return isInputShutdown();
}
@Override
public ChannelFuture shutdownInput(final ChannelPromise promise) {
EventLoop loop = eventLoop();
if (loop.inEventLoop()) {
shutdownInput0(promise);
} else {
loop.execute(
new Runnable() {
@Override
public void run() {
shutdownInput0(promise);
}
});
}
return promise;
}
@Override
public ChannelFuture shutdown() {
return shutdown(newPromise());
}
@Override
public ChannelFuture shutdown(final ChannelPromise promise) {
ChannelFuture shutdownOutputFuture = shutdownOutput();
if (shutdownOutputFuture.isDone()) {
shutdownOutputDone(shutdownOutputFuture, promise);
} else {
shutdownOutputFuture.addListener(
new ChannelFutureListener() {
@Override
public void operationComplete(final ChannelFuture shutdownOutputFuture)
throws Exception {
shutdownOutputDone(shutdownOutputFuture, promise);
}
});
}
return promise;
}
private void shutdownOutputDone(
final ChannelFuture shutdownOutputFuture, final ChannelPromise promise) {
ChannelFuture shutdownInputFuture = shutdownInput();
if (shutdownInputFuture.isDone()) {
shutdownDone(shutdownOutputFuture, shutdownInputFuture, promise);
} else {
shutdownInputFuture.addListener(
new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture shutdownInputFuture) throws Exception {
shutdownDone(shutdownOutputFuture, shutdownInputFuture, promise);
}
});
}
}
private static void shutdownDone(
ChannelFuture shutdownOutputFuture,
ChannelFuture shutdownInputFuture,
ChannelPromise promise) {
Throwable shutdownOutputCause = shutdownOutputFuture.cause();
Throwable shutdownInputCause = shutdownInputFuture.cause();
if (shutdownOutputCause != null) {
if (shutdownInputCause != null) {
logger.debug(
"Exception suppressed because a previous exception occurred.", shutdownInputCause);
}
promise.setFailure(shutdownOutputCause);
} else if (shutdownInputCause != null) {
promise.setFailure(shutdownInputCause);
} else {
promise.setSuccess();
}
}
private void shutdownInput0(final ChannelPromise promise) {
try {
shutdownInput0();
promise.setSuccess();
} catch (Throwable t) {
promise.setFailure(t);
}
}
@SuppressJava6Requirement(reason = "Usage guarded by java version check")
private void shutdownInput0() throws Exception {
if (PlatformDependent.javaVersion() >= 7) {
javaChannel().shutdownInput();
} else {
javaChannel().socket().shutdownInput();
}
}
@Override
protected SocketAddress localAddress0() {
return javaChannel().socket().getLocalSocketAddress();
}
@Override
protected SocketAddress remoteAddress0() {
return javaChannel().socket().getRemoteSocketAddress();
}
@Override
protected void doBind(SocketAddress localAddress) throws Exception {
doBind0(localAddress);
}
private void doBind0(SocketAddress localAddress) throws Exception {
if (PlatformDependent.javaVersion() >= 7) {
SocketUtils.bind(javaChannel(), localAddress);
} else {
SocketUtils.bind(javaChannel().socket(), localAddress);
}
}
@Override
protected boolean doConnect(SocketAddress remoteAddress, SocketAddress localAddress)
throws Exception {
if (localAddress != null) {
doBind0(localAddress);
}
boolean success = false;
try {
boolean connected = SocketUtils.connect(javaChannel(), remoteAddress);
if (!connected) {
selectionKey().interestOps(SelectionKey.OP_CONNECT);
}
success = true;
return connected;
} finally {
if (!success) {
doClose();
}
}
}
@Override
protected void doFinishConnect() throws Exception {
if (!javaChannel().finishConnect()) {
throw new Error();
}
}
@Override
protected void doDisconnect() throws Exception {
doClose();
}
@Override
protected void doClose() throws Exception {
super.doClose();
javaChannel().close();
}
@Override
protected int doReadBytes(ByteBuf byteBuf) throws Exception {
final RecvByteBufAllocator.Handle allocHandle = unsafe().recvBufAllocHandle();
int attemptedBytes = byteBuf.writableBytes();
allocHandle.attemptedBytesRead(attemptedBytes);
Histogram.Timer socketReadTime = socketReadDuration.startTimer();
SocketChannel sc = javaChannel();
int localReadBytes = byteBuf.writeBytes(sc, allocHandle.attemptedBytesRead());
    socketReadTime.observeDuration();
    // Guard against the -1 returned at end-of-stream, which would otherwise record a negative
    // read size and a bogus attemptedBytes + 1 leftover into the histograms.
    if (localReadBytes >= 0) {
      socketReadBytes.observe(localReadBytes);
      socketReadLeftBytes.observe(attemptedBytes - localReadBytes);
    }
return localReadBytes;
}
@Override
protected int doWriteBytes(ByteBuf buf) throws Exception {
final int expectedWrittenBytes = buf.readableBytes();
return buf.readBytes(javaChannel(), expectedWrittenBytes);
}
@Override
protected long doWriteFileRegion(FileRegion region) throws Exception {
final long position = region.transferred();
return region.transferTo(javaChannel(), position);
}
private void adjustMaxBytesPerGatheringWrite(
int attempted, int written, int oldMaxBytesPerGatheringWrite) {
    // By default we track the SO_SNDBUF whenever it is explicitly set. However some OSes may
    // dynamically change SO_SNDBUF (and other characteristics that determine how much data can
    // be written at once), so we should make a best effort to adjust as OS behavior changes.
if (attempted == written) {
if (attempted << 1 > oldMaxBytesPerGatheringWrite) {
((NioSocketChannelConfig) config).setMaxBytesPerGatheringWrite(attempted << 1);
}
} else if (attempted > MAX_BYTES_PER_GATHERING_WRITE_ATTEMPTED_LOW_THRESHOLD
&& written < attempted >>> 1) {
((NioSocketChannelConfig) config).setMaxBytesPerGatheringWrite(attempted >>> 1);
}
}
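  // Worked example of the heuristic above (illustrative, not part of the original file): with
  // attempted == written == 64 KiB and an old limit of 96 KiB, 64 KiB << 1 == 128 KiB exceeds
  // the old limit, so the limit grows to 128 KiB. Conversely, if attempted is 128 KiB (above
  // the low threshold) but only 32 KiB is written (less than attempted >>> 1), the limit
  // shrinks to attempted >>> 1 == 64 KiB.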
@Override
protected void doWrite(ChannelOutboundBuffer in) throws Exception {
SocketChannel ch = javaChannel();
int writeSpinCount = config().getWriteSpinCount();
do {
if (in.isEmpty()) {
// All written so clear OP_WRITE
clearOpWrite();
// Directly return here so incompleteWrite(...) is not called.
return;
}
// Ensure the pending writes are made of ByteBufs only.
int maxBytesPerGatheringWrite =
((NioSocketChannelConfig) config).getMaxBytesPerGatheringWrite();
ByteBuffer[] nioBuffers = in.nioBuffers(1024, maxBytesPerGatheringWrite);
int nioBufferCnt = in.nioBufferCount();
// Always use nioBuffers() to workaround data-corruption.
// See https://github.com/netty/netty/issues/2761
switch (nioBufferCnt) {
case 0:
// We have something else beside ByteBuffers to write so fallback to normal writes.
writeSpinCount -= doWrite0(in);
break;
case 1:
{
// Only one ByteBuf so use non-gathering write
            // Zero length buffers are not added to nioBuffers by ChannelOutboundBuffer, so there
            // is no need to check if the total size of all the buffers is non-zero.
ByteBuffer buffer = nioBuffers[0];
int attemptedBytes = buffer.remaining();
socketWriteBytes.observe(attemptedBytes);
Histogram.Timer writeTime = socketWriteDuration.startTimer();
final int localWrittenBytes = ch.write(buffer);
writeTime.observeDuration();
socketWrittenBytes.observe(localWrittenBytes);
if (localWrittenBytes <= 0) {
incompleteWrite(true);
return;
}
socketWriteLeftBytes.observe(attemptedBytes - localWrittenBytes);
adjustMaxBytesPerGatheringWrite(
attemptedBytes, localWrittenBytes, maxBytesPerGatheringWrite);
in.removeBytes(localWrittenBytes);
--writeSpinCount;
break;
}
default:
{
            // Zero length buffers are not added to nioBuffers by ChannelOutboundBuffer, so there
            // is no need to check if the total size of all the buffers is non-zero.
            // We limit the max amount to int above so the cast is safe.
long attemptedBytes = in.nioBufferSize();
socketWriteBytes.observe(attemptedBytes);
Histogram.Timer writeTime = socketWriteDuration.startTimer();
final long localWrittenBytes = ch.write(nioBuffers, 0, nioBufferCnt);
writeTime.observeDuration();
socketWrittenBytes.observe(localWrittenBytes);
if (localWrittenBytes <= 0) {
incompleteWrite(true);
return;
}
socketWriteLeftBytes.observe(attemptedBytes - localWrittenBytes);
// Casting to int is safe because we limit the total amount of data in the nioBuffers to
// int above.
adjustMaxBytesPerGatheringWrite(
(int) attemptedBytes, (int) localWrittenBytes, maxBytesPerGatheringWrite);
in.removeBytes(localWrittenBytes);
--writeSpinCount;
break;
}
}
} while (writeSpinCount > 0);
incompleteWrite(writeSpinCount < 0);
}
@Override
protected AbstractNioUnsafe newUnsafe() {
return new NioSocketChannelUnsafe();
}
private final class NioSocketChannelUnsafe extends NioByteUnsafe {
@Override
protected Executor prepareToClose() {
try {
if (javaChannel().isOpen() && config().getSoLinger() > 0) {
          // We need to cancel this key of the channel so we do not end up in an event loop spin
          // because we try to read or write until the actual close happens, which may be later
          // due to SO_LINGER handling.
          // See https://github.com/netty/netty/issues/4449
doDeregister();
return GlobalEventExecutor.INSTANCE;
}
} catch (Throwable ignore) {
// Ignore the error as the underlying channel may be closed in the meantime and so
// getSoLinger() may produce an exception. In this case we just return null.
// See https://github.com/netty/netty/issues/4449
}
return null;
}
}
private final class NioSocketChannelConfig extends DefaultSocketChannelConfig {
private volatile int maxBytesPerGatheringWrite = Integer.MAX_VALUE;
private NioSocketChannelConfig(NioSocketChannel channel, Socket javaSocket) {
super(channel, javaSocket);
calculateMaxBytesPerGatheringWrite();
}
@Override
protected void autoReadCleared() {
clearReadPending();
}
@Override
public NioSocketChannelConfig setSendBufferSize(int sendBufferSize) {
super.setSendBufferSize(sendBufferSize);
calculateMaxBytesPerGatheringWrite();
return this;
}
@Override
public <T> boolean setOption(ChannelOption<T> option, T value) {
if (PlatformDependent.javaVersion() >= 7 && option instanceof NioChannelOption) {
return NioChannelOption.setOption(jdkChannel(), (NioChannelOption<T>) option, value);
}
return super.setOption(option, value);
}
@Override
public <T> T getOption(ChannelOption<T> option) {
if (PlatformDependent.javaVersion() >= 7 && option instanceof NioChannelOption) {
return NioChannelOption.getOption(jdkChannel(), (NioChannelOption<T>) option);
}
return super.getOption(option);
}
@Override
public Map<ChannelOption<?>, Object> getOptions() {
if (PlatformDependent.javaVersion() >= 7) {
return getOptions(super.getOptions(), NioChannelOption.getOptions(jdkChannel()));
}
return super.getOptions();
}
void setMaxBytesPerGatheringWrite(int maxBytesPerGatheringWrite) {
this.maxBytesPerGatheringWrite = maxBytesPerGatheringWrite;
}
int getMaxBytesPerGatheringWrite() {
return maxBytesPerGatheringWrite;
}
private void calculateMaxBytesPerGatheringWrite() {
      // Multiply by 2 to give some extra space in case the OS can process write data faster
      // than we can provide.
int newSendBufferSize = getSendBufferSize() << 1;
if (newSendBufferSize > 0) {
setMaxBytesPerGatheringWrite(newSendBufferSize);
}
}
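    // Worked example (illustrative, not part of the original file): a 64 KiB SO_SNDBUF yields a
    // 128 KiB gathering-write cap. The > 0 guard protects against int overflow: for send buffer
    // sizes of 1 GiB and above, << 1 wraps negative and the cap is left unchanged.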
private SocketChannel jdkChannel() {
return ((NioSocketChannel) channel).javaChannel();
}
}
}

View File

@ -1,719 +0,0 @@
/*
* Copyright 2014 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License, version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package io.netty.handler.codec.http2;
import static io.netty.buffer.Unpooled.directBuffer;
import static io.netty.buffer.Unpooled.unreleasableBuffer;
import static io.netty.handler.codec.http2.Http2CodecUtil.CONTINUATION_FRAME_HEADER_LENGTH;
import static io.netty.handler.codec.http2.Http2CodecUtil.DATA_FRAME_HEADER_LENGTH;
import static io.netty.handler.codec.http2.Http2CodecUtil.DEFAULT_MAX_FRAME_SIZE;
import static io.netty.handler.codec.http2.Http2CodecUtil.FRAME_HEADER_LENGTH;
import static io.netty.handler.codec.http2.Http2CodecUtil.GO_AWAY_FRAME_HEADER_LENGTH;
import static io.netty.handler.codec.http2.Http2CodecUtil.HEADERS_FRAME_HEADER_LENGTH;
import static io.netty.handler.codec.http2.Http2CodecUtil.INT_FIELD_LENGTH;
import static io.netty.handler.codec.http2.Http2CodecUtil.MAX_UNSIGNED_BYTE;
import static io.netty.handler.codec.http2.Http2CodecUtil.MAX_UNSIGNED_INT;
import static io.netty.handler.codec.http2.Http2CodecUtil.MAX_WEIGHT;
import static io.netty.handler.codec.http2.Http2CodecUtil.MIN_WEIGHT;
import static io.netty.handler.codec.http2.Http2CodecUtil.PING_FRAME_PAYLOAD_LENGTH;
import static io.netty.handler.codec.http2.Http2CodecUtil.PRIORITY_ENTRY_LENGTH;
import static io.netty.handler.codec.http2.Http2CodecUtil.PRIORITY_FRAME_LENGTH;
import static io.netty.handler.codec.http2.Http2CodecUtil.PUSH_PROMISE_FRAME_HEADER_LENGTH;
import static io.netty.handler.codec.http2.Http2CodecUtil.RST_STREAM_FRAME_LENGTH;
import static io.netty.handler.codec.http2.Http2CodecUtil.SETTING_ENTRY_LENGTH;
import static io.netty.handler.codec.http2.Http2CodecUtil.WINDOW_UPDATE_FRAME_LENGTH;
import static io.netty.handler.codec.http2.Http2CodecUtil.isMaxFrameSizeValid;
import static io.netty.handler.codec.http2.Http2CodecUtil.verifyPadding;
import static io.netty.handler.codec.http2.Http2CodecUtil.writeFrameHeaderInternal;
import static io.netty.handler.codec.http2.Http2Error.FRAME_SIZE_ERROR;
import static io.netty.handler.codec.http2.Http2Exception.connectionError;
import static io.netty.handler.codec.http2.Http2FrameTypes.CONTINUATION;
import static io.netty.handler.codec.http2.Http2FrameTypes.DATA;
import static io.netty.handler.codec.http2.Http2FrameTypes.GO_AWAY;
import static io.netty.handler.codec.http2.Http2FrameTypes.HEADERS;
import static io.netty.handler.codec.http2.Http2FrameTypes.PING;
import static io.netty.handler.codec.http2.Http2FrameTypes.PRIORITY;
import static io.netty.handler.codec.http2.Http2FrameTypes.PUSH_PROMISE;
import static io.netty.handler.codec.http2.Http2FrameTypes.RST_STREAM;
import static io.netty.handler.codec.http2.Http2FrameTypes.SETTINGS;
import static io.netty.handler.codec.http2.Http2FrameTypes.WINDOW_UPDATE;
import static io.netty.util.internal.ObjectUtil.checkNotNull;
import static io.netty.util.internal.ObjectUtil.checkPositive;
import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero;
import static java.lang.Math.max;
import static java.lang.Math.min;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
import io.netty.handler.codec.http2.Http2CodecUtil.SimpleChannelPromiseAggregator;
import io.netty.handler.codec.http2.Http2FrameWriter.Configuration;
import io.netty.handler.codec.http2.Http2HeadersEncoder.SensitivityDetector;
import io.netty.util.internal.PlatformDependent;
import io.netty.util.internal.UnstableApi;
import io.prometheus.client.Histogram;
import org.tikv.common.util.HistogramUtils;
/** A {@link Http2FrameWriter} that supports all frame types defined by the HTTP/2 specification. */
@UnstableApi
public class DefaultHttp2FrameWriter
implements Http2FrameWriter, Http2FrameSizePolicy, Configuration {
private static final String STREAM_ID = "Stream ID";
private static final String STREAM_DEPENDENCY = "Stream Dependency";
/**
* This buffer is allocated to the maximum size of the padding field, and filled with zeros. When
* padding is needed it can be taken as a slice of this buffer. Users should call {@link
* ByteBuf#retain()} before using their slice.
*/
private static final ByteBuf ZERO_BUFFER =
unreleasableBuffer(directBuffer(MAX_UNSIGNED_BYTE).writeZero(MAX_UNSIGNED_BYTE)).asReadOnly();
private final Http2HeadersEncoder headersEncoder;
private int maxFrameSize;
public static final Histogram writeHeaderDuration =
HistogramUtils.buildDuration()
.name("netty_http2_frame_writer_write_header_duration_seconds")
.help("Time taken to encode a header")
.register();
public DefaultHttp2FrameWriter() {
this(new DefaultHttp2HeadersEncoder());
}
public DefaultHttp2FrameWriter(SensitivityDetector headersSensitivityDetector) {
this(new DefaultHttp2HeadersEncoder(headersSensitivityDetector));
}
public DefaultHttp2FrameWriter(
SensitivityDetector headersSensitivityDetector, boolean ignoreMaxHeaderListSize) {
this(new DefaultHttp2HeadersEncoder(headersSensitivityDetector, ignoreMaxHeaderListSize));
}
public DefaultHttp2FrameWriter(Http2HeadersEncoder headersEncoder) {
this.headersEncoder = headersEncoder;
maxFrameSize = DEFAULT_MAX_FRAME_SIZE;
}
@Override
public Configuration configuration() {
return this;
}
@Override
public Http2HeadersEncoder.Configuration headersConfiguration() {
return headersEncoder.configuration();
}
@Override
public Http2FrameSizePolicy frameSizePolicy() {
return this;
}
@Override
public void maxFrameSize(int max) throws Http2Exception {
if (!isMaxFrameSizeValid(max)) {
throw connectionError(
FRAME_SIZE_ERROR, "Invalid MAX_FRAME_SIZE specified in sent settings: %d", max);
}
maxFrameSize = max;
}
@Override
public int maxFrameSize() {
return maxFrameSize;
}
@Override
public void close() {}
@Override
public ChannelFuture writeData(
ChannelHandlerContext ctx,
int streamId,
ByteBuf data,
int padding,
boolean endStream,
ChannelPromise promise) {
final SimpleChannelPromiseAggregator promiseAggregator =
new SimpleChannelPromiseAggregator(promise, ctx.channel(), ctx.executor());
ByteBuf frameHeader = null;
try {
verifyStreamId(streamId, STREAM_ID);
verifyPadding(padding);
int remainingData = data.readableBytes();
Http2Flags flags = new Http2Flags();
flags.endOfStream(false);
flags.paddingPresent(false);
// Fast path to write frames of payload size maxFrameSize first.
if (remainingData > maxFrameSize) {
frameHeader = ctx.alloc().buffer(FRAME_HEADER_LENGTH);
writeFrameHeaderInternal(frameHeader, maxFrameSize, DATA, flags, streamId);
do {
// Write the header.
ctx.write(frameHeader.retainedSlice(), promiseAggregator.newPromise());
// Write the payload.
ctx.write(data.readRetainedSlice(maxFrameSize), promiseAggregator.newPromise());
remainingData -= maxFrameSize;
// Stop iterating if remainingData == maxFrameSize so we can take care of reference counts
// below.
} while (remainingData > maxFrameSize);
}
if (padding == 0) {
// Write the header.
if (frameHeader != null) {
frameHeader.release();
frameHeader = null;
}
ByteBuf frameHeader2 = ctx.alloc().buffer(FRAME_HEADER_LENGTH);
flags.endOfStream(endStream);
writeFrameHeaderInternal(frameHeader2, remainingData, DATA, flags, streamId);
ctx.write(frameHeader2, promiseAggregator.newPromise());
// Write the payload.
ByteBuf lastFrame = data.readSlice(remainingData);
data = null;
ctx.write(lastFrame, promiseAggregator.newPromise());
} else {
if (remainingData != maxFrameSize) {
if (frameHeader != null) {
frameHeader.release();
frameHeader = null;
}
} else {
remainingData -= maxFrameSize;
// Write the header.
ByteBuf lastFrame;
if (frameHeader == null) {
lastFrame = ctx.alloc().buffer(FRAME_HEADER_LENGTH);
writeFrameHeaderInternal(lastFrame, maxFrameSize, DATA, flags, streamId);
} else {
lastFrame = frameHeader.slice();
frameHeader = null;
}
ctx.write(lastFrame, promiseAggregator.newPromise());
// Write the payload.
lastFrame = data.readableBytes() != maxFrameSize ? data.readSlice(maxFrameSize) : data;
data = null;
ctx.write(lastFrame, promiseAggregator.newPromise());
}
do {
int frameDataBytes = min(remainingData, maxFrameSize);
int framePaddingBytes = min(padding, max(0, (maxFrameSize - 1) - frameDataBytes));
// Decrement the remaining counters.
padding -= framePaddingBytes;
remainingData -= frameDataBytes;
// Write the header.
ByteBuf frameHeader2 = ctx.alloc().buffer(DATA_FRAME_HEADER_LENGTH);
flags.endOfStream(endStream && remainingData == 0 && padding == 0);
flags.paddingPresent(framePaddingBytes > 0);
writeFrameHeaderInternal(
frameHeader2, framePaddingBytes + frameDataBytes, DATA, flags, streamId);
writePaddingLength(frameHeader2, framePaddingBytes);
ctx.write(frameHeader2, promiseAggregator.newPromise());
// Write the payload.
if (frameDataBytes != 0) {
if (remainingData == 0) {
ByteBuf lastFrame = data.readSlice(frameDataBytes);
data = null;
ctx.write(lastFrame, promiseAggregator.newPromise());
} else {
ctx.write(data.readRetainedSlice(frameDataBytes), promiseAggregator.newPromise());
}
}
// Write the frame padding.
if (paddingBytes(framePaddingBytes) > 0) {
ctx.write(
ZERO_BUFFER.slice(0, paddingBytes(framePaddingBytes)),
promiseAggregator.newPromise());
}
} while (remainingData != 0 || padding != 0);
}
} catch (Throwable cause) {
if (frameHeader != null) {
frameHeader.release();
}
      // Use a try/finally here in case the data has been released before calling this method.
      // This is not necessary above because we internally allocate frameHeader.
try {
if (data != null) {
data.release();
}
} finally {
promiseAggregator.setFailure(cause);
promiseAggregator.doneAllocatingPromises();
}
return promiseAggregator;
}
return promiseAggregator.doneAllocatingPromises();
}
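  // Worked example of the frame splitting above (illustrative, not part of the original file):
  // with maxFrameSize == 16384, padding == 0 and 40000 bytes of data, the fast-path loop writes
  // two full 16384-byte DATA frames reusing one retained header slice, and the padding == 0
  // branch then writes the final 7232-byte DATA frame carrying the endStream flag.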
@Override
public ChannelFuture writeHeaders(
ChannelHandlerContext ctx,
int streamId,
Http2Headers headers,
int padding,
boolean endStream,
ChannelPromise promise) {
return writeHeadersInternal(
ctx, streamId, headers, padding, endStream, false, 0, (short) 0, false, promise);
}
@Override
public ChannelFuture writeHeaders(
ChannelHandlerContext ctx,
int streamId,
Http2Headers headers,
int streamDependency,
short weight,
boolean exclusive,
int padding,
boolean endStream,
ChannelPromise promise) {
return writeHeadersInternal(
ctx,
streamId,
headers,
padding,
endStream,
true,
streamDependency,
weight,
exclusive,
promise);
}
@Override
public ChannelFuture writePriority(
ChannelHandlerContext ctx,
int streamId,
int streamDependency,
short weight,
boolean exclusive,
ChannelPromise promise) {
try {
verifyStreamId(streamId, STREAM_ID);
verifyStreamOrConnectionId(streamDependency, STREAM_DEPENDENCY);
verifyWeight(weight);
ByteBuf buf = ctx.alloc().buffer(PRIORITY_FRAME_LENGTH);
writeFrameHeaderInternal(buf, PRIORITY_ENTRY_LENGTH, PRIORITY, new Http2Flags(), streamId);
buf.writeInt(exclusive ? (int) (0x80000000L | streamDependency) : streamDependency);
// Adjust the weight so that it fits into a single byte on the wire.
buf.writeByte(weight - 1);
return ctx.write(buf, promise);
} catch (Throwable t) {
return promise.setFailure(t);
}
}
@Override
public ChannelFuture writeRstStream(
ChannelHandlerContext ctx, int streamId, long errorCode, ChannelPromise promise) {
try {
verifyStreamId(streamId, STREAM_ID);
verifyErrorCode(errorCode);
ByteBuf buf = ctx.alloc().buffer(RST_STREAM_FRAME_LENGTH);
writeFrameHeaderInternal(buf, INT_FIELD_LENGTH, RST_STREAM, new Http2Flags(), streamId);
buf.writeInt((int) errorCode);
return ctx.write(buf, promise);
} catch (Throwable t) {
return promise.setFailure(t);
}
}
@Override
public ChannelFuture writeSettings(
ChannelHandlerContext ctx, Http2Settings settings, ChannelPromise promise) {
try {
checkNotNull(settings, "settings");
int payloadLength = SETTING_ENTRY_LENGTH * settings.size();
ByteBuf buf =
ctx.alloc().buffer(FRAME_HEADER_LENGTH + settings.size() * SETTING_ENTRY_LENGTH);
writeFrameHeaderInternal(buf, payloadLength, SETTINGS, new Http2Flags(), 0);
for (Http2Settings.PrimitiveEntry<Long> entry : settings.entries()) {
buf.writeChar(entry.key());
buf.writeInt(entry.value().intValue());
}
return ctx.write(buf, promise);
} catch (Throwable t) {
return promise.setFailure(t);
}
}
@Override
public ChannelFuture writeSettingsAck(ChannelHandlerContext ctx, ChannelPromise promise) {
try {
ByteBuf buf = ctx.alloc().buffer(FRAME_HEADER_LENGTH);
writeFrameHeaderInternal(buf, 0, SETTINGS, new Http2Flags().ack(true), 0);
return ctx.write(buf, promise);
} catch (Throwable t) {
return promise.setFailure(t);
}
}
@Override
public ChannelFuture writePing(
ChannelHandlerContext ctx, boolean ack, long data, ChannelPromise promise) {
Http2Flags flags = ack ? new Http2Flags().ack(true) : new Http2Flags();
ByteBuf buf = ctx.alloc().buffer(FRAME_HEADER_LENGTH + PING_FRAME_PAYLOAD_LENGTH);
    // Assume nothing below will throw until buf is written. That way we don't have to take care
    // of ownership in the catch block.
writeFrameHeaderInternal(buf, PING_FRAME_PAYLOAD_LENGTH, PING, flags, 0);
buf.writeLong(data);
return ctx.write(buf, promise);
}
@Override
public ChannelFuture writePushPromise(
ChannelHandlerContext ctx,
int streamId,
int promisedStreamId,
Http2Headers headers,
int padding,
ChannelPromise promise) {
ByteBuf headerBlock = null;
SimpleChannelPromiseAggregator promiseAggregator =
new SimpleChannelPromiseAggregator(promise, ctx.channel(), ctx.executor());
try {
verifyStreamId(streamId, STREAM_ID);
verifyStreamId(promisedStreamId, "Promised Stream ID");
verifyPadding(padding);
// Encode the entire header block into an intermediate buffer.
headerBlock = ctx.alloc().buffer();
headersEncoder.encodeHeaders(streamId, headers, headerBlock);
// Read the first fragment (possibly everything).
Http2Flags flags = new Http2Flags().paddingPresent(padding > 0);
// INT_FIELD_LENGTH is for the length of the promisedStreamId
int nonFragmentLength = INT_FIELD_LENGTH + padding;
int maxFragmentLength = maxFrameSize - nonFragmentLength;
ByteBuf fragment =
headerBlock.readRetainedSlice(min(headerBlock.readableBytes(), maxFragmentLength));
flags.endOfHeaders(!headerBlock.isReadable());
int payloadLength = fragment.readableBytes() + nonFragmentLength;
ByteBuf buf = ctx.alloc().buffer(PUSH_PROMISE_FRAME_HEADER_LENGTH);
writeFrameHeaderInternal(buf, payloadLength, PUSH_PROMISE, flags, streamId);
writePaddingLength(buf, padding);
// Write out the promised stream ID.
buf.writeInt(promisedStreamId);
ctx.write(buf, promiseAggregator.newPromise());
// Write the first fragment.
ctx.write(fragment, promiseAggregator.newPromise());
// Write out the padding, if any.
if (paddingBytes(padding) > 0) {
ctx.write(ZERO_BUFFER.slice(0, paddingBytes(padding)), promiseAggregator.newPromise());
}
if (!flags.endOfHeaders()) {
writeContinuationFrames(ctx, streamId, headerBlock, promiseAggregator);
}
} catch (Http2Exception e) {
promiseAggregator.setFailure(e);
} catch (Throwable t) {
promiseAggregator.setFailure(t);
promiseAggregator.doneAllocatingPromises();
PlatformDependent.throwException(t);
} finally {
if (headerBlock != null) {
headerBlock.release();
}
}
return promiseAggregator.doneAllocatingPromises();
}
@Override
public ChannelFuture writeGoAway(
ChannelHandlerContext ctx,
int lastStreamId,
long errorCode,
ByteBuf debugData,
ChannelPromise promise) {
SimpleChannelPromiseAggregator promiseAggregator =
new SimpleChannelPromiseAggregator(promise, ctx.channel(), ctx.executor());
try {
verifyStreamOrConnectionId(lastStreamId, "Last Stream ID");
verifyErrorCode(errorCode);
int payloadLength = 8 + debugData.readableBytes();
ByteBuf buf = ctx.alloc().buffer(GO_AWAY_FRAME_HEADER_LENGTH);
      // Assume nothing below will throw until buf is written. That way we don't have to take
      // care of ownership in the catch block.
writeFrameHeaderInternal(buf, payloadLength, GO_AWAY, new Http2Flags(), 0);
buf.writeInt(lastStreamId);
buf.writeInt((int) errorCode);
ctx.write(buf, promiseAggregator.newPromise());
} catch (Throwable t) {
try {
debugData.release();
} finally {
promiseAggregator.setFailure(t);
promiseAggregator.doneAllocatingPromises();
}
return promiseAggregator;
}
try {
ctx.write(debugData, promiseAggregator.newPromise());
} catch (Throwable t) {
promiseAggregator.setFailure(t);
}
return promiseAggregator.doneAllocatingPromises();
}
@Override
public ChannelFuture writeWindowUpdate(
ChannelHandlerContext ctx, int streamId, int windowSizeIncrement, ChannelPromise promise) {
try {
verifyStreamOrConnectionId(streamId, STREAM_ID);
verifyWindowSizeIncrement(windowSizeIncrement);
ByteBuf buf = ctx.alloc().buffer(WINDOW_UPDATE_FRAME_LENGTH);
writeFrameHeaderInternal(buf, INT_FIELD_LENGTH, WINDOW_UPDATE, new Http2Flags(), streamId);
buf.writeInt(windowSizeIncrement);
return ctx.write(buf, promise);
} catch (Throwable t) {
return promise.setFailure(t);
}
}
@Override
public ChannelFuture writeFrame(
ChannelHandlerContext ctx,
byte frameType,
int streamId,
Http2Flags flags,
ByteBuf payload,
ChannelPromise promise) {
SimpleChannelPromiseAggregator promiseAggregator =
new SimpleChannelPromiseAggregator(promise, ctx.channel(), ctx.executor());
try {
verifyStreamOrConnectionId(streamId, STREAM_ID);
ByteBuf buf = ctx.alloc().buffer(FRAME_HEADER_LENGTH);
      // Assume nothing below will throw until buf is written. That way we don't have to take
      // care of ownership in the catch block.
writeFrameHeaderInternal(buf, payload.readableBytes(), frameType, flags, streamId);
ctx.write(buf, promiseAggregator.newPromise());
} catch (Throwable t) {
try {
payload.release();
} finally {
promiseAggregator.setFailure(t);
promiseAggregator.doneAllocatingPromises();
}
return promiseAggregator;
}
try {
ctx.write(payload, promiseAggregator.newPromise());
} catch (Throwable t) {
promiseAggregator.setFailure(t);
}
return promiseAggregator.doneAllocatingPromises();
}
private ChannelFuture writeHeadersInternal(
ChannelHandlerContext ctx,
int streamId,
Http2Headers headers,
int padding,
boolean endStream,
boolean hasPriority,
int streamDependency,
short weight,
boolean exclusive,
ChannelPromise promise) {
Histogram.Timer writeHeaderTimer = writeHeaderDuration.startTimer();
ByteBuf headerBlock = null;
SimpleChannelPromiseAggregator promiseAggregator =
new SimpleChannelPromiseAggregator(promise, ctx.channel(), ctx.executor());
try {
verifyStreamId(streamId, STREAM_ID);
if (hasPriority) {
verifyStreamOrConnectionId(streamDependency, STREAM_DEPENDENCY);
verifyPadding(padding);
verifyWeight(weight);
}
// Encode the entire header block.
headerBlock = ctx.alloc().buffer();
headersEncoder.encodeHeaders(streamId, headers, headerBlock);
Http2Flags flags =
new Http2Flags()
.endOfStream(endStream)
.priorityPresent(hasPriority)
.paddingPresent(padding > 0);
// Read the first fragment (possibly everything).
int nonFragmentBytes = padding + flags.getNumPriorityBytes();
int maxFragmentLength = maxFrameSize - nonFragmentBytes;
ByteBuf fragment =
headerBlock.readRetainedSlice(min(headerBlock.readableBytes(), maxFragmentLength));
// Set the end of headers flag for the first frame.
flags.endOfHeaders(!headerBlock.isReadable());
int payloadLength = fragment.readableBytes() + nonFragmentBytes;
ByteBuf buf = ctx.alloc().buffer(HEADERS_FRAME_HEADER_LENGTH);
writeFrameHeaderInternal(buf, payloadLength, HEADERS, flags, streamId);
writePaddingLength(buf, padding);
if (hasPriority) {
buf.writeInt(exclusive ? (int) (0x80000000L | streamDependency) : streamDependency);
// Adjust the weight so that it fits into a single byte on the wire.
buf.writeByte(weight - 1);
}
ctx.write(buf, promiseAggregator.newPromise());
// Write the first fragment.
ctx.write(fragment, promiseAggregator.newPromise());
// Write out the padding, if any.
if (paddingBytes(padding) > 0) {
ctx.write(ZERO_BUFFER.slice(0, paddingBytes(padding)), promiseAggregator.newPromise());
}
if (!flags.endOfHeaders()) {
writeContinuationFrames(ctx, streamId, headerBlock, promiseAggregator);
}
} catch (Http2Exception e) {
promiseAggregator.setFailure(e);
} catch (Throwable t) {
promiseAggregator.setFailure(t);
promiseAggregator.doneAllocatingPromises();
PlatformDependent.throwException(t);
} finally {
if (headerBlock != null) {
headerBlock.release();
}
}
ChannelPromise result = promiseAggregator.doneAllocatingPromises();
writeHeaderTimer.observeDuration();
return result;
}
/**
* Writes as many continuation frames as needed until {@code padding} and {@code headerBlock} are
* consumed.
*/
private ChannelFuture writeContinuationFrames(
ChannelHandlerContext ctx,
int streamId,
ByteBuf headerBlock,
SimpleChannelPromiseAggregator promiseAggregator) {
Http2Flags flags = new Http2Flags();
if (headerBlock.isReadable()) {
// The frame header (and padding) only changes on the last frame, so allocate it once and
// re-use
int fragmentReadableBytes = min(headerBlock.readableBytes(), maxFrameSize);
ByteBuf buf = ctx.alloc().buffer(CONTINUATION_FRAME_HEADER_LENGTH);
writeFrameHeaderInternal(buf, fragmentReadableBytes, CONTINUATION, flags, streamId);
do {
fragmentReadableBytes = min(headerBlock.readableBytes(), maxFrameSize);
ByteBuf fragment = headerBlock.readRetainedSlice(fragmentReadableBytes);
if (headerBlock.isReadable()) {
ctx.write(buf.retain(), promiseAggregator.newPromise());
} else {
// The frame header is different for the last frame, so re-allocate and release the old
// buffer
flags = flags.endOfHeaders(true);
buf.release();
buf = ctx.alloc().buffer(CONTINUATION_FRAME_HEADER_LENGTH);
writeFrameHeaderInternal(buf, fragmentReadableBytes, CONTINUATION, flags, streamId);
ctx.write(buf, promiseAggregator.newPromise());
}
ctx.write(fragment, promiseAggregator.newPromise());
} while (headerBlock.isReadable());
}
return promiseAggregator;
}
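  // Worked example (illustrative, not part of the original file): for a 40000-byte encoded
  // header block with maxFrameSize == 16384 and no padding or priority, writeHeadersInternal
  // sends the first 16384 bytes in the HEADERS frame, and this method then emits a 16384-byte
  // CONTINUATION frame followed by a final 7232-byte CONTINUATION frame flagged END_HEADERS.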
/** Returns the number of padding bytes that should be appended to the end of a frame. */
private static int paddingBytes(int padding) {
    // The padding parameter contains the 1-byte pad length field as well as the trailing
    // padding bytes. Subtract 1 to get only the number of padding bytes that need to be
    // appended to the end of a frame.
return padding - 1;
}
private static void writePaddingLength(ByteBuf buf, int padding) {
if (padding > 0) {
// It is assumed that the padding length has been bounds checked before this
// Minus 1, as the pad length field is included in the padding parameter and is 1 byte wide.
buf.writeByte(padding - 1);
}
}
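  // Worked example (illustrative, not part of the original file): for padding == 5, the frame
  // carries a 1-byte pad-length field of value 4 (writePaddingLength) followed by
  // paddingBytes(5) == 4 zero bytes sliced from ZERO_BUFFER, i.e. 5 padding octets in total.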
private static void verifyStreamId(int streamId, String argumentName) {
checkPositive(streamId, argumentName);
}
private static void verifyStreamOrConnectionId(int streamId, String argumentName) {
checkPositiveOrZero(streamId, argumentName);
}
private static void verifyWeight(short weight) {
if (weight < MIN_WEIGHT || weight > MAX_WEIGHT) {
throw new IllegalArgumentException("Invalid weight: " + weight);
}
}
private static void verifyErrorCode(long errorCode) {
if (errorCode < 0 || errorCode > MAX_UNSIGNED_INT) {
throw new IllegalArgumentException("Invalid errorCode: " + errorCode);
}
}
private static void verifyWindowSizeIncrement(int windowSizeIncrement) {
checkPositiveOrZero(windowSizeIncrement, "windowSizeIncrement");
}
private static void verifyPingPayload(ByteBuf data) {
if (data == null || data.readableBytes() != PING_FRAME_PAYLOAD_LENGTH) {
throw new IllegalArgumentException(
"Opaque data must be " + PING_FRAME_PAYLOAD_LENGTH + " bytes");
}
}
}

View File

@ -1,788 +0,0 @@
/*
* Copyright 2014 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License, version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package io.netty.handler.codec.http2;
import static io.netty.handler.codec.http2.Http2CodecUtil.DEFAULT_WINDOW_SIZE;
import static io.netty.handler.codec.http2.Http2CodecUtil.MAX_WEIGHT;
import static io.netty.handler.codec.http2.Http2CodecUtil.MIN_WEIGHT;
import static io.netty.handler.codec.http2.Http2Error.FLOW_CONTROL_ERROR;
import static io.netty.handler.codec.http2.Http2Error.INTERNAL_ERROR;
import static io.netty.handler.codec.http2.Http2Error.STREAM_CLOSED;
import static io.netty.handler.codec.http2.Http2Exception.streamError;
import static io.netty.handler.codec.http2.Http2Stream.State.HALF_CLOSED_LOCAL;
import static io.netty.util.internal.ObjectUtil.checkNotNull;
import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero;
import static java.lang.Math.max;
import static java.lang.Math.min;
import io.netty.channel.ChannelHandlerContext;
import io.netty.util.internal.UnstableApi;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;
import io.prometheus.client.Histogram;
import java.util.ArrayDeque;
import java.util.Deque;
import org.tikv.common.util.HistogramUtils;
/**
* Basic implementation of {@link Http2RemoteFlowController}.
*
* <p>This class is <strong>NOT</strong> thread safe. The assumption is all methods must be invoked
* from a single thread. Typically this thread is the event loop thread for the {@link
* ChannelHandlerContext} managed by this class.
*/
@UnstableApi
public class DefaultHttp2RemoteFlowController implements Http2RemoteFlowController {
private static final InternalLogger logger =
InternalLoggerFactory.getInstance(DefaultHttp2RemoteFlowController.class);
private static final int MIN_WRITABLE_CHUNK = 32 * 1024;
private final Http2Connection connection;
private final Http2Connection.PropertyKey stateKey;
private final StreamByteDistributor streamByteDistributor;
private final FlowState connectionState;
private int initialWindowSize = DEFAULT_WINDOW_SIZE;
private WritabilityMonitor monitor;
private ChannelHandlerContext ctx;
public static final Histogram byteDistributedDuration =
HistogramUtils.buildDuration()
.name("netty_http2_byte_distributed_duration_seconds")
.help("The duration of byte distributed to streams.")
.register();
public DefaultHttp2RemoteFlowController(Http2Connection connection) {
this(connection, (Listener) null);
}
public DefaultHttp2RemoteFlowController(
Http2Connection connection, StreamByteDistributor streamByteDistributor) {
this(connection, streamByteDistributor, null);
}
public DefaultHttp2RemoteFlowController(Http2Connection connection, final Listener listener) {
this(connection, new WeightedFairQueueByteDistributor(connection), listener);
}
public DefaultHttp2RemoteFlowController(
Http2Connection connection,
StreamByteDistributor streamByteDistributor,
final Listener listener) {
this.connection = checkNotNull(connection, "connection");
    this.streamByteDistributor = checkNotNull(streamByteDistributor, "streamByteDistributor");
// Add a flow state for the connection.
stateKey = connection.newKey();
connectionState = new FlowState(connection.connectionStream());
connection.connectionStream().setProperty(stateKey, connectionState);
// Monitor may depend upon connectionState, and so initialize after connectionState
listener(listener);
monitor.windowSize(connectionState, initialWindowSize);
// Register for notification of new streams.
connection.addListener(
new Http2ConnectionAdapter() {
@Override
public void onStreamAdded(Http2Stream stream) {
            // If the stream state is not open then the stream is not yet eligible for flow
            // controlled frames and only requires the ReducedFlowState. Otherwise the full
            // amount of memory is required.
stream.setProperty(stateKey, new FlowState(stream));
}
@Override
public void onStreamActive(Http2Stream stream) {
            // If the object was previously created but later activated, then we have to ensure
            // the proper initialWindowSize is used.
monitor.windowSize(state(stream), initialWindowSize);
}
@Override
public void onStreamClosed(Http2Stream stream) {
// Any pending frames can never be written, cancel and
// write errors for any pending frames.
state(stream).cancel(STREAM_CLOSED, null);
}
@Override
public void onStreamHalfClosed(Http2Stream stream) {
if (HALF_CLOSED_LOCAL == stream.state()) {
/**
* When this method is called there should not be any pending frames left if the API
               * is used correctly. However, it is possible that an erroneous application can sneak
* in a frame even after having already written a frame with the END_STREAM flag set,
* as the stream state might not transition immediately to HALF_CLOSED_LOCAL / CLOSED
* due to flow control delaying the write.
*
* <p>This is to cancel any such illegal writes.
*/
state(stream).cancel(STREAM_CLOSED, null);
}
}
});
}
/**
* {@inheritDoc}
*
* <p>Any queued {@link FlowControlled} objects will be sent.
*/
@Override
public void channelHandlerContext(ChannelHandlerContext ctx) throws Http2Exception {
this.ctx = checkNotNull(ctx, "ctx");
    // Writing the pending bytes will not check for a writability change; instead a writability
    // change notification is to be provided by an explicit call.
    channelWritabilityChanged();
    // Don't worry about cleaning up queued frames here if ctx is null. It is expected that all
    // streams will be closed and the queue cleanup will occur when the stream state transitions
    // occur.
    // If any frames have been queued up, we should send them now that we have a channel context.
if (isChannelWritable()) {
writePendingBytes();
}
}
@Override
public ChannelHandlerContext channelHandlerContext() {
return ctx;
}
@Override
public void initialWindowSize(int newWindowSize) throws Http2Exception {
assert ctx == null || ctx.executor().inEventLoop();
monitor.initialWindowSize(newWindowSize);
}
@Override
public int initialWindowSize() {
return initialWindowSize;
}
@Override
public int windowSize(Http2Stream stream) {
return state(stream).windowSize();
}
@Override
public boolean isWritable(Http2Stream stream) {
return monitor.isWritable(state(stream));
}
@Override
public void channelWritabilityChanged() throws Http2Exception {
monitor.channelWritabilityChange();
}
@Override
public void updateDependencyTree(
int childStreamId, int parentStreamId, short weight, boolean exclusive) {
    // It is assumed these are all validated at a higher level, for example in the Http2FrameReader.
assert weight >= MIN_WEIGHT && weight <= MAX_WEIGHT : "Invalid weight";
assert childStreamId != parentStreamId : "A stream cannot depend on itself";
assert childStreamId > 0 && parentStreamId >= 0
: "childStreamId must be > 0. parentStreamId must be >= 0.";
streamByteDistributor.updateDependencyTree(childStreamId, parentStreamId, weight, exclusive);
}
private boolean isChannelWritable() {
return ctx != null && isChannelWritable0();
}
private boolean isChannelWritable0() {
return ctx.channel().isWritable();
}
@Override
public void listener(Listener listener) {
monitor =
listener == null ? new WritabilityMonitor() : new ListenerWritabilityMonitor(listener);
}
@Override
public void incrementWindowSize(Http2Stream stream, int delta) throws Http2Exception {
assert ctx == null || ctx.executor().inEventLoop();
monitor.incrementWindowSize(state(stream), delta);
}
@Override
public void addFlowControlled(Http2Stream stream, FlowControlled frame) {
    // The context can be null, assuming the frame will be queued and sent later when the
    // context is set.
assert ctx == null || ctx.executor().inEventLoop();
checkNotNull(frame, "frame");
try {
monitor.enqueueFrame(state(stream), frame);
} catch (Throwable t) {
frame.error(ctx, t);
}
}
@Override
public boolean hasFlowControlled(Http2Stream stream) {
return state(stream).hasFrame();
}
private FlowState state(Http2Stream stream) {
return (FlowState) stream.getProperty(stateKey);
}
/** Returns the flow control window for the entire connection. */
private int connectionWindowSize() {
return connectionState.windowSize();
}
private int minUsableChannelBytes() {
    // The current allocation algorithm values "fairness" and doesn't give any consideration to
    // "goodput". It is possible that 1 byte will be allocated to many streams. In an effort to
    // make "goodput" reasonable with the current allocation algorithm we have this "cheap" check
    // up front to ensure there is an "adequate" amount of connection window before allocation is
    // attempted. This is not foolproof: if the number of streams is >= this minimal number we
    // may still have the issue, but the idea is to narrow the circumstances in which this can
    // happen without rewriting the allocation algorithm.
return max(ctx.channel().config().getWriteBufferLowWaterMark(), MIN_WRITABLE_CHUNK);
}
private int maxUsableChannelBytes() {
// If the channel isWritable, allow at least minUsableChannelBytes.
int channelWritableBytes = (int) min(Integer.MAX_VALUE, ctx.channel().bytesBeforeUnwritable());
int usableBytes =
channelWritableBytes > 0 ? max(channelWritableBytes, minUsableChannelBytes()) : 0;
// Clip the usable bytes by the connection window.
return min(connectionState.windowSize(), usableBytes);
}
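  // Worked example (illustrative, not part of the original file): with the default 32 KiB write
  // buffer low water mark, a channel reporting bytesBeforeUnwritable() == 10 still yields
  // max(10, 32768) == 32768 usable bytes, which is then clipped by the connection flow-control
  // window; an unwritable channel (0 bytes before unwritable) yields 0.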
/**
* The amount of bytes that can be supported by underlying {@link io.netty.channel.Channel}
* without queuing "too-much".
*/
private int writableBytes() {
return min(connectionWindowSize(), maxUsableChannelBytes());
}
@Override
public void writePendingBytes() throws Http2Exception {
monitor.writePendingBytes();
}
/** The remote flow control state for a single stream. */
private final class FlowState implements StreamByteDistributor.StreamState {
private final Http2Stream stream;
private final Deque<FlowControlled> pendingWriteQueue;
private int window;
private long pendingBytes;
private boolean markedWritable;
/** Set to true while a frame is being written, false otherwise. */
private boolean writing;
/** Set to true if cancel() was called. */
private boolean cancelled;
FlowState(Http2Stream stream) {
this.stream = stream;
pendingWriteQueue = new ArrayDeque<FlowControlled>(2);
}
/**
* Determine if the stream associated with this object is writable.
*
* @return {@code true} if the stream associated with this object is writable.
*/
boolean isWritable() {
return windowSize() > pendingBytes() && !cancelled;
}
/** The stream this state is associated with. */
@Override
public Http2Stream stream() {
return stream;
}
/** Returns the parameter from the last call to {@link #markedWritability(boolean)}. */
boolean markedWritability() {
return markedWritable;
}
/** Save the state of writability. */
void markedWritability(boolean isWritable) {
this.markedWritable = isWritable;
}
@Override
public int windowSize() {
return window;
}
/** Reset the window size for this stream. */
void windowSize(int initialWindowSize) {
window = initialWindowSize;
}
/**
* Write the allocated bytes for this stream.
*
* @return the number of bytes written for a stream or {@code -1} if no write occurred.
*/
int writeAllocatedBytes(int allocated) {
final int initialAllocated = allocated;
int writtenBytes;
// In case an exception is thrown we want to remember it and pass it to cancel(Throwable).
Throwable cause = null;
FlowControlled frame;
try {
assert !writing;
writing = true;
// Write the remainder of frames that we are allowed to
boolean writeOccurred = false;
while (!cancelled && (frame = peek()) != null) {
int maxBytes = min(allocated, writableWindow());
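            // Cap this frame's write by both the remaining per-pass allocation and the
            // combined stream/connection flow-control window.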
if (maxBytes <= 0 && frame.size() > 0) {
// The frame still has data, but the amount of allocated bytes has been exhausted.
// Don't write needless empty frames.
break;
}
writeOccurred = true;
int initialFrameSize = frame.size();
try {
frame.write(ctx, max(0, maxBytes));
if (frame.size() == 0) {
// This frame has been fully written, remove this frame and notify it.
// Since we remove this frame first, we're guaranteed that its error
// method will not be called when we call cancel.
pendingWriteQueue.remove();
frame.writeComplete();
}
} finally {
// Decrement allocated by how much was actually written.
allocated -= initialFrameSize - frame.size();
}
}
if (!writeOccurred) {
// Either there was no frame, or the amount of allocated bytes has been exhausted.
return -1;
}
} catch (Throwable t) {
// Mark the state as cancelled, we'll clear the pending queue via cancel() below.
cancelled = true;
cause = t;
} finally {
writing = false;
// Make sure we always decrement the flow control windows
// by the bytes written.
writtenBytes = initialAllocated - allocated;
decrementPendingBytes(writtenBytes, false);
decrementFlowControlWindow(writtenBytes);
// If a cancellation occurred while writing, call cancel again to
// clear and error all of the pending writes.
if (cancelled) {
cancel(INTERNAL_ERROR, cause);
}
}
return writtenBytes;
}
/**
* Increments the flow control window for this stream by the given delta and returns the new
* value.
*/
int incrementStreamWindow(int delta) throws Http2Exception {
if (delta > 0 && Integer.MAX_VALUE - delta < window) {
throw streamError(
stream.id(), FLOW_CONTROL_ERROR, "Window size overflow for stream: %d", stream.id());
}
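      // e.g. (hypothetical numbers) window == Integer.MAX_VALUE - 100 and delta == 101:
      // Integer.MAX_VALUE - delta < window, so the guard above fires instead of letting
      // the addition below overflow.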
window += delta;
streamByteDistributor.updateStreamableBytes(this);
return window;
}
/** Returns the maximum writable window (minimum of the stream and connection windows). */
private int writableWindow() {
return min(window, connectionWindowSize());
}
@Override
public long pendingBytes() {
return pendingBytes;
}
/** Adds the {@code frame} to the pending queue and increments the pending byte count. */
void enqueueFrame(FlowControlled frame) {
FlowControlled last = pendingWriteQueue.peekLast();
if (last == null) {
enqueueFrameWithoutMerge(frame);
return;
}
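      // Try to coalesce with the last pending frame (e.g. merging small DATA frames); on a
      // successful merge, account only for the growth of the merged frame.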
int lastSize = last.size();
if (last.merge(ctx, frame)) {
incrementPendingBytes(last.size() - lastSize, true);
return;
}
enqueueFrameWithoutMerge(frame);
}
private void enqueueFrameWithoutMerge(FlowControlled frame) {
pendingWriteQueue.offer(frame);
      // This must be called after adding to the queue so that hasFrame() is updated before
      // the stream state is updated.
incrementPendingBytes(frame.size(), true);
}
@Override
public boolean hasFrame() {
return !pendingWriteQueue.isEmpty();
}
/** Returns the head of the pending queue, or {@code null} if empty. */
private FlowControlled peek() {
return pendingWriteQueue.peek();
}
/**
* Clears the pending queue and writes errors for each remaining frame.
*
* @param error the {@link Http2Error} to use.
* @param cause the {@link Throwable} that caused this method to be invoked.
*/
void cancel(Http2Error error, Throwable cause) {
cancelled = true;
// Ensure that the queue can't be modified while we are writing.
if (writing) {
return;
}
FlowControlled frame = pendingWriteQueue.poll();
if (frame != null) {
// Only create exception once and reuse to reduce overhead of filling in the stacktrace.
final Http2Exception exception =
streamError(stream.id(), error, cause, "Stream closed before write could take place");
do {
writeError(frame, exception);
frame = pendingWriteQueue.poll();
} while (frame != null);
}
streamByteDistributor.updateStreamableBytes(this);
monitor.stateCancelled(this);
}
/**
* Increments the number of pending bytes for this node and optionally updates the {@link
* StreamByteDistributor}.
*/
private void incrementPendingBytes(int numBytes, boolean updateStreamableBytes) {
pendingBytes += numBytes;
monitor.incrementPendingBytes(numBytes);
if (updateStreamableBytes) {
streamByteDistributor.updateStreamableBytes(this);
}
}
    /**
     * Decrements the number of pending bytes for this stream and optionally updates the {@link
     * StreamByteDistributor}.
     */
private void decrementPendingBytes(int bytes, boolean updateStreamableBytes) {
incrementPendingBytes(-bytes, updateStreamableBytes);
}
/** Decrement the per stream and connection flow control window by {@code bytes}. */
private void decrementFlowControlWindow(int bytes) {
try {
int negativeBytes = -bytes;
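        // Shrink both the connection and stream windows by routing a negative delta
        // through the increment path.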
connectionState.incrementStreamWindow(negativeBytes);
incrementStreamWindow(negativeBytes);
} catch (Http2Exception e) {
// Should never get here since we're decrementing.
throw new IllegalStateException(
"Invalid window state when writing frame: " + e.getMessage(), e);
}
}
/**
* Discards this {@link FlowControlled}, writing an error. If this frame is in the pending
* queue, the unwritten bytes are removed from this branch of the priority tree.
*/
private void writeError(FlowControlled frame, Http2Exception cause) {
assert ctx != null;
decrementPendingBytes(frame.size(), true);
frame.error(ctx, cause);
}
}
  /** Base class providing common functionality for writability monitor implementations. */
private class WritabilityMonitor implements StreamByteDistributor.Writer {
private boolean inWritePendingBytes;
private long totalPendingBytes;
@Override
public final void write(Http2Stream stream, int numBytes) {
state(stream).writeAllocatedBytes(numBytes);
}
/**
* Called when the writability of the underlying channel changes.
*
* @throws Http2Exception If a write occurs and an exception happens in the write operation.
*/
void channelWritabilityChange() throws Http2Exception {}
/**
* Called when the state is cancelled.
*
* @param state the state that was cancelled.
*/
void stateCancelled(FlowState state) {}
/**
* Set the initial window size for {@code state}.
*
* @param state the state to change the initial window size for.
* @param initialWindowSize the size of the window in bytes.
*/
void windowSize(FlowState state, int initialWindowSize) {
state.windowSize(initialWindowSize);
}
/**
* Increment the window size for a particular stream.
*
* @param state the state associated with the stream whose window is being incremented.
* @param delta The amount to increment by.
* @throws Http2Exception If this operation overflows the window for {@code state}.
*/
void incrementWindowSize(FlowState state, int delta) throws Http2Exception {
state.incrementStreamWindow(delta);
}
/**
* Add a frame to be sent via flow control.
*
* @param state The state associated with the stream which the {@code frame} is associated with.
* @param frame the frame to enqueue.
* @throws Http2Exception If a writability error occurs.
*/
void enqueueFrame(FlowState state, FlowControlled frame) throws Http2Exception {
state.enqueueFrame(frame);
}
/**
     * Increment the total amount of pending bytes for all streams. This method should be called
     * whenever any stream's pending byte count changes.
*
* @param delta The amount to increment by.
*/
final void incrementPendingBytes(int delta) {
totalPendingBytes += delta;
      // Notification of the writability change should be delayed until the end of the top-level
      // event. This ensures the flow controller is in a more consistent state before calling
      // external listener methods.
}
/**
* Determine if the stream associated with {@code state} is writable.
*
* @param state The state which is associated with the stream to test writability for.
* @return {@code true} if {@link FlowState#stream()} is writable. {@code false} otherwise.
*/
final boolean isWritable(FlowState state) {
return isWritableConnection() && state.isWritable();
}
final void writePendingBytes() throws Http2Exception {
      // Reentry is not permitted during the byte distribution process. It may lead to undesirable
      // distribution of bytes and even infinite loops. We protect against reentry and make sure
      // each call has an opportunity to cause a distribution to occur. This may be useful, for
      // example, if the channel's writability changes from Writable -> Not Writable (because we
      // are writing) -> Writable (because the user flushed to make more room in the channel
      // outbound buffer).
if (inWritePendingBytes) {
return;
}
inWritePendingBytes = true;
try {
int bytesToWrite = writableBytes();
        // Make sure we always write at least once, regardless of whether we have bytesToWrite.
// This ensures that zero-length frames will always be written.
for (; ; ) {
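          // Time each distribution pass; byteDistributedDuration is presumably a Prometheus
          // histogram defined earlier in this file.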
Histogram.Timer distributedTimer = byteDistributedDuration.startTimer();
boolean distributed = streamByteDistributor.distribute(bytesToWrite, this);
distributedTimer.observeDuration();
if (!distributed || (bytesToWrite = writableBytes()) <= 0 || !isChannelWritable0()) {
break;
}
}
} finally {
inWritePendingBytes = false;
}
}
void initialWindowSize(int newWindowSize) throws Http2Exception {
checkPositiveOrZero(newWindowSize, "newWindowSize");
final int delta = newWindowSize - initialWindowSize;
initialWindowSize = newWindowSize;
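      // Apply the same delta to every active stream so each stream's effective window
      // tracks the new initial setting.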
connection.forEachActiveStream(
new Http2StreamVisitor() {
@Override
public boolean visit(Http2Stream stream) throws Http2Exception {
state(stream).incrementStreamWindow(delta);
return true;
}
});
if (delta > 0 && isChannelWritable()) {
// The window size increased, send any pending frames for all streams.
writePendingBytes();
}
}
final boolean isWritableConnection() {
return connectionState.windowSize() - totalPendingBytes > 0 && isChannelWritable();
}
}
/**
* Writability of a {@code stream} is calculated using the following:
*
* <pre>
* Connection Window - Total Queued Bytes > 0 &&
* Stream Window - Bytes Queued for Stream > 0 &&
* isChannelWritable()
* </pre>
*/
private final class ListenerWritabilityMonitor extends WritabilityMonitor
implements Http2StreamVisitor {
private final Listener listener;
ListenerWritabilityMonitor(Listener listener) {
this.listener = listener;
}
@Override
public boolean visit(Http2Stream stream) throws Http2Exception {
FlowState state = state(stream);
if (isWritable(state) != state.markedWritability()) {
notifyWritabilityChanged(state);
}
return true;
}
@Override
void windowSize(FlowState state, int initialWindowSize) {
super.windowSize(state, initialWindowSize);
try {
checkStateWritability(state);
} catch (Http2Exception e) {
throw new RuntimeException("Caught unexpected exception from window", e);
}
}
@Override
void incrementWindowSize(FlowState state, int delta) throws Http2Exception {
super.incrementWindowSize(state, delta);
checkStateWritability(state);
}
@Override
void initialWindowSize(int newWindowSize) throws Http2Exception {
super.initialWindowSize(newWindowSize);
if (isWritableConnection()) {
// If the write operation does not occur we still need to check all streams because they
// may have transitioned from writable to not writable.
checkAllWritabilityChanged();
}
}
@Override
void enqueueFrame(FlowState state, FlowControlled frame) throws Http2Exception {
super.enqueueFrame(state, frame);
checkConnectionThenStreamWritabilityChanged(state);
}
@Override
void stateCancelled(FlowState state) {
try {
checkConnectionThenStreamWritabilityChanged(state);
} catch (Http2Exception e) {
throw new RuntimeException(
"Caught unexpected exception from checkAllWritabilityChanged", e);
}
}
@Override
void channelWritabilityChange() throws Http2Exception {
if (connectionState.markedWritability() != isChannelWritable()) {
checkAllWritabilityChanged();
}
}
private void checkStateWritability(FlowState state) throws Http2Exception {
if (isWritable(state) != state.markedWritability()) {
if (state == connectionState) {
checkAllWritabilityChanged();
} else {
notifyWritabilityChanged(state);
}
}
}
private void notifyWritabilityChanged(FlowState state) {
state.markedWritability(!state.markedWritability());
try {
listener.writabilityChanged(state.stream);
} catch (Throwable cause) {
logger.error("Caught Throwable from listener.writabilityChanged", cause);
}
}
private void checkConnectionThenStreamWritabilityChanged(FlowState state)
throws Http2Exception {
      // It is possible that the connection window and/or the individual stream writability
      // could change.
if (isWritableConnection() != connectionState.markedWritability()) {
checkAllWritabilityChanged();
} else if (isWritable(state) != state.markedWritability()) {
notifyWritabilityChanged(state);
}
}
private void checkAllWritabilityChanged() throws Http2Exception {
// Make sure we mark that we have notified as a result of this change.
connectionState.markedWritability(isWritableConnection());
connection.forEachActiveStream(this);
}
}
}

View File

@ -1,188 +0,0 @@
/*
* Copyright 2014 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.http2;
import static io.netty.handler.codec.http2.Http2FrameLogger.Direction.OUTBOUND;
import static io.netty.util.internal.ObjectUtil.checkNotNull;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
import io.netty.util.internal.UnstableApi;
import io.prometheus.client.Histogram;
import org.tikv.common.util.HistogramUtils;
/**
* Decorator around a {@link Http2FrameWriter} that logs all outbound frames before calling the
* writer.
*/
@UnstableApi
public class Http2OutboundFrameLogger implements Http2FrameWriter {
private final Http2FrameWriter writer;
private final Http2FrameLogger logger;
public static final Histogram writeHeaderLogDuration =
HistogramUtils.buildDuration()
.name("netty_http2_write_header_log_duration_seconds")
.help("HTTP/2 write header log duration in seconds")
.register();
public Http2OutboundFrameLogger(Http2FrameWriter writer, Http2FrameLogger logger) {
this.writer = checkNotNull(writer, "writer");
this.logger = checkNotNull(logger, "logger");
}
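  // Typical wiring (a minimal sketch; the DEBUG level is an illustrative choice):
  //   Http2FrameWriter logged =
  //       new Http2OutboundFrameLogger(
  //           new DefaultHttp2FrameWriter(), new Http2FrameLogger(LogLevel.DEBUG));
  // Each write* method below logs the outbound frame before delegating to the wrapped writer.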
@Override
public ChannelFuture writeData(
ChannelHandlerContext ctx,
int streamId,
ByteBuf data,
int padding,
boolean endStream,
ChannelPromise promise) {
logger.logData(OUTBOUND, ctx, streamId, data, padding, endStream);
return writer.writeData(ctx, streamId, data, padding, endStream, promise);
}
@Override
public ChannelFuture writeHeaders(
ChannelHandlerContext ctx,
int streamId,
Http2Headers headers,
int padding,
boolean endStream,
ChannelPromise promise) {
Histogram.Timer logTimer = writeHeaderLogDuration.startTimer();
logger.logHeaders(OUTBOUND, ctx, streamId, headers, padding, endStream);
logTimer.observeDuration();
return writer.writeHeaders(ctx, streamId, headers, padding, endStream, promise);
}
@Override
public ChannelFuture writeHeaders(
ChannelHandlerContext ctx,
int streamId,
Http2Headers headers,
int streamDependency,
short weight,
boolean exclusive,
int padding,
boolean endStream,
ChannelPromise promise) {
Histogram.Timer logTimer = writeHeaderLogDuration.startTimer();
logger.logHeaders(
OUTBOUND, ctx, streamId, headers, streamDependency, weight, exclusive, padding, endStream);
logTimer.observeDuration();
return writer.writeHeaders(
ctx, streamId, headers, streamDependency, weight, exclusive, padding, endStream, promise);
}
@Override
public ChannelFuture writePriority(
ChannelHandlerContext ctx,
int streamId,
int streamDependency,
short weight,
boolean exclusive,
ChannelPromise promise) {
logger.logPriority(OUTBOUND, ctx, streamId, streamDependency, weight, exclusive);
return writer.writePriority(ctx, streamId, streamDependency, weight, exclusive, promise);
}
@Override
public ChannelFuture writeRstStream(
ChannelHandlerContext ctx, int streamId, long errorCode, ChannelPromise promise) {
logger.logRstStream(OUTBOUND, ctx, streamId, errorCode);
return writer.writeRstStream(ctx, streamId, errorCode, promise);
}
@Override
public ChannelFuture writeSettings(
ChannelHandlerContext ctx, Http2Settings settings, ChannelPromise promise) {
logger.logSettings(OUTBOUND, ctx, settings);
return writer.writeSettings(ctx, settings, promise);
}
@Override
public ChannelFuture writeSettingsAck(ChannelHandlerContext ctx, ChannelPromise promise) {
logger.logSettingsAck(OUTBOUND, ctx);
return writer.writeSettingsAck(ctx, promise);
}
@Override
public ChannelFuture writePing(
ChannelHandlerContext ctx, boolean ack, long data, ChannelPromise promise) {
if (ack) {
logger.logPingAck(OUTBOUND, ctx, data);
} else {
logger.logPing(OUTBOUND, ctx, data);
}
return writer.writePing(ctx, ack, data, promise);
}
@Override
public ChannelFuture writePushPromise(
ChannelHandlerContext ctx,
int streamId,
int promisedStreamId,
Http2Headers headers,
int padding,
ChannelPromise promise) {
logger.logPushPromise(OUTBOUND, ctx, streamId, promisedStreamId, headers, padding);
return writer.writePushPromise(ctx, streamId, promisedStreamId, headers, padding, promise);
}
@Override
public ChannelFuture writeGoAway(
ChannelHandlerContext ctx,
int lastStreamId,
long errorCode,
ByteBuf debugData,
ChannelPromise promise) {
logger.logGoAway(OUTBOUND, ctx, lastStreamId, errorCode, debugData);
return writer.writeGoAway(ctx, lastStreamId, errorCode, debugData, promise);
}
@Override
public ChannelFuture writeWindowUpdate(
ChannelHandlerContext ctx, int streamId, int windowSizeIncrement, ChannelPromise promise) {
logger.logWindowsUpdate(OUTBOUND, ctx, streamId, windowSizeIncrement);
return writer.writeWindowUpdate(ctx, streamId, windowSizeIncrement, promise);
}
@Override
public ChannelFuture writeFrame(
ChannelHandlerContext ctx,
byte frameType,
int streamId,
Http2Flags flags,
ByteBuf payload,
ChannelPromise promise) {
logger.logUnknownFrame(OUTBOUND, ctx, frameType, streamId, flags, payload);
return writer.writeFrame(ctx, frameType, streamId, flags, payload, promise);
}
@Override
public void close() {
writer.close();
}
@Override
public Configuration configuration() {
return writer.configuration();
}
}