mirror of https://github.com/grpc/grpc-java.git
xds: import LRS v3 proto
parent cd0cc95553
commit 1c269e4289
@@ -0,0 +1,330 @@
package io.envoyproxy.envoy.service.load_stats.v3;

import static io.grpc.MethodDescriptor.generateFullMethodName;
import static io.grpc.stub.ClientCalls.asyncBidiStreamingCall;
import static io.grpc.stub.ClientCalls.asyncClientStreamingCall;
import static io.grpc.stub.ClientCalls.asyncServerStreamingCall;
import static io.grpc.stub.ClientCalls.asyncUnaryCall;
import static io.grpc.stub.ClientCalls.blockingServerStreamingCall;
import static io.grpc.stub.ClientCalls.blockingUnaryCall;
import static io.grpc.stub.ClientCalls.futureUnaryCall;
import static io.grpc.stub.ServerCalls.asyncBidiStreamingCall;
import static io.grpc.stub.ServerCalls.asyncClientStreamingCall;
import static io.grpc.stub.ServerCalls.asyncServerStreamingCall;
import static io.grpc.stub.ServerCalls.asyncUnaryCall;
import static io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall;
import static io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall;

/**
 */
@javax.annotation.Generated(
    value = "by gRPC proto compiler",
    comments = "Source: envoy/service/load_stats/v3/lrs.proto")
public final class LoadReportingServiceGrpc {

  private LoadReportingServiceGrpc() {}

  public static final String SERVICE_NAME = "envoy.service.load_stats.v3.LoadReportingService";

  // Static method descriptors that strictly reflect the proto.
  private static volatile io.grpc.MethodDescriptor<io.envoyproxy.envoy.service.load_stats.v3.LoadStatsRequest,
      io.envoyproxy.envoy.service.load_stats.v3.LoadStatsResponse> getStreamLoadStatsMethod;

  @io.grpc.stub.annotations.RpcMethod(
      fullMethodName = SERVICE_NAME + '/' + "StreamLoadStats",
      requestType = io.envoyproxy.envoy.service.load_stats.v3.LoadStatsRequest.class,
      responseType = io.envoyproxy.envoy.service.load_stats.v3.LoadStatsResponse.class,
      methodType = io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING)
  public static io.grpc.MethodDescriptor<io.envoyproxy.envoy.service.load_stats.v3.LoadStatsRequest,
      io.envoyproxy.envoy.service.load_stats.v3.LoadStatsResponse> getStreamLoadStatsMethod() {
    io.grpc.MethodDescriptor<io.envoyproxy.envoy.service.load_stats.v3.LoadStatsRequest, io.envoyproxy.envoy.service.load_stats.v3.LoadStatsResponse> getStreamLoadStatsMethod;
    if ((getStreamLoadStatsMethod = LoadReportingServiceGrpc.getStreamLoadStatsMethod) == null) {
      synchronized (LoadReportingServiceGrpc.class) {
        if ((getStreamLoadStatsMethod = LoadReportingServiceGrpc.getStreamLoadStatsMethod) == null) {
          LoadReportingServiceGrpc.getStreamLoadStatsMethod = getStreamLoadStatsMethod =
              io.grpc.MethodDescriptor.<io.envoyproxy.envoy.service.load_stats.v3.LoadStatsRequest, io.envoyproxy.envoy.service.load_stats.v3.LoadStatsResponse>newBuilder()
              .setType(io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING)
              .setFullMethodName(generateFullMethodName(SERVICE_NAME, "StreamLoadStats"))
              .setSampledToLocalTracing(true)
              .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
                  io.envoyproxy.envoy.service.load_stats.v3.LoadStatsRequest.getDefaultInstance()))
              .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
                  io.envoyproxy.envoy.service.load_stats.v3.LoadStatsResponse.getDefaultInstance()))
              .setSchemaDescriptor(new LoadReportingServiceMethodDescriptorSupplier("StreamLoadStats"))
              .build();
        }
      }
    }
    return getStreamLoadStatsMethod;
  }

  /**
   * Creates a new async stub that supports all call types for the service
   */
  public static LoadReportingServiceStub newStub(io.grpc.Channel channel) {
    io.grpc.stub.AbstractStub.StubFactory<LoadReportingServiceStub> factory =
      new io.grpc.stub.AbstractStub.StubFactory<LoadReportingServiceStub>() {
        @java.lang.Override
        public LoadReportingServiceStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
          return new LoadReportingServiceStub(channel, callOptions);
        }
      };
    return LoadReportingServiceStub.newStub(factory, channel);
  }

  /**
   * Creates a new blocking-style stub that supports unary and streaming output calls on the service
   */
  public static LoadReportingServiceBlockingStub newBlockingStub(
      io.grpc.Channel channel) {
    io.grpc.stub.AbstractStub.StubFactory<LoadReportingServiceBlockingStub> factory =
      new io.grpc.stub.AbstractStub.StubFactory<LoadReportingServiceBlockingStub>() {
        @java.lang.Override
        public LoadReportingServiceBlockingStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
          return new LoadReportingServiceBlockingStub(channel, callOptions);
        }
      };
    return LoadReportingServiceBlockingStub.newStub(factory, channel);
  }

  /**
   * Creates a new ListenableFuture-style stub that supports unary calls on the service
   */
  public static LoadReportingServiceFutureStub newFutureStub(
      io.grpc.Channel channel) {
    io.grpc.stub.AbstractStub.StubFactory<LoadReportingServiceFutureStub> factory =
      new io.grpc.stub.AbstractStub.StubFactory<LoadReportingServiceFutureStub>() {
        @java.lang.Override
        public LoadReportingServiceFutureStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
          return new LoadReportingServiceFutureStub(channel, callOptions);
        }
      };
    return LoadReportingServiceFutureStub.newStub(factory, channel);
  }

  /**
   */
  public static abstract class LoadReportingServiceImplBase implements io.grpc.BindableService {

    /**
     * <pre>
     * Advanced API to allow for multi-dimensional load balancing by remote
     * server. For receiving LB assignments, the steps are:
     * 1, The management server is configured with per cluster/zone/load metric
     *    capacity configuration. The capacity configuration definition is
     *    outside of the scope of this document.
     * 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters
     *    to balance.
     * Independently, Envoy will initiate a StreamLoadStats bidi stream with a
     * management server:
     * 1. Once a connection establishes, the management server publishes a
     *    LoadStatsResponse for all clusters it is interested in learning load
     *    stats about.
     * 2. For each cluster, Envoy load balances incoming traffic to upstream hosts
     *    based on per-zone weights and/or per-instance weights (if specified)
     *    based on intra-zone LbPolicy. This information comes from the above
     *    {Stream,Fetch}Endpoints.
     * 3. When upstream hosts reply, they optionally add header <define header
     *    name> with ASCII representation of EndpointLoadMetricStats.
     * 4. Envoy aggregates load reports over the period of time given to it in
     *    LoadStatsResponse.load_reporting_interval. This includes aggregation
     *    stats Envoy maintains by itself (total_requests, rpc_errors etc.) as
     *    well as load metrics from upstream hosts.
     * 5. When the timer of load_reporting_interval expires, Envoy sends new
     *    LoadStatsRequest filled with load reports for each cluster.
     * 6. The management server uses the load reports from all reported Envoys
     *    from around the world, computes global assignment and prepares traffic
     *    assignment destined for each zone Envoys are located in. Goto 2.
     * </pre>
     */
    public io.grpc.stub.StreamObserver<io.envoyproxy.envoy.service.load_stats.v3.LoadStatsRequest> streamLoadStats(
        io.grpc.stub.StreamObserver<io.envoyproxy.envoy.service.load_stats.v3.LoadStatsResponse> responseObserver) {
      return asyncUnimplementedStreamingCall(getStreamLoadStatsMethod(), responseObserver);
    }

    @java.lang.Override public final io.grpc.ServerServiceDefinition bindService() {
      return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor())
          .addMethod(
            getStreamLoadStatsMethod(),
            asyncBidiStreamingCall(
              new MethodHandlers<
                io.envoyproxy.envoy.service.load_stats.v3.LoadStatsRequest,
                io.envoyproxy.envoy.service.load_stats.v3.LoadStatsResponse>(
                  this, METHODID_STREAM_LOAD_STATS)))
          .build();
    }
  }

  /**
   */
  public static final class LoadReportingServiceStub extends io.grpc.stub.AbstractAsyncStub<LoadReportingServiceStub> {
    private LoadReportingServiceStub(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      super(channel, callOptions);
    }

    @java.lang.Override
    protected LoadReportingServiceStub build(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      return new LoadReportingServiceStub(channel, callOptions);
    }

    /**
     * <pre>
     * Advanced API to allow for multi-dimensional load balancing by remote
     * server. For receiving LB assignments, the steps are:
     * 1, The management server is configured with per cluster/zone/load metric
     *    capacity configuration. The capacity configuration definition is
     *    outside of the scope of this document.
     * 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters
     *    to balance.
     * Independently, Envoy will initiate a StreamLoadStats bidi stream with a
     * management server:
     * 1. Once a connection establishes, the management server publishes a
     *    LoadStatsResponse for all clusters it is interested in learning load
     *    stats about.
     * 2. For each cluster, Envoy load balances incoming traffic to upstream hosts
     *    based on per-zone weights and/or per-instance weights (if specified)
     *    based on intra-zone LbPolicy. This information comes from the above
     *    {Stream,Fetch}Endpoints.
     * 3. When upstream hosts reply, they optionally add header <define header
     *    name> with ASCII representation of EndpointLoadMetricStats.
     * 4. Envoy aggregates load reports over the period of time given to it in
     *    LoadStatsResponse.load_reporting_interval. This includes aggregation
     *    stats Envoy maintains by itself (total_requests, rpc_errors etc.) as
     *    well as load metrics from upstream hosts.
     * 5. When the timer of load_reporting_interval expires, Envoy sends new
     *    LoadStatsRequest filled with load reports for each cluster.
     * 6. The management server uses the load reports from all reported Envoys
     *    from around the world, computes global assignment and prepares traffic
     *    assignment destined for each zone Envoys are located in. Goto 2.
     * </pre>
     */
    public io.grpc.stub.StreamObserver<io.envoyproxy.envoy.service.load_stats.v3.LoadStatsRequest> streamLoadStats(
        io.grpc.stub.StreamObserver<io.envoyproxy.envoy.service.load_stats.v3.LoadStatsResponse> responseObserver) {
      return asyncBidiStreamingCall(
          getChannel().newCall(getStreamLoadStatsMethod(), getCallOptions()), responseObserver);
    }
  }

  /**
   */
  public static final class LoadReportingServiceBlockingStub extends io.grpc.stub.AbstractBlockingStub<LoadReportingServiceBlockingStub> {
    private LoadReportingServiceBlockingStub(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      super(channel, callOptions);
    }

    @java.lang.Override
    protected LoadReportingServiceBlockingStub build(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      return new LoadReportingServiceBlockingStub(channel, callOptions);
    }
  }

  /**
   */
  public static final class LoadReportingServiceFutureStub extends io.grpc.stub.AbstractFutureStub<LoadReportingServiceFutureStub> {
    private LoadReportingServiceFutureStub(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      super(channel, callOptions);
    }

    @java.lang.Override
    protected LoadReportingServiceFutureStub build(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      return new LoadReportingServiceFutureStub(channel, callOptions);
    }
  }

  private static final int METHODID_STREAM_LOAD_STATS = 0;

  private static final class MethodHandlers<Req, Resp> implements
      io.grpc.stub.ServerCalls.UnaryMethod<Req, Resp>,
      io.grpc.stub.ServerCalls.ServerStreamingMethod<Req, Resp>,
      io.grpc.stub.ServerCalls.ClientStreamingMethod<Req, Resp>,
      io.grpc.stub.ServerCalls.BidiStreamingMethod<Req, Resp> {
    private final LoadReportingServiceImplBase serviceImpl;
    private final int methodId;

    MethodHandlers(LoadReportingServiceImplBase serviceImpl, int methodId) {
      this.serviceImpl = serviceImpl;
      this.methodId = methodId;
    }

    @java.lang.Override
    @java.lang.SuppressWarnings("unchecked")
    public void invoke(Req request, io.grpc.stub.StreamObserver<Resp> responseObserver) {
      switch (methodId) {
        default:
          throw new AssertionError();
      }
    }

    @java.lang.Override
    @java.lang.SuppressWarnings("unchecked")
    public io.grpc.stub.StreamObserver<Req> invoke(
        io.grpc.stub.StreamObserver<Resp> responseObserver) {
      switch (methodId) {
        case METHODID_STREAM_LOAD_STATS:
          return (io.grpc.stub.StreamObserver<Req>) serviceImpl.streamLoadStats(
              (io.grpc.stub.StreamObserver<io.envoyproxy.envoy.service.load_stats.v3.LoadStatsResponse>) responseObserver);
        default:
          throw new AssertionError();
      }
    }
  }

  private static abstract class LoadReportingServiceBaseDescriptorSupplier
      implements io.grpc.protobuf.ProtoFileDescriptorSupplier, io.grpc.protobuf.ProtoServiceDescriptorSupplier {
    LoadReportingServiceBaseDescriptorSupplier() {}

    @java.lang.Override
    public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() {
      return io.envoyproxy.envoy.service.load_stats.v3.LrsProto.getDescriptor();
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() {
      return getFileDescriptor().findServiceByName("LoadReportingService");
    }
  }

  private static final class LoadReportingServiceFileDescriptorSupplier
      extends LoadReportingServiceBaseDescriptorSupplier {
    LoadReportingServiceFileDescriptorSupplier() {}
  }

  private static final class LoadReportingServiceMethodDescriptorSupplier
      extends LoadReportingServiceBaseDescriptorSupplier
      implements io.grpc.protobuf.ProtoMethodDescriptorSupplier {
    private final String methodName;

    LoadReportingServiceMethodDescriptorSupplier(String methodName) {
      this.methodName = methodName;
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() {
      return getServiceDescriptor().findMethodByName(methodName);
    }
  }

  private static volatile io.grpc.ServiceDescriptor serviceDescriptor;

  public static io.grpc.ServiceDescriptor getServiceDescriptor() {
    io.grpc.ServiceDescriptor result = serviceDescriptor;
    if (result == null) {
      synchronized (LoadReportingServiceGrpc.class) {
        result = serviceDescriptor;
        if (result == null) {
          serviceDescriptor = result = io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME)
              .setSchemaDescriptor(new LoadReportingServiceFileDescriptorSupplier())
              .addMethod(getStreamLoadStatsMethod())
              .build();
        }
      }
    }
    return result;
  }
}
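For orientation, a minimal client-side sketch of how this generated stub could be wired up; the channel target and the node id are placeholders and not part of this commit, and the Node/LoadStatsRequest builders are the usual protobuf-java generated classes assumed to be on the classpath.

import io.envoyproxy.envoy.config.core.v3.Node;
import io.envoyproxy.envoy.service.load_stats.v3.LoadReportingServiceGrpc;
import io.envoyproxy.envoy.service.load_stats.v3.LoadStatsRequest;
import io.envoyproxy.envoy.service.load_stats.v3.LoadStatsResponse;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import io.grpc.stub.StreamObserver;

public class LrsClientSketch {
  public static void main(String[] args) {
    // "lrs.example.com:443" is a placeholder management-server address.
    ManagedChannel channel =
        ManagedChannelBuilder.forTarget("lrs.example.com:443").usePlaintext().build();
    LoadReportingServiceGrpc.LoadReportingServiceStub stub =
        LoadReportingServiceGrpc.newStub(channel);

    // Open the bidi StreamLoadStats stream; responses name the clusters to report on.
    StreamObserver<LoadStatsRequest> requestObserver =
        stub.streamLoadStats(new StreamObserver<LoadStatsResponse>() {
          @Override public void onNext(LoadStatsResponse response) {
            // Carries clusters (or send_all_clusters) plus load_reporting_interval.
          }
          @Override public void onError(Throwable t) {}
          @Override public void onCompleted() {}
        });

    // The initial request identifies the client node; later requests carry cluster_stats.
    requestObserver.onNext(LoadStatsRequest.newBuilder()
        .setNode(Node.newBuilder().setId("example-node"))
        .build());
  }
}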
@@ -82,6 +82,7 @@ envoy/config/core/v3/socket_option.proto
 envoy/config/core/v3/substitution_format_string.proto
 envoy/config/endpoint/v3/endpoint.proto
 envoy/config/endpoint/v3/endpoint_components.proto
+envoy/config/endpoint/v3/load_report.proto
 envoy/config/filter/accesslog/v2/accesslog.proto
 envoy/config/filter/fault/v2/fault.proto
 envoy/config/filter/http/fault/v2/fault.proto
@@ -121,6 +122,7 @@ envoy/service/discovery/v2/sds.proto
 envoy/service/discovery/v3/ads.proto
 envoy/service/discovery/v3/discovery.proto
 envoy/service/load_stats/v2/lrs.proto
+envoy/service/load_stats/v3/lrs.proto
 envoy/type/http.proto
 envoy/type/matcher/regex.proto
 envoy/type/matcher/string.proto
xds/third_party/envoy/src/main/proto/envoy/config/endpoint/v3/load_report.proto (vendored, new file, 167 lines)
@@ -0,0 +1,167 @@
syntax = "proto3";

package envoy.config.endpoint.v3;

import "envoy/config/core/v3/address.proto";
import "envoy/config/core/v3/base.proto";

import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.endpoint.v3";
option java_outer_classname = "LoadReportProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Load Report]

// These are stats Envoy reports to the management server at a frequency defined by
// :ref:`LoadStatsResponse.load_reporting_interval<envoy_api_field_service.load_stats.v3.LoadStatsResponse.load_reporting_interval>`.
// Stats per upstream region/zone and optionally per subzone.
// [#next-free-field: 9]
message UpstreamLocalityStats {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.endpoint.UpstreamLocalityStats";

  // Name of zone, region and optionally endpoint group these metrics were
  // collected from. Zone and region names could be empty if unknown.
  core.v3.Locality locality = 1;

  // The total number of requests successfully completed by the endpoints in the
  // locality.
  uint64 total_successful_requests = 2;

  // The total number of unfinished requests
  uint64 total_requests_in_progress = 3;

  // The total number of requests that failed due to errors at the endpoint,
  // aggregated over all endpoints in the locality.
  uint64 total_error_requests = 4;

  // The total number of requests that were issued by this Envoy since
  // the last report. This information is aggregated over all the
  // upstream endpoints in the locality.
  uint64 total_issued_requests = 8;

  // Stats for multi-dimensional load balancing.
  repeated EndpointLoadMetricStats load_metric_stats = 5;

  // Endpoint granularity stats information for this locality. This information
  // is populated if the Server requests it by setting
  // :ref:`LoadStatsResponse.report_endpoint_granularity<envoy_api_field_service.load_stats.v3.LoadStatsResponse.report_endpoint_granularity>`.
  repeated UpstreamEndpointStats upstream_endpoint_stats = 7;

  // [#not-implemented-hide:] The priority of the endpoint group these metrics
  // were collected from.
  uint32 priority = 6;
}

// [#next-free-field: 8]
message UpstreamEndpointStats {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.endpoint.UpstreamEndpointStats";

  // Upstream host address.
  core.v3.Address address = 1;

  // Opaque and implementation dependent metadata of the
  // endpoint. Envoy will pass this directly to the management server.
  google.protobuf.Struct metadata = 6;

  // The total number of requests successfully completed by the endpoints in the
  // locality. These include non-5xx responses for HTTP, where errors
  // originate at the client and the endpoint responded successfully. For gRPC,
  // the grpc-status values are those not covered by total_error_requests below.
  uint64 total_successful_requests = 2;

  // The total number of unfinished requests for this endpoint.
  uint64 total_requests_in_progress = 3;

  // The total number of requests that failed due to errors at the endpoint.
  // For HTTP these are responses with 5xx status codes and for gRPC the
  // grpc-status values:
  //
  //   - DeadlineExceeded
  //   - Unimplemented
  //   - Internal
  //   - Unavailable
  //   - Unknown
  //   - DataLoss
  uint64 total_error_requests = 4;

  // The total number of requests that were issued to this endpoint
  // since the last report. A single TCP connection, HTTP or gRPC
  // request or stream is counted as one request.
  uint64 total_issued_requests = 7;

  // Stats for multi-dimensional load balancing.
  repeated EndpointLoadMetricStats load_metric_stats = 5;
}

message EndpointLoadMetricStats {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.endpoint.EndpointLoadMetricStats";

  // Name of the metric; may be empty.
  string metric_name = 1;

  // Number of calls that finished and included this metric.
  uint64 num_requests_finished_with_metric = 2;

  // Sum of metric values across all calls that finished with this metric for
  // load_reporting_interval.
  double total_metric_value = 3;
}

// Per cluster load stats. Envoy reports these stats a management server in a
// :ref:`LoadStatsRequest<envoy_api_msg_service.load_stats.v3.LoadStatsRequest>`
// Next ID: 7
// [#next-free-field: 7]
message ClusterStats {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.ClusterStats";

  message DroppedRequests {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.api.v2.endpoint.ClusterStats.DroppedRequests";

    // Identifier for the policy specifying the drop.
    string category = 1 [(validate.rules).string = {min_bytes: 1}];

    // Total number of deliberately dropped requests for the category.
    uint64 dropped_count = 2;
  }

  // The name of the cluster.
  string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}];

  // The eds_cluster_config service_name of the cluster.
  // It's possible that two clusters send the same service_name to EDS,
  // in that case, the management server is supposed to do aggregation on the load reports.
  string cluster_service_name = 6;

  // Need at least one.
  repeated UpstreamLocalityStats upstream_locality_stats = 2
      [(validate.rules).repeated = {min_items: 1}];

  // Cluster-level stats such as total_successful_requests may be computed by
  // summing upstream_locality_stats. In addition, below there are additional
  // cluster-wide stats.
  //
  // The total number of dropped requests. This covers requests
  // deliberately dropped by the drop_overload policy and circuit breaking.
  uint64 total_dropped_requests = 3;

  // Information about deliberately dropped requests for each category specified
  // in the DropOverload policy.
  repeated DroppedRequests dropped_requests = 5;

  // Period over which the actual load report occurred. This will be guaranteed to include every
  // request reported. Due to system load and delays between the *LoadStatsRequest* sent from Envoy
  // and the *LoadStatsResponse* message sent from the management server, this may be longer than
  // the requested load reporting interval in the *LoadStatsResponse*.
  google.protobuf.Duration load_report_interval = 4;
}
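As a rough illustration of how these messages compose, a sketch of assembling one cluster's report with the generated Java builders; the cluster, locality, metric names, and numbers are invented for the example, and the builder methods follow the standard protobuf-java naming derived from the fields above.

import io.envoyproxy.envoy.config.core.v3.Locality;
import io.envoyproxy.envoy.config.endpoint.v3.ClusterStats;
import io.envoyproxy.envoy.config.endpoint.v3.EndpointLoadMetricStats;
import io.envoyproxy.envoy.config.endpoint.v3.UpstreamLocalityStats;

public class LoadReportSketch {
  static ClusterStats exampleClusterStats() {
    // One locality's aggregated counters for the last reporting interval.
    UpstreamLocalityStats localityStats = UpstreamLocalityStats.newBuilder()
        .setLocality(Locality.newBuilder().setRegion("us-east1").setZone("us-east1-b"))
        .setTotalSuccessfulRequests(95)
        .setTotalErrorRequests(5)
        .setTotalIssuedRequests(100)
        .setTotalRequestsInProgress(3)
        .addLoadMetricStats(EndpointLoadMetricStats.newBuilder()
            .setMetricName("cpu_utilization")          // illustrative metric name
            .setNumRequestsFinishedWithMetric(100)
            .setTotalMetricValue(37.5))
        .build();

    // ClusterStats requires a non-empty cluster_name and at least one locality entry.
    return ClusterStats.newBuilder()
        .setClusterName("example_cluster")
        .addUpstreamLocalityStats(localityStats)
        .setTotalDroppedRequests(2)
        .addDroppedRequests(ClusterStats.DroppedRequests.newBuilder()
            .setCategory("drop_overload")
            .setDroppedCount(2))
        .setLoadReportInterval(com.google.protobuf.Duration.newBuilder().setSeconds(10))
        .build();
  }
}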
@@ -0,0 +1,103 @@
syntax = "proto3";

package envoy.service.load_stats.v3;

import "envoy/config/core/v3/base.proto";
import "envoy/config/endpoint/v3/load_report.proto";

import "google/protobuf/duration.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.service.load_stats.v3";
option java_outer_classname = "LrsProto";
option java_multiple_files = true;
option java_generic_services = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Load Reporting service (LRS)]

// Load Reporting Service is an Envoy API to emit load reports. Envoy will initiate a bi-directional
// stream with a management server. Upon connecting, the management server can send a
// :ref:`LoadStatsResponse <envoy_api_msg_service.load_stats.v3.LoadStatsResponse>` to a node it is
// interested in getting the load reports for. Envoy in this node will start sending
// :ref:`LoadStatsRequest <envoy_api_msg_service.load_stats.v3.LoadStatsRequest>`. This is done periodically
// based on the :ref:`load reporting interval <envoy_api_field_service.load_stats.v3.LoadStatsResponse.load_reporting_interval>`
// For details, take a look at the :ref:`Load Reporting Service sandbox example <install_sandboxes_load_reporting_service>`.

service LoadReportingService {
  // Advanced API to allow for multi-dimensional load balancing by remote
  // server. For receiving LB assignments, the steps are:
  // 1, The management server is configured with per cluster/zone/load metric
  //    capacity configuration. The capacity configuration definition is
  //    outside of the scope of this document.
  // 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters
  //    to balance.
  //
  // Independently, Envoy will initiate a StreamLoadStats bidi stream with a
  // management server:
  // 1. Once a connection establishes, the management server publishes a
  //    LoadStatsResponse for all clusters it is interested in learning load
  //    stats about.
  // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts
  //    based on per-zone weights and/or per-instance weights (if specified)
  //    based on intra-zone LbPolicy. This information comes from the above
  //    {Stream,Fetch}Endpoints.
  // 3. When upstream hosts reply, they optionally add header <define header
  //    name> with ASCII representation of EndpointLoadMetricStats.
  // 4. Envoy aggregates load reports over the period of time given to it in
  //    LoadStatsResponse.load_reporting_interval. This includes aggregation
  //    stats Envoy maintains by itself (total_requests, rpc_errors etc.) as
  //    well as load metrics from upstream hosts.
  // 5. When the timer of load_reporting_interval expires, Envoy sends new
  //    LoadStatsRequest filled with load reports for each cluster.
  // 6. The management server uses the load reports from all reported Envoys
  //    from around the world, computes global assignment and prepares traffic
  //    assignment destined for each zone Envoys are located in. Goto 2.
  rpc StreamLoadStats(stream LoadStatsRequest) returns (stream LoadStatsResponse) {
  }
}

// A load report Envoy sends to the management server.
message LoadStatsRequest {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.load_stats.v2.LoadStatsRequest";

  // Node identifier for Envoy instance.
  config.core.v3.Node node = 1;

  // A list of load stats to report.
  repeated config.endpoint.v3.ClusterStats cluster_stats = 2;
}

// The management server sends envoy a LoadStatsResponse with all clusters it
// is interested in learning load stats about.
message LoadStatsResponse {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.load_stats.v2.LoadStatsResponse";

  // Clusters to report stats for.
  // Not populated if *send_all_clusters* is true.
  repeated string clusters = 1;

  // If true, the client should send all clusters it knows about.
  // Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their
  // :ref:`client_features<envoy_api_field_config.core.v3.Node.client_features>` field will honor this field.
  bool send_all_clusters = 4;

  // The minimum interval of time to collect stats over. This is only a minimum for two reasons:
  //
  // 1. There may be some delay from when the timer fires until stats sampling occurs.
  // 2. For clusters that were already feature in the previous *LoadStatsResponse*, any traffic
  //    that is observed in between the corresponding previous *LoadStatsRequest* and this
  //    *LoadStatsResponse* will also be accumulated and billed to the cluster. This avoids a period
  //    of inobservability that might otherwise exists between the messages. New clusters are not
  //    subject to this consideration.
  google.protobuf.Duration load_reporting_interval = 2;

  // Set to *true* if the management server supports endpoint granularity
  // report.
  bool report_endpoint_granularity = 3;
}
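A hedged sketch of the server side of the protocol described in the service comment: a management server could extend the generated LoadReportingServiceImplBase, publish an initial LoadStatsResponse once the stream is up, and then consume the periodic LoadStatsRequest reports. The 10-second interval and the send_all_clusters choice are illustrative, not prescribed by this commit.

import com.google.protobuf.Duration;
import io.envoyproxy.envoy.service.load_stats.v3.LoadReportingServiceGrpc;
import io.envoyproxy.envoy.service.load_stats.v3.LoadStatsRequest;
import io.envoyproxy.envoy.service.load_stats.v3.LoadStatsResponse;
import io.grpc.stub.StreamObserver;

public class LrsServerSketch extends LoadReportingServiceGrpc.LoadReportingServiceImplBase {
  @Override
  public StreamObserver<LoadStatsRequest> streamLoadStats(
      StreamObserver<LoadStatsResponse> responseObserver) {
    // Step 1: tell the connected client what to report and how often.
    responseObserver.onNext(LoadStatsResponse.newBuilder()
        .setSendAllClusters(true)  // requires the client capability noted in lrs.proto
        .setLoadReportingInterval(Duration.newBuilder().setSeconds(10))
        .build());

    // Steps 4-6: each request carries the aggregated per-cluster stats for the last interval.
    return new StreamObserver<LoadStatsRequest>() {
      @Override public void onNext(LoadStatsRequest request) {
        // request.getClusterStatsList() holds the ClusterStats defined in load_report.proto.
      }
      @Override public void onError(Throwable t) {}
      @Override public void onCompleted() {
        responseObserver.onCompleted();
      }
    };
  }
}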