mirror of https://github.com/grpc/grpc-ios.git

Compare commits
9 Commits:
07d4f08634
4226336a2c
840a3922cf
241b838d68
842f08d10f
d76af29e2e
7210e05e70
6e74ce6cd3
cef80e497c

@@ -10,7 +10,7 @@ env:
TAG: ${{ github.event.release.tag_name }}
jobs:
release-cocoapod-gRPC-Core:
runs-on: macos-14
runs-on: macos-12
steps:
- name: Repo checkout
uses: actions/checkout@v3

@@ -28,7 +28,7 @@ jobs:
version=${TAG#v}
timeout 1h scripts/wait_for_pod_release.sh gRPC-Core $version
release-cocoapod-gRPC-Cpp:
runs-on: macos-14
runs-on: macos-12
needs: [release-cocoapod-gRPC-Core]
steps:
- name: Repo checkout

@@ -47,7 +47,7 @@ jobs:
version=${TAG#v}
timeout 1h scripts/wait_for_pod_release.sh gRPC-C++ $version
release-cocoapod-gRPC-RxLibrary:
runs-on: macos-14
runs-on: macos-12
steps:
- name: Repo checkout
uses: actions/checkout@v3

@@ -66,7 +66,7 @@ jobs:
timeout 1h scripts/wait_for_pod_release.sh gRPC-RxLibrary $version

release-cocoapod-gRPC:
runs-on: macos-14
runs-on: macos-12
needs: [release-cocoapod-gRPC-RxLibrary, release-cocoapod-gRPC-Core]
steps:
- name: Repo checkout

@@ -86,7 +86,7 @@ jobs:
timeout 1h scripts/wait_for_pod_release.sh gRPC $version

release-cocoapod-gRPC-ProtoRPC:
runs-on: macos-14
runs-on: macos-12
needs: [release-cocoapod-gRPC-RxLibrary, release-cocoapod-gRPC]
steps:
- name: Repo checkout

@@ -22,7 +22,7 @@
Pod::Spec.new do |s|
s.name = 'gRPC-C++'
# TODO (mxyan): use version that match gRPC version when pod is stabilized
version = '1.66.0-dev'
version = '1.66.2'
s.version = version
s.summary = 'gRPC C++ library'
s.homepage = 'https://grpc.io'

@@ -254,6 +254,7 @@ Pod::Spec.new do |s|
ss.dependency 'abseil/log/log', abseil_version
ss.dependency 'abseil/memory/memory', abseil_version
ss.dependency 'abseil/meta/type_traits', abseil_version
ss.dependency 'abseil/numeric/bits', abseil_version
ss.dependency 'abseil/random/bit_gen_ref', abseil_version
ss.dependency 'abseil/random/distributions', abseil_version
ss.dependency 'abseil/random/random', abseil_version

@@ -280,8 +281,10 @@ Pod::Spec.new do |s|
'src/core/client_channel/client_channel_service_config.h',
'src/core/client_channel/config_selector.h',
'src/core/client_channel/connector.h',
'src/core/client_channel/direct_channel.h',
'src/core/client_channel/dynamic_filters.h',
'src/core/client_channel/global_subchannel_pool.h',
'src/core/client_channel/lb_metadata.h',
'src/core/client_channel/load_balanced_call_destination.h',
'src/core/client_channel/local_subchannel_pool.h',
'src/core/client_channel/retry_filter.h',

@@ -950,7 +953,6 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h',
'src/core/lib/event_engine/thready_event_engine/thready_event_engine.h',
'src/core/lib/event_engine/time_util.h',
'src/core/lib/event_engine/trace.h',
'src/core/lib/event_engine/utils.h',
'src/core/lib/event_engine/windows/grpc_polled_fd_windows.h',
'src/core/lib/event_engine/windows/iocp.h',

@@ -1176,7 +1178,6 @@ Pod::Spec.new do |s|
'src/core/lib/slice/slice_internal.h',
'src/core/lib/slice/slice_refcount.h',
'src/core/lib/slice/slice_string_helpers.h',
'src/core/lib/surface/api_trace.h',
'src/core/lib/surface/call.h',
'src/core/lib/surface/call_test_only.h',
'src/core/lib/surface/call_utils.h',

@@ -1201,6 +1202,7 @@ Pod::Spec.new do |s|
'src/core/lib/transport/call_filters.h',
'src/core/lib/transport/call_final_info.h',
'src/core/lib/transport/call_spine.h',
'src/core/lib/transport/call_state.h',
'src/core/lib/transport/connectivity_state.h',
'src/core/lib/transport/custom_metadata.h',
'src/core/lib/transport/error_utils.h',

@@ -1319,32 +1321,40 @@ Pod::Spec.new do |s|
'src/core/util/json/json_reader.h',
'src/core/util/json/json_util.h',
'src/core/util/json/json_writer.h',
'src/core/util/latent_see.h',
'src/core/util/spinlock.h',
'src/core/util/string.h',
'src/core/util/time_precise.h',
'src/core/util/tmpfile.h',
'src/core/util/upb_utils.h',
'src/core/util/useful.h',
'src/core/xds/grpc/certificate_provider_store.h',
'src/core/xds/grpc/file_watcher_certificate_provider_factory.h',
'src/core/xds/grpc/upb_utils.h',
'src/core/xds/grpc/xds_audit_logger_registry.h',
'src/core/xds/grpc/xds_bootstrap_grpc.h',
'src/core/xds/grpc/xds_certificate_provider.h',
'src/core/xds/grpc/xds_client_grpc.h',
'src/core/xds/grpc/xds_cluster.h',
'src/core/xds/grpc/xds_cluster_parser.h',
'src/core/xds/grpc/xds_cluster_specifier_plugin.h',
'src/core/xds/grpc/xds_common_types.h',
'src/core/xds/grpc/xds_common_types_parser.h',
'src/core/xds/grpc/xds_enabled_server.h',
'src/core/xds/grpc/xds_endpoint.h',
'src/core/xds/grpc/xds_endpoint_parser.h',
'src/core/xds/grpc/xds_health_status.h',
'src/core/xds/grpc/xds_http_fault_filter.h',
'src/core/xds/grpc/xds_http_filters.h',
'src/core/xds/grpc/xds_http_filter.h',
'src/core/xds/grpc/xds_http_filter_registry.h',
'src/core/xds/grpc/xds_http_rbac_filter.h',
'src/core/xds/grpc/xds_http_stateful_session_filter.h',
'src/core/xds/grpc/xds_lb_policy_registry.h',
'src/core/xds/grpc/xds_listener.h',
'src/core/xds/grpc/xds_listener_parser.h',
'src/core/xds/grpc/xds_route_config.h',
'src/core/xds/grpc/xds_route_config_parser.h',
'src/core/xds/grpc/xds_routing.h',
'src/core/xds/grpc/xds_server_grpc.h',
'src/core/xds/grpc/xds_transport_grpc.h',
'src/core/xds/xds_client/xds_api.h',
'src/core/xds/xds_client/xds_bootstrap.h',

@@ -1463,6 +1473,7 @@ Pod::Spec.new do |s|
'third_party/upb/upb/message/copy.h',
'third_party/upb/upb/message/internal/accessors.h',
'third_party/upb/upb/message/internal/array.h',
'third_party/upb/upb/message/internal/compare_unknown.h',
'third_party/upb/upb/message/internal/extension.h',
'third_party/upb/upb/message/internal/map.h',
'third_party/upb/upb/message/internal/map_entry.h',

@@ -1571,8 +1582,10 @@ Pod::Spec.new do |s|
'src/core/client_channel/client_channel_service_config.h',
'src/core/client_channel/config_selector.h',
'src/core/client_channel/connector.h',
'src/core/client_channel/direct_channel.h',
'src/core/client_channel/dynamic_filters.h',
'src/core/client_channel/global_subchannel_pool.h',
'src/core/client_channel/lb_metadata.h',
'src/core/client_channel/load_balanced_call_destination.h',
'src/core/client_channel/local_subchannel_pool.h',
'src/core/client_channel/retry_filter.h',

@@ -2223,7 +2236,6 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h',
'src/core/lib/event_engine/thready_event_engine/thready_event_engine.h',
'src/core/lib/event_engine/time_util.h',
'src/core/lib/event_engine/trace.h',
'src/core/lib/event_engine/utils.h',
'src/core/lib/event_engine/windows/grpc_polled_fd_windows.h',
'src/core/lib/event_engine/windows/iocp.h',

@@ -2449,7 +2461,6 @@ Pod::Spec.new do |s|
'src/core/lib/slice/slice_internal.h',
'src/core/lib/slice/slice_refcount.h',
'src/core/lib/slice/slice_string_helpers.h',
'src/core/lib/surface/api_trace.h',
'src/core/lib/surface/call.h',
'src/core/lib/surface/call_test_only.h',
'src/core/lib/surface/call_utils.h',

@@ -2474,6 +2485,7 @@ Pod::Spec.new do |s|
'src/core/lib/transport/call_filters.h',
'src/core/lib/transport/call_final_info.h',
'src/core/lib/transport/call_spine.h',
'src/core/lib/transport/call_state.h',
'src/core/lib/transport/connectivity_state.h',
'src/core/lib/transport/custom_metadata.h',
'src/core/lib/transport/error_utils.h',

@@ -2592,32 +2604,40 @@ Pod::Spec.new do |s|
'src/core/util/json/json_reader.h',
'src/core/util/json/json_util.h',
'src/core/util/json/json_writer.h',
'src/core/util/latent_see.h',
'src/core/util/spinlock.h',
'src/core/util/string.h',
'src/core/util/time_precise.h',
'src/core/util/tmpfile.h',
'src/core/util/upb_utils.h',
'src/core/util/useful.h',
'src/core/xds/grpc/certificate_provider_store.h',
'src/core/xds/grpc/file_watcher_certificate_provider_factory.h',
'src/core/xds/grpc/upb_utils.h',
'src/core/xds/grpc/xds_audit_logger_registry.h',
'src/core/xds/grpc/xds_bootstrap_grpc.h',
'src/core/xds/grpc/xds_certificate_provider.h',
'src/core/xds/grpc/xds_client_grpc.h',
'src/core/xds/grpc/xds_cluster.h',
'src/core/xds/grpc/xds_cluster_parser.h',
'src/core/xds/grpc/xds_cluster_specifier_plugin.h',
'src/core/xds/grpc/xds_common_types.h',
'src/core/xds/grpc/xds_common_types_parser.h',
'src/core/xds/grpc/xds_enabled_server.h',
'src/core/xds/grpc/xds_endpoint.h',
'src/core/xds/grpc/xds_endpoint_parser.h',
'src/core/xds/grpc/xds_health_status.h',
'src/core/xds/grpc/xds_http_fault_filter.h',
'src/core/xds/grpc/xds_http_filters.h',
'src/core/xds/grpc/xds_http_filter.h',
'src/core/xds/grpc/xds_http_filter_registry.h',
'src/core/xds/grpc/xds_http_rbac_filter.h',
'src/core/xds/grpc/xds_http_stateful_session_filter.h',
'src/core/xds/grpc/xds_lb_policy_registry.h',
'src/core/xds/grpc/xds_listener.h',
'src/core/xds/grpc/xds_listener_parser.h',
'src/core/xds/grpc/xds_route_config.h',
'src/core/xds/grpc/xds_route_config_parser.h',
'src/core/xds/grpc/xds_routing.h',
'src/core/xds/grpc/xds_server_grpc.h',
'src/core/xds/grpc/xds_transport_grpc.h',
'src/core/xds/xds_client/xds_api.h',
'src/core/xds/xds_client/xds_bootstrap.h',

@@ -2687,6 +2707,7 @@ Pod::Spec.new do |s|
'third_party/upb/upb/message/copy.h',
'third_party/upb/upb/message/internal/accessors.h',
'third_party/upb/upb/message/internal/array.h',
'third_party/upb/upb/message/internal/compare_unknown.h',
'third_party/upb/upb/message/internal/extension.h',
'third_party/upb/upb/message/internal/map.h',
'third_party/upb/upb/message/internal/map_entry.h',

@@ -21,7 +21,7 @@
Pod::Spec.new do |s|
s.name = 'gRPC-Core'
version = '1.66.0-dev'
version = '1.66.2'
s.version = version
s.summary = 'Core cross-platform gRPC library, written in C'
s.homepage = 'https://grpc.io'

@@ -199,7 +199,7 @@ Pod::Spec.new do |s|
ss.libraries = 'z'
ss.dependency "#{s.name}/Interface", version
ss.dependency "#{s.name}/Privacy", version
ss.dependency 'BoringSSL-GRPC', '0.0.35'
ss.dependency 'BoringSSL-GRPC', '0.0.36'
ss.dependency 'abseil/algorithm/container', abseil_version
ss.dependency 'abseil/base/base', abseil_version
ss.dependency 'abseil/base/config', abseil_version

@@ -221,6 +221,7 @@ Pod::Spec.new do |s|
ss.dependency 'abseil/log/log', abseil_version
ss.dependency 'abseil/memory/memory', abseil_version
ss.dependency 'abseil/meta/type_traits', abseil_version
ss.dependency 'abseil/numeric/bits', abseil_version
ss.dependency 'abseil/random/bit_gen_ref', abseil_version
ss.dependency 'abseil/random/distributions', abseil_version
ss.dependency 'abseil/random/random', abseil_version

@@ -257,10 +258,14 @@ Pod::Spec.new do |s|
'src/core/client_channel/client_channel_service_config.h',
'src/core/client_channel/config_selector.h',
'src/core/client_channel/connector.h',
'src/core/client_channel/direct_channel.cc',
'src/core/client_channel/direct_channel.h',
'src/core/client_channel/dynamic_filters.cc',
'src/core/client_channel/dynamic_filters.h',
'src/core/client_channel/global_subchannel_pool.cc',
'src/core/client_channel/global_subchannel_pool.h',
'src/core/client_channel/lb_metadata.cc',
'src/core/client_channel/lb_metadata.h',
'src/core/client_channel/load_balanced_call_destination.cc',
'src/core/client_channel/load_balanced_call_destination.h',
'src/core/client_channel/local_subchannel_pool.cc',

@@ -1335,7 +1340,6 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/thready_event_engine/thready_event_engine.h',
'src/core/lib/event_engine/time_util.cc',
'src/core/lib/event_engine/time_util.h',
'src/core/lib/event_engine/trace.h',
'src/core/lib/event_engine/utils.cc',
'src/core/lib/event_engine/utils.h',
'src/core/lib/event_engine/windows/grpc_polled_fd_windows.cc',

@@ -1753,7 +1757,6 @@ Pod::Spec.new do |s|
'src/core/lib/slice/slice_refcount.h',
'src/core/lib/slice/slice_string_helpers.cc',
'src/core/lib/slice/slice_string_helpers.h',
'src/core/lib/surface/api_trace.h',
'src/core/lib/surface/byte_buffer.cc',
'src/core/lib/surface/byte_buffer_reader.cc',
'src/core/lib/surface/call.cc',

@@ -1806,6 +1809,8 @@ Pod::Spec.new do |s|
'src/core/lib/transport/call_final_info.h',
'src/core/lib/transport/call_spine.cc',
'src/core/lib/transport/call_spine.h',
'src/core/lib/transport/call_state.cc',
'src/core/lib/transport/call_state.h',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/connectivity_state.h',
'src/core/lib/transport/custom_metadata.h',

@@ -2041,6 +2046,8 @@ Pod::Spec.new do |s|
'src/core/util/json/json_util.h',
'src/core/util/json/json_writer.cc',
'src/core/util/json/json_writer.h',
'src/core/util/latent_see.cc',
'src/core/util/latent_see.h',
'src/core/util/linux/cpu.cc',
'src/core/util/linux/log.cc',
'src/core/util/log.cc',

@@ -2060,6 +2067,7 @@ Pod::Spec.new do |s|
'src/core/util/time_precise.cc',
'src/core/util/time_precise.h',
'src/core/util/tmpfile.h',
'src/core/util/upb_utils.h',
'src/core/util/useful.h',
'src/core/util/windows/cpu.cc',
'src/core/util/windows/log.cc',

@@ -2072,7 +2080,6 @@ Pod::Spec.new do |s|
'src/core/xds/grpc/certificate_provider_store.h',
'src/core/xds/grpc/file_watcher_certificate_provider_factory.cc',
'src/core/xds/grpc/file_watcher_certificate_provider_factory.h',
'src/core/xds/grpc/upb_utils.h',
'src/core/xds/grpc/xds_audit_logger_registry.cc',
'src/core/xds/grpc/xds_audit_logger_registry.h',
'src/core/xds/grpc/xds_bootstrap_grpc.cc',

@@ -2083,18 +2090,25 @@ Pod::Spec.new do |s|
'src/core/xds/grpc/xds_client_grpc.h',
'src/core/xds/grpc/xds_cluster.cc',
'src/core/xds/grpc/xds_cluster.h',
'src/core/xds/grpc/xds_cluster_parser.cc',
'src/core/xds/grpc/xds_cluster_parser.h',
'src/core/xds/grpc/xds_cluster_specifier_plugin.cc',
'src/core/xds/grpc/xds_cluster_specifier_plugin.h',
'src/core/xds/grpc/xds_common_types.cc',
'src/core/xds/grpc/xds_common_types.h',
'src/core/xds/grpc/xds_common_types_parser.cc',
'src/core/xds/grpc/xds_common_types_parser.h',
'src/core/xds/grpc/xds_endpoint.cc',
'src/core/xds/grpc/xds_endpoint.h',
'src/core/xds/grpc/xds_endpoint_parser.cc',
'src/core/xds/grpc/xds_endpoint_parser.h',
'src/core/xds/grpc/xds_health_status.cc',
'src/core/xds/grpc/xds_health_status.h',
'src/core/xds/grpc/xds_http_fault_filter.cc',
'src/core/xds/grpc/xds_http_fault_filter.h',
'src/core/xds/grpc/xds_http_filters.cc',
'src/core/xds/grpc/xds_http_filters.h',
'src/core/xds/grpc/xds_http_filter.h',
'src/core/xds/grpc/xds_http_filter_registry.cc',
'src/core/xds/grpc/xds_http_filter_registry.h',
'src/core/xds/grpc/xds_http_rbac_filter.cc',
'src/core/xds/grpc/xds_http_rbac_filter.h',
'src/core/xds/grpc/xds_http_stateful_session_filter.cc',

@@ -2103,10 +2117,16 @@ Pod::Spec.new do |s|
'src/core/xds/grpc/xds_lb_policy_registry.h',
'src/core/xds/grpc/xds_listener.cc',
'src/core/xds/grpc/xds_listener.h',
'src/core/xds/grpc/xds_listener_parser.cc',
'src/core/xds/grpc/xds_listener_parser.h',
'src/core/xds/grpc/xds_route_config.cc',
'src/core/xds/grpc/xds_route_config.h',
'src/core/xds/grpc/xds_route_config_parser.cc',
'src/core/xds/grpc/xds_route_config_parser.h',
'src/core/xds/grpc/xds_routing.cc',
'src/core/xds/grpc/xds_routing.h',
'src/core/xds/grpc/xds_server_grpc.cc',
'src/core/xds/grpc/xds_server_grpc.h',
'src/core/xds/grpc/xds_transport_grpc.cc',
'src/core/xds/grpc/xds_transport_grpc.h',
'src/core/xds/xds_client/xds_api.cc',

@@ -2206,6 +2226,8 @@ Pod::Spec.new do |s|
'third_party/upb/upb/message/copy.h',
'third_party/upb/upb/message/internal/accessors.h',
'third_party/upb/upb/message/internal/array.h',
'third_party/upb/upb/message/internal/compare_unknown.c',
'third_party/upb/upb/message/internal/compare_unknown.h',
'third_party/upb/upb/message/internal/extension.c',
'third_party/upb/upb/message/internal/extension.h',
'third_party/upb/upb/message/internal/map.h',

@@ -2361,8 +2383,10 @@ Pod::Spec.new do |s|
'src/core/client_channel/client_channel_service_config.h',
'src/core/client_channel/config_selector.h',
'src/core/client_channel/connector.h',
'src/core/client_channel/direct_channel.h',
'src/core/client_channel/dynamic_filters.h',
'src/core/client_channel/global_subchannel_pool.h',
'src/core/client_channel/lb_metadata.h',
'src/core/client_channel/load_balanced_call_destination.h',
'src/core/client_channel/local_subchannel_pool.h',
'src/core/client_channel/retry_filter.h',

@@ -2993,7 +3017,6 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h',
'src/core/lib/event_engine/thready_event_engine/thready_event_engine.h',
'src/core/lib/event_engine/time_util.h',
'src/core/lib/event_engine/trace.h',
'src/core/lib/event_engine/utils.h',
'src/core/lib/event_engine/windows/grpc_polled_fd_windows.h',
'src/core/lib/event_engine/windows/iocp.h',

@@ -3219,7 +3242,6 @@ Pod::Spec.new do |s|
'src/core/lib/slice/slice_internal.h',
'src/core/lib/slice/slice_refcount.h',
'src/core/lib/slice/slice_string_helpers.h',
'src/core/lib/surface/api_trace.h',
'src/core/lib/surface/call.h',
'src/core/lib/surface/call_test_only.h',
'src/core/lib/surface/call_utils.h',

@@ -3244,6 +3266,7 @@ Pod::Spec.new do |s|
'src/core/lib/transport/call_filters.h',
'src/core/lib/transport/call_final_info.h',
'src/core/lib/transport/call_spine.h',
'src/core/lib/transport/call_state.h',
'src/core/lib/transport/connectivity_state.h',
'src/core/lib/transport/custom_metadata.h',
'src/core/lib/transport/error_utils.h',

@@ -3362,31 +3385,39 @@ Pod::Spec.new do |s|
'src/core/util/json/json_reader.h',
'src/core/util/json/json_util.h',
'src/core/util/json/json_writer.h',
'src/core/util/latent_see.h',
'src/core/util/spinlock.h',
'src/core/util/string.h',
'src/core/util/time_precise.h',
'src/core/util/tmpfile.h',
'src/core/util/upb_utils.h',
'src/core/util/useful.h',
'src/core/xds/grpc/certificate_provider_store.h',
'src/core/xds/grpc/file_watcher_certificate_provider_factory.h',
'src/core/xds/grpc/upb_utils.h',
'src/core/xds/grpc/xds_audit_logger_registry.h',
'src/core/xds/grpc/xds_bootstrap_grpc.h',
'src/core/xds/grpc/xds_certificate_provider.h',
'src/core/xds/grpc/xds_client_grpc.h',
'src/core/xds/grpc/xds_cluster.h',
'src/core/xds/grpc/xds_cluster_parser.h',
'src/core/xds/grpc/xds_cluster_specifier_plugin.h',
'src/core/xds/grpc/xds_common_types.h',
'src/core/xds/grpc/xds_common_types_parser.h',
'src/core/xds/grpc/xds_endpoint.h',
'src/core/xds/grpc/xds_endpoint_parser.h',
'src/core/xds/grpc/xds_health_status.h',
'src/core/xds/grpc/xds_http_fault_filter.h',
'src/core/xds/grpc/xds_http_filters.h',
'src/core/xds/grpc/xds_http_filter.h',
'src/core/xds/grpc/xds_http_filter_registry.h',
'src/core/xds/grpc/xds_http_rbac_filter.h',
'src/core/xds/grpc/xds_http_stateful_session_filter.h',
'src/core/xds/grpc/xds_lb_policy_registry.h',
'src/core/xds/grpc/xds_listener.h',
'src/core/xds/grpc/xds_listener_parser.h',
'src/core/xds/grpc/xds_route_config.h',
'src/core/xds/grpc/xds_route_config_parser.h',
'src/core/xds/grpc/xds_routing.h',
'src/core/xds/grpc/xds_server_grpc.h',
'src/core/xds/grpc/xds_transport_grpc.h',
'src/core/xds/xds_client/xds_api.h',
'src/core/xds/xds_client/xds_bootstrap.h',

@@ -3445,6 +3476,7 @@ Pod::Spec.new do |s|
'third_party/upb/upb/message/copy.h',
'third_party/upb/upb/message/internal/accessors.h',
'third_party/upb/upb/message/internal/array.h',
'third_party/upb/upb/message/internal/compare_unknown.h',
'third_party/upb/upb/message/internal/extension.h',
'third_party/upb/upb/message/internal/map.h',
'third_party/upb/upb/message/internal/map_entry.h',

@@ -21,7 +21,7 @@
Pod::Spec.new do |s|
s.name = 'gRPC-ProtoRPC'
version = '1.66.0-dev'
version = '1.66.2'
s.version = version
s.summary = 'RPC library for Protocol Buffers, based on gRPC'
s.homepage = 'https://grpc.io'

@@ -21,7 +21,7 @@
Pod::Spec.new do |s|
s.name = 'gRPC-RxLibrary'
version = '1.66.0-dev'
version = '1.66.2'
s.version = version
s.summary = 'Reactive Extensions library for iOS/OSX.'
s.homepage = 'https://grpc.io'

@@ -20,7 +20,7 @@
Pod::Spec.new do |s|
s.name = 'gRPC'
version = '1.66.0-dev'
version = '1.66.2'
s.version = version
s.summary = 'gRPC client library for iOS/OSX'
s.homepage = 'https://grpc.io'

@@ -503,6 +503,20 @@ bool operator!=(const EventEngine::ConnectionHandle& lhs,
std::ostream& operator<<(std::ostream& out,
const EventEngine::ConnectionHandle& handle);

namespace detail {
std::string FormatHandleString(uint64_t key1, uint64_t key2);
}

template <typename Sink>
void AbslStringify(Sink& out, const EventEngine::ConnectionHandle& handle) {
out.Append(detail::FormatHandleString(handle.keys[0], handle.keys[1]));
}

template <typename Sink>
void AbslStringify(Sink& out, const EventEngine::TaskHandle& handle) {
out.Append(detail::FormatHandleString(handle.keys[0], handle.keys[1]));
}

} // namespace experimental
} // namespace grpc_event_engine

@@ -60,6 +60,9 @@ class Extensible {
/// if (endpoint != nullptr) endpoint->Process();
///
virtual void* QueryExtension(absl::string_view /*id*/) { return nullptr; }

protected:
~Extensible() = default;
};

} // namespace experimental

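For illustration, a minimal self-contained sketch (not the gRPC sources) of the QueryExtension pattern shown in the hunk above: an object advertises an optional extension by id and the caller down-casts the returned pointer. The FlushExtension type and its id string are hypothetical.

```cpp
#include <iostream>
#include "absl/strings/string_view.h"

// Simplified stand-in mirroring the Extensible base shown above.
class Extensible {
 public:
  virtual void* QueryExtension(absl::string_view /*id*/) { return nullptr; }
 protected:
  ~Extensible() = default;
};

// Hypothetical optional capability identified by a string id.
class FlushExtension {
 public:
  static absl::string_view id() { return "example.flush"; }
  virtual void Flush() = 0;
 protected:
  ~FlushExtension() = default;
};

class Endpoint : public Extensible, public FlushExtension {
 public:
  // Return a pointer to the extension only when the id matches.
  void* QueryExtension(absl::string_view id) override {
    return id == FlushExtension::id() ? static_cast<FlushExtension*>(this)
                                      : nullptr;
  }
  void Flush() override { std::cout << "flushed\n"; }
};

int main() {
  Endpoint ep;
  Extensible& e = ep;
  // Mirrors the documented usage above: query, null-check, then use.
  auto* ext =
      static_cast<FlushExtension*>(e.QueryExtension(FlushExtension::id()));
  if (ext != nullptr) ext->Flush();
}
```
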
@@ -46,6 +46,24 @@ class MemoryRequest {
size_t min() const { return min_; }
size_t max() const { return max_; }

bool operator==(const MemoryRequest& other) const {
return min_ == other.min_ && max_ == other.max_;
}
bool operator!=(const MemoryRequest& other) const {
return !(*this == other);
}

template <typename Sink>
friend void AbslStringify(Sink& s, const MemoryRequest& r) {
if (r.min_ == r.max_) {
s.Append(r.min_);
} else {
s.Append(r.min_);
s.Append("..");
s.Append(r.max_);
}
}

private:
size_t min_;
size_t max_;

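For illustration, a minimal sketch (not from the gRPC sources) of how a type with a friend AbslStringify and operator==, like the MemoryRequest additions above, can be compared and formatted through Abseil. The standalone Range type here is a hypothetical stand-in.

```cpp
#include <cstddef>
#include <iostream>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"

// Hypothetical stand-in mirroring the MemoryRequest additions above.
class Range {
 public:
  Range(size_t min, size_t max) : min_(min), max_(max) {}
  bool operator==(const Range& other) const {
    return min_ == other.min_ && max_ == other.max_;
  }
  bool operator!=(const Range& other) const { return !(*this == other); }

  // AbslStringify lets absl::StrCat/StrFormat (and Abseil logging) print it.
  template <typename Sink>
  friend void AbslStringify(Sink& sink, const Range& r) {
    if (r.min_ == r.max_) {
      absl::Format(&sink, "%d", r.min_);
    } else {
      absl::Format(&sink, "%d..%d", r.min_, r.max_);
    }
  }

 private:
  size_t min_;
  size_t max_;
};

int main() {
  Range a(1024, 4096), b(2048, 2048);
  std::cout << absl::StrCat(a) << "\n";  // prints "1024..4096"
  std::cout << absl::StrCat(b) << "\n";  // prints "2048"
  std::cout << (a != b) << "\n";         // prints 1
}
```
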
@@ -28,15 +28,10 @@
extern "C" {
#endif

/** GPR log API.

Usage (within grpc):

int argument1 = 3;
char* argument2 = "hello";
gpr_log(GPR_DEBUG, "format string %d", argument1);
gpr_log(GPR_INFO, "hello world");
gpr_log(GPR_ERROR, "%d %s!!", argument1, argument2); */
/**
* Logging functions in this file are deprecated.
* Please use absl ABSL_LOG instead.
*/

/** The severity of a log message - use the #defines below when calling into
gpr_log to additionally supply file and line data */

@@ -46,9 +41,6 @@ typedef enum gpr_log_severity {
GPR_LOG_SEVERITY_ERROR
} gpr_log_severity;

/** Returns a string representation of the log severity */
GPRAPI const char* gpr_log_severity_string(gpr_log_severity severity);

/** Macros to build log contexts at various severity levels */
#define GPR_DEBUG __FILE__, __LINE__, GPR_LOG_SEVERITY_DEBUG
#define GPR_INFO __FILE__, __LINE__, GPR_LOG_SEVERITY_INFO

@@ -65,7 +57,7 @@ GPRAPI void gpr_log_message(const char* file, int line,
gpr_log_severity severity, const char* message);

/** Set global log verbosity */
GPRAPI void gpr_set_log_verbosity(gpr_log_severity min_severity_to_print);
GPRAPI void gpr_set_log_verbosity(gpr_log_severity deprecated_setting);

GPRAPI void gpr_log_verbosity_init(void);

@@ -83,10 +75,7 @@ typedef struct gpr_log_func_args gpr_log_func_args;

typedef void (*gpr_log_func)(gpr_log_func_args* args);

GPRAPI void gpr_set_log_function(gpr_log_func func);

GPRAPI void gpr_assertion_failed(const char* filename, int line,
const char* message) GPR_ATTRIBUTE_NORETURN;
GPRAPI void gpr_set_log_function(gpr_log_func deprecated_setting);

#ifdef __cplusplus
}

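As a hedged illustration of the deprecation note above (and matching the client_channel changes later in this diff), code that used printf-style gpr_log typically migrates to the streaming Abseil logging macros along these lines; the report function is hypothetical.

```cpp
#include <grpc/support/log.h>
#include "absl/log/log.h"

void report(int code, const char* what) {
  // Before: printf-style gpr_log (deprecated per the comment above).
  gpr_log(GPR_ERROR, "operation failed: %d %s", code, what);
  // After: streaming Abseil macro, as used elsewhere in this diff.
  LOG(ERROR) << "operation failed: " << code << " " << what;
}
```
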
@@ -17,6 +17,7 @@

#include "absl/strings/string_view.h"

#include <grpc/event_engine/endpoint_config.h>
#include <grpc/support/port_platform.h>

namespace grpc_core {

@@ -26,9 +27,10 @@ namespace experimental {
// plugins.
class StatsPluginChannelScope {
public:
StatsPluginChannelScope(absl::string_view target,
absl::string_view default_authority)
: target_(target), default_authority_(default_authority) {}
StatsPluginChannelScope(
absl::string_view target, absl::string_view default_authority,
const grpc_event_engine::experimental::EndpointConfig& args)
: target_(target), default_authority_(default_authority), args_(args) {}

/// Returns the target used for creating the channel in the canonical form.
/// (Canonicalized target definition -

@@ -36,13 +38,22 @@ class StatsPluginChannelScope {
absl::string_view target() const { return target_; }
/// Returns the default authority for the channel.
absl::string_view default_authority() const { return default_authority_; }
/// Returns channel arguments. THIS METHOD IS EXPERIMENTAL.
// TODO(roth, ctiller, yashkt): Find a better representation for
// channel args before de-experimentalizing this API.
const grpc_event_engine::experimental::EndpointConfig& experimental_args()
const {
return args_;
}

private:
// Disable copy constructor and copy-assignment operator.
StatsPluginChannelScope(const StatsPluginChannelScope&) = delete;
StatsPluginChannelScope& operator=(const StatsPluginChannelScope&) = delete;

absl::string_view target_;
absl::string_view default_authority_;
const grpc_event_engine::experimental::EndpointConfig& args_;
};

} // namespace experimental

@@ -746,6 +746,22 @@ extern void gpr_unreachable_code(const char* reason, const char* file,
#endif
#endif /* GPR_ATTRIBUTE_NOINLINE */

#ifndef GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION
#ifdef __cplusplus
#if GPR_HAS_CPP_ATTRIBUTE(clang::always_inline)
#define GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION [[clang::always_inline]]
#elif GPR_HAS_ATTRIBUTE(always_inline)
#define GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION __attribute__((always_inline))
#else
// TODO(ctiller): add __forceinline for MSVC
#define GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION
#endif
#else
// Disable for C code
#define GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION
#endif
#endif /* GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION */

#ifndef GPR_NO_UNIQUE_ADDRESS
#if GPR_HAS_CPP_ATTRIBUTE(no_unique_address)
#define GPR_NO_UNIQUE_ADDRESS [[no_unique_address]]

@@ -820,6 +836,12 @@ extern void gpr_unreachable_code(const char* reason, const char* file,
#endif /* __GPR_WINDOWS */
#endif /* GRPC_ALLOW_EXCEPTIONS */

#ifdef __has_builtin
#define GRPC_HAS_BUILTIN(a) __has_builtin(a)
#else
#define GRPC_HAS_BUILTIN(a) 0
#endif

/* Use GPR_LIKELY only in cases where you are sure that a certain outcome is the
* most likely. Ideally, also collect performance numbers to justify the claim.
*/

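For illustration only, a small sketch of how the two macros added above might be applied in user code; the function and local macro names are hypothetical.

```cpp
#include <grpc/support/port_platform.h>

// Force-inline a tiny hot-path helper where the toolchain supports it;
// the macro expands to nothing when no always_inline attribute exists.
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline int add_one(int x) { return x + 1; }

// GRPC_HAS_BUILTIN guards use of compiler builtins portably.
#if GRPC_HAS_BUILTIN(__builtin_expect)
#define MY_LIKELY(x) __builtin_expect(!!(x), 1)  // hypothetical local macro
#else
#define MY_LIKELY(x) (x)
#endif

int clamp_positive(int x) { return MY_LIKELY(x > 0) ? add_one(x) - 1 : 0; }
```
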
@@ -116,9 +116,6 @@ std::shared_ptr<ServerCredentials> AltsServerCredentials(
const AltsServerCredentialsOptions& options);

/// Builds Local ServerCredentials.
std::shared_ptr<ServerCredentials> AltsServerCredentials(
const AltsServerCredentialsOptions& options);

std::shared_ptr<ServerCredentials> LocalServerCredentials(
grpc_local_connect_type type);

@@ -20,8 +20,8 @@

#define GRPC_CPP_VERSION_MAJOR 1
#define GRPC_CPP_VERSION_MINOR 66
#define GRPC_CPP_VERSION_PATCH 0
#define GRPC_CPP_VERSION_TAG "dev"
#define GRPC_CPP_VERSION_STRING "1.66.0-dev"
#define GRPC_CPP_VERSION_PATCH 2
#define GRPC_CPP_VERSION_TAG ""
#define GRPC_CPP_VERSION_STRING "1.66.2"

#endif // GRPCPP_VERSION_INFO_H

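For illustration, a compile-time check against the bumped version macros above (a sketch, not part of the diff):

```cpp
#include <grpcpp/version_info.h>

// Fail the build if an older gRPC C++ release is picked up.
static_assert(GRPC_CPP_VERSION_MAJOR > 1 ||
                  (GRPC_CPP_VERSION_MAJOR == 1 && GRPC_CPP_VERSION_MINOR >= 66),
              "gRPC C++ >= 1.66 required");

const char* grpc_cpp_version() { return GRPC_CPP_VERSION_STRING; }
```
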
@@ -29,8 +29,10 @@ targets = (
'upb_base_lib',
'upb_mem_lib',
'upb_message_lib',
'upb_mini_descriptor_lib',
'upb_json_lib',
'upb_textformat_lib',
'upb_wire_lib',
'utf8_range_lib',
're2',
'z',

@@ -10,3 +10,4 @@ set -ex
brew install coreutils

brew install pyyaml
pip3 install pyyaml || true

@@ -28,6 +28,7 @@
#include <vector>

#include "absl/cleanup/cleanup.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"

@@ -43,7 +44,6 @@
#include <grpc/slice.h>
#include <grpc/status.h>
#include <grpc/support/json.h>
#include <grpc/support/log.h>
#include <grpc/support/metrics.h>
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>

@@ -61,6 +61,7 @@
#include "src/core/lib/channel/status_util.h"
#include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/event_engine/channel_args_endpoint_config.h"
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/sync.h"

@@ -73,6 +74,7 @@
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/promise/sleep.h"
#include "src/core/lib/promise/try_seq.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/slice/slice_internal.h"

@@ -283,10 +285,10 @@ class ClientChannel::SubchannelWrapper::WatcherWrapper
}
}
} else {
gpr_log(GPR_ERROR,
"client_channel=%p: Illegal keepalive throttling value %s",
subchannel_wrapper_->client_channel_.get(),
std::string(keepalive_throttling.value()).c_str());
LOG(ERROR) << "client_channel="
<< subchannel_wrapper_->client_channel_.get()
<< ": Illegal keepalive throttling value "
<< std::string(keepalive_throttling.value());
}
}
// Propagate status only in state TF.

@@ -453,10 +455,10 @@ class ClientChannel::ClientChannelControlHelper
const char* extra = client_channel_->disconnect_error_.ok()
? ""
: " (ignoring -- channel shutting down)";
gpr_log(GPR_INFO,
"client_channel=%p: update: state=%s status=(%s) picker=%p%s",
client_channel_.get(), ConnectivityStateName(state),
status.ToString().c_str(), picker.get(), extra);
LOG(INFO) << "client_channel=" << client_channel_.get()
<< ": update: state=" << ConnectivityStateName(state)
<< " status=(" << status << ") picker=" << picker.get()
<< extra;
}
// Do update only if not shutting down.
if (client_channel_->disconnect_error_.ok()) {

@@ -625,6 +627,7 @@ ClientChannel::ClientChannel(
work_serializer_(std::make_shared<WorkSerializer>(event_engine_)),
state_tracker_("client_channel", GRPC_CHANNEL_IDLE),
subchannel_pool_(GetSubchannelPool(channel_args_)) {
CHECK(event_engine_.get() != nullptr);
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << this << ": creating client_channel";
// Set initial keepalive time.

@@ -635,8 +638,10 @@ ClientChannel::ClientChannel(
keepalive_time_ = -1; // unset
}
// Get stats plugins for channel.
experimental::StatsPluginChannelScope scope(this->target(),
default_authority_);
grpc_event_engine::experimental::ChannelArgsEndpointConfig endpoint_config(
channel_args_);
experimental::StatsPluginChannelScope scope(
this->target(), default_authority_, endpoint_config);
stats_plugin_group_ =
GlobalStatsPluginRegistry::GetStatsPluginsForChannel(scope);
}

@@ -683,10 +688,93 @@ grpc_connectivity_state ClientChannel::CheckConnectivityState(
return state;
}

void ClientChannel::WatchConnectivityState(grpc_connectivity_state, Timestamp,
grpc_completion_queue*, void*) {
// TODO(ctiller): implement
Crash("not implemented");
namespace {

// A fire-and-forget object to handle external connectivity state watches.
class ExternalStateWatcher : public RefCounted<ExternalStateWatcher> {
public:
ExternalStateWatcher(WeakRefCountedPtr<ClientChannel> channel,
grpc_completion_queue* cq, void* tag,
grpc_connectivity_state last_observed_state,
Timestamp deadline)
: channel_(std::move(channel)), cq_(cq), tag_(tag) {
MutexLock lock(&mu_);
// Start watch. This inherits the ref from creation.
auto watcher =
MakeOrphanable<Watcher>(RefCountedPtr<ExternalStateWatcher>(this));
watcher_ = watcher.get();
channel_->AddConnectivityWatcher(last_observed_state, std::move(watcher));
// Start timer. This takes a second ref.
const Duration timeout = deadline - Timestamp::Now();
timer_handle_ =
channel_->event_engine()->RunAfter(timeout, [self = Ref()]() mutable {
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
self->MaybeStartCompletion(absl::DeadlineExceededError(
"Timed out waiting for connection state change"));
// ExternalStateWatcher deletion might require an active ExecCtx.
self.reset();
});
}

private:
class Watcher : public AsyncConnectivityStateWatcherInterface {
public:
explicit Watcher(RefCountedPtr<ExternalStateWatcher> external_state_watcher)
: external_state_watcher_(std::move(external_state_watcher)) {}

void OnConnectivityStateChange(grpc_connectivity_state /*new_state*/,
const absl::Status& /*status*/) override {
external_state_watcher_->MaybeStartCompletion(absl::OkStatus());
}

private:
RefCountedPtr<ExternalStateWatcher> external_state_watcher_;
};

// This is called both when the watch reports a new connectivity state
// and when the timer fires. It will trigger a CQ notification only
// on the first call. Subsequent calls will be ignored, because
// events can come in asynchronously.
void MaybeStartCompletion(absl::Status status) {
MutexLock lock(&mu_);
if (watcher_ == nullptr) return; // Ignore subsequent notifications.
// Cancel watch.
channel_->RemoveConnectivityWatcher(watcher_);
watcher_ = nullptr;
// Cancel timer.
channel_->event_engine()->Cancel(timer_handle_);
// Send CQ completion.
Ref().release(); // Released in FinishedCompletion().
grpc_cq_end_op(cq_, tag_, status, FinishedCompletion, this,
&completion_storage_);
}

// Called when the completion is returned to the CQ.
static void FinishedCompletion(void* arg, grpc_cq_completion* /*ignored*/) {
auto* self = static_cast<ExternalStateWatcher*>(arg);
self->Unref();
}

WeakRefCountedPtr<ClientChannel> channel_;

Mutex mu_;
grpc_completion_queue* cq_ ABSL_GUARDED_BY(&mu_);
void* tag_ ABSL_GUARDED_BY(&mu_);
grpc_cq_completion completion_storage_ ABSL_GUARDED_BY(&mu_);
Watcher* watcher_ ABSL_GUARDED_BY(&mu_) = nullptr;
grpc_event_engine::experimental::EventEngine::TaskHandle timer_handle_
ABSL_GUARDED_BY(&mu_);
};

} // namespace

void ClientChannel::WatchConnectivityState(grpc_connectivity_state state,
Timestamp deadline,
grpc_completion_queue* cq,
void* tag) {
new ExternalStateWatcher(WeakRefAsSubclass<ClientChannel>(), cq, tag, state,
deadline);
}

void ClientChannel::AddConnectivityWatcher(
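
The ExternalStateWatcher above backs the public connectivity-watch C API. As a hedged usage sketch (assuming a channel and completion queue already exist; the wrapper function is hypothetical):

```cpp
#include <grpc/grpc.h>

// Ask to be notified (via `tag` on `cq`) when the channel's state changes
// from its currently observed state, or when the deadline expires.
void watch_state_once(grpc_channel* channel, grpc_completion_queue* cq,
                      void* tag) {
  grpc_connectivity_state last_state =
      grpc_channel_check_connectivity_state(channel, /*try_to_connect=*/1);
  gpr_timespec deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(5, GPR_TIMESPAN));
  grpc_channel_watch_connectivity_state(channel, last_state, deadline, cq, tag);
  // The result is later delivered by grpc_completion_queue_next() as an
  // event whose tag matches `tag`.
}
```
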
@@ -768,10 +856,12 @@ grpc_call* ClientChannel::CreateCall(
grpc_call* parent_call, uint32_t propagation_mask,
grpc_completion_queue* cq, grpc_pollset_set* /*pollset_set_alternative*/,
Slice path, absl::optional<Slice> authority, Timestamp deadline, bool) {
auto arena = call_arena_allocator()->MakeArena();
arena->SetContext<grpc_event_engine::experimental::EventEngine>(
event_engine());
return MakeClientCall(parent_call, propagation_mask, cq, std::move(path),
std::move(authority), false, deadline,
compression_options(), event_engine_.get(),
call_arena_allocator()->MakeArena(), Ref());
compression_options(), std::move(arena), Ref());
}

void ClientChannel::StartCall(UnstartedCallHandler unstarted_handler) {

@@ -907,15 +997,13 @@ RefCountedPtr<LoadBalancingPolicy::Config> ChooseLbPolicy(
.LoadBalancingPolicyExists(*policy_name, &requires_config) ||
requires_config)) {
if (requires_config) {
gpr_log(GPR_ERROR,
"LB policy: %s passed through channel_args must not "
"require a config. Using pick_first instead.",
std::string(*policy_name).c_str());
LOG(ERROR) << "LB policy: " << *policy_name
<< " passed through channel_args must not "
"require a config. Using pick_first instead.";
} else {
gpr_log(GPR_ERROR,
"LB policy: %s passed through channel_args does not exist. "
"Using pick_first instead.",
std::string(*policy_name).c_str());
LOG(ERROR) << "LB policy: " << *policy_name
<< " passed through channel_args does not exist. "
"Using pick_first instead.";
}
policy_name = "pick_first";
}

@@ -1257,6 +1345,9 @@ void ClientChannel::StartIdleTimer() {
}
});
});
auto arena = SimpleArenaAllocator(0)->MakeArena();
arena->SetContext<grpc_event_engine::experimental::EventEngine>(
event_engine());
idle_activity_.Set(MakeActivity(
std::move(promise), ExecCtxWakeupScheduler{},
[self = std::move(self)](absl::Status status) mutable {

@@ -1268,13 +1359,14 @@ void ClientChannel::StartIdleTimer() {
GRPC_CHANNEL_IDLE, absl::OkStatus(),
"channel entering IDLE", nullptr);
// TODO(roth): In case there's a race condition, we
// might need to check for any calls that are queued
// waiting for a resolver result or an LB pick.
// might need to check for any calls that are
// queued waiting for a resolver result or an LB
// pick.
},
DEBUG_LOCATION);
}
},
GetContext<EventEngine>()));
std::move(arena)));
}

absl::Status ClientChannel::ApplyServiceConfigToCall(

@@ -31,6 +31,7 @@

#include "absl/cleanup/cleanup.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"

@@ -46,7 +47,6 @@
#include <grpc/slice.h>
#include <grpc/status.h>
#include <grpc/support/json.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>

@@ -57,6 +57,7 @@
#include "src/core/client_channel/config_selector.h"
#include "src/core/client_channel/dynamic_filters.h"
#include "src/core/client_channel/global_subchannel_pool.h"
#include "src/core/client_channel/lb_metadata.h"
#include "src/core/client_channel/local_subchannel_pool.h"
#include "src/core/client_channel/retry_filter.h"
#include "src/core/client_channel/subchannel.h"

@@ -316,7 +317,7 @@ const grpc_channel_filter ClientChannelFilter::kFilter = {
grpc_channel_stack_no_post_init,
ClientChannelFilter::Destroy,
ClientChannelFilter::GetChannelInfo,
"client-channel",
GRPC_UNIQUE_TYPE_NAME_HERE("client-channel"),
};

//

@@ -443,7 +444,7 @@ const grpc_channel_filter DynamicTerminationFilter::kFilterVtable = {
grpc_channel_stack_no_post_init,
DynamicTerminationFilter::Destroy,
DynamicTerminationFilter::GetChannelInfo,
"dynamic_filter_termination",
GRPC_UNIQUE_TYPE_NAME_HERE("dynamic_filter_termination"),
};

} // namespace

@@ -705,9 +706,9 @@ class ClientChannelFilter::SubchannelWrapper final
}
}
} else {
gpr_log(GPR_ERROR, "chand=%p: Illegal keepalive throttling value %s",
parent_->chand_,
std::string(keepalive_throttling.value()).c_str());
LOG(ERROR) << "chand=" << parent_->chand_
<< ": Illegal keepalive throttling value "
<< std::string(keepalive_throttling.value());
}
}
// Propagate status only in state TF.

@@ -1210,15 +1211,13 @@ RefCountedPtr<LoadBalancingPolicy::Config> ChooseLbPolicy(
.LoadBalancingPolicyExists(*policy_name, &requires_config) ||
requires_config)) {
if (requires_config) {
gpr_log(GPR_ERROR,
"LB policy: %s passed through channel_args must not "
"require a config. Using pick_first instead.",
std::string(*policy_name).c_str());
LOG(ERROR) << "LB policy: " << *policy_name
<< " passed through channel_args must not "
"require a config. Using pick_first instead.";
} else {
gpr_log(GPR_ERROR,
"LB policy: %s passed through channel_args does not exist. "
"Using pick_first instead.",
std::string(*policy_name).c_str());
LOG(ERROR) << "LB policy: " << *policy_name
<< " passed through channel_args does not exist. "
"Using pick_first instead.";
}
policy_name = "pick_first";
}

@@ -2016,8 +2015,9 @@ void ClientChannelFilter::FilterBasedCallData::StartTransportStreamOpBatch(
auto* chand = static_cast<ClientChannelFilter*>(elem->channel_data);
if (GRPC_TRACE_FLAG_ENABLED(client_channel_call) &&
!GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: batch started from above: %s", chand,
calld, grpc_transport_stream_op_batch_string(batch, false).c_str());
LOG(INFO) << "chand=" << chand << " calld=" << calld
<< ": batch started from above: "
<< grpc_transport_stream_op_batch_string(batch, false);
}
// Intercept recv_trailing_metadata to commit the call, in case we wind up
// failing the call before we get down to the retry or LB call layer.

@@ -2159,9 +2159,8 @@ void ClientChannelFilter::FilterBasedCallData::PendingBatchesFail(
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
if (pending_batches_[i] != nullptr) ++num_batches;
}
gpr_log(GPR_INFO,
"chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
chand(), this, num_batches, StatusToString(error).c_str());
LOG(INFO) << "chand=" << chand() << " calld=" << this << ": failing "
<< num_batches << " pending batches: " << StatusToString(error);
}
CallCombinerClosureList closures;
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {

@@ -2202,10 +2201,9 @@ void ClientChannelFilter::FilterBasedCallData::PendingBatchesResume() {
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
if (pending_batches_[i] != nullptr) ++num_batches;
}
gpr_log(GPR_INFO,
"chand=%p calld=%p: starting %" PRIuPTR
" pending batches on dynamic_call=%p",
chand(), this, num_batches, dynamic_call_.get());
LOG(INFO) << "chand=" << chand() << " calld=" << this << ": starting "
<< num_batches
<< " pending batches on dynamic_call=" << dynamic_call_.get();
}
CallCombinerClosureList closures;
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {

@@ -2356,81 +2354,6 @@ class ClientChannelFilter::LoadBalancedCall::LbCallState final
LoadBalancedCall* lb_call_;
};

//
// ClientChannelFilter::LoadBalancedCall::Metadata
//

class ClientChannelFilter::LoadBalancedCall::Metadata final
: public LoadBalancingPolicy::MetadataInterface {
public:
explicit Metadata(grpc_metadata_batch* batch) : batch_(batch) {}

void Add(absl::string_view key, absl::string_view value) override {
if (batch_ == nullptr) return;
// Gross, egregious hack to support legacy grpclb behavior.
// TODO(ctiller): Use a promise context for this once that plumbing is done.
if (key == GrpcLbClientStatsMetadata::key()) {
batch_->Set(
GrpcLbClientStatsMetadata(),
const_cast<GrpcLbClientStats*>(
reinterpret_cast<const GrpcLbClientStats*>(value.data())));
return;
}
batch_->Append(key, Slice::FromStaticString(value),
[key](absl::string_view error, const Slice& value) {
gpr_log(GPR_ERROR, "%s",
absl::StrCat(error, " key:", key,
" value:", value.as_string_view())
.c_str());
});
}

std::vector<std::pair<std::string, std::string>> TestOnlyCopyToVector()
override {
if (batch_ == nullptr) return {};
Encoder encoder;
batch_->Encode(&encoder);
return encoder.Take();
}

absl::optional<absl::string_view> Lookup(absl::string_view key,
std::string* buffer) const override {
if (batch_ == nullptr) return absl::nullopt;
return batch_->GetStringValue(key, buffer);
}

private:
class Encoder final {
public:
void Encode(const Slice& key, const Slice& value) {
out_.emplace_back(std::string(key.as_string_view()),
std::string(value.as_string_view()));
}

template <class Which>
void Encode(Which, const typename Which::ValueType& value) {
auto value_slice = Which::Encode(value);
out_.emplace_back(std::string(Which::key()),
std::string(value_slice.as_string_view()));
}

void Encode(GrpcTimeoutMetadata,
const typename GrpcTimeoutMetadata::ValueType&) {}
void Encode(HttpPathMetadata, const Slice&) {}
void Encode(HttpMethodMetadata,
const typename HttpMethodMetadata::ValueType&) {}

std::vector<std::pair<std::string, std::string>> Take() {
return std::move(out_);
}

private:
std::vector<std::pair<std::string, std::string>> out_;
};

grpc_metadata_batch* batch_;
};

//
// ClientChannelFilter::LoadBalancedCall::LbCallState
//

@@ -2541,7 +2464,7 @@ void ClientChannelFilter::LoadBalancedCall::RecordCallCompletion(
// If the LB policy requested a callback for trailing metadata, invoke
// the callback.
if (lb_subchannel_call_tracker_ != nullptr) {
Metadata trailing_metadata(recv_trailing_metadata);
LbMetadata trailing_metadata(recv_trailing_metadata);
BackendMetricAccessor backend_metric_accessor(this, recv_trailing_metadata);
LoadBalancingPolicy::SubchannelCallTrackerInterface::FinishArgs args = {
peer_address, status, &trailing_metadata, &backend_metric_accessor};

@@ -2688,7 +2611,7 @@ bool ClientChannelFilter::LoadBalancedCall::PickSubchannelImpl(
pick_args.path = path->as_string_view();
LbCallState lb_call_state(this);
pick_args.call_state = &lb_call_state;
Metadata initial_metadata(send_initial_metadata());
LbMetadata initial_metadata(send_initial_metadata());
pick_args.initial_metadata = &initial_metadata;
auto result = picker->Pick(pick_args);
return HandlePickResult<bool>(

@@ -2721,6 +2644,11 @@ bool ClientChannelFilter::LoadBalancedCall::PickSubchannelImpl(
if (lb_subchannel_call_tracker_ != nullptr) {
lb_subchannel_call_tracker_->Start();
}
// Handle metadata mutations.
MetadataMutationHandler::Apply(complete_pick->metadata_mutations,
send_initial_metadata());
MaybeOverrideAuthority(std::move(complete_pick->authority_override),
send_initial_metadata());
return true;
},
// QueuePick

@@ -2847,9 +2775,8 @@ void ClientChannelFilter::FilterBasedLoadBalancedCall::PendingBatchesFail(
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
if (pending_batches_[i] != nullptr) ++num_batches;
}
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: failing %" PRIuPTR " pending batches: %s",
chand(), this, num_batches, StatusToString(error).c_str());
LOG(INFO) << "chand=" << chand() << " lb_call=" << this << ": failing "
<< num_batches << " pending batches: " << StatusToString(error);
}
CallCombinerClosureList closures;
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {

@@ -2889,10 +2816,9 @@ void ClientChannelFilter::FilterBasedLoadBalancedCall::PendingBatchesResume() {
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
if (pending_batches_[i] != nullptr) ++num_batches;
}
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: starting %" PRIuPTR
" pending batches on subchannel_call=%p",
chand(), this, num_batches, subchannel_call_.get());
LOG(INFO) << "chand=" << chand() << " lb_call=" << this << ": starting "
<< num_batches << " pending batches on subchannel_call="
<< subchannel_call_.get();
}
CallCombinerClosureList closures;
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {

@@ -2915,12 +2841,10 @@ void ClientChannelFilter::FilterBasedLoadBalancedCall::
StartTransportStreamOpBatch(grpc_transport_stream_op_batch* batch) {
if (GRPC_TRACE_FLAG_ENABLED(client_channel_lb_call) ||
GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: batch started from above: %s, "
"call_attempt_tracer()=%p",
chand(), this,
grpc_transport_stream_op_batch_string(batch, false).c_str(),
call_attempt_tracer());
LOG(INFO) << "chand=" << chand() << " lb_call=" << this
<< ": batch started from above: "
<< grpc_transport_stream_op_batch_string(batch, false)
<< ", call_attempt_tracer()=" << call_attempt_tracer();
}
// Handle call tracing.
if (call_attempt_tracer() != nullptr) {

@@ -26,6 +26,7 @@

#include <grpc/support/log.h>

#include "src/core/lib/gprpp/down_cast.h"
#include "src/core/lib/gprpp/unique_type_name.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/transport/call_destination.h"

@@ -49,6 +50,11 @@ namespace grpc_core {
// LB policies to access internal call attributes.
class ClientChannelLbCallState : public LoadBalancingPolicy::CallState {
public:
template <typename A>
A* GetCallAttribute() const {
return DownCast<A*>(GetCallAttribute(A::TypeName()));
}

virtual ServiceConfigCallData::CallAttributeInterface* GetCallAttribute(
UniqueTypeName type) const = 0;
virtual ClientCallTracer::CallAttemptTracer* GetCallAttemptTracer() const = 0;

@ -0,0 +1,83 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "src/core/client_channel/direct_channel.h"

#include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/event_engine/event_engine_context.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/surface/channel_stack_type.h"
#include "src/core/lib/surface/client_call.h"
#include "src/core/lib/transport/interception_chain.h"

namespace grpc_core {

absl::StatusOr<RefCountedPtr<DirectChannel>> DirectChannel::Create(
    std::string target, const ChannelArgs& args) {
  auto* transport = args.GetObject<Transport>();
  if (transport == nullptr) {
    return absl::InvalidArgumentError("Transport not set in ChannelArgs");
  }
  if (transport->client_transport() == nullptr) {
    return absl::InvalidArgumentError("Transport is not a client transport");
  }
  auto transport_call_destination = MakeRefCounted<TransportCallDestination>(
      OrphanablePtr<ClientTransport>(transport->client_transport()));
  auto event_engine =
      args.GetObjectRef<grpc_event_engine::experimental::EventEngine>();
  if (event_engine == nullptr) {
    return absl::InvalidArgumentError("EventEngine not set in ChannelArgs");
  }
  InterceptionChainBuilder builder(args);
  CoreConfiguration::Get().channel_init().AddToInterceptionChainBuilder(
      GRPC_CLIENT_DIRECT_CHANNEL, builder);
  auto interception_chain = builder.Build(transport_call_destination);
  if (!interception_chain.ok()) return interception_chain.status();
  return MakeRefCounted<DirectChannel>(
      std::move(target), args, std::move(event_engine),
      std::move(transport_call_destination), std::move(*interception_chain));
}

void DirectChannel::Orphaned() {
  transport_call_destination_.reset();
  interception_chain_.reset();
}

void DirectChannel::StartCall(UnstartedCallHandler unstarted_handler) {
  unstarted_handler.SpawnInfallible(
      "start",
      [interception_chain = interception_chain_, unstarted_handler]() mutable {
        interception_chain->StartCall(std::move(unstarted_handler));
        return []() { return Empty{}; };
      });
}

void DirectChannel::GetInfo(const grpc_channel_info*) {
  // TODO(roth): Implement this.
}

grpc_call* DirectChannel::CreateCall(
    grpc_call* parent_call, uint32_t propagation_mask,
    grpc_completion_queue* cq, grpc_pollset_set* /*pollset_set_alternative*/,
    Slice path, absl::optional<Slice> authority, Timestamp deadline,
    bool /*registered_method*/) {
  auto arena = call_arena_allocator()->MakeArena();
  arena->SetContext<grpc_event_engine::experimental::EventEngine>(
      event_engine_.get());
  return MakeClientCall(parent_call, propagation_mask, cq, std::move(path),
                        std::move(authority), false, deadline,
                        compression_options(), std::move(arena), Ref());
}

}  // namespace grpc_core
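A hypothetical caller sketch for the factory above, showing how its absl::StatusOr result might be consumed. The target string is illustrative, and `args` is assumed to already carry a client Transport and an EventEngine, which Create() checks for:

// Hypothetical usage sketch, not code from this diff.
#include "absl/log/log.h"
#include "src/core/client_channel/direct_channel.h"

void MaybeMakeDirectChannel(const grpc_core::ChannelArgs& args) {
  auto channel = grpc_core::DirectChannel::Create("example-target", args);
  if (!channel.ok()) {
    LOG(ERROR) << "DirectChannel creation failed: " << channel.status();
    return;
  }
  // (*channel) can now be used like any other Channel, e.g. via CreateCall().
}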
@ -0,0 +1,101 @@
|
|||
// Copyright 2024 gRPC authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#ifndef GRPC_SRC_CORE_CLIENT_CHANNEL_DIRECT_CHANNEL_H
|
||||
#define GRPC_SRC_CORE_CLIENT_CHANNEL_DIRECT_CHANNEL_H
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "src/core/lib/surface/channel.h"
|
||||
#include "src/core/lib/transport/transport.h"
|
||||
|
||||
namespace grpc_core {
|
||||
|
||||
class DirectChannel final : public Channel {
|
||||
public:
|
||||
class TransportCallDestination final : public CallDestination {
|
||||
public:
|
||||
explicit TransportCallDestination(OrphanablePtr<ClientTransport> transport)
|
||||
: transport_(std::move(transport)) {}
|
||||
|
||||
ClientTransport* transport() { return transport_.get(); }
|
||||
|
||||
void HandleCall(CallHandler handler) override {
|
||||
transport_->StartCall(std::move(handler));
|
||||
}
|
||||
|
||||
void Orphaned() override { transport_.reset(); }
|
||||
|
||||
private:
|
||||
OrphanablePtr<ClientTransport> transport_;
|
||||
};
|
||||
|
||||
static absl::StatusOr<RefCountedPtr<DirectChannel>> Create(
|
||||
std::string target, const ChannelArgs& args);
|
||||
|
||||
DirectChannel(
|
||||
std::string target, const ChannelArgs& args,
|
||||
std::shared_ptr<grpc_event_engine::experimental::EventEngine>
|
||||
event_engine,
|
||||
RefCountedPtr<TransportCallDestination> transport_call_destination,
|
||||
RefCountedPtr<UnstartedCallDestination> interception_chain)
|
||||
: Channel(std::move(target), args),
|
||||
transport_call_destination_(std::move(transport_call_destination)),
|
||||
interception_chain_(std::move(interception_chain)),
|
||||
event_engine_(std::move(event_engine)) {}
|
||||
|
||||
void Orphaned() override;
|
||||
void StartCall(UnstartedCallHandler unstarted_handler) override;
|
||||
bool IsLame() const override { return false; }
|
||||
grpc_call* CreateCall(grpc_call* parent_call, uint32_t propagation_mask,
|
||||
grpc_completion_queue* cq,
|
||||
grpc_pollset_set* pollset_set_alternative, Slice path,
|
||||
absl::optional<Slice> authority, Timestamp deadline,
|
||||
bool registered_method) override;
|
||||
grpc_event_engine::experimental::EventEngine* event_engine() const override {
|
||||
return event_engine_.get();
|
||||
}
|
||||
bool SupportsConnectivityWatcher() const override { return false; }
|
||||
grpc_connectivity_state CheckConnectivityState(bool) override {
|
||||
Crash("CheckConnectivityState not supported");
|
||||
}
|
||||
void WatchConnectivityState(grpc_connectivity_state, Timestamp,
|
||||
grpc_completion_queue*, void*) override {
|
||||
Crash("WatchConnectivityState not supported");
|
||||
}
|
||||
void AddConnectivityWatcher(
|
||||
grpc_connectivity_state,
|
||||
OrphanablePtr<AsyncConnectivityStateWatcherInterface>) override {
|
||||
Crash("AddConnectivityWatcher not supported");
|
||||
}
|
||||
void RemoveConnectivityWatcher(
|
||||
AsyncConnectivityStateWatcherInterface*) override {
|
||||
Crash("RemoveConnectivityWatcher not supported");
|
||||
}
|
||||
void GetInfo(const grpc_channel_info* channel_info) override;
|
||||
void ResetConnectionBackoff() override {}
|
||||
void Ping(grpc_completion_queue*, void*) override {
|
||||
Crash("Ping not supported");
|
||||
}
|
||||
|
||||
private:
|
||||
RefCountedPtr<TransportCallDestination> transport_call_destination_;
|
||||
RefCountedPtr<UnstartedCallDestination> interception_chain_;
|
||||
const std::shared_ptr<grpc_event_engine::experimental::EventEngine>
|
||||
event_engine_;
|
||||
};
|
||||
|
||||
} // namespace grpc_core
|
||||
|
||||
#endif // GRPC_SRC_CORE_CLIENT_CHANNEL_DIRECT_CHANNEL_H
|
|
@ -78,7 +78,9 @@ void DynamicFilters::Call::StartTransportStreamOpBatch(
    grpc_transport_stream_op_batch* batch) {
  grpc_call_stack* call_stack = CALL_TO_CALL_STACK(this);
  grpc_call_element* top_elem = grpc_call_stack_element(call_stack, 0);
  GRPC_CALL_LOG_OP(GPR_INFO, top_elem, batch);
  GRPC_TRACE_LOG(channel, INFO)
      << "OP[" << top_elem->filter->name << ":" << top_elem
      << "]: " << grpc_transport_stream_op_batch_string(batch, false);
  top_elem->filter->start_transport_stream_op_batch(top_elem, batch);
}
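This hunk swaps the GRPC_CALL_LOG_OP macro for the streaming GRPC_TRACE_LOG form, which only emits the message when the named trace flag ("channel" here) is enabled. A conceptual, standalone sketch of that guarded-log behavior; trace_flag_enabled() is a stand-in helper, not a real gRPC API:

// Conceptual sketch only: GRPC_TRACE_LOG(channel, INFO) << ... behaves like a
// log statement guarded by the "channel" trace flag.
#include "absl/log/log.h"

bool trace_flag_enabled(const char* /*flag*/) { return true; }  // stand-in

void LogOp(const char* filter_name, const void* elem, const char* batch_str) {
  if (trace_flag_enabled("channel")) {
    LOG(INFO) << "OP[" << filter_name << ":" << elem << "]: " << batch_str;
  }
}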
@ -0,0 +1,120 @@
|
|||
// Copyright 2024 gRPC authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "src/core/client_channel/lb_metadata.h"
|
||||
|
||||
#include "absl/log/log.h"
|
||||
|
||||
namespace grpc_core {
|
||||
|
||||
//
|
||||
// LbMetadata
|
||||
//
|
||||
|
||||
namespace {
|
||||
|
||||
class Encoder {
|
||||
public:
|
||||
void Encode(const Slice& key, const Slice& value) {
|
||||
out_.emplace_back(std::string(key.as_string_view()),
|
||||
std::string(value.as_string_view()));
|
||||
}
|
||||
|
||||
template <class Which>
|
||||
void Encode(Which, const typename Which::ValueType& value) {
|
||||
auto value_slice = Which::Encode(value);
|
||||
out_.emplace_back(std::string(Which::key()),
|
||||
std::string(value_slice.as_string_view()));
|
||||
}
|
||||
|
||||
void Encode(GrpcTimeoutMetadata,
|
||||
const typename GrpcTimeoutMetadata::ValueType&) {}
|
||||
void Encode(HttpPathMetadata, const Slice&) {}
|
||||
void Encode(HttpMethodMetadata,
|
||||
const typename HttpMethodMetadata::ValueType&) {}
|
||||
|
||||
std::vector<std::pair<std::string, std::string>> Take() {
|
||||
return std::move(out_);
|
||||
}
|
||||
|
||||
private:
|
||||
std::vector<std::pair<std::string, std::string>> out_;
|
||||
};
|
||||
|
||||
} // namespace
|
||||
|
||||
absl::optional<absl::string_view> LbMetadata::Lookup(
|
||||
absl::string_view key, std::string* buffer) const {
|
||||
if (batch_ == nullptr) return absl::nullopt;
|
||||
return batch_->GetStringValue(key, buffer);
|
||||
}
|
||||
|
||||
std::vector<std::pair<std::string, std::string>>
|
||||
LbMetadata::TestOnlyCopyToVector() const {
|
||||
if (batch_ == nullptr) return {};
|
||||
Encoder encoder;
|
||||
batch_->Encode(&encoder);
|
||||
return encoder.Take();
|
||||
}
|
||||
|
||||
//
|
||||
// MetadataMutationHandler
|
||||
//
|
||||
|
||||
void MetadataMutationHandler::Apply(
|
||||
LoadBalancingPolicy::MetadataMutations& metadata_mutations,
|
||||
grpc_metadata_batch* metadata) {
|
||||
for (auto& p : metadata_mutations.metadata_) {
|
||||
absl::string_view key = p.first;
|
||||
Slice& value =
|
||||
grpc_event_engine::experimental::internal::SliceCast<Slice>(p.second);
|
||||
// TODO(roth): Should we prevent this from setting special keys like
|
||||
// :authority, :path, content-type, etc?
|
||||
metadata->Remove(key);
|
||||
// Gross, egregious hack to support legacy grpclb behavior.
|
||||
// TODO(ctiller): Use a promise context for this once that plumbing is done.
|
||||
if (key == GrpcLbClientStatsMetadata::key()) {
|
||||
metadata->Set(
|
||||
GrpcLbClientStatsMetadata(),
|
||||
const_cast<GrpcLbClientStats*>(
|
||||
reinterpret_cast<const GrpcLbClientStats*>(value.data())));
|
||||
continue;
|
||||
}
|
||||
metadata->Append(key, std::move(value),
|
||||
[key](absl::string_view error, const Slice& value) {
|
||||
LOG(ERROR) << error << " key:" << key
|
||||
<< " value:" << value.as_string_view();
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// MaybeOverrideAuthority()
|
||||
//
|
||||
|
||||
void MaybeOverrideAuthority(
|
||||
grpc_event_engine::experimental::Slice authority_override,
|
||||
grpc_metadata_batch* metadata) {
|
||||
// Skip if no override requested.
|
||||
if (authority_override.empty()) return;
|
||||
// Skip if authority already set by the application on this RPC.
|
||||
if (metadata->get_pointer(HttpAuthorityMetadata()) != nullptr) return;
|
||||
// Otherwise, apply override.
|
||||
Slice& authority =
|
||||
grpc_event_engine::experimental::internal::SliceCast<Slice>(
|
||||
authority_override);
|
||||
metadata->Set(HttpAuthorityMetadata(), std::move(authority));
|
||||
}
|
||||
|
||||
} // namespace grpc_core
|
|
@ -0,0 +1,56 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef GRPC_SRC_CORE_CLIENT_CHANNEL_LB_METADATA_H
#define GRPC_SRC_CORE_CLIENT_CHANNEL_LB_METADATA_H

#include <string>
#include <vector>

#include "absl/strings/string_view.h"
#include "absl/types/optional.h"

#include <grpc/event_engine/slice.h>

#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/load_balancing/lb_policy.h"

namespace grpc_core {

class LbMetadata : public LoadBalancingPolicy::MetadataInterface {
 public:
  explicit LbMetadata(grpc_metadata_batch* batch) : batch_(batch) {}

  absl::optional<absl::string_view> Lookup(absl::string_view key,
                                           std::string* buffer) const override;

  std::vector<std::pair<std::string, std::string>> TestOnlyCopyToVector() const;

 private:
  grpc_metadata_batch* batch_;
};

class MetadataMutationHandler {
 public:
  static void Apply(LoadBalancingPolicy::MetadataMutations& metadata_mutations,
                    grpc_metadata_batch* metadata);
};

void MaybeOverrideAuthority(
    grpc_event_engine::experimental::Slice authority_override,
    grpc_metadata_batch* metadata);

}  // namespace grpc_core

#endif  // GRPC_SRC_CORE_CLIENT_CHANNEL_LB_METADATA_H
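A hypothetical sketch of how a caller might consult the LbMetadata interface declared above: Lookup() resolves a single header value (using the caller-provided buffer as backing storage if decoding is needed), and TestOnlyCopyToVector() flattens the batch into printable pairs for tests. The header name and the `batch` parameter are illustrative assumptions:

// Hypothetical sketch; "x-affinity-cookie" is an illustrative header name and
// `batch` is assumed to point at a populated grpc_metadata_batch.
#include <string>

#include "absl/log/log.h"
#include "src/core/client_channel/lb_metadata.h"

void InspectPickMetadata(grpc_metadata_batch* batch) {
  grpc_core::LbMetadata md(batch);
  std::string buffer;  // backing storage in case the value must be decoded
  auto cookie = md.Lookup("x-affinity-cookie", &buffer);
  if (cookie.has_value()) {
    LOG(INFO) << "affinity cookie: " << *cookie;
  }
  // Test helper: flatten the whole batch into key/value pairs.
  for (const auto& kv : md.TestOnlyCopyToVector()) {
    LOG(INFO) << kv.first << ": " << kv.second;
  }
}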
@ -14,8 +14,11 @@
|
|||
|
||||
#include "src/core/client_channel/load_balanced_call_destination.h"
|
||||
|
||||
#include "absl/log/log.h"
|
||||
|
||||
#include "src/core/client_channel/client_channel.h"
|
||||
#include "src/core/client_channel/client_channel_internal.h"
|
||||
#include "src/core/client_channel/lb_metadata.h"
|
||||
#include "src/core/client_channel/subchannel.h"
|
||||
#include "src/core/lib/channel/status_util.h"
|
||||
#include "src/core/lib/config/core_configuration.h"
|
||||
|
@ -26,76 +29,6 @@ namespace grpc_core {
|
|||
|
||||
namespace {
|
||||
|
||||
class LbMetadata : public LoadBalancingPolicy::MetadataInterface {
|
||||
public:
|
||||
explicit LbMetadata(grpc_metadata_batch* batch) : batch_(batch) {}
|
||||
|
||||
void Add(absl::string_view key, absl::string_view value) override {
|
||||
if (batch_ == nullptr) return;
|
||||
// Gross, egregious hack to support legacy grpclb behavior.
|
||||
// TODO(ctiller): Use a promise context for this once that plumbing is done.
|
||||
if (key == GrpcLbClientStatsMetadata::key()) {
|
||||
batch_->Set(
|
||||
GrpcLbClientStatsMetadata(),
|
||||
const_cast<GrpcLbClientStats*>(
|
||||
reinterpret_cast<const GrpcLbClientStats*>(value.data())));
|
||||
return;
|
||||
}
|
||||
batch_->Append(key, Slice::FromStaticString(value),
|
||||
[key](absl::string_view error, const Slice& value) {
|
||||
gpr_log(GPR_ERROR, "%s",
|
||||
absl::StrCat(error, " key:", key,
|
||||
" value:", value.as_string_view())
|
||||
.c_str());
|
||||
});
|
||||
}
|
||||
|
||||
std::vector<std::pair<std::string, std::string>> TestOnlyCopyToVector()
|
||||
override {
|
||||
if (batch_ == nullptr) return {};
|
||||
Encoder encoder;
|
||||
batch_->Encode(&encoder);
|
||||
return encoder.Take();
|
||||
}
|
||||
|
||||
absl::optional<absl::string_view> Lookup(absl::string_view key,
|
||||
std::string* buffer) const override {
|
||||
if (batch_ == nullptr) return absl::nullopt;
|
||||
return batch_->GetStringValue(key, buffer);
|
||||
}
|
||||
|
||||
private:
|
||||
class Encoder {
|
||||
public:
|
||||
void Encode(const Slice& key, const Slice& value) {
|
||||
out_.emplace_back(std::string(key.as_string_view()),
|
||||
std::string(value.as_string_view()));
|
||||
}
|
||||
|
||||
template <class Which>
|
||||
void Encode(Which, const typename Which::ValueType& value) {
|
||||
auto value_slice = Which::Encode(value);
|
||||
out_.emplace_back(std::string(Which::key()),
|
||||
std::string(value_slice.as_string_view()));
|
||||
}
|
||||
|
||||
void Encode(GrpcTimeoutMetadata,
|
||||
const typename GrpcTimeoutMetadata::ValueType&) {}
|
||||
void Encode(HttpPathMetadata, const Slice&) {}
|
||||
void Encode(HttpMethodMetadata,
|
||||
const typename HttpMethodMetadata::ValueType&) {}
|
||||
|
||||
std::vector<std::pair<std::string, std::string>> Take() {
|
||||
return std::move(out_);
|
||||
}
|
||||
|
||||
private:
|
||||
std::vector<std::pair<std::string, std::string>> out_;
|
||||
};
|
||||
|
||||
grpc_metadata_batch* batch_;
|
||||
};
|
||||
|
||||
void MaybeCreateCallAttemptTracer(bool is_transparent_retry) {
|
||||
auto* call_tracer = MaybeGetContext<ClientCallTracer>();
|
||||
if (call_tracer == nullptr) return;
|
||||
|
@ -208,6 +141,11 @@ LoopCtl<absl::StatusOr<RefCountedPtr<UnstartedCallDestination>>> PickSubchannel(
|
|||
complete_pick->subchannel_call_tracker->Start();
|
||||
SetContext(complete_pick->subchannel_call_tracker.release());
|
||||
}
|
||||
// Apply metadata mutations, if any.
|
||||
MetadataMutationHandler::Apply(complete_pick->metadata_mutations,
|
||||
&client_initial_metadata);
|
||||
MaybeOverrideAuthority(std::move(complete_pick->authority_override),
|
||||
&client_initial_metadata);
|
||||
// Return the connected subchannel.
|
||||
return call_destination;
|
||||
},
|
||||
|
|
|
@ -147,7 +147,7 @@ const grpc_channel_filter RetryFilter::kVtable = {
    grpc_channel_stack_no_post_init,
    RetryFilter::Destroy,
    RetryFilter::GetChannelInfo,
    "retry_filter",
    GRPC_UNIQUE_TYPE_NAME_HERE("retry_filter"),
};

}  // namespace grpc_core
@ -22,11 +22,10 @@
|
|||
#include <new>
|
||||
|
||||
#include "absl/log/check.h"
|
||||
#include "absl/log/log.h"
|
||||
#include "absl/status/status.h"
|
||||
#include "absl/strings/str_cat.h"
|
||||
|
||||
#include <grpc/support/log.h>
|
||||
|
||||
#include "src/core/client_channel/client_channel_internal.h"
|
||||
#include "src/core/client_channel/retry_service_config.h"
|
||||
#include "src/core/client_channel/retry_throttle.h"
|
||||
|
@ -141,9 +140,9 @@ RetryFilter::LegacyCallData::CallAttempt::CallAttempt(
|
|||
},
|
||||
is_transparent_retry);
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p: created attempt, lb_call=%p",
|
||||
calld->chand_, calld, this, lb_call_.get());
|
||||
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
|
||||
<< " attempt=" << this
|
||||
<< ": created attempt, lb_call=" << lb_call_.get();
|
||||
}
|
||||
// If per_attempt_recv_timeout is set, start a timer.
|
||||
if (calld->retry_policy_ != nullptr &&
|
||||
|
@ -151,10 +150,9 @@ RetryFilter::LegacyCallData::CallAttempt::CallAttempt(
|
|||
const Duration per_attempt_recv_timeout =
|
||||
*calld->retry_policy_->per_attempt_recv_timeout();
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p: per-attempt timeout in %" PRId64
|
||||
" ms",
|
||||
calld->chand_, calld, this, per_attempt_recv_timeout.millis());
|
||||
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
|
||||
<< " attempt=" << this << ": per-attempt timeout in "
|
||||
<< per_attempt_recv_timeout.millis() << " ms";
|
||||
}
|
||||
// Schedule retry after computed delay.
|
||||
GRPC_CALL_STACK_REF(calld->owning_call_, "OnPerAttemptRecvTimer");
|
||||
|
@ -170,8 +168,8 @@ RetryFilter::LegacyCallData::CallAttempt::CallAttempt(
|
|||
|
||||
RetryFilter::LegacyCallData::CallAttempt::~CallAttempt() {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: destroying call attempt",
|
||||
calld_->chand_, calld_, this);
|
||||
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
|
||||
<< " attempt=" << this << ": destroying call attempt";
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -237,10 +235,10 @@ void RetryFilter::LegacyCallData::CallAttempt::MaybeSwitchToFastPath() {
|
|||
if (recv_trailing_metadata_internal_batch_ != nullptr) return;
|
||||
// Switch to fast path.
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p: retry state no longer needed; "
|
||||
"moving LB call to parent and unreffing the call attempt",
|
||||
calld_->chand_, calld_, this);
|
||||
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
|
||||
<< " attempt=" << this
|
||||
<< ": retry state no longer needed; "
|
||||
"moving LB call to parent and unreffing the call attempt";
|
||||
}
|
||||
calld_->committed_call_ = std::move(lb_call_);
|
||||
calld_->call_attempt_.reset(DEBUG_LOCATION, "MaybeSwitchToFastPath");
|
||||
|
@ -256,10 +254,10 @@ RetryFilter::LegacyCallData::CallAttempt::MaybeCreateBatchForReplay() {
|
|||
if (calld_->seen_send_initial_metadata_ && !started_send_initial_metadata_ &&
|
||||
!calld_->pending_send_initial_metadata_) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p: replaying previously completed "
|
||||
"send_initial_metadata op",
|
||||
calld_->chand_, calld_, this);
|
||||
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
|
||||
<< " attempt=" << this
|
||||
<< ": replaying previously completed "
|
||||
"send_initial_metadata op";
|
||||
}
|
||||
replay_batch_data = CreateBatch(1, true /* set_on_complete */);
|
||||
replay_batch_data->AddRetriableSendInitialMetadataOp();
|
||||
|
@ -270,10 +268,10 @@ RetryFilter::LegacyCallData::CallAttempt::MaybeCreateBatchForReplay() {
|
|||
started_send_message_count_ == completed_send_message_count_ &&
|
||||
!calld_->pending_send_message_) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p: replaying previously completed "
|
||||
"send_message op",
|
||||
calld_->chand_, calld_, this);
|
||||
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
|
||||
<< " attempt=" << this
|
||||
<< ": replaying previously completed "
|
||||
"send_message op";
|
||||
}
|
||||
if (replay_batch_data == nullptr) {
|
||||
replay_batch_data = CreateBatch(1, true /* set_on_complete */);
|
||||
|
@ -289,10 +287,10 @@ RetryFilter::LegacyCallData::CallAttempt::MaybeCreateBatchForReplay() {
|
|||
!started_send_trailing_metadata_ &&
|
||||
!calld_->pending_send_trailing_metadata_) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p: replaying previously completed "
|
||||
"send_trailing_metadata op",
|
||||
calld_->chand_, calld_, this);
|
||||
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
|
||||
<< " attempt=" << this
|
||||
<< ": replaying previously completed "
|
||||
"send_trailing_metadata op";
|
||||
}
|
||||
if (replay_batch_data == nullptr) {
|
||||
replay_batch_data = CreateBatch(1, true /* set_on_complete */);
|
||||
|
@ -320,9 +318,9 @@ void RetryFilter::LegacyCallData::CallAttempt::AddClosureForBatch(
|
|||
grpc_transport_stream_op_batch* batch, const char* reason,
|
||||
CallCombinerClosureList* closures) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: adding batch (%s): %s",
|
||||
calld_->chand_, calld_, this, reason,
|
||||
grpc_transport_stream_op_batch_string(batch, false).c_str());
|
||||
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
|
||||
<< " attempt=" << this << ": adding batch (" << reason
|
||||
<< "): " << grpc_transport_stream_op_batch_string(batch, false);
|
||||
}
|
||||
batch->handler_private.extra_arg = lb_call_.get();
|
||||
GRPC_CLOSURE_INIT(&batch->handler_private.closure, StartBatchInCallCombiner,
|
||||
|
@ -333,10 +331,10 @@ void RetryFilter::LegacyCallData::CallAttempt::AddClosureForBatch(
|
|||
void RetryFilter::LegacyCallData::CallAttempt::
|
||||
AddBatchForInternalRecvTrailingMetadata(CallCombinerClosureList* closures) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p: call failed but "
|
||||
"recv_trailing_metadata not started; starting it internally",
|
||||
calld_->chand_, calld_, this);
|
||||
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
|
||||
<< " attempt=" << this
|
||||
<< ": call failed but recv_trailing_metadata not started; "
|
||||
"starting it internally";
|
||||
}
|
||||
// Create batch_data with 2 refs, since this batch will be unreffed twice:
|
||||
// once for the recv_trailing_metadata_ready callback when the batch
|
||||
|
@ -523,9 +521,8 @@ void RetryFilter::LegacyCallData::CallAttempt::AddRetriableBatches(
|
|||
|
||||
void RetryFilter::LegacyCallData::CallAttempt::StartRetriableBatches() {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p: constructing retriable batches",
|
||||
calld_->chand_, calld_, this);
|
||||
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
|
||||
<< " attempt=" << this << ": constructing retriable batches";
|
||||
}
|
||||
// Construct list of closures to execute, one for each pending batch.
|
||||
CallCombinerClosureList closures;
|
||||
|
@ -533,10 +530,9 @@ void RetryFilter::LegacyCallData::CallAttempt::StartRetriableBatches() {
|
|||
// Note: This will yield the call combiner.
|
||||
// Start batches on LB call.
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p: starting %" PRIuPTR
|
||||
" retriable batches on lb_call=%p",
|
||||
calld_->chand_, calld_, this, closures.size(), lb_call_.get());
|
||||
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
|
||||
<< " attempt=" << this << ": starting " << closures.size()
|
||||
<< " retriable batches on lb_call=" << lb_call_.get();
|
||||
}
|
||||
closures.RunClosures(calld_->call_combiner_);
|
||||
}
|
||||
|
@ -561,19 +557,18 @@ bool RetryFilter::LegacyCallData::CallAttempt::ShouldRetry(
|
|||
calld_->retry_throttle_data_->RecordSuccess();
|
||||
}
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: call succeeded",
|
||||
calld_->chand_, calld_, this);
|
||||
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
|
||||
<< " attempt=" << this << ": call succeeded";
|
||||
}
|
||||
return false;
|
||||
}
|
||||
// Status is not OK. Check whether the status is retryable.
|
||||
if (!calld_->retry_policy_->retryable_status_codes().Contains(*status)) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p: status %s not configured as "
|
||||
"retryable",
|
||||
calld_->chand_, calld_, this,
|
||||
grpc_status_code_to_string(*status));
|
||||
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
|
||||
<< " attempt=" << this << ": status "
|
||||
<< grpc_status_code_to_string(*status)
|
||||
<< " not configured as retryable";
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
@ -588,17 +583,16 @@ bool RetryFilter::LegacyCallData::CallAttempt::ShouldRetry(
|
|||
if (calld_->retry_throttle_data_ != nullptr &&
|
||||
!calld_->retry_throttle_data_->RecordFailure()) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: retries throttled",
|
||||
calld_->chand_, calld_, this);
|
||||
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
|
||||
<< " attempt=" << this << ": retries throttled";
|
||||
}
|
||||
return false;
|
||||
}
|
||||
// Check whether the call is committed.
|
||||
if (calld_->retry_committed_) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p: retries already committed",
|
||||
calld_->chand_, calld_, this);
|
||||
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
|
||||
<< " attempt=" << this << ": retries already committed";
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
@ -607,9 +601,9 @@ bool RetryFilter::LegacyCallData::CallAttempt::ShouldRetry(
|
|||
if (calld_->num_attempts_completed_ >=
|
||||
calld_->retry_policy_->max_attempts()) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(
|
||||
GPR_INFO, "chand=%p calld=%p attempt=%p: exceeded %d retry attempts",
|
||||
calld_->chand_, calld_, this, calld_->retry_policy_->max_attempts());
|
||||
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
|
||||
<< " attempt=" << this << ": exceeded "
|
||||
<< calld_->retry_policy_->max_attempts() << " retry attempts";
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
@ -617,19 +611,16 @@ bool RetryFilter::LegacyCallData::CallAttempt::ShouldRetry(
|
|||
if (server_pushback.has_value()) {
|
||||
if (*server_pushback < Duration::Zero()) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p: not retrying due to server "
|
||||
"push-back",
|
||||
calld_->chand_, calld_, this);
|
||||
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
|
||||
<< " attempt=" << this
|
||||
<< ": not retrying due to server push-back";
|
||||
}
|
||||
return false;
|
||||
} else {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(
|
||||
GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p: server push-back: retry in %" PRIu64
|
||||
" ms",
|
||||
calld_->chand_, calld_, this, server_pushback->millis());
|
||||
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
|
||||
<< " attempt=" << this << ": server push-back: retry in "
|
||||
<< server_pushback->millis() << " ms";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -675,11 +666,12 @@ void RetryFilter::LegacyCallData::CallAttempt::OnPerAttemptRecvTimerLocked(
|
|||
auto* call_attempt = static_cast<CallAttempt*>(arg);
|
||||
auto* calld = call_attempt->calld_;
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p: perAttemptRecvTimeout timer fired: "
|
||||
"error=%s, per_attempt_recv_timer_handle_.has_value()=%d",
|
||||
calld->chand_, calld, call_attempt, StatusToString(error).c_str(),
|
||||
call_attempt->per_attempt_recv_timer_handle_.has_value());
|
||||
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
|
||||
<< " attempt=" << call_attempt
|
||||
<< ": perAttemptRecvTimeout timer fired: error="
|
||||
<< StatusToString(error)
|
||||
<< ", per_attempt_recv_timer_handle_.has_value()="
|
||||
<< call_attempt->per_attempt_recv_timer_handle_.has_value();
|
||||
}
|
||||
CallCombinerClosureList closures;
|
||||
call_attempt->per_attempt_recv_timer_handle_.reset();
|
||||
|
@ -714,10 +706,9 @@ void RetryFilter::LegacyCallData::CallAttempt::
|
|||
MaybeCancelPerAttemptRecvTimer() {
|
||||
if (per_attempt_recv_timer_handle_.has_value()) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p: cancelling "
|
||||
"perAttemptRecvTimeout timer",
|
||||
calld_->chand_, calld_, this);
|
||||
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
|
||||
<< " attempt=" << this
|
||||
<< ": cancelling perAttemptRecvTimeout timer";
|
||||
}
|
||||
if (calld_->chand_->event_engine()->Cancel(
|
||||
*per_attempt_recv_timer_handle_)) {
|
||||
|
@ -738,9 +729,9 @@ RetryFilter::LegacyCallData::CallAttempt::BatchData::BatchData(
|
|||
refcount),
|
||||
call_attempt_(attempt.release()) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: creating batch %p",
|
||||
call_attempt_->calld_->chand_, call_attempt_->calld_, call_attempt_,
|
||||
this);
|
||||
LOG(INFO) << "chand=" << call_attempt_->calld_->chand_
|
||||
<< " calld=" << call_attempt_->calld_
|
||||
<< " attempt=" << call_attempt_ << ": creating batch " << this;
|
||||
}
|
||||
// We hold a ref to the call stack for every batch sent on a call attempt.
|
||||
// This is because some batches on the call attempt may not complete
|
||||
|
@ -759,9 +750,9 @@ RetryFilter::LegacyCallData::CallAttempt::BatchData::BatchData(
|
|||
|
||||
RetryFilter::LegacyCallData::CallAttempt::BatchData::~BatchData() {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: destroying batch %p",
|
||||
call_attempt_->calld_->chand_, call_attempt_->calld_, call_attempt_,
|
||||
this);
|
||||
LOG(INFO) << "chand=" << call_attempt_->calld_->chand_
|
||||
<< " calld=" << call_attempt_->calld_
|
||||
<< " attempt=" << call_attempt_ << ": destroying batch " << this;
|
||||
}
|
||||
CallAttempt* call_attempt = std::exchange(call_attempt_, nullptr);
|
||||
grpc_call_stack* owning_call = call_attempt->calld_->owning_call_;
|
||||
|
@ -832,11 +823,11 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
|
|||
CallAttempt* call_attempt = batch_data->call_attempt_;
|
||||
RetryFilter::LegacyCallData* calld = call_attempt->calld_;
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p batch_data=%p: "
|
||||
"got recv_initial_metadata_ready, error=%s",
|
||||
calld->chand_, calld, call_attempt, batch_data.get(),
|
||||
StatusToString(error).c_str());
|
||||
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
|
||||
<< " attempt=" << call_attempt
|
||||
<< " batch_data=" << batch_data.get()
|
||||
<< ": got recv_initial_metadata_ready, error="
|
||||
<< StatusToString(error);
|
||||
}
|
||||
call_attempt->completed_recv_initial_metadata_ = true;
|
||||
// If this attempt has been abandoned, then we're not going to use the
|
||||
|
@ -859,10 +850,9 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
|
|||
(call_attempt->trailing_metadata_available_ || !error.ok()) &&
|
||||
!call_attempt->completed_recv_trailing_metadata_)) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p: deferring "
|
||||
"recv_initial_metadata_ready (Trailers-Only)",
|
||||
calld->chand_, calld, call_attempt);
|
||||
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
|
||||
<< " attempt=" << call_attempt
|
||||
<< ": deferring recv_initial_metadata_ready (Trailers-Only)";
|
||||
}
|
||||
call_attempt->recv_initial_metadata_ready_deferred_batch_ =
|
||||
std::move(batch_data);
|
||||
|
@ -931,11 +921,10 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::RecvMessageReady(
|
|||
CallAttempt* call_attempt = batch_data->call_attempt_;
|
||||
RetryFilter::LegacyCallData* calld = call_attempt->calld_;
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p batch_data=%p: "
|
||||
"got recv_message_ready, error=%s",
|
||||
calld->chand_, calld, call_attempt, batch_data.get(),
|
||||
StatusToString(error).c_str());
|
||||
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
|
||||
<< " attempt=" << call_attempt
|
||||
<< " batch_data=" << batch_data.get()
|
||||
<< ": got recv_message_ready, error=" << StatusToString(error);
|
||||
}
|
||||
++call_attempt->completed_recv_message_count_;
|
||||
// If this attempt has been abandoned, then we're not going to use the
|
||||
|
@ -961,10 +950,10 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::RecvMessageReady(
|
|||
(!call_attempt->recv_message_.has_value() || !error.ok()) &&
|
||||
!call_attempt->completed_recv_trailing_metadata_)) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p: deferring recv_message_ready "
|
||||
"(nullptr message and recv_trailing_metadata pending)",
|
||||
calld->chand_, calld, call_attempt);
|
||||
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
|
||||
<< " attempt=" << call_attempt
|
||||
<< ": deferring recv_message_ready (nullptr message and "
|
||||
"recv_trailing_metadata pending)";
|
||||
}
|
||||
call_attempt->recv_message_ready_deferred_batch_ = std::move(batch_data);
|
||||
call_attempt->recv_message_error_ = error;
|
||||
|
@ -1125,11 +1114,11 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
|
|||
CallAttempt* call_attempt = batch_data->call_attempt_;
|
||||
RetryFilter::LegacyCallData* calld = call_attempt->calld_;
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p batch_data=%p: "
|
||||
"got recv_trailing_metadata_ready, error=%s",
|
||||
calld->chand_, calld, call_attempt, batch_data.get(),
|
||||
StatusToString(error).c_str());
|
||||
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
|
||||
<< " attempt=" << call_attempt
|
||||
<< " batch_data=" << batch_data.get()
|
||||
<< ": got recv_trailing_metadata_ready, error="
|
||||
<< StatusToString(error);
|
||||
}
|
||||
call_attempt->completed_recv_trailing_metadata_ = true;
|
||||
// If this attempt has been abandoned, then we're not going to use the
|
||||
|
@ -1152,17 +1141,15 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
|
|||
GetCallStatus(calld->deadline_, md_batch, error, &status, &server_pushback,
|
||||
&is_lb_drop, &stream_network_state);
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p: call finished, status=%s "
|
||||
"server_pushback=%s is_lb_drop=%d stream_network_state=%s",
|
||||
calld->chand_, calld, call_attempt,
|
||||
grpc_status_code_to_string(status),
|
||||
server_pushback.has_value() ? server_pushback->ToString().c_str()
|
||||
: "N/A",
|
||||
is_lb_drop,
|
||||
stream_network_state.has_value()
|
||||
? absl::StrCat(*stream_network_state).c_str()
|
||||
: "N/A");
|
||||
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
|
||||
<< " attempt=" << call_attempt << ": call finished, status="
|
||||
<< grpc_status_code_to_string(status) << " server_pushback="
|
||||
<< (server_pushback.has_value() ? server_pushback->ToString()
|
||||
: "N/A")
|
||||
<< " is_lb_drop=" << is_lb_drop << " stream_network_state="
|
||||
<< (stream_network_state.has_value()
|
||||
? absl::StrCat(*stream_network_state)
|
||||
: "N/A");
|
||||
}
|
||||
// Check if we should retry.
|
||||
if (!is_lb_drop) { // Never retry on LB drops.
|
||||
|
@ -1273,10 +1260,9 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
|
|||
}
|
||||
if (have_pending_send_ops) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p: starting next batch for pending "
|
||||
"send op(s)",
|
||||
calld->chand_, calld, call_attempt_);
|
||||
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
|
||||
<< " attempt=" << call_attempt_
|
||||
<< ": starting next batch for pending send op(s)";
|
||||
}
|
||||
call_attempt_->AddRetriableBatches(closures);
|
||||
}
|
||||
|
@ -1288,13 +1274,13 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::OnComplete(
|
|||
CallAttempt* call_attempt = batch_data->call_attempt_;
|
||||
RetryFilter::LegacyCallData* calld = call_attempt->calld_;
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p batch_data=%p: "
|
||||
"got on_complete, error=%s, batch=%s",
|
||||
calld->chand_, calld, call_attempt, batch_data.get(),
|
||||
StatusToString(error).c_str(),
|
||||
grpc_transport_stream_op_batch_string(&batch_data->batch_, false)
|
||||
.c_str());
|
||||
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
|
||||
<< " attempt=" << call_attempt
|
||||
<< " batch_data=" << batch_data.get()
|
||||
<< ": got on_complete, error=" << StatusToString(error)
|
||||
<< ", batch="
|
||||
<< grpc_transport_stream_op_batch_string(&batch_data->batch_,
|
||||
false);
|
||||
}
|
||||
// If this attempt has been abandoned, then we're not going to propagate
|
||||
// the completion of this batch, so do nothing.
|
||||
|
@ -1310,8 +1296,8 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::OnComplete(
|
|||
if (GPR_UNLIKELY(!calld->retry_committed_ && !error.ok() &&
|
||||
!call_attempt->completed_recv_trailing_metadata_)) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: deferring on_complete",
|
||||
calld->chand_, calld, call_attempt);
|
||||
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
|
||||
<< " attempt=" << call_attempt << ": deferring on_complete";
|
||||
}
|
||||
call_attempt->on_complete_deferred_batches_.emplace_back(
|
||||
std::move(batch_data), error);
|
||||
|
@ -1364,19 +1350,18 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::OnCompleteForCancelOp(
|
|||
CallAttempt* call_attempt = batch_data->call_attempt_;
|
||||
RetryFilter::LegacyCallData* calld = call_attempt->calld_;
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p batch_data=%p: "
|
||||
"got on_complete for cancel_stream batch, error=%s, batch=%s",
|
||||
calld->chand_, calld, call_attempt, batch_data.get(),
|
||||
StatusToString(error).c_str(),
|
||||
grpc_transport_stream_op_batch_string(&batch_data->batch_, false)
|
||||
.c_str());
|
||||
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
|
||||
<< " attempt=" << call_attempt
|
||||
<< " batch_data=" << batch_data.get()
|
||||
<< ": got on_complete for cancel_stream batch, error="
|
||||
<< StatusToString(error) << ", batch="
|
||||
<< grpc_transport_stream_op_batch_string(&batch_data->batch_,
|
||||
false);
|
||||
}
|
||||
GRPC_CALL_COMBINER_STOP(
|
||||
calld->call_combiner_,
|
||||
"on_complete for internally generated cancel_stream op");
|
||||
}
|
||||
|
||||
//
|
||||
// retriable batch construction
|
||||
//
|
||||
|
@ -1408,12 +1393,10 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
|
|||
AddRetriableSendMessageOp() {
|
||||
auto* calld = call_attempt_->calld_;
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(
|
||||
GPR_INFO,
|
||||
"chand=%p calld=%p attempt=%p: starting calld->send_messages[%" PRIuPTR
|
||||
"]",
|
||||
calld->chand_, calld, call_attempt_,
|
||||
call_attempt_->started_send_message_count_);
|
||||
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
|
||||
<< " attempt=" << call_attempt_
|
||||
<< ": starting calld->send_messages["
|
||||
<< call_attempt_->started_send_message_count_ << "]";
|
||||
}
|
||||
CachedSendMessage cache =
|
||||
calld->send_messages_[call_attempt_->started_send_message_count_];
|
||||
|
@ -1497,8 +1480,8 @@ grpc_error_handle RetryFilter::LegacyCallData::Init(
|
|||
auto* chand = static_cast<RetryFilter*>(elem->channel_data);
|
||||
new (elem->call_data) RetryFilter::LegacyCallData(chand, *args);
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO, "chand=%p calld=%p: created call", chand,
|
||||
elem->call_data);
|
||||
LOG(INFO) << "chand=" << chand << " calld=" << elem->call_data
|
||||
<< ": created call";
|
||||
}
|
||||
return absl::OkStatus();
|
||||
}
|
||||
|
@ -1579,9 +1562,9 @@ RetryFilter::LegacyCallData::~LegacyCallData() {
|
|||
void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
|
||||
grpc_transport_stream_op_batch* batch) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry) && !GRPC_TRACE_FLAG_ENABLED(channel)) {
|
||||
gpr_log(GPR_INFO, "chand=%p calld=%p: batch started from surface: %s",
|
||||
chand_, this,
|
||||
grpc_transport_stream_op_batch_string(batch, false).c_str());
|
||||
LOG(INFO) << "chand=" << chand_ << " calld=" << this
|
||||
<< ": batch started from surface: "
|
||||
<< grpc_transport_stream_op_batch_string(batch, false);
|
||||
}
|
||||
// If we have an LB call, delegate to the LB call.
|
||||
if (committed_call_ != nullptr) {
|
||||
|
@ -1602,8 +1585,9 @@ void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
|
|||
// Save cancel_error in case subsequent batches are started.
|
||||
cancelled_from_surface_ = batch->payload->cancel_stream.cancel_error;
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO, "chand=%p calld=%p: cancelled from surface: %s", chand_,
|
||||
this, StatusToString(cancelled_from_surface_).c_str());
|
||||
LOG(INFO) << "chand=" << chand_ << " calld=" << this
|
||||
<< ": cancelled from surface: "
|
||||
<< StatusToString(cancelled_from_surface_);
|
||||
}
|
||||
// Fail any pending batches.
|
||||
PendingBatchesFail(cancelled_from_surface_);
|
||||
|
@ -1625,8 +1609,8 @@ void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
|
|||
// Cancel retry timer if needed.
|
||||
if (retry_timer_handle_.has_value()) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO, "chand=%p calld=%p: cancelling retry timer", chand_,
|
||||
this);
|
||||
LOG(INFO) << "chand=" << chand_ << " calld=" << this
|
||||
<< ": cancelling retry timer";
|
||||
}
|
||||
if (chand_->event_engine()->Cancel(*retry_timer_handle_)) {
|
||||
GRPC_CALL_STACK_UNREF(owning_call_, "OnRetryTimer");
|
||||
|
@ -1671,10 +1655,9 @@ void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
|
|||
(retry_policy_ == nullptr ||
|
||||
!retry_policy_->per_attempt_recv_timeout().has_value())) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p: retry committed before first attempt; "
|
||||
"creating LB call",
|
||||
chand_, this);
|
||||
LOG(INFO) << "chand=" << chand_ << " calld=" << this
|
||||
<< ": retry committed before first attempt; "
|
||||
<< "creating LB call";
|
||||
}
|
||||
PendingBatchClear(pending);
|
||||
auto* service_config_call_data =
|
||||
|
@ -1690,8 +1673,8 @@ void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
|
|||
// The attempt will automatically start any necessary replays or
|
||||
// pending batches.
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO, "chand=%p calld=%p: creating call attempt", chand_,
|
||||
this);
|
||||
LOG(INFO) << "chand=" << chand_ << " calld=" << this
|
||||
<< ": creating call attempt";
|
||||
}
|
||||
retry_codepath_started_ = true;
|
||||
CreateCallAttempt(/*is_transparent_retry=*/false);
|
||||
|
@ -1699,8 +1682,8 @@ void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
|
|||
}
|
||||
// Send batches to call attempt.
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO, "chand=%p calld=%p: starting batch on attempt=%p", chand_,
|
||||
this, call_attempt_.get());
|
||||
LOG(INFO) << "chand=" << chand_ << " calld=" << this
|
||||
<< ": starting batch on attempt=" << call_attempt_.get();
|
||||
}
|
||||
call_attempt_->StartRetriableBatches();
|
||||
}
|
||||
|
@ -1757,8 +1740,8 @@ void RetryFilter::LegacyCallData::MaybeCacheSendOpsForBatch(
|
|||
|
||||
void RetryFilter::LegacyCallData::FreeCachedSendInitialMetadata() {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO, "chand=%p calld=%p: destroying send_initial_metadata",
|
||||
chand_, this);
|
||||
LOG(INFO) << "chand=" << chand_ << " calld=" << this
|
||||
<< ": destroying send_initial_metadata";
|
||||
}
|
||||
send_initial_metadata_.Clear();
|
||||
}
|
||||
|
@ -1766,9 +1749,8 @@ void RetryFilter::LegacyCallData::FreeCachedSendInitialMetadata() {
|
|||
void RetryFilter::LegacyCallData::FreeCachedSendMessage(size_t idx) {
|
||||
if (send_messages_[idx].slices != nullptr) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p: destroying send_messages[%" PRIuPTR "]",
|
||||
chand_, this, idx);
|
||||
LOG(INFO) << "chand=" << chand_ << " calld=" << this
|
||||
<< ": destroying send_messages[" << idx << "]";
|
||||
}
|
||||
Destruct(std::exchange(send_messages_[idx].slices, nullptr));
|
||||
}
|
||||
|
@ -1776,8 +1758,8 @@ void RetryFilter::LegacyCallData::FreeCachedSendMessage(size_t idx) {
|
|||
|
||||
void RetryFilter::LegacyCallData::FreeCachedSendTrailingMetadata() {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO, "chand=%p calld=%p: destroying send_trailing_metadata",
|
||||
chand_, this);
|
||||
LOG(INFO) << "chand=" << chand_ << " calld=" << this
|
||||
<< ": destroying send_trailing_metadata";
|
||||
}
|
||||
send_trailing_metadata_.Clear();
|
||||
}
|
||||
|
@ -1815,9 +1797,8 @@ RetryFilter::LegacyCallData::PendingBatchesAdd(
|
|||
grpc_transport_stream_op_batch* batch) {
|
||||
const size_t idx = GetBatchIndex(batch);
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p: adding pending batch at index %" PRIuPTR,
|
||||
chand_, this, idx);
|
||||
LOG(INFO) << "chand=" << chand_ << " calld=" << this
|
||||
<< ": adding pending batch at index " << idx;
|
||||
}
|
||||
PendingBatch* pending = &pending_batches_[idx];
|
||||
CHECK_EQ(pending->batch, nullptr);
|
||||
|
@ -1846,9 +1827,8 @@ RetryFilter::LegacyCallData::PendingBatchesAdd(
|
|||
if (GPR_UNLIKELY(bytes_buffered_for_retry_ >
|
||||
chand_->per_rpc_retry_buffer_size())) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p: exceeded retry buffer size, committing",
|
||||
chand_, this);
|
||||
LOG(INFO) << "chand=" << chand_ << " calld=" << this
|
||||
<< ": exceeded retry buffer size, committing";
|
||||
}
|
||||
RetryCommit(call_attempt_.get());
|
||||
}
|
||||
|
@ -1883,8 +1863,8 @@ void RetryFilter::LegacyCallData::MaybeClearPendingBatch(
|
|||
batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready ==
|
||||
nullptr)) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO, "chand=%p calld=%p: clearing pending batch", chand_,
|
||||
this);
|
||||
LOG(INFO) << "chand=" << chand_ << " calld=" << this
|
||||
<< ": clearing pending batch";
|
||||
}
|
||||
PendingBatchClear(pending);
|
||||
}
|
||||
|
@ -1910,9 +1890,8 @@ void RetryFilter::LegacyCallData::PendingBatchesFail(grpc_error_handle error) {
|
|||
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
|
||||
if (pending_batches_[i].batch != nullptr) ++num_batches;
|
||||
}
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
|
||||
chand_, this, num_batches, StatusToString(error).c_str());
|
||||
LOG(INFO) << "chand=" << chand_ << " calld=" << this << ": failing "
|
||||
<< num_batches << " pending batches: " << StatusToString(error);
|
||||
}
|
||||
CallCombinerClosureList closures;
|
||||
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
|
||||
|
@ -1940,9 +1919,8 @@ RetryFilter::LegacyCallData::PendingBatchFind(const char* log_message,
|
|||
grpc_transport_stream_op_batch* batch = pending->batch;
|
||||
if (batch != nullptr && predicate(batch)) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p: %s pending batch at index %" PRIuPTR,
|
||||
chand_, this, log_message, i);
|
||||
LOG(INFO) << "chand=" << chand_ << " calld=" << this << ": "
|
||||
<< log_message << " pending batch at index " << i;
|
||||
}
|
||||
return pending;
|
||||
}
|
||||
|
@ -1958,7 +1936,8 @@ void RetryFilter::LegacyCallData::RetryCommit(CallAttempt* call_attempt) {
|
|||
if (retry_committed_) return;
|
||||
retry_committed_ = true;
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO, "chand=%p calld=%p: committing retries", chand_, this);
|
||||
LOG(INFO) << "chand=" << chand_ << " calld=" << this
|
||||
<< ": committing retries";
|
||||
}
|
||||
if (call_attempt != nullptr) {
|
||||
// If the call attempt's LB call has been committed, invoke the
|
||||
|
@ -1992,9 +1971,9 @@ void RetryFilter::LegacyCallData::StartRetryTimer(
|
|||
next_attempt_timeout = retry_backoff_.NextAttemptTime() - Timestamp::Now();
|
||||
}
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"chand=%p calld=%p: retrying failed call in %" PRId64 " ms", chand_,
|
||||
this, next_attempt_timeout.millis());
|
||||
LOG(INFO) << "chand=" << chand_ << " calld=" << this
|
||||
<< ": retrying failed call in " << next_attempt_timeout.millis()
|
||||
<< " ms";
|
||||
}
|
||||
// Schedule retry after computed delay.
|
||||
GRPC_CALL_STACK_REF(owning_call_, "OnRetryTimer");
|
||||
|
@ -2025,8 +2004,8 @@ void RetryFilter::LegacyCallData::OnRetryTimerLocked(
|
|||
void RetryFilter::LegacyCallData::AddClosureToStartTransparentRetry(
|
||||
CallCombinerClosureList* closures) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
|
||||
gpr_log(GPR_INFO, "chand=%p calld=%p: scheduling transparent retry", chand_,
|
||||
this);
|
||||
LOG(INFO) << "chand=" << chand_ << " calld=" << this
|
||||
<< ": scheduling transparent retry";
|
||||
}
|
||||
GRPC_CALL_STACK_REF(owning_call_, "OnRetryTimer");
|
||||
GRPC_CLOSURE_INIT(&retry_closure_, StartTransparentRetry, this, nullptr);
|
||||
|
|
|
@ -37,7 +37,6 @@
|
|||
#include <grpc/impl/channel_arg_names.h>
|
||||
#include <grpc/slice.h>
|
||||
#include <grpc/status.h>
|
||||
#include <grpc/support/log.h>
|
||||
|
||||
#include "src/core/channelz/channel_trace.h"
|
||||
#include "src/core/channelz/channelz.h"
|
||||
|
@ -98,14 +97,11 @@ using ::grpc_event_engine::experimental::EventEngine;
|
|||
// ConnectedSubchannel
|
||||
//
|
||||
|
||||
ConnectedSubchannel::ConnectedSubchannel(
|
||||
const ChannelArgs& args,
|
||||
RefCountedPtr<channelz::SubchannelNode> channelz_subchannel)
|
||||
ConnectedSubchannel::ConnectedSubchannel(const ChannelArgs& args)
|
||||
: RefCounted<ConnectedSubchannel>(
|
||||
GRPC_TRACE_FLAG_ENABLED(subchannel_refcount) ? "ConnectedSubchannel"
|
||||
: nullptr),
|
||||
args_(args),
|
||||
channelz_subchannel_(std::move(channelz_subchannel)) {}
|
||||
args_(args) {}
|
||||
|
||||
//
|
||||
// LegacyConnectedSubchannel
|
||||
|
@ -115,14 +111,19 @@ class LegacyConnectedSubchannel : public ConnectedSubchannel {
|
|||
public:
|
||||
LegacyConnectedSubchannel(
|
||||
RefCountedPtr<grpc_channel_stack> channel_stack, const ChannelArgs& args,
|
||||
RefCountedPtr<channelz::SubchannelNode> channelz_subchannel)
|
||||
: ConnectedSubchannel(args, std::move(channelz_subchannel)),
|
||||
RefCountedPtr<channelz::SubchannelNode> channelz_node)
|
||||
: ConnectedSubchannel(args),
|
||||
channelz_node_(std::move(channelz_node)),
|
||||
channel_stack_(std::move(channel_stack)) {}
|
||||
|
||||
~LegacyConnectedSubchannel() override {
|
||||
channel_stack_.reset(DEBUG_LOCATION, "ConnectedSubchannel");
|
||||
}
|
||||
|
||||
channelz::SubchannelNode* channelz_node() const {
|
||||
return channelz_node_.get();
|
||||
}
|
||||
|
||||
void StartWatch(
|
||||
grpc_pollset_set* interested_parties,
|
||||
OrphanablePtr<ConnectivityStateWatcherInterface> watcher) override {
|
||||
|
@ -163,6 +164,7 @@ class LegacyConnectedSubchannel : public ConnectedSubchannel {
|
|||
}
|
||||
|
||||
private:
|
||||
RefCountedPtr<channelz::SubchannelNode> channelz_node_;
|
||||
RefCountedPtr<grpc_channel_stack> channel_stack_;
|
||||
};
|
||||
|
||||
|
@ -192,9 +194,8 @@ class NewConnectedSubchannel : public ConnectedSubchannel {
|
|||
NewConnectedSubchannel(
|
||||
RefCountedPtr<UnstartedCallDestination> call_destination,
|
||||
RefCountedPtr<TransportCallDestination> transport,
|
||||
const ChannelArgs& args,
|
||||
RefCountedPtr<channelz::SubchannelNode> channelz_subchannel)
|
||||
: ConnectedSubchannel(args, std::move(channelz_subchannel)),
|
||||
const ChannelArgs& args)
|
||||
: ConnectedSubchannel(args),
|
||||
call_destination_(std::move(call_destination)),
|
||||
transport_(std::move(transport)) {}
|
||||
|
||||
|
@ -241,7 +242,8 @@ RefCountedPtr<SubchannelCall> SubchannelCall::Create(Args args,
|
|||
}
|
||||
|
||||
SubchannelCall::SubchannelCall(Args args, grpc_error_handle* error)
|
||||
: connected_subchannel_(std::move(args.connected_subchannel)),
|
||||
: connected_subchannel_(args.connected_subchannel
|
||||
.TakeAsSubclass<LegacyConnectedSubchannel>()),
|
||||
deadline_(args.deadline) {
|
||||
grpc_call_stack* callstk = SUBCHANNEL_CALL_TO_CALL_STACK(this);
|
||||
const grpc_call_element_args call_args = {
|
||||
|
@ -260,7 +262,7 @@ SubchannelCall::SubchannelCall(Args args, grpc_error_handle* error)
|
|||
return;
|
||||
}
|
||||
grpc_call_stack_set_pollset_or_pollset_set(callstk, args.pollent);
|
||||
auto* channelz_node = connected_subchannel_->channelz_subchannel();
|
||||
auto* channelz_node = connected_subchannel_->channelz_node();
|
||||
if (channelz_node != nullptr) {
|
||||
channelz_node->RecordCallStarted();
|
||||
}
|
||||
|
@ -271,7 +273,9 @@ void SubchannelCall::StartTransportStreamOpBatch(
|
|||
MaybeInterceptRecvTrailingMetadata(batch);
|
||||
grpc_call_stack* call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(this);
|
||||
grpc_call_element* top_elem = grpc_call_stack_element(call_stack, 0);
|
||||
GRPC_CALL_LOG_OP(GPR_INFO, top_elem, batch);
|
||||
GRPC_TRACE_LOG(channel, INFO)
|
||||
<< "OP[" << top_elem->filter->name << ":" << top_elem
|
||||
<< "]: " << grpc_transport_stream_op_batch_string(batch, false);
|
||||
top_elem->filter->start_transport_stream_op_batch(top_elem, batch);
|
||||
}
|
||||
|
||||
|
@ -326,13 +330,9 @@ void SubchannelCall::Destroy(void* arg, grpc_error_handle /*error*/) {
|
|||
void SubchannelCall::MaybeInterceptRecvTrailingMetadata(
|
||||
grpc_transport_stream_op_batch* batch) {
|
||||
// only intercept payloads with recv trailing.
|
||||
if (!batch->recv_trailing_metadata) {
|
||||
return;
|
||||
}
|
||||
if (!batch->recv_trailing_metadata) return;
|
||||
// only add the interceptor if channelz is enabled.
|
||||
if (connected_subchannel_->channelz_subchannel() == nullptr) {
|
||||
return;
|
||||
}
|
||||
if (connected_subchannel_->channelz_node() == nullptr) return;
|
||||
GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready_, RecvTrailingMetadataReady,
|
||||
this, grpc_schedule_on_exec_ctx);
|
||||
// save some state needed for the interception callback.
|
||||
|
@ -365,13 +365,13 @@ void SubchannelCall::RecvTrailingMetadataReady(void* arg,
|
|||
CHECK_NE(call->recv_trailing_metadata_, nullptr);
|
||||
grpc_status_code status = GRPC_STATUS_OK;
|
||||
GetCallStatus(&status, call->deadline_, call->recv_trailing_metadata_, error);
|
||||
channelz::SubchannelNode* channelz_subchannel =
|
||||
call->connected_subchannel_->channelz_subchannel();
|
||||
CHECK_NE(channelz_subchannel, nullptr);
|
||||
channelz::SubchannelNode* channelz_node =
|
||||
call->connected_subchannel_->channelz_node();
|
||||
CHECK_NE(channelz_node, nullptr);
|
||||
if (status == GRPC_STATUS_OK) {
|
||||
channelz_subchannel->RecordCallSucceeded();
|
||||
channelz_node->RecordCallSucceeded();
|
||||
} else {
|
||||
channelz_subchannel->RecordCallFailed();
|
||||
channelz_node->RecordCallFailed();
|
||||
}
|
||||
Closure::Run(DEBUG_LOCATION, call->original_recv_trailing_metadata_, error);
|
||||
}
|
||||
|
@@ -418,10 +418,10 @@ class Subchannel::ConnectedSubchannelStateWatcher final
|
|||
if (new_state == GRPC_CHANNEL_TRANSIENT_FAILURE ||
|
||||
new_state == GRPC_CHANNEL_SHUTDOWN) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(subchannel)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"subchannel %p %s: Connected subchannel %p reports %s: %s", c,
|
||||
c->key_.ToString().c_str(), c->connected_subchannel_.get(),
|
||||
ConnectivityStateName(new_state), status.ToString().c_str());
|
||||
LOG(INFO) << "subchannel " << c << " " << c->key_.ToString()
|
||||
<< ": Connected subchannel "
|
||||
<< c->connected_subchannel_.get() << " reports "
|
||||
<< ConnectivityStateName(new_state) << ": " << status;
|
||||
}
|
||||
c->connected_subchannel_.reset();
|
||||
if (c->channelz_node() != nullptr) {
|
||||
|
@ -603,8 +603,8 @@ void Subchannel::ThrottleKeepaliveTime(int new_keepalive_time) {
|
|||
if (new_keepalive_time > keepalive_time_) {
|
||||
keepalive_time_ = new_keepalive_time;
|
||||
if (GRPC_TRACE_FLAG_ENABLED(subchannel)) {
|
||||
gpr_log(GPR_INFO, "subchannel %p %s: throttling keepalive time to %d",
|
||||
this, key_.ToString().c_str(), new_keepalive_time);
|
||||
LOG(INFO) << "subchannel " << this << " " << key_.ToString()
|
||||
<< ": throttling keepalive time to " << new_keepalive_time;
|
||||
}
|
||||
args_ = args_.Set(GRPC_ARG_KEEPALIVE_TIME_MS, new_keepalive_time);
|
||||
}
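// Tiny illustration of the rule in the throttling hunk above (plain C++, no
// gRPC types): the keepalive interval only ever ratchets upward, so a server's
// "too many pings" pushback cannot be undone by a later, smaller value.
#include <algorithm>

class KeepaliveThrottle {
 public:
  void Throttle(int new_keepalive_time_ms) {
    keepalive_time_ms_ = std::max(keepalive_time_ms_, new_keepalive_time_ms);
  }
  int keepalive_time_ms() const { return keepalive_time_ms_; }

 private:
  int keepalive_time_ms_ = 10000;  // hypothetical default, for illustration only
};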
|
||||
|
@ -758,8 +758,10 @@ void Subchannel::OnRetryTimer() {
|
|||
|
||||
void Subchannel::OnRetryTimerLocked() {
|
||||
if (shutdown_) return;
|
||||
gpr_log(GPR_INFO, "subchannel %p %s: backoff delay elapsed, reporting IDLE",
|
||||
this, key_.ToString().c_str());
|
||||
if (GRPC_TRACE_FLAG_ENABLED(subchannel)) {
|
||||
LOG(INFO) << "subchannel " << this << " " << key_.ToString()
|
||||
<< ": backoff delay elapsed, reporting IDLE";
|
||||
}
|
||||
SetConnectivityStateLocked(GRPC_CHANNEL_IDLE, absl::OkStatus());
|
||||
}
|
||||
|
||||
|
@ -803,11 +805,12 @@ void Subchannel::OnConnectingFinishedLocked(grpc_error_handle error) {
|
|||
if (connecting_result_.transport == nullptr || !PublishTransportLocked()) {
|
||||
const Duration time_until_next_attempt =
|
||||
next_attempt_time_ - Timestamp::Now();
|
||||
gpr_log(GPR_INFO,
|
||||
"subchannel %p %s: connect failed (%s), backing off for %" PRId64
|
||||
" ms",
|
||||
this, key_.ToString().c_str(), StatusToString(error).c_str(),
|
||||
time_until_next_attempt.millis());
|
||||
if (GRPC_TRACE_FLAG_ENABLED(subchannel)) {
|
||||
LOG(INFO) << "subchannel " << this << " " << key_.ToString()
|
||||
<< ": connect failed (" << StatusToString(error)
|
||||
<< "), backing off for " << time_until_next_attempt.millis()
|
||||
<< " ms";
|
||||
}
|
||||
SetConnectivityStateLocked(GRPC_CHANNEL_TRANSIENT_FAILURE,
|
||||
grpc_error_to_absl_status(error));
|
||||
retry_timer_handle_ = event_engine_->RunAfter(
|
||||
|
@ -844,9 +847,8 @@ bool Subchannel::PublishTransportLocked() {
|
|||
absl::StatusOr<RefCountedPtr<grpc_channel_stack>> stack = builder.Build();
|
||||
if (!stack.ok()) {
|
||||
connecting_result_.Reset();
|
||||
gpr_log(GPR_ERROR,
|
||||
"subchannel %p %s: error initializing subchannel stack: %s", this,
|
||||
key_.ToString().c_str(), stack.status().ToString().c_str());
|
||||
LOG(ERROR) << "subchannel " << this << " " << key_.ToString()
|
||||
<< ": error initializing subchannel stack: " << stack.status();
|
||||
return false;
|
||||
}
|
||||
connected_subchannel_ = MakeRefCounted<LegacyConnectedSubchannel>(
|
||||
|
@ -857,6 +859,24 @@ bool Subchannel::PublishTransportLocked() {
|
|||
->client_transport());
|
||||
InterceptionChainBuilder builder(
|
||||
connecting_result_.channel_args.SetObject(transport.get()));
|
||||
if (channelz_node_ != nullptr) {
|
||||
// TODO(ctiller): If/when we have a good way to access the subchannel
|
||||
// from a filter (maybe GetContext<Subchannel>?), consider replacing
|
||||
// these two hooks with a filter so that we can avoid storing two
|
||||
// separate refs to the channelz node in each connection.
|
||||
builder.AddOnClientInitialMetadata(
|
||||
[channelz_node = channelz_node_](ClientMetadata&) {
|
||||
channelz_node->RecordCallStarted();
|
||||
});
|
||||
builder.AddOnServerTrailingMetadata(
|
||||
[channelz_node = channelz_node_](ServerMetadata& metadata) {
|
||||
if (IsStatusOk(metadata)) {
|
||||
channelz_node->RecordCallSucceeded();
|
||||
} else {
|
||||
channelz_node->RecordCallFailed();
|
||||
}
|
||||
});
|
||||
}
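// Hedged sketch of what the two hooks registered above do, with hypothetical
// Stats and InterceptionChain types standing in for the channelz node and the
// interception chain builder: one callback counts the call as started, the
// other classifies its outcome when trailing metadata arrives.
#include <atomic>
#include <functional>
#include <memory>

struct Stats {
  std::atomic<long> started{0}, succeeded{0}, failed{0};
};

struct InterceptionChain {
  std::function<void()> on_client_initial_metadata;
  std::function<void(bool ok)> on_server_trailing_metadata;
};

void AddChannelzHooks(InterceptionChain& chain, std::shared_ptr<Stats> stats) {
  chain.on_client_initial_metadata = [stats] { stats->started++; };
  chain.on_server_trailing_metadata = [stats](bool ok) {
    if (ok) {
      stats->succeeded++;
    } else {
      stats->failed++;
    }
  };
}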
|
||||
CoreConfiguration::Get().channel_init().AddToInterceptionChainBuilder(
|
||||
GRPC_CLIENT_SUBCHANNEL, builder);
|
||||
auto transport_destination =
|
||||
|
@ -865,21 +885,20 @@ bool Subchannel::PublishTransportLocked() {
|
|||
auto call_destination = builder.Build(transport_destination);
|
||||
if (!call_destination.ok()) {
|
||||
connecting_result_.Reset();
|
||||
gpr_log(GPR_ERROR,
|
||||
"subchannel %p %s: error initializing subchannel stack: %s", this,
|
||||
key_.ToString().c_str(),
|
||||
call_destination.status().ToString().c_str());
|
||||
LOG(ERROR) << "subchannel " << this << " " << key_.ToString()
|
||||
<< ": error initializing subchannel stack: "
|
||||
<< call_destination.status();
|
||||
return false;
|
||||
}
|
||||
connected_subchannel_ = MakeRefCounted<NewConnectedSubchannel>(
|
||||
std::move(*call_destination), std::move(transport_destination), args_,
|
||||
channelz_node_);
|
||||
std::move(*call_destination), std::move(transport_destination), args_);
|
||||
}
|
||||
connecting_result_.Reset();
|
||||
// Publish.
|
||||
if (GRPC_TRACE_FLAG_ENABLED(subchannel)) {
|
||||
gpr_log(GPR_INFO, "subchannel %p %s: new connected subchannel at %p", this,
|
||||
key_.ToString().c_str(), connected_subchannel_.get());
|
||||
LOG(INFO) << "subchannel " << this << " " << key_.ToString()
|
||||
<< ": new connected subchannel at "
|
||||
<< connected_subchannel_.get();
|
||||
}
|
||||
if (channelz_node_ != nullptr) {
|
||||
channelz_node_->SetChildSocket(std::move(socket_node));
|
||||
|
|
|
@ -66,9 +66,6 @@ class SubchannelCall;
|
|||
class ConnectedSubchannel : public RefCounted<ConnectedSubchannel> {
|
||||
public:
|
||||
const ChannelArgs& args() const { return args_; }
|
||||
channelz::SubchannelNode* channelz_subchannel() const {
|
||||
return channelz_subchannel_.get();
|
||||
}
|
||||
|
||||
virtual void StartWatch(
|
||||
grpc_pollset_set* interested_parties,
|
||||
|
@ -85,17 +82,14 @@ class ConnectedSubchannel : public RefCounted<ConnectedSubchannel> {
|
|||
virtual void Ping(grpc_closure* on_initiate, grpc_closure* on_ack) = 0;
|
||||
|
||||
protected:
|
||||
ConnectedSubchannel(
|
||||
const ChannelArgs& args,
|
||||
RefCountedPtr<channelz::SubchannelNode> channelz_subchannel);
|
||||
explicit ConnectedSubchannel(const ChannelArgs& args);
|
||||
|
||||
private:
|
||||
ChannelArgs args_;
|
||||
// ref counted pointer to the channelz node in this connected subchannel's
|
||||
// owning subchannel.
|
||||
RefCountedPtr<channelz::SubchannelNode> channelz_subchannel_;
|
||||
};
|
||||
|
||||
class LegacyConnectedSubchannel;
|
||||
|
||||
// Implements the interface of RefCounted<>.
|
||||
class SubchannelCall final {
|
||||
public:
|
||||
|
@ -150,7 +144,7 @@ class SubchannelCall final {
|
|||
|
||||
static void Destroy(void* arg, grpc_error_handle error);
|
||||
|
||||
RefCountedPtr<ConnectedSubchannel> connected_subchannel_;
|
||||
RefCountedPtr<LegacyConnectedSubchannel> connected_subchannel_;
|
||||
grpc_closure* after_call_stack_destroy_ = nullptr;
|
||||
// State needed to support channelz interception of recv trailing metadata.
|
||||
grpc_closure recv_trailing_metadata_ready_;
|
||||
|
|
|
@ -24,13 +24,13 @@
|
|||
#include <memory>
|
||||
#include <utility>
|
||||
|
||||
#include "absl/log/log.h"
|
||||
#include "absl/strings/string_view.h"
|
||||
#include "upb/base/string_view.h"
|
||||
#include "upb/mem/arena.hpp"
|
||||
#include "xds/data/orca/v3/orca_load_report.upb.h"
|
||||
|
||||
#include <grpc/impl/channel_arg_names.h>
|
||||
#include <grpc/support/log.h>
|
||||
|
||||
#include "src/core/lib/channel/channel_stack.h"
|
||||
#include "src/core/lib/channel/promise_based_filter.h"
|
||||
|
@ -117,8 +117,7 @@ absl::optional<std::string> MaybeSerializeBackendMetrics(
|
|||
} // namespace
|
||||
|
||||
const grpc_channel_filter BackendMetricFilter::kFilter =
|
||||
MakePromiseBasedFilter<BackendMetricFilter, FilterEndpoint::kServer>(
|
||||
"backend_metric");
|
||||
MakePromiseBasedFilter<BackendMetricFilter, FilterEndpoint::kServer>();
|
||||
|
||||
absl::StatusOr<std::unique_ptr<BackendMetricFilter>>
|
||||
BackendMetricFilter::Create(const ChannelArgs&, ChannelFilter::Args) {
|
||||
|
@ -130,20 +129,20 @@ void BackendMetricFilter::Call::OnServerTrailingMetadata(ServerMetadata& md) {
|
|||
auto* ctx = MaybeGetContext<BackendMetricProvider>();
|
||||
if (ctx == nullptr) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(backend_metric_filter)) {
|
||||
gpr_log(GPR_INFO, "[%p] No BackendMetricProvider.", this);
|
||||
LOG(INFO) << "[" << this << "] No BackendMetricProvider.";
|
||||
}
|
||||
return;
|
||||
}
|
||||
absl::optional<std::string> serialized = MaybeSerializeBackendMetrics(ctx);
|
||||
if (serialized.has_value() && !serialized->empty()) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(backend_metric_filter)) {
|
||||
gpr_log(GPR_INFO, "[%p] Backend metrics serialized. size: %" PRIuPTR,
|
||||
this, serialized->size());
|
||||
LOG(INFO) << "[" << this
|
||||
<< "] Backend metrics serialized. size: " << serialized->size();
|
||||
}
|
||||
md.Set(EndpointLoadMetricsBinMetadata(),
|
||||
Slice::FromCopiedString(std::move(*serialized)));
|
||||
} else if (GRPC_TRACE_FLAG_ENABLED(backend_metric_filter)) {
|
||||
gpr_log(GPR_INFO, "[%p] No backend metrics.", this);
|
||||
LOG(INFO) << "[" << this << "] No backend metrics.";
|
||||
}
|
||||
}
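// Several hunks in this change drop the string argument from
// MakePromiseBasedFilter<...>("name") and rely on a static TypeName() on the
// filter class instead. A standalone sketch of that pattern; SketchFilter and
// MakeFilterName are hypothetical, not the gRPC templates:
#include <string_view>

class SketchFilter {
 public:
  static std::string_view TypeName() { return "backend_metric"; }
};

template <typename F>
std::string_view MakeFilterName() {
  // Was: a "backend_metric" literal repeated at every construction site.
  return F::TypeName();
}

// Usage: std::string_view name = MakeFilterName<SketchFilter>();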
|
||||
|
||||
|
|
|
@ -35,6 +35,8 @@ class BackendMetricFilter : public ImplementChannelFilter<BackendMetricFilter> {
|
|||
public:
|
||||
static const grpc_channel_filter kFilter;
|
||||
|
||||
static absl::string_view TypeName() { return "backend_metric"; }
|
||||
|
||||
static absl::StatusOr<std::unique_ptr<BackendMetricFilter>> Create(
|
||||
const ChannelArgs& args, ChannelFilter::Args);
|
||||
|
||||
|
|
|
@@ -22,18 +22,18 @@
#include <grpc/grpc.h>

#include "src/core/lib/debug/trace.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/call.h"

void grpc_census_call_set_context(grpc_call* call, census_context* context) {
  GRPC_API_TRACE("grpc_census_call_set_context(call=%p, census_context=%p)", 2,
                 (call, context));
  GRPC_TRACE_LOG(api, INFO) << "grpc_census_call_set_context(call=" << call
                            << ", census_context=" << context << ")";
  if (context != nullptr) {
    grpc_call_get_arena(call)->SetContext<census_context>(context);
  }
}

census_context* grpc_census_call_get_context(grpc_call* call) {
  GRPC_API_TRACE("grpc_census_call_get_context(call=%p)", 1, (call));
  GRPC_TRACE_LOG(api, INFO)
      << "grpc_census_call_get_context(call=" << call << ")";
  return grpc_call_get_arena(call)->GetContext<census_context>();
}
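// grpc_census_call_set_context stores the census_context in a typed slot on
// the call's arena. A hedged sketch of that "one slot per type" idea using a
// hypothetical SketchArena (gRPC's real arena is not implemented this way):
#include <typeindex>
#include <typeinfo>
#include <unordered_map>

class SketchArena {
 public:
  template <typename T>
  void SetContext(T* value) {
    slots_[std::type_index(typeid(T))] = value;
  }
  template <typename T>
  T* GetContext() const {
    auto it = slots_.find(std::type_index(typeid(T)));
    return it == slots_.end() ? nullptr : static_cast<T*>(it->second);
  }

 private:
  std::unordered_map<std::type_index, void*> slots_;
};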
|
|
@ -51,6 +51,7 @@
|
|||
#include "src/core/lib/promise/promise.h"
|
||||
#include "src/core/lib/promise/sleep.h"
|
||||
#include "src/core/lib/promise/try_seq.h"
|
||||
#include "src/core/lib/resource_quota/arena.h"
|
||||
#include "src/core/lib/surface/channel_stack_type.h"
|
||||
#include "src/core/lib/transport/http2_errors.h"
|
||||
#include "src/core/lib/transport/metadata_batch.h"
|
||||
|
@@ -70,13 +71,6 @@ const auto kMaxConnectionAgeJitter = 0.1;

}  // namespace

#define GRPC_IDLE_FILTER_LOG(format, ...)                               \
  do {                                                                  \
    if (GRPC_TRACE_FLAG_ENABLED(client_idle_filter)) {                  \
      gpr_log(GPR_INFO, "(client idle filter) " format, ##__VA_ARGS__); \
    }                                                                   \
  } while (0)

Duration GetClientIdleTimeout(const ChannelArgs& args) {
  return args.GetDurationFromIntMillis(GRPC_ARG_CLIENT_IDLE_TIMEOUT_MS)
      .value_or(kDefaultIdleTimeout);
|
||||
|
@ -176,6 +170,9 @@ void LegacyMaxAgeFilter::PostInit() {
|
|||
|
||||
// Start the max age timer
|
||||
if (max_connection_age_ != Duration::Infinity()) {
|
||||
auto arena = SimpleArenaAllocator(0)->MakeArena();
|
||||
arena->SetContext<grpc_event_engine::experimental::EventEngine>(
|
||||
channel_stack->EventEngine());
|
||||
max_age_activity_.Set(MakeActivity(
|
||||
TrySeq(
|
||||
// First sleep until the max connection age
|
||||
|
@ -213,7 +210,7 @@ void LegacyMaxAgeFilter::PostInit() {
|
|||
// (if it did not, it was cancelled)
|
||||
if (status.ok()) CloseChannel();
|
||||
},
|
||||
channel_stack->EventEngine()));
|
||||
std::move(arena)));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -255,7 +252,8 @@ void LegacyChannelIdleFilter::DecreaseCallCount() {
|
|||
}
|
||||
|
||||
void LegacyChannelIdleFilter::StartIdleTimer() {
|
||||
GRPC_IDLE_FILTER_LOG("timer has started");
|
||||
GRPC_TRACE_LOG(client_idle_filter, INFO)
|
||||
<< "(client idle filter) timer has started";
|
||||
auto idle_filter_state = idle_filter_state_;
|
||||
// Hold a ref to the channel stack for the timer callback.
|
||||
auto channel_stack = channel_stack_->Ref();
|
||||
|
@ -270,12 +268,15 @@ void LegacyChannelIdleFilter::StartIdleTimer() {
|
|||
}
|
||||
});
|
||||
});
|
||||
auto arena = SimpleArenaAllocator()->MakeArena();
|
||||
arena->SetContext<grpc_event_engine::experimental::EventEngine>(
|
||||
channel_stack_->EventEngine());
|
||||
activity_.Set(MakeActivity(
|
||||
std::move(promise), ExecCtxWakeupScheduler{},
|
||||
[channel_stack, this](absl::Status status) {
|
||||
if (status.ok()) CloseChannel();
|
||||
},
|
||||
channel_stack->EventEngine()));
|
||||
std::move(arena)));
|
||||
}
|
||||
|
||||
void LegacyChannelIdleFilter::CloseChannel() {
|
||||
|
@ -289,11 +290,9 @@ void LegacyChannelIdleFilter::CloseChannel() {
|
|||
}
|
||||
|
||||
const grpc_channel_filter LegacyClientIdleFilter::kFilter =
|
||||
MakePromiseBasedFilter<LegacyClientIdleFilter, FilterEndpoint::kClient>(
|
||||
"client_idle");
|
||||
MakePromiseBasedFilter<LegacyClientIdleFilter, FilterEndpoint::kClient>();
|
||||
const grpc_channel_filter LegacyMaxAgeFilter::kFilter =
|
||||
MakePromiseBasedFilter<LegacyMaxAgeFilter, FilterEndpoint::kServer>(
|
||||
"max_age");
|
||||
MakePromiseBasedFilter<LegacyMaxAgeFilter, FilterEndpoint::kServer>();
|
||||
|
||||
void RegisterLegacyChannelIdleFilters(CoreConfiguration::Builder* builder) {
|
||||
builder->channel_init()
|
||||
|
|
|
@ -96,6 +96,8 @@ class LegacyClientIdleFilter final : public LegacyChannelIdleFilter {
|
|||
public:
|
||||
static const grpc_channel_filter kFilter;
|
||||
|
||||
static absl::string_view TypeName() { return "client_idle"; }
|
||||
|
||||
static absl::StatusOr<std::unique_ptr<LegacyClientIdleFilter>> Create(
|
||||
const ChannelArgs& args, ChannelFilter::Args filter_args);
|
||||
|
||||
|
@ -107,6 +109,8 @@ class LegacyMaxAgeFilter final : public LegacyChannelIdleFilter {
|
|||
static const grpc_channel_filter kFilter;
|
||||
struct Config;
|
||||
|
||||
static absl::string_view TypeName() { return "max_age"; }
|
||||
|
||||
static absl::StatusOr<std::unique_ptr<LegacyMaxAgeFilter>> Create(
|
||||
const ChannelArgs& args, ChannelFilter::Args filter_args);
|
||||
|
||||
|
|
|
@ -27,6 +27,7 @@
|
|||
#include <type_traits>
|
||||
#include <utility>
|
||||
|
||||
#include "absl/log/log.h"
|
||||
#include "absl/meta/type_traits.h"
|
||||
#include "absl/status/status.h"
|
||||
#include "absl/status/statusor.h"
|
||||
|
@ -36,7 +37,6 @@
|
|||
#include "absl/types/optional.h"
|
||||
|
||||
#include <grpc/status.h>
|
||||
#include <grpc/support/log.h>
|
||||
|
||||
#include "src/core/ext/filters/fault_injection/fault_injection_service_config_parser.h"
|
||||
#include "src/core/lib/channel/channel_stack.h"
|
||||
|
@ -151,8 +151,8 @@ ArenaPromise<absl::Status> FaultInjectionFilter::Call::OnClientInitialMetadata(
|
|||
ClientMetadata& md, FaultInjectionFilter* filter) {
|
||||
auto decision = filter->MakeInjectionDecision(md);
|
||||
if (GRPC_TRACE_FLAG_ENABLED(fault_injection_filter)) {
|
||||
gpr_log(GPR_INFO, "chand=%p: Fault injection triggered %s", this,
|
||||
decision.ToString().c_str());
|
||||
LOG(INFO) << "chand=" << this << ": Fault injection triggered "
|
||||
<< decision.ToString();
|
||||
}
|
||||
auto delay = decision.DelayUntil();
|
||||
return TrySeq(Sleep(delay), [decision = std::move(decision)]() {
|
||||
|
@ -270,8 +270,7 @@ std::string FaultInjectionFilter::InjectionDecision::ToString() const {
|
|||
}
|
||||
|
||||
const grpc_channel_filter FaultInjectionFilter::kFilter =
|
||||
MakePromiseBasedFilter<FaultInjectionFilter, FilterEndpoint::kClient>(
|
||||
"fault_injection_filter");
|
||||
MakePromiseBasedFilter<FaultInjectionFilter, FilterEndpoint::kClient>();
|
||||
|
||||
void FaultInjectionFilterRegister(CoreConfiguration::Builder* builder) {
|
||||
FaultInjectionServiceConfigParser::Register(builder);
|
||||
|
|
|
@ -45,6 +45,8 @@ class FaultInjectionFilter
|
|||
public:
|
||||
static const grpc_channel_filter kFilter;
|
||||
|
||||
static absl::string_view TypeName() { return "fault_injection_filter"; }
|
||||
|
||||
static absl::StatusOr<std::unique_ptr<FaultInjectionFilter>> Create(
|
||||
const ChannelArgs& args, ChannelFilter::Args filter_args);
|
||||
|
||||
|
|
|
@ -59,7 +59,7 @@ const NoInterceptor HttpClientFilter::Call::OnFinalize;
|
|||
|
||||
const grpc_channel_filter HttpClientFilter::kFilter =
|
||||
MakePromiseBasedFilter<HttpClientFilter, FilterEndpoint::kClient,
|
||||
kFilterExaminesServerInitialMetadata>("http-client");
|
||||
kFilterExaminesServerInitialMetadata>();
|
||||
|
||||
namespace {
|
||||
absl::Status CheckServerMetadata(ServerMetadata* b) {
|
||||
|
|
|
@ -35,6 +35,8 @@ class HttpClientFilter : public ImplementChannelFilter<HttpClientFilter> {
|
|||
public:
|
||||
static const grpc_channel_filter kFilter;
|
||||
|
||||
static absl::string_view TypeName() { return "http-client"; }
|
||||
|
||||
static absl::StatusOr<std::unique_ptr<HttpClientFilter>> Create(
|
||||
const ChannelArgs& args, ChannelFilter::Args filter_args);
|
||||
|
||||
|
|
|
@ -66,8 +66,7 @@ void ClientAuthorityFilter::Call::OnClientInitialMetadata(
|
|||
}
|
||||
|
||||
const grpc_channel_filter ClientAuthorityFilter::kFilter =
|
||||
MakePromiseBasedFilter<ClientAuthorityFilter, FilterEndpoint::kClient>(
|
||||
"authority");
|
||||
MakePromiseBasedFilter<ClientAuthorityFilter, FilterEndpoint::kClient>();
|
||||
|
||||
namespace {
|
||||
bool NeedsClientAuthorityFilter(const ChannelArgs& args) {
|
||||
|
|
|
@ -39,6 +39,8 @@ class ClientAuthorityFilter final
|
|||
public:
|
||||
static const grpc_channel_filter kFilter;
|
||||
|
||||
static absl::string_view TypeName() { return "authority"; }
|
||||
|
||||
static absl::StatusOr<std::unique_ptr<ClientAuthorityFilter>> Create(
|
||||
const ChannelArgs& args, ChannelFilter::Args);
|
||||
|
||||
|
|
|
@ -32,7 +32,6 @@
|
|||
#include <grpc/grpc.h>
|
||||
#include <grpc/impl/channel_arg_names.h>
|
||||
#include <grpc/impl/compression_types.h>
|
||||
#include <grpc/support/log.h>
|
||||
|
||||
#include "src/core/ext/filters/message_size/message_size_filter.h"
|
||||
#include "src/core/lib/channel/channel_args.h"
|
||||
|
@ -66,12 +65,12 @@ const grpc_channel_filter ClientCompressionFilter::kFilter =
|
|||
MakePromiseBasedFilter<ClientCompressionFilter, FilterEndpoint::kClient,
|
||||
kFilterExaminesServerInitialMetadata |
|
||||
kFilterExaminesInboundMessages |
|
||||
kFilterExaminesOutboundMessages>("compression");
|
||||
kFilterExaminesOutboundMessages>();
|
||||
const grpc_channel_filter ServerCompressionFilter::kFilter =
|
||||
MakePromiseBasedFilter<ServerCompressionFilter, FilterEndpoint::kServer,
|
||||
kFilterExaminesServerInitialMetadata |
|
||||
kFilterExaminesInboundMessages |
|
||||
kFilterExaminesOutboundMessages>("compression");
|
||||
kFilterExaminesOutboundMessages>();
|
||||
|
||||
absl::StatusOr<std::unique_ptr<ClientCompressionFilter>>
|
||||
ClientCompressionFilter::Create(const ChannelArgs& args, ChannelFilter::Args) {
|
||||
|
@ -104,9 +103,8 @@ ChannelCompression::ChannelCompression(const ChannelArgs& args)
|
|||
&name)) {
|
||||
name = "<unknown>";
|
||||
}
|
||||
gpr_log(GPR_ERROR,
|
||||
"default compression algorithm %s not enabled: switching to none",
|
||||
name);
|
||||
LOG(ERROR) << "default compression algorithm " << name
|
||||
<< " not enabled: switching to none";
|
||||
default_compression_algorithm_ = GRPC_COMPRESS_NONE;
|
||||
}
|
||||
}
|
||||
|
@ -114,8 +112,8 @@ ChannelCompression::ChannelCompression(const ChannelArgs& args)
|
|||
MessageHandle ChannelCompression::CompressMessage(
|
||||
MessageHandle message, grpc_compression_algorithm algorithm) const {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(compression)) {
|
||||
gpr_log(GPR_INFO, "CompressMessage: len=%" PRIdPTR " alg=%d flags=%d",
|
||||
message->payload()->Length(), algorithm, message->flags());
|
||||
LOG(INFO) << "CompressMessage: len=" << message->payload()->Length()
|
||||
<< " alg=" << algorithm << " flags=" << message->flags();
|
||||
}
|
||||
auto* call_tracer = MaybeGetContext<CallTracerInterface>();
|
||||
if (call_tracer != nullptr) {
|
||||
|
@ -144,10 +142,10 @@ MessageHandle ChannelCompression::CompressMessage(
|
|||
const float savings_ratio = 1.0f - static_cast<float>(after_size) /
|
||||
static_cast<float>(before_size);
|
||||
CHECK(grpc_compression_algorithm_name(algorithm, &algo_name));
|
||||
gpr_log(GPR_INFO,
|
||||
"Compressed[%s] %" PRIuPTR " bytes vs. %" PRIuPTR
|
||||
" bytes (%.2f%% savings)",
|
||||
algo_name, before_size, after_size, 100 * savings_ratio);
|
||||
LOG(INFO) << absl::StrFormat(
|
||||
"Compressed[%s] %" PRIuPTR " bytes vs. %" PRIuPTR
|
||||
" bytes (%.2f%% savings)",
|
||||
algo_name, before_size, after_size, 100 * savings_ratio);
|
||||
}
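// Worked example of the savings figure logged above (plain arithmetic, no
// gRPC types): 1000 bytes compressed to 400 bytes gives
// savings_ratio = 1 - 400/1000 = 0.6, printed as "60.00% savings".
#include <cstdio>

int main() {
  const unsigned before_size = 1000;
  const unsigned after_size = 400;
  const float savings_ratio = 1.0f - static_cast<float>(after_size) /
                                         static_cast<float>(before_size);
  std::printf("Compressed %u bytes vs. %u bytes (%.2f%% savings)\n",
              before_size, after_size, 100 * savings_ratio);
  return 0;
}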
|
||||
tmp.Swap(payload);
|
||||
flags |= GRPC_WRITE_INTERNAL_COMPRESS;
|
||||
|
@ -158,10 +156,9 @@ MessageHandle ChannelCompression::CompressMessage(
|
|||
if (GRPC_TRACE_FLAG_ENABLED(compression)) {
|
||||
const char* algo_name;
|
||||
CHECK(grpc_compression_algorithm_name(algorithm, &algo_name));
|
||||
gpr_log(GPR_INFO,
|
||||
"Algorithm '%s' enabled but decided not to compress. Input size: "
|
||||
"%" PRIuPTR,
|
||||
algo_name, payload->Length());
|
||||
LOG(INFO) << "Algorithm '" << algo_name
|
||||
<< "' enabled but decided not to compress. Input size: "
|
||||
<< payload->Length();
|
||||
}
|
||||
}
|
||||
return message;
|
||||
|
@ -170,9 +167,9 @@ MessageHandle ChannelCompression::CompressMessage(
|
|||
absl::StatusOr<MessageHandle> ChannelCompression::DecompressMessage(
|
||||
bool is_client, MessageHandle message, DecompressArgs args) const {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(compression)) {
|
||||
gpr_log(GPR_INFO, "DecompressMessage: len=%" PRIdPTR " max=%d alg=%d",
|
||||
message->payload()->Length(),
|
||||
args.max_recv_message_length.value_or(-1), args.algorithm);
|
||||
LOG(INFO) << "DecompressMessage: len=" << message->payload()->Length()
|
||||
<< " max=" << args.max_recv_message_length.value_or(-1)
|
||||
<< " alg=" << args.algorithm;
|
||||
}
|
||||
auto* call_tracer = MaybeGetContext<CallTracerInterface>();
|
||||
if (call_tracer != nullptr) {
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
#include <stdint.h>
|
||||
|
||||
#include "absl/status/statusor.h"
|
||||
#include "absl/strings/string_view.h"
|
||||
#include "absl/types/optional.h"
|
||||
|
||||
#include <grpc/impl/compression_types.h>
|
||||
|
@ -110,6 +111,8 @@ class ClientCompressionFilter final
|
|||
public:
|
||||
static const grpc_channel_filter kFilter;
|
||||
|
||||
static absl::string_view TypeName() { return "compression"; }
|
||||
|
||||
static absl::StatusOr<std::unique_ptr<ClientCompressionFilter>> Create(
|
||||
const ChannelArgs& args, ChannelFilter::Args filter_args);
|
||||
|
||||
|
@ -147,6 +150,8 @@ class ServerCompressionFilter final
|
|||
public:
|
||||
static const grpc_channel_filter kFilter;
|
||||
|
||||
static absl::string_view TypeName() { return "compression"; }
|
||||
|
||||
static absl::StatusOr<std::unique_ptr<ServerCompressionFilter>> Create(
|
||||
const ChannelArgs& args, ChannelFilter::Args filter_args);
|
||||
|
||||
|
|
|
@ -25,12 +25,12 @@
|
|||
#include <utility>
|
||||
|
||||
#include "absl/base/attributes.h"
|
||||
#include "absl/log/log.h"
|
||||
#include "absl/strings/string_view.h"
|
||||
#include "absl/types/optional.h"
|
||||
|
||||
#include <grpc/impl/channel_arg_names.h>
|
||||
#include <grpc/status.h>
|
||||
#include <grpc/support/log.h>
|
||||
|
||||
#include "src/core/lib/channel/channel_args.h"
|
||||
#include "src/core/lib/channel/channel_stack.h"
|
||||
|
@ -55,7 +55,7 @@ const NoInterceptor HttpServerFilter::Call::OnFinalize;
|
|||
|
||||
const grpc_channel_filter HttpServerFilter::kFilter =
|
||||
MakePromiseBasedFilter<HttpServerFilter, FilterEndpoint::kServer,
|
||||
kFilterExaminesServerInitialMetadata>("http-server");
|
||||
kFilterExaminesServerInitialMetadata>();
|
||||
|
||||
namespace {
|
||||
void FilterOutgoingMetadata(ServerMetadata* md) {
|
||||
|
@ -140,8 +140,8 @@ ServerMetadataHandle HttpServerFilter::Call::OnClientInitialMetadata(
|
|||
|
||||
void HttpServerFilter::Call::OnServerInitialMetadata(ServerMetadata& md) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(call)) {
|
||||
gpr_log(GPR_INFO, "%s[http-server] Write metadata",
|
||||
GetContext<Activity>()->DebugTag().c_str());
|
||||
LOG(INFO) << GetContext<Activity>()->DebugTag()
|
||||
<< "[http-server] Write metadata";
|
||||
}
|
||||
FilterOutgoingMetadata(&md);
|
||||
md.Set(HttpStatusMetadata(), 200);
|
||||
|
|
|
@ -36,6 +36,8 @@ class HttpServerFilter : public ImplementChannelFilter<HttpServerFilter> {
|
|||
public:
|
||||
static const grpc_channel_filter kFilter;
|
||||
|
||||
static absl::string_view TypeName() { return "http-server"; }
|
||||
|
||||
static absl::StatusOr<std::unique_ptr<HttpServerFilter>> Create(
|
||||
const ChannelArgs& args, ChannelFilter::Args filter_args);
|
||||
|
||||
|
|
|
@ -23,11 +23,11 @@
|
|||
#include <functional>
|
||||
#include <utility>
|
||||
|
||||
#include "absl/log/log.h"
|
||||
#include "absl/strings/str_format.h"
|
||||
|
||||
#include <grpc/impl/channel_arg_names.h>
|
||||
#include <grpc/status.h>
|
||||
#include <grpc/support/log.h>
|
||||
|
||||
#include "src/core/lib/channel/channel_args.h"
|
||||
#include "src/core/lib/channel/channel_stack.h"
|
||||
|
@ -137,12 +137,12 @@ size_t MessageSizeParser::ParserIndex() {
|
|||
const grpc_channel_filter ClientMessageSizeFilter::kFilter =
|
||||
MakePromiseBasedFilter<ClientMessageSizeFilter, FilterEndpoint::kClient,
|
||||
kFilterExaminesOutboundMessages |
|
||||
kFilterExaminesInboundMessages>("message_size");
|
||||
kFilterExaminesInboundMessages>();
|
||||
|
||||
const grpc_channel_filter ServerMessageSizeFilter::kFilter =
|
||||
MakePromiseBasedFilter<ServerMessageSizeFilter, FilterEndpoint::kServer,
|
||||
kFilterExaminesOutboundMessages |
|
||||
kFilterExaminesInboundMessages>("message_size");
|
||||
kFilterExaminesInboundMessages>();
|
||||
|
||||
absl::StatusOr<std::unique_ptr<ClientMessageSizeFilter>>
|
||||
ClientMessageSizeFilter::Create(const ChannelArgs& args, ChannelFilter::Args) {
|
||||
|
@ -160,19 +160,17 @@ ServerMetadataHandle CheckPayload(const Message& msg,
|
|||
bool is_client, bool is_send) {
|
||||
if (!max_length.has_value()) return nullptr;
|
||||
if (GRPC_TRACE_FLAG_ENABLED(call)) {
|
||||
gpr_log(GPR_INFO, "%s[message_size] %s len:%" PRIdPTR " max:%d",
|
||||
GetContext<Activity>()->DebugTag().c_str(),
|
||||
is_send ? "send" : "recv", msg.payload()->Length(), *max_length);
|
||||
LOG(INFO) << GetContext<Activity>()->DebugTag() << "[message_size] "
|
||||
<< (is_send ? "send" : "recv")
|
||||
<< " len:" << msg.payload()->Length() << " max:" << *max_length;
|
||||
}
|
||||
if (msg.payload()->Length() <= *max_length) return nullptr;
|
||||
auto r = Arena::MakePooled<ServerMetadata>();
|
||||
r->Set(GrpcStatusMetadata(), GRPC_STATUS_RESOURCE_EXHAUSTED);
|
||||
r->Set(GrpcMessageMetadata(),
|
||||
Slice::FromCopiedString(absl::StrFormat(
|
||||
"%s: %s message larger than max (%u vs. %d)",
|
||||
is_client ? "CLIENT" : "SERVER", is_send ? "Sent" : "Received",
|
||||
msg.payload()->Length(), *max_length)));
|
||||
return r;
|
||||
return ServerMetadataFromStatus(
|
||||
GRPC_STATUS_RESOURCE_EXHAUSTED,
|
||||
absl::StrFormat("%s: %s message larger than max (%u vs. %d)",
|
||||
is_client ? "CLIENT" : "SERVER",
|
||||
is_send ? "Sent" : "Received", msg.payload()->Length(),
|
||||
*max_length));
|
||||
}
|
||||
} // namespace
|
||||
|
||||
|
|
|
@ -89,6 +89,8 @@ class ServerMessageSizeFilter final
|
|||
public:
|
||||
static const grpc_channel_filter kFilter;
|
||||
|
||||
static absl::string_view TypeName() { return "message_size"; }
|
||||
|
||||
static absl::StatusOr<std::unique_ptr<ServerMessageSizeFilter>> Create(
|
||||
const ChannelArgs& args, ChannelFilter::Args filter_args);
|
||||
|
||||
|
@ -117,6 +119,8 @@ class ClientMessageSizeFilter final
|
|||
public:
|
||||
static const grpc_channel_filter kFilter;
|
||||
|
||||
static absl::string_view TypeName() { return "message_size"; }
|
||||
|
||||
static absl::StatusOr<std::unique_ptr<ClientMessageSizeFilter>> Create(
|
||||
const ChannelArgs& args, ChannelFilter::Args filter_args);
|
||||
|
||||
|
|
|
@ -71,7 +71,7 @@ absl::Status RbacFilter::Call::OnClientInitialMetadata(ClientMetadata& md,
|
|||
}
|
||||
|
||||
const grpc_channel_filter RbacFilter::kFilterVtable =
|
||||
MakePromiseBasedFilter<RbacFilter, FilterEndpoint::kServer>("rbac_filter");
|
||||
MakePromiseBasedFilter<RbacFilter, FilterEndpoint::kServer>();
|
||||
|
||||
RbacFilter::RbacFilter(size_t index,
|
||||
EvaluateArgs::PerChannelArgs per_channel_evaluate_args)
|
||||
|
|
|
@ -42,6 +42,8 @@ class RbacFilter : public ImplementChannelFilter<RbacFilter> {
|
|||
// and enforces the RBAC policy.
|
||||
static const grpc_channel_filter kFilterVtable;
|
||||
|
||||
static absl::string_view TypeName() { return "rbac_filter"; }
|
||||
|
||||
static absl::StatusOr<std::unique_ptr<RbacFilter>> Create(
|
||||
const ChannelArgs& args, ChannelFilter::Args filter_args);
|
||||
|
||||
|
|
|
@ -69,8 +69,7 @@ UniqueTypeName XdsOverrideHostAttribute::TypeName() {
|
|||
|
||||
const grpc_channel_filter StatefulSessionFilter::kFilter =
|
||||
MakePromiseBasedFilter<StatefulSessionFilter, FilterEndpoint::kClient,
|
||||
kFilterExaminesServerInitialMetadata>(
|
||||
"stateful_session_filter");
|
||||
kFilterExaminesServerInitialMetadata>();
|
||||
|
||||
absl::StatusOr<std::unique_ptr<StatefulSessionFilter>>
|
||||
StatefulSessionFilter::Create(const ChannelArgs&,
|
||||
|
|
|
@ -74,6 +74,8 @@ class StatefulSessionFilter
|
|||
public:
|
||||
static const grpc_channel_filter kFilter;
|
||||
|
||||
static absl::string_view TypeName() { return "stateful_session_filter"; }
|
||||
|
||||
static absl::StatusOr<std::unique_ptr<StatefulSessionFilter>> Create(
|
||||
const ChannelArgs& args, ChannelFilter::Args filter_args);
|
||||
|
||||
|
|
|
@ -36,7 +36,6 @@
|
|||
#include <grpc/slice_buffer.h>
|
||||
#include <grpc/status.h>
|
||||
#include <grpc/support/alloc.h>
|
||||
#include <grpc/support/log.h>
|
||||
#include <grpc/support/port_platform.h>
|
||||
#include <grpc/support/sync.h>
|
||||
|
||||
|
@ -66,7 +65,6 @@
|
|||
#include "src/core/lib/security/credentials/credentials.h"
|
||||
#include "src/core/lib/security/credentials/insecure/insecure_credentials.h"
|
||||
#include "src/core/lib/security/security_connector/security_connector.h"
|
||||
#include "src/core/lib/surface/api_trace.h"
|
||||
#include "src/core/lib/surface/channel.h"
|
||||
#include "src/core/lib/surface/channel_create.h"
|
||||
#include "src/core/lib/surface/channel_stack_type.h"
|
||||
|
@ -120,10 +118,12 @@ void Chttp2Connector::Connect(const Args& args, Result* result,
|
|||
CoreConfiguration::Get().handshaker_registry().AddHandshakers(
|
||||
HANDSHAKER_CLIENT, channel_args, args_.interested_parties,
|
||||
handshake_mgr_.get());
|
||||
Ref().release(); // Ref held by OnHandshakeDone().
|
||||
handshake_mgr_->DoHandshake(nullptr /* endpoint */, channel_args,
|
||||
args.deadline, nullptr /* acceptor */,
|
||||
OnHandshakeDone, this);
|
||||
handshake_mgr_->DoHandshake(
|
||||
/*endpoint=*/nullptr, channel_args, args.deadline, /*acceptor=*/nullptr,
|
||||
[self = RefAsSubclass<Chttp2Connector>()](
|
||||
absl::StatusOr<HandshakerArgs*> result) {
|
||||
self->OnHandshakeDone(std::move(result));
|
||||
});
|
||||
}
|
||||
|
||||
void Chttp2Connector::Shutdown(grpc_error_handle error) {
|
||||
|
@@ -135,54 +135,44 @@ void Chttp2Connector::Shutdown(grpc_error_handle error) {
|
|||
}
|
||||
}
|
||||
|
||||
void Chttp2Connector::OnHandshakeDone(void* arg, grpc_error_handle error) {
|
||||
auto* args = static_cast<HandshakerArgs*>(arg);
|
||||
Chttp2Connector* self = static_cast<Chttp2Connector*>(args->user_data);
|
||||
{
|
||||
MutexLock lock(&self->mu_);
|
||||
if (!error.ok() || self->shutdown_) {
|
||||
if (error.ok()) {
|
||||
error = GRPC_ERROR_CREATE("connector shutdown");
|
||||
// We were shut down after handshaking completed successfully, so
|
||||
// destroy the endpoint here.
|
||||
if (args->endpoint != nullptr) {
|
||||
grpc_endpoint_destroy(args->endpoint);
|
||||
grpc_slice_buffer_destroy(args->read_buffer);
|
||||
gpr_free(args->read_buffer);
|
||||
}
|
||||
}
|
||||
self->result_->Reset();
|
||||
NullThenSchedClosure(DEBUG_LOCATION, &self->notify_, error);
|
||||
} else if (args->endpoint != nullptr) {
|
||||
self->result_->transport =
|
||||
grpc_create_chttp2_transport(args->args, args->endpoint, true);
|
||||
CHECK_NE(self->result_->transport, nullptr);
|
||||
self->result_->socket_node =
|
||||
grpc_chttp2_transport_get_socket_node(self->result_->transport);
|
||||
self->result_->channel_args = args->args;
|
||||
self->Ref().release(); // Ref held by OnReceiveSettings()
|
||||
GRPC_CLOSURE_INIT(&self->on_receive_settings_, OnReceiveSettings, self,
|
||||
grpc_schedule_on_exec_ctx);
|
||||
grpc_chttp2_transport_start_reading(
|
||||
self->result_->transport, args->read_buffer,
|
||||
&self->on_receive_settings_, self->args_.interested_parties, nullptr);
|
||||
self->timer_handle_ = self->event_engine_->RunAfter(
|
||||
self->args_.deadline - Timestamp::Now(),
|
||||
[self = self->RefAsSubclass<Chttp2Connector>()] {
|
||||
ApplicationCallbackExecCtx callback_exec_ctx;
|
||||
ExecCtx exec_ctx;
|
||||
self->OnTimeout();
|
||||
});
|
||||
} else {
|
||||
// If the handshaking succeeded but there is no endpoint, then the
|
||||
// handshaker may have handed off the connection to some external
|
||||
// code. Just verify that exit_early flag is set.
|
||||
DCHECK(args->exit_early);
|
||||
NullThenSchedClosure(DEBUG_LOCATION, &self->notify_, error);
|
||||
void Chttp2Connector::OnHandshakeDone(absl::StatusOr<HandshakerArgs*> result) {
|
||||
MutexLock lock(&mu_);
|
||||
if (!result.ok() || shutdown_) {
|
||||
if (result.ok()) {
|
||||
result = GRPC_ERROR_CREATE("connector shutdown");
|
||||
}
|
||||
self->handshake_mgr_.reset();
|
||||
result_->Reset();
|
||||
NullThenSchedClosure(DEBUG_LOCATION, ¬ify_, result.status());
|
||||
} else if ((*result)->endpoint != nullptr) {
|
||||
result_->transport = grpc_create_chttp2_transport(
|
||||
(*result)->args, std::move((*result)->endpoint), true);
|
||||
CHECK_NE(result_->transport, nullptr);
|
||||
result_->socket_node =
|
||||
grpc_chttp2_transport_get_socket_node(result_->transport);
|
||||
result_->channel_args = std::move((*result)->args);
|
||||
Ref().release(); // Ref held by OnReceiveSettings()
|
||||
GRPC_CLOSURE_INIT(&on_receive_settings_, OnReceiveSettings, this,
|
||||
grpc_schedule_on_exec_ctx);
|
||||
grpc_chttp2_transport_start_reading(
|
||||
result_->transport, (*result)->read_buffer.c_slice_buffer(),
|
||||
&on_receive_settings_, args_.interested_parties, nullptr);
|
||||
timer_handle_ = event_engine_->RunAfter(
|
||||
args_.deadline - Timestamp::Now(),
|
||||
[self = RefAsSubclass<Chttp2Connector>()]() mutable {
|
||||
ApplicationCallbackExecCtx callback_exec_ctx;
|
||||
ExecCtx exec_ctx;
|
||||
self->OnTimeout();
|
||||
// Ensure the Chttp2Connector is deleted under an ExecCtx.
|
||||
self.reset();
|
||||
});
|
||||
} else {
|
||||
// If the handshaking succeeded but there is no endpoint, then the
|
||||
// handshaker may have handed off the connection to some external
|
||||
// code. Just verify that exit_early flag is set.
|
||||
DCHECK((*result)->exit_early);
|
||||
NullThenSchedClosure(DEBUG_LOCATION, ¬ify_, result.status());
|
||||
}
|
||||
self->Unref();
|
||||
handshake_mgr_.reset();
|
||||
}
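// The rewritten OnHandshakeDone above is part of a move from a C-style
// completion callback (void* arg + grpc_error_handle) to a lambda that
// receives an absl::StatusOr and keeps its owner alive through its capture.
// A standalone sketch of that shape with hypothetical Handshaker/Connector
// types, not the real gRPC classes:
#include <functional>
#include <memory>
#include "absl/status/statusor.h"

struct HandshakeResult { /* endpoint, channel args, read buffer, ... */ };

class Handshaker {
 public:
  void DoHandshake(std::function<void(absl::StatusOr<HandshakeResult>)> done) {
    done_ = std::move(done);
  }
  void Finish(absl::StatusOr<HandshakeResult> result) {
    done_(std::move(result));
  }

 private:
  std::function<void(absl::StatusOr<HandshakeResult>)> done_;
};

class Connector : public std::enable_shared_from_this<Connector> {
 public:
  void Connect(Handshaker& handshaker) {
    // Capturing shared_from_this() plays the role the Ref()/RefAsSubclass<>()
    // calls play above: the connector outlives the pending handshake.
    handshaker.DoHandshake(
        [self = shared_from_this()](absl::StatusOr<HandshakeResult> result) {
          self->OnHandshakeDone(std::move(result));
        });
  }

 private:
  void OnHandshakeDone(absl::StatusOr<HandshakeResult> /*result*/) {}
};

// Usage: auto connector = std::make_shared<Connector>(); connector->Connect(h);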
|
||||
|
||||
void Chttp2Connector::OnReceiveSettings(void* arg, grpc_error_handle error) {
|
||||
|
@ -246,10 +236,8 @@ class Chttp2SecureClientChannelFactory : public ClientChannelFactory {
|
|||
const grpc_resolved_address& address, const ChannelArgs& args) override {
|
||||
absl::StatusOr<ChannelArgs> new_args = GetSecureNamingChannelArgs(args);
|
||||
if (!new_args.ok()) {
|
||||
gpr_log(GPR_ERROR,
|
||||
"Failed to create channel args during subchannel creation: %s; "
|
||||
"Got args: %s",
|
||||
new_args.status().ToString().c_str(), args.ToString().c_str());
|
||||
LOG(ERROR) << "Failed to create channel args during subchannel creation: "
|
||||
<< new_args.status() << "; Got args: " << args.ToString();
|
||||
return nullptr;
|
||||
}
|
||||
RefCountedPtr<Subchannel> s = Subchannel::Create(
|
||||
|
@ -321,8 +309,9 @@ grpc_channel* grpc_channel_create(const char* target,
|
|||
grpc_channel_credentials* creds,
|
||||
const grpc_channel_args* c_args) {
|
||||
grpc_core::ExecCtx exec_ctx;
|
||||
GRPC_API_TRACE("grpc_secure_channel_create(target=%s, creds=%p, args=%p)", 3,
|
||||
(target, (void*)creds, (void*)c_args));
|
||||
GRPC_TRACE_LOG(api, INFO)
|
||||
<< "grpc_secure_channel_create(target=" << target
|
||||
<< ", creds=" << (void*)creds << ", args=" << (void*)c_args << ")";
|
||||
grpc_channel* channel = nullptr;
|
||||
grpc_error_handle error;
|
||||
if (creds != nullptr) {
|
||||
|
@ -361,9 +350,9 @@ grpc_channel* grpc_channel_create_from_fd(const char* target, int fd,
|
|||
grpc_channel_credentials* creds,
|
||||
const grpc_channel_args* args) {
|
||||
grpc_core::ExecCtx exec_ctx;
|
||||
GRPC_API_TRACE(
|
||||
"grpc_channel_create_from_fd(target=%p, fd=%d, creds=%p, args=%p)", 4,
|
||||
(target, fd, creds, args));
|
||||
GRPC_TRACE_LOG(api, INFO)
|
||||
<< "grpc_channel_create_from_fd(target=" << target << ", fd=" << fd
|
||||
<< ", creds=" << creds << ", args=" << args << ")";
|
||||
// For now, we only support insecure channel credentials.
|
||||
if (creds == nullptr ||
|
||||
creds->type() != grpc_core::InsecureCredentials::Type()) {
|
||||
|
@ -380,12 +369,12 @@ grpc_channel* grpc_channel_create_from_fd(const char* target, int fd,
|
|||
|
||||
int flags = fcntl(fd, F_GETFL, 0);
|
||||
CHECK_EQ(fcntl(fd, F_SETFL, flags | O_NONBLOCK), 0);
|
||||
grpc_endpoint* client = grpc_tcp_create_from_fd(
|
||||
grpc_core::OrphanablePtr<grpc_endpoint> client(grpc_tcp_create_from_fd(
|
||||
grpc_fd_create(fd, "client", true),
|
||||
grpc_event_engine::experimental::ChannelArgsEndpointConfig(final_args),
|
||||
"fd-client");
|
||||
"fd-client"));
|
||||
grpc_core::Transport* transport =
|
||||
grpc_create_chttp2_transport(final_args, client, true);
|
||||
grpc_create_chttp2_transport(final_args, std::move(client), true);
|
||||
CHECK(transport);
|
||||
auto channel = grpc_core::ChannelCreate(
|
||||
target, final_args, GRPC_CLIENT_DIRECT_CHANNEL, transport);
|
||||
|
|
|
@ -41,7 +41,7 @@ class Chttp2Connector : public SubchannelConnector {
|
|||
void Shutdown(grpc_error_handle error) override;
|
||||
|
||||
private:
|
||||
static void OnHandshakeDone(void* arg, grpc_error_handle error);
|
||||
void OnHandshakeDone(absl::StatusOr<HandshakerArgs*> result);
|
||||
static void OnReceiveSettings(void* arg, grpc_error_handle error);
|
||||
void OnTimeout() ABSL_LOCKS_EXCLUDED(mu_);
|
||||
|
||||
|
|
|
@ -84,7 +84,6 @@
|
|||
#include "src/core/lib/security/credentials/credentials.h"
|
||||
#include "src/core/lib/security/credentials/insecure/insecure_credentials.h"
|
||||
#include "src/core/lib/security/security_connector/security_connector.h"
|
||||
#include "src/core/lib/surface/api_trace.h"
|
||||
#include "src/core/lib/transport/error_utils.h"
|
||||
#include "src/core/lib/transport/transport.h"
|
||||
#include "src/core/lib/uri/uri_parser.h"
|
||||
|
@ -107,6 +106,13 @@ const char kUnixUriPrefix[] = "unix:";
|
|||
const char kUnixAbstractUriPrefix[] = "unix-abstract:";
|
||||
const char kVSockUriPrefix[] = "vsock:";
|
||||
|
||||
struct AcceptorDeleter {
  void operator()(grpc_tcp_server_acceptor* acceptor) const {
    gpr_free(acceptor);
  }
};
|
||||
using AcceptorPtr = std::unique_ptr<grpc_tcp_server_acceptor, AcceptorDeleter>;
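// AcceptorPtr above wraps a C-allocated acceptor in std::unique_ptr with a
// deleter that calls gpr_free(), so ownership can be moved instead of freed by
// hand on every path. The same pattern with plain malloc/free, standalone and
// independent of gRPC:
#include <cstdlib>
#include <memory>

struct CFreeDeleter {
  void operator()(void* p) const { std::free(p); }
};
using CBufferPtr = std::unique_ptr<char, CFreeDeleter>;

CBufferPtr MakeBuffer(std::size_t n) {
  return CBufferPtr(static_cast<char*>(std::malloc(n)));
}
// The buffer is released exactly once, wherever the last owner goes away.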
|
||||
|
||||
class Chttp2ServerListener : public Server::ListenerInterface {
|
||||
public:
|
||||
static grpc_error_handle Create(Server* server, grpc_resolved_address* addr,
|
||||
|
@ -167,15 +173,15 @@ class Chttp2ServerListener : public Server::ListenerInterface {
|
|||
class HandshakingState : public InternallyRefCounted<HandshakingState> {
|
||||
public:
|
||||
HandshakingState(RefCountedPtr<ActiveConnection> connection_ref,
|
||||
grpc_pollset* accepting_pollset,
|
||||
grpc_tcp_server_acceptor* acceptor,
|
||||
grpc_pollset* accepting_pollset, AcceptorPtr acceptor,
|
||||
const ChannelArgs& args);
|
||||
|
||||
~HandshakingState() override;
|
||||
|
||||
void Orphan() override;
|
||||
|
||||
void Start(grpc_endpoint* endpoint, const ChannelArgs& args);
|
||||
void Start(OrphanablePtr<grpc_endpoint> endpoint,
|
||||
const ChannelArgs& args);
|
||||
|
||||
// Needed to be able to grab an external ref in
|
||||
// ActiveConnection::Start()
|
||||
|
@ -184,10 +190,10 @@ class Chttp2ServerListener : public Server::ListenerInterface {
|
|||
private:
|
||||
void OnTimeout() ABSL_LOCKS_EXCLUDED(&connection_->mu_);
|
||||
static void OnReceiveSettings(void* arg, grpc_error_handle /* error */);
|
||||
static void OnHandshakeDone(void* arg, grpc_error_handle error);
|
||||
void OnHandshakeDone(absl::StatusOr<HandshakerArgs*> result);
|
||||
RefCountedPtr<ActiveConnection> const connection_;
|
||||
grpc_pollset* const accepting_pollset_;
|
||||
grpc_tcp_server_acceptor* acceptor_;
|
||||
AcceptorPtr acceptor_;
|
||||
RefCountedPtr<HandshakeManager> handshake_mgr_
|
||||
ABSL_GUARDED_BY(&connection_->mu_);
|
||||
// State for enforcing handshake timeout on receiving HTTP/2 settings.
|
||||
|
@ -198,8 +204,7 @@ class Chttp2ServerListener : public Server::ListenerInterface {
|
|||
grpc_pollset_set* const interested_parties_;
|
||||
};
|
||||
|
||||
ActiveConnection(grpc_pollset* accepting_pollset,
|
||||
grpc_tcp_server_acceptor* acceptor,
|
||||
ActiveConnection(grpc_pollset* accepting_pollset, AcceptorPtr acceptor,
|
||||
EventEngine* event_engine, const ChannelArgs& args,
|
||||
MemoryOwner memory_owner);
|
||||
~ActiveConnection() override;
|
||||
|
@ -209,7 +214,7 @@ class Chttp2ServerListener : public Server::ListenerInterface {
|
|||
void SendGoAway();
|
||||
|
||||
void Start(RefCountedPtr<Chttp2ServerListener> listener,
|
||||
grpc_endpoint* endpoint, const ChannelArgs& args);
|
||||
OrphanablePtr<grpc_endpoint> endpoint, const ChannelArgs& args);
|
||||
|
||||
// Needed to be able to grab an external ref in
|
||||
// Chttp2ServerListener::OnAccept()
|
||||
|
@ -367,11 +372,11 @@ Timestamp GetConnectionDeadline(const ChannelArgs& args) {
|
|||
|
||||
Chttp2ServerListener::ActiveConnection::HandshakingState::HandshakingState(
|
||||
RefCountedPtr<ActiveConnection> connection_ref,
|
||||
grpc_pollset* accepting_pollset, grpc_tcp_server_acceptor* acceptor,
|
||||
grpc_pollset* accepting_pollset, AcceptorPtr acceptor,
|
||||
const ChannelArgs& args)
|
||||
: connection_(std::move(connection_ref)),
|
||||
accepting_pollset_(accepting_pollset),
|
||||
acceptor_(acceptor),
|
||||
acceptor_(std::move(acceptor)),
|
||||
handshake_mgr_(MakeRefCounted<HandshakeManager>()),
|
||||
deadline_(GetConnectionDeadline(args)),
|
||||
interested_parties_(grpc_pollset_set_create()) {
|
||||
|
@ -387,7 +392,6 @@ Chttp2ServerListener::ActiveConnection::HandshakingState::~HandshakingState() {
|
|||
grpc_pollset_set_del_pollset(interested_parties_, accepting_pollset_);
|
||||
}
|
||||
grpc_pollset_set_destroy(interested_parties_);
|
||||
gpr_free(acceptor_);
|
||||
}
|
||||
|
||||
void Chttp2ServerListener::ActiveConnection::HandshakingState::Orphan() {
|
||||
|
@ -401,16 +405,18 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::Orphan() {
|
|||
}
|
||||
|
||||
void Chttp2ServerListener::ActiveConnection::HandshakingState::Start(
|
||||
grpc_endpoint* endpoint, const ChannelArgs& channel_args) {
|
||||
Ref().release(); // Held by OnHandshakeDone
|
||||
OrphanablePtr<grpc_endpoint> endpoint, const ChannelArgs& channel_args) {
|
||||
RefCountedPtr<HandshakeManager> handshake_mgr;
|
||||
{
|
||||
MutexLock lock(&connection_->mu_);
|
||||
if (handshake_mgr_ == nullptr) return;
|
||||
handshake_mgr = handshake_mgr_;
|
||||
}
|
||||
handshake_mgr->DoHandshake(endpoint, channel_args, deadline_, acceptor_,
|
||||
OnHandshakeDone, this);
|
||||
handshake_mgr->DoHandshake(
|
||||
std::move(endpoint), channel_args, deadline_, acceptor_.get(),
|
||||
[self = Ref()](absl::StatusOr<HandshakerArgs*> result) {
|
||||
self->OnHandshakeDone(std::move(result));
|
||||
});
|
||||
}
|
||||
|
||||
void Chttp2ServerListener::ActiveConnection::HandshakingState::OnTimeout() {
|
||||
|
@ -444,61 +450,50 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::
|
|||
}
|
||||
|
||||
void Chttp2ServerListener::ActiveConnection::HandshakingState::OnHandshakeDone(
|
||||
void* arg, grpc_error_handle error) {
|
||||
auto* args = static_cast<HandshakerArgs*>(arg);
|
||||
HandshakingState* self = static_cast<HandshakingState*>(args->user_data);
|
||||
absl::StatusOr<HandshakerArgs*> result) {
|
||||
OrphanablePtr<HandshakingState> handshaking_state_ref;
|
||||
RefCountedPtr<HandshakeManager> handshake_mgr;
|
||||
bool cleanup_connection = false;
|
||||
bool release_connection = false;
|
||||
{
|
||||
MutexLock connection_lock(&self->connection_->mu_);
|
||||
if (!error.ok() || self->connection_->shutdown_) {
|
||||
std::string error_str = StatusToString(error);
|
||||
MutexLock connection_lock(&connection_->mu_);
|
||||
if (!result.ok() || connection_->shutdown_) {
|
||||
cleanup_connection = true;
|
||||
release_connection = true;
|
||||
if (error.ok() && args->endpoint != nullptr) {
|
||||
// We were shut down or stopped serving after handshaking completed
|
||||
// successfully, so destroy the endpoint here.
|
||||
grpc_endpoint_destroy(args->endpoint);
|
||||
grpc_slice_buffer_destroy(args->read_buffer);
|
||||
gpr_free(args->read_buffer);
|
||||
}
|
||||
} else {
|
||||
// If the handshaking succeeded but there is no endpoint, then the
|
||||
// handshaker may have handed off the connection to some external
|
||||
// code, so we can just clean up here without creating a transport.
|
||||
if (args->endpoint != nullptr) {
|
||||
if ((*result)->endpoint != nullptr) {
|
||||
RefCountedPtr<Transport> transport =
|
||||
grpc_create_chttp2_transport(args->args, args->endpoint, false)
|
||||
grpc_create_chttp2_transport((*result)->args,
|
||||
std::move((*result)->endpoint), false)
|
||||
->Ref();
|
||||
grpc_error_handle channel_init_err =
|
||||
self->connection_->listener_->server_->SetupTransport(
|
||||
transport.get(), self->accepting_pollset_, args->args,
|
||||
connection_->listener_->server_->SetupTransport(
|
||||
transport.get(), accepting_pollset_, (*result)->args,
|
||||
grpc_chttp2_transport_get_socket_node(transport.get()));
|
||||
if (channel_init_err.ok()) {
|
||||
// Use notify_on_receive_settings callback to enforce the
|
||||
// handshake deadline.
|
||||
self->connection_->transport_ =
|
||||
connection_->transport_ =
|
||||
DownCast<grpc_chttp2_transport*>(transport.get())->Ref();
|
||||
self->Ref().release(); // Held by OnReceiveSettings().
|
||||
GRPC_CLOSURE_INIT(&self->on_receive_settings_, OnReceiveSettings,
|
||||
self, grpc_schedule_on_exec_ctx);
|
||||
Ref().release(); // Held by OnReceiveSettings().
|
||||
GRPC_CLOSURE_INIT(&on_receive_settings_, OnReceiveSettings, this,
|
||||
grpc_schedule_on_exec_ctx);
|
||||
// If the listener has been configured with a config fetcher, we
// need to watch the transport being closed so that we can keep an
// updated list of active connections.
|
||||
grpc_closure* on_close = nullptr;
|
||||
if (self->connection_->listener_->config_fetcher_watcher_ !=
|
||||
nullptr) {
|
||||
if (connection_->listener_->config_fetcher_watcher_ != nullptr) {
|
||||
// Refs held by OnClose()
|
||||
self->connection_->Ref().release();
|
||||
on_close = &self->connection_->on_close_;
|
||||
connection_->Ref().release();
|
||||
on_close = &connection_->on_close_;
|
||||
} else {
|
||||
// Remove the connection from the connections_ map since OnClose()
|
||||
// will not be invoked when a config fetcher is set.
|
||||
auto connection_quota =
|
||||
self->connection_->listener_->connection_quota_->Ref()
|
||||
.release();
|
||||
connection_->listener_->connection_quota_->Ref().release();
|
||||
auto on_close_transport = [](void* arg,
|
||||
grpc_error_handle /*handle*/) {
|
||||
ConnectionQuota* connection_quota =
|
||||
|
@ -511,11 +506,10 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::OnHandshakeDone(
|
|||
cleanup_connection = true;
|
||||
}
|
||||
grpc_chttp2_transport_start_reading(
|
||||
transport.get(), args->read_buffer, &self->on_receive_settings_,
|
||||
nullptr, on_close);
|
||||
self->timer_handle_ = self->connection_->event_engine_->RunAfter(
|
||||
self->deadline_ - Timestamp::Now(),
|
||||
[self = self->Ref()]() mutable {
|
||||
transport.get(), (*result)->read_buffer.c_slice_buffer(),
|
||||
&on_receive_settings_, nullptr, on_close);
|
||||
timer_handle_ = connection_->event_engine_->RunAfter(
|
||||
deadline_ - Timestamp::Now(), [self = Ref()]() mutable {
|
||||
ApplicationCallbackExecCtx callback_exec_ctx;
|
||||
ExecCtx exec_ctx;
|
||||
self->OnTimeout();
|
||||
|
@ -527,8 +521,6 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::OnHandshakeDone(
|
|||
LOG(ERROR) << "Failed to create channel: "
|
||||
<< StatusToString(channel_init_err);
|
||||
transport->Orphan();
|
||||
grpc_slice_buffer_destroy(args->read_buffer);
|
||||
gpr_free(args->read_buffer);
|
||||
cleanup_connection = true;
|
||||
release_connection = true;
|
||||
}
|
||||
|
@ -541,25 +533,21 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::OnHandshakeDone(
|
|||
// shutdown the handshake when the listener needs to stop serving.
|
||||
// Avoid calling the destructor of HandshakeManager and HandshakingState
|
||||
// from within the critical region.
|
||||
handshake_mgr = std::move(self->handshake_mgr_);
|
||||
handshaking_state_ref = std::move(self->connection_->handshaking_state_);
|
||||
handshake_mgr = std::move(handshake_mgr_);
|
||||
handshaking_state_ref = std::move(connection_->handshaking_state_);
|
||||
}
|
||||
gpr_free(self->acceptor_);
|
||||
self->acceptor_ = nullptr;
|
||||
OrphanablePtr<ActiveConnection> connection;
|
||||
if (cleanup_connection) {
|
||||
MutexLock listener_lock(&self->connection_->listener_->mu_);
|
||||
MutexLock listener_lock(&connection_->listener_->mu_);
|
||||
if (release_connection) {
|
||||
self->connection_->listener_->connection_quota_->ReleaseConnections(1);
|
||||
connection_->listener_->connection_quota_->ReleaseConnections(1);
|
||||
}
|
||||
auto it = self->connection_->listener_->connections_.find(
|
||||
self->connection_.get());
|
||||
if (it != self->connection_->listener_->connections_.end()) {
|
||||
auto it = connection_->listener_->connections_.find(connection_.get());
|
||||
if (it != connection_->listener_->connections_.end()) {
|
||||
connection = std::move(it->second);
|
||||
self->connection_->listener_->connections_.erase(it);
|
||||
connection_->listener_->connections_.erase(it);
|
||||
}
|
||||
}
|
||||
self->Unref();
|
||||
}
|
||||
|
||||
//
|
||||
|
@ -567,11 +555,11 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::OnHandshakeDone(
|
|||
//
|
||||
|
||||
Chttp2ServerListener::ActiveConnection::ActiveConnection(
|
||||
grpc_pollset* accepting_pollset, grpc_tcp_server_acceptor* acceptor,
|
||||
grpc_pollset* accepting_pollset, AcceptorPtr acceptor,
|
||||
EventEngine* event_engine, const ChannelArgs& args,
|
||||
MemoryOwner memory_owner)
|
||||
: handshaking_state_(memory_owner.MakeOrphanable<HandshakingState>(
|
||||
Ref(), accepting_pollset, acceptor, args)),
|
||||
Ref(), accepting_pollset, std::move(acceptor), args)),
|
||||
event_engine_(event_engine) {
|
||||
GRPC_CLOSURE_INIT(&on_close_, ActiveConnection::OnClose, this,
|
||||
grpc_schedule_on_exec_ctx);
|
||||
|
@ -625,29 +613,21 @@ void Chttp2ServerListener::ActiveConnection::SendGoAway() {
|
|||
}
|
||||
|
||||
void Chttp2ServerListener::ActiveConnection::Start(
|
||||
RefCountedPtr<Chttp2ServerListener> listener, grpc_endpoint* endpoint,
|
||||
const ChannelArgs& args) {
|
||||
RefCountedPtr<HandshakingState> handshaking_state_ref;
|
||||
RefCountedPtr<Chttp2ServerListener> listener,
|
||||
OrphanablePtr<grpc_endpoint> endpoint, const ChannelArgs& args) {
|
||||
listener_ = std::move(listener);
|
||||
if (listener_->tcp_server_ != nullptr) {
|
||||
grpc_tcp_server_ref(listener_->tcp_server_);
|
||||
}
|
||||
RefCountedPtr<HandshakingState> handshaking_state_ref;
|
||||
{
|
||||
ReleasableMutexLock lock(&mu_);
|
||||
if (shutdown_) {
|
||||
lock.Release();
|
||||
// If the Connection is already shutdown at this point, it implies the
|
||||
// owning Chttp2ServerListener and all associated ActiveConnections have
|
||||
// been orphaned. The generated endpoints need to be shutdown here to
|
||||
// ensure the tcp connections are closed appropriately.
|
||||
grpc_endpoint_destroy(endpoint);
|
||||
return;
|
||||
}
|
||||
MutexLock lock(&mu_);
|
||||
// If the Connection is already shutdown at this point, it implies the
|
||||
// owning Chttp2ServerListener and all associated ActiveConnections have
|
||||
// been orphaned.
|
||||
if (shutdown_) return;
|
||||
// Hold a ref to HandshakingState to allow starting the handshake outside
|
||||
// the critical region.
|
||||
handshaking_state_ref = handshaking_state_->Ref();
|
||||
}
|
||||
handshaking_state_ref->Start(endpoint, args);
|
||||
handshaking_state_ref->Start(std::move(endpoint), args);
|
||||
}
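// The simplified Start() above follows a common pattern: check the shutdown
// flag and grab a ref under the mutex, then run the slow work (the handshake)
// only after the lock is released. A standalone sketch with hypothetical
// Worker/Connection types:
#include <memory>
#include <mutex>

class Worker {
 public:
  void Run() { /* long-running handshake */ }
};

class Connection {
 public:
  void Start() {
    std::shared_ptr<Worker> worker;
    {
      std::lock_guard<std::mutex> lock(mu_);
      if (shutdown_) return;  // owner already orphaned; nothing to start
      worker = worker_;       // the ref escapes the critical section
    }
    worker->Run();            // no lock held while the handshake runs
  }

 private:
  std::mutex mu_;
  bool shutdown_ = false;
  std::shared_ptr<Worker> worker_ = std::make_shared<Worker>();
};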
|
||||
|
||||
void Chttp2ServerListener::ActiveConnection::OnClose(
|
||||
|
@@ -841,48 +821,41 @@ void Chttp2ServerListener::AcceptConnectedEndpoint(
|
|||
|
||||
void Chttp2ServerListener::OnAccept(void* arg, grpc_endpoint* tcp,
|
||||
grpc_pollset* accepting_pollset,
|
||||
grpc_tcp_server_acceptor* acceptor) {
|
||||
grpc_tcp_server_acceptor* server_acceptor) {
|
||||
Chttp2ServerListener* self = static_cast<Chttp2ServerListener*>(arg);
|
||||
ChannelArgs args = self->args_;
|
||||
OrphanablePtr<grpc_endpoint> endpoint(tcp);
|
||||
AcceptorPtr acceptor(server_acceptor);
|
||||
RefCountedPtr<grpc_server_config_fetcher::ConnectionManager>
|
||||
connection_manager;
|
||||
{
|
||||
MutexLock lock(&self->mu_);
|
||||
connection_manager = self->connection_manager_;
|
||||
}
|
||||
auto endpoint_cleanup = [&]() {
|
||||
grpc_endpoint_destroy(tcp);
|
||||
gpr_free(acceptor);
|
||||
};
|
||||
if (!self->connection_quota_->AllowIncomingConnection(
|
||||
self->memory_quota_, grpc_endpoint_get_peer(tcp))) {
|
||||
endpoint_cleanup();
|
||||
self->memory_quota_, grpc_endpoint_get_peer(endpoint.get()))) {
|
||||
return;
|
||||
}
|
||||
if (self->config_fetcher_ != nullptr) {
|
||||
if (connection_manager == nullptr) {
|
||||
endpoint_cleanup();
|
||||
return;
|
||||
}
|
||||
absl::StatusOr<ChannelArgs> args_result =
|
||||
connection_manager->UpdateChannelArgsForConnection(args, tcp);
|
||||
if (!args_result.ok()) {
|
||||
endpoint_cleanup();
|
||||
return;
|
||||
}
|
||||
grpc_error_handle error;
|
||||
args = self->args_modifier_(*args_result, &error);
|
||||
if (!error.ok()) {
|
||||
endpoint_cleanup();
|
||||
return;
|
||||
}
|
||||
}
|
||||
auto memory_owner = self->memory_quota_->CreateMemoryOwner();
|
||||
EventEngine* const event_engine = self->args_.GetObject<EventEngine>();
|
||||
auto connection = memory_owner.MakeOrphanable<ActiveConnection>(
|
||||
accepting_pollset, acceptor, event_engine, args, std::move(memory_owner));
|
||||
// We no longer own acceptor
|
||||
acceptor = nullptr;
|
||||
accepting_pollset, std::move(acceptor), event_engine, args,
|
||||
std::move(memory_owner));
|
||||
// Hold a ref to connection to allow starting handshake outside the
|
||||
// critical region
|
||||
RefCountedPtr<ActiveConnection> connection_ref = connection->Ref();
|
||||
|
@ -893,19 +866,21 @@ void Chttp2ServerListener::OnAccept(void* arg, grpc_endpoint* tcp,
// connection manager has changed.
if (!self->shutdown_ && self->is_serving_ &&
connection_manager == self->connection_manager_) {
// This ref needs to be taken in the critical region after having made
// sure that the listener has not been Orphaned, so as to avoid
// heap-use-after-free issues where `Ref()` is invoked when the ref of
// tcp_server_ has already reached 0. (Ref() implementation of
// Chttp2ServerListener is grpc_tcp_server_ref().)
// The ref for both the listener and tcp_server need to be taken in the
// critical region after having made sure that the listener has not been
// Orphaned, so as to avoid heap-use-after-free issues where `Ref()` is
// invoked when the listener is already shutdown. Note that the listener
// holds a ref to the tcp_server but this ref is given away when the
// listener is orphaned (shutdown).
if (self->tcp_server_ != nullptr) {
grpc_tcp_server_ref(self->tcp_server_);
}
listener_ref = self->RefAsSubclass<Chttp2ServerListener>();
self->connections_.emplace(connection.get(), std::move(connection));
}
}
if (connection != nullptr) {
endpoint_cleanup();
} else {
connection_ref->Start(std::move(listener_ref), tcp, args);
if (connection == nullptr && listener_ref != nullptr) {
connection_ref->Start(std::move(listener_ref), std::move(endpoint), args);
}
}

@ -1103,8 +1078,8 @@ int grpc_server_add_http2_port(grpc_server* server, const char* addr,
int port_num = 0;
grpc_core::Server* core_server = grpc_core::Server::FromC(server);
grpc_core::ChannelArgs args = core_server->channel_args();
GRPC_API_TRACE("grpc_server_add_http2_port(server=%p, addr=%s, creds=%p)", 3,
(server, addr, creds));
GRPC_TRACE_LOG(api, INFO) << "grpc_server_add_http2_port(server=" << server
<< ", addr=" << addr << ", creds=" << creds << ")";
// Create security context.
if (creds == nullptr) {
err = GRPC_ERROR_CREATE(
@ -1161,15 +1136,17 @@ void grpc_server_add_channel_from_fd(grpc_server* server, int fd,
std::string name = absl::StrCat("fd:", fd);
auto memory_quota =
server_args.GetObject<grpc_core::ResourceQuota>()->memory_quota();
grpc_endpoint* server_endpoint = grpc_tcp_create_from_fd(
grpc_fd_create(fd, name.c_str(), true),
grpc_event_engine::experimental::ChannelArgsEndpointConfig(server_args),
name);
grpc_core::OrphanablePtr<grpc_endpoint> server_endpoint(
grpc_tcp_create_from_fd(
grpc_fd_create(fd, name.c_str(), true),
grpc_event_engine::experimental::ChannelArgsEndpointConfig(
server_args),
name));
for (grpc_pollset* pollset : core_server->pollsets()) {
grpc_endpoint_add_to_pollset(server_endpoint, pollset);
grpc_endpoint_add_to_pollset(server_endpoint.get(), pollset);
}
grpc_core::Transport* transport = grpc_create_chttp2_transport(
server_args, server_endpoint, false // is_client
server_args, std::move(server_endpoint), false // is_client
);
grpc_error_handle error =
core_server->SetupTransport(transport, nullptr, server_args, nullptr);
@ -1197,8 +1174,9 @@ absl::Status grpc_server_add_passive_listener(
std::shared_ptr<grpc_core::experimental::PassiveListenerImpl>
passive_listener) {
grpc_core::ExecCtx exec_ctx;
GRPC_API_TRACE("grpc_server_add_passive_listener(server=%p, credentials=%p)",
2, (server, credentials));
GRPC_TRACE_LOG(api, INFO)
<< "grpc_server_add_passive_listener(server=" << server
<< ", credentials=" << credentials << ")";
// Create security context.
if (credentials == nullptr) {
return absl::UnavailableError(
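
The server changes above replace the manual `endpoint_cleanup()` lambda in `OnAccept` with an `OrphanablePtr<grpc_endpoint>` that owns the accepted endpoint, so every early-return path releases the TCP connection automatically. The sketch below illustrates the same RAII idea with a standard `unique_ptr` and a custom deleter; `fake_endpoint`, `destroy_endpoint`, and `accept_connection` are illustrative stand-ins, not gRPC APIs.

```cpp
#include <cstdio>
#include <memory>
#include <utility>

// Illustrative stand-ins for an accepted connection; not gRPC types.
struct fake_endpoint { int fd; };

void destroy_endpoint(fake_endpoint* ep) {
  // Closing the connection would happen here.
  std::printf("endpoint %d destroyed\n", ep->fd);
  delete ep;
}

// Owning handle: destroys the endpoint on every path out of scope,
// mirroring what OrphanablePtr<grpc_endpoint> provides in the diff above.
using owned_endpoint =
    std::unique_ptr<fake_endpoint, decltype(&destroy_endpoint)>;

bool accept_connection(owned_endpoint endpoint, bool quota_ok) {
  if (!quota_ok) {
    // Early return: no cleanup lambda needed, the deleter runs automatically.
    return false;
  }
  // Hand the endpoint off to the next stage, transferring ownership.
  owned_endpoint handed_off = std::move(endpoint);
  return handed_off != nullptr;
}

int main() {
  accept_connection(owned_endpoint(new fake_endpoint{1}, &destroy_endpoint),
                    /*quota_ok=*/false);
  accept_connection(owned_endpoint(new fake_endpoint{2}, &destroy_endpoint),
                    /*quota_ok=*/true);
}
```

The payoff is the same as in the diff: each rejected connection is cleaned up on exactly one path, and accepted connections transfer ownership explicitly with `std::move`.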
@ -20,9 +20,9 @@

#include "absl/base/attributes.h"
#include "absl/log/check.h"
#include "absl/log/log.h"

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>

#include "src/core/lib/slice/slice.h"
@ -58,10 +58,8 @@ static bool input_is_valid(const uint8_t* input_ptr, size_t length) {

for (i = 0; i < length; ++i) {
if (GPR_UNLIKELY((decode_table[input_ptr[i]] & 0xC0) != 0)) {
gpr_log(GPR_ERROR,
"Base64 decoding failed, invalid character '%c' in base64 "
"input.\n",
static_cast<char>(*input_ptr));
LOG(ERROR) << "Base64 decoding failed, invalid character '"
<< static_cast<char>(*input_ptr) << "' in base64 input.\n";
return false;
}
}
@ -89,17 +87,14 @@ size_t grpc_chttp2_base64_infer_length_after_decode(const grpc_slice& slice) {
len--;
}
if (GPR_UNLIKELY(GRPC_SLICE_LENGTH(slice) - len > 2)) {
gpr_log(GPR_ERROR,
"Base64 decoding failed. Input has more than 2 paddings.");
LOG(ERROR) << "Base64 decoding failed. Input has more than 2 paddings.";
return 0;
}
size_t tuples = len / 4;
size_t tail_case = len % 4;
if (GPR_UNLIKELY(tail_case == 1)) {
gpr_log(GPR_ERROR,
"Base64 decoding failed. Input has a length of %zu (without"
" padding), which is invalid.\n",
len);
LOG(ERROR) << "Base64 decoding failed. Input has a length of " << len
<< " (without padding), which is invalid.\n";
return 0;
}
return tuples * 3 + tail_xtra[tail_case];
@ -167,11 +162,9 @@ grpc_slice grpc_chttp2_base64_decode(const grpc_slice& input) {
grpc_slice output;

if (GPR_UNLIKELY(input_length % 4 != 0)) {
gpr_log(GPR_ERROR,
"Base64 decoding failed, input of "
"grpc_chttp2_base64_decode has a length of %d, which is not a "
"multiple of 4.\n",
static_cast<int>(input_length));
LOG(ERROR) << "Base64 decoding failed, input of "
"grpc_chttp2_base64_decode has a length of "
<< input_length << ", which is not a multiple of 4.\n";
return grpc_empty_slice();
}

@ -194,7 +187,7 @@ grpc_slice grpc_chttp2_base64_decode(const grpc_slice& input) {

if (GPR_UNLIKELY(!grpc_base64_decode_partial(&ctx))) {
char* s = grpc_slice_to_c_string(input);
gpr_log(GPR_ERROR, "Base64 decoding failed, input string:\n%s\n", s);
LOG(ERROR) << "Base64 decoding failed, input string:\n" << s << "\n";
gpr_free(s);
grpc_core::CSliceUnref(output);
return grpc_empty_slice();
@ -212,23 +205,18 @@ grpc_slice grpc_chttp2_base64_decode_with_length(const grpc_slice& input,

// The length of a base64 string cannot be 4 * n + 1
if (GPR_UNLIKELY(input_length % 4 == 1)) {
gpr_log(GPR_ERROR,
"Base64 decoding failed, input of "
"grpc_chttp2_base64_decode_with_length has a length of %d, which "
"has a tail of 1 byte.\n",
static_cast<int>(input_length));
LOG(ERROR) << "Base64 decoding failed, input of "
"grpc_chttp2_base64_decode_with_length has a length of "
<< input_length << ", which has a tail of 1 byte.\n";
grpc_core::CSliceUnref(output);
return grpc_empty_slice();
}

if (GPR_UNLIKELY(output_length >
input_length / 4 * 3 + tail_xtra[input_length % 4])) {
gpr_log(
GPR_ERROR,
"Base64 decoding failed, output_length %d is longer "
"than the max possible output length %d.\n",
static_cast<int>(output_length),
static_cast<int>(input_length / 4 * 3 + tail_xtra[input_length % 4]));
LOG(ERROR) << "Base64 decoding failed, output_length " << output_length
<< " is longer than the max possible output length "
<< (input_length / 4 * 3 + tail_xtra[input_length % 4]) << ".\n";
grpc_core::CSliceUnref(output);
return grpc_empty_slice();
}
@ -241,7 +229,7 @@ grpc_slice grpc_chttp2_base64_decode_with_length(const grpc_slice& input,

if (GPR_UNLIKELY(!grpc_base64_decode_partial(&ctx))) {
char* s = grpc_slice_to_c_string(input);
gpr_log(GPR_ERROR, "Base64 decoding failed, input string:\n%s\n", s);
LOG(ERROR) << "Base64 decoding failed, input string:\n" << s << "\n";
gpr_free(s);
grpc_core::CSliceUnref(output);
return grpc_empty_slice();
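
The `bin_decoder.cc` hunks above swap printf-style `gpr_log(GPR_ERROR, ...)` calls for Abseil streaming logs (`LOG(ERROR) << ...`) while keeping the same validation rules: at most two `=` padding characters, no tail of exactly one character, and a decoded size of `len / 4 * 3` plus the tail contribution. The standalone sketch below restates that length rule; it is not the gRPC decoder, and `kTailXtra` is a local table reflecting standard base64 group sizes rather than the array used in the source.

```cpp
#include <cstddef>
#include <cstdio>
#include <string_view>

// Extra decoded bytes contributed by a partial trailing group of
// 0, 2, or 3 base64 characters (a group of 1 is never valid).
constexpr size_t kTailXtra[4] = {0, 0, 1, 2};

// Returns the decoded size for a base64 string after stripping '=' padding,
// or 0 if the input cannot be valid -- the same rule the hunks above enforce.
size_t inferred_decoded_length(std::string_view b64) {
  size_t len = b64.size();
  size_t padding = 0;
  while (len > 0 && b64[len - 1] == '=') {
    --len;
    ++padding;
  }
  if (padding > 2) return 0;   // more than two '=' is never valid
  if (len % 4 == 1) return 0;  // a tail of one character is never valid
  return len / 4 * 3 + kTailXtra[len % 4];
}

int main() {
  std::printf("%zu\n", inferred_decoded_length("Zm9vYmFy"));  // 6 ("foobar")
  std::printf("%zu\n", inferred_decoded_length("Zm9vYg=="));  // 4 ("foob")
  std::printf("%zu\n", inferred_decoded_length("Zm9vY"));     // 0 (invalid)
}
```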
@ -53,7 +53,6 @@
|
|||
#include <grpc/slice_buffer.h>
|
||||
#include <grpc/status.h>
|
||||
#include <grpc/support/alloc.h>
|
||||
#include <grpc/support/log.h>
|
||||
#include <grpc/support/port_platform.h>
|
||||
#include <grpc/support/time.h>
|
||||
|
||||
|
@ -84,6 +83,7 @@
|
|||
#include "src/core/lib/gprpp/status_helper.h"
|
||||
#include "src/core/lib/gprpp/time.h"
|
||||
#include "src/core/lib/iomgr/combiner.h"
|
||||
#include "src/core/lib/iomgr/endpoint.h"
|
||||
#include "src/core/lib/iomgr/error.h"
|
||||
#include "src/core/lib/iomgr/ev_posix.h"
|
||||
#include "src/core/lib/iomgr/event_engine_shims/endpoint.h"
|
||||
|
@ -257,6 +257,7 @@ grpc_core::CopyContextFn g_get_copied_context_fn = nullptr;
|
|||
namespace grpc_core {
|
||||
|
||||
namespace {
|
||||
|
||||
// Initialize a grpc_closure \a c to call \a Fn with \a t and \a error. Holds
|
||||
// the passed in reference to \a t until it's moved into Fn.
|
||||
template <void (*Fn)(RefCountedPtr<grpc_chttp2_transport>, grpc_error_handle)>
|
||||
|
@ -272,13 +273,12 @@ grpc_closure* InitTransportClosure(RefCountedPtr<grpc_chttp2_transport> t,
|
|||
t.release(), nullptr);
|
||||
return c;
|
||||
}
|
||||
} // namespace
|
||||
|
||||
namespace {
|
||||
TestOnlyGlobalHttp2TransportInitCallback test_only_init_callback = nullptr;
|
||||
TestOnlyGlobalHttp2TransportDestructCallback test_only_destruct_callback =
|
||||
nullptr;
|
||||
bool test_only_disable_transient_failure_state_notification = false;
|
||||
|
||||
} // namespace
|
||||
|
||||
void TestOnlySetGlobalHttp2TransportInitCallback(
|
||||
|
@ -361,6 +361,34 @@ std::string HttpAnnotation::ToString() const {
|
|||
return s;
|
||||
}
|
||||
|
||||
void Chttp2CallTracerWrapper::RecordIncomingBytes(
|
||||
const CallTracerInterface::TransportByteSize& transport_byte_size) {
|
||||
// Update legacy API.
|
||||
stream_->stats.incoming.framing_bytes += transport_byte_size.framing_bytes;
|
||||
stream_->stats.incoming.data_bytes += transport_byte_size.data_bytes;
|
||||
stream_->stats.incoming.header_bytes += transport_byte_size.header_bytes;
|
||||
// Update new API.
|
||||
if (!IsCallTracerInTransportEnabled()) return;
|
||||
auto* call_tracer = stream_->arena->GetContext<CallTracerInterface>();
|
||||
if (call_tracer != nullptr) {
|
||||
call_tracer->RecordIncomingBytes(transport_byte_size);
|
||||
}
|
||||
}
|
||||
|
||||
void Chttp2CallTracerWrapper::RecordOutgoingBytes(
|
||||
const CallTracerInterface::TransportByteSize& transport_byte_size) {
|
||||
// Update legacy API.
|
||||
stream_->stats.outgoing.framing_bytes += transport_byte_size.framing_bytes;
|
||||
stream_->stats.outgoing.data_bytes += transport_byte_size.data_bytes;
|
||||
stream_->stats.outgoing.header_bytes +=
|
||||
transport_byte_size.header_bytes; // Update new API.
|
||||
if (!IsCallTracerInTransportEnabled()) return;
|
||||
auto* call_tracer = stream_->arena->GetContext<CallTracerInterface>();
|
||||
if (call_tracer != nullptr) {
|
||||
call_tracer->RecordOutgoingBytes(transport_byte_size);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace grpc_core
|
||||
|
||||
//
|
||||
|
@ -378,8 +406,6 @@ grpc_chttp2_transport::~grpc_chttp2_transport() {
|
|||
channelz_socket.reset();
|
||||
}
|
||||
|
||||
if (ep != nullptr) grpc_endpoint_destroy(ep);
|
||||
|
||||
grpc_slice_buffer_destroy(&qbuf);
|
||||
|
||||
grpc_error_handle error = GRPC_ERROR_CREATE("Transport destroyed");
|
||||
|
@ -420,9 +446,9 @@ static void read_channel_args(grpc_chttp2_transport* t,
|
|||
channel_args.GetInt(GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER).value_or(-1);
|
||||
if (initial_sequence_number > 0) {
|
||||
if ((t->next_stream_id & 1) != (initial_sequence_number & 1)) {
|
||||
gpr_log(GPR_ERROR, "%s: low bit must be %d on %s",
|
||||
GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER, t->next_stream_id & 1,
|
||||
is_client ? "client" : "server");
|
||||
LOG(ERROR) << GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER
|
||||
<< ": low bit must be " << (t->next_stream_id & 1) << " on "
|
||||
<< (is_client ? "client" : "server");
|
||||
} else {
|
||||
t->next_stream_id = static_cast<uint32_t>(initial_sequence_number);
|
||||
}
|
||||
|
@ -458,15 +484,11 @@ static void read_channel_args(grpc_chttp2_transport* t,
|
|||
if (t->is_client) {
|
||||
t->keepalive_permit_without_calls =
|
||||
channel_args.GetBool(GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS)
|
||||
.value_or(grpc_core::IsKeepaliveFixEnabled()
|
||||
? g_default_client_keepalive_permit_without_calls
|
||||
: false);
|
||||
.value_or(g_default_client_keepalive_permit_without_calls);
|
||||
} else {
|
||||
t->keepalive_permit_without_calls =
|
||||
channel_args.GetBool(GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS)
|
||||
.value_or(grpc_core::IsKeepaliveServerFixEnabled()
|
||||
? g_default_server_keepalive_permit_without_calls
|
||||
: false);
|
||||
.value_or(g_default_server_keepalive_permit_without_calls);
|
||||
}
|
||||
|
||||
t->settings_timeout =
|
||||
|
@ -495,7 +517,7 @@ static void read_channel_args(grpc_chttp2_transport* t,
|
|||
.value_or(GRPC_ENABLE_CHANNELZ_DEFAULT)) {
|
||||
t->channelz_socket =
|
||||
grpc_core::MakeRefCounted<grpc_core::channelz::SocketNode>(
|
||||
std::string(grpc_endpoint_get_local_address(t->ep)),
|
||||
std::string(grpc_endpoint_get_local_address(t->ep.get())),
|
||||
std::string(t->peer_string.as_string_view()),
|
||||
absl::StrCat(t->GetTransportName(), " ",
|
||||
t->peer_string.as_string_view()),
|
||||
|
@ -528,8 +550,8 @@ static void read_channel_args(grpc_chttp2_transport* t,
|
|||
t->max_concurrent_streams_policy.SetTarget(value);
|
||||
}
|
||||
} else if (channel_args.Contains(GRPC_ARG_MAX_CONCURRENT_STREAMS)) {
|
||||
gpr_log(GPR_DEBUG, "%s is not available on clients",
|
||||
GRPC_ARG_MAX_CONCURRENT_STREAMS);
|
||||
VLOG(2) << GRPC_ARG_MAX_CONCURRENT_STREAMS
|
||||
<< " is not available on clients";
|
||||
}
|
||||
value =
|
||||
channel_args.GetInt(GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_DECODER).value_or(-1);
|
||||
|
@ -589,11 +611,11 @@ using grpc_event_engine::experimental::QueryExtension;
|
|||
using grpc_event_engine::experimental::TcpTraceExtension;
|
||||
|
||||
grpc_chttp2_transport::grpc_chttp2_transport(
|
||||
const grpc_core::ChannelArgs& channel_args, grpc_endpoint* ep,
|
||||
bool is_client)
|
||||
: ep(ep),
|
||||
const grpc_core::ChannelArgs& channel_args,
|
||||
grpc_core::OrphanablePtr<grpc_endpoint> endpoint, bool is_client)
|
||||
: ep(std::move(endpoint)),
|
||||
peer_string(
|
||||
grpc_core::Slice::FromCopiedString(grpc_endpoint_get_peer(ep))),
|
||||
grpc_core::Slice::FromCopiedString(grpc_endpoint_get_peer(ep.get()))),
|
||||
memory_owner(channel_args.GetObject<grpc_core::ResourceQuota>()
|
||||
->memory_quota()
|
||||
->CreateMemoryOwner()),
|
||||
|
@ -617,10 +639,11 @@ grpc_chttp2_transport::grpc_chttp2_transport(
|
|||
context_list = new grpc_core::ContextList();
|
||||
|
||||
if (channel_args.GetBool(GRPC_ARG_TCP_TRACING_ENABLED).value_or(false) &&
|
||||
grpc_event_engine::experimental::grpc_is_event_engine_endpoint(ep)) {
|
||||
grpc_event_engine::experimental::grpc_is_event_engine_endpoint(
|
||||
ep.get())) {
|
||||
auto epte = QueryExtension<TcpTraceExtension>(
|
||||
grpc_event_engine::experimental::grpc_get_wrapped_event_engine_endpoint(
|
||||
ep));
|
||||
ep.get()));
|
||||
if (epte != nullptr) {
|
||||
epte->InitializeAndReturnTcpTracer();
|
||||
}
|
||||
|
@ -763,17 +786,16 @@ static void close_transport_locked(grpc_chttp2_transport* t,
|
|||
CHECK(t->write_state == GRPC_CHTTP2_WRITE_STATE_IDLE);
|
||||
if (t->interested_parties_until_recv_settings != nullptr) {
|
||||
grpc_endpoint_delete_from_pollset_set(
|
||||
t->ep, t->interested_parties_until_recv_settings);
|
||||
t->ep.get(), t->interested_parties_until_recv_settings);
|
||||
t->interested_parties_until_recv_settings = nullptr;
|
||||
}
|
||||
grpc_core::MutexLock lock(&t->ep_destroy_mu);
|
||||
grpc_endpoint_destroy(t->ep);
|
||||
t->ep = nullptr;
|
||||
t->ep.reset();
|
||||
}
|
||||
if (t->notify_on_receive_settings != nullptr) {
|
||||
if (t->interested_parties_until_recv_settings != nullptr) {
|
||||
grpc_endpoint_delete_from_pollset_set(
|
||||
t->ep, t->interested_parties_until_recv_settings);
|
||||
t->ep.get(), t->interested_parties_until_recv_settings);
|
||||
t->interested_parties_until_recv_settings = nullptr;
|
||||
}
|
||||
grpc_core::ExecCtx::Run(DEBUG_LOCATION, t->notify_on_receive_settings,
|
||||
|
@ -820,7 +842,8 @@ grpc_chttp2_stream::grpc_chttp2_stream(grpc_chttp2_transport* t,
|
|||
return refcount;
|
||||
}()),
|
||||
arena(arena),
|
||||
flow_control(&t->flow_control) {
|
||||
flow_control(&t->flow_control),
|
||||
call_tracer_wrapper(this) {
|
||||
t->streams_allocated.fetch_add(1, std::memory_order_relaxed);
|
||||
if (server_data) {
|
||||
id = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(server_data));
|
||||
|
@ -927,11 +950,11 @@ static const char* write_state_name(grpc_chttp2_write_state st) {
|
|||
|
||||
static void set_write_state(grpc_chttp2_transport* t,
|
||||
grpc_chttp2_write_state st, const char* reason) {
|
||||
GRPC_CHTTP2_IF_TRACING(
|
||||
gpr_log(GPR_INFO, "W:%p %s [%s] state %s -> %s [%s]", t,
|
||||
t->is_client ? "CLIENT" : "SERVER",
|
||||
std::string(t->peer_string.as_string_view()).c_str(),
|
||||
write_state_name(t->write_state), write_state_name(st), reason));
|
||||
GRPC_TRACE_LOG(http, INFO)
|
||||
<< "W:" << t << " " << (t->is_client ? "CLIENT" : "SERVER") << " ["
|
||||
<< t->peer_string.as_string_view() << "] state "
|
||||
<< write_state_name(t->write_state) << " -> " << write_state_name(st)
|
||||
<< " [" << reason << "]";
|
||||
t->write_state = st;
|
||||
// If the state is being reset back to idle, it means a write was just
|
||||
// finished. Make sure all the run_after_write closures are scheduled.
|
||||
|
@ -1021,11 +1044,10 @@ static void write_action_begin_locked(
|
|||
// We had paused reading, because we had many induced frames (SETTINGS
|
||||
// ACK, PINGS ACK and RST_STREAMS) pending in t->qbuf. Now that we have
|
||||
// been able to flush qbuf, we can resume reading.
|
||||
GRPC_CHTTP2_IF_TRACING(gpr_log(
|
||||
GPR_INFO,
|
||||
"transport %p : Resuming reading after being paused due to too "
|
||||
"many unwritten SETTINGS ACK, PINGS ACK and RST_STREAM frames",
|
||||
t.get()));
|
||||
GRPC_TRACE_LOG(http, INFO)
|
||||
<< "transport " << t.get()
|
||||
<< " : Resuming reading after being paused due to too many unwritten "
|
||||
"SETTINGS ACK, PINGS ACK and RST_STREAM frames";
|
||||
t->reading_paused_on_pending_induced_frames = false;
|
||||
continue_read_action_locked(std::move(t));
|
||||
}
|
||||
|
@ -1061,7 +1083,7 @@ static void write_action(grpc_chttp2_transport* t) {
|
|||
<< (t->is_client ? "CLIENT" : "SERVER") << "[" << t << "]: Write "
|
||||
<< t->outbuf.Length() << " bytes";
|
||||
t->write_size_policy.BeginWrite(t->outbuf.Length());
|
||||
grpc_endpoint_write(t->ep, t->outbuf.c_slice_buffer(),
|
||||
grpc_endpoint_write(t->ep.get(), t->outbuf.c_slice_buffer(),
|
||||
grpc_core::InitTransportClosure<write_action_end>(
|
||||
t->Ref(), &t->write_action_end_locked),
|
||||
cl, max_frame_size);
|
||||
|
@ -1152,15 +1174,15 @@ void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t,
|
|||
static_cast<intptr_t>(goaway_error)),
|
||||
grpc_core::StatusIntProperty::kRpcStatus, GRPC_STATUS_UNAVAILABLE);
|
||||
|
||||
GRPC_CHTTP2_IF_TRACING(
|
||||
gpr_log(GPR_INFO, "transport %p got goaway with last stream id %d", t,
|
||||
last_stream_id));
|
||||
GRPC_TRACE_LOG(http, INFO)
|
||||
<< "transport " << t << " got goaway with last stream id "
|
||||
<< last_stream_id;
|
||||
// We want to log this irrespective of whether http tracing is enabled if we
|
||||
// received a GOAWAY with a non NO_ERROR code.
|
||||
if (goaway_error != GRPC_HTTP2_NO_ERROR) {
|
||||
gpr_log(GPR_INFO, "%s: Got goaway [%d] err=%s",
|
||||
std::string(t->peer_string.as_string_view()).c_str(), goaway_error,
|
||||
grpc_core::StatusToString(t->goaway_error).c_str());
|
||||
LOG(INFO) << t->peer_string.as_string_view() << ": Got goaway ["
|
||||
<< goaway_error
|
||||
<< "] err=" << grpc_core::StatusToString(t->goaway_error);
|
||||
}
|
||||
if (t->is_client) {
|
||||
cancel_unstarted_streams(t, t->goaway_error, false);
|
||||
|
@ -1186,12 +1208,11 @@ void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t,
|
|||
if (GPR_UNLIKELY(t->is_client &&
|
||||
goaway_error == GRPC_HTTP2_ENHANCE_YOUR_CALM &&
|
||||
goaway_text == "too_many_pings")) {
|
||||
gpr_log(GPR_ERROR,
|
||||
"%s: Received a GOAWAY with error code ENHANCE_YOUR_CALM and debug "
|
||||
"data equal to \"too_many_pings\". Current keepalive time (before "
|
||||
"throttling): %s",
|
||||
std::string(t->peer_string.as_string_view()).c_str(),
|
||||
t->keepalive_time.ToString().c_str());
|
||||
LOG(ERROR) << t->peer_string.as_string_view()
|
||||
<< ": Received a GOAWAY with error code ENHANCE_YOUR_CALM and "
|
||||
"debug data equal to \"too_many_pings\". Current keepalive "
|
||||
"time (before throttling): "
|
||||
<< t->keepalive_time.ToString();
|
||||
constexpr int max_keepalive_time_millis =
|
||||
INT_MAX / KEEPALIVE_TIME_BACKOFF_MULTIPLIER;
|
||||
int64_t throttled_keepalive_time =
|
||||
|
@ -1223,10 +1244,10 @@ static void maybe_start_some_streams(grpc_chttp2_transport* t) {
|
|||
t->stream_map.size() < t->settings.peer().max_concurrent_streams() &&
|
||||
grpc_chttp2_list_pop_waiting_for_concurrency(t, &s)) {
|
||||
// safe since we can't (legally) be parsing this stream yet
|
||||
GRPC_CHTTP2_IF_TRACING(gpr_log(
|
||||
GPR_INFO,
|
||||
"HTTP:%s: Transport %p allocating new grpc_chttp2_stream %p to id %d",
|
||||
t->is_client ? "CLI" : "SVR", t, s, t->next_stream_id));
|
||||
GRPC_TRACE_LOG(http, INFO)
|
||||
<< "HTTP:" << (t->is_client ? "CLI" : "SVR") << ": Transport " << t
|
||||
<< " allocating new grpc_chttp2_stream " << s << " to id "
|
||||
<< t->next_stream_id;
|
||||
|
||||
CHECK_EQ(s->id, 0u);
|
||||
s->id = t->next_stream_id;
|
||||
|
@ -1289,17 +1310,13 @@ void grpc_chttp2_complete_closure_step(grpc_chttp2_transport* t,
|
|||
}
|
||||
closure->next_data.scratch -= CLOSURE_BARRIER_FIRST_REF_BIT;
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http)) {
|
||||
gpr_log(
|
||||
GPR_INFO,
|
||||
"complete_closure_step: t=%p %p refs=%d flags=0x%04x desc=%s err=%s "
|
||||
"write_state=%s whence=%s:%d",
|
||||
t, closure,
|
||||
static_cast<int>(closure->next_data.scratch /
|
||||
CLOSURE_BARRIER_FIRST_REF_BIT),
|
||||
static_cast<int>(closure->next_data.scratch %
|
||||
CLOSURE_BARRIER_FIRST_REF_BIT),
|
||||
desc, grpc_core::StatusToString(error).c_str(),
|
||||
write_state_name(t->write_state), whence.file(), whence.line());
|
||||
LOG(INFO) << "complete_closure_step: t=" << t << " " << closure << " refs="
|
||||
<< (closure->next_data.scratch / CLOSURE_BARRIER_FIRST_REF_BIT)
|
||||
<< " flags="
|
||||
<< (closure->next_data.scratch % CLOSURE_BARRIER_FIRST_REF_BIT)
|
||||
<< " desc=" << desc << " err=" << grpc_core::StatusToString(error)
|
||||
<< " write_state=" << write_state_name(t->write_state)
|
||||
<< " whence=" << whence.file() << ":" << whence.line();
|
||||
}
|
||||
|
||||
if (!error.ok()) {
|
||||
|
@ -1342,7 +1359,7 @@ static void log_metadata(const grpc_metadata_batch* md_batch, uint32_t id,
|
|||
const std::string prefix = absl::StrCat(
|
||||
"HTTP:", id, is_initial ? ":HDR" : ":TRL", is_client ? ":CLI:" : ":SVR:");
|
||||
md_batch->Log([&prefix](absl::string_view key, absl::string_view value) {
|
||||
VLOG(2) << absl::StrCat(prefix, key, ": ", value);
|
||||
VLOG(2) << prefix << key << ": " << value;
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -1356,13 +1373,14 @@ static void perform_stream_op_locked(void* stream_op,
|
|||
grpc_chttp2_transport* t = s->t.get();
|
||||
|
||||
s->traced = op->is_traced;
|
||||
s->call_tracer = CallTracerIfSampled(s);
|
||||
if (!grpc_core::IsCallTracerInTransportEnabled()) {
|
||||
s->call_tracer = CallTracerIfSampled(s);
|
||||
}
|
||||
s->tcp_tracer = TcpTracerIfSampled(s);
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"perform_stream_op_locked[s=%p; op=%p]: %s; on_complete = %p", s,
|
||||
op, grpc_transport_stream_op_batch_string(op, false).c_str(),
|
||||
op->on_complete);
|
||||
LOG(INFO) << "perform_stream_op_locked[s=" << s << "; op=" << op
|
||||
<< "]: " << grpc_transport_stream_op_batch_string(op, false)
|
||||
<< "; on_complete = " << op->on_complete;
|
||||
if (op->send_initial_metadata) {
|
||||
log_metadata(op_payload->send_initial_metadata.send_initial_metadata,
|
||||
s->id, t->is_client, true);
|
||||
|
@ -1388,12 +1406,24 @@ static void perform_stream_op_locked(void* stream_op,
|
|||
}
|
||||
|
||||
if (op->send_initial_metadata) {
|
||||
if (s->call_tracer != nullptr) {
|
||||
s->call_tracer->RecordAnnotation(
|
||||
grpc_core::HttpAnnotation(grpc_core::HttpAnnotation::Type::kStart,
|
||||
gpr_now(GPR_CLOCK_REALTIME))
|
||||
.Add(s->t->flow_control.stats())
|
||||
.Add(s->flow_control.stats()));
|
||||
if (!grpc_core::IsCallTracerInTransportEnabled()) {
|
||||
if (s->call_tracer != nullptr) {
|
||||
s->call_tracer->RecordAnnotation(
|
||||
grpc_core::HttpAnnotation(grpc_core::HttpAnnotation::Type::kStart,
|
||||
gpr_now(GPR_CLOCK_REALTIME))
|
||||
.Add(s->t->flow_control.stats())
|
||||
.Add(s->flow_control.stats()));
|
||||
}
|
||||
} else if (grpc_core::IsTraceRecordCallopsEnabled()) {
|
||||
auto* call_tracer =
|
||||
s->arena->GetContext<grpc_core::CallTracerInterface>();
|
||||
if (call_tracer != nullptr && call_tracer->IsSampled()) {
|
||||
call_tracer->RecordAnnotation(
|
||||
grpc_core::HttpAnnotation(grpc_core::HttpAnnotation::Type::kStart,
|
||||
gpr_now(GPR_CLOCK_REALTIME))
|
||||
.Add(s->t->flow_control.stats())
|
||||
.Add(s->flow_control.stats()));
|
||||
}
|
||||
}
|
||||
if (t->is_client && t->channelz_socket != nullptr) {
|
||||
t->channelz_socket->RecordStreamStartedFromLocal();
|
||||
|
@ -1477,11 +1507,8 @@ static void perform_stream_op_locked(void* stream_op,
|
|||
frame_hdr[3] = static_cast<uint8_t>(len >> 8);
|
||||
frame_hdr[4] = static_cast<uint8_t>(len);
|
||||
|
||||
if (grpc_core::IsHttp2StatsFixEnabled()) {
|
||||
s->stats.outgoing.framing_bytes += GRPC_HEADER_SIZE_IN_BYTES;
|
||||
s->stats.outgoing.data_bytes +=
|
||||
op_payload->send_message.send_message->Length();
|
||||
}
|
||||
s->call_tracer_wrapper.RecordOutgoingBytes(
|
||||
{GRPC_HEADER_SIZE_IN_BYTES, len, 0});
|
||||
s->next_message_end_offset =
|
||||
s->flow_controlled_bytes_written +
|
||||
static_cast<int64_t>(s->flow_controlled_buffer.length) +
|
||||
|
@ -1626,8 +1653,8 @@ void grpc_chttp2_transport::PerformStreamOp(
|
|||
}
|
||||
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http)) {
|
||||
gpr_log(GPR_INFO, "perform_stream_op[s=%p; op=%p]: %s", s, op,
|
||||
grpc_transport_stream_op_batch_string(op, false).c_str());
|
||||
LOG(INFO) << "perform_stream_op[s=" << s << "; op=" << op
|
||||
<< "]: " << grpc_transport_stream_op_batch_string(op, false);
|
||||
}
|
||||
|
||||
GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op");
|
||||
|
@ -1638,8 +1665,8 @@ void grpc_chttp2_transport::PerformStreamOp(
|
|||
}
|
||||
|
||||
static void cancel_pings(grpc_chttp2_transport* t, grpc_error_handle error) {
|
||||
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "%p CANCEL PINGS: %s", t,
|
||||
grpc_core::StatusToString(error).c_str()));
|
||||
GRPC_TRACE_LOG(http, INFO)
|
||||
<< t << " CANCEL PINGS: " << grpc_core::StatusToString(error);
|
||||
// callback remaining pings: they're not allowed to call into the transport,
|
||||
// and maybe they hold resources that need to be freed
|
||||
t->ping_callbacks.CancelAll(t->event_engine.get());
|
||||
|
@ -1722,8 +1749,8 @@ static void retry_initiate_ping_locked(
|
|||
|
||||
void grpc_chttp2_ack_ping(grpc_chttp2_transport* t, uint64_t id) {
|
||||
if (!t->ping_callbacks.AckPing(id, t->event_engine.get())) {
|
||||
gpr_log(GPR_DEBUG, "Unknown ping response from %s: %" PRIx64,
|
||||
std::string(t->peer_string.as_string_view()).c_str(), id);
|
||||
VLOG(2) << "Unknown ping response from " << t->peer_string.as_string_view()
|
||||
<< ": " << id;
|
||||
return;
|
||||
}
|
||||
if (t->ping_callbacks.ping_requested()) {
|
||||
|
@ -1735,8 +1762,8 @@ void grpc_chttp2_keepalive_timeout(
|
|||
grpc_core::RefCountedPtr<grpc_chttp2_transport> t) {
|
||||
t->combiner->Run(
|
||||
grpc_core::NewClosure([t](grpc_error_handle) {
|
||||
gpr_log(GPR_INFO, "%s: Keepalive timeout. Closing transport.",
|
||||
std::string(t->peer_string.as_string_view()).c_str());
|
||||
GRPC_TRACE_LOG(http, INFO) << t->peer_string.as_string_view()
|
||||
<< ": Keepalive timeout. Closing transport.";
|
||||
send_goaway(
|
||||
t.get(),
|
||||
grpc_error_set_int(GRPC_ERROR_CREATE("keepalive_timeout"),
|
||||
|
@ -1756,8 +1783,8 @@ void grpc_chttp2_ping_timeout(
|
|||
grpc_core::RefCountedPtr<grpc_chttp2_transport> t) {
|
||||
t->combiner->Run(
|
||||
grpc_core::NewClosure([t](grpc_error_handle) {
|
||||
gpr_log(GPR_INFO, "%s: Ping timeout. Closing transport.",
|
||||
std::string(t->peer_string.as_string_view()).c_str());
|
||||
GRPC_TRACE_LOG(http, INFO) << t->peer_string.as_string_view()
|
||||
<< ": Ping timeout. Closing transport.";
|
||||
send_goaway(
|
||||
t.get(),
|
||||
grpc_error_set_int(GRPC_ERROR_CREATE("ping_timeout"),
|
||||
|
@ -1777,8 +1804,8 @@ void grpc_chttp2_settings_timeout(
|
|||
grpc_core::RefCountedPtr<grpc_chttp2_transport> t) {
|
||||
t->combiner->Run(
|
||||
grpc_core::NewClosure([t](grpc_error_handle) {
|
||||
gpr_log(GPR_INFO, "%s: Settings timeout. Closing transport.",
|
||||
std::string(t->peer_string.as_string_view()).c_str());
|
||||
GRPC_TRACE_LOG(http, INFO) << t->peer_string.as_string_view()
|
||||
<< ": Settings timeout. Closing transport.";
|
||||
send_goaway(
|
||||
t.get(),
|
||||
grpc_error_set_int(GRPC_ERROR_CREATE("settings_timeout"),
|
||||
|
@ -1828,22 +1855,21 @@ class GracefulGoaway : public grpc_core::RefCounted<GracefulGoaway> {
|
|||
return;
|
||||
}
|
||||
if (t_->destroying || !t_->closed_with_error.ok()) {
|
||||
GRPC_CHTTP2_IF_TRACING(
|
||||
gpr_log(GPR_INFO,
|
||||
"transport:%p %s peer:%s Transport already shutting down. "
|
||||
"Graceful GOAWAY abandoned.",
|
||||
t_.get(), t_->is_client ? "CLIENT" : "SERVER",
|
||||
std::string(t_->peer_string.as_string_view()).c_str()));
|
||||
GRPC_TRACE_LOG(http, INFO) << "transport:" << t_.get() << " "
|
||||
<< (t_->is_client ? "CLIENT" : "SERVER")
|
||||
<< " peer:" << t_->peer_string.as_string_view()
|
||||
<< " Transport already shutting down. "
|
||||
"Graceful GOAWAY abandoned.";
|
||||
return;
|
||||
}
|
||||
// Ping completed. Send final goaway.
|
||||
GRPC_CHTTP2_IF_TRACING(
|
||||
gpr_log(GPR_INFO,
|
||||
"transport:%p %s peer:%s Graceful shutdown: Ping received. "
|
||||
"Sending final GOAWAY with stream_id:%d",
|
||||
t_.get(), t_->is_client ? "CLIENT" : "SERVER",
|
||||
std::string(t_->peer_string.as_string_view()).c_str(),
|
||||
t_->last_new_stream_id));
|
||||
GRPC_TRACE_LOG(http, INFO)
|
||||
<< "transport:" << t_.get() << " "
|
||||
<< (t_->is_client ? "CLIENT" : "SERVER")
|
||||
<< " peer:" << std::string(t_->peer_string.as_string_view())
|
||||
<< " Graceful shutdown: Ping received. "
|
||||
"Sending final GOAWAY with stream_id:"
|
||||
<< t_->last_new_stream_id;
|
||||
t_->sent_goaway_state = GRPC_CHTTP2_FINAL_GOAWAY_SEND_SCHEDULED;
|
||||
grpc_chttp2_goaway_append(t_->last_new_stream_id, 0, grpc_empty_slice(),
|
||||
&t_->qbuf);
|
||||
|
@ -1887,10 +1913,10 @@ static void send_goaway(grpc_chttp2_transport* t, grpc_error_handle error,
|
|||
} else if (t->sent_goaway_state == GRPC_CHTTP2_NO_GOAWAY_SEND ||
|
||||
t->sent_goaway_state == GRPC_CHTTP2_GRACEFUL_GOAWAY) {
|
||||
// We want to log this irrespective of whether http tracing is enabled
|
||||
gpr_log(GPR_DEBUG, "%s %s: Sending goaway last_new_stream_id=%d err=%s",
|
||||
std::string(t->peer_string.as_string_view()).c_str(),
|
||||
t->is_client ? "CLIENT" : "SERVER", t->last_new_stream_id,
|
||||
grpc_core::StatusToString(error).c_str());
|
||||
VLOG(2) << t->peer_string.as_string_view() << " "
|
||||
<< (t->is_client ? "CLIENT" : "SERVER")
|
||||
<< ": Sending goaway last_new_stream_id=" << t->last_new_stream_id
|
||||
<< " err=" << grpc_core::StatusToString(error);
|
||||
t->sent_goaway_state = GRPC_CHTTP2_FINAL_GOAWAY_SEND_SCHEDULED;
|
||||
grpc_chttp2_goaway_append(
|
||||
t->last_new_stream_id, static_cast<uint32_t>(http_error),
|
||||
|
@ -1939,13 +1965,13 @@ static void perform_transport_op_locked(void* stream_op,
|
|||
|
||||
if (op->bind_pollset) {
|
||||
if (t->ep != nullptr) {
|
||||
grpc_endpoint_add_to_pollset(t->ep, op->bind_pollset);
|
||||
grpc_endpoint_add_to_pollset(t->ep.get(), op->bind_pollset);
|
||||
}
|
||||
}
|
||||
|
||||
if (op->bind_pollset_set) {
|
||||
if (t->ep != nullptr) {
|
||||
grpc_endpoint_add_to_pollset_set(t->ep, op->bind_pollset_set);
|
||||
grpc_endpoint_add_to_pollset_set(t->ep.get(), op->bind_pollset_set);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1974,8 +2000,8 @@ static void perform_transport_op_locked(void* stream_op,
|
|||
|
||||
void grpc_chttp2_transport::PerformOp(grpc_transport_op* op) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http)) {
|
||||
gpr_log(GPR_INFO, "perform_transport_op[t=%p]: %s", this,
|
||||
grpc_transport_op_string(op).c_str());
|
||||
LOG(INFO) << "perform_transport_op[t=" << this
|
||||
<< "]: " << grpc_transport_op_string(op);
|
||||
}
|
||||
op->handler_private.extra_arg = this;
|
||||
Ref().release()->combiner->Run(
|
||||
|
@ -2027,10 +2053,9 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_chttp2_transport* t,
|
|||
// exited out of at any point by returning.
|
||||
[&]() {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http)) {
|
||||
gpr_log(GPR_DEBUG,
|
||||
"maybe_complete_recv_message %p final_metadata_requested=%d "
|
||||
"seen_error=%d",
|
||||
s, s->final_metadata_requested, s->seen_error);
|
||||
VLOG(2) << "maybe_complete_recv_message " << s
|
||||
<< " final_metadata_requested=" << s->final_metadata_requested
|
||||
<< " seen_error=" << s->seen_error;
|
||||
}
|
||||
if (s->final_metadata_requested && s->seen_error) {
|
||||
grpc_slice_buffer_reset_and_unref(&s->frame_storage);
|
||||
|
@ -2043,10 +2068,9 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_chttp2_transport* t,
|
|||
auto r = grpc_deframe_unprocessed_incoming_frames(
|
||||
s, &min_progress_size, &**s->recv_message, s->recv_message_flags);
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http)) {
|
||||
gpr_log(GPR_DEBUG, "Deframe data frame: %s",
|
||||
grpc_core::PollToString(r, [](absl::Status r) {
|
||||
return r.ToString();
|
||||
}).c_str());
|
||||
VLOG(2) << "Deframe data frame: "
|
||||
<< grpc_core::PollToString(
|
||||
r, [](absl::Status r) { return r.ToString(); });
|
||||
}
|
||||
if (r.pending()) {
|
||||
if (s->read_closed) {
|
||||
|
@ -2099,12 +2123,11 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_chttp2_transport* t,
|
|||
grpc_chttp2_stream* s) {
|
||||
grpc_chttp2_maybe_complete_recv_message(t, s);
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http)) {
|
||||
gpr_log(GPR_DEBUG,
|
||||
"maybe_complete_recv_trailing_metadata cli=%d s=%p closure=%p "
|
||||
"read_closed=%d "
|
||||
"write_closed=%d %" PRIdPTR,
|
||||
t->is_client, s, s->recv_trailing_metadata_finished, s->read_closed,
|
||||
s->write_closed, s->frame_storage.length);
|
||||
VLOG(2) << "maybe_complete_recv_trailing_metadata cli=" << t->is_client
|
||||
<< " s=" << s << " closure=" << s->recv_trailing_metadata_finished
|
||||
<< " read_closed=" << s->read_closed
|
||||
<< " write_closed=" << s->write_closed << " "
|
||||
<< s->frame_storage.length;
|
||||
}
|
||||
if (s->recv_trailing_metadata_finished != nullptr && s->read_closed &&
|
||||
s->write_closed) {
|
||||
|
@ -2310,12 +2333,13 @@ grpc_chttp2_transport::RemovedStreamHandle grpc_chttp2_mark_stream_closed(
|
|||
int close_writes, grpc_error_handle error) {
|
||||
grpc_chttp2_transport::RemovedStreamHandle rsh;
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http)) {
|
||||
gpr_log(
|
||||
GPR_DEBUG, "MARK_STREAM_CLOSED: t=%p s=%p(id=%d) %s [%s]", t, s, s->id,
|
||||
(close_reads && close_writes)
|
||||
? "read+write"
|
||||
: (close_reads ? "read" : (close_writes ? "write" : "nothing??")),
|
||||
grpc_core::StatusToString(error).c_str());
|
||||
VLOG(2) << "MARK_STREAM_CLOSED: t=" << t << " s=" << s << "(id=" << s->id
|
||||
<< ") "
|
||||
<< ((close_reads && close_writes)
|
||||
? "read+write"
|
||||
: (close_reads ? "read"
|
||||
: (close_writes ? "write" : "nothing??")))
|
||||
<< " [" << grpc_core::StatusToString(error) << "]";
|
||||
}
|
||||
if (s->read_closed && s->write_closed) {
|
||||
// already closed, but we should still fake the status if needed.
|
||||
|
@ -2723,11 +2747,10 @@ static void read_action_parse_loop_locked(
|
|||
if (keep_reading) {
|
||||
if (t->num_pending_induced_frames >= DEFAULT_MAX_PENDING_INDUCED_FRAMES) {
|
||||
t->reading_paused_on_pending_induced_frames = true;
|
||||
GRPC_CHTTP2_IF_TRACING(
|
||||
gpr_log(GPR_INFO,
|
||||
"transport %p : Pausing reading due to too "
|
||||
"many unwritten SETTINGS ACK and RST_STREAM frames",
|
||||
t.get()));
|
||||
GRPC_TRACE_LOG(http, INFO)
|
||||
<< "transport " << t.get()
|
||||
<< " : Pausing reading due to too many unwritten "
|
||||
"SETTINGS ACK and RST_STREAM frames";
|
||||
} else {
|
||||
continue_read_action_locked(std::move(t));
|
||||
}
|
||||
|
@ -2742,9 +2765,8 @@ static void read_action_locked(
|
|||
if (t->keepalive_ping_timeout_handle != TaskHandle::kInvalid) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http2_ping) ||
|
||||
GRPC_TRACE_FLAG_ENABLED(http_keepalive)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"%s[%p]: Clear keepalive timer because data was received",
|
||||
t->is_client ? "CLIENT" : "SERVER", t.get());
|
||||
LOG(INFO) << (t->is_client ? "CLIENT" : "SERVER") << "[" << t.get()
|
||||
<< "]: Clear keepalive timer because data was received";
|
||||
}
|
||||
t->event_engine->Cancel(
|
||||
std::exchange(t->keepalive_ping_timeout_handle, TaskHandle::kInvalid));
|
||||
|
@ -2763,7 +2785,7 @@ static void continue_read_action_locked(
|
|||
grpc_core::RefCountedPtr<grpc_chttp2_transport> t) {
|
||||
const bool urgent = !t->goaway_error.ok();
|
||||
auto* tp = t.get();
|
||||
grpc_endpoint_read(tp->ep, &tp->read_buffer,
|
||||
grpc_endpoint_read(tp->ep.get(), &tp->read_buffer,
|
||||
grpc_core::InitTransportClosure<read_action>(
|
||||
std::move(t), &tp->read_action_locked),
|
||||
urgent, grpc_chttp2_min_read_progress_size(tp));
|
||||
|
@ -2795,9 +2817,8 @@ static void start_bdp_ping_locked(
|
|||
grpc_core::RefCountedPtr<grpc_chttp2_transport> t,
|
||||
grpc_error_handle error) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http)) {
|
||||
gpr_log(GPR_INFO, "%s: Start BDP ping err=%s",
|
||||
std::string(t->peer_string.as_string_view()).c_str(),
|
||||
grpc_core::StatusToString(error).c_str());
|
||||
LOG(INFO) << t->peer_string.as_string_view()
|
||||
<< ": Start BDP ping err=" << grpc_core::StatusToString(error);
|
||||
}
|
||||
if (!error.ok() || !t->closed_with_error.ok()) {
|
||||
return;
|
||||
|
@ -2822,9 +2843,8 @@ static void finish_bdp_ping_locked(
|
|||
grpc_core::RefCountedPtr<grpc_chttp2_transport> t,
|
||||
grpc_error_handle error) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http)) {
|
||||
gpr_log(GPR_INFO, "%s: Complete BDP ping err=%s",
|
||||
std::string(t->peer_string.as_string_view()).c_str(),
|
||||
grpc_core::StatusToString(error).c_str());
|
||||
LOG(INFO) << t->peer_string.as_string_view()
|
||||
<< ": Complete BDP ping err=" << grpc_core::StatusToString(error);
|
||||
}
|
||||
if (!error.ok() || !t->closed_with_error.ok()) {
|
||||
return;
|
||||
|
@ -2967,8 +2987,8 @@ static void finish_keepalive_ping_locked(
|
|||
if (error.ok()) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http) ||
|
||||
GRPC_TRACE_FLAG_ENABLED(http_keepalive)) {
|
||||
gpr_log(GPR_INFO, "%s: Finish keepalive ping",
|
||||
std::string(t->peer_string.as_string_view()).c_str());
|
||||
LOG(INFO) << t->peer_string.as_string_view()
|
||||
<< ": Finish keepalive ping";
|
||||
}
|
||||
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
|
||||
CHECK(t->keepalive_ping_timer_handle == TaskHandle::kInvalid);
|
||||
|
@ -2989,8 +3009,8 @@ static void maybe_reset_keepalive_ping_timer_locked(grpc_chttp2_transport* t) {
|
|||
// need to Ref or Unref here since we still hold the Ref.
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http) ||
|
||||
GRPC_TRACE_FLAG_ENABLED(http_keepalive)) {
|
||||
gpr_log(GPR_INFO, "%s: Keepalive ping cancelled. Resetting timer.",
|
||||
std::string(t->peer_string.as_string_view()).c_str());
|
||||
LOG(INFO) << t->peer_string.as_string_view()
|
||||
<< ": Keepalive ping cancelled. Resetting timer.";
|
||||
}
|
||||
t->keepalive_ping_timer_handle =
|
||||
t->event_engine->RunAfter(t->keepalive_time, [t = t->Ref()]() mutable {
|
||||
|
@ -3009,9 +3029,9 @@ static void connectivity_state_set(grpc_chttp2_transport* t,
|
|||
grpc_connectivity_state state,
|
||||
const absl::Status& status,
|
||||
const char* reason) {
|
||||
GRPC_CHTTP2_IF_TRACING(gpr_log(
|
||||
GPR_INFO, "transport %p set connectivity_state=%d; status=%s; reason=%s",
|
||||
t, state, status.ToString().c_str(), reason));
|
||||
GRPC_TRACE_LOG(http, INFO)
|
||||
<< "transport " << t << " set connectivity_state=" << state
|
||||
<< "; status=" << status.ToString() << "; reason=" << reason;
|
||||
t->state_tracker.SetState(state, status, reason);
|
||||
}
|
||||
|
||||
|
@ -3026,7 +3046,7 @@ void grpc_chttp2_transport::SetPollset(grpc_stream* /*gs*/,
|
|||
// actually uses pollsets.
|
||||
if (strcmp(grpc_get_poll_strategy_name(), "poll") != 0) return;
|
||||
grpc_core::MutexLock lock(&ep_destroy_mu);
|
||||
if (ep != nullptr) grpc_endpoint_add_to_pollset(ep, pollset);
|
||||
if (ep != nullptr) grpc_endpoint_add_to_pollset(ep.get(), pollset);
|
||||
}
|
||||
|
||||
void grpc_chttp2_transport::SetPollsetSet(grpc_stream* /*gs*/,
|
||||
|
@ -3036,7 +3056,7 @@ void grpc_chttp2_transport::SetPollsetSet(grpc_stream* /*gs*/,
|
|||
// actually uses pollsets.
|
||||
if (strcmp(grpc_get_poll_strategy_name(), "poll") != 0) return;
|
||||
grpc_core::MutexLock lock(&ep_destroy_mu);
|
||||
if (ep != nullptr) grpc_endpoint_add_to_pollset_set(ep, pollset_set);
|
||||
if (ep != nullptr) grpc_endpoint_add_to_pollset_set(ep.get(), pollset_set);
|
||||
}
|
||||
|
||||
//
|
||||
|
@ -3088,8 +3108,8 @@ static void benign_reclaimer_locked(
|
|||
// Channel with no active streams: send a goaway to try and make it
|
||||
// disconnect cleanly
|
||||
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
|
||||
gpr_log(GPR_INFO, "HTTP2: %s - send goaway to free memory",
|
||||
std::string(t->peer_string.as_string_view()).c_str());
|
||||
LOG(INFO) << "HTTP2: " << t->peer_string.as_string_view()
|
||||
<< " - send goaway to free memory";
|
||||
}
|
||||
send_goaway(t.get(),
|
||||
grpc_error_set_int(GRPC_ERROR_CREATE("Buffers full"),
|
||||
|
@ -3097,11 +3117,9 @@ static void benign_reclaimer_locked(
|
|||
GRPC_HTTP2_ENHANCE_YOUR_CALM),
|
||||
/*immediate_disconnect_hint=*/true);
|
||||
} else if (error.ok() && GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"HTTP2: %s - skip benign reclamation, there are still %" PRIdPTR
|
||||
" streams",
|
||||
std::string(t->peer_string.as_string_view()).c_str(),
|
||||
t->stream_map.size());
|
||||
LOG(INFO) << "HTTP2: " << t->peer_string.as_string_view()
|
||||
<< " - skip benign reclamation, there are still "
|
||||
<< t->stream_map.size() << " streams";
|
||||
}
|
||||
t->benign_reclaimer_registered = false;
|
||||
if (error != absl::CancelledError()) {
|
||||
|
@ -3117,8 +3135,8 @@ static void destructive_reclaimer_locked(
|
|||
// As stream_map is a hash map, this selects effectively a random stream.
|
||||
grpc_chttp2_stream* s = t->stream_map.begin()->second;
|
||||
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
|
||||
gpr_log(GPR_INFO, "HTTP2: %s - abandon stream id %d",
|
||||
std::string(t->peer_string.as_string_view()).c_str(), s->id);
|
||||
LOG(INFO) << "HTTP2: " << t->peer_string.as_string_view()
|
||||
<< " - abandon stream id " << s->id;
|
||||
}
|
||||
grpc_chttp2_cancel_stream(
|
||||
t.get(), s,
|
||||
|
@ -3215,9 +3233,9 @@ grpc_chttp2_transport_get_socket_node(grpc_core::Transport* transport) {
|
|||
}
|
||||
|
||||
grpc_core::Transport* grpc_create_chttp2_transport(
|
||||
const grpc_core::ChannelArgs& channel_args, grpc_endpoint* ep,
|
||||
bool is_client) {
|
||||
return new grpc_chttp2_transport(channel_args, ep, is_client);
|
||||
const grpc_core::ChannelArgs& channel_args,
|
||||
grpc_core::OrphanablePtr<grpc_endpoint> ep, bool is_client) {
|
||||
return new grpc_chttp2_transport(channel_args, std::move(ep), is_client);
|
||||
}
|
||||
|
||||
void grpc_chttp2_transport_start_reading(
|
||||
|
@ -3228,7 +3246,6 @@ void grpc_chttp2_transport_start_reading(
|
|||
auto t = reinterpret_cast<grpc_chttp2_transport*>(transport)->Ref();
|
||||
if (read_buffer != nullptr) {
|
||||
grpc_slice_buffer_move_into(read_buffer, &t->read_buffer);
|
||||
gpr_free(read_buffer);
|
||||
}
|
||||
auto* tp = t.get();
|
||||
tp->combiner->Run(
|
||||
|
@ -3240,7 +3257,7 @@ void grpc_chttp2_transport_start_reading(
|
|||
if (t->ep != nullptr &&
|
||||
interested_parties_until_recv_settings != nullptr) {
|
||||
grpc_endpoint_delete_from_pollset_set(
|
||||
t->ep, interested_parties_until_recv_settings);
|
||||
t->ep.get(), interested_parties_until_recv_settings);
|
||||
}
|
||||
grpc_core::ExecCtx::Run(DEBUG_LOCATION, notify_on_receive_settings,
|
||||
t->closed_with_error);
|
||||
|
|
|
@ -44,8 +44,8 @@
/// from the caller; if the caller still needs the resource_user after creating
/// a transport, the caller must take another ref.
grpc_core::Transport* grpc_create_chttp2_transport(
const grpc_core::ChannelArgs& channel_args, grpc_endpoint* ep,
bool is_client);
const grpc_core::ChannelArgs& channel_args,
grpc_core::OrphanablePtr<grpc_endpoint> ep, bool is_client);

grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode>
grpc_chttp2_transport_get_socket_node(grpc_core::Transport* transport);
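
The header hunk above changes `grpc_create_chttp2_transport` to accept a `grpc_core::OrphanablePtr<grpc_endpoint>` instead of a raw pointer, so the transport takes ownership at the call site and callers now pass the endpoint with `std::move`, as `grpc_server_add_channel_from_fd` does earlier in this diff. The sketch below shows the same sink-parameter pattern with plain standard-library types; `Endpoint`, `Transport`, and `create_transport` are hypothetical names for illustration, not the gRPC declarations.

```cpp
#include <memory>
#include <utility>

// Illustrative stand-ins; not the gRPC types from the header above.
struct Endpoint {};
struct Transport {
  explicit Transport(std::unique_ptr<Endpoint> ep) : endpoint_(std::move(ep)) {}
  std::unique_ptr<Endpoint> endpoint_;  // the transport now owns the endpoint
};

// Factory with a by-value owning parameter: the caller must std::move the
// endpoint in, mirroring the new signature's ownership transfer.
std::unique_ptr<Transport> create_transport(std::unique_ptr<Endpoint> ep,
                                            bool /*is_client*/) {
  return std::make_unique<Transport>(std::move(ep));
}

int main() {
  auto ep = std::make_unique<Endpoint>();
  auto transport = create_transport(std::move(ep), /*is_client=*/false);
  // `ep` is now null; the transport holds the endpoint for its lifetime.
  return transport != nullptr ? 0 : 1;
}
```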
@ -28,11 +28,11 @@
|
|||
#include <vector>
|
||||
|
||||
#include "absl/log/check.h"
|
||||
#include "absl/log/log.h"
|
||||
#include "absl/strings/str_cat.h"
|
||||
#include "absl/strings/str_format.h"
|
||||
#include "absl/strings/str_join.h"
|
||||
|
||||
#include <grpc/support/log.h>
|
||||
#include <grpc/support/port_platform.h>
|
||||
|
||||
#include "src/core/ext/transport/chttp2/transport/http2_settings.h"
|
||||
|
@ -234,8 +234,8 @@ void TransportFlowControl::UpdateSetting(
|
|||
uint32_t)) {
|
||||
if (new_desired_value != *desired_value) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(flowctl)) {
|
||||
gpr_log(GPR_INFO, "[flowctl] UPDATE SETTING %s from %" PRId64 " to %d",
|
||||
std::string(name).c_str(), *desired_value, new_desired_value);
|
||||
LOG(INFO) << "[flowctl] UPDATE SETTING " << name << " from "
|
||||
<< *desired_value << " to " << new_desired_value;
|
||||
}
|
||||
// Reaching zero can only happen for initial window size, and if it occurs
|
||||
// we really want to wake up writes and ensure all the queued stream
|
||||
|
|
|
@ -55,7 +55,7 @@ absl::Status grpc_chttp2_data_parser_begin_frame(uint8_t flags,
|
|||
|
||||
void grpc_chttp2_encode_data(uint32_t id, grpc_slice_buffer* inbuf,
|
||||
uint32_t write_bytes, int is_eof,
|
||||
grpc_transport_one_way_stats* stats,
|
||||
grpc_core::CallTracerInterface* call_tracer,
|
||||
grpc_slice_buffer* outbuf) {
|
||||
grpc_slice hdr;
|
||||
uint8_t* p;
|
||||
|
@ -77,10 +77,7 @@ void grpc_chttp2_encode_data(uint32_t id, grpc_slice_buffer* inbuf,
|
|||
|
||||
grpc_slice_buffer_move_first_no_ref(inbuf, write_bytes, outbuf);
|
||||
|
||||
stats->framing_bytes += header_size;
|
||||
if (!grpc_core::IsHttp2StatsFixEnabled()) {
|
||||
stats->data_bytes += write_bytes;
|
||||
}
|
||||
call_tracer->RecordOutgoingBytes({header_size, 0, 0});
|
||||
}
|
||||
|
||||
grpc_core::Poll<grpc_error_handle> grpc_deframe_unprocessed_incoming_frames(
|
||||
|
@ -129,8 +126,7 @@ grpc_core::Poll<grpc_error_handle> grpc_deframe_unprocessed_incoming_frames(
|
|||
if (min_progress_size != nullptr) *min_progress_size = 0;
|
||||
|
||||
if (stream_out != nullptr) {
|
||||
s->stats.incoming.framing_bytes += 5;
|
||||
s->stats.incoming.data_bytes += length;
|
||||
s->call_tracer_wrapper.RecordIncomingBytes({5, length, 0});
|
||||
grpc_slice_buffer_move_first_into_buffer(slices, 5, header);
|
||||
grpc_slice_buffer_move_first(slices, length, stream_out->c_slice_buffer());
|
||||
}
|
||||
|
|
|
@ -33,6 +33,7 @@
|
|||
#include "src/core/lib/promise/poll.h"
|
||||
#include "src/core/lib/slice/slice_buffer.h"
|
||||
#include "src/core/lib/transport/transport.h"
|
||||
#include "src/core/telemetry/call_tracer.h"
|
||||
|
||||
// start processing a new data frame
|
||||
absl::Status grpc_chttp2_data_parser_begin_frame(uint8_t flags,
|
||||
|
@ -49,7 +50,7 @@ grpc_error_handle grpc_chttp2_data_parser_parse(void* parser,
|
|||
|
||||
void grpc_chttp2_encode_data(uint32_t id, grpc_slice_buffer* inbuf,
|
||||
uint32_t write_bytes, int is_eof,
|
||||
grpc_transport_one_way_stats* stats,
|
||||
grpc_core::CallTracerInterface* call_tracer,
|
||||
grpc_slice_buffer* outbuf);
|
||||
|
||||
grpc_core::Poll<grpc_error_handle> grpc_deframe_unprocessed_incoming_frames(
|
||||
|
|
|
@ -25,11 +25,11 @@
|
|||
|
||||
#include "absl/container/flat_hash_map.h"
|
||||
#include "absl/log/check.h"
|
||||
#include "absl/log/log.h"
|
||||
#include "absl/status/status.h"
|
||||
#include "absl/strings/str_format.h"
|
||||
|
||||
#include <grpc/support/alloc.h>
|
||||
#include <grpc/support/log.h>
|
||||
#include <grpc/support/port_platform.h>
|
||||
|
||||
#include "src/core/ext/transport/chttp2/transport/internal.h"
|
||||
|
@ -94,8 +94,8 @@ grpc_error_handle grpc_chttp2_ping_parser_parse(void* parser,
|
|||
CHECK(is_last);
|
||||
if (p->is_ack) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http2_ping)) {
|
||||
gpr_log(GPR_INFO, "%s[%p]: received ping ack %" PRIx64,
|
||||
t->is_client ? "CLIENT" : "SERVER", t, p->opaque_8bytes);
|
||||
LOG(INFO) << (t->is_client ? "CLIENT" : "SERVER") << "[" << t
|
||||
<< "]: received ping ack " << p->opaque_8bytes;
|
||||
}
|
||||
grpc_chttp2_ack_ping(t, p->opaque_8bytes);
|
||||
} else {
|
||||
|
@ -104,16 +104,15 @@ grpc_error_handle grpc_chttp2_ping_parser_parse(void* parser,
|
|||
t->keepalive_permit_without_calls == 0 && t->stream_map.empty();
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http_keepalive) ||
|
||||
GRPC_TRACE_FLAG_ENABLED(http)) {
|
||||
gpr_log(GPR_INFO, "SERVER[%p]: received ping %" PRIx64 ": %s", t,
|
||||
p->opaque_8bytes,
|
||||
t->ping_abuse_policy.GetDebugString(transport_idle).c_str());
|
||||
LOG(INFO) << "SERVER[" << t << "]: received ping " << p->opaque_8bytes
|
||||
<< ": "
|
||||
<< t->ping_abuse_policy.GetDebugString(transport_idle);
|
||||
}
|
||||
if (t->ping_abuse_policy.ReceivedOnePing(transport_idle)) {
|
||||
grpc_chttp2_exceeded_ping_strikes(t);
|
||||
}
|
||||
} else if (GRPC_TRACE_FLAG_ENABLED(http2_ping)) {
|
||||
gpr_log(GPR_INFO, "CLIENT[%p]: received ping %" PRIx64, t,
|
||||
p->opaque_8bytes);
|
||||
LOG(INFO) << "CLIENT[" << t << "]: received ping " << p->opaque_8bytes;
|
||||
}
|
||||
if (t->ack_pings) {
|
||||
if (t->ping_ack_count == t->ping_ack_capacity) {
|
||||
|
|
|
@ -21,13 +21,13 @@
|
|||
#include <stddef.h>
|
||||
|
||||
#include "absl/log/check.h"
|
||||
#include "absl/log/log.h"
|
||||
#include "absl/random/distributions.h"
|
||||
#include "absl/status/status.h"
|
||||
#include "absl/strings/str_cat.h"
|
||||
#include "absl/strings/str_format.h"
|
||||
|
||||
#include <grpc/slice_buffer.h>
|
||||
#include <grpc/support/log.h>
|
||||
#include <grpc/support/port_platform.h>
|
||||
|
||||
#include "src/core/ext/transport/chttp2/transport/internal.h"
|
||||
|
@ -39,11 +39,13 @@
|
|||
#include "src/core/lib/transport/http2_errors.h"
|
||||
#include "src/core/lib/transport/metadata_batch.h"
|
||||
|
||||
grpc_slice grpc_chttp2_rst_stream_create(uint32_t id, uint32_t code,
|
||||
grpc_transport_one_way_stats* stats) {
|
||||
grpc_slice grpc_chttp2_rst_stream_create(
|
||||
uint32_t id, uint32_t code, grpc_core::CallTracerInterface* call_tracer) {
|
||||
static const size_t frame_size = 13;
|
||||
grpc_slice slice = GRPC_SLICE_MALLOC(frame_size);
|
||||
if (stats != nullptr) stats->framing_bytes += frame_size;
|
||||
if (call_tracer != nullptr) {
|
||||
call_tracer->RecordOutgoingBytes({frame_size, 0, 0});
|
||||
}
|
||||
uint8_t* p = GRPC_SLICE_START_PTR(slice);
|
||||
|
||||
// Frame size.
|
||||
|
@ -70,10 +72,10 @@ grpc_slice grpc_chttp2_rst_stream_create(uint32_t id, uint32_t code,
|
|||
|
||||
void grpc_chttp2_add_rst_stream_to_next_write(
|
||||
grpc_chttp2_transport* t, uint32_t id, uint32_t code,
|
||||
grpc_transport_one_way_stats* stats) {
|
||||
grpc_core::CallTracerInterface* call_tracer) {
|
||||
t->num_pending_induced_frames++;
|
||||
grpc_slice_buffer_add(&t->qbuf,
|
||||
grpc_chttp2_rst_stream_create(id, code, stats));
|
||||
grpc_chttp2_rst_stream_create(id, code, call_tracer));
|
||||
}
|
||||
|
||||
grpc_error_handle grpc_chttp2_rst_stream_parser_begin_frame(
|
||||
|
@ -102,7 +104,8 @@ grpc_error_handle grpc_chttp2_rst_stream_parser_parse(void* parser,
|
|||
cur++;
|
||||
p->byte++;
|
||||
}
|
||||
s->stats.incoming.framing_bytes += static_cast<uint64_t>(end - cur);
|
||||
uint64_t framing_bytes = static_cast<uint64_t>(end - cur);
|
||||
s->call_tracer_wrapper.RecordIncomingBytes({framing_bytes, 0, 0});
|
||||
|
||||
if (p->byte == 4) {
|
||||
CHECK(is_last);
|
||||
|
@ -111,9 +114,8 @@ grpc_error_handle grpc_chttp2_rst_stream_parser_parse(void* parser,
|
|||
((static_cast<uint32_t>(p->reason_bytes[2])) << 8) |
|
||||
((static_cast<uint32_t>(p->reason_bytes[3])));
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"[chttp2 transport=%p stream=%p] received RST_STREAM(reason=%d)",
|
||||
t, s, reason);
|
||||
LOG(INFO) << "[chttp2 transport=" << t << " stream=" << s
|
||||
<< "] received RST_STREAM(reason=" << reason << ")";
|
||||
}
|
||||
grpc_error_handle error;
|
||||
if (reason != GRPC_HTTP2_NO_ERROR || s->trailing_metadata_buffer.empty()) {
|
||||
|
|
|
@@ -27,20 +27,22 @@
 #include "src/core/ext/transport/chttp2/transport/legacy_frame.h"
 #include "src/core/lib/iomgr/error.h"
 #include "src/core/lib/transport/transport.h"
+#include "src/core/telemetry/call_tracer.h"

 struct grpc_chttp2_rst_stream_parser {
   uint8_t byte;
   uint8_t reason_bytes[4];
 };
-grpc_slice grpc_chttp2_rst_stream_create(uint32_t stream_id, uint32_t code,
-                                         grpc_transport_one_way_stats* stats);
+grpc_slice grpc_chttp2_rst_stream_create(
+    uint32_t stream_id, uint32_t code,
+    grpc_core::CallTracerInterface* call_tracer);

 // Adds RST_STREAM frame to t->qbuf (buffer for the next write). Should be
 // called when we want to add RST_STREAM and we are not in
 // write_action_begin_locked.
 void grpc_chttp2_add_rst_stream_to_next_write(
     grpc_chttp2_transport* t, uint32_t id, uint32_t code,
-    grpc_transport_one_way_stats* stats);
+    grpc_core::CallTracerInterface* call_tracer);

 grpc_error_handle grpc_chttp2_rst_stream_parser_begin_frame(
     grpc_chttp2_rst_stream_parser* parser, uint32_t length, uint8_t flags);
|
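Taken together with the .cc change above, the RST_STREAM helpers now accept a nullable grpc_core::CallTracerInterface* in place of the per-stream stats pointer. A minimal caller sketch under the new signature (illustrative only, not part of the diff; `t` and `s` are assumed to be a live transport/stream pair):

// Sketch: queue an RST_STREAM(NO_ERROR) for stream `s` on transport `t`.
// The stream's Chttp2CallTracerWrapper attributes the 13 framing bytes to both
// the legacy stats struct and any attached call tracer; callers with no stream
// context may pass nullptr instead.
void QueueRstStreamSketch(grpc_chttp2_transport* t, grpc_chttp2_stream* s) {
  grpc_chttp2_add_rst_stream_to_next_write(t, s->id, GRPC_HTTP2_NO_ERROR,
                                           &s->call_tracer_wrapper);
  grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM);
}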
|
|
@ -23,11 +23,11 @@
|
|||
#include <string>
|
||||
|
||||
#include "absl/base/attributes.h"
|
||||
#include "absl/log/log.h"
|
||||
#include "absl/status/status.h"
|
||||
#include "absl/strings/str_format.h"
|
||||
|
||||
#include <grpc/slice_buffer.h>
|
||||
#include <grpc/support/log.h>
|
||||
#include <grpc/support/port_platform.h>
|
||||
|
||||
#include "src/core/ext/transport/chttp2/transport/flow_control.h"
|
||||
|
@ -110,7 +110,7 @@ grpc_error_handle grpc_chttp2_settings_parser_parse(void* p,
|
|||
if (t->notify_on_receive_settings != nullptr) {
|
||||
if (t->interested_parties_until_recv_settings != nullptr) {
|
||||
grpc_endpoint_delete_from_pollset_set(
|
||||
t->ep, t->interested_parties_until_recv_settings);
|
||||
t->ep.get(), t->interested_parties_until_recv_settings);
|
||||
t->interested_parties_until_recv_settings = nullptr;
|
||||
}
|
||||
grpc_core::ExecCtx::Run(DEBUG_LOCATION,
|
||||
|
@ -172,9 +172,9 @@ grpc_error_handle grpc_chttp2_settings_parser_parse(void* p,
|
|||
parser->incoming_settings->initial_window_size();
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http) ||
|
||||
GRPC_TRACE_FLAG_ENABLED(flowctl)) {
|
||||
gpr_log(GPR_INFO, "%p[%s] adding %d for initial_window change", t,
|
||||
t->is_client ? "cli" : "svr",
|
||||
static_cast<int>(t->initial_window_update));
|
||||
LOG(INFO) << t << "[" << (t->is_client ? "cli" : "svr")
|
||||
<< "] adding " << t->initial_window_update
|
||||
<< " for initial_window change";
|
||||
}
|
||||
}
|
||||
auto error =
|
||||
|
@ -188,11 +188,10 @@ grpc_error_handle grpc_chttp2_settings_parser_parse(void* p,
|
|||
grpc_core::Http2Settings::WireIdToName(parser->id).c_str()));
|
||||
}
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http)) {
|
||||
gpr_log(GPR_INFO, "CHTTP2:%s:%s: got setting %s = %d",
|
||||
t->is_client ? "CLI" : "SVR",
|
||||
std::string(t->peer_string.as_string_view()).c_str(),
|
||||
grpc_core::Http2Settings::WireIdToName(parser->id).c_str(),
|
||||
parser->value);
|
||||
LOG(INFO) << "CHTTP2:" << (t->is_client ? "CLI" : "SVR") << ":"
|
||||
<< t->peer_string.as_string_view() << ": got setting "
|
||||
<< grpc_core::Http2Settings::WireIdToName(parser->id)
|
||||
<< " = " << parser->value;
|
||||
}
|
||||
} break;
|
||||
}
|
||||
|
|
|
@ -32,10 +32,13 @@
|
|||
#include "src/core/ext/transport/chttp2/transport/internal.h"
|
||||
|
||||
grpc_slice grpc_chttp2_window_update_create(
|
||||
uint32_t id, uint32_t window_delta, grpc_transport_one_way_stats* stats) {
|
||||
uint32_t id, uint32_t window_delta,
|
||||
grpc_core::CallTracerInterface* call_tracer) {
|
||||
static const size_t frame_size = 13;
|
||||
grpc_slice slice = GRPC_SLICE_MALLOC(frame_size);
|
||||
stats->framing_bytes += frame_size;
|
||||
if (call_tracer != nullptr) {
|
||||
call_tracer->RecordOutgoingBytes({frame_size, 0, 0});
|
||||
}
|
||||
uint8_t* p = GRPC_SLICE_START_PTR(slice);
|
||||
|
||||
CHECK(window_delta);
|
||||
|
@ -84,7 +87,8 @@ grpc_error_handle grpc_chttp2_window_update_parser_parse(
|
|||
}
|
||||
|
||||
if (s != nullptr) {
|
||||
s->stats.incoming.framing_bytes += static_cast<uint32_t>(end - cur);
|
||||
uint64_t framing_bytes = static_cast<uint32_t>(end - cur);
|
||||
s->call_tracer_wrapper.RecordIncomingBytes({framing_bytes, 0, 0});
|
||||
}
|
||||
|
||||
if (p->byte == 4) {
|
||||
|
|
|
@@ -27,6 +27,7 @@
 #include "src/core/ext/transport/chttp2/transport/legacy_frame.h"
 #include "src/core/lib/iomgr/error.h"
 #include "src/core/lib/transport/transport.h"
+#include "src/core/telemetry/call_tracer.h"

 struct grpc_chttp2_window_update_parser {
   uint8_t byte;
@@ -34,7 +35,8 @@ struct grpc_chttp2_window_update_parser {
   uint32_t amount;
 };
 grpc_slice grpc_chttp2_window_update_create(
-    uint32_t id, uint32_t window_delta, grpc_transport_one_way_stats* stats);
+    uint32_t id, uint32_t window_delta,
+    grpc_core::CallTracerInterface* call_tracer);

 grpc_error_handle grpc_chttp2_window_update_parser_begin_frame(
     grpc_chttp2_window_update_parser* parser, uint32_t length, uint8_t flags);
|
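As with RST_STREAM, the WINDOW_UPDATE constructor now takes a nullable call tracer. A sketch of the two call shapes used later in this change set (illustrative only, not part of the diff; `outbuf`, `s`, `stream_announce`, and `transport_announce` mirror the names in the writing path below):

// Stream-level update: framing bytes are attributed via the stream's wrapper.
grpc_slice_buffer_add(outbuf,
                      grpc_chttp2_window_update_create(
                          s->id, stream_announce, &s->call_tracer_wrapper));
// Transport-level update (stream 0): no per-call tracer exists, pass nullptr.
grpc_slice_buffer_add(outbuf, grpc_chttp2_window_update_create(
                                  0, transport_announce, nullptr));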
|
|
@ -22,10 +22,10 @@
|
|||
#include <cstdint>
|
||||
|
||||
#include "absl/log/check.h"
|
||||
#include "absl/log/log.h"
|
||||
|
||||
#include <grpc/slice.h>
|
||||
#include <grpc/slice_buffer.h>
|
||||
#include <grpc/support/log.h>
|
||||
#include <grpc/support/port_platform.h>
|
||||
|
||||
#include "src/core/ext/transport/chttp2/transport/bin_encoder.h"
|
||||
|
@ -87,7 +87,7 @@ void HPackCompressor::Frame(const EncodeHeaderOptions& options,
|
|||
if (options.is_end_of_stream) {
|
||||
flags |= GRPC_CHTTP2_DATA_FLAG_END_STREAM;
|
||||
}
|
||||
options.stats->header_bytes += raw.Length();
|
||||
options.call_tracer->RecordOutgoingBytes({0, 0, raw.Length()});
|
||||
while (frame_type == GRPC_CHTTP2_FRAME_HEADER || raw.Length() > 0) {
|
||||
// per the HTTP/2 spec:
|
||||
// A HEADERS frame without the END_HEADERS flag set MUST be followed by
|
||||
|
@ -101,7 +101,7 @@ void HPackCompressor::Frame(const EncodeHeaderOptions& options,
|
|||
}
|
||||
FillHeader(grpc_slice_buffer_tiny_add(output, kHeadersFrameHeaderSize),
|
||||
frame_type, options.stream_id, len, flags);
|
||||
options.stats->framing_bytes += kHeadersFrameHeaderSize;
|
||||
options.call_tracer->RecordOutgoingBytes({kHeadersFrameHeaderSize, 0, 0});
|
||||
grpc_slice_buffer_move_first(raw.c_slice_buffer(), len, output);
|
||||
|
||||
frame_type = GRPC_CHTTP2_FRAME_CONTINUATION;
|
||||
|
@ -118,8 +118,7 @@ void HPackCompressor::SetMaxTableSize(uint32_t max_table_size) {
|
|||
if (table_.SetMaxSize(std::min(max_usable_size_, max_table_size))) {
|
||||
advertise_table_size_change_ = true;
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http)) {
|
||||
gpr_log(GPR_INFO, "set max table size from encoder to %d",
|
||||
max_table_size);
|
||||
LOG(INFO) << "set max table size from encoder to " << max_table_size;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -25,12 +25,12 @@
|
|||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/log/log.h"
|
||||
#include "absl/strings/match.h"
|
||||
#include "absl/strings/str_cat.h"
|
||||
#include "absl/strings/string_view.h"
|
||||
|
||||
#include <grpc/slice.h>
|
||||
#include <grpc/support/log.h>
|
||||
#include <grpc/support/port_platform.h>
|
||||
|
||||
#include "src/core/ext/transport/chttp2/transport/hpack_constants.h"
|
||||
|
@ -42,6 +42,7 @@
|
|||
#include "src/core/lib/transport/metadata_compression_traits.h"
|
||||
#include "src/core/lib/transport/timeout_encoding.h"
|
||||
#include "src/core/lib/transport/transport.h"
|
||||
#include "src/core/telemetry/call_tracer.h"
|
||||
|
||||
namespace grpc_core {
|
||||
|
||||
|
@ -208,9 +209,7 @@ class Compressor<
|
|||
void EncodeWith(MetadataTrait, const typename MetadataTrait::ValueType& value,
|
||||
Encoder* encoder) {
|
||||
if (value != known_value) {
|
||||
gpr_log(GPR_ERROR, "%s",
|
||||
absl::StrCat("Not encoding bad ", MetadataTrait::key(), " header")
|
||||
.c_str());
|
||||
LOG(ERROR) << "Not encoding bad " << MetadataTrait::key() << " header";
|
||||
encoder->NoteEncodingError();
|
||||
return;
|
||||
}
|
||||
|
@ -355,7 +354,7 @@ class HPackCompressor {
|
|||
bool is_end_of_stream;
|
||||
bool use_true_binary_metadata;
|
||||
size_t max_frame_size;
|
||||
grpc_transport_one_way_stats* stats;
|
||||
CallTracerInterface* call_tracer;
|
||||
};
|
||||
|
||||
template <typename HeaderSet>
|
||||
|
|
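EncodeHeaderOptions now carries a CallTracerInterface* instead of the raw stats pointer, and HPackCompressor::Frame() above reports header and framing bytes through RecordOutgoingBytes(). A condensed sketch of how the writer fills the options (illustrative only; it mirrors the call that appears later in the writing.cc hunks, with `t` and `s` assumed):

// Sketch only, not part of the diff.
t->hpack_compressor.EncodeHeaders(
    grpc_core::HPackCompressor::EncodeHeaderOptions{
        s->id,                                            // stream_id
        /*is_end_of_stream=*/false,
        t->settings.peer().allow_true_binary_metadata(),  // use_true_binary_metadata
        t->settings.peer().max_frame_size(),              // max_frame_size
        &s->call_tracer_wrapper},                         // call_tracer
    *s->send_initial_metadata, t->outbuf.c_slice_buffer());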
|
@ -28,6 +28,7 @@
|
|||
|
||||
#include "absl/base/attributes.h"
|
||||
#include "absl/log/check.h"
|
||||
#include "absl/log/log.h"
|
||||
#include "absl/status/status.h"
|
||||
#include "absl/strings/match.h"
|
||||
#include "absl/strings/str_cat.h"
|
||||
|
@ -37,7 +38,6 @@
|
|||
#include "absl/types/variant.h"
|
||||
|
||||
#include <grpc/slice.h>
|
||||
#include <grpc/support/log.h>
|
||||
#include <grpc/support/port_platform.h>
|
||||
|
||||
#include "src/core/ext/transport/chttp2/transport/decode_huff.h"
|
||||
|
@ -90,12 +90,14 @@ constexpr Base64InverseTable kBase64InverseTable;
|
|||
class HPackParser::Input {
|
||||
public:
|
||||
Input(grpc_slice_refcount* current_slice_refcount, const uint8_t* begin,
|
||||
const uint8_t* end, absl::BitGenRef bitsrc, HpackParseResult& error)
|
||||
const uint8_t* end, absl::BitGenRef bitsrc,
|
||||
HpackParseResult& frame_error, HpackParseResult& field_error)
|
||||
: current_slice_refcount_(current_slice_refcount),
|
||||
begin_(begin),
|
||||
end_(end),
|
||||
frontier_(begin),
|
||||
error_(error),
|
||||
frame_error_(frame_error),
|
||||
field_error_(field_error),
|
||||
bitsrc_(bitsrc) {}
|
||||
|
||||
// If input is backed by a slice, retrieve its refcount. If not, return
|
||||
|
@ -214,14 +216,18 @@ class HPackParser::Input {
|
|||
|
||||
// Check if we saw an EOF
|
||||
bool eof_error() const {
|
||||
return min_progress_size_ != 0 || error_.connection_error();
|
||||
return min_progress_size_ != 0 || frame_error_.connection_error();
|
||||
}
|
||||
|
||||
// Reset the field error to be ok
|
||||
void ClearFieldError() {
|
||||
if (field_error_.ok()) return;
|
||||
field_error_ = HpackParseResult();
|
||||
}
|
||||
|
||||
// Minimum number of bytes to unstuck the current parse
|
||||
size_t min_progress_size() const { return min_progress_size_; }
|
||||
|
||||
bool has_error() const { return !error_.ok(); }
|
||||
|
||||
// Set the current error - tweaks the error to include a stream id so that
|
||||
// chttp2 does not close the connection.
|
||||
// Intended for errors that are specific to a stream and recoverable.
|
||||
|
@ -245,10 +251,7 @@ class HPackParser::Input {
|
|||
// read prior to being able to get further in this parse.
|
||||
void UnexpectedEOF(size_t min_progress_size) {
|
||||
CHECK_GT(min_progress_size, 0u);
|
||||
if (min_progress_size_ != 0 || error_.connection_error()) {
|
||||
DCHECK(eof_error());
|
||||
return;
|
||||
}
|
||||
if (eof_error()) return;
|
||||
// Set min progress size, taking into account bytes parsed already but not
|
||||
// consumed.
|
||||
min_progress_size_ = min_progress_size + (begin_ - frontier_);
|
||||
|
@ -301,13 +304,18 @@ class HPackParser::Input {
|
|||
// Do not use this directly, instead use SetErrorAndContinueParsing or
|
||||
// SetErrorAndStopParsing.
|
||||
void SetError(HpackParseResult error) {
|
||||
if (!error_.ok() || min_progress_size_ > 0) {
|
||||
if (error.connection_error() && !error_.connection_error()) {
|
||||
error_ = std::move(error); // connection errors dominate
|
||||
SetErrorFor(frame_error_, error);
|
||||
SetErrorFor(field_error_, std::move(error));
|
||||
}
|
||||
|
||||
void SetErrorFor(HpackParseResult& error, HpackParseResult new_error) {
|
||||
if (!error.ok() || min_progress_size_ > 0) {
|
||||
if (new_error.connection_error() && !error.connection_error()) {
|
||||
error = std::move(new_error); // connection errors dominate
|
||||
}
|
||||
return;
|
||||
}
|
||||
error_ = std::move(error);
|
||||
error = std::move(new_error);
|
||||
}
|
||||
|
||||
// Refcount if we are backed by a slice
|
||||
|
@ -319,7 +327,8 @@ class HPackParser::Input {
|
|||
// Frontier denotes the first byte past successfully processed input
|
||||
const uint8_t* frontier_;
|
||||
// Current error
|
||||
HpackParseResult& error_;
|
||||
HpackParseResult& frame_error_;
|
||||
HpackParseResult& field_error_;
|
||||
// If the error was EOF, we flag it here by noting how many more bytes would
|
||||
// be needed to make progress
|
||||
size_t min_progress_size_ = 0;
|
||||
|
@ -596,6 +605,7 @@ class HPackParser::Parser {
|
|||
bool ParseTop() {
|
||||
DCHECK(state_.parse_state == ParseState::kTop);
|
||||
auto cur = *input_->Next();
|
||||
input_->ClearFieldError();
|
||||
switch (cur >> 4) {
|
||||
// Literal header not indexed - First byte format: 0000xxxx
|
||||
// Literal header never indexed - First byte format: 0001xxxx
|
||||
|
@ -700,14 +710,15 @@ class HPackParser::Parser {
|
|||
type = "???";
|
||||
break;
|
||||
}
|
||||
gpr_log(
|
||||
GPR_DEBUG, "HTTP:%d:%s:%s: %s%s", log_info_.stream_id, type,
|
||||
log_info_.is_client ? "CLI" : "SVR", memento.md.DebugString().c_str(),
|
||||
memento.parse_status == nullptr
|
||||
? ""
|
||||
: absl::StrCat(" (parse error: ",
|
||||
memento.parse_status->Materialize().ToString(), ")")
|
||||
.c_str());
|
||||
LOG(INFO) << "HTTP:" << log_info_.stream_id << ":" << type << ":"
|
||||
<< (log_info_.is_client ? "CLI" : "SVR") << ": "
|
||||
<< memento.md.DebugString()
|
||||
<< (memento.parse_status == nullptr
|
||||
? ""
|
||||
: absl::StrCat(
|
||||
" (parse error: ",
|
||||
memento.parse_status->Materialize().ToString(),
|
||||
")"));
|
||||
}
|
||||
|
||||
void EmitHeader(const HPackTable::Memento& md) {
|
||||
|
@ -950,11 +961,10 @@ class HPackParser::Parser {
|
|||
state_.string_length)
|
||||
: String::Parse(input_, state_.is_string_huff_compressed,
|
||||
state_.string_length);
|
||||
HpackParseResult& status = state_.frame_error;
|
||||
absl::string_view key_string;
|
||||
if (auto* s = absl::get_if<Slice>(&state_.key)) {
|
||||
key_string = s->as_string_view();
|
||||
if (status.ok()) {
|
||||
if (state_.field_error.ok()) {
|
||||
auto r = ValidateKey(key_string);
|
||||
if (r != ValidateMetadataResult::kOk) {
|
||||
input_->SetErrorAndContinueParsing(
|
||||
|
@ -964,7 +974,7 @@ class HPackParser::Parser {
|
|||
} else {
|
||||
const auto* memento = absl::get<const HPackTable::Memento*>(state_.key);
|
||||
key_string = memento->md.key();
|
||||
if (status.ok() && memento->parse_status != nullptr) {
|
||||
if (state_.field_error.ok() && memento->parse_status != nullptr) {
|
||||
input_->SetErrorAndContinueParsing(*memento->parse_status);
|
||||
}
|
||||
}
|
||||
|
@ -991,16 +1001,15 @@ class HPackParser::Parser {
|
|||
key_string.size() + value.wire_size + hpack_constants::kEntryOverhead;
|
||||
auto md = grpc_metadata_batch::Parse(
|
||||
key_string, std::move(value_slice), state_.add_to_table, transport_size,
|
||||
[key_string, &status, this](absl::string_view message, const Slice&) {
|
||||
if (!status.ok()) return;
|
||||
[key_string, this](absl::string_view message, const Slice&) {
|
||||
if (!state_.field_error.ok()) return;
|
||||
input_->SetErrorAndContinueParsing(
|
||||
HpackParseResult::MetadataParseError(key_string));
|
||||
gpr_log(GPR_ERROR, "Error parsing '%s' metadata: %s",
|
||||
std::string(key_string).c_str(),
|
||||
std::string(message).c_str());
|
||||
LOG(ERROR) << "Error parsing '" << key_string
|
||||
<< "' metadata: " << message;
|
||||
});
|
||||
HPackTable::Memento memento{std::move(md),
|
||||
status.PersistentStreamErrorOrNullptr()};
|
||||
HPackTable::Memento memento{
|
||||
std::move(md), state_.field_error.PersistentStreamErrorOrNullptr()};
|
||||
input_->UpdateFrontier();
|
||||
state_.parse_state = ParseState::kTop;
|
||||
if (state_.add_to_table) {
|
||||
|
@ -1113,13 +1122,13 @@ grpc_error_handle HPackParser::Parse(
|
|||
std::vector<uint8_t> buffer = std::move(unparsed_bytes_);
|
||||
return ParseInput(
|
||||
Input(nullptr, buffer.data(), buffer.data() + buffer.size(), bitsrc,
|
||||
state_.frame_error),
|
||||
state_.frame_error, state_.field_error),
|
||||
is_last, call_tracer);
|
||||
}
|
||||
return ParseInput(
|
||||
Input(slice.refcount, GRPC_SLICE_START_PTR(slice),
|
||||
GRPC_SLICE_END_PTR(slice), bitsrc, state_.frame_error),
|
||||
is_last, call_tracer);
|
||||
return ParseInput(Input(slice.refcount, GRPC_SLICE_START_PTR(slice),
|
||||
GRPC_SLICE_END_PTR(slice), bitsrc, state_.frame_error,
|
||||
state_.field_error),
|
||||
is_last, call_tracer);
|
||||
}
|
||||
|
||||
grpc_error_handle HPackParser::ParseInput(
|
||||
|
|
|
@ -235,6 +235,8 @@ class HPackParser {
|
|||
HPackTable hpack_table;
|
||||
// Error so far for this frame (set by class Input)
|
||||
HpackParseResult frame_error;
|
||||
// Error so far for this field (set by class Input)
|
||||
HpackParseResult field_error;
|
||||
// Length of frame so far.
|
||||
uint32_t frame_length = 0;
|
||||
// Length of the string being parsed
|
||||
|
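The parser now tracks two errors: frame_error persists for the remainder of the frame (and, for connection errors, the connection), while field_error is reset by ClearFieldError() at the top of each header field in ParseTop(). The SetErrorFor() helper shown earlier applies the same precedence rule to both slots; a standalone model of that rule (types and names here are illustrative, not gRPC API):

struct ModelError {
  bool ok = true;
  bool connection_error = false;
};

// Mirrors Input::SetErrorFor(): once an error is recorded (or the parse is
// stalled waiting for more bytes), only a connection-level error may replace
// a stream-level one; otherwise the first error simply sticks.
void MergeError(ModelError& slot, ModelError incoming, bool awaiting_bytes) {
  if (!slot.ok || awaiting_bytes) {
    if (incoming.connection_error && !slot.connection_error) {
      slot = incoming;  // connection errors dominate
    }
    return;
  }
  slot = incoming;
}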
|
|
@ -226,7 +226,8 @@ typedef enum {
|
|||
struct grpc_chttp2_transport final : public grpc_core::FilterStackTransport,
|
||||
public grpc_core::KeepsGrpcInitialized {
|
||||
grpc_chttp2_transport(const grpc_core::ChannelArgs& channel_args,
|
||||
grpc_endpoint* ep, bool is_client);
|
||||
grpc_core::OrphanablePtr<grpc_endpoint> endpoint,
|
||||
bool is_client);
|
||||
~grpc_chttp2_transport() override;
|
||||
|
||||
void Orphan() override;
|
||||
|
@ -257,7 +258,7 @@ struct grpc_chttp2_transport final : public grpc_core::FilterStackTransport,
|
|||
grpc_pollset_set* pollset_set) override;
|
||||
void PerformOp(grpc_transport_op* op) override;
|
||||
|
||||
grpc_endpoint* ep;
|
||||
grpc_core::OrphanablePtr<grpc_endpoint> ep;
|
||||
grpc_core::Mutex ep_destroy_mu; // Guards endpoint destruction only.
|
||||
|
||||
grpc_core::Slice peer_string;
|
||||
|
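With the endpoint now owned as an OrphanablePtr<grpc_endpoint>, the transport constructor takes ownership up front and non-owning call sites switch to ep.get(), as the settings and writing hunks in this diff show. A brief sketch (illustrative only; `channel_args`, `endpoint`, and `interested_parties` are assumed to exist, and both endpoint helpers appear elsewhere in this change set):

// Constructing the transport hands over endpoint ownership.
auto* t = new grpc_chttp2_transport(channel_args, std::move(endpoint),
                                    /*is_client=*/true);
// Non-owning uses go through the raw pointer.
grpc_endpoint_delete_from_pollset_set(t->ep.get(), interested_parties);
if (grpc_endpoint_can_track_err(t->ep.get())) {
  // ... per-write error tracking, as in grpc_chttp2_begin_write().
}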
@@ -553,6 +554,51 @@ typedef enum {
   GRPC_METADATA_PUBLISHED_AT_CLOSE
 } grpc_published_metadata_method;

+namespace grpc_core {
+
+// A CallTracer wrapper that updates both the legacy and new APIs for
+// transport byte sizes.
+// TODO(ctiller): This can go away as part of removing the
+// grpc_transport_stream_stats struct.
+class Chttp2CallTracerWrapper final : public CallTracerInterface {
+ public:
+  explicit Chttp2CallTracerWrapper(grpc_chttp2_stream* stream)
+      : stream_(stream) {}
+
+  void RecordIncomingBytes(
+      const TransportByteSize& transport_byte_size) override;
+  void RecordOutgoingBytes(
+      const TransportByteSize& transport_byte_size) override;
+
+  // Everything else is a no-op.
+  void RecordSendInitialMetadata(
+      grpc_metadata_batch* /*send_initial_metadata*/) override {}
+  void RecordSendTrailingMetadata(
+      grpc_metadata_batch* /*send_trailing_metadata*/) override {}
+  void RecordSendMessage(const SliceBuffer& /*send_message*/) override {}
+  void RecordSendCompressedMessage(
+      const SliceBuffer& /*send_compressed_message*/) override {}
+  void RecordReceivedInitialMetadata(
+      grpc_metadata_batch* /*recv_initial_metadata*/) override {}
+  void RecordReceivedMessage(const SliceBuffer& /*recv_message*/) override {}
+  void RecordReceivedDecompressedMessage(
+      const SliceBuffer& /*recv_decompressed_message*/) override {}
+  void RecordCancel(grpc_error_handle /*cancel_error*/) override {}
+  std::shared_ptr<TcpTracerInterface> StartNewTcpTrace() override {
+    return nullptr;
+  }
+  void RecordAnnotation(absl::string_view /*annotation*/) override {}
+  void RecordAnnotation(const Annotation& /*annotation*/) override {}
+  std::string TraceId() override { return ""; }
+  std::string SpanId() override { return ""; }
+  bool IsSampled() override { return false; }
+
+ private:
+  grpc_chttp2_stream* stream_;
+};
+
+}  // namespace grpc_core
+
 struct grpc_chttp2_stream {
   grpc_chttp2_stream(grpc_chttp2_transport* t, grpc_stream_refcount* refcount,
                      const void* server_data, grpc_core::Arena* arena);
@@ -652,7 +698,11 @@ struct grpc_chttp2_stream {
   /// Number of times written
   int64_t write_counter = 0;

+  grpc_core::Chttp2CallTracerWrapper call_tracer_wrapper;
+
   /// Only set when enabled.
+  // TODO(roth): Remove this when the call_tracer_in_transport
+  // experiment finishes rolling out.
   grpc_core::CallTracerAnnotationInterface* call_tracer = nullptr;

   /// Only set when enabled.
|
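The wrapper member added just above is what the parsing and writing paths in this change use for byte attribution. The brace-initialized argument is a CallTracerInterface::TransportByteSize; the hunks here use the first field for framing bytes and the third for header bytes, with the middle field presumably carrying message-data bytes. A small usage sketch (illustrative only, not part of the diff; the 9-byte value is the HTTP/2 frame-header size these hunks record):

// Sketch only. Attribute one received DATA frame to the stream's tracer(s).
void RecordDataFrameSketch(grpc_chttp2_stream* s, uint64_t payload_bytes) {
  s->call_tracer_wrapper.RecordIncomingBytes(
      {/*framing=*/9, /*data=*/payload_bytes, /*header=*/0});
}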
@@ -807,13 +857,8 @@ void grpc_chttp2_settings_timeout(
 #define GRPC_CHTTP2_CLIENT_CONNECT_STRLEN \
   (sizeof(GRPC_CHTTP2_CLIENT_CONNECT_STRING) - 1)

-//
-#define GRPC_CHTTP2_IF_TRACING(stmt)     \
-  do {                                   \
-    if (GRPC_TRACE_FLAG_ENABLED(http)) { \
-      (stmt);                            \
-    }                                    \
-  } while (0)
+#define GRPC_CHTTP2_IF_TRACING(severity) \
+  LOG_IF(severity, GRPC_TRACE_FLAG_ENABLED(http))
|
||||
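Call sites change from wrapping a gpr_log statement to streaming into the macro, which now expands to an absl LOG_IF gated on the http trace flag. Before/after, taken from the parsing changes later in this diff:

// Old statement form:
//   GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing Trailers-Only"));
// New streaming form:
GRPC_CHTTP2_IF_TRACING(INFO) << "parsing Trailers-Only";
GRPC_CHTTP2_IF_TRACING(ERROR) << "grpc_chttp2_stream not accepted";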
|
||||
void grpc_chttp2_fake_status(grpc_chttp2_transport* t,
|
||||
grpc_chttp2_stream* stream,
|
||||
|
|
|
@ -40,7 +40,6 @@
|
|||
#include <grpc/event_engine/event_engine.h>
|
||||
#include <grpc/slice.h>
|
||||
#include <grpc/slice_buffer.h>
|
||||
#include <grpc/support/log.h>
|
||||
#include <grpc/support/port_platform.h>
|
||||
|
||||
#include "src/core/channelz/channelz.h"
|
||||
|
@ -332,10 +331,11 @@ absl::variant<size_t, absl::Status> grpc_chttp2_perform_read(
|
|||
DCHECK_LT(cur, end);
|
||||
t->incoming_stream_id |= (static_cast<uint32_t>(*cur));
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http)) {
|
||||
gpr_log(GPR_INFO, "INCOMING[%p]: %s len:%d id:0x%08x", t,
|
||||
FrameTypeString(t->incoming_frame_type, t->incoming_frame_flags)
|
||||
.c_str(),
|
||||
t->incoming_frame_size, t->incoming_stream_id);
|
||||
LOG(INFO) << "INCOMING[" << t << "]: "
|
||||
<< FrameTypeString(t->incoming_frame_type,
|
||||
t->incoming_frame_flags)
|
||||
<< " len:" << t->incoming_frame_size
|
||||
<< absl::StrFormat(" id:0x%08x", t->incoming_stream_id);
|
||||
}
|
||||
t->deframe_state = GRPC_DTS_FRAME;
|
||||
err = init_frame_parser(t, requests_started);
|
||||
|
@ -453,7 +453,8 @@ static grpc_error_handle init_frame_parser(grpc_chttp2_transport* t,
|
|||
return init_goaway_parser(t);
|
||||
default:
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http)) {
|
||||
gpr_log(GPR_ERROR, "Unknown frame type %02x", t->incoming_frame_type);
|
||||
LOG(ERROR) << "Unknown frame type "
|
||||
<< absl::StrFormat("%02x", t->incoming_frame_type);
|
||||
}
|
||||
return init_non_header_skip_frame_parser(t);
|
||||
}
|
||||
|
@ -554,7 +555,7 @@ static grpc_error_handle init_data_frame_parser(grpc_chttp2_transport* t) {
|
|||
return init_non_header_skip_frame_parser(t);
|
||||
}
|
||||
s->received_bytes += t->incoming_frame_size;
|
||||
s->stats.incoming.framing_bytes += 9;
|
||||
s->call_tracer_wrapper.RecordIncomingBytes({9, 0, 0});
|
||||
if (s->read_closed) {
|
||||
return init_non_header_skip_frame_parser(t);
|
||||
}
|
||||
|
@ -573,7 +574,7 @@ error_handler:
|
|||
absl_status_to_grpc_error(status));
|
||||
grpc_chttp2_add_rst_stream_to_next_write(t, t->incoming_stream_id,
|
||||
GRPC_HTTP2_PROTOCOL_ERROR,
|
||||
&s->stats.outgoing);
|
||||
&s->call_tracer_wrapper);
|
||||
return init_non_header_skip_frame_parser(t);
|
||||
} else {
|
||||
return absl_status_to_grpc_error(status);
|
||||
|
@ -611,9 +612,8 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
|
|||
s = grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
|
||||
if (s == nullptr) {
|
||||
if (GPR_UNLIKELY(is_continuation)) {
|
||||
GRPC_CHTTP2_IF_TRACING(
|
||||
gpr_log(GPR_ERROR,
|
||||
"grpc_chttp2_stream disbanded before CONTINUATION received"));
|
||||
GRPC_CHTTP2_IF_TRACING(ERROR)
|
||||
<< "grpc_chttp2_stream disbanded before CONTINUATION received";
|
||||
return init_header_skip_frame_parser(t, priority_type, is_eoh);
|
||||
}
|
||||
if (t->is_client) {
|
||||
|
@ -621,23 +621,21 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
|
|||
t->incoming_stream_id < t->next_stream_id)) {
|
||||
// this is an old (probably cancelled) grpc_chttp2_stream
|
||||
} else {
|
||||
GRPC_CHTTP2_IF_TRACING(gpr_log(
|
||||
GPR_ERROR, "ignoring new grpc_chttp2_stream creation on client"));
|
||||
GRPC_CHTTP2_IF_TRACING(ERROR)
|
||||
<< "ignoring new grpc_chttp2_stream creation on client";
|
||||
}
|
||||
return init_header_skip_frame_parser(t, priority_type, is_eoh);
|
||||
} else if (GPR_UNLIKELY(t->last_new_stream_id >= t->incoming_stream_id)) {
|
||||
GRPC_CHTTP2_IF_TRACING(gpr_log(
|
||||
GPR_ERROR,
|
||||
"ignoring out of order new grpc_chttp2_stream request on server; "
|
||||
"last grpc_chttp2_stream "
|
||||
"id=%d, new grpc_chttp2_stream id=%d",
|
||||
t->last_new_stream_id, t->incoming_stream_id));
|
||||
GRPC_CHTTP2_IF_TRACING(ERROR)
|
||||
<< "ignoring out of order new grpc_chttp2_stream request on server; "
|
||||
"last grpc_chttp2_stream id="
|
||||
<< t->last_new_stream_id
|
||||
<< ", new grpc_chttp2_stream id=" << t->incoming_stream_id;
|
||||
return init_header_skip_frame_parser(t, priority_type, is_eoh);
|
||||
} else if (GPR_UNLIKELY((t->incoming_stream_id & 1) == 0)) {
|
||||
GRPC_CHTTP2_IF_TRACING(gpr_log(
|
||||
GPR_ERROR,
|
||||
"ignoring grpc_chttp2_stream with non-client generated index %d",
|
||||
t->incoming_stream_id));
|
||||
GRPC_CHTTP2_IF_TRACING(ERROR)
|
||||
<< "ignoring grpc_chttp2_stream with non-client generated index "
|
||||
<< t->incoming_stream_id;
|
||||
return init_header_skip_frame_parser(t, priority_type, is_eoh);
|
||||
} else if (GPR_UNLIKELY(t->stream_map.size() + t->extra_streams >=
|
||||
t->settings.acked().max_concurrent_streams())) {
|
||||
|
@ -678,21 +676,22 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
|
|||
} else if (t->sent_goaway_state == GRPC_CHTTP2_FINAL_GOAWAY_SENT ||
|
||||
t->sent_goaway_state ==
|
||||
GRPC_CHTTP2_FINAL_GOAWAY_SEND_SCHEDULED) {
|
||||
GRPC_CHTTP2_IF_TRACING(gpr_log(
|
||||
GPR_INFO,
|
||||
"transport:%p SERVER peer:%s Final GOAWAY sent. Ignoring new "
|
||||
"grpc_chttp2_stream request id=%d, last grpc_chttp2_stream id=%d",
|
||||
t, std::string(t->peer_string.as_string_view()).c_str(),
|
||||
t->incoming_stream_id, t->last_new_stream_id));
|
||||
GRPC_CHTTP2_IF_TRACING(INFO)
|
||||
<< "transport:" << t
|
||||
<< " SERVER peer:" << t->peer_string.as_string_view()
|
||||
<< " Final GOAWAY sent. Ignoring new grpc_chttp2_stream request "
|
||||
"id="
|
||||
<< t->incoming_stream_id
|
||||
<< ", last grpc_chttp2_stream id=" << t->last_new_stream_id;
|
||||
;
|
||||
return init_header_skip_frame_parser(t, priority_type, is_eoh);
|
||||
} else if (t->num_incoming_streams_before_settings_ack == 0) {
|
||||
GRPC_CHTTP2_IF_TRACING(gpr_log(
|
||||
GPR_ERROR,
|
||||
"transport:%p SERVER peer:%s rejecting grpc_chttp2_stream id=%d, "
|
||||
"last grpc_chttp2_stream id=%d before settings have been "
|
||||
"acknowledged",
|
||||
t, std::string(t->peer_string.as_string_view()).c_str(),
|
||||
t->incoming_stream_id, t->last_new_stream_id));
|
||||
GRPC_CHTTP2_IF_TRACING(ERROR)
|
||||
<< "transport:" << t
|
||||
<< " SERVER peer:" << t->peer_string.as_string_view()
|
||||
<< " rejecting grpc_chttp2_stream id=" << t->incoming_stream_id
|
||||
<< ", last grpc_chttp2_stream id=" << t->last_new_stream_id
|
||||
<< " before settings have been acknowledged";
|
||||
++t->num_pending_induced_frames;
|
||||
grpc_slice_buffer_add(
|
||||
&t->qbuf,
|
||||
|
@ -708,18 +707,16 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
|
|||
grpc_chttp2_parsing_accept_stream(t, t->incoming_stream_id);
|
||||
++requests_started;
|
||||
if (GPR_UNLIKELY(s == nullptr)) {
|
||||
GRPC_CHTTP2_IF_TRACING(
|
||||
gpr_log(GPR_ERROR, "grpc_chttp2_stream not accepted"));
|
||||
GRPC_CHTTP2_IF_TRACING(ERROR) << "grpc_chttp2_stream not accepted";
|
||||
return init_header_skip_frame_parser(t, priority_type, is_eoh);
|
||||
}
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http) ||
|
||||
GRPC_TRACE_FLAG_ENABLED(chttp2_new_stream)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"[t:%p fd:%d peer:%s] Accepting new stream; "
|
||||
"num_incoming_streams_before_settings_ack=%u",
|
||||
t, grpc_endpoint_get_fd(t->ep),
|
||||
std::string(t->peer_string.as_string_view()).c_str(),
|
||||
t->num_incoming_streams_before_settings_ack);
|
||||
LOG(INFO) << "[t:" << t << " fd:" << grpc_endpoint_get_fd(t->ep.get())
|
||||
<< " peer:" << t->peer_string.as_string_view()
|
||||
<< "] Accepting new stream; "
|
||||
"num_incoming_streams_before_settings_ack="
|
||||
<< t->num_incoming_streams_before_settings_ack;
|
||||
}
|
||||
if (t->channelz_socket != nullptr) {
|
||||
t->channelz_socket->RecordStreamStartedFromRemote();
|
||||
|
@ -728,10 +725,10 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
|
|||
t->incoming_stream = s;
|
||||
}
|
||||
DCHECK_NE(s, nullptr);
|
||||
s->stats.incoming.framing_bytes += 9;
|
||||
s->call_tracer_wrapper.RecordIncomingBytes({9, 0, 0});
|
||||
if (GPR_UNLIKELY(s->read_closed)) {
|
||||
GRPC_CHTTP2_IF_TRACING(gpr_log(
|
||||
GPR_ERROR, "skipping already closed grpc_chttp2_stream header"));
|
||||
GRPC_CHTTP2_IF_TRACING(ERROR)
|
||||
<< "skipping already closed grpc_chttp2_stream header";
|
||||
t->incoming_stream = nullptr;
|
||||
return init_header_skip_frame_parser(t, priority_type, is_eoh);
|
||||
}
|
||||
|
@ -745,7 +742,7 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
|
|||
switch (s->header_frames_received) {
|
||||
case 0:
|
||||
if (t->is_client && t->header_eof) {
|
||||
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing Trailers-Only"));
|
||||
GRPC_CHTTP2_IF_TRACING(INFO) << "parsing Trailers-Only";
|
||||
if (s->trailing_metadata_available != nullptr) {
|
||||
*s->trailing_metadata_available = true;
|
||||
}
|
||||
|
@ -755,13 +752,13 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
|
|||
incoming_metadata_buffer = &s->trailing_metadata_buffer;
|
||||
frame_type = HPackParser::LogInfo::kTrailers;
|
||||
} else {
|
||||
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing initial_metadata"));
|
||||
GRPC_CHTTP2_IF_TRACING(INFO) << "parsing initial_metadata";
|
||||
incoming_metadata_buffer = &s->initial_metadata_buffer;
|
||||
frame_type = HPackParser::LogInfo::kHeaders;
|
||||
}
|
||||
break;
|
||||
case 1:
|
||||
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing trailing_metadata"));
|
||||
GRPC_CHTTP2_IF_TRACING(INFO) << "parsing trailing_metadata";
|
||||
incoming_metadata_buffer = &s->trailing_metadata_buffer;
|
||||
frame_type = HPackParser::LogInfo::kTrailers;
|
||||
break;
|
||||
|
@ -794,12 +791,12 @@ static grpc_error_handle init_window_update_frame_parser(
|
|||
grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
|
||||
if (s == nullptr) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http)) {
|
||||
gpr_log(GPR_ERROR, "Stream %d not found, ignoring WINDOW_UPDATE",
|
||||
t->incoming_stream_id);
|
||||
LOG(ERROR) << "Stream " << t->incoming_stream_id
|
||||
<< " not found, ignoring WINDOW_UPDATE";
|
||||
}
|
||||
return init_non_header_skip_frame_parser(t);
|
||||
}
|
||||
s->stats.incoming.framing_bytes += 9;
|
||||
s->call_tracer_wrapper.RecordIncomingBytes({9, 0, 0});
|
||||
}
|
||||
t->parser = grpc_chttp2_transport::Parser{
|
||||
"window_update", grpc_chttp2_window_update_parser_parse,
|
||||
|
@ -825,7 +822,7 @@ static grpc_error_handle init_rst_stream_parser(grpc_chttp2_transport* t) {
|
|||
if (!t->incoming_stream) {
|
||||
return init_non_header_skip_frame_parser(t);
|
||||
}
|
||||
s->stats.incoming.framing_bytes += 9;
|
||||
s->call_tracer_wrapper.RecordIncomingBytes({9, 0, 0});
|
||||
t->parser = grpc_chttp2_transport::Parser{
|
||||
"rst_stream", grpc_chttp2_rst_stream_parser_parse, &t->simple.rst_stream};
|
||||
if (!t->is_client && grpc_core::IsRstpitEnabled()) {
|
||||
|
@ -896,8 +893,8 @@ static grpc_error_handle parse_frame_slice(grpc_chttp2_transport* t,
|
|||
return err;
|
||||
}
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http)) {
|
||||
gpr_log(GPR_ERROR, "INCOMING[%p;%p]: Parse failed with %s", t, s,
|
||||
err.ToString().c_str());
|
||||
LOG(ERROR) << "INCOMING[" << t << ";" << s << "]: Parse failed with "
|
||||
<< err;
|
||||
}
|
||||
if (grpc_error_get_int(err, grpc_core::StatusIntProperty::kStreamId,
|
||||
&unused)) {
|
||||
|
@ -921,7 +918,7 @@ static void force_client_rst_stream(void* sp, grpc_error_handle /*error*/) {
|
|||
grpc_chttp2_transport* t = s->t.get();
|
||||
if (!s->write_closed) {
|
||||
grpc_chttp2_add_rst_stream_to_next_write(t, s->id, GRPC_HTTP2_NO_ERROR,
|
||||
&s->stats.outgoing);
|
||||
&s->call_tracer_wrapper);
|
||||
grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM);
|
||||
grpc_chttp2_mark_stream_closed(t, s, true, true, absl::OkStatus());
|
||||
}
|
||||
|
@ -936,9 +933,12 @@ grpc_error_handle grpc_chttp2_header_parser_parse(void* hpack_parser,
|
|||
auto* parser = static_cast<grpc_core::HPackParser*>(hpack_parser);
|
||||
grpc_core::CallTracerAnnotationInterface* call_tracer = nullptr;
|
||||
if (s != nullptr) {
|
||||
s->stats.incoming.header_bytes += GRPC_SLICE_LENGTH(slice);
|
||||
s->call_tracer_wrapper.RecordIncomingBytes(
|
||||
{0, 0, GRPC_SLICE_LENGTH(slice)});
|
||||
call_tracer =
|
||||
s->arena->GetContext<grpc_core::CallTracerAnnotationInterface>();
|
||||
grpc_core::IsCallTracerInTransportEnabled()
|
||||
? s->arena->GetContext<grpc_core::CallTracerInterface>()
|
||||
: s->arena->GetContext<grpc_core::CallTracerAnnotationInterface>();
|
||||
}
|
||||
grpc_error_handle error = parser->Parse(
|
||||
slice, is_last != 0, absl::BitGenRef(t->bitgen), call_tracer);
|
||||
|
|
|
@ -17,8 +17,8 @@
|
|||
//
|
||||
|
||||
#include "absl/log/check.h"
|
||||
#include "absl/log/log.h"
|
||||
|
||||
#include <grpc/support/log.h>
|
||||
#include <grpc/support/port_platform.h>
|
||||
|
||||
#include "src/core/ext/transport/chttp2/transport/internal.h"
|
||||
|
@ -69,8 +69,8 @@ static bool stream_list_pop(grpc_chttp2_transport* t,
|
|||
}
|
||||
*stream = s;
|
||||
if (s && GRPC_TRACE_FLAG_ENABLED(http2_stream_state)) {
|
||||
gpr_log(GPR_INFO, "%p[%d][%s]: pop from %s", t, s->id,
|
||||
t->is_client ? "cli" : "svr", stream_list_id_string(id));
|
||||
LOG(INFO) << t << "[" << s->id << "][" << (t->is_client ? "cli" : "svr")
|
||||
<< "]: pop from " << stream_list_id_string(id);
|
||||
}
|
||||
return s != nullptr;
|
||||
}
|
||||
|
@ -91,8 +91,8 @@ static void stream_list_remove(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
|
|||
t->lists[id].tail = s->links[id].prev;
|
||||
}
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http2_stream_state)) {
|
||||
gpr_log(GPR_INFO, "%p[%d][%s]: remove from %s", t, s->id,
|
||||
t->is_client ? "cli" : "svr", stream_list_id_string(id));
|
||||
LOG(INFO) << t << "[" << s->id << "][" << (t->is_client ? "cli" : "svr")
|
||||
<< "]: remove from " << stream_list_id_string(id);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -123,8 +123,8 @@ static void stream_list_add_tail(grpc_chttp2_transport* t,
|
|||
t->lists[id].tail = s;
|
||||
s->included.set(id);
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http2_stream_state)) {
|
||||
gpr_log(GPR_INFO, "%p[%d][%s]: add to %s", t, s->id,
|
||||
t->is_client ? "cli" : "svr", stream_list_id_string(id));
|
||||
LOG(INFO) << t << "[" << s->id << "][" << (t->is_client ? "cli" : "svr")
|
||||
<< "]: add to " << stream_list_id_string(id);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -27,12 +27,12 @@
|
|||
|
||||
#include "absl/container/flat_hash_map.h"
|
||||
#include "absl/log/check.h"
|
||||
#include "absl/log/log.h"
|
||||
#include "absl/status/status.h"
|
||||
#include "absl/types/optional.h"
|
||||
|
||||
#include <grpc/event_engine/event_engine.h>
|
||||
#include <grpc/slice_buffer.h>
|
||||
#include <grpc/support/log.h>
|
||||
#include <grpc/support/port_platform.h>
|
||||
#include <grpc/support/time.h>
|
||||
|
||||
|
@ -136,10 +136,10 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
|
|||
GRPC_TRACE_FLAG_ENABLED(bdp_estimator) ||
|
||||
GRPC_TRACE_FLAG_ENABLED(http_keepalive) ||
|
||||
GRPC_TRACE_FLAG_ENABLED(http2_ping)) {
|
||||
gpr_log(GPR_INFO, "%s[%p]: Ping %" PRIx64 " sent [%s]: %s",
|
||||
t->is_client ? "CLIENT" : "SERVER", t, id,
|
||||
std::string(t->peer_string.as_string_view()).c_str(),
|
||||
t->ping_rate_policy.GetDebugString().c_str());
|
||||
LOG(INFO) << (t->is_client ? "CLIENT" : "SERVER") << "[" << t
|
||||
<< "]: Ping " << id << " sent ["
|
||||
<< std::string(t->peer_string.as_string_view())
|
||||
<< "]: " << t->ping_rate_policy.GetDebugString();
|
||||
}
|
||||
},
|
||||
[t](grpc_core::Chttp2PingRatePolicy::TooManyRecentPings) {
|
||||
|
@ -148,11 +148,11 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
|
|||
GRPC_TRACE_FLAG_ENABLED(bdp_estimator) ||
|
||||
GRPC_TRACE_FLAG_ENABLED(http_keepalive) ||
|
||||
GRPC_TRACE_FLAG_ENABLED(http2_ping)) {
|
||||
gpr_log(GPR_INFO,
|
||||
"%s[%p]: Ping delayed [%s]: too many recent pings: %s",
|
||||
t->is_client ? "CLIENT" : "SERVER", t,
|
||||
std::string(t->peer_string.as_string_view()).c_str(),
|
||||
t->ping_rate_policy.GetDebugString().c_str());
|
||||
LOG(INFO) << (t->is_client ? "CLIENT" : "SERVER") << "[" << t
|
||||
<< "]: Ping delayed ["
|
||||
<< std::string(t->peer_string.as_string_view())
|
||||
<< "]: too many recent pings: "
|
||||
<< t->ping_rate_policy.GetDebugString();
|
||||
}
|
||||
},
|
||||
[t](grpc_core::Chttp2PingRatePolicy::TooSoon too_soon) {
|
||||
|
@ -161,15 +161,14 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
|
|||
GRPC_TRACE_FLAG_ENABLED(bdp_estimator) ||
|
||||
GRPC_TRACE_FLAG_ENABLED(http_keepalive) ||
|
||||
GRPC_TRACE_FLAG_ENABLED(http2_ping)) {
|
||||
gpr_log(
|
||||
GPR_INFO,
|
||||
"%s[%p]: Ping delayed [%s]: not enough time elapsed since last "
|
||||
"ping. Last ping:%s, minimum wait:%s need to wait:%s",
|
||||
t->is_client ? "CLIENT" : "SERVER", t,
|
||||
std::string(t->peer_string.as_string_view()).c_str(),
|
||||
too_soon.last_ping.ToString().c_str(),
|
||||
too_soon.next_allowed_ping_interval.ToString().c_str(),
|
||||
too_soon.wait.ToString().c_str());
|
||||
LOG(INFO) << (t->is_client ? "CLIENT" : "SERVER") << "[" << t
|
||||
<< "]: Ping delayed ["
|
||||
<< std::string(t->peer_string.as_string_view())
|
||||
<< "]: not enough time elapsed since last "
|
||||
"ping. Last ping:"
|
||||
<< too_soon.last_ping
|
||||
<< ", minimum wait:" << too_soon.next_allowed_ping_interval
|
||||
<< ", need to wait:" << too_soon.wait;
|
||||
}
|
||||
if (t->delayed_ping_timer_handle ==
|
||||
grpc_event_engine::experimental::EventEngine::TaskHandle::
|
||||
|
@ -207,22 +206,22 @@ static bool update_list(grpc_chttp2_transport* t, int64_t send_bytes,
|
|||
static void report_stall(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
|
||||
const char* staller) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(flowctl)) {
|
||||
gpr_log(
|
||||
GPR_DEBUG,
|
||||
"%s:%p stream %d moved to stalled list by %s. This is FULLY expected "
|
||||
"to happen in a healthy program that is not seeing flow control stalls."
|
||||
" However, if you know that there are unwanted stalls, here is some "
|
||||
"helpful data: [fc:pending=%" PRIdPTR ":flowed=%" PRId64
|
||||
":peer_initwin=%d:t_win=%" PRId64 ":s_win=%d:s_delta=%" PRId64 "]",
|
||||
std::string(t->peer_string.as_string_view()).c_str(), t, s->id, staller,
|
||||
s->flow_controlled_buffer.length, s->flow_controlled_bytes_flowed,
|
||||
t->settings.acked().initial_window_size(),
|
||||
t->flow_control.remote_window(),
|
||||
static_cast<uint32_t>(std::max(
|
||||
int64_t{0}, s->flow_control.remote_window_delta() +
|
||||
static_cast<int64_t>(
|
||||
t->settings.peer().initial_window_size()))),
|
||||
s->flow_control.remote_window_delta());
|
||||
VLOG(2) << t->peer_string.as_string_view() << ":" << t << " stream "
|
||||
<< s->id << " moved to stalled list by " << staller
|
||||
<< ". This is FULLY expected to happen in a healthy program that "
|
||||
"is not seeing flow control stalls. However, if you know that "
|
||||
"there are unwanted stalls, here is some helpful data: "
|
||||
"[fc:pending="
|
||||
<< s->flow_controlled_buffer.length
|
||||
<< ":flowed=" << s->flow_controlled_bytes_flowed
|
||||
<< ":peer_initwin=" << t->settings.acked().initial_window_size()
|
||||
<< ":t_win=" << t->flow_control.remote_window() << ":s_win="
|
||||
<< static_cast<uint32_t>(
|
||||
std::max(int64_t{0},
|
||||
s->flow_control.remote_window_delta() +
|
||||
static_cast<int64_t>(
|
||||
t->settings.peer().initial_window_size())))
|
||||
<< ":s_delta=" << s->flow_control.remote_window_delta() << "]";
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -297,10 +296,9 @@ class WriteContext {
|
|||
uint32_t transport_announce = t_->flow_control.MaybeSendUpdate(
|
||||
t_->outbuf.c_slice_buffer()->count > 0);
|
||||
if (transport_announce) {
|
||||
grpc_transport_one_way_stats throwaway_stats;
|
||||
grpc_slice_buffer_add(t_->outbuf.c_slice_buffer(),
|
||||
grpc_chttp2_window_update_create(
|
||||
0, transport_announce, &throwaway_stats));
|
||||
grpc_slice_buffer_add(
|
||||
t_->outbuf.c_slice_buffer(),
|
||||
grpc_chttp2_window_update_create(0, transport_announce, nullptr));
|
||||
grpc_chttp2_reset_ping_clock(t_);
|
||||
}
|
||||
}
|
||||
|
@ -412,7 +410,7 @@ class DataSendContext {
|
|||
s_->send_trailing_metadata != nullptr &&
|
||||
s_->send_trailing_metadata->empty();
|
||||
grpc_chttp2_encode_data(s_->id, &s_->flow_controlled_buffer, send_bytes,
|
||||
is_last_frame_, &s_->stats.outgoing,
|
||||
is_last_frame_, &s_->call_tracer_wrapper,
|
||||
t_->outbuf.c_slice_buffer());
|
||||
sfc_upd_.SentData(send_bytes);
|
||||
s_->sending_bytes += send_bytes;
|
||||
|
@ -443,10 +441,10 @@ class StreamWriteContext {
|
|||
public:
|
||||
StreamWriteContext(WriteContext* write_context, grpc_chttp2_stream* s)
|
||||
: write_context_(write_context), t_(write_context->transport()), s_(s) {
|
||||
GRPC_CHTTP2_IF_TRACING(
|
||||
gpr_log(GPR_INFO, "W:%p %s[%d] im-(sent,send)=(%d,%d)", t_,
|
||||
t_->is_client ? "CLIENT" : "SERVER", s->id,
|
||||
s->sent_initial_metadata, s->send_initial_metadata != nullptr));
|
||||
GRPC_CHTTP2_IF_TRACING(INFO)
|
||||
<< "W:" << t_ << " " << (t_->is_client ? "CLIENT" : "SERVER") << "["
|
||||
<< s->id << "] im-(sent,send)=(" << s->sent_initial_metadata << ","
|
||||
<< (s->send_initial_metadata != nullptr) << ")";
|
||||
}
|
||||
|
||||
void FlushInitialMetadata() {
|
||||
|
@ -471,8 +469,7 @@ class StreamWriteContext {
|
|||
t_->settings.peer()
|
||||
.allow_true_binary_metadata(), // use_true_binary_metadata
|
||||
t_->settings.peer().max_frame_size(), // max_frame_size
|
||||
&s_->stats.outgoing // stats
|
||||
},
|
||||
&s_->call_tracer_wrapper},
|
||||
*s_->send_initial_metadata, t_->outbuf.c_slice_buffer());
|
||||
grpc_chttp2_reset_ping_clock(t_);
|
||||
write_context_->IncInitialMetadataWrites();
|
||||
|
@ -484,16 +481,32 @@ class StreamWriteContext {
|
|||
grpc_chttp2_complete_closure_step(t_, &s_->send_initial_metadata_finished,
|
||||
absl::OkStatus(),
|
||||
"send_initial_metadata_finished");
|
||||
if (s_->call_tracer) {
|
||||
grpc_core::HttpAnnotation::WriteStats write_stats;
|
||||
write_stats.target_write_size = write_context_->target_write_size();
|
||||
s_->call_tracer->RecordAnnotation(
|
||||
grpc_core::HttpAnnotation(
|
||||
grpc_core::HttpAnnotation::Type::kHeadWritten,
|
||||
gpr_now(GPR_CLOCK_REALTIME))
|
||||
.Add(s_->t->flow_control.stats())
|
||||
.Add(s_->flow_control.stats())
|
||||
.Add(write_stats));
|
||||
if (!grpc_core::IsCallTracerInTransportEnabled()) {
|
||||
if (s_->call_tracer) {
|
||||
grpc_core::HttpAnnotation::WriteStats write_stats;
|
||||
write_stats.target_write_size = write_context_->target_write_size();
|
||||
s_->call_tracer->RecordAnnotation(
|
||||
grpc_core::HttpAnnotation(
|
||||
grpc_core::HttpAnnotation::Type::kHeadWritten,
|
||||
gpr_now(GPR_CLOCK_REALTIME))
|
||||
.Add(s_->t->flow_control.stats())
|
||||
.Add(s_->flow_control.stats())
|
||||
.Add(write_stats));
|
||||
}
|
||||
} else if (grpc_core::IsTraceRecordCallopsEnabled()) {
|
||||
auto* call_tracer =
|
||||
s_->arena->GetContext<grpc_core::CallTracerInterface>();
|
||||
if (call_tracer != nullptr && call_tracer->IsSampled()) {
|
||||
grpc_core::HttpAnnotation::WriteStats write_stats;
|
||||
write_stats.target_write_size = write_context_->target_write_size();
|
||||
call_tracer->RecordAnnotation(
|
||||
grpc_core::HttpAnnotation(
|
||||
grpc_core::HttpAnnotation::Type::kHeadWritten,
|
||||
gpr_now(GPR_CLOCK_REALTIME))
|
||||
.Add(s_->t->flow_control.stats())
|
||||
.Add(s_->flow_control.stats())
|
||||
.Add(write_stats));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -504,9 +517,10 @@ class StreamWriteContext {
|
|||
const uint32_t stream_announce = s_->flow_control.MaybeSendUpdate();
|
||||
if (stream_announce == 0) return;
|
||||
|
||||
grpc_slice_buffer_add(t_->outbuf.c_slice_buffer(),
|
||||
grpc_chttp2_window_update_create(
|
||||
s_->id, stream_announce, &s_->stats.outgoing));
|
||||
grpc_slice_buffer_add(
|
||||
t_->outbuf.c_slice_buffer(),
|
||||
grpc_chttp2_window_update_create(s_->id, stream_announce,
|
||||
&s_->call_tracer_wrapper));
|
||||
grpc_chttp2_reset_ping_clock(t_);
|
||||
write_context_->IncWindowUpdateWrites();
|
||||
}
|
||||
|
@ -556,15 +570,16 @@ class StreamWriteContext {
|
|||
if (s_->send_trailing_metadata == nullptr) return;
|
||||
if (s_->flow_controlled_buffer.length != 0) return;
|
||||
|
||||
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "sending trailing_metadata"));
|
||||
GRPC_CHTTP2_IF_TRACING(INFO) << "sending trailing_metadata";
|
||||
if (s_->send_trailing_metadata->empty()) {
|
||||
grpc_chttp2_encode_data(s_->id, &s_->flow_controlled_buffer, 0, true,
|
||||
&s_->stats.outgoing, t_->outbuf.c_slice_buffer());
|
||||
&s_->call_tracer_wrapper,
|
||||
t_->outbuf.c_slice_buffer());
|
||||
} else {
|
||||
t_->hpack_compressor.EncodeHeaders(
|
||||
grpc_core::HPackCompressor::EncodeHeaderOptions{
|
||||
s_->id, true, t_->settings.peer().allow_true_binary_metadata(),
|
||||
t_->settings.peer().max_frame_size(), &s_->stats.outgoing},
|
||||
t_->settings.peer().max_frame_size(), &s_->call_tracer_wrapper},
|
||||
*s_->send_trailing_metadata, t_->outbuf.c_slice_buffer());
|
||||
}
|
||||
write_context_->IncTrailingMetadataWrites();
|
||||
|
@ -607,8 +622,8 @@ class StreamWriteContext {
|
|||
};
|
||||
|
||||
void ConvertInitialMetadataToTrailingMetadata() {
|
||||
GRPC_CHTTP2_IF_TRACING(
|
||||
gpr_log(GPR_INFO, "not sending initial_metadata (Trailers-Only)"));
|
||||
GRPC_CHTTP2_IF_TRACING(INFO)
|
||||
<< "not sending initial_metadata (Trailers-Only)";
|
||||
// When sending Trailers-Only, we need to move metadata from headers to
|
||||
// trailers.
|
||||
TrailersOnlyMetadataEncoder encoder(s_->send_trailing_metadata);
|
||||
|
@ -628,16 +643,28 @@ class StreamWriteContext {
|
|||
grpc_slice_buffer_add(
|
||||
t_->outbuf.c_slice_buffer(),
|
||||
grpc_chttp2_rst_stream_create(s_->id, GRPC_HTTP2_NO_ERROR,
|
||||
&s_->stats.outgoing));
|
||||
&s_->call_tracer_wrapper));
|
||||
}
|
||||
grpc_chttp2_mark_stream_closed(t_, s_, !t_->is_client, true,
|
||||
absl::OkStatus());
|
||||
if (s_->call_tracer) {
|
||||
s_->call_tracer->RecordAnnotation(
|
||||
grpc_core::HttpAnnotation(grpc_core::HttpAnnotation::Type::kEnd,
|
||||
gpr_now(GPR_CLOCK_REALTIME))
|
||||
.Add(s_->t->flow_control.stats())
|
||||
.Add(s_->flow_control.stats()));
|
||||
if (!grpc_core::IsCallTracerInTransportEnabled()) {
|
||||
if (s_->call_tracer) {
|
||||
s_->call_tracer->RecordAnnotation(
|
||||
grpc_core::HttpAnnotation(grpc_core::HttpAnnotation::Type::kEnd,
|
||||
gpr_now(GPR_CLOCK_REALTIME))
|
||||
.Add(s_->t->flow_control.stats())
|
||||
.Add(s_->flow_control.stats()));
|
||||
}
|
||||
} else if (grpc_core::IsTraceRecordCallopsEnabled()) {
|
||||
auto* call_tracer =
|
||||
s_->arena->GetContext<grpc_core::CallTracerInterface>();
|
||||
if (call_tracer != nullptr && call_tracer->IsSampled()) {
|
||||
call_tracer->RecordAnnotation(
|
||||
grpc_core::HttpAnnotation(grpc_core::HttpAnnotation::Type::kEnd,
|
||||
gpr_now(GPR_CLOCK_REALTIME))
|
||||
.Add(s_->t->flow_control.stats())
|
||||
.Add(s_->flow_control.stats()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
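Both HttpAnnotation sites in this file now branch the same way: with the call_tracer_in_transport experiment off, the legacy s->call_tracer pointer is consulted as before; with it on, an annotation is recorded only when trace_record_callops is enabled and the arena's CallTracerInterface reports IsSampled(). A condensed model of that second branch (helper name is illustrative, not gRPC API):

// Sketch only, not part of the diff.
grpc_core::CallTracerInterface* SampledArenaTracerSketch(grpc_chttp2_stream* s) {
  if (!grpc_core::IsTraceRecordCallopsEnabled()) return nullptr;
  auto* tracer = s->arena->GetContext<grpc_core::CallTracerInterface>();
  return (tracer != nullptr && tracer->IsSampled()) ? tracer : nullptr;
}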
@ -676,7 +703,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
|
|||
num_stream_bytes = t->outbuf.c_slice_buffer()->length - orig_len;
|
||||
s->byte_counter += static_cast<size_t>(num_stream_bytes);
|
||||
++s->write_counter;
|
||||
if (s->traced && grpc_endpoint_can_track_err(t->ep)) {
|
||||
if (s->traced && grpc_endpoint_can_track_err(t->ep.get())) {
|
||||
grpc_core::CopyContextFn copy_context_fn =
|
||||
grpc_core::GrpcHttp2GetCopyContextFn();
|
||||
if (copy_context_fn != nullptr &&
|
||||
|
@ -728,10 +755,9 @@ void grpc_chttp2_end_write(grpc_chttp2_transport* t, grpc_error_handle error) {
|
|||
grpc_chttp2_ping_timeout(t);
|
||||
});
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http2_ping) && id.has_value()) {
|
||||
gpr_log(GPR_INFO,
|
||||
"%s[%p]: Set ping timeout timer of %s for ping id %" PRIx64,
|
||||
t->is_client ? "CLIENT" : "SERVER", t, timeout.ToString().c_str(),
|
||||
id.value());
|
||||
LOG(INFO) << (t->is_client ? "CLIENT" : "SERVER") << "[" << t
|
||||
<< "]: Set ping timeout timer of " << timeout.ToString()
|
||||
<< " for ping id " << id.value();
|
||||
}
|
||||
|
||||
if (t->keepalive_incoming_data_wanted &&
|
||||
|
@ -741,9 +767,9 @@ void grpc_chttp2_end_write(grpc_chttp2_transport* t, grpc_error_handle error) {
|
|||
kInvalid) {
|
||||
if (GRPC_TRACE_FLAG_ENABLED(http2_ping) ||
|
||||
GRPC_TRACE_FLAG_ENABLED(http_keepalive)) {
|
||||
gpr_log(GPR_INFO, "%s[%p]: Set keepalive ping timeout timer of %s",
|
||||
t->is_client ? "CLIENT" : "SERVER", t,
|
||||
t->keepalive_timeout.ToString().c_str());
|
||||
LOG(INFO) << (t->is_client ? "CLIENT" : "SERVER") << "[" << t
|
||||
<< "]: Set keepalive ping timeout timer of "
|
||||
<< t->keepalive_timeout.ToString();
|
||||
}
|
||||
t->keepalive_ping_timeout_handle =
|
||||
t->event_engine->RunAfter(t->keepalive_timeout, [t = t->Ref()] {
|
||||
|
|
|
@ -18,10 +18,10 @@
|
|||
|
||||
#include "src/core/ext/transport/cronet/client/secure/cronet_channel_create.h"
|
||||
|
||||
#include "absl/log/log.h"
|
||||
#include "absl/status/statusor.h"
|
||||
|
||||
#include <grpc/impl/channel_arg_names.h>
|
||||
#include <grpc/support/log.h>
|
||||
#include <grpc/support/port_platform.h>
|
||||
|
||||
#include "src/core/ext/transport/cronet/transport/cronet_transport.h"
|
||||
|
@ -38,9 +38,8 @@
|
|||
GRPCAPI grpc_channel* grpc_cronet_secure_channel_create(
|
||||
void* engine, const char* target, const grpc_channel_args* args,
|
||||
void* reserved) {
|
||||
gpr_log(GPR_DEBUG,
|
||||
"grpc_create_cronet_transport: stream_engine = %p, target=%s", engine,
|
||||
target);
|
||||
VLOG(2) << "grpc_create_cronet_transport: stream_engine = " << engine
|
||||
<< ", target=" << target;
|
||||
|
||||
// Disable client authority filter when using Cronet
|
||||
auto channel_args = grpc_core::CoreConfiguration::Get()
|
||||
|
|
File diff suppressed because it is too large
@ -15,21 +15,26 @@
|
|||
#include "src/core/ext/transport/inproc/inproc_transport.h"
|
||||
|
||||
#include <atomic>
|
||||
#include <memory>
|
||||
|
||||
#include "absl/log/check.h"
|
||||
#include "absl/log/log.h"
|
||||
#include "absl/status/status.h"
|
||||
|
||||
#include <grpc/grpc.h>
|
||||
#include <grpc/support/log.h>
|
||||
#include <grpc/support/port_platform.h>
|
||||
|
||||
#include "src/core/ext/transport/inproc/legacy_inproc_transport.h"
|
||||
#include "src/core/lib/config/core_configuration.h"
|
||||
#include "src/core/lib/event_engine/event_engine_context.h"
|
||||
#include "src/core/lib/experiments/experiments.h"
|
||||
#include "src/core/lib/gprpp/crash.h"
|
||||
#include "src/core/lib/gprpp/debug_location.h"
|
||||
#include "src/core/lib/promise/promise.h"
|
||||
#include "src/core/lib/promise/try_seq.h"
|
||||
#include "src/core/lib/resource_quota/resource_quota.h"
|
||||
#include "src/core/lib/surface/channel_create.h"
|
||||
#include "src/core/lib/transport/metadata.h"
|
||||
#include "src/core/lib/transport/transport.h"
|
||||
#include "src/core/server/server.h"
|
||||
|
||||
|
@ -56,12 +61,14 @@ class InprocServerTransport final : public ServerTransport {
|
|||
state_.compare_exchange_strong(expect, ConnectionState::kReady,
|
||||
std::memory_order_acq_rel,
|
||||
std::memory_order_acquire);
|
||||
MutexLock lock(&state_tracker_mu_);
|
||||
state_tracker_.SetState(GRPC_CHANNEL_READY, absl::OkStatus(),
|
||||
"accept function set");
|
||||
connected_state()->SetReady();
|
||||
}
|
||||
|
||||
void Orphan() override { Unref(); }
|
||||
void Orphan() override {
|
||||
GRPC_TRACE_LOG(inproc, INFO) << "InprocServerTransport::Orphan(): " << this;
|
||||
Disconnect(absl::UnavailableError("Server transport closed"));
|
||||
Unref();
|
||||
}
|
||||
|
||||
FilterStackTransport* filter_stack_transport() override { return nullptr; }
|
||||
ClientTransport* client_transport() override { return nullptr; }
|
||||
|
@ -70,29 +77,30 @@ class InprocServerTransport final : public ServerTransport {
|
|||
void SetPollset(grpc_stream*, grpc_pollset*) override {}
|
||||
void SetPollsetSet(grpc_stream*, grpc_pollset_set*) override {}
|
||||
void PerformOp(grpc_transport_op* op) override {
|
||||
gpr_log(GPR_INFO, "inproc server op: %s",
|
||||
grpc_transport_op_string(op).c_str());
|
||||
GRPC_TRACE_LOG(inproc, INFO)
|
||||
<< "inproc server op: " << grpc_transport_op_string(op);
|
||||
if (op->start_connectivity_watch != nullptr) {
|
||||
MutexLock lock(&state_tracker_mu_);
|
||||
state_tracker_.AddWatcher(op->start_connectivity_watch_state,
|
||||
std::move(op->start_connectivity_watch));
|
||||
      connected_state()->AddWatcher(op->start_connectivity_watch_state,
                                    std::move(op->start_connectivity_watch));
    }
    if (op->stop_connectivity_watch != nullptr) {
      MutexLock lock(&state_tracker_mu_);
      state_tracker_.RemoveWatcher(op->stop_connectivity_watch);
      connected_state()->RemoveWatcher(op->stop_connectivity_watch);
    }
    if (op->set_accept_stream) {
      Crash("set_accept_stream not supported on inproc transport");
    }
    ExecCtx::Run(DEBUG_LOCATION, op->on_consumed, absl::OkStatus());
  }

  void Disconnect(absl::Status error) {
    if (disconnecting_.exchange(true, std::memory_order_relaxed)) return;
    disconnect_error_ = std::move(error);
    RefCountedPtr<ConnectedState> connected_state;
    {
      MutexLock lock(&connected_state_mu_);
      connected_state = std::move(connected_state_);
    }
    if (connected_state == nullptr) return;
    connected_state->Disconnect(std::move(error));
    state_.store(ConnectionState::kDisconnected, std::memory_order_relaxed);
    MutexLock lock(&state_tracker_mu_);
    state_tracker_.SetState(GRPC_CHANNEL_SHUTDOWN, disconnect_error_,
                            "inproc transport disconnected");
  }

  absl::StatusOr<CallInitiator> AcceptCall(ClientMetadataHandle md) {
@@ -105,24 +113,64 @@ class InprocServerTransport final : public ServerTransport {
      case ConnectionState::kReady:
        break;
    }
    auto server_call = MakeCallPair(std::move(md), event_engine_.get(),
                                    call_arena_allocator_->MakeArena());
    auto arena = call_arena_allocator_->MakeArena();
    arena->SetContext<grpc_event_engine::experimental::EventEngine>(
        event_engine_.get());
    auto server_call = MakeCallPair(std::move(md), std::move(arena));
    unstarted_call_handler_->StartCall(std::move(server_call.handler));
    return std::move(server_call.initiator);
  }

  OrphanablePtr<InprocClientTransport> MakeClientTransport();

  class ConnectedState : public RefCounted<ConnectedState> {
   public:
    ~ConnectedState() override {
      state_tracker_.SetState(GRPC_CHANNEL_SHUTDOWN, disconnect_error_,
                              "inproc transport disconnected");
    }

    void SetReady() {
      MutexLock lock(&state_tracker_mu_);
      state_tracker_.SetState(GRPC_CHANNEL_READY, absl::OkStatus(),
                              "accept function set");
    }

    void Disconnect(absl::Status error) {
      disconnect_error_ = std::move(error);
    }

    void AddWatcher(grpc_connectivity_state initial_state,
                    OrphanablePtr<ConnectivityStateWatcherInterface> watcher) {
      MutexLock lock(&state_tracker_mu_);
      state_tracker_.AddWatcher(initial_state, std::move(watcher));
    }

    void RemoveWatcher(ConnectivityStateWatcherInterface* watcher) {
      MutexLock lock(&state_tracker_mu_);
      state_tracker_.RemoveWatcher(watcher);
    }

   private:
    absl::Status disconnect_error_;
    Mutex state_tracker_mu_;
    ConnectivityStateTracker state_tracker_ ABSL_GUARDED_BY(state_tracker_mu_){
        "inproc_server_transport", GRPC_CHANNEL_CONNECTING};
  };

  RefCountedPtr<ConnectedState> connected_state() {
    MutexLock lock(&connected_state_mu_);
    return connected_state_;
  }

 private:
  enum class ConnectionState : uint8_t { kInitial, kReady, kDisconnected };

  std::atomic<ConnectionState> state_{ConnectionState::kInitial};
  std::atomic<bool> disconnecting_{false};
  RefCountedPtr<UnstartedCallDestination> unstarted_call_handler_;
  absl::Status disconnect_error_;
  Mutex state_tracker_mu_;
  ConnectivityStateTracker state_tracker_ ABSL_GUARDED_BY(state_tracker_mu_){
      "inproc_server_transport", GRPC_CHANNEL_CONNECTING};
  Mutex connected_state_mu_;
  RefCountedPtr<ConnectedState> connected_state_
      ABSL_GUARDED_BY(connected_state_mu_) = MakeRefCounted<ConnectedState>();
  const std::shared_ptr<grpc_event_engine::experimental::EventEngine>
      event_engine_;
  const RefCountedPtr<CallArenaAllocator> call_arena_allocator_;
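The Disconnect() path above moves the ref-counted ConnectedState out from under a short critical section and only then notifies it, so nothing user-visible runs while connected_state_mu_ is held and a second Disconnect() becomes a no-op. A minimal standalone sketch of that hand-off pattern, using only the standard library in place of gRPC's Mutex/RefCountedPtr types:

#include <iostream>
#include <memory>
#include <mutex>
#include <string>
#include <utility>

struct ConnectedState {
  void Disconnect(const std::string& reason) {
    std::cout << "disconnected: " << reason << "\n";
  }
};

class Transport {
 public:
  void Disconnect(const std::string& reason) {
    std::shared_ptr<ConnectedState> state;
    {
      std::lock_guard<std::mutex> lock(mu_);
      state = std::move(state_);  // take ownership; leaves state_ null
    }
    if (state == nullptr) return;  // already disconnected
    state->Disconnect(reason);     // runs outside the lock
  }

 private:
  std::mutex mu_;
  std::shared_ptr<ConnectedState> state_ = std::make_shared<ConnectedState>();
};

int main() {
  Transport t;
  t.Disconnect("shutting down");
  t.Disconnect("second call is a no-op");
}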
@@ -139,19 +187,27 @@ class InprocClientTransport final : public ClientTransport {
        "pull_initial_metadata",
        TrySeq(child_call_handler.PullClientInitialMetadata(),
               [server_transport = server_transport_,
                child_call_handler](ClientMetadataHandle md) {
                connected_state = server_transport_->connected_state(),
                child_call_handler](ClientMetadataHandle md) mutable {
                 auto server_call_initiator =
                     server_transport->AcceptCall(std::move(md));
                 if (!server_call_initiator.ok()) {
                   return server_call_initiator.status();
                 }
                 ForwardCall(child_call_handler,
                             std::move(*server_call_initiator));
                 ForwardCall(
                     child_call_handler, std::move(*server_call_initiator),
                     [connected_state =
                          std::move(connected_state)](ServerMetadata& md) {
                       md.Set(GrpcStatusFromWire(), true);
                     });
                 return absl::OkStatus();
               }));
  }

  void Orphan() override { delete this; }
  void Orphan() override {
    GRPC_TRACE_LOG(inproc, INFO) << "InprocClientTransport::Orphan(): " << this;
    Unref();
  }

  FilterStackTransport* filter_stack_transport() override { return nullptr; }
  ClientTransport* client_transport() override { return this; }
@@ -170,8 +226,10 @@ class InprocClientTransport final : public ClientTransport {
  const RefCountedPtr<InprocServerTransport> server_transport_;
};

bool UsePromiseBasedTransport() {
  return IsPromiseBasedInprocTransportEnabled();
bool UsePromiseBasedTransport(const ChannelArgs& channel_args) {
  return channel_args
      .GetBool("grpc.experimental.promise_based_inproc_transport")
      .value_or(IsPromiseBasedInprocTransportEnabled());
}

OrphanablePtr<InprocClientTransport>
@@ -182,8 +240,7 @@ InprocServerTransport::MakeClientTransport() {

RefCountedPtr<Channel> MakeLameChannel(absl::string_view why,
                                       absl::Status error) {
  gpr_log(GPR_ERROR, "%s: %s", std::string(why).c_str(),
          std::string(error.message()).c_str());
  LOG(ERROR) << why << ": " << error.message();
  intptr_t integer;
  grpc_status_code status = GRPC_STATUS_INTERNAL;
  if (grpc_error_get_int(error, StatusIntProperty::kRpcStatus, &integer)) {
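MakeLameChannel now logs through Abseil's streaming LOG(ERROR) macro instead of the printf-style gpr_log call it replaces. A small self-contained illustration of the same idiom (the message text and status here are invented for the example):

#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"

int main() {
  absl::string_view why = "Failed to create client channel";  // example text
  absl::Status error = absl::InternalError("transport setup failed");
  // Streaming style: no format strings and no manual c_str() conversions.
  LOG(ERROR) << why << ": " << error.message();
}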
@@ -210,7 +267,8 @@ RefCountedPtr<Channel> MakeInprocChannel(Server* server,
  std::ignore = server_transport.release();  // consumed by SetupTransport
  auto channel = ChannelCreate(
      "inproc",
      client_channel_args.Set(GRPC_ARG_DEFAULT_AUTHORITY, "inproc.authority"),
      client_channel_args.Set(GRPC_ARG_DEFAULT_AUTHORITY, "inproc.authority")
          .Set(GRPC_ARG_USE_V3_STACK, true),
      GRPC_CLIENT_DIRECT_CHANNEL, client_transport.release());
  if (!channel.ok()) {
    return MakeLameChannel("Failed to create client channel", channel.status());
@@ -235,13 +293,14 @@ grpc_channel* grpc_inproc_channel_create(grpc_server* server,
                                         void* reserved) {
  grpc_core::ApplicationCallbackExecCtx app_exec_ctx;
  grpc_core::ExecCtx exec_ctx;
  if (!grpc_core::UsePromiseBasedTransport()) {
  const auto channel_args = grpc_core::CoreConfiguration::Get()
                                .channel_args_preconditioning()
                                .PreconditionChannelArgs(args);
  if (!grpc_core::UsePromiseBasedTransport(channel_args)) {
    return grpc_legacy_inproc_channel_create(server, args, reserved);
  }
  return grpc_core::MakeInprocChannel(grpc_core::Server::FromC(server),
                                      grpc_core::CoreConfiguration::Get()
                                          .channel_args_preconditioning()
                                          .PreconditionChannelArgs(args))
                                      channel_args)
      .release()
      ->c_ptr();
}
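grpc_inproc_channel_create now preconditions the channel args before deciding which transport to build, so the experimental opt-in can be passed per channel through the "grpc.experimental.promise_based_inproc_transport" boolean arg read above. A minimal sketch of passing that arg through the public C core API (the arg key is taken from the diff; the helper name and the already-started `server` are assumptions of the example):

#include <grpc/grpc.h>

// Sketch only: assumes `server` is an already-configured, started grpc_server.
grpc_channel* make_promise_based_inproc_channel(grpc_server* server) {
  grpc_arg arg;
  arg.type = GRPC_ARG_INTEGER;
  arg.key =
      const_cast<char*>("grpc.experimental.promise_based_inproc_transport");
  arg.value.integer = 1;  // opt in to the promise-based inproc transport
  grpc_channel_args args = {1, &arg};
  return grpc_inproc_channel_create(server, &args, nullptr);
}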
@@ -40,7 +40,6 @@
#include <grpc/impl/connectivity_state.h>
#include <grpc/status.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/sync.h>

@@ -60,7 +59,6 @@
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/channel_create.h"
#include "src/core/lib/surface/channel_stack_type.h"
@@ -69,13 +67,6 @@
#include "src/core/lib/transport/transport.h"
#include "src/core/server/server.h"

#define INPROC_LOG(...)                               \
  do {                                                \
    if (GRPC_TRACE_FLAG_ENABLED(inproc)) {            \
      gpr_log(__VA_ARGS__);                           \
    }                                                 \
  } while (0)

namespace {
struct inproc_stream;
bool cancel_stream_locked(inproc_stream* s, grpc_error_handle error);
@@ -149,16 +140,16 @@ struct inproc_transport final : public grpc_core::FilterStackTransport {
  void Orphan() override;

  void ref() {
    INPROC_LOG(GPR_INFO, "ref_transport %p", this);
    GRPC_TRACE_LOG(inproc, INFO) << "ref_transport " << this;
    gpr_ref(&refs);
  }

  void unref() {
    INPROC_LOG(GPR_INFO, "unref_transport %p", this);
    GRPC_TRACE_LOG(inproc, INFO) << "unref_transport " << this;
    if (!gpr_unref(&refs)) {
      return;
    }
    INPROC_LOG(GPR_INFO, "really_destroy_transport %p", this);
    GRPC_TRACE_LOG(inproc, INFO) << "really_destroy_transport " << this;
    this->~inproc_transport();
    gpr_free(this);
  }
@@ -202,8 +193,9 @@ struct inproc_stream {
      // Pass the client-side stream address to the server-side for a ref
      ref("inproc_init_stream:clt");  // ref it now on behalf of server
                                      // side to avoid destruction
      INPROC_LOG(GPR_INFO, "calling accept stream cb %p %p",
                 st->accept_stream_cb, st->accept_stream_data);
      GRPC_TRACE_LOG(inproc, INFO)
          << "calling accept stream cb " << st->accept_stream_cb << " "
          << st->accept_stream_data;
      (*st->accept_stream_cb)(st->accept_stream_data, t, this);
    } else {
      // This is the server-side and is being called through accept_stream_cb
@@ -252,12 +244,12 @@ struct inproc_stream {
#define STREAM_UNREF(refs, reason) grpc_stream_unref(refs)
#endif
  void ref(const char* reason) {
    INPROC_LOG(GPR_INFO, "ref_stream %p %s", this, reason);
    GRPC_TRACE_LOG(inproc, INFO) << "ref_stream " << this << " " << reason;
    STREAM_REF(refs, reason);
  }

  void unref(const char* reason) {
    INPROC_LOG(GPR_INFO, "unref_stream %p %s", this, reason);
    GRPC_TRACE_LOG(inproc, INFO) << "unref_stream " << this << " " << reason;
    STREAM_UNREF(refs, reason);
  }
#undef STREAM_REF
@@ -373,7 +365,8 @@ void inproc_transport::InitStream(grpc_stream* gs,
                                  grpc_stream_refcount* refcount,
                                  const void* server_data,
                                  grpc_core::Arena* arena) {
  INPROC_LOG(GPR_INFO, "init_stream %p %p %p", this, gs, server_data);
  GRPC_TRACE_LOG(inproc, INFO)
      << "init_stream " << this << " " << gs << " " << server_data;
  new (gs) inproc_stream(this, refcount, server_data, arena);
}
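The rest of this file follows the same pattern: each printf-style INPROC_LOG(GPR_INFO, ...) call is replaced by a streaming GRPC_TRACE_LOG(inproc, INFO) << ... statement guarded by the same "inproc" trace flag. A self-contained sketch of the ostream-style idiom being adopted (the TraceLine class and the flag variable are illustrative stand-ins, not gRPC's implementation):

#include <iostream>
#include <sstream>

// Stand-in for the trace-flag check; gRPC keys this off the "inproc" flag.
static bool inproc_trace_enabled = true;

// Collects one streamed log line and flushes it when the statement ends.
class TraceLine {
 public:
  ~TraceLine() {
    if (inproc_trace_enabled) std::cerr << stream_.str() << "\n";
  }
  template <typename T>
  TraceLine& operator<<(const T& value) {
    stream_ << value;
    return *this;
  }

 private:
  std::ostringstream stream_;
};

int main() {
  void* stream = nullptr;  // stand-in for an inproc_stream pointer
  // Streaming style: values format themselves, no "%p"/"%s" specifiers.
  TraceLine() << "ref_stream " << stream << " " << "inproc_init_stream:clt";
}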
@ -435,8 +428,9 @@ void complete_if_batch_end_locked(inproc_stream* s, grpc_error_handle error,
|
|||
int is_rtm = static_cast<int>(op == s->recv_trailing_md_op);
|
||||
|
||||
if ((is_sm + is_stm + is_rim + is_rm + is_rtm) == 1) {
|
||||
INPROC_LOG(GPR_INFO, "%s %p %p %p %s", msg, s, op, op->on_complete,
|
||||
grpc_core::StatusToString(error).c_str());
|
||||
GRPC_TRACE_LOG(inproc, INFO)
|
||||
<< msg << " " << s << " " << op << " " << op->on_complete << " "
|
||||
<< grpc_core::StatusToString(error);
|
||||
grpc_core::ExecCtx::Run(DEBUG_LOCATION, op->on_complete, error);
|
||||
}
|
||||
}
|
||||
|
@ -449,7 +443,7 @@ void maybe_process_ops_locked(inproc_stream* s, grpc_error_handle error) {
|
|||
}
|
||||
|
||||
void fail_helper_locked(inproc_stream* s, grpc_error_handle error) {
|
||||
INPROC_LOG(GPR_INFO, "op_state_machine %p fail_helper", s);
|
||||
GRPC_TRACE_LOG(inproc, INFO) << "op_state_machine " << s << " fail_helper";
|
||||
// If we're failing this side, we need to make sure that
|
||||
// we also send or have already sent trailing metadata
|
||||
if (!s->trailing_md_sent) {
|
||||
|
@ -501,10 +495,10 @@ void fail_helper_locked(inproc_stream* s, grpc_error_handle error) {
|
|||
*s->recv_initial_md_op->payload->recv_initial_metadata
|
||||
.trailing_metadata_available = true;
|
||||
}
|
||||
INPROC_LOG(GPR_INFO,
|
||||
"fail_helper %p scheduling initial-metadata-ready %s %s", s,
|
||||
grpc_core::StatusToString(error).c_str(),
|
||||
grpc_core::StatusToString(err).c_str());
|
||||
GRPC_TRACE_LOG(inproc, INFO)
|
||||
<< "fail_helper " << s << " scheduling initial-metadata-ready "
|
||||
<< grpc_core::StatusToString(error) << " "
|
||||
<< grpc_core::StatusToString(err);
|
||||
grpc_core::ExecCtx::Run(
|
||||
DEBUG_LOCATION,
|
||||
s->recv_initial_md_op->payload->recv_initial_metadata
|
||||
|
@ -518,8 +512,9 @@ void fail_helper_locked(inproc_stream* s, grpc_error_handle error) {
|
|||
s->recv_initial_md_op = nullptr;
|
||||
}
|
||||
if (s->recv_message_op) {
|
||||
INPROC_LOG(GPR_INFO, "fail_helper %p scheduling message-ready %s", s,
|
||||
grpc_core::StatusToString(error).c_str());
|
||||
GRPC_TRACE_LOG(inproc, INFO)
|
||||
<< "fail_helper " << s << " scheduling message-ready "
|
||||
<< grpc_core::StatusToString(error);
|
||||
if (s->recv_message_op->payload->recv_message
|
||||
.call_failed_before_recv_message != nullptr) {
|
||||
*s->recv_message_op->payload->recv_message
|
||||
|
@ -547,15 +542,17 @@ void fail_helper_locked(inproc_stream* s, grpc_error_handle error) {
|
|||
s->send_trailing_md_op = nullptr;
|
||||
}
|
||||
if (s->recv_trailing_md_op) {
|
||||
INPROC_LOG(GPR_INFO, "fail_helper %p scheduling trailing-metadata-ready %s",
|
||||
s, grpc_core::StatusToString(error).c_str());
|
||||
GRPC_TRACE_LOG(inproc, INFO)
|
||||
<< "fail_helper " << s << " scheduling trailing-metadata-ready "
|
||||
<< grpc_core::StatusToString(error);
|
||||
grpc_core::ExecCtx::Run(
|
||||
DEBUG_LOCATION,
|
||||
s->recv_trailing_md_op->payload->recv_trailing_metadata
|
||||
.recv_trailing_metadata_ready,
|
||||
error);
|
||||
INPROC_LOG(GPR_INFO, "fail_helper %p scheduling trailing-md-on-complete %s",
|
||||
s, grpc_core::StatusToString(error).c_str());
|
||||
GRPC_TRACE_LOG(inproc, INFO)
|
||||
<< "fail_helper " << s << " scheduling trailing-md-on-complete "
|
||||
<< grpc_core::StatusToString(error);
|
||||
complete_if_batch_end_locked(
|
||||
s, error, s->recv_trailing_md_op,
|
||||
"fail_helper scheduling recv-trailing-metadata-on-complete");
|
||||
|
@ -579,8 +576,8 @@ void message_transfer_locked(inproc_stream* sender, inproc_stream* receiver) {
|
|||
*receiver->recv_message_op->payload->recv_message.flags =
|
||||
sender->send_message_op->payload->send_message.flags;
|
||||
|
||||
INPROC_LOG(GPR_INFO, "message_transfer_locked %p scheduling message-ready",
|
||||
receiver);
|
||||
GRPC_TRACE_LOG(inproc, INFO)
|
||||
<< "message_transfer_locked " << receiver << " scheduling message-ready";
|
||||
grpc_core::ExecCtx::Run(
|
||||
DEBUG_LOCATION,
|
||||
receiver->recv_message_op->payload->recv_message.recv_message_ready,
|
||||
|
@ -606,7 +603,7 @@ void op_state_machine_locked(inproc_stream* s, grpc_error_handle error) {
|
|||
|
||||
bool needs_close = false;
|
||||
|
||||
INPROC_LOG(GPR_INFO, "op_state_machine %p", s);
|
||||
GRPC_TRACE_LOG(inproc, INFO) << "op_state_machine " << s;
|
||||
// cancellation takes precedence
|
||||
inproc_stream* other = s->other_side;
|
||||
|
||||
|
@ -653,7 +650,7 @@ void op_state_machine_locked(inproc_stream* s, grpc_error_handle error) {
|
|||
: &other->to_read_trailing_md_filled;
|
||||
if (*destfilled || s->trailing_md_sent) {
|
||||
// The buffer is already in use; that's an error!
|
||||
INPROC_LOG(GPR_INFO, "Extra trailing metadata %p", s);
|
||||
GRPC_TRACE_LOG(inproc, INFO) << "Extra trailing metadata " << s;
|
||||
new_err = GRPC_ERROR_CREATE("Extra trailing metadata");
|
||||
fail_helper_locked(s, new_err);
|
||||
goto done;
|
||||
|
@ -669,15 +666,15 @@ void op_state_machine_locked(inproc_stream* s, grpc_error_handle error) {
|
|||
*s->send_trailing_md_op->payload->send_trailing_metadata.sent = true;
|
||||
}
|
||||
if (!s->t->is_client && s->trailing_md_recvd && s->recv_trailing_md_op) {
|
||||
INPROC_LOG(GPR_INFO,
|
||||
"op_state_machine %p scheduling trailing-metadata-ready", s);
|
||||
GRPC_TRACE_LOG(inproc, INFO) << "op_state_machine " << s
|
||||
<< " scheduling trailing-metadata-ready";
|
||||
grpc_core::ExecCtx::Run(
|
||||
DEBUG_LOCATION,
|
||||
s->recv_trailing_md_op->payload->recv_trailing_metadata
|
||||
.recv_trailing_metadata_ready,
|
||||
absl::OkStatus());
|
||||
INPROC_LOG(GPR_INFO,
|
||||
"op_state_machine %p scheduling trailing-md-on-complete", s);
|
||||
GRPC_TRACE_LOG(inproc, INFO) << "op_state_machine " << s
|
||||
<< " scheduling trailing-md-on-complete";
|
||||
grpc_core::ExecCtx::Run(DEBUG_LOCATION,
|
||||
s->recv_trailing_md_op->on_complete,
|
||||
absl::OkStatus());
|
||||
|
@ -694,11 +691,11 @@ void op_state_machine_locked(inproc_stream* s, grpc_error_handle error) {
|
|||
if (s->recv_initial_md_op) {
|
||||
if (s->initial_md_recvd) {
|
||||
new_err = GRPC_ERROR_CREATE("Already recvd initial md");
|
||||
INPROC_LOG(
|
||||
GPR_INFO,
|
||||
"op_state_machine %p scheduling on_complete errors for already "
|
||||
"recvd initial md %s",
|
||||
s, grpc_core::StatusToString(new_err).c_str());
|
||||
GRPC_TRACE_LOG(inproc, INFO)
|
||||
<< "op_state_machine " << s
|
||||
<< " scheduling on_complete errors for already "
|
||||
"recvd initial md "
|
||||
<< grpc_core::StatusToString(new_err);
|
||||
fail_helper_locked(s, new_err);
|
||||
goto done;
|
||||
}
|
||||
|
@ -749,20 +746,20 @@ void op_state_machine_locked(inproc_stream* s, grpc_error_handle error) {
|
|||
if (s->to_read_trailing_md_filled) {
|
||||
if (s->trailing_md_recvd) {
|
||||
if (s->trailing_md_recvd_implicit_only) {
|
||||
INPROC_LOG(GPR_INFO,
|
||||
"op_state_machine %p already implicitly received trailing "
|
||||
"metadata, so ignoring new trailing metadata from client",
|
||||
s);
|
||||
GRPC_TRACE_LOG(inproc, INFO)
|
||||
<< "op_state_machine " << s
|
||||
<< " already implicitly received trailing metadata, so "
|
||||
"ignoring new trailing metadata from client";
|
||||
s->to_read_trailing_md.Clear();
|
||||
s->to_read_trailing_md_filled = false;
|
||||
s->trailing_md_recvd_implicit_only = false;
|
||||
} else {
|
||||
new_err = GRPC_ERROR_CREATE("Already recvd trailing md");
|
||||
INPROC_LOG(
|
||||
GPR_INFO,
|
||||
"op_state_machine %p scheduling on_complete errors for already "
|
||||
"recvd trailing md %s",
|
||||
s, grpc_core::StatusToString(new_err).c_str());
|
||||
GRPC_TRACE_LOG(inproc, INFO)
|
||||
<< "op_state_machine " << s
|
||||
<< " scheduling on_complete errors for already recvd trailing "
|
||||
"md "
|
||||
<< grpc_core::StatusToString(new_err);
|
||||
fail_helper_locked(s, new_err);
|
||||
goto done;
|
||||
}
|
||||
|
@ -771,7 +768,8 @@ void op_state_machine_locked(inproc_stream* s, grpc_error_handle error) {
|
|||
// This message needs to be wrapped up because it will never be
|
||||
// satisfied
|
||||
s->recv_message_op->payload->recv_message.recv_message->reset();
|
||||
INPROC_LOG(GPR_INFO, "op_state_machine %p scheduling message-ready", s);
|
||||
GRPC_TRACE_LOG(inproc, INFO)
|
||||
<< "op_state_machine " << s << " scheduling message-ready";
|
||||
grpc_core::ExecCtx::Run(
|
||||
DEBUG_LOCATION,
|
||||
s->recv_message_op->payload->recv_message.recv_message_ready,
|
||||
|
@ -822,9 +820,9 @@ void op_state_machine_locked(inproc_stream* s, grpc_error_handle error) {
|
|||
needs_close = s->trailing_md_sent;
|
||||
}
|
||||
} else if (!s->trailing_md_recvd) {
|
||||
INPROC_LOG(
|
||||
GPR_INFO,
|
||||
"op_state_machine %p has trailing md but not yet waiting for it", s);
|
||||
GRPC_TRACE_LOG(inproc, INFO)
|
||||
<< "op_state_machine " << s
|
||||
<< " has trailing md but not yet waiting for it";
|
||||
}
|
||||
}
|
||||
if (!s->t->is_client && s->trailing_md_sent &&
|
||||
|
@ -832,8 +830,9 @@ void op_state_machine_locked(inproc_stream* s, grpc_error_handle error) {
|
|||
// In this case, we don't care to receive the write-close from the client
|
||||
// because we have already sent status and the RPC is over as far as we
|
||||
// are concerned.
|
||||
INPROC_LOG(GPR_INFO, "op_state_machine %p scheduling trailing-md-ready %s",
|
||||
s, grpc_core::StatusToString(new_err).c_str());
|
||||
GRPC_TRACE_LOG(inproc, INFO)
|
||||
<< "op_state_machine " << s << " scheduling trailing-md-ready "
|
||||
<< grpc_core::StatusToString(new_err);
|
||||
grpc_core::ExecCtx::Run(
|
||||
DEBUG_LOCATION,
|
||||
s->recv_trailing_md_op->payload->recv_trailing_metadata
|
||||
|
@ -851,7 +850,8 @@ void op_state_machine_locked(inproc_stream* s, grpc_error_handle error) {
|
|||
if (s->trailing_md_recvd && s->recv_message_op) {
|
||||
// No further message will come on this stream, so finish off the
|
||||
// recv_message_op
|
||||
INPROC_LOG(GPR_INFO, "op_state_machine %p scheduling message-ready", s);
|
||||
GRPC_TRACE_LOG(inproc, INFO)
|
||||
<< "op_state_machine " << s << " scheduling message-ready";
|
||||
s->recv_message_op->payload->recv_message.recv_message->reset();
|
||||
grpc_core::ExecCtx::Run(
|
||||
DEBUG_LOCATION,
|
||||
|
@ -873,12 +873,12 @@ void op_state_machine_locked(inproc_stream* s, grpc_error_handle error) {
|
|||
}
|
||||
if (s->send_message_op || s->send_trailing_md_op || s->recv_initial_md_op ||
|
||||
s->recv_message_op || s->recv_trailing_md_op) {
|
||||
// Didn't get the item we wanted so we still need to get
|
||||
// rescheduled
|
||||
INPROC_LOG(
|
||||
GPR_INFO, "op_state_machine %p still needs closure %p %p %p %p %p", s,
|
||||
s->send_message_op, s->send_trailing_md_op, s->recv_initial_md_op,
|
||||
s->recv_message_op, s->recv_trailing_md_op);
|
||||
// Didn't get the item we wanted so we still need to get rescheduled
|
||||
GRPC_TRACE_LOG(inproc, INFO)
|
||||
<< "op_state_machine " << s << " still needs closure "
|
||||
<< s->send_message_op << " " << s->send_trailing_md_op << " "
|
||||
<< s->recv_initial_md_op << " " << s->recv_message_op << " "
|
||||
<< s->recv_trailing_md_op;
|
||||
s->ops_needed = true;
|
||||
}
|
||||
done:
|
||||
|
@ -890,8 +890,8 @@ done:
|
|||
|
||||
bool cancel_stream_locked(inproc_stream* s, grpc_error_handle error) {
|
||||
bool ret = false; // was the cancel accepted
|
||||
INPROC_LOG(GPR_INFO, "cancel_stream %p with %s", s,
|
||||
grpc_core::StatusToString(error).c_str());
|
||||
GRPC_TRACE_LOG(inproc, INFO)
|
||||
<< "cancel_stream " << s << " with " << grpc_core::StatusToString(error);
|
||||
if (s->cancel_self_error.ok()) {
|
||||
ret = true;
|
||||
s->cancel_self_error = error;
|
||||
|
@ -944,7 +944,8 @@ bool cancel_stream_locked(inproc_stream* s, grpc_error_handle error) {
|
|||
|
||||
void inproc_transport::PerformStreamOp(grpc_stream* gs,
|
||||
grpc_transport_stream_op_batch* op) {
|
||||
INPROC_LOG(GPR_INFO, "perform_stream_op %p %p %p", this, gs, op);
|
||||
GRPC_TRACE_LOG(inproc, INFO)
|
||||
<< "perform_stream_op " << this << " " << gs << " " << op;
|
||||
inproc_stream* s = reinterpret_cast<inproc_stream*>(gs);
|
||||
gpr_mu* mu = &s->t->mu->mu; // save aside in case s gets closed
|
||||
gpr_mu_lock(mu);
|
||||
|
@ -980,14 +981,15 @@ void inproc_transport::PerformStreamOp(grpc_stream* gs,
|
|||
// already self-canceled so still give it an error
|
||||
error = s->cancel_self_error;
|
||||
} else {
|
||||
INPROC_LOG(GPR_INFO, "perform_stream_op %p %s%s%s%s%s%s%s", s,
|
||||
s->t->is_client ? "client" : "server",
|
||||
op->send_initial_metadata ? " send_initial_metadata" : "",
|
||||
op->send_message ? " send_message" : "",
|
||||
op->send_trailing_metadata ? " send_trailing_metadata" : "",
|
||||
op->recv_initial_metadata ? " recv_initial_metadata" : "",
|
||||
op->recv_message ? " recv_message" : "",
|
||||
op->recv_trailing_metadata ? " recv_trailing_metadata" : "");
|
||||
GRPC_TRACE_LOG(inproc, INFO)
|
||||
<< "perform_stream_op " << s
|
||||
<< (s->t->is_client ? " client" : " server")
|
||||
<< (op->send_initial_metadata ? " send_initial_metadata" : "")
|
||||
<< (op->send_message ? " send_message" : "")
|
||||
<< (op->send_trailing_metadata ? " send_trailing_metadata" : "")
|
||||
<< (op->recv_initial_metadata ? " recv_initial_metadata" : "")
|
||||
<< (op->recv_message ? " recv_message" : "")
|
||||
<< (op->recv_trailing_metadata ? " recv_trailing_metadata" : "");
|
||||
}
|
||||
|
||||
inproc_stream* other = s->other_side;
|
||||
|
@ -1003,7 +1005,7 @@ void inproc_transport::PerformStreamOp(grpc_stream* gs,
|
|||
: &other->to_read_initial_md_filled;
|
||||
if (*destfilled || s->initial_md_sent) {
|
||||
// The buffer is already in use; that's an error!
|
||||
INPROC_LOG(GPR_INFO, "Extra initial metadata %p", s);
|
||||
GRPC_TRACE_LOG(inproc, INFO) << "Extra initial metadata " << s;
|
||||
error = GRPC_ERROR_CREATE("Extra initial metadata");
|
||||
} else {
|
||||
if (!s->other_side_closed) {
|
||||
|
@ -1081,20 +1083,18 @@ void inproc_transport::PerformStreamOp(grpc_stream* gs,
|
|||
*op->payload->recv_initial_metadata.trailing_metadata_available =
|
||||
true;
|
||||
}
|
||||
INPROC_LOG(
|
||||
GPR_INFO,
|
||||
"perform_stream_op error %p scheduling initial-metadata-ready %s",
|
||||
s, grpc_core::StatusToString(error).c_str());
|
||||
GRPC_TRACE_LOG(inproc, INFO) << "perform_stream_op error " << s
|
||||
<< " scheduling initial-metadata-ready "
|
||||
<< grpc_core::StatusToString(error);
|
||||
grpc_core::ExecCtx::Run(
|
||||
DEBUG_LOCATION,
|
||||
op->payload->recv_initial_metadata.recv_initial_metadata_ready,
|
||||
error);
|
||||
}
|
||||
if (op->recv_message) {
|
||||
INPROC_LOG(
|
||||
GPR_INFO,
|
||||
"perform_stream_op error %p scheduling recv message-ready %s", s,
|
||||
grpc_core::StatusToString(error).c_str());
|
||||
GRPC_TRACE_LOG(inproc, INFO) << "perform_stream_op error " << s
|
||||
<< " scheduling recv message-ready "
|
||||
<< grpc_core::StatusToString(error);
|
||||
if (op->payload->recv_message.call_failed_before_recv_message !=
|
||||
nullptr) {
|
||||
*op->payload->recv_message.call_failed_before_recv_message = true;
|
||||
|
@ -1104,25 +1104,27 @@ void inproc_transport::PerformStreamOp(grpc_stream* gs,
|
|||
error);
|
||||
}
|
||||
if (op->recv_trailing_metadata) {
|
||||
INPROC_LOG(GPR_INFO,
|
||||
"perform_stream_op error %p scheduling "
|
||||
"trailing-metadata-ready %s",
|
||||
s, grpc_core::StatusToString(error).c_str());
|
||||
GRPC_TRACE_LOG(inproc, INFO) << "perform_stream_op error " << s
|
||||
<< " scheduling trailing-metadata-ready "
|
||||
<< grpc_core::StatusToString(error);
|
||||
grpc_core::ExecCtx::Run(
|
||||
DEBUG_LOCATION,
|
||||
op->payload->recv_trailing_metadata.recv_trailing_metadata_ready,
|
||||
error);
|
||||
}
|
||||
}
|
||||
INPROC_LOG(GPR_INFO, "perform_stream_op %p scheduling on_complete %s", s,
|
||||
grpc_core::StatusToString(error).c_str());
|
||||
GRPC_TRACE_LOG(inproc, INFO)
|
||||
<< "perform_stream_op " << s << " scheduling on_complete "
|
||||
<< grpc_core::StatusToString(error);
|
||||
grpc_core::ExecCtx::Run(DEBUG_LOCATION, on_complete, error);
|
||||
}
|
||||
gpr_mu_unlock(mu);
|
||||
}
|
||||
|
||||
void close_transport_locked(inproc_transport* t) {
|
||||
INPROC_LOG(GPR_INFO, "close_transport %p %d", t, t->is_closed);
|
||||
GRPC_TRACE_LOG(inproc, INFO)
|
||||
<< "close_transport " << t << " " << t->is_closed;
|
||||
|
||||
t->state_tracker.SetState(GRPC_CHANNEL_SHUTDOWN, absl::Status(),
|
||||
"close transport");
|
||||
if (!t->is_closed) {
|
||||
|
@ -1140,7 +1142,7 @@ void close_transport_locked(inproc_transport* t) {
|
|||
}
|
||||
|
||||
void inproc_transport::PerformOp(grpc_transport_op* op) {
|
||||
INPROC_LOG(GPR_INFO, "perform_transport_op %p %p", this, op);
|
||||
GRPC_TRACE_LOG(inproc, INFO) << "perform_transport_op " << this << " " << op;
|
||||
gpr_mu_lock(&mu->mu);
|
||||
if (op->start_connectivity_watch != nullptr) {
|
||||
state_tracker.AddWatcher(op->start_connectivity_watch_state,
|
||||
|
@ -1174,7 +1176,8 @@ void inproc_transport::PerformOp(grpc_transport_op* op) {
|
|||
|
||||
void inproc_transport::DestroyStream(grpc_stream* gs,
|
||||
grpc_closure* then_schedule_closure) {
|
||||
INPROC_LOG(GPR_INFO, "destroy_stream %p %p", gs, then_schedule_closure);
|
||||
GRPC_TRACE_LOG(inproc, INFO)
|
||||
<< "destroy_stream " << gs << " " << then_schedule_closure;
|
||||
inproc_stream* s = reinterpret_cast<inproc_stream*>(gs);
|
||||
gpr_mu_lock(&mu->mu);
|
||||
close_stream_locked(s);
|
||||
|
@ -1185,7 +1188,7 @@ void inproc_transport::DestroyStream(grpc_stream* gs,
|
|||
}
|
||||
|
||||
void inproc_transport::Orphan() {
|
||||
INPROC_LOG(GPR_INFO, "destroy_transport %p", this);
|
||||
GRPC_TRACE_LOG(inproc, INFO) << "destroy_transport " << this;
|
||||
gpr_mu_lock(&mu->mu);
|
||||
close_transport_locked(this);
|
||||
gpr_mu_unlock(&mu->mu);
|
||||
|
@ -1218,7 +1221,7 @@ void inproc_transport::SetPollsetSet(grpc_stream* /*gs*/,
|
|||
//
|
||||
void inproc_transports_create(grpc_core::Transport** server_transport,
|
||||
grpc_core::Transport** client_transport) {
|
||||
INPROC_LOG(GPR_INFO, "inproc_transports_create");
|
||||
GRPC_TRACE_LOG(inproc, INFO) << "inproc_transports_create";
|
||||
shared_mu* mu = new (gpr_malloc(sizeof(*mu))) shared_mu();
|
||||
inproc_transport* st =
|
||||
new (gpr_malloc(sizeof(*st))) inproc_transport(mu, /*is_client=*/false);
|
||||
|
@ -1234,8 +1237,8 @@ void inproc_transports_create(grpc_core::Transport** server_transport,
|
|||
grpc_channel* grpc_legacy_inproc_channel_create(grpc_server* server,
|
||||
const grpc_channel_args* args,
|
||||
void* /*reserved*/) {
|
||||
GRPC_API_TRACE("grpc_inproc_channel_create(server=%p, args=%p)", 2,
|
||||
(server, args));
|
||||
GRPC_TRACE_LOG(api, INFO) << "grpc_inproc_channel_create(server=" << server
|
||||
<< ", args=" << args << ")";
|
||||
|
||||
grpc_core::ExecCtx exec_ctx;
|
||||
|
||||
|
|
|
@ -449,11 +449,11 @@ UPB_INLINE bool envoy_admin_v3_CertificateDetails_has_ocsp_details(const envoy_a
|
|||
|
||||
UPB_INLINE void envoy_admin_v3_CertificateDetails_set_path(envoy_admin_v3_CertificateDetails *msg, upb_StringView value) {
|
||||
const upb_MiniTableField field = {1, UPB_SIZE(28, 16), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CertificateDetails_set_serial_number(envoy_admin_v3_CertificateDetails *msg, upb_StringView value) {
|
||||
const upb_MiniTableField field = {2, UPB_SIZE(36, 32), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE envoy_admin_v3_SubjectAlternateName** envoy_admin_v3_CertificateDetails_mutable_subject_alt_names(envoy_admin_v3_CertificateDetails* msg, size_t* size) {
|
||||
upb_MiniTableField field = {3, UPB_SIZE(12, 48), 0, 0, 11, (int)kUpb_FieldMode_Array | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
|
||||
|
@ -487,11 +487,11 @@ UPB_INLINE struct envoy_admin_v3_SubjectAlternateName* envoy_admin_v3_Certificat
|
|||
}
|
||||
UPB_INLINE void envoy_admin_v3_CertificateDetails_set_days_until_expiration(envoy_admin_v3_CertificateDetails *msg, uint64_t value) {
|
||||
const upb_MiniTableField field = {4, UPB_SIZE(48, 56), 0, kUpb_NoSub, 4, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_8Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CertificateDetails_set_valid_from(envoy_admin_v3_CertificateDetails *msg, struct google_protobuf_Timestamp* value) {
|
||||
const upb_MiniTableField field = {5, UPB_SIZE(16, 64), 64, 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_CertificateDetails_mutable_valid_from(envoy_admin_v3_CertificateDetails* msg, upb_Arena* arena) {
|
||||
struct google_protobuf_Timestamp* sub = (struct google_protobuf_Timestamp*)envoy_admin_v3_CertificateDetails_valid_from(msg);
|
||||
|
@ -503,7 +503,7 @@ UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_CertificateDetails_m
|
|||
}
|
||||
UPB_INLINE void envoy_admin_v3_CertificateDetails_set_expiration_time(envoy_admin_v3_CertificateDetails *msg, struct google_protobuf_Timestamp* value) {
|
||||
const upb_MiniTableField field = {6, UPB_SIZE(20, 72), 65, 2, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_CertificateDetails_mutable_expiration_time(envoy_admin_v3_CertificateDetails* msg, upb_Arena* arena) {
|
||||
struct google_protobuf_Timestamp* sub = (struct google_protobuf_Timestamp*)envoy_admin_v3_CertificateDetails_expiration_time(msg);
|
||||
|
@ -515,7 +515,7 @@ UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_CertificateDetails_m
|
|||
}
|
||||
UPB_INLINE void envoy_admin_v3_CertificateDetails_set_ocsp_details(envoy_admin_v3_CertificateDetails *msg, envoy_admin_v3_CertificateDetails_OcspDetails* value) {
|
||||
const upb_MiniTableField field = {7, UPB_SIZE(24, 80), 66, 3, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE struct envoy_admin_v3_CertificateDetails_OcspDetails* envoy_admin_v3_CertificateDetails_mutable_ocsp_details(envoy_admin_v3_CertificateDetails* msg, upb_Arena* arena) {
|
||||
struct envoy_admin_v3_CertificateDetails_OcspDetails* sub = (struct envoy_admin_v3_CertificateDetails_OcspDetails*)envoy_admin_v3_CertificateDetails_ocsp_details(msg);
|
||||
|
@ -597,7 +597,7 @@ UPB_INLINE bool envoy_admin_v3_CertificateDetails_OcspDetails_has_expiration(con
|
|||
|
||||
UPB_INLINE void envoy_admin_v3_CertificateDetails_OcspDetails_set_valid_from(envoy_admin_v3_CertificateDetails_OcspDetails *msg, struct google_protobuf_Timestamp* value) {
|
||||
const upb_MiniTableField field = {1, UPB_SIZE(12, 16), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_CertificateDetails_OcspDetails_mutable_valid_from(envoy_admin_v3_CertificateDetails_OcspDetails* msg, upb_Arena* arena) {
|
||||
struct google_protobuf_Timestamp* sub = (struct google_protobuf_Timestamp*)envoy_admin_v3_CertificateDetails_OcspDetails_valid_from(msg);
|
||||
|
@ -609,7 +609,7 @@ UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_CertificateDetails_O
|
|||
}
|
||||
UPB_INLINE void envoy_admin_v3_CertificateDetails_OcspDetails_set_expiration(envoy_admin_v3_CertificateDetails_OcspDetails *msg, struct google_protobuf_Timestamp* value) {
|
||||
const upb_MiniTableField field = {2, UPB_SIZE(16, 24), 65, 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_CertificateDetails_OcspDetails_mutable_expiration(envoy_admin_v3_CertificateDetails_OcspDetails* msg, upb_Arena* arena) {
|
||||
struct google_protobuf_Timestamp* sub = (struct google_protobuf_Timestamp*)envoy_admin_v3_CertificateDetails_OcspDetails_expiration(msg);
|
||||
|
@ -718,15 +718,15 @@ UPB_INLINE bool envoy_admin_v3_SubjectAlternateName_has_ip_address(const envoy_a
|
|||
|
||||
UPB_INLINE void envoy_admin_v3_SubjectAlternateName_set_dns(envoy_admin_v3_SubjectAlternateName *msg, upb_StringView value) {
|
||||
const upb_MiniTableField field = {1, UPB_SIZE(12, 16), -9, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_SubjectAlternateName_set_uri(envoy_admin_v3_SubjectAlternateName *msg, upb_StringView value) {
|
||||
const upb_MiniTableField field = {2, UPB_SIZE(12, 16), -9, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_SubjectAlternateName_set_ip_address(envoy_admin_v3_SubjectAlternateName *msg, upb_StringView value) {
|
||||
const upb_MiniTableField field = {3, UPB_SIZE(12, 16), -9, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
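The generated setters above now route through upb_Message_SetBaseField instead of the internal _upb_Message_SetNonExtensionField helper; application code is unaffected because it only calls the generated accessors. A minimal sketch of driving one of those accessors (the `_new` constructor and the `certs.upb.h` include path are assumptions based on upb's usual codegen layout):

#include "envoy/admin/v3/certs.upb.h"  // assumed generated header path
#include "upb/mem/arena.h"

int main() {
  upb_Arena* arena = upb_Arena_New();
  // _new(arena) is upb's standard generated constructor (assumed here).
  envoy_admin_v3_SubjectAlternateName* san =
      envoy_admin_v3_SubjectAlternateName_new(arena);
  // One of the setters shown in the diff; internally it now calls
  // upb_Message_SetBaseField.
  envoy_admin_v3_SubjectAlternateName_set_dns(
      san, upb_StringView_FromString("example.internal"));
  upb_Arena_Free(arena);
}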
@ -27,6 +27,9 @@ const upb_MiniTable envoy__admin__v3__Certificates_msg_init = {
|
|||
&envoy_admin_v3_Certificates_submsgs[0],
|
||||
&envoy_admin_v3_Certificates__fields[0],
|
||||
16, 1, kUpb_ExtMode_NonExtendable, 1, UPB_FASTTABLE_MASK(8), 0,
|
||||
#ifdef UPB_TRACING_ENABLED
|
||||
"envoy.admin.v3.Certificates",
|
||||
#endif
|
||||
UPB_FASTTABLE_INIT({
|
||||
{0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
|
||||
{0x000800003f00000a, &upb_prm_1bt_max64b},
|
||||
|
@ -47,6 +50,9 @@ const upb_MiniTable envoy__admin__v3__Certificate_msg_init = {
|
|||
&envoy_admin_v3_Certificate_submsgs[0],
|
||||
&envoy_admin_v3_Certificate__fields[0],
|
||||
UPB_SIZE(16, 24), 2, kUpb_ExtMode_NonExtendable, 2, UPB_FASTTABLE_MASK(24), 0,
|
||||
#ifdef UPB_TRACING_ENABLED
|
||||
"envoy.admin.v3.Certificate",
|
||||
#endif
|
||||
UPB_FASTTABLE_INIT({
|
||||
{0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
|
||||
{0x000800003f00000a, &upb_prm_1bt_max128b},
|
||||
|
@ -76,6 +82,9 @@ const upb_MiniTable envoy__admin__v3__CertificateDetails_msg_init = {
|
|||
&envoy_admin_v3_CertificateDetails_submsgs[0],
|
||||
&envoy_admin_v3_CertificateDetails__fields[0],
|
||||
UPB_SIZE(56, 88), 7, kUpb_ExtMode_NonExtendable, 7, UPB_FASTTABLE_MASK(56), 0,
|
||||
#ifdef UPB_TRACING_ENABLED
|
||||
"envoy.admin.v3.CertificateDetails",
|
||||
#endif
|
||||
UPB_FASTTABLE_INIT({
|
||||
{0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
|
||||
{0x001000003f00000a, &upb_pss_1bt},
|
||||
|
@ -102,6 +111,9 @@ const upb_MiniTable envoy__admin__v3__CertificateDetails__OcspDetails_msg_init =
|
|||
&envoy_admin_v3_CertificateDetails_OcspDetails_submsgs[0],
|
||||
&envoy_admin_v3_CertificateDetails_OcspDetails__fields[0],
|
||||
UPB_SIZE(24, 32), 2, kUpb_ExtMode_NonExtendable, 2, UPB_FASTTABLE_MASK(255), 0,
|
||||
#ifdef UPB_TRACING_ENABLED
|
||||
"envoy.admin.v3.CertificateDetails.OcspDetails",
|
||||
#endif
|
||||
};
|
||||
|
||||
static const upb_MiniTableField envoy_admin_v3_SubjectAlternateName__fields[3] = {
|
||||
|
@ -114,6 +126,9 @@ const upb_MiniTable envoy__admin__v3__SubjectAlternateName_msg_init = {
|
|||
NULL,
|
||||
&envoy_admin_v3_SubjectAlternateName__fields[0],
|
||||
UPB_SIZE(24, 32), 3, kUpb_ExtMode_NonExtendable, 3, UPB_FASTTABLE_MASK(24), 0,
|
||||
#ifdef UPB_TRACING_ENABLED
|
||||
"envoy.admin.v3.SubjectAlternateName",
|
||||
#endif
|
||||
UPB_FASTTABLE_INIT({
|
||||
{0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
|
||||
{0x001000080100000a, &upb_pos_1bt},
|
||||
|
|
|
@ -307,15 +307,15 @@ UPB_INLINE upb_StringView envoy_admin_v3_ClusterStatus_eds_service_name(const en
|
|||
|
||||
UPB_INLINE void envoy_admin_v3_ClusterStatus_set_name(envoy_admin_v3_ClusterStatus *msg, upb_StringView value) {
|
||||
const upb_MiniTableField field = {1, UPB_SIZE(28, 16), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_ClusterStatus_set_added_via_api(envoy_admin_v3_ClusterStatus *msg, bool value) {
|
||||
const upb_MiniTableField field = {2, 9, 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_ClusterStatus_set_success_rate_ejection_threshold(envoy_admin_v3_ClusterStatus *msg, struct envoy_type_v3_Percent* value) {
|
||||
const upb_MiniTableField field = {3, UPB_SIZE(12, 32), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE struct envoy_type_v3_Percent* envoy_admin_v3_ClusterStatus_mutable_success_rate_ejection_threshold(envoy_admin_v3_ClusterStatus* msg, upb_Arena* arena) {
|
||||
struct envoy_type_v3_Percent* sub = (struct envoy_type_v3_Percent*)envoy_admin_v3_ClusterStatus_success_rate_ejection_threshold(msg);
|
||||
|
@ -357,7 +357,7 @@ UPB_INLINE struct envoy_admin_v3_HostStatus* envoy_admin_v3_ClusterStatus_add_ho
|
|||
}
|
||||
UPB_INLINE void envoy_admin_v3_ClusterStatus_set_local_origin_success_rate_ejection_threshold(envoy_admin_v3_ClusterStatus *msg, struct envoy_type_v3_Percent* value) {
|
||||
const upb_MiniTableField field = {5, UPB_SIZE(20, 48), 65, 2, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE struct envoy_type_v3_Percent* envoy_admin_v3_ClusterStatus_mutable_local_origin_success_rate_ejection_threshold(envoy_admin_v3_ClusterStatus* msg, upb_Arena* arena) {
|
||||
struct envoy_type_v3_Percent* sub = (struct envoy_type_v3_Percent*)envoy_admin_v3_ClusterStatus_local_origin_success_rate_ejection_threshold(msg);
|
||||
|
@ -369,7 +369,7 @@ UPB_INLINE struct envoy_type_v3_Percent* envoy_admin_v3_ClusterStatus_mutable_lo
|
|||
}
|
||||
UPB_INLINE void envoy_admin_v3_ClusterStatus_set_circuit_breakers(envoy_admin_v3_ClusterStatus *msg, struct envoy_config_cluster_v3_CircuitBreakers* value) {
|
||||
const upb_MiniTableField field = {6, UPB_SIZE(24, 56), 66, 3, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE struct envoy_config_cluster_v3_CircuitBreakers* envoy_admin_v3_ClusterStatus_mutable_circuit_breakers(envoy_admin_v3_ClusterStatus* msg, upb_Arena* arena) {
|
||||
struct envoy_config_cluster_v3_CircuitBreakers* sub = (struct envoy_config_cluster_v3_CircuitBreakers*)envoy_admin_v3_ClusterStatus_circuit_breakers(msg);
|
||||
|
@ -381,11 +381,11 @@ UPB_INLINE struct envoy_config_cluster_v3_CircuitBreakers* envoy_admin_v3_Cluste
|
|||
}
|
||||
UPB_INLINE void envoy_admin_v3_ClusterStatus_set_observability_name(envoy_admin_v3_ClusterStatus *msg, upb_StringView value) {
|
||||
const upb_MiniTableField field = {7, UPB_SIZE(36, 64), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_ClusterStatus_set_eds_service_name(envoy_admin_v3_ClusterStatus *msg, upb_StringView value) {
|
||||
const upb_MiniTableField field = {8, UPB_SIZE(44, 80), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
|
||||
/* envoy.admin.v3.HostStatus */
|
||||
|
@ -575,7 +575,7 @@ UPB_INLINE bool envoy_admin_v3_HostStatus_has_locality(const envoy_admin_v3_Host
|
|||
|
||||
UPB_INLINE void envoy_admin_v3_HostStatus_set_address(envoy_admin_v3_HostStatus *msg, struct envoy_config_core_v3_Address* value) {
|
||||
const upb_MiniTableField field = {1, UPB_SIZE(12, 24), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE struct envoy_config_core_v3_Address* envoy_admin_v3_HostStatus_mutable_address(envoy_admin_v3_HostStatus* msg, upb_Arena* arena) {
|
||||
struct envoy_config_core_v3_Address* sub = (struct envoy_config_core_v3_Address*)envoy_admin_v3_HostStatus_address(msg);
|
||||
|
@ -617,7 +617,7 @@ UPB_INLINE struct envoy_admin_v3_SimpleMetric* envoy_admin_v3_HostStatus_add_sta
|
|||
}
|
||||
UPB_INLINE void envoy_admin_v3_HostStatus_set_health_status(envoy_admin_v3_HostStatus *msg, envoy_admin_v3_HostHealthStatus* value) {
|
||||
const upb_MiniTableField field = {3, UPB_SIZE(20, 40), 65, 2, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE struct envoy_admin_v3_HostHealthStatus* envoy_admin_v3_HostStatus_mutable_health_status(envoy_admin_v3_HostStatus* msg, upb_Arena* arena) {
|
||||
struct envoy_admin_v3_HostHealthStatus* sub = (struct envoy_admin_v3_HostHealthStatus*)envoy_admin_v3_HostStatus_health_status(msg);
|
||||
|
@ -629,7 +629,7 @@ UPB_INLINE struct envoy_admin_v3_HostHealthStatus* envoy_admin_v3_HostStatus_mut
|
|||
}
|
||||
UPB_INLINE void envoy_admin_v3_HostStatus_set_success_rate(envoy_admin_v3_HostStatus *msg, struct envoy_type_v3_Percent* value) {
|
||||
const upb_MiniTableField field = {4, UPB_SIZE(24, 48), 66, 3, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE struct envoy_type_v3_Percent* envoy_admin_v3_HostStatus_mutable_success_rate(envoy_admin_v3_HostStatus* msg, upb_Arena* arena) {
|
||||
struct envoy_type_v3_Percent* sub = (struct envoy_type_v3_Percent*)envoy_admin_v3_HostStatus_success_rate(msg);
|
||||
|
@ -641,19 +641,19 @@ UPB_INLINE struct envoy_type_v3_Percent* envoy_admin_v3_HostStatus_mutable_succe
|
|||
}
|
||||
UPB_INLINE void envoy_admin_v3_HostStatus_set_weight(envoy_admin_v3_HostStatus *msg, uint32_t value) {
|
||||
const upb_MiniTableField field = {5, UPB_SIZE(28, 12), 0, kUpb_NoSub, 13, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_HostStatus_set_hostname(envoy_admin_v3_HostStatus *msg, upb_StringView value) {
|
||||
const upb_MiniTableField field = {6, UPB_SIZE(44, 56), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_HostStatus_set_priority(envoy_admin_v3_HostStatus *msg, uint32_t value) {
|
||||
const upb_MiniTableField field = {7, UPB_SIZE(32, 16), 0, kUpb_NoSub, 13, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_HostStatus_set_local_origin_success_rate(envoy_admin_v3_HostStatus *msg, struct envoy_type_v3_Percent* value) {
|
||||
const upb_MiniTableField field = {8, UPB_SIZE(36, 72), 67, 4, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE struct envoy_type_v3_Percent* envoy_admin_v3_HostStatus_mutable_local_origin_success_rate(envoy_admin_v3_HostStatus* msg, upb_Arena* arena) {
|
||||
struct envoy_type_v3_Percent* sub = (struct envoy_type_v3_Percent*)envoy_admin_v3_HostStatus_local_origin_success_rate(msg);
|
||||
|
@ -665,7 +665,7 @@ UPB_INLINE struct envoy_type_v3_Percent* envoy_admin_v3_HostStatus_mutable_local
|
|||
}
|
||||
UPB_INLINE void envoy_admin_v3_HostStatus_set_locality(envoy_admin_v3_HostStatus *msg, struct envoy_config_core_v3_Locality* value) {
|
||||
const upb_MiniTableField field = {9, UPB_SIZE(40, 80), 68, 5, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE struct envoy_config_core_v3_Locality* envoy_admin_v3_HostStatus_mutable_locality(envoy_admin_v3_HostStatus* msg, upb_Arena* arena) {
|
||||
struct envoy_config_core_v3_Locality* sub = (struct envoy_config_core_v3_Locality*)envoy_admin_v3_HostStatus_locality(msg);
|
||||
|
@ -811,35 +811,35 @@ UPB_INLINE bool envoy_admin_v3_HostHealthStatus_active_hc_timeout(const envoy_ad
|
|||
|
||||
UPB_INLINE void envoy_admin_v3_HostHealthStatus_set_failed_active_health_check(envoy_admin_v3_HostHealthStatus *msg, bool value) {
|
||||
const upb_MiniTableField field = {1, 8, 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_HostHealthStatus_set_failed_outlier_check(envoy_admin_v3_HostHealthStatus *msg, bool value) {
|
||||
const upb_MiniTableField field = {2, 9, 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_HostHealthStatus_set_eds_health_status(envoy_admin_v3_HostHealthStatus *msg, int32_t value) {
|
||||
const upb_MiniTableField field = {3, 12, 0, kUpb_NoSub, 5, (int)kUpb_FieldMode_Scalar | (int)kUpb_LabelFlags_IsAlternate | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_HostHealthStatus_set_failed_active_degraded_check(envoy_admin_v3_HostHealthStatus *msg, bool value) {
|
||||
const upb_MiniTableField field = {4, 16, 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_HostHealthStatus_set_pending_dynamic_removal(envoy_admin_v3_HostHealthStatus *msg, bool value) {
|
||||
const upb_MiniTableField field = {5, 17, 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_HostHealthStatus_set_pending_active_hc(envoy_admin_v3_HostHealthStatus *msg, bool value) {
|
||||
const upb_MiniTableField field = {6, 18, 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_HostHealthStatus_set_excluded_via_immediate_hc_fail(envoy_admin_v3_HostHealthStatus *msg, bool value) {
|
||||
const upb_MiniTableField field = {7, 19, 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_HostHealthStatus_set_active_hc_timeout(envoy_admin_v3_HostHealthStatus *msg, bool value) {
|
||||
const upb_MiniTableField field = {8, 20, 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -32,6 +32,9 @@ const upb_MiniTable envoy__admin__v3__Clusters_msg_init = {
|
|||
&envoy_admin_v3_Clusters_submsgs[0],
|
||||
&envoy_admin_v3_Clusters__fields[0],
|
||||
16, 1, kUpb_ExtMode_NonExtendable, 1, UPB_FASTTABLE_MASK(8), 0,
|
||||
#ifdef UPB_TRACING_ENABLED
|
||||
"envoy.admin.v3.Clusters",
|
||||
#endif
|
||||
UPB_FASTTABLE_INIT({
|
||||
{0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
|
||||
{0x000800003f00000a, &upb_prm_1bt_max128b},
|
||||
|
@ -60,6 +63,9 @@ const upb_MiniTable envoy__admin__v3__ClusterStatus_msg_init = {
|
|||
&envoy_admin_v3_ClusterStatus_submsgs[0],
|
||||
&envoy_admin_v3_ClusterStatus__fields[0],
|
||||
UPB_SIZE(56, 96), 8, kUpb_ExtMode_NonExtendable, 8, UPB_FASTTABLE_MASK(120), 0,
|
||||
#ifdef UPB_TRACING_ENABLED
|
||||
"envoy.admin.v3.ClusterStatus",
|
||||
#endif
|
||||
UPB_FASTTABLE_INIT({
|
||||
{0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
|
||||
{0x001000003f00000a, &upb_pss_1bt},
|
||||
|
@ -105,6 +111,9 @@ const upb_MiniTable envoy__admin__v3__HostStatus_msg_init = {
|
|||
&envoy_admin_v3_HostStatus_submsgs[0],
|
||||
&envoy_admin_v3_HostStatus__fields[0],
|
||||
UPB_SIZE(56, 88), 9, kUpb_ExtMode_NonExtendable, 9, UPB_FASTTABLE_MASK(56), 0,
|
||||
#ifdef UPB_TRACING_ENABLED
|
||||
"envoy.admin.v3.HostStatus",
|
||||
#endif
|
||||
UPB_FASTTABLE_INIT({
|
||||
{0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
|
||||
{0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
|
||||
|
@ -132,6 +141,9 @@ const upb_MiniTable envoy__admin__v3__HostHealthStatus_msg_init = {
|
|||
NULL,
|
||||
&envoy_admin_v3_HostHealthStatus__fields[0],
|
||||
24, 8, kUpb_ExtMode_NonExtendable, 8, UPB_FASTTABLE_MASK(120), 0,
|
||||
#ifdef UPB_TRACING_ENABLED
|
||||
"envoy.admin.v3.HostHealthStatus",
|
||||
#endif
|
||||
UPB_FASTTABLE_INIT({
|
||||
{0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
|
||||
{0x000800003f000008, &upb_psb1_1bt},
|
||||
|
|
|
@@ -209,7 +209,7 @@ UPB_INLINE bool envoy_admin_v3_BootstrapConfigDump_has_last_updated(const envoy_

 UPB_INLINE void envoy_admin_v3_BootstrapConfigDump_set_bootstrap(envoy_admin_v3_BootstrapConfigDump *msg, struct envoy_config_bootstrap_v3_Bootstrap* value) {
   const upb_MiniTableField field = {1, UPB_SIZE(12, 16), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct envoy_config_bootstrap_v3_Bootstrap* envoy_admin_v3_BootstrapConfigDump_mutable_bootstrap(envoy_admin_v3_BootstrapConfigDump* msg, upb_Arena* arena) {
   struct envoy_config_bootstrap_v3_Bootstrap* sub = (struct envoy_config_bootstrap_v3_Bootstrap*)envoy_admin_v3_BootstrapConfigDump_bootstrap(msg);
@@ -221,7 +221,7 @@ UPB_INLINE struct envoy_config_bootstrap_v3_Bootstrap* envoy_admin_v3_BootstrapC
 }
 UPB_INLINE void envoy_admin_v3_BootstrapConfigDump_set_last_updated(envoy_admin_v3_BootstrapConfigDump *msg, struct google_protobuf_Timestamp* value) {
   const upb_MiniTableField field = {2, UPB_SIZE(16, 24), 65, 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_BootstrapConfigDump_mutable_last_updated(envoy_admin_v3_BootstrapConfigDump* msg, upb_Arena* arena) {
   struct google_protobuf_Timestamp* sub = (struct google_protobuf_Timestamp*)envoy_admin_v3_BootstrapConfigDump_last_updated(msg);
@@ -579,15 +579,15 @@ UPB_INLINE int32_t envoy_admin_v3_SecretsConfigDump_DynamicSecret_client_status(

 UPB_INLINE void envoy_admin_v3_SecretsConfigDump_DynamicSecret_set_name(envoy_admin_v3_SecretsConfigDump_DynamicSecret *msg, upb_StringView value) {
   const upb_MiniTableField field = {1, UPB_SIZE(28, 16), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE void envoy_admin_v3_SecretsConfigDump_DynamicSecret_set_version_info(envoy_admin_v3_SecretsConfigDump_DynamicSecret *msg, upb_StringView value) {
   const upb_MiniTableField field = {2, UPB_SIZE(36, 32), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE void envoy_admin_v3_SecretsConfigDump_DynamicSecret_set_last_updated(envoy_admin_v3_SecretsConfigDump_DynamicSecret *msg, struct google_protobuf_Timestamp* value) {
   const upb_MiniTableField field = {3, UPB_SIZE(12, 48), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_SecretsConfigDump_DynamicSecret_mutable_last_updated(envoy_admin_v3_SecretsConfigDump_DynamicSecret* msg, upb_Arena* arena) {
   struct google_protobuf_Timestamp* sub = (struct google_protobuf_Timestamp*)envoy_admin_v3_SecretsConfigDump_DynamicSecret_last_updated(msg);
@@ -599,7 +599,7 @@ UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_SecretsConfigDump_Dy
 }
 UPB_INLINE void envoy_admin_v3_SecretsConfigDump_DynamicSecret_set_secret(envoy_admin_v3_SecretsConfigDump_DynamicSecret *msg, struct google_protobuf_Any* value) {
   const upb_MiniTableField field = {4, UPB_SIZE(16, 56), 65, 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_SecretsConfigDump_DynamicSecret_mutable_secret(envoy_admin_v3_SecretsConfigDump_DynamicSecret* msg, upb_Arena* arena) {
   struct google_protobuf_Any* sub = (struct google_protobuf_Any*)envoy_admin_v3_SecretsConfigDump_DynamicSecret_secret(msg);
@@ -611,7 +611,7 @@ UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_SecretsConfigDump_DynamicS
 }
 UPB_INLINE void envoy_admin_v3_SecretsConfigDump_DynamicSecret_set_error_state(envoy_admin_v3_SecretsConfigDump_DynamicSecret *msg, struct envoy_admin_v3_UpdateFailureState* value) {
   const upb_MiniTableField field = {5, UPB_SIZE(20, 64), 66, 2, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct envoy_admin_v3_UpdateFailureState* envoy_admin_v3_SecretsConfigDump_DynamicSecret_mutable_error_state(envoy_admin_v3_SecretsConfigDump_DynamicSecret* msg, upb_Arena* arena) {
   struct envoy_admin_v3_UpdateFailureState* sub = (struct envoy_admin_v3_UpdateFailureState*)envoy_admin_v3_SecretsConfigDump_DynamicSecret_error_state(msg);
@@ -623,7 +623,7 @@ UPB_INLINE struct envoy_admin_v3_UpdateFailureState* envoy_admin_v3_SecretsConfi
 }
 UPB_INLINE void envoy_admin_v3_SecretsConfigDump_DynamicSecret_set_client_status(envoy_admin_v3_SecretsConfigDump_DynamicSecret *msg, int32_t value) {
   const upb_MiniTableField field = {6, UPB_SIZE(24, 12), 0, kUpb_NoSub, 5, (int)kUpb_FieldMode_Scalar | (int)kUpb_LabelFlags_IsAlternate | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }

 /* envoy.admin.v3.SecretsConfigDump.StaticSecret */
@@ -709,11 +709,11 @@ UPB_INLINE bool envoy_admin_v3_SecretsConfigDump_StaticSecret_has_secret(const e

 UPB_INLINE void envoy_admin_v3_SecretsConfigDump_StaticSecret_set_name(envoy_admin_v3_SecretsConfigDump_StaticSecret *msg, upb_StringView value) {
   const upb_MiniTableField field = {1, UPB_SIZE(20, 16), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE void envoy_admin_v3_SecretsConfigDump_StaticSecret_set_last_updated(envoy_admin_v3_SecretsConfigDump_StaticSecret *msg, struct google_protobuf_Timestamp* value) {
   const upb_MiniTableField field = {2, UPB_SIZE(12, 32), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_SecretsConfigDump_StaticSecret_mutable_last_updated(envoy_admin_v3_SecretsConfigDump_StaticSecret* msg, upb_Arena* arena) {
   struct google_protobuf_Timestamp* sub = (struct google_protobuf_Timestamp*)envoy_admin_v3_SecretsConfigDump_StaticSecret_last_updated(msg);
@@ -725,7 +725,7 @@ UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_SecretsConfigDump_St
 }
 UPB_INLINE void envoy_admin_v3_SecretsConfigDump_StaticSecret_set_secret(envoy_admin_v3_SecretsConfigDump_StaticSecret *msg, struct google_protobuf_Any* value) {
   const upb_MiniTableField field = {3, UPB_SIZE(16, 40), 65, 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_SecretsConfigDump_StaticSecret_mutable_secret(envoy_admin_v3_SecretsConfigDump_StaticSecret* msg, upb_Arena* arena) {
   struct google_protobuf_Any* sub = (struct google_protobuf_Any*)envoy_admin_v3_SecretsConfigDump_StaticSecret_secret(msg);

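Every hunk in this header makes the same one-line substitution: the generated setters stop calling the internal _upb_Message_SetNonExtensionField() helper and call the public upb_Message_SetBaseField() entry point instead, while the upb_MiniTableField descriptors stay untouched. Below is a minimal, hedged sketch of how one of these setters is exercised from calling code; the include paths and the *_new() constructor follow the usual upb codegen conventions and are assumptions, not something shown in this diff.

/* Hedged sketch: only the accessors that appear in the hunks above are
 * confirmed by this diff; the header paths and the *_new() constructor are
 * assumed from standard upb code generation. */
#include "envoy/admin/v3/config_dump.upb.h"    /* assumed include path */
#include "google/protobuf/timestamp.upb.h"     /* assumed include path */

void fill_bootstrap_dump(upb_Arena* arena) {
  envoy_admin_v3_BootstrapConfigDump* dump =
      envoy_admin_v3_BootstrapConfigDump_new(arena);  /* assumed ctor name */
  /* mutable_last_updated() allocates the Timestamp submessage on the arena if
   * needed and stores it via the setter shown above, whose body now routes
   * the write through upb_Message_SetBaseField() instead of the old
   * _upb_Message_SetNonExtensionField(). */
  struct google_protobuf_Timestamp* ts =
      envoy_admin_v3_BootstrapConfigDump_mutable_last_updated(dump, arena);
  google_protobuf_Timestamp_set_seconds(ts, 1700000000);
}

Because only the body of each inline setter changes, code like this recompiles against the regenerated header with no source changes.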
@@ -30,6 +30,9 @@ const upb_MiniTable envoy__admin__v3__ConfigDump_msg_init = {
   &envoy_admin_v3_ConfigDump_submsgs[0],
   &envoy_admin_v3_ConfigDump__fields[0],
   16, 1, kUpb_ExtMode_NonExtendable, 1, UPB_FASTTABLE_MASK(8), 0,
+#ifdef UPB_TRACING_ENABLED
+  "envoy.admin.v3.ConfigDump",
+#endif
   UPB_FASTTABLE_INIT({
     {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
     {0x000800003f00000a, &upb_prm_1bt_maxmaxb},
@@ -50,6 +53,9 @@ const upb_MiniTable envoy__admin__v3__BootstrapConfigDump_msg_init = {
   &envoy_admin_v3_BootstrapConfigDump_submsgs[0],
   &envoy_admin_v3_BootstrapConfigDump__fields[0],
   UPB_SIZE(24, 32), 2, kUpb_ExtMode_NonExtendable, 2, UPB_FASTTABLE_MASK(255), 0,
+#ifdef UPB_TRACING_ENABLED
+  "envoy.admin.v3.BootstrapConfigDump",
+#endif
 };

 static const upb_MiniTableSub envoy_admin_v3_SecretsConfigDump_submsgs[3] = {
@@ -68,6 +74,9 @@ const upb_MiniTable envoy__admin__v3__SecretsConfigDump_msg_init = {
   &envoy_admin_v3_SecretsConfigDump_submsgs[0],
   &envoy_admin_v3_SecretsConfigDump__fields[0],
   UPB_SIZE(24, 32), 3, kUpb_ExtMode_NonExtendable, 3, UPB_FASTTABLE_MASK(24), 0,
+#ifdef UPB_TRACING_ENABLED
+  "envoy.admin.v3.SecretsConfigDump",
+#endif
   UPB_FASTTABLE_INIT({
     {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
     {0x000800003f00000a, &upb_prm_1bt_max64b},
@@ -95,6 +104,9 @@ const upb_MiniTable envoy__admin__v3__SecretsConfigDump__DynamicSecret_msg_init
   &envoy_admin_v3_SecretsConfigDump_DynamicSecret_submsgs[0],
   &envoy_admin_v3_SecretsConfigDump_DynamicSecret__fields[0],
   UPB_SIZE(48, 72), 6, kUpb_ExtMode_NonExtendable, 6, UPB_FASTTABLE_MASK(56), 0,
+#ifdef UPB_TRACING_ENABLED
+  "envoy.admin.v3.SecretsConfigDump.DynamicSecret",
+#endif
   UPB_FASTTABLE_INIT({
     {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
     {0x001000003f00000a, &upb_pss_1bt},
@@ -122,6 +134,9 @@ const upb_MiniTable envoy__admin__v3__SecretsConfigDump__StaticSecret_msg_init =
   &envoy_admin_v3_SecretsConfigDump_StaticSecret_submsgs[0],
   &envoy_admin_v3_SecretsConfigDump_StaticSecret__fields[0],
   UPB_SIZE(32, 48), 3, kUpb_ExtMode_NonExtendable, 3, UPB_FASTTABLE_MASK(8), 0,
+#ifdef UPB_TRACING_ENABLED
+  "envoy.admin.v3.SecretsConfigDump.StaticSecret",
+#endif
   UPB_FASTTABLE_INIT({
     {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
     {0x001000003f00000a, &upb_pss_1bt},

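Each minitable hunk makes the same three-line addition: the upb_MiniTable initializer gains the message's fully qualified name, guarded by UPB_TRACING_ENABLED so that builds without the macro see no change in either the initializer or the struct layout. The stand-in below sketches that guarded-member pattern with a hypothetical struct rather than the real upb_MiniTable definition; it only illustrates why the type and every initializer must be guarded by the same macro.

/* Hedged, self-contained sketch using a hypothetical stand-in type,
 * not the real upb_MiniTable. */
#include <stdio.h>

typedef struct {
  int field_count;
#ifdef UPB_TRACING_ENABLED
  const char* full_name;  /* e.g. "envoy.admin.v3.ConfigDump" */
#endif
} demo_mini_table;  /* hypothetical stand-in */

static const demo_mini_table demo_config_dump_init = {
    1,
#ifdef UPB_TRACING_ENABLED
    "envoy.admin.v3.ConfigDump",
#endif
};

int main(void) {
#ifdef UPB_TRACING_ENABLED
  printf("tracing enabled: %s\n", demo_config_dump_init.full_name);
#else
  printf("tracing disabled, %d field(s)\n", demo_config_dump_init.field_count);
#endif
  return 0;
}

Compiled with -DUPB_TRACING_ENABLED the extra string is present; compiled without it, the initializer collapses back to its original shape, which is exactly what the generated hunks above rely on.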
@@ -151,7 +151,7 @@ UPB_INLINE upb_StringView envoy_admin_v3_UpdateFailureState_version_info(const e

 UPB_INLINE void envoy_admin_v3_UpdateFailureState_set_failed_configuration(envoy_admin_v3_UpdateFailureState *msg, struct google_protobuf_Any* value) {
   const upb_MiniTableField field = {1, UPB_SIZE(12, 16), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_UpdateFailureState_mutable_failed_configuration(envoy_admin_v3_UpdateFailureState* msg, upb_Arena* arena) {
   struct google_protobuf_Any* sub = (struct google_protobuf_Any*)envoy_admin_v3_UpdateFailureState_failed_configuration(msg);
@@ -163,7 +163,7 @@ UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_UpdateFailureState_mutable
 }
 UPB_INLINE void envoy_admin_v3_UpdateFailureState_set_last_update_attempt(envoy_admin_v3_UpdateFailureState *msg, struct google_protobuf_Timestamp* value) {
   const upb_MiniTableField field = {2, UPB_SIZE(16, 24), 65, 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_UpdateFailureState_mutable_last_update_attempt(envoy_admin_v3_UpdateFailureState* msg, upb_Arena* arena) {
   struct google_protobuf_Timestamp* sub = (struct google_protobuf_Timestamp*)envoy_admin_v3_UpdateFailureState_last_update_attempt(msg);
@@ -175,11 +175,11 @@ UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_UpdateFailureState_m
 }
 UPB_INLINE void envoy_admin_v3_UpdateFailureState_set_details(envoy_admin_v3_UpdateFailureState *msg, upb_StringView value) {
   const upb_MiniTableField field = {3, UPB_SIZE(20, 32), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE void envoy_admin_v3_UpdateFailureState_set_version_info(envoy_admin_v3_UpdateFailureState *msg, upb_StringView value) {
   const upb_MiniTableField field = {4, UPB_SIZE(28, 48), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }

 /* envoy.admin.v3.ListenersConfigDump */
@@ -297,7 +297,7 @@ UPB_INLINE upb_Array* _envoy_admin_v3_ListenersConfigDump_dynamic_listeners_muta

 UPB_INLINE void envoy_admin_v3_ListenersConfigDump_set_version_info(envoy_admin_v3_ListenersConfigDump *msg, upb_StringView value) {
   const upb_MiniTableField field = {1, UPB_SIZE(16, 8), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE envoy_admin_v3_ListenersConfigDump_StaticListener** envoy_admin_v3_ListenersConfigDump_mutable_static_listeners(envoy_admin_v3_ListenersConfigDump* msg, size_t* size) {
   upb_MiniTableField field = {2, UPB_SIZE(8, 24), 0, 0, 11, (int)kUpb_FieldMode_Array | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
@@ -431,7 +431,7 @@ UPB_INLINE bool envoy_admin_v3_ListenersConfigDump_StaticListener_has_last_updat

 UPB_INLINE void envoy_admin_v3_ListenersConfigDump_StaticListener_set_listener(envoy_admin_v3_ListenersConfigDump_StaticListener *msg, struct google_protobuf_Any* value) {
   const upb_MiniTableField field = {1, UPB_SIZE(12, 16), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_ListenersConfigDump_StaticListener_mutable_listener(envoy_admin_v3_ListenersConfigDump_StaticListener* msg, upb_Arena* arena) {
   struct google_protobuf_Any* sub = (struct google_protobuf_Any*)envoy_admin_v3_ListenersConfigDump_StaticListener_listener(msg);
@@ -443,7 +443,7 @@ UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_ListenersConfigDump_Static
 }
 UPB_INLINE void envoy_admin_v3_ListenersConfigDump_StaticListener_set_last_updated(envoy_admin_v3_ListenersConfigDump_StaticListener *msg, struct google_protobuf_Timestamp* value) {
   const upb_MiniTableField field = {2, UPB_SIZE(16, 24), 65, 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_ListenersConfigDump_StaticListener_mutable_last_updated(envoy_admin_v3_ListenersConfigDump_StaticListener* msg, upb_Arena* arena) {
   struct google_protobuf_Timestamp* sub = (struct google_protobuf_Timestamp*)envoy_admin_v3_ListenersConfigDump_StaticListener_last_updated(msg);
@@ -537,11 +537,11 @@ UPB_INLINE bool envoy_admin_v3_ListenersConfigDump_DynamicListenerState_has_last

 UPB_INLINE void envoy_admin_v3_ListenersConfigDump_DynamicListenerState_set_version_info(envoy_admin_v3_ListenersConfigDump_DynamicListenerState *msg, upb_StringView value) {
   const upb_MiniTableField field = {1, UPB_SIZE(20, 16), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE void envoy_admin_v3_ListenersConfigDump_DynamicListenerState_set_listener(envoy_admin_v3_ListenersConfigDump_DynamicListenerState *msg, struct google_protobuf_Any* value) {
   const upb_MiniTableField field = {2, UPB_SIZE(12, 32), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_ListenersConfigDump_DynamicListenerState_mutable_listener(envoy_admin_v3_ListenersConfigDump_DynamicListenerState* msg, upb_Arena* arena) {
   struct google_protobuf_Any* sub = (struct google_protobuf_Any*)envoy_admin_v3_ListenersConfigDump_DynamicListenerState_listener(msg);
@@ -553,7 +553,7 @@ UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_ListenersConfigDump_Dynami
 }
 UPB_INLINE void envoy_admin_v3_ListenersConfigDump_DynamicListenerState_set_last_updated(envoy_admin_v3_ListenersConfigDump_DynamicListenerState *msg, struct google_protobuf_Timestamp* value) {
   const upb_MiniTableField field = {3, UPB_SIZE(16, 40), 65, 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_ListenersConfigDump_DynamicListenerState_mutable_last_updated(envoy_admin_v3_ListenersConfigDump_DynamicListenerState* msg, upb_Arena* arena) {
   struct google_protobuf_Timestamp* sub = (struct google_protobuf_Timestamp*)envoy_admin_v3_ListenersConfigDump_DynamicListenerState_last_updated(msg);
@@ -691,11 +691,11 @@ UPB_INLINE int32_t envoy_admin_v3_ListenersConfigDump_DynamicListener_client_sta

 UPB_INLINE void envoy_admin_v3_ListenersConfigDump_DynamicListener_set_name(envoy_admin_v3_ListenersConfigDump_DynamicListener *msg, upb_StringView value) {
   const upb_MiniTableField field = {1, UPB_SIZE(32, 16), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE void envoy_admin_v3_ListenersConfigDump_DynamicListener_set_active_state(envoy_admin_v3_ListenersConfigDump_DynamicListener *msg, envoy_admin_v3_ListenersConfigDump_DynamicListenerState* value) {
   const upb_MiniTableField field = {2, UPB_SIZE(12, 32), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct envoy_admin_v3_ListenersConfigDump_DynamicListenerState* envoy_admin_v3_ListenersConfigDump_DynamicListener_mutable_active_state(envoy_admin_v3_ListenersConfigDump_DynamicListener* msg, upb_Arena* arena) {
   struct envoy_admin_v3_ListenersConfigDump_DynamicListenerState* sub = (struct envoy_admin_v3_ListenersConfigDump_DynamicListenerState*)envoy_admin_v3_ListenersConfigDump_DynamicListener_active_state(msg);
@@ -707,7 +707,7 @@ UPB_INLINE struct envoy_admin_v3_ListenersConfigDump_DynamicListenerState* envoy
 }
 UPB_INLINE void envoy_admin_v3_ListenersConfigDump_DynamicListener_set_warming_state(envoy_admin_v3_ListenersConfigDump_DynamicListener *msg, envoy_admin_v3_ListenersConfigDump_DynamicListenerState* value) {
   const upb_MiniTableField field = {3, UPB_SIZE(16, 40), 65, 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct envoy_admin_v3_ListenersConfigDump_DynamicListenerState* envoy_admin_v3_ListenersConfigDump_DynamicListener_mutable_warming_state(envoy_admin_v3_ListenersConfigDump_DynamicListener* msg, upb_Arena* arena) {
   struct envoy_admin_v3_ListenersConfigDump_DynamicListenerState* sub = (struct envoy_admin_v3_ListenersConfigDump_DynamicListenerState*)envoy_admin_v3_ListenersConfigDump_DynamicListener_warming_state(msg);
@@ -719,7 +719,7 @@ UPB_INLINE struct envoy_admin_v3_ListenersConfigDump_DynamicListenerState* envoy
 }
 UPB_INLINE void envoy_admin_v3_ListenersConfigDump_DynamicListener_set_draining_state(envoy_admin_v3_ListenersConfigDump_DynamicListener *msg, envoy_admin_v3_ListenersConfigDump_DynamicListenerState* value) {
   const upb_MiniTableField field = {4, UPB_SIZE(20, 48), 66, 2, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct envoy_admin_v3_ListenersConfigDump_DynamicListenerState* envoy_admin_v3_ListenersConfigDump_DynamicListener_mutable_draining_state(envoy_admin_v3_ListenersConfigDump_DynamicListener* msg, upb_Arena* arena) {
   struct envoy_admin_v3_ListenersConfigDump_DynamicListenerState* sub = (struct envoy_admin_v3_ListenersConfigDump_DynamicListenerState*)envoy_admin_v3_ListenersConfigDump_DynamicListener_draining_state(msg);
@@ -731,7 +731,7 @@ UPB_INLINE struct envoy_admin_v3_ListenersConfigDump_DynamicListenerState* envoy
 }
 UPB_INLINE void envoy_admin_v3_ListenersConfigDump_DynamicListener_set_error_state(envoy_admin_v3_ListenersConfigDump_DynamicListener *msg, envoy_admin_v3_UpdateFailureState* value) {
   const upb_MiniTableField field = {5, UPB_SIZE(24, 56), 67, 3, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct envoy_admin_v3_UpdateFailureState* envoy_admin_v3_ListenersConfigDump_DynamicListener_mutable_error_state(envoy_admin_v3_ListenersConfigDump_DynamicListener* msg, upb_Arena* arena) {
   struct envoy_admin_v3_UpdateFailureState* sub = (struct envoy_admin_v3_UpdateFailureState*)envoy_admin_v3_ListenersConfigDump_DynamicListener_error_state(msg);
@@ -743,7 +743,7 @@ UPB_INLINE struct envoy_admin_v3_UpdateFailureState* envoy_admin_v3_ListenersCon
 }
 UPB_INLINE void envoy_admin_v3_ListenersConfigDump_DynamicListener_set_client_status(envoy_admin_v3_ListenersConfigDump_DynamicListener *msg, int32_t value) {
   const upb_MiniTableField field = {6, UPB_SIZE(28, 12), 0, kUpb_NoSub, 5, (int)kUpb_FieldMode_Scalar | (int)kUpb_LabelFlags_IsAlternate | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }

 /* envoy.admin.v3.ClustersConfigDump */
@@ -893,7 +893,7 @@ UPB_INLINE upb_Array* _envoy_admin_v3_ClustersConfigDump_dynamic_warming_cluster

 UPB_INLINE void envoy_admin_v3_ClustersConfigDump_set_version_info(envoy_admin_v3_ClustersConfigDump *msg, upb_StringView value) {
   const upb_MiniTableField field = {1, UPB_SIZE(20, 8), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE envoy_admin_v3_ClustersConfigDump_StaticCluster** envoy_admin_v3_ClustersConfigDump_mutable_static_clusters(envoy_admin_v3_ClustersConfigDump* msg, size_t* size) {
   upb_MiniTableField field = {2, UPB_SIZE(8, 24), 0, 0, 11, (int)kUpb_FieldMode_Array | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
@@ -1057,7 +1057,7 @@ UPB_INLINE bool envoy_admin_v3_ClustersConfigDump_StaticCluster_has_last_updated

 UPB_INLINE void envoy_admin_v3_ClustersConfigDump_StaticCluster_set_cluster(envoy_admin_v3_ClustersConfigDump_StaticCluster *msg, struct google_protobuf_Any* value) {
   const upb_MiniTableField field = {1, UPB_SIZE(12, 16), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_ClustersConfigDump_StaticCluster_mutable_cluster(envoy_admin_v3_ClustersConfigDump_StaticCluster* msg, upb_Arena* arena) {
   struct google_protobuf_Any* sub = (struct google_protobuf_Any*)envoy_admin_v3_ClustersConfigDump_StaticCluster_cluster(msg);
@@ -1069,7 +1069,7 @@ UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_ClustersConfigDump_StaticC
 }
 UPB_INLINE void envoy_admin_v3_ClustersConfigDump_StaticCluster_set_last_updated(envoy_admin_v3_ClustersConfigDump_StaticCluster *msg, struct google_protobuf_Timestamp* value) {
   const upb_MiniTableField field = {2, UPB_SIZE(16, 24), 65, 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_ClustersConfigDump_StaticCluster_mutable_last_updated(envoy_admin_v3_ClustersConfigDump_StaticCluster* msg, upb_Arena* arena) {
   struct google_protobuf_Timestamp* sub = (struct google_protobuf_Timestamp*)envoy_admin_v3_ClustersConfigDump_StaticCluster_last_updated(msg);
@@ -1191,11 +1191,11 @@ UPB_INLINE int32_t envoy_admin_v3_ClustersConfigDump_DynamicCluster_client_statu

 UPB_INLINE void envoy_admin_v3_ClustersConfigDump_DynamicCluster_set_version_info(envoy_admin_v3_ClustersConfigDump_DynamicCluster *msg, upb_StringView value) {
   const upb_MiniTableField field = {1, UPB_SIZE(28, 16), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE void envoy_admin_v3_ClustersConfigDump_DynamicCluster_set_cluster(envoy_admin_v3_ClustersConfigDump_DynamicCluster *msg, struct google_protobuf_Any* value) {
   const upb_MiniTableField field = {2, UPB_SIZE(12, 32), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_ClustersConfigDump_DynamicCluster_mutable_cluster(envoy_admin_v3_ClustersConfigDump_DynamicCluster* msg, upb_Arena* arena) {
   struct google_protobuf_Any* sub = (struct google_protobuf_Any*)envoy_admin_v3_ClustersConfigDump_DynamicCluster_cluster(msg);
@@ -1207,7 +1207,7 @@ UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_ClustersConfigDump_Dynamic
 }
 UPB_INLINE void envoy_admin_v3_ClustersConfigDump_DynamicCluster_set_last_updated(envoy_admin_v3_ClustersConfigDump_DynamicCluster *msg, struct google_protobuf_Timestamp* value) {
   const upb_MiniTableField field = {3, UPB_SIZE(16, 40), 65, 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_ClustersConfigDump_DynamicCluster_mutable_last_updated(envoy_admin_v3_ClustersConfigDump_DynamicCluster* msg, upb_Arena* arena) {
   struct google_protobuf_Timestamp* sub = (struct google_protobuf_Timestamp*)envoy_admin_v3_ClustersConfigDump_DynamicCluster_last_updated(msg);
@@ -1219,7 +1219,7 @@ UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_ClustersConfigDump_D
 }
 UPB_INLINE void envoy_admin_v3_ClustersConfigDump_DynamicCluster_set_error_state(envoy_admin_v3_ClustersConfigDump_DynamicCluster *msg, envoy_admin_v3_UpdateFailureState* value) {
   const upb_MiniTableField field = {4, UPB_SIZE(20, 48), 66, 2, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct envoy_admin_v3_UpdateFailureState* envoy_admin_v3_ClustersConfigDump_DynamicCluster_mutable_error_state(envoy_admin_v3_ClustersConfigDump_DynamicCluster* msg, upb_Arena* arena) {
   struct envoy_admin_v3_UpdateFailureState* sub = (struct envoy_admin_v3_UpdateFailureState*)envoy_admin_v3_ClustersConfigDump_DynamicCluster_error_state(msg);
@@ -1231,7 +1231,7 @@ UPB_INLINE struct envoy_admin_v3_UpdateFailureState* envoy_admin_v3_ClustersConf
 }
 UPB_INLINE void envoy_admin_v3_ClustersConfigDump_DynamicCluster_set_client_status(envoy_admin_v3_ClustersConfigDump_DynamicCluster *msg, int32_t value) {
   const upb_MiniTableField field = {5, UPB_SIZE(24, 12), 0, kUpb_NoSub, 5, (int)kUpb_FieldMode_Scalar | (int)kUpb_LabelFlags_IsAlternate | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }

 /* envoy.admin.v3.RoutesConfigDump */
@@ -1467,7 +1467,7 @@ UPB_INLINE bool envoy_admin_v3_RoutesConfigDump_StaticRouteConfig_has_last_updat

 UPB_INLINE void envoy_admin_v3_RoutesConfigDump_StaticRouteConfig_set_route_config(envoy_admin_v3_RoutesConfigDump_StaticRouteConfig *msg, struct google_protobuf_Any* value) {
   const upb_MiniTableField field = {1, UPB_SIZE(12, 16), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_RoutesConfigDump_StaticRouteConfig_mutable_route_config(envoy_admin_v3_RoutesConfigDump_StaticRouteConfig* msg, upb_Arena* arena) {
   struct google_protobuf_Any* sub = (struct google_protobuf_Any*)envoy_admin_v3_RoutesConfigDump_StaticRouteConfig_route_config(msg);
@@ -1479,7 +1479,7 @@ UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_RoutesConfigDump_StaticRou
 }
 UPB_INLINE void envoy_admin_v3_RoutesConfigDump_StaticRouteConfig_set_last_updated(envoy_admin_v3_RoutesConfigDump_StaticRouteConfig *msg, struct google_protobuf_Timestamp* value) {
   const upb_MiniTableField field = {2, UPB_SIZE(16, 24), 65, 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_RoutesConfigDump_StaticRouteConfig_mutable_last_updated(envoy_admin_v3_RoutesConfigDump_StaticRouteConfig* msg, upb_Arena* arena) {
   struct google_protobuf_Timestamp* sub = (struct google_protobuf_Timestamp*)envoy_admin_v3_RoutesConfigDump_StaticRouteConfig_last_updated(msg);
@@ -1601,11 +1601,11 @@ UPB_INLINE int32_t envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig_client_sta

 UPB_INLINE void envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig_set_version_info(envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig *msg, upb_StringView value) {
   const upb_MiniTableField field = {1, UPB_SIZE(28, 16), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE void envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig_set_route_config(envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig *msg, struct google_protobuf_Any* value) {
   const upb_MiniTableField field = {2, UPB_SIZE(12, 32), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig_mutable_route_config(envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig* msg, upb_Arena* arena) {
   struct google_protobuf_Any* sub = (struct google_protobuf_Any*)envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig_route_config(msg);
@@ -1617,7 +1617,7 @@ UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_RoutesConfigDump_DynamicRo
 }
 UPB_INLINE void envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig_set_last_updated(envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig *msg, struct google_protobuf_Timestamp* value) {
   const upb_MiniTableField field = {3, UPB_SIZE(16, 40), 65, 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig_mutable_last_updated(envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig* msg, upb_Arena* arena) {
   struct google_protobuf_Timestamp* sub = (struct google_protobuf_Timestamp*)envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig_last_updated(msg);
@@ -1629,7 +1629,7 @@ UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_RoutesConfigDump_Dyn
 }
 UPB_INLINE void envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig_set_error_state(envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig *msg, envoy_admin_v3_UpdateFailureState* value) {
   const upb_MiniTableField field = {4, UPB_SIZE(20, 48), 66, 2, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct envoy_admin_v3_UpdateFailureState* envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig_mutable_error_state(envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig* msg, upb_Arena* arena) {
   struct envoy_admin_v3_UpdateFailureState* sub = (struct envoy_admin_v3_UpdateFailureState*)envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig_error_state(msg);
@@ -1641,7 +1641,7 @@ UPB_INLINE struct envoy_admin_v3_UpdateFailureState* envoy_admin_v3_RoutesConfig
 }
 UPB_INLINE void envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig_set_client_status(envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig *msg, int32_t value) {
   const upb_MiniTableField field = {5, UPB_SIZE(24, 12), 0, kUpb_NoSub, 5, (int)kUpb_FieldMode_Scalar | (int)kUpb_LabelFlags_IsAlternate | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }

 /* envoy.admin.v3.ScopedRoutesConfigDump */
@@ -1905,7 +1905,7 @@ UPB_INLINE bool envoy_admin_v3_ScopedRoutesConfigDump_InlineScopedRouteConfigs_h

 UPB_INLINE void envoy_admin_v3_ScopedRoutesConfigDump_InlineScopedRouteConfigs_set_name(envoy_admin_v3_ScopedRoutesConfigDump_InlineScopedRouteConfigs *msg, upb_StringView value) {
   const upb_MiniTableField field = {1, UPB_SIZE(20, 16), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Any** envoy_admin_v3_ScopedRoutesConfigDump_InlineScopedRouteConfigs_mutable_scoped_route_configs(envoy_admin_v3_ScopedRoutesConfigDump_InlineScopedRouteConfigs* msg, size_t* size) {
   upb_MiniTableField field = {2, UPB_SIZE(12, 32), 0, 0, 11, (int)kUpb_FieldMode_Array | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
@@ -1939,7 +1939,7 @@ UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_ScopedRoutesConfigDump_Inl
 }
 UPB_INLINE void envoy_admin_v3_ScopedRoutesConfigDump_InlineScopedRouteConfigs_set_last_updated(envoy_admin_v3_ScopedRoutesConfigDump_InlineScopedRouteConfigs *msg, struct google_protobuf_Timestamp* value) {
   const upb_MiniTableField field = {3, UPB_SIZE(16, 40), 64, 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_ScopedRoutesConfigDump_InlineScopedRouteConfigs_mutable_last_updated(envoy_admin_v3_ScopedRoutesConfigDump_InlineScopedRouteConfigs* msg, upb_Arena* arena) {
   struct google_protobuf_Timestamp* sub = (struct google_protobuf_Timestamp*)envoy_admin_v3_ScopedRoutesConfigDump_InlineScopedRouteConfigs_last_updated(msg);
@@ -2089,11 +2089,11 @@ UPB_INLINE int32_t envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfi

 UPB_INLINE void envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs_set_name(envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs *msg, upb_StringView value) {
   const upb_MiniTableField field = {1, UPB_SIZE(28, 16), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE void envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs_set_version_info(envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs *msg, upb_StringView value) {
   const upb_MiniTableField field = {2, UPB_SIZE(36, 32), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Any** envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs_mutable_scoped_route_configs(envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs* msg, size_t* size) {
   upb_MiniTableField field = {3, UPB_SIZE(12, 48), 0, 0, 11, (int)kUpb_FieldMode_Array | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
@@ -2127,7 +2127,7 @@ UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_ScopedRoutesConfigDump_Dyn
 }
 UPB_INLINE void envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs_set_last_updated(envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs *msg, struct google_protobuf_Timestamp* value) {
   const upb_MiniTableField field = {4, UPB_SIZE(16, 56), 64, 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs_mutable_last_updated(envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs* msg, upb_Arena* arena) {
   struct google_protobuf_Timestamp* sub = (struct google_protobuf_Timestamp*)envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs_last_updated(msg);
@@ -2139,7 +2139,7 @@ UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_ScopedRoutesConfigDu
 }
 UPB_INLINE void envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs_set_error_state(envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs *msg, envoy_admin_v3_UpdateFailureState* value) {
   const upb_MiniTableField field = {5, UPB_SIZE(20, 64), 65, 2, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct envoy_admin_v3_UpdateFailureState* envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs_mutable_error_state(envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs* msg, upb_Arena* arena) {
   struct envoy_admin_v3_UpdateFailureState* sub = (struct envoy_admin_v3_UpdateFailureState*)envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs_error_state(msg);
@@ -2151,7 +2151,7 @@ UPB_INLINE struct envoy_admin_v3_UpdateFailureState* envoy_admin_v3_ScopedRoutes
 }
 UPB_INLINE void envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs_set_client_status(envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs *msg, int32_t value) {
   const upb_MiniTableField field = {6, UPB_SIZE(24, 12), 0, kUpb_NoSub, 5, (int)kUpb_FieldMode_Scalar | (int)kUpb_LabelFlags_IsAlternate | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }

 /* envoy.admin.v3.EndpointsConfigDump */
@@ -2387,7 +2387,7 @@ UPB_INLINE bool envoy_admin_v3_EndpointsConfigDump_StaticEndpointConfig_has_last

 UPB_INLINE void envoy_admin_v3_EndpointsConfigDump_StaticEndpointConfig_set_endpoint_config(envoy_admin_v3_EndpointsConfigDump_StaticEndpointConfig *msg, struct google_protobuf_Any* value) {
   const upb_MiniTableField field = {1, UPB_SIZE(12, 16), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_EndpointsConfigDump_StaticEndpointConfig_mutable_endpoint_config(envoy_admin_v3_EndpointsConfigDump_StaticEndpointConfig* msg, upb_Arena* arena) {
   struct google_protobuf_Any* sub = (struct google_protobuf_Any*)envoy_admin_v3_EndpointsConfigDump_StaticEndpointConfig_endpoint_config(msg);
@@ -2399,7 +2399,7 @@ UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_EndpointsConfigDump_Static
 }
 UPB_INLINE void envoy_admin_v3_EndpointsConfigDump_StaticEndpointConfig_set_last_updated(envoy_admin_v3_EndpointsConfigDump_StaticEndpointConfig *msg, struct google_protobuf_Timestamp* value) {
   const upb_MiniTableField field = {2, UPB_SIZE(16, 24), 65, 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_EndpointsConfigDump_StaticEndpointConfig_mutable_last_updated(envoy_admin_v3_EndpointsConfigDump_StaticEndpointConfig* msg, upb_Arena* arena) {
   struct google_protobuf_Timestamp* sub = (struct google_protobuf_Timestamp*)envoy_admin_v3_EndpointsConfigDump_StaticEndpointConfig_last_updated(msg);
@@ -2521,11 +2521,11 @@ UPB_INLINE int32_t envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig_clie

 UPB_INLINE void envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig_set_version_info(envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig *msg, upb_StringView value) {
   const upb_MiniTableField field = {1, UPB_SIZE(28, 16), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE void envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig_set_endpoint_config(envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig *msg, struct google_protobuf_Any* value) {
   const upb_MiniTableField field = {2, UPB_SIZE(12, 32), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig_mutable_endpoint_config(envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig* msg, upb_Arena* arena) {
   struct google_protobuf_Any* sub = (struct google_protobuf_Any*)envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig_endpoint_config(msg);
@@ -2537,7 +2537,7 @@ UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_EndpointsConfigDump_Dynami
 }
 UPB_INLINE void envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig_set_last_updated(envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig *msg, struct google_protobuf_Timestamp* value) {
   const upb_MiniTableField field = {3, UPB_SIZE(16, 40), 65, 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig_mutable_last_updated(envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig* msg, upb_Arena* arena) {
   struct google_protobuf_Timestamp* sub = (struct google_protobuf_Timestamp*)envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig_last_updated(msg);
@@ -2549,7 +2549,7 @@ UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_EndpointsConfigDump_
 }
 UPB_INLINE void envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig_set_error_state(envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig *msg, envoy_admin_v3_UpdateFailureState* value) {
   const upb_MiniTableField field = {4, UPB_SIZE(20, 48), 66, 2, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct envoy_admin_v3_UpdateFailureState* envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig_mutable_error_state(envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig* msg, upb_Arena* arena) {
   struct envoy_admin_v3_UpdateFailureState* sub = (struct envoy_admin_v3_UpdateFailureState*)envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig_error_state(msg);
@@ -2561,7 +2561,7 @@ UPB_INLINE struct envoy_admin_v3_UpdateFailureState* envoy_admin_v3_EndpointsCon
 }
 UPB_INLINE void envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig_set_client_status(envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig *msg, int32_t value) {
   const upb_MiniTableField field = {5, UPB_SIZE(24, 12), 0, kUpb_NoSub, 5, (int)kUpb_FieldMode_Scalar | (int)kUpb_LabelFlags_IsAlternate | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }

 /* envoy.admin.v3.EcdsConfigDump */
@@ -2775,11 +2775,11 @@ UPB_INLINE int32_t envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig_client_status(

 UPB_INLINE void envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig_set_version_info(envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig *msg, upb_StringView value) {
   const upb_MiniTableField field = {1, UPB_SIZE(28, 16), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE void envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig_set_ecds_filter(envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig *msg, struct google_protobuf_Any* value) {
   const upb_MiniTableField field = {2, UPB_SIZE(12, 32), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig_mutable_ecds_filter(envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig* msg, upb_Arena* arena) {
   struct google_protobuf_Any* sub = (struct google_protobuf_Any*)envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig_ecds_filter(msg);
@@ -2791,7 +2791,7 @@ UPB_INLINE struct google_protobuf_Any* envoy_admin_v3_EcdsConfigDump_EcdsFilterC
 }
 UPB_INLINE void envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig_set_last_updated(envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig *msg, struct google_protobuf_Timestamp* value) {
   const upb_MiniTableField field = {3, UPB_SIZE(16, 40), 65, 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig_mutable_last_updated(envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig* msg, upb_Arena* arena) {
   struct google_protobuf_Timestamp* sub = (struct google_protobuf_Timestamp*)envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig_last_updated(msg);
@@ -2803,7 +2803,7 @@ UPB_INLINE struct google_protobuf_Timestamp* envoy_admin_v3_EcdsConfigDump_EcdsF
 }
 UPB_INLINE void envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig_set_error_state(envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig *msg, envoy_admin_v3_UpdateFailureState* value) {
   const upb_MiniTableField field = {4, UPB_SIZE(20, 48), 66, 2, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct envoy_admin_v3_UpdateFailureState* envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig_mutable_error_state(envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig* msg, upb_Arena* arena) {
   struct envoy_admin_v3_UpdateFailureState* sub = (struct envoy_admin_v3_UpdateFailureState*)envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig_error_state(msg);
@@ -2815,7 +2815,7 @@ UPB_INLINE struct envoy_admin_v3_UpdateFailureState* envoy_admin_v3_EcdsConfigDu
 }
 UPB_INLINE void envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig_set_client_status(envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig *msg, int32_t value) {
   const upb_MiniTableField field = {5, UPB_SIZE(24, 12), 0, kUpb_NoSub, 5, (int)kUpb_FieldMode_Scalar | (int)kUpb_LabelFlags_IsAlternate | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }

 #ifdef __cplusplus

@@ -32,6 +32,9 @@ const upb_MiniTable envoy__admin__v3__UpdateFailureState_msg_init = {
   &envoy_admin_v3_UpdateFailureState_submsgs[0],
   &envoy_admin_v3_UpdateFailureState__fields[0],
   UPB_SIZE(40, 64), 4, kUpb_ExtMode_NonExtendable, 4, UPB_FASTTABLE_MASK(56), 0,
+#ifdef UPB_TRACING_ENABLED
+  "envoy.admin.v3.UpdateFailureState",
+#endif
   UPB_FASTTABLE_INIT({
     {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
     {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
@@ -59,6 +62,9 @@ const upb_MiniTable envoy__admin__v3__ListenersConfigDump_msg_init = {
   &envoy_admin_v3_ListenersConfigDump_submsgs[0],
   &envoy_admin_v3_ListenersConfigDump__fields[0],
   UPB_SIZE(24, 40), 3, kUpb_ExtMode_NonExtendable, 3, UPB_FASTTABLE_MASK(24), 0,
+#ifdef UPB_TRACING_ENABLED
+  "envoy.admin.v3.ListenersConfigDump",
+#endif
   UPB_FASTTABLE_INIT({
     {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
     {0x000800003f00000a, &upb_pss_1bt},
@@ -81,6 +87,9 @@ const upb_MiniTable envoy__admin__v3__ListenersConfigDump__StaticListener_msg_in
   &envoy_admin_v3_ListenersConfigDump_StaticListener_submsgs[0],
   &envoy_admin_v3_ListenersConfigDump_StaticListener__fields[0],
   UPB_SIZE(24, 32), 2, kUpb_ExtMode_NonExtendable, 2, UPB_FASTTABLE_MASK(255), 0,
+#ifdef UPB_TRACING_ENABLED
+  "envoy.admin.v3.ListenersConfigDump.StaticListener",
+#endif
 };

 static const upb_MiniTableSub envoy_admin_v3_ListenersConfigDump_DynamicListenerState_submsgs[2] = {
@@ -98,6 +107,9 @@ const upb_MiniTable envoy__admin__v3__ListenersConfigDump__DynamicListenerState_
   &envoy_admin_v3_ListenersConfigDump_DynamicListenerState_submsgs[0],
   &envoy_admin_v3_ListenersConfigDump_DynamicListenerState__fields[0],
   UPB_SIZE(32, 48), 3, kUpb_ExtMode_NonExtendable, 3, UPB_FASTTABLE_MASK(8), 0,
+#ifdef UPB_TRACING_ENABLED
+  "envoy.admin.v3.ListenersConfigDump.DynamicListenerState",
+#endif
   UPB_FASTTABLE_INIT({
     {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
     {0x001000003f00000a, &upb_pss_1bt},
@@ -124,6 +136,9 @@ const upb_MiniTable envoy__admin__v3__ListenersConfigDump__DynamicListener_msg_i
   &envoy_admin_v3_ListenersConfigDump_DynamicListener_submsgs[0],
   &envoy_admin_v3_ListenersConfigDump_DynamicListener__fields[0],
   UPB_SIZE(40, 64), 6, kUpb_ExtMode_NonExtendable, 6, UPB_FASTTABLE_MASK(56), 0,
+#ifdef UPB_TRACING_ENABLED
+  "envoy.admin.v3.ListenersConfigDump.DynamicListener",
+#endif
   UPB_FASTTABLE_INIT({
     {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
     {0x001000003f00000a, &upb_pss_1bt},
@@ -153,6 +168,9 @@ const upb_MiniTable envoy__admin__v3__ClustersConfigDump_msg_init = {
   &envoy_admin_v3_ClustersConfigDump_submsgs[0],
   &envoy_admin_v3_ClustersConfigDump__fields[0],
   UPB_SIZE(32, 48), 4, kUpb_ExtMode_NonExtendable, 4, UPB_FASTTABLE_MASK(56), 0,
+#ifdef UPB_TRACING_ENABLED
+  "envoy.admin.v3.ClustersConfigDump",
+#endif
   UPB_FASTTABLE_INIT({
     {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
     {0x000800003f00000a, &upb_pss_1bt},
@@ -179,6 +197,9 @@ const upb_MiniTable envoy__admin__v3__ClustersConfigDump__StaticCluster_msg_init
   &envoy_admin_v3_ClustersConfigDump_StaticCluster_submsgs[0],
   &envoy_admin_v3_ClustersConfigDump_StaticCluster__fields[0],
   UPB_SIZE(24, 32), 2, kUpb_ExtMode_NonExtendable, 2, UPB_FASTTABLE_MASK(255), 0,
+#ifdef UPB_TRACING_ENABLED
+  "envoy.admin.v3.ClustersConfigDump.StaticCluster",
+#endif
 };

 static const upb_MiniTableSub envoy_admin_v3_ClustersConfigDump_DynamicCluster_submsgs[3] = {
@@ -199,6 +220,9 @@ const upb_MiniTable envoy__admin__v3__ClustersConfigDump__DynamicCluster_msg_ini
   &envoy_admin_v3_ClustersConfigDump_DynamicCluster_submsgs[0],
   &envoy_admin_v3_ClustersConfigDump_DynamicCluster__fields[0],
   UPB_SIZE(40, 56), 5, kUpb_ExtMode_NonExtendable, 5, UPB_FASTTABLE_MASK(56), 0,
+#ifdef UPB_TRACING_ENABLED
+  "envoy.admin.v3.ClustersConfigDump.DynamicCluster",
+#endif
   UPB_FASTTABLE_INIT({
     {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
     {0x001000003f00000a, &upb_pss_1bt},
@@ -225,6 +249,9 @@ const upb_MiniTable envoy__admin__v3__RoutesConfigDump_msg_init = {
   &envoy_admin_v3_RoutesConfigDump_submsgs[0],
   &envoy_admin_v3_RoutesConfigDump__fields[0],
   UPB_SIZE(16, 24), 2, kUpb_ExtMode_NonExtendable, 0, UPB_FASTTABLE_MASK(24), 0,
+#ifdef UPB_TRACING_ENABLED
+  "envoy.admin.v3.RoutesConfigDump",
+#endif
   UPB_FASTTABLE_INIT({
     {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
     {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
@@ -247,6 +274,9 @@ const upb_MiniTable envoy__admin__v3__RoutesConfigDump__StaticRouteConfig_msg_in
   &envoy_admin_v3_RoutesConfigDump_StaticRouteConfig_submsgs[0],
   &envoy_admin_v3_RoutesConfigDump_StaticRouteConfig__fields[0],
   UPB_SIZE(24, 32), 2, kUpb_ExtMode_NonExtendable, 2, UPB_FASTTABLE_MASK(255), 0,
+#ifdef UPB_TRACING_ENABLED
+  "envoy.admin.v3.RoutesConfigDump.StaticRouteConfig",
+#endif
 };

 static const upb_MiniTableSub envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig_submsgs[3] = {
@@ -267,6 +297,9 @@ const upb_MiniTable envoy__admin__v3__RoutesConfigDump__DynamicRouteConfig_msg_i
   &envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig_submsgs[0],
   &envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig__fields[0],
   UPB_SIZE(40, 56), 5, kUpb_ExtMode_NonExtendable, 5, UPB_FASTTABLE_MASK(56), 0,
+#ifdef UPB_TRACING_ENABLED
+  "envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig",
+#endif
   UPB_FASTTABLE_INIT({
     {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
     {0x001000003f00000a, &upb_pss_1bt},
@@ -293,6 +326,9 @@ const upb_MiniTable envoy__admin__v3__ScopedRoutesConfigDump_msg_init = {
   &envoy_admin_v3_ScopedRoutesConfigDump_submsgs[0],
   &envoy_admin_v3_ScopedRoutesConfigDump__fields[0],
   UPB_SIZE(16, 24), 2, kUpb_ExtMode_NonExtendable, 2, UPB_FASTTABLE_MASK(24), 0,
+#ifdef UPB_TRACING_ENABLED
+  "envoy.admin.v3.ScopedRoutesConfigDump",
+#endif
   UPB_FASTTABLE_INIT({
     {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
     {0x000800003f00000a, &upb_prm_1bt_max64b},
@@ -316,6 +352,9 @@ const upb_MiniTable envoy__admin__v3__ScopedRoutesConfigDump__InlineScopedRouteC
   &envoy_admin_v3_ScopedRoutesConfigDump_InlineScopedRouteConfigs_submsgs[0],
   &envoy_admin_v3_ScopedRoutesConfigDump_InlineScopedRouteConfigs__fields[0],
   UPB_SIZE(32, 48), 3, kUpb_ExtMode_NonExtendable, 3, UPB_FASTTABLE_MASK(24), 0,
+#ifdef UPB_TRACING_ENABLED
+  "envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs",
+#endif
   UPB_FASTTABLE_INIT({
     {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
     {0x001000003f00000a, &upb_pss_1bt},
@@ -343,6 +382,9 @@ const upb_MiniTable envoy__admin__v3__ScopedRoutesConfigDump__DynamicScopedRoute
   &envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs_submsgs[0],
   &envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs__fields[0],
   UPB_SIZE(48, 72), 6, kUpb_ExtMode_NonExtendable, 6, UPB_FASTTABLE_MASK(56), 0,
+#ifdef UPB_TRACING_ENABLED
+  "envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs",
+#endif
   UPB_FASTTABLE_INIT({
     {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
     {0x001000003f00000a, &upb_pss_1bt},
@@ -369,6 +411,9 @@ const upb_MiniTable envoy__admin__v3__EndpointsConfigDump_msg_init = {
   &envoy_admin_v3_EndpointsConfigDump_submsgs[0],
   &envoy_admin_v3_EndpointsConfigDump__fields[0],
   UPB_SIZE(16, 24), 2, kUpb_ExtMode_NonExtendable, 0, UPB_FASTTABLE_MASK(24), 0,
+#ifdef UPB_TRACING_ENABLED
+  "envoy.admin.v3.EndpointsConfigDump",
+#endif
   UPB_FASTTABLE_INIT({
|
||||
{0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
|
||||
{0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
|
||||
|
@ -391,6 +436,9 @@ const upb_MiniTable envoy__admin__v3__EndpointsConfigDump__StaticEndpointConfig_
|
|||
&envoy_admin_v3_EndpointsConfigDump_StaticEndpointConfig_submsgs[0],
|
||||
&envoy_admin_v3_EndpointsConfigDump_StaticEndpointConfig__fields[0],
|
||||
UPB_SIZE(24, 32), 2, kUpb_ExtMode_NonExtendable, 2, UPB_FASTTABLE_MASK(255), 0,
|
||||
#ifdef UPB_TRACING_ENABLED
|
||||
"envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig",
|
||||
#endif
|
||||
};
|
||||
|
||||
static const upb_MiniTableSub envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig_submsgs[3] = {
|
||||
|
@ -411,6 +459,9 @@ const upb_MiniTable envoy__admin__v3__EndpointsConfigDump__DynamicEndpointConfig
|
|||
&envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig_submsgs[0],
|
||||
&envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig__fields[0],
|
||||
UPB_SIZE(40, 56), 5, kUpb_ExtMode_NonExtendable, 5, UPB_FASTTABLE_MASK(56), 0,
|
||||
#ifdef UPB_TRACING_ENABLED
|
||||
"envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig",
|
||||
#endif
|
||||
UPB_FASTTABLE_INIT({
|
||||
{0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
|
||||
{0x001000003f00000a, &upb_pss_1bt},
|
||||
|
@ -435,6 +486,9 @@ const upb_MiniTable envoy__admin__v3__EcdsConfigDump_msg_init = {
|
|||
&envoy_admin_v3_EcdsConfigDump_submsgs[0],
|
||||
&envoy_admin_v3_EcdsConfigDump__fields[0],
|
||||
16, 1, kUpb_ExtMode_NonExtendable, 1, UPB_FASTTABLE_MASK(8), 0,
|
||||
#ifdef UPB_TRACING_ENABLED
|
||||
"envoy.admin.v3.EcdsConfigDump",
|
||||
#endif
|
||||
UPB_FASTTABLE_INIT({
|
||||
{0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
|
||||
{0x000800003f00000a, &upb_prm_1bt_max64b},
|
||||
|
@ -459,6 +513,9 @@ const upb_MiniTable envoy__admin__v3__EcdsConfigDump__EcdsFilterConfig_msg_init
|
|||
&envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig_submsgs[0],
|
||||
&envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig__fields[0],
|
||||
UPB_SIZE(40, 56), 5, kUpb_ExtMode_NonExtendable, 5, UPB_FASTTABLE_MASK(56), 0,
|
||||
#ifdef UPB_TRACING_ENABLED
|
||||
"envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig",
|
||||
#endif
|
||||
UPB_FASTTABLE_INIT({
|
||||
{0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
|
||||
{0x001000003f00000a, &upb_pss_1bt},
|
||||
|
|
|
@ -209,7 +209,7 @@ UPB_INLINE upb_Array* _envoy_admin_v3_UnreadyTargetsDumps_UnreadyTargetsDump_tar

UPB_INLINE void envoy_admin_v3_UnreadyTargetsDumps_UnreadyTargetsDump_set_name(envoy_admin_v3_UnreadyTargetsDumps_UnreadyTargetsDump *msg, upb_StringView value) {
  const upb_MiniTableField field = {1, UPB_SIZE(12, 8), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
}
UPB_INLINE upb_StringView* envoy_admin_v3_UnreadyTargetsDumps_UnreadyTargetsDump_mutable_target_names(envoy_admin_v3_UnreadyTargetsDumps_UnreadyTargetsDump* msg, size_t* size) {
  upb_MiniTableField field = {2, UPB_SIZE(8, 24), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Array | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};

@ -25,6 +25,9 @@ const upb_MiniTable envoy__admin__v3__UnreadyTargetsDumps_msg_init = {
  &envoy_admin_v3_UnreadyTargetsDumps_submsgs[0],
  &envoy_admin_v3_UnreadyTargetsDumps__fields[0],
  16, 1, kUpb_ExtMode_NonExtendable, 1, UPB_FASTTABLE_MASK(8), 0,
#ifdef UPB_TRACING_ENABLED
  "envoy.admin.v3.UnreadyTargetsDumps",
#endif
  UPB_FASTTABLE_INIT({
    {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
    {0x000800003f00000a, &upb_prm_1bt_max64b},

@ -40,6 +43,9 @@ const upb_MiniTable envoy__admin__v3__UnreadyTargetsDumps__UnreadyTargetsDump_ms
  NULL,
  &envoy_admin_v3_UnreadyTargetsDumps_UnreadyTargetsDump__fields[0],
  UPB_SIZE(24, 32), 2, kUpb_ExtMode_NonExtendable, 2, UPB_FASTTABLE_MASK(24), 0,
#ifdef UPB_TRACING_ENABLED
  "envoy.admin.v3.UnreadyTargetsDumps.UnreadyTargetsDump",
#endif
  UPB_FASTTABLE_INIT({
    {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
    {0x000800003f00000a, &upb_pss_1bt},

@ -228,11 +228,11 @@ UPB_INLINE upb_Array* _envoy_admin_v3_ListenerStatus_additional_local_addresses_

UPB_INLINE void envoy_admin_v3_ListenerStatus_set_name(envoy_admin_v3_ListenerStatus *msg, upb_StringView value) {
  const upb_MiniTableField field = {1, UPB_SIZE(20, 16), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
}
UPB_INLINE void envoy_admin_v3_ListenerStatus_set_local_address(envoy_admin_v3_ListenerStatus *msg, struct envoy_config_core_v3_Address* value) {
  const upb_MiniTableField field = {2, UPB_SIZE(12, 32), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
}
UPB_INLINE struct envoy_config_core_v3_Address* envoy_admin_v3_ListenerStatus_mutable_local_address(envoy_admin_v3_ListenerStatus* msg, upb_Arena* arena) {
  struct envoy_config_core_v3_Address* sub = (struct envoy_config_core_v3_Address*)envoy_admin_v3_ListenerStatus_local_address(msg);

@ -27,6 +27,9 @@ const upb_MiniTable envoy__admin__v3__Listeners_msg_init = {
  &envoy_admin_v3_Listeners_submsgs[0],
  &envoy_admin_v3_Listeners__fields[0],
  16, 1, kUpb_ExtMode_NonExtendable, 1, UPB_FASTTABLE_MASK(8), 0,
#ifdef UPB_TRACING_ENABLED
  "envoy.admin.v3.Listeners",
#endif
  UPB_FASTTABLE_INIT({
    {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
    {0x000800003f00000a, &upb_prm_1bt_max64b},

@ -48,6 +51,9 @@ const upb_MiniTable envoy__admin__v3__ListenerStatus_msg_init = {
  &envoy_admin_v3_ListenerStatus_submsgs[0],
  &envoy_admin_v3_ListenerStatus__fields[0],
  UPB_SIZE(32, 48), 3, kUpb_ExtMode_NonExtendable, 3, UPB_FASTTABLE_MASK(24), 0,
#ifdef UPB_TRACING_ENABLED
  "envoy.admin.v3.ListenerStatus",
#endif
  UPB_FASTTABLE_INIT({
    {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
    {0x001000003f00000a, &upb_pss_1bt},

@ -137,27 +137,27 @@ UPB_INLINE uint64_t envoy_admin_v3_Memory_total_physical_bytes(const envoy_admin

UPB_INLINE void envoy_admin_v3_Memory_set_allocated(envoy_admin_v3_Memory *msg, uint64_t value) {
  const upb_MiniTableField field = {1, 8, 0, kUpb_NoSub, 4, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_8Byte << kUpb_FieldRep_Shift)};
  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
}
UPB_INLINE void envoy_admin_v3_Memory_set_heap_size(envoy_admin_v3_Memory *msg, uint64_t value) {
  const upb_MiniTableField field = {2, 16, 0, kUpb_NoSub, 4, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_8Byte << kUpb_FieldRep_Shift)};
  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
}
UPB_INLINE void envoy_admin_v3_Memory_set_pageheap_unmapped(envoy_admin_v3_Memory *msg, uint64_t value) {
  const upb_MiniTableField field = {3, 24, 0, kUpb_NoSub, 4, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_8Byte << kUpb_FieldRep_Shift)};
  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
}
UPB_INLINE void envoy_admin_v3_Memory_set_pageheap_free(envoy_admin_v3_Memory *msg, uint64_t value) {
  const upb_MiniTableField field = {4, 32, 0, kUpb_NoSub, 4, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_8Byte << kUpb_FieldRep_Shift)};
  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
}
UPB_INLINE void envoy_admin_v3_Memory_set_total_thread_cache(envoy_admin_v3_Memory *msg, uint64_t value) {
  const upb_MiniTableField field = {5, 40, 0, kUpb_NoSub, 4, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_8Byte << kUpb_FieldRep_Shift)};
  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
}
UPB_INLINE void envoy_admin_v3_Memory_set_total_physical_bytes(envoy_admin_v3_Memory *msg, uint64_t value) {
  const upb_MiniTableField field = {6, 48, 0, kUpb_NoSub, 4, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_8Byte << kUpb_FieldRep_Shift)};
  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
}

#ifdef __cplusplus
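A minimal usage sketch (editor's addition, not part of the diff): it drives the envoy_admin_v3_Memory setters shown above, and it assumes the standard upb codegen companions (envoy_admin_v3_Memory_new, envoy_admin_v3_Memory_serialize) and the generated header path, none of which appear in this excerpt.

/* Hypothetical example: build and serialize an envoy.admin.v3.Memory message. */
#include "envoy/admin/v3/memory.upb.h"  /* assumed generated header path */

static void build_memory_dump(void) {
  upb_Arena* arena = upb_Arena_New();
  envoy_admin_v3_Memory* msg = envoy_admin_v3_Memory_new(arena);  /* assumed generated constructor */
  /* Setters from the diff above; they now write through upb_Message_SetBaseField(). */
  envoy_admin_v3_Memory_set_allocated(msg, 1024);
  envoy_admin_v3_Memory_set_heap_size(msg, 4096);
  size_t len = 0;
  char* bytes = envoy_admin_v3_Memory_serialize(msg, arena, &len);  /* assumed generated serializer */
  (void)bytes;  /* hand the buffer to the caller as needed */
  upb_Arena_Free(arena);
}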

@ -27,6 +27,9 @@ const upb_MiniTable envoy__admin__v3__Memory_msg_init = {
  NULL,
  &envoy_admin_v3_Memory__fields[0],
  56, 6, kUpb_ExtMode_NonExtendable, 6, UPB_FASTTABLE_MASK(56), 0,
#ifdef UPB_TRACING_ENABLED
  "envoy.admin.v3.Memory",
#endif
  UPB_FASTTABLE_INIT({
    {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
    {0x000800003f000008, &upb_psv8_1bt},

@ -106,15 +106,15 @@ UPB_INLINE upb_StringView envoy_admin_v3_SimpleMetric_name(const envoy_admin_v3_

UPB_INLINE void envoy_admin_v3_SimpleMetric_set_type(envoy_admin_v3_SimpleMetric *msg, int32_t value) {
  const upb_MiniTableField field = {1, 8, 0, kUpb_NoSub, 5, (int)kUpb_FieldMode_Scalar | (int)kUpb_LabelFlags_IsAlternate | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)};
  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
}
UPB_INLINE void envoy_admin_v3_SimpleMetric_set_value(envoy_admin_v3_SimpleMetric *msg, uint64_t value) {
  const upb_MiniTableField field = {2, 16, 0, kUpb_NoSub, 4, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_8Byte << kUpb_FieldRep_Shift)};
  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
}
UPB_INLINE void envoy_admin_v3_SimpleMetric_set_name(envoy_admin_v3_SimpleMetric *msg, upb_StringView value) {
  const upb_MiniTableField field = {3, 24, 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
}

#ifdef __cplusplus

@ -24,6 +24,9 @@ const upb_MiniTable envoy__admin__v3__SimpleMetric_msg_init = {
  NULL,
  &envoy_admin_v3_SimpleMetric__fields[0],
  UPB_SIZE(32, 40), 3, kUpb_ExtMode_NonExtendable, 3, UPB_FASTTABLE_MASK(24), 0,
#ifdef UPB_TRACING_ENABLED
  "envoy.admin.v3.SimpleMetric",
#endif
  UPB_FASTTABLE_INIT({
    {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
    {0x000800003f000008, &upb_psv4_1bt},

@ -101,15 +101,15 @@ UPB_INLINE uint64_t envoy_admin_v3_MutexStats_lifetime_wait_cycles(const envoy_a

UPB_INLINE void envoy_admin_v3_MutexStats_set_num_contentions(envoy_admin_v3_MutexStats *msg, uint64_t value) {
  const upb_MiniTableField field = {1, 8, 0, kUpb_NoSub, 4, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_8Byte << kUpb_FieldRep_Shift)};
  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
}
UPB_INLINE void envoy_admin_v3_MutexStats_set_current_wait_cycles(envoy_admin_v3_MutexStats *msg, uint64_t value) {
  const upb_MiniTableField field = {2, 16, 0, kUpb_NoSub, 4, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_8Byte << kUpb_FieldRep_Shift)};
  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
}
UPB_INLINE void envoy_admin_v3_MutexStats_set_lifetime_wait_cycles(envoy_admin_v3_MutexStats *msg, uint64_t value) {
  const upb_MiniTableField field = {3, 24, 0, kUpb_NoSub, 4, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_8Byte << kUpb_FieldRep_Shift)};
  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
}

#ifdef __cplusplus

@ -24,6 +24,9 @@ const upb_MiniTable envoy__admin__v3__MutexStats_msg_init = {
  NULL,
  &envoy_admin_v3_MutexStats__fields[0],
  32, 3, kUpb_ExtMode_NonExtendable, 3, UPB_FASTTABLE_MASK(24), 0,
#ifdef UPB_TRACING_ENABLED
  "envoy.admin.v3.MutexStats",
#endif
  UPB_FASTTABLE_INIT({
    {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
    {0x000800003f000008, &upb_psv8_1bt},
|
|
|
@ -193,15 +193,15 @@ UPB_INLINE bool envoy_admin_v3_ServerInfo_has_node(const envoy_admin_v3_ServerIn
|
|||
|
||||
UPB_INLINE void envoy_admin_v3_ServerInfo_set_version(envoy_admin_v3_ServerInfo *msg, upb_StringView value) {
|
||||
const upb_MiniTableField field = {1, UPB_SIZE(32, 16), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_ServerInfo_set_state(envoy_admin_v3_ServerInfo *msg, int32_t value) {
|
||||
const upb_MiniTableField field = {2, 12, 0, kUpb_NoSub, 5, (int)kUpb_FieldMode_Scalar | (int)kUpb_LabelFlags_IsAlternate | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_ServerInfo_set_uptime_current_epoch(envoy_admin_v3_ServerInfo *msg, struct google_protobuf_Duration* value) {
|
||||
const upb_MiniTableField field = {3, UPB_SIZE(16, 32), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE struct google_protobuf_Duration* envoy_admin_v3_ServerInfo_mutable_uptime_current_epoch(envoy_admin_v3_ServerInfo* msg, upb_Arena* arena) {
|
||||
struct google_protobuf_Duration* sub = (struct google_protobuf_Duration*)envoy_admin_v3_ServerInfo_uptime_current_epoch(msg);
|
||||
|
@ -213,7 +213,7 @@ UPB_INLINE struct google_protobuf_Duration* envoy_admin_v3_ServerInfo_mutable_up
|
|||
}
|
||||
UPB_INLINE void envoy_admin_v3_ServerInfo_set_uptime_all_epochs(envoy_admin_v3_ServerInfo *msg, struct google_protobuf_Duration* value) {
|
||||
const upb_MiniTableField field = {4, UPB_SIZE(20, 40), 65, 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE struct google_protobuf_Duration* envoy_admin_v3_ServerInfo_mutable_uptime_all_epochs(envoy_admin_v3_ServerInfo* msg, upb_Arena* arena) {
|
||||
struct google_protobuf_Duration* sub = (struct google_protobuf_Duration*)envoy_admin_v3_ServerInfo_uptime_all_epochs(msg);
|
||||
|
@ -225,11 +225,11 @@ UPB_INLINE struct google_protobuf_Duration* envoy_admin_v3_ServerInfo_mutable_up
|
|||
}
|
||||
UPB_INLINE void envoy_admin_v3_ServerInfo_set_hot_restart_version(envoy_admin_v3_ServerInfo *msg, upb_StringView value) {
|
||||
const upb_MiniTableField field = {5, UPB_SIZE(40, 48), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_ServerInfo_set_command_line_options(envoy_admin_v3_ServerInfo *msg, envoy_admin_v3_CommandLineOptions* value) {
|
||||
const upb_MiniTableField field = {6, UPB_SIZE(24, 64), 66, 2, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE struct envoy_admin_v3_CommandLineOptions* envoy_admin_v3_ServerInfo_mutable_command_line_options(envoy_admin_v3_ServerInfo* msg, upb_Arena* arena) {
|
||||
struct envoy_admin_v3_CommandLineOptions* sub = (struct envoy_admin_v3_CommandLineOptions*)envoy_admin_v3_ServerInfo_command_line_options(msg);
|
||||
|
@ -241,7 +241,7 @@ UPB_INLINE struct envoy_admin_v3_CommandLineOptions* envoy_admin_v3_ServerInfo_m
|
|||
}
|
||||
UPB_INLINE void envoy_admin_v3_ServerInfo_set_node(envoy_admin_v3_ServerInfo *msg, struct envoy_config_core_v3_Node* value) {
|
||||
const upb_MiniTableField field = {7, UPB_SIZE(28, 72), 67, 3, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE struct envoy_config_core_v3_Node* envoy_admin_v3_ServerInfo_mutable_node(envoy_admin_v3_ServerInfo* msg, upb_Arena* arena) {
|
||||
struct envoy_config_core_v3_Node* sub = (struct envoy_config_core_v3_Node*)envoy_admin_v3_ServerInfo_node(msg);
|
||||
|
@ -289,13 +289,13 @@ UPB_INLINE char* envoy_admin_v3_CommandLineOptions_serialize_ex(const envoy_admi
|
|||
return ptr;
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_clear_base_id(envoy_admin_v3_CommandLineOptions* msg) {
|
||||
const upb_MiniTableField field = {1, UPB_SIZE(80, 64), 0, kUpb_NoSub, 4, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_8Byte << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {1, UPB_SIZE(88, 64), 0, kUpb_NoSub, 4, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_8Byte << kUpb_FieldRep_Shift)};
|
||||
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
|
||||
}
|
||||
UPB_INLINE uint64_t envoy_admin_v3_CommandLineOptions_base_id(const envoy_admin_v3_CommandLineOptions* msg) {
|
||||
uint64_t default_val = (uint64_t)0ull;
|
||||
uint64_t ret;
|
||||
const upb_MiniTableField field = {1, UPB_SIZE(80, 64), 0, kUpb_NoSub, 4, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_8Byte << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {1, UPB_SIZE(88, 64), 0, kUpb_NoSub, 4, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_8Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_GetNonExtensionField(UPB_UPCAST(msg), &field,
|
||||
&default_val, &ret);
|
||||
return ret;
|
||||
|
@ -313,25 +313,25 @@ UPB_INLINE uint32_t envoy_admin_v3_CommandLineOptions_concurrency(const envoy_ad
|
|||
return ret;
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_clear_config_path(envoy_admin_v3_CommandLineOptions* msg) {
|
||||
const upb_MiniTableField field = {3, UPB_SIZE(88, 72), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {3, UPB_SIZE(96, 72), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
|
||||
}
|
||||
UPB_INLINE upb_StringView envoy_admin_v3_CommandLineOptions_config_path(const envoy_admin_v3_CommandLineOptions* msg) {
|
||||
upb_StringView default_val = upb_StringView_FromString("");
|
||||
upb_StringView ret;
|
||||
const upb_MiniTableField field = {3, UPB_SIZE(88, 72), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {3, UPB_SIZE(96, 72), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_GetNonExtensionField(UPB_UPCAST(msg), &field,
|
||||
&default_val, &ret);
|
||||
return ret;
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_clear_config_yaml(envoy_admin_v3_CommandLineOptions* msg) {
|
||||
const upb_MiniTableField field = {4, UPB_SIZE(96, 88), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {4, UPB_SIZE(104, 88), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
|
||||
}
|
||||
UPB_INLINE upb_StringView envoy_admin_v3_CommandLineOptions_config_yaml(const envoy_admin_v3_CommandLineOptions* msg) {
|
||||
upb_StringView default_val = upb_StringView_FromString("");
|
||||
upb_StringView ret;
|
||||
const upb_MiniTableField field = {4, UPB_SIZE(96, 88), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {4, UPB_SIZE(104, 88), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_GetNonExtensionField(UPB_UPCAST(msg), &field,
|
||||
&default_val, &ret);
|
||||
return ret;
|
||||
|
@ -349,13 +349,13 @@ UPB_INLINE bool envoy_admin_v3_CommandLineOptions_allow_unknown_static_fields(co
|
|||
return ret;
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_clear_admin_address_path(envoy_admin_v3_CommandLineOptions* msg) {
|
||||
const upb_MiniTableField field = {6, 104, 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {6, UPB_SIZE(112, 104), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
|
||||
}
|
||||
UPB_INLINE upb_StringView envoy_admin_v3_CommandLineOptions_admin_address_path(const envoy_admin_v3_CommandLineOptions* msg) {
|
||||
upb_StringView default_val = upb_StringView_FromString("");
|
||||
upb_StringView ret;
|
||||
const upb_MiniTableField field = {6, 104, 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {6, UPB_SIZE(112, 104), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_GetNonExtensionField(UPB_UPCAST(msg), &field,
|
||||
&default_val, &ret);
|
||||
return ret;
|
||||
|
@ -373,85 +373,85 @@ UPB_INLINE int32_t envoy_admin_v3_CommandLineOptions_local_address_ip_version(co
|
|||
return ret;
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_clear_log_level(envoy_admin_v3_CommandLineOptions* msg) {
|
||||
const upb_MiniTableField field = {8, UPB_SIZE(112, 120), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {8, 120, 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
|
||||
}
|
||||
UPB_INLINE upb_StringView envoy_admin_v3_CommandLineOptions_log_level(const envoy_admin_v3_CommandLineOptions* msg) {
|
||||
upb_StringView default_val = upb_StringView_FromString("");
|
||||
upb_StringView ret;
|
||||
const upb_MiniTableField field = {8, UPB_SIZE(112, 120), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {8, 120, 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_GetNonExtensionField(UPB_UPCAST(msg), &field,
|
||||
&default_val, &ret);
|
||||
return ret;
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_clear_component_log_level(envoy_admin_v3_CommandLineOptions* msg) {
|
||||
const upb_MiniTableField field = {9, UPB_SIZE(120, 136), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {9, UPB_SIZE(128, 136), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
|
||||
}
|
||||
UPB_INLINE upb_StringView envoy_admin_v3_CommandLineOptions_component_log_level(const envoy_admin_v3_CommandLineOptions* msg) {
|
||||
upb_StringView default_val = upb_StringView_FromString("");
|
||||
upb_StringView ret;
|
||||
const upb_MiniTableField field = {9, UPB_SIZE(120, 136), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {9, UPB_SIZE(128, 136), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_GetNonExtensionField(UPB_UPCAST(msg), &field,
|
||||
&default_val, &ret);
|
||||
return ret;
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_clear_log_format(envoy_admin_v3_CommandLineOptions* msg) {
|
||||
const upb_MiniTableField field = {10, UPB_SIZE(128, 152), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {10, UPB_SIZE(136, 152), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
|
||||
}
|
||||
UPB_INLINE upb_StringView envoy_admin_v3_CommandLineOptions_log_format(const envoy_admin_v3_CommandLineOptions* msg) {
|
||||
upb_StringView default_val = upb_StringView_FromString("");
|
||||
upb_StringView ret;
|
||||
const upb_MiniTableField field = {10, UPB_SIZE(128, 152), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {10, UPB_SIZE(136, 152), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_GetNonExtensionField(UPB_UPCAST(msg), &field,
|
||||
&default_val, &ret);
|
||||
return ret;
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_clear_log_path(envoy_admin_v3_CommandLineOptions* msg) {
|
||||
const upb_MiniTableField field = {11, UPB_SIZE(136, 168), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {11, UPB_SIZE(144, 168), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
|
||||
}
|
||||
UPB_INLINE upb_StringView envoy_admin_v3_CommandLineOptions_log_path(const envoy_admin_v3_CommandLineOptions* msg) {
|
||||
upb_StringView default_val = upb_StringView_FromString("");
|
||||
upb_StringView ret;
|
||||
const upb_MiniTableField field = {11, UPB_SIZE(136, 168), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {11, UPB_SIZE(144, 168), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_GetNonExtensionField(UPB_UPCAST(msg), &field,
|
||||
&default_val, &ret);
|
||||
return ret;
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_clear_service_cluster(envoy_admin_v3_CommandLineOptions* msg) {
|
||||
const upb_MiniTableField field = {13, UPB_SIZE(144, 184), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {13, UPB_SIZE(152, 184), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
|
||||
}
|
||||
UPB_INLINE upb_StringView envoy_admin_v3_CommandLineOptions_service_cluster(const envoy_admin_v3_CommandLineOptions* msg) {
|
||||
upb_StringView default_val = upb_StringView_FromString("");
|
||||
upb_StringView ret;
|
||||
const upb_MiniTableField field = {13, UPB_SIZE(144, 184), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {13, UPB_SIZE(152, 184), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_GetNonExtensionField(UPB_UPCAST(msg), &field,
|
||||
&default_val, &ret);
|
||||
return ret;
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_clear_service_node(envoy_admin_v3_CommandLineOptions* msg) {
|
||||
const upb_MiniTableField field = {14, UPB_SIZE(152, 200), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {14, UPB_SIZE(160, 200), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
|
||||
}
|
||||
UPB_INLINE upb_StringView envoy_admin_v3_CommandLineOptions_service_node(const envoy_admin_v3_CommandLineOptions* msg) {
|
||||
upb_StringView default_val = upb_StringView_FromString("");
|
||||
upb_StringView ret;
|
||||
const upb_MiniTableField field = {14, UPB_SIZE(152, 200), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {14, UPB_SIZE(160, 200), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_GetNonExtensionField(UPB_UPCAST(msg), &field,
|
||||
&default_val, &ret);
|
||||
return ret;
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_clear_service_zone(envoy_admin_v3_CommandLineOptions* msg) {
|
||||
const upb_MiniTableField field = {15, UPB_SIZE(160, 216), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {15, UPB_SIZE(168, 216), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
|
||||
}
|
||||
UPB_INLINE upb_StringView envoy_admin_v3_CommandLineOptions_service_zone(const envoy_admin_v3_CommandLineOptions* msg) {
|
||||
upb_StringView default_val = upb_StringView_FromString("");
|
||||
upb_StringView ret;
|
||||
const upb_MiniTableField field = {15, UPB_SIZE(160, 216), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {15, UPB_SIZE(168, 216), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_GetNonExtensionField(UPB_UPCAST(msg), &field,
|
||||
&default_val, &ret);
|
||||
return ret;
|
||||
|
@ -645,13 +645,13 @@ UPB_INLINE bool envoy_admin_v3_CommandLineOptions_use_dynamic_base_id(const envo
|
|||
return ret;
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_clear_base_id_path(envoy_admin_v3_CommandLineOptions* msg) {
|
||||
const upb_MiniTableField field = {32, UPB_SIZE(168, 264), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {32, UPB_SIZE(176, 264), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
|
||||
}
|
||||
UPB_INLINE upb_StringView envoy_admin_v3_CommandLineOptions_base_id_path(const envoy_admin_v3_CommandLineOptions* msg) {
|
||||
upb_StringView default_val = upb_StringView_FromString("");
|
||||
upb_StringView ret;
|
||||
const upb_MiniTableField field = {32, UPB_SIZE(168, 264), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {32, UPB_SIZE(176, 264), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_GetNonExtensionField(UPB_UPCAST(msg), &field,
|
||||
&default_val, &ret);
|
||||
return ret;
|
||||
|
@ -681,13 +681,13 @@ UPB_INLINE bool envoy_admin_v3_CommandLineOptions_enable_fine_grain_logging(cons
|
|||
return ret;
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_clear_socket_path(envoy_admin_v3_CommandLineOptions* msg) {
|
||||
const upb_MiniTableField field = {35, UPB_SIZE(176, 280), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {35, UPB_SIZE(184, 280), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
|
||||
}
|
||||
UPB_INLINE upb_StringView envoy_admin_v3_CommandLineOptions_socket_path(const envoy_admin_v3_CommandLineOptions* msg) {
|
||||
upb_StringView default_val = upb_StringView_FromString("");
|
||||
upb_StringView ret;
|
||||
const upb_MiniTableField field = {35, UPB_SIZE(176, 280), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
const upb_MiniTableField field = {35, UPB_SIZE(184, 280), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_GetNonExtensionField(UPB_UPCAST(msg), &field,
|
||||
&default_val, &ret);
|
||||
return ret;
|
||||
|
@ -748,66 +748,90 @@ UPB_INLINE upb_Array* _envoy_admin_v3_CommandLineOptions_stats_tag_mutable_upb_a
|
|||
}
|
||||
return arr;
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_clear_skip_hot_restart_on_no_parent(envoy_admin_v3_CommandLineOptions* msg) {
|
||||
const upb_MiniTableField field = {39, UPB_SIZE(80, 57), 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
|
||||
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
|
||||
}
|
||||
UPB_INLINE bool envoy_admin_v3_CommandLineOptions_skip_hot_restart_on_no_parent(const envoy_admin_v3_CommandLineOptions* msg) {
|
||||
bool default_val = false;
|
||||
bool ret;
|
||||
const upb_MiniTableField field = {39, UPB_SIZE(80, 57), 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_GetNonExtensionField(UPB_UPCAST(msg), &field,
|
||||
&default_val, &ret);
|
||||
return ret;
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_clear_skip_hot_restart_parent_stats(envoy_admin_v3_CommandLineOptions* msg) {
|
||||
const upb_MiniTableField field = {40, UPB_SIZE(81, 58), 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
|
||||
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
|
||||
}
|
||||
UPB_INLINE bool envoy_admin_v3_CommandLineOptions_skip_hot_restart_parent_stats(const envoy_admin_v3_CommandLineOptions* msg) {
|
||||
bool default_val = false;
|
||||
bool ret;
|
||||
const upb_MiniTableField field = {40, UPB_SIZE(81, 58), 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_GetNonExtensionField(UPB_UPCAST(msg), &field,
|
||||
&default_val, &ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_base_id(envoy_admin_v3_CommandLineOptions *msg, uint64_t value) {
|
||||
const upb_MiniTableField field = {1, UPB_SIZE(80, 64), 0, kUpb_NoSub, 4, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_8Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
const upb_MiniTableField field = {1, UPB_SIZE(88, 64), 0, kUpb_NoSub, 4, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_8Byte << kUpb_FieldRep_Shift)};
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_concurrency(envoy_admin_v3_CommandLineOptions *msg, uint32_t value) {
|
||||
const upb_MiniTableField field = {2, 12, 0, kUpb_NoSub, 13, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_config_path(envoy_admin_v3_CommandLineOptions *msg, upb_StringView value) {
|
||||
const upb_MiniTableField field = {3, UPB_SIZE(88, 72), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
const upb_MiniTableField field = {3, UPB_SIZE(96, 72), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_config_yaml(envoy_admin_v3_CommandLineOptions *msg, upb_StringView value) {
|
||||
const upb_MiniTableField field = {4, UPB_SIZE(96, 88), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
const upb_MiniTableField field = {4, UPB_SIZE(104, 88), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_allow_unknown_static_fields(envoy_admin_v3_CommandLineOptions *msg, bool value) {
|
||||
const upb_MiniTableField field = {5, 16, 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_admin_address_path(envoy_admin_v3_CommandLineOptions *msg, upb_StringView value) {
|
||||
const upb_MiniTableField field = {6, 104, 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
const upb_MiniTableField field = {6, UPB_SIZE(112, 104), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_local_address_ip_version(envoy_admin_v3_CommandLineOptions *msg, int32_t value) {
|
||||
const upb_MiniTableField field = {7, 20, 0, kUpb_NoSub, 5, (int)kUpb_FieldMode_Scalar | (int)kUpb_LabelFlags_IsAlternate | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_log_level(envoy_admin_v3_CommandLineOptions *msg, upb_StringView value) {
|
||||
const upb_MiniTableField field = {8, UPB_SIZE(112, 120), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
const upb_MiniTableField field = {8, 120, 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_component_log_level(envoy_admin_v3_CommandLineOptions *msg, upb_StringView value) {
|
||||
const upb_MiniTableField field = {9, UPB_SIZE(120, 136), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
const upb_MiniTableField field = {9, UPB_SIZE(128, 136), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_log_format(envoy_admin_v3_CommandLineOptions *msg, upb_StringView value) {
|
||||
const upb_MiniTableField field = {10, UPB_SIZE(128, 152), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
const upb_MiniTableField field = {10, UPB_SIZE(136, 152), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_log_path(envoy_admin_v3_CommandLineOptions *msg, upb_StringView value) {
|
||||
const upb_MiniTableField field = {11, UPB_SIZE(136, 168), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
const upb_MiniTableField field = {11, UPB_SIZE(144, 168), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_service_cluster(envoy_admin_v3_CommandLineOptions *msg, upb_StringView value) {
|
||||
const upb_MiniTableField field = {13, UPB_SIZE(144, 184), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
const upb_MiniTableField field = {13, UPB_SIZE(152, 184), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_service_node(envoy_admin_v3_CommandLineOptions *msg, upb_StringView value) {
|
||||
const upb_MiniTableField field = {14, UPB_SIZE(152, 200), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
const upb_MiniTableField field = {14, UPB_SIZE(160, 200), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_service_zone(envoy_admin_v3_CommandLineOptions *msg, upb_StringView value) {
|
||||
const upb_MiniTableField field = {15, UPB_SIZE(160, 216), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
const upb_MiniTableField field = {15, UPB_SIZE(168, 216), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_file_flush_interval(envoy_admin_v3_CommandLineOptions *msg, struct google_protobuf_Duration* value) {
|
||||
const upb_MiniTableField field = {16, UPB_SIZE(24, 232), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE struct google_protobuf_Duration* envoy_admin_v3_CommandLineOptions_mutable_file_flush_interval(envoy_admin_v3_CommandLineOptions* msg, upb_Arena* arena) {
|
||||
struct google_protobuf_Duration* sub = (struct google_protobuf_Duration*)envoy_admin_v3_CommandLineOptions_file_flush_interval(msg);
|
||||
|
@ -819,7 +843,7 @@ UPB_INLINE struct google_protobuf_Duration* envoy_admin_v3_CommandLineOptions_mu
|
|||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_drain_time(envoy_admin_v3_CommandLineOptions *msg, struct google_protobuf_Duration* value) {
|
||||
const upb_MiniTableField field = {17, UPB_SIZE(28, 240), 65, 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE struct google_protobuf_Duration* envoy_admin_v3_CommandLineOptions_mutable_drain_time(envoy_admin_v3_CommandLineOptions* msg, upb_Arena* arena) {
|
||||
struct google_protobuf_Duration* sub = (struct google_protobuf_Duration*)envoy_admin_v3_CommandLineOptions_drain_time(msg);
|
||||
|
@ -831,7 +855,7 @@ UPB_INLINE struct google_protobuf_Duration* envoy_admin_v3_CommandLineOptions_mu
|
|||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_parent_shutdown_time(envoy_admin_v3_CommandLineOptions *msg, struct google_protobuf_Duration* value) {
|
||||
const upb_MiniTableField field = {18, UPB_SIZE(32, 248), 66, 2, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE struct google_protobuf_Duration* envoy_admin_v3_CommandLineOptions_mutable_parent_shutdown_time(envoy_admin_v3_CommandLineOptions* msg, upb_Arena* arena) {
|
||||
struct google_protobuf_Duration* sub = (struct google_protobuf_Duration*)envoy_admin_v3_CommandLineOptions_parent_shutdown_time(msg);
|
||||
|
@ -843,31 +867,31 @@ UPB_INLINE struct google_protobuf_Duration* envoy_admin_v3_CommandLineOptions_mu
|
|||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_mode(envoy_admin_v3_CommandLineOptions *msg, int32_t value) {
|
||||
const upb_MiniTableField field = {19, UPB_SIZE(36, 24), 0, kUpb_NoSub, 5, (int)kUpb_FieldMode_Scalar | (int)kUpb_LabelFlags_IsAlternate | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_disable_hot_restart(envoy_admin_v3_CommandLineOptions *msg, bool value) {
|
||||
const upb_MiniTableField field = {22, UPB_SIZE(40, 28), 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_enable_mutex_tracing(envoy_admin_v3_CommandLineOptions *msg, bool value) {
|
||||
const upb_MiniTableField field = {23, UPB_SIZE(41, 29), 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_restart_epoch(envoy_admin_v3_CommandLineOptions *msg, uint32_t value) {
|
||||
const upb_MiniTableField field = {24, UPB_SIZE(44, 32), 0, kUpb_NoSub, 13, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
||||
}
|
||||
UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_cpuset_threads(envoy_admin_v3_CommandLineOptions *msg, bool value) {
|
||||
const upb_MiniTableField field = {25, UPB_SIZE(48, 36), 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
|
||||
_upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
|
||||
upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
|
 }
 UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_reject_unknown_dynamic_fields(envoy_admin_v3_CommandLineOptions *msg, bool value) {
   const upb_MiniTableField field = {26, UPB_SIZE(49, 37), 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_log_format_escaped(envoy_admin_v3_CommandLineOptions *msg, bool value) {
   const upb_MiniTableField field = {27, UPB_SIZE(50, 38), 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE upb_StringView* envoy_admin_v3_CommandLineOptions_mutable_disabled_extensions(envoy_admin_v3_CommandLineOptions* msg, size_t* size) {
   upb_MiniTableField field = {28, UPB_SIZE(52, 256), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Array | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
@@ -899,35 +923,35 @@ UPB_INLINE bool envoy_admin_v3_CommandLineOptions_add_disabled_extensions(envoy_
 }
 UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_ignore_unknown_dynamic_fields(envoy_admin_v3_CommandLineOptions *msg, bool value) {
   const upb_MiniTableField field = {30, UPB_SIZE(56, 39), 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_use_dynamic_base_id(envoy_admin_v3_CommandLineOptions *msg, bool value) {
   const upb_MiniTableField field = {31, UPB_SIZE(57, 40), 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_base_id_path(envoy_admin_v3_CommandLineOptions *msg, upb_StringView value) {
-  const upb_MiniTableField field = {32, UPB_SIZE(168, 264), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  const upb_MiniTableField field = {32, UPB_SIZE(176, 264), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_drain_strategy(envoy_admin_v3_CommandLineOptions *msg, int32_t value) {
   const upb_MiniTableField field = {33, UPB_SIZE(60, 44), 0, kUpb_NoSub, 5, (int)kUpb_FieldMode_Scalar | (int)kUpb_LabelFlags_IsAlternate | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_enable_fine_grain_logging(envoy_admin_v3_CommandLineOptions *msg, bool value) {
   const upb_MiniTableField field = {34, UPB_SIZE(64, 48), 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_socket_path(envoy_admin_v3_CommandLineOptions *msg, upb_StringView value) {
-  const upb_MiniTableField field = {35, UPB_SIZE(176, 280), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  const upb_MiniTableField field = {35, UPB_SIZE(184, 280), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_socket_mode(envoy_admin_v3_CommandLineOptions *msg, uint32_t value) {
   const upb_MiniTableField field = {36, UPB_SIZE(68, 52), 0, kUpb_NoSub, 13, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_enable_core_dump(envoy_admin_v3_CommandLineOptions *msg, bool value) {
   const upb_MiniTableField field = {37, UPB_SIZE(72, 56), 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE upb_StringView* envoy_admin_v3_CommandLineOptions_mutable_stats_tag(envoy_admin_v3_CommandLineOptions* msg, size_t* size) {
   upb_MiniTableField field = {38, UPB_SIZE(76, 296), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Array | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
@@ -957,6 +981,14 @@ UPB_INLINE bool envoy_admin_v3_CommandLineOptions_add_stats_tag(envoy_admin_v3_C
       (arr, arr->UPB_PRIVATE(size) - 1, &val, sizeof(val));
   return true;
 }
+UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_skip_hot_restart_on_no_parent(envoy_admin_v3_CommandLineOptions *msg, bool value) {
+  const upb_MiniTableField field = {39, UPB_SIZE(80, 57), 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
+}
+UPB_INLINE void envoy_admin_v3_CommandLineOptions_set_skip_hot_restart_parent_stats(envoy_admin_v3_CommandLineOptions *msg, bool value) {
+  const upb_MiniTableField field = {40, UPB_SIZE(81, 58), 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
+}
 
 #ifdef __cplusplus
 } /* extern "C" */
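The regenerated setters above swap the internal _upb_Message_SetNonExtensionField helper for the public upb_Message_SetBaseField accessor, and the new fields 39 and 40 gain setters for skip_hot_restart_on_no_parent and skip_hot_restart_parent_stats. A minimal caller-side sketch of the regenerated API, assuming the usual upb arena helpers and the generated _new constructor (the include path and constructor name are not shown in this diff):

#include "envoy/admin/v3/server_info.upb.h"   /* generated header from this diff; path assumed */

/* Sketch only: drive the regenerated setters, which now route through
 * upb_Message_SetBaseField() internally. */
static void build_command_line_options(void) {
  upb_Arena* arena = upb_Arena_New();                  /* arena owning the message */
  envoy_admin_v3_CommandLineOptions* opts =
      envoy_admin_v3_CommandLineOptions_new(arena);    /* generated constructor (assumed name) */
  envoy_admin_v3_CommandLineOptions_set_enable_core_dump(opts, true);
  envoy_admin_v3_CommandLineOptions_set_skip_hot_restart_on_no_parent(opts, true);   /* new field 39 */
  envoy_admin_v3_CommandLineOptions_set_skip_hot_restart_parent_stats(opts, false);  /* new field 40 */
  upb_Arena_Free(arena);                               /* releases opts as well */
}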
@@ -37,6 +37,9 @@ const upb_MiniTable envoy__admin__v3__ServerInfo_msg_init = {
   &envoy_admin_v3_ServerInfo_submsgs[0],
   &envoy_admin_v3_ServerInfo__fields[0],
   UPB_SIZE(48, 80), 7, kUpb_ExtMode_NonExtendable, 7, UPB_FASTTABLE_MASK(56), 0,
+#ifdef UPB_TRACING_ENABLED
+  "envoy.admin.v3.ServerInfo",
+#endif
   UPB_FASTTABLE_INIT({
     {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
     {0x001000003f00000a, &upb_pss_1bt},
@@ -55,21 +58,21 @@ static const upb_MiniTableSub envoy_admin_v3_CommandLineOptions_submsgs[3] = {
   {.UPB_PRIVATE(submsg) = &google__protobuf__Duration_msg_init},
 };
 
-static const upb_MiniTableField envoy_admin_v3_CommandLineOptions__fields[34] = {
-  {1, UPB_SIZE(80, 64), 0, kUpb_NoSub, 4, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_8Byte << kUpb_FieldRep_Shift)},
+static const upb_MiniTableField envoy_admin_v3_CommandLineOptions__fields[36] = {
+  {1, UPB_SIZE(88, 64), 0, kUpb_NoSub, 4, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_8Byte << kUpb_FieldRep_Shift)},
   {2, 12, 0, kUpb_NoSub, 13, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)},
-  {3, UPB_SIZE(88, 72), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
-  {4, UPB_SIZE(96, 88), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
+  {3, UPB_SIZE(96, 72), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
+  {4, UPB_SIZE(104, 88), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
   {5, 16, 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)},
-  {6, 104, 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
+  {6, UPB_SIZE(112, 104), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
   {7, 20, 0, kUpb_NoSub, 5, (int)kUpb_FieldMode_Scalar | (int)kUpb_LabelFlags_IsAlternate | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)},
-  {8, UPB_SIZE(112, 120), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
-  {9, UPB_SIZE(120, 136), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
-  {10, UPB_SIZE(128, 152), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
-  {11, UPB_SIZE(136, 168), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
-  {13, UPB_SIZE(144, 184), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
-  {14, UPB_SIZE(152, 200), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
-  {15, UPB_SIZE(160, 216), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
+  {8, 120, 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
+  {9, UPB_SIZE(128, 136), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
+  {10, UPB_SIZE(136, 152), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
+  {11, UPB_SIZE(144, 168), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
+  {13, UPB_SIZE(152, 184), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
+  {14, UPB_SIZE(160, 200), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
+  {15, UPB_SIZE(168, 216), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
   {16, UPB_SIZE(24, 232), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)},
   {17, UPB_SIZE(28, 240), 65, 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)},
   {18, UPB_SIZE(32, 248), 66, 2, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)},
@@ -83,19 +86,24 @@ static const upb_MiniTableField envoy_admin_v3_CommandLineOptions__fields[34] =
   {28, UPB_SIZE(52, 256), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Array | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)},
   {30, UPB_SIZE(56, 39), 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)},
   {31, UPB_SIZE(57, 40), 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)},
-  {32, UPB_SIZE(168, 264), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
+  {32, UPB_SIZE(176, 264), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
   {33, UPB_SIZE(60, 44), 0, kUpb_NoSub, 5, (int)kUpb_FieldMode_Scalar | (int)kUpb_LabelFlags_IsAlternate | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)},
   {34, UPB_SIZE(64, 48), 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)},
-  {35, UPB_SIZE(176, 280), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
+  {35, UPB_SIZE(184, 280), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
   {36, UPB_SIZE(68, 52), 0, kUpb_NoSub, 13, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)},
   {37, UPB_SIZE(72, 56), 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)},
   {38, UPB_SIZE(76, 296), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Array | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)},
+  {39, UPB_SIZE(80, 57), 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)},
+  {40, UPB_SIZE(81, 58), 0, kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)},
 };
 
 const upb_MiniTable envoy__admin__v3__CommandLineOptions_msg_init = {
   &envoy_admin_v3_CommandLineOptions_submsgs[0],
   &envoy_admin_v3_CommandLineOptions__fields[0],
-  UPB_SIZE(184, 304), 34, kUpb_ExtMode_NonExtendable, 11, UPB_FASTTABLE_MASK(248), 0,
+  UPB_SIZE(192, 304), 36, kUpb_ExtMode_NonExtendable, 11, UPB_FASTTABLE_MASK(248), 0,
+#ifdef UPB_TRACING_ENABLED
+  "envoy.admin.v3.CommandLineOptions",
+#endif
   UPB_FASTTABLE_INIT({
     {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
     {0x004000003f000008, &upb_psv8_1bt},
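Each entry in envoy_admin_v3_CommandLineOptions__fields is a upb_MiniTableField initializer, and the message-level bookkeeping changes accordingly: the field count goes from 34 to 36 and the 32-bit layout grows from 184 to 192 bytes. Reading one of the new entries positionally, with annotations inferred from the neighbouring initializers rather than taken from the generated file:

/* Requires the upb runtime headers that the generated file pulls in,
 * e.g. upb/generated_code_support.h. Annotated copy of the field-40 entry. */
static const upb_MiniTableField kSkipHotRestartParentStatsField = {
    40,                /* proto field number */
    UPB_SIZE(81, 58),  /* byte offset in the 32-bit / 64-bit message layouts */
    0,                 /* presence: 0 = no hasbit (implicit presence) */
    kUpb_NoSub,        /* no submessage/subenum table */
    8,                 /* descriptor type (8 = bool) */
    (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift),
};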
@@ -96,11 +96,11 @@ UPB_INLINE bool envoy_admin_v3_TapRequest_has_tap_config(const envoy_admin_v3_Ta
 
 UPB_INLINE void envoy_admin_v3_TapRequest_set_config_id(envoy_admin_v3_TapRequest *msg, upb_StringView value) {
   const upb_MiniTableField field = {1, 16, 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE void envoy_admin_v3_TapRequest_set_tap_config(envoy_admin_v3_TapRequest *msg, struct envoy_config_tap_v3_TapConfig* value) {
   const upb_MiniTableField field = {2, UPB_SIZE(12, 32), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
-  _upb_Message_SetNonExtensionField((upb_Message *)msg, &field, &value);
+  upb_Message_SetBaseField((upb_Message *)msg, &field, &value);
 }
 UPB_INLINE struct envoy_config_tap_v3_TapConfig* envoy_admin_v3_TapRequest_mutable_tap_config(envoy_admin_v3_TapRequest* msg, upb_Arena* arena) {
   struct envoy_config_tap_v3_TapConfig* sub = (struct envoy_config_tap_v3_TapConfig*)envoy_admin_v3_TapRequest_tap_config(msg);
@@ -29,6 +29,9 @@ const upb_MiniTable envoy__admin__v3__TapRequest_msg_init = {
   &envoy_admin_v3_TapRequest_submsgs[0],
   &envoy_admin_v3_TapRequest__fields[0],
   UPB_SIZE(24, 40), 2, kUpb_ExtMode_NonExtendable, 2, UPB_FASTTABLE_MASK(8), 0,
+#ifdef UPB_TRACING_ENABLED
+  "envoy.admin.v3.TapRequest",
+#endif
   UPB_FASTTABLE_INIT({
     {0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
     {0x001000003f00000a, &upb_pss_1bt},
@@ -46,7 +46,7 @@ UPB_INLINE void envoy_annotations_set_disallowed_by_default(struct google_protob
   UPB_ASSUME(upb_MiniTableField_IsScalar(&ext->UPB_PRIVATE(field)));
   UPB_ASSUME(UPB_PRIVATE(_upb_MiniTableField_GetRep)(
                  &ext->UPB_PRIVATE(field)) == kUpb_FieldRep_1Byte);
-  bool ok = _upb_Message_SetExtensionField((upb_Message*)msg, ext, &val, arena);
+  bool ok = upb_Message_SetExtension((upb_Message*)msg, ext, &val, arena);
   UPB_ASSERT(ok);
 }
 UPB_INLINE bool envoy_annotations_has_deprecated_at_minor_version(const struct google_protobuf_FieldOptions* msg) {
@@ -70,7 +70,7 @@ UPB_INLINE void envoy_annotations_set_deprecated_at_minor_version(struct google_
   UPB_ASSUME(upb_MiniTableField_IsScalar(&ext->UPB_PRIVATE(field)));
   UPB_ASSUME(UPB_PRIVATE(_upb_MiniTableField_GetRep)(
                  &ext->UPB_PRIVATE(field)) == kUpb_FieldRep_StringView);
-  bool ok = _upb_Message_SetExtensionField((upb_Message*)msg, ext, &val, arena);
+  bool ok = upb_Message_SetExtension((upb_Message*)msg, ext, &val, arena);
   UPB_ASSERT(ok);
 }
 UPB_INLINE bool envoy_annotations_has_disallowed_by_default_enum(const struct google_protobuf_EnumValueOptions* msg) {
@@ -94,7 +94,7 @@ UPB_INLINE void envoy_annotations_set_disallowed_by_default_enum(struct google_p
   UPB_ASSUME(upb_MiniTableField_IsScalar(&ext->UPB_PRIVATE(field)));
   UPB_ASSUME(UPB_PRIVATE(_upb_MiniTableField_GetRep)(
                  &ext->UPB_PRIVATE(field)) == kUpb_FieldRep_1Byte);
-  bool ok = _upb_Message_SetExtensionField((upb_Message*)msg, ext, &val, arena);
+  bool ok = upb_Message_SetExtension((upb_Message*)msg, ext, &val, arena);
   UPB_ASSERT(ok);
 }
 UPB_INLINE bool envoy_annotations_has_deprecated_at_minor_version_enum(const struct google_protobuf_EnumValueOptions* msg) {
@@ -118,7 +118,7 @@ UPB_INLINE void envoy_annotations_set_deprecated_at_minor_version_enum(struct go
   UPB_ASSUME(upb_MiniTableField_IsScalar(&ext->UPB_PRIVATE(field)));
   UPB_ASSUME(UPB_PRIVATE(_upb_MiniTableField_GetRep)(
                  &ext->UPB_PRIVATE(field)) == kUpb_FieldRep_StringView);
-  bool ok = _upb_Message_SetExtensionField((upb_Message*)msg, ext, &val, arena);
+  bool ok = upb_Message_SetExtension((upb_Message*)msg, ext, &val, arena);
   UPB_ASSERT(ok);
 }
 #ifdef __cplusplus
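These annotation hunks make the same accessor migration for extensions: the generated wrappers now call the public upb_Message_SetExtension, which takes an arena and reports success, instead of the internal _upb_Message_SetExtensionField. A caller-side sketch, assuming the wrapper's elided parameter list is (msg, value, arena) and the usual generated include path:

#include "envoy/annotations/deprecation.upb.h"  /* generated header from this diff; path assumed */

/* Sketch only: set the disallowed_by_default extension on a FieldOptions
 * message via the generated wrapper, which now routes through
 * upb_Message_SetExtension() and asserts on allocation failure. */
static void mark_disallowed(struct google_protobuf_FieldOptions* field_opts,
                            upb_Arena* arena) {
  envoy_annotations_set_disallowed_by_default(field_opts, true, arena);
}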