Compare commits

12 Commits

Author SHA1 Message Date
Hannah Shi 52d0c6c326
Merge release 1.73.x to main (#336)
* remove cronet files

* remove cronet files

* Sync c-core 1.72.0-pre1

* use c++17 for swift package

* update ios deployment target to 15.0

* Sync c-core 1.72.0

* Sync c-core 1.73.0-pre1

* include additional files

* Re-sync c-core 1.73.0-pre1

* exclude inc files from swift package

* upgrade abseil swift to 0.20250127.1 (#330)

* Sync c-core 1.73.0-pre2

* Sync c-core 1.73.0

* Sync c-core 1.73.1
2025-07-12 16:19:13 -07:00
Hannah Shi d88573fd5f
Merge from 1.73.x (#329)
* remove cronet files

* remove cronet files

* Sync c-core 1.72.0-pre1

* use c++17 for swift package

* update ios deployment target to 15.0

* Sync c-core 1.72.0

* Sync c-core 1.73.0-pre1

* include additional files

* Re-sync c-core 1.73.0-pre1

* exclude inc files from swift package
2025-05-24 11:39:33 -07:00
Hannah Shi e584a88115 Re-sync c-core 1.73.0-pre1 2025-05-24 09:40:48 -07:00
Hannah Shi 96ddc18603 include additional files 2025-05-24 09:40:48 -07:00
Hannah Shi fcb8b99697 Sync c-core 1.73.0-pre1 2025-05-24 09:40:48 -07:00
Hannah Shi 9849f527f0 Sync c-core 1.72.0 2025-05-24 09:40:48 -07:00
Hannah Shi 30bd3c9d49 Sync c-core 1.72.0-pre1 2025-05-24 09:40:48 -07:00
Hannah Shi 78593d170e update ios deployment target to 15.0 2025-04-24 18:00:28 -07:00
Hannah Shi c92e41c10b use c++17 for swift package 2025-04-23 13:32:00 -07:00
Hannah Shi b641ac3789 remove cronet files 2025-04-09 15:51:16 -07:00
Hannah Shi 5c9cda2cf6 remove cronet files 2025-04-09 15:51:16 -07:00
Hannah Shi 6797fe4c02 update github workflow to use macos-14 2025-04-09 14:34:57 -07:00
675 changed files with 39184 additions and 19860 deletions

View File

@ -23,7 +23,7 @@ env:
jobs:
release-cocoapod:
runs-on: macos-12
runs-on: macos-14
steps:
- name: Repo checkout
uses: actions/checkout@v3

View File

@ -24,7 +24,7 @@ let package = Package(
],
dependencies: [
.package(url: "https://github.com/firebase/abseil-cpp-SwiftPM.git", "0.20240722.0"..<"0.20240723.0"),
.package(url: "https://github.com/firebase/abseil-cpp-SwiftPM.git", "0.20250127.0"..<"0.20250128.0"),
.package(url: "https://github.com/firebase/boringssl-SwiftPM.git", "0.32.0"..<"0.33.0"),
],
@ -41,6 +41,8 @@ let package = Package(
"src/cpp/",
"third_party/upb/upb/port/def.inc",
"third_party/upb/upb/port/undef.inc",
"third_party/utf8_range/utf8_range_sse.inc",
"third_party/utf8_range/utf8_range_neon.inc",
"third_party/re2/LICENSE",
"third_party/utf8_range/LICENSE",
"third_party/xxhash/LICENSE",
@ -82,6 +84,8 @@ let package = Package(
path: basePath,
exclude: [
"tests",
"include/grpcpp/ports_undef.inc",
"include/grpcpp/ports_def.inc",
],
sources: [
"src/cpp/",

View File

@ -22,7 +22,7 @@
Pod::Spec.new do |s|
s.name = 'gRPC-C++'
# TODO (mxyan): use version that match gRPC version when pod is stabilized
version = '1.72.1'
version = '1.73.1'
s.version = version
s.summary = 'gRPC C++ library'
s.homepage = 'https://grpc.io'
@ -232,7 +232,7 @@ Pod::Spec.new do |s|
ss.dependency "#{s.name}/Privacy", version
ss.dependency "#{s.name}/Interface", version
ss.dependency 'gRPC-Core', version
abseil_version = '~> 1.20240722.0'
abseil_version = '~> 1.20250127.1'
ss.dependency 'abseil/algorithm/container', abseil_version
ss.dependency 'abseil/base/base', abseil_version
ss.dependency 'abseil/base/config', abseil_version
@ -240,6 +240,7 @@ Pod::Spec.new do |s|
ss.dependency 'abseil/base/log_severity', abseil_version
ss.dependency 'abseil/base/no_destructor', abseil_version
ss.dependency 'abseil/cleanup/cleanup', abseil_version
ss.dependency 'abseil/container/btree', abseil_version
ss.dependency 'abseil/container/flat_hash_map', abseil_version
ss.dependency 'abseil/container/flat_hash_set', abseil_version
ss.dependency 'abseil/container/inlined_vector', abseil_version
@ -294,6 +295,7 @@ Pod::Spec.new do |s|
'src/core/channelz/channel_trace.h',
'src/core/channelz/channelz.h',
'src/core/channelz/channelz_registry.h',
'src/core/channelz/ztrace_collector.h',
'src/core/client_channel/backup_poller.h',
'src/core/client_channel/client_channel.h',
'src/core/client_channel/client_channel_args.h',
@ -393,7 +395,6 @@ Pod::Spec.new do |s|
'src/core/ext/transport/chttp2/transport/bin_encoder.h',
'src/core/ext/transport/chttp2/transport/call_tracer_wrapper.h',
'src/core/ext/transport/chttp2/transport/chttp2_transport.h',
'src/core/ext/transport/chttp2/transport/context_list_entry.h',
'src/core/ext/transport/chttp2/transport/decode_huff.h',
'src/core/ext/transport/chttp2/transport/flow_control.h',
'src/core/ext/transport/chttp2/transport/frame.h',
@ -412,8 +413,10 @@ Pod::Spec.new do |s|
'src/core/ext/transport/chttp2/transport/hpack_parser_table.h',
'src/core/ext/transport/chttp2/transport/http2_settings.h',
'src/core/ext/transport/chttp2/transport/http2_status.h',
'src/core/ext/transport/chttp2/transport/http2_ztrace_collector.h',
'src/core/ext/transport/chttp2/transport/huffsyms.h',
'src/core/ext/transport/chttp2/transport/internal.h',
'src/core/ext/transport/chttp2/transport/internal_channel_arg_names.h',
'src/core/ext/transport/chttp2/transport/legacy_frame.h',
'src/core/ext/transport/chttp2/transport/ping_abuse_policy.h',
'src/core/ext/transport/chttp2/transport/ping_callbacks.h',
@ -951,7 +954,9 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/default_event_engine.h',
'src/core/lib/event_engine/default_event_engine_factory.h',
'src/core/lib/event_engine/event_engine_context.h',
'src/core/lib/event_engine/extensions/blocking_dns.h',
'src/core/lib/event_engine/extensions/can_track_errors.h',
'src/core/lib/event_engine/extensions/channelz.h',
'src/core/lib/event_engine/extensions/chaotic_good_extension.h',
'src/core/lib/event_engine/extensions/iomgr_compatible.h',
'src/core/lib/event_engine/extensions/supports_fd.h',
@ -1029,7 +1034,6 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/event_engine_shims/endpoint.h',
'src/core/lib/iomgr/event_engine_shims/tcp_client.h',
'src/core/lib/iomgr/exec_ctx.h',
'src/core/lib/iomgr/executor.h',
'src/core/lib/iomgr/internal_errqueue.h',
'src/core/lib/iomgr/iocp_windows.h',
'src/core/lib/iomgr/iomgr.h',
@ -1205,12 +1209,16 @@ Pod::Spec.new do |s|
'src/core/service_config/service_config_impl.h',
'src/core/service_config/service_config_parser.h',
'src/core/telemetry/call_tracer.h',
'src/core/telemetry/context_list_entry.h',
'src/core/telemetry/default_tcp_tracer.h',
'src/core/telemetry/histogram_view.h',
'src/core/telemetry/metrics.h',
'src/core/telemetry/stats.h',
'src/core/telemetry/stats_data.h',
'src/core/telemetry/tcp_tracer.h',
'src/core/transport/auth_context.h',
'src/core/transport/endpoint_transport.h',
'src/core/transport/endpoint_transport_client_channel_factory.h',
'src/core/tsi/alts/crypt/gsec.h',
'src/core/tsi/alts/frame_protector/alts_counter.h',
'src/core/tsi/alts/frame_protector/alts_crypter.h',
@ -1259,6 +1267,7 @@ Pod::Spec.new do |s|
'src/core/util/event_log.h',
'src/core/util/examine_stack.h',
'src/core/util/fork.h',
'src/core/util/function_signature.h',
'src/core/util/gcp_metadata_query.h',
'src/core/util/gethostname.h',
'src/core/util/glob.h',
@ -1295,6 +1304,7 @@ Pod::Spec.new do |s|
'src/core/util/ref_counted_ptr.h',
'src/core/util/ref_counted_string.h',
'src/core/util/ring_buffer.h',
'src/core/util/shared_bit_gen.h',
'src/core/util/single_set_ptr.h',
'src/core/util/sorted_pack.h',
'src/core/util/spinlock.h',
@ -1603,6 +1613,7 @@ Pod::Spec.new do |s|
'src/core/channelz/channel_trace.h',
'src/core/channelz/channelz.h',
'src/core/channelz/channelz_registry.h',
'src/core/channelz/ztrace_collector.h',
'src/core/client_channel/backup_poller.h',
'src/core/client_channel/client_channel.h',
'src/core/client_channel/client_channel_args.h',
@ -1702,7 +1713,6 @@ Pod::Spec.new do |s|
'src/core/ext/transport/chttp2/transport/bin_encoder.h',
'src/core/ext/transport/chttp2/transport/call_tracer_wrapper.h',
'src/core/ext/transport/chttp2/transport/chttp2_transport.h',
'src/core/ext/transport/chttp2/transport/context_list_entry.h',
'src/core/ext/transport/chttp2/transport/decode_huff.h',
'src/core/ext/transport/chttp2/transport/flow_control.h',
'src/core/ext/transport/chttp2/transport/frame.h',
@ -1721,8 +1731,10 @@ Pod::Spec.new do |s|
'src/core/ext/transport/chttp2/transport/hpack_parser_table.h',
'src/core/ext/transport/chttp2/transport/http2_settings.h',
'src/core/ext/transport/chttp2/transport/http2_status.h',
'src/core/ext/transport/chttp2/transport/http2_ztrace_collector.h',
'src/core/ext/transport/chttp2/transport/huffsyms.h',
'src/core/ext/transport/chttp2/transport/internal.h',
'src/core/ext/transport/chttp2/transport/internal_channel_arg_names.h',
'src/core/ext/transport/chttp2/transport/legacy_frame.h',
'src/core/ext/transport/chttp2/transport/ping_abuse_policy.h',
'src/core/ext/transport/chttp2/transport/ping_callbacks.h',
@ -2260,7 +2272,9 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/default_event_engine.h',
'src/core/lib/event_engine/default_event_engine_factory.h',
'src/core/lib/event_engine/event_engine_context.h',
'src/core/lib/event_engine/extensions/blocking_dns.h',
'src/core/lib/event_engine/extensions/can_track_errors.h',
'src/core/lib/event_engine/extensions/channelz.h',
'src/core/lib/event_engine/extensions/chaotic_good_extension.h',
'src/core/lib/event_engine/extensions/iomgr_compatible.h',
'src/core/lib/event_engine/extensions/supports_fd.h',
@ -2338,7 +2352,6 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/event_engine_shims/endpoint.h',
'src/core/lib/iomgr/event_engine_shims/tcp_client.h',
'src/core/lib/iomgr/exec_ctx.h',
'src/core/lib/iomgr/executor.h',
'src/core/lib/iomgr/internal_errqueue.h',
'src/core/lib/iomgr/iocp_windows.h',
'src/core/lib/iomgr/iomgr.h',
@ -2514,12 +2527,16 @@ Pod::Spec.new do |s|
'src/core/service_config/service_config_impl.h',
'src/core/service_config/service_config_parser.h',
'src/core/telemetry/call_tracer.h',
'src/core/telemetry/context_list_entry.h',
'src/core/telemetry/default_tcp_tracer.h',
'src/core/telemetry/histogram_view.h',
'src/core/telemetry/metrics.h',
'src/core/telemetry/stats.h',
'src/core/telemetry/stats_data.h',
'src/core/telemetry/tcp_tracer.h',
'src/core/transport/auth_context.h',
'src/core/transport/endpoint_transport.h',
'src/core/transport/endpoint_transport_client_channel_factory.h',
'src/core/tsi/alts/crypt/gsec.h',
'src/core/tsi/alts/frame_protector/alts_counter.h',
'src/core/tsi/alts/frame_protector/alts_crypter.h',
@ -2568,6 +2585,7 @@ Pod::Spec.new do |s|
'src/core/util/event_log.h',
'src/core/util/examine_stack.h',
'src/core/util/fork.h',
'src/core/util/function_signature.h',
'src/core/util/gcp_metadata_query.h',
'src/core/util/gethostname.h',
'src/core/util/glob.h',
@ -2604,6 +2622,7 @@ Pod::Spec.new do |s|
'src/core/util/ref_counted_ptr.h',
'src/core/util/ref_counted_string.h',
'src/core/util/ring_buffer.h',
'src/core/util/shared_bit_gen.h',
'src/core/util/single_set_ptr.h',
'src/core/util/sorted_pack.h',
'src/core/util/spinlock.h',

View File

@ -21,7 +21,7 @@
Pod::Spec.new do |s|
s.name = 'gRPC-Core'
version = '1.72.1'
version = '1.73.1'
s.version = version
s.summary = 'Core cross-platform gRPC library, written in C'
s.homepage = 'https://grpc.io'
@ -47,7 +47,7 @@ Pod::Spec.new do |s|
s.requires_arc = false
name = 'grpc'
abseil_version = '~> 1.20240722.0'
abseil_version = '~> 1.20250127.1'
# When creating a dynamic framework, name it grpc.framework instead of gRPC-Core.framework.
# This lets users write their includes like `#include <grpc/grpc.h>` as opposed to `#include
@ -127,6 +127,7 @@ Pod::Spec.new do |s|
'include/grpc/event_engine/extensible.h',
'include/grpc/event_engine/internal/memory_allocator_impl.h',
'include/grpc/event_engine/internal/slice_cast.h',
'include/grpc/event_engine/internal/write_event.h',
'include/grpc/event_engine/memory_allocator.h',
'include/grpc/event_engine/memory_request.h',
'include/grpc/event_engine/port.h',
@ -200,7 +201,7 @@ Pod::Spec.new do |s|
ss.libraries = 'z'
ss.dependency "#{s.name}/Interface", version
ss.dependency "#{s.name}/Privacy", version
ss.dependency 'BoringSSL-GRPC', '0.0.40'
ss.dependency 'BoringSSL-GRPC', '0.0.41'
ss.dependency 'abseil/algorithm/container', abseil_version
ss.dependency 'abseil/base/base', abseil_version
ss.dependency 'abseil/base/config', abseil_version
@ -208,6 +209,7 @@ Pod::Spec.new do |s|
ss.dependency 'abseil/base/log_severity', abseil_version
ss.dependency 'abseil/base/no_destructor', abseil_version
ss.dependency 'abseil/cleanup/cleanup', abseil_version
ss.dependency 'abseil/container/btree', abseil_version
ss.dependency 'abseil/container/flat_hash_map', abseil_version
ss.dependency 'abseil/container/flat_hash_set', abseil_version
ss.dependency 'abseil/container/inlined_vector', abseil_version
@ -279,6 +281,7 @@ Pod::Spec.new do |s|
'src/core/channelz/channelz.h',
'src/core/channelz/channelz_registry.cc',
'src/core/channelz/channelz_registry.h',
'src/core/channelz/ztrace_collector.h',
'src/core/client_channel/backup_poller.cc',
'src/core/client_channel/backup_poller.h',
'src/core/client_channel/client_channel.cc',
@ -469,6 +472,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/stateful_session/stateful_session_service_config_parser.h',
'src/core/ext/transport/chttp2/alpn/alpn.cc',
'src/core/ext/transport/chttp2/alpn/alpn.h',
'src/core/ext/transport/chttp2/chttp2_plugin.cc',
'src/core/ext/transport/chttp2/client/chttp2_connector.cc',
'src/core/ext/transport/chttp2/client/chttp2_connector.h',
'src/core/ext/transport/chttp2/server/chttp2_server.cc',
@ -481,7 +485,6 @@ Pod::Spec.new do |s|
'src/core/ext/transport/chttp2/transport/call_tracer_wrapper.h',
'src/core/ext/transport/chttp2/transport/chttp2_transport.cc',
'src/core/ext/transport/chttp2/transport/chttp2_transport.h',
'src/core/ext/transport/chttp2/transport/context_list_entry.h',
'src/core/ext/transport/chttp2/transport/decode_huff.cc',
'src/core/ext/transport/chttp2/transport/decode_huff.h',
'src/core/ext/transport/chttp2/transport/flow_control.cc',
@ -516,9 +519,11 @@ Pod::Spec.new do |s|
'src/core/ext/transport/chttp2/transport/http2_settings.cc',
'src/core/ext/transport/chttp2/transport/http2_settings.h',
'src/core/ext/transport/chttp2/transport/http2_status.h',
'src/core/ext/transport/chttp2/transport/http2_ztrace_collector.h',
'src/core/ext/transport/chttp2/transport/huffsyms.cc',
'src/core/ext/transport/chttp2/transport/huffsyms.h',
'src/core/ext/transport/chttp2/transport/internal.h',
'src/core/ext/transport/chttp2/transport/internal_channel_arg_names.h',
'src/core/ext/transport/chttp2/transport/legacy_frame.h',
'src/core/ext/transport/chttp2/transport/parsing.cc',
'src/core/ext/transport/chttp2/transport/ping_abuse_policy.cc',
@ -1369,6 +1374,7 @@ Pod::Spec.new do |s|
'src/core/handshaker/proxy_mapper.h',
'src/core/handshaker/proxy_mapper_registry.cc',
'src/core/handshaker/proxy_mapper_registry.h',
'src/core/handshaker/security/legacy_secure_endpoint.cc',
'src/core/handshaker/security/secure_endpoint.cc',
'src/core/handshaker/security/secure_endpoint.h',
'src/core/handshaker/security/security_handshaker.cc',
@ -1422,7 +1428,9 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/default_event_engine_factory.h',
'src/core/lib/event_engine/event_engine.cc',
'src/core/lib/event_engine/event_engine_context.h',
'src/core/lib/event_engine/extensions/blocking_dns.h',
'src/core/lib/event_engine/extensions/can_track_errors.h',
'src/core/lib/event_engine/extensions/channelz.h',
'src/core/lib/event_engine/extensions/chaotic_good_extension.h',
'src/core/lib/event_engine/extensions/iomgr_compatible.h',
'src/core/lib/event_engine/extensions/supports_fd.h',
@ -1562,8 +1570,6 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/event_engine_shims/tcp_client.h',
'src/core/lib/iomgr/exec_ctx.cc',
'src/core/lib/iomgr/exec_ctx.h',
'src/core/lib/iomgr/executor.cc',
'src/core/lib/iomgr/executor.h',
'src/core/lib/iomgr/fork_posix.cc',
'src/core/lib/iomgr/fork_windows.cc',
'src/core/lib/iomgr/internal_errqueue.cc',
@ -1879,6 +1885,7 @@ Pod::Spec.new do |s|
'src/core/resolver/xds/xds_dependency_manager.h',
'src/core/resolver/xds/xds_resolver.cc',
'src/core/resolver/xds/xds_resolver_attributes.h',
'src/core/server/add_port.cc',
'src/core/server/server.cc',
'src/core/server/server.h',
'src/core/server/server_call_tracer_filter.cc',
@ -1899,6 +1906,9 @@ Pod::Spec.new do |s|
'src/core/service_config/service_config_parser.h',
'src/core/telemetry/call_tracer.cc',
'src/core/telemetry/call_tracer.h',
'src/core/telemetry/context_list_entry.h',
'src/core/telemetry/default_tcp_tracer.cc',
'src/core/telemetry/default_tcp_tracer.h',
'src/core/telemetry/histogram_view.cc',
'src/core/telemetry/histogram_view.h',
'src/core/telemetry/metrics.cc',
@ -1907,9 +1917,13 @@ Pod::Spec.new do |s|
'src/core/telemetry/stats.h',
'src/core/telemetry/stats_data.cc',
'src/core/telemetry/stats_data.h',
'src/core/telemetry/tcp_tracer.cc',
'src/core/telemetry/tcp_tracer.h',
'src/core/transport/auth_context.cc',
'src/core/transport/auth_context.h',
'src/core/transport/endpoint_transport.h',
'src/core/transport/endpoint_transport_client_channel_factory.cc',
'src/core/transport/endpoint_transport_client_channel_factory.h',
'src/core/tsi/alts/crypt/aes_gcm.cc',
'src/core/tsi/alts/crypt/gsec.cc',
'src/core/tsi/alts/crypt/gsec.h',
@ -1994,6 +2008,7 @@ Pod::Spec.new do |s|
'src/core/util/examine_stack.h',
'src/core/util/fork.cc',
'src/core/util/fork.h',
'src/core/util/function_signature.h',
'src/core/util/gcp_metadata_query.cc',
'src/core/util/gcp_metadata_query.h',
'src/core/util/gethostname.h',
@ -2068,6 +2083,8 @@ Pod::Spec.new do |s|
'src/core/util/ref_counted_string.cc',
'src/core/util/ref_counted_string.h',
'src/core/util/ring_buffer.h',
'src/core/util/shared_bit_gen.cc',
'src/core/util/shared_bit_gen.h',
'src/core/util/single_set_ptr.h',
'src/core/util/sorted_pack.h',
'src/core/util/spinlock.h',
@ -2461,6 +2478,7 @@ Pod::Spec.new do |s|
'src/core/channelz/channel_trace.h',
'src/core/channelz/channelz.h',
'src/core/channelz/channelz_registry.h',
'src/core/channelz/ztrace_collector.h',
'src/core/client_channel/backup_poller.h',
'src/core/client_channel/client_channel.h',
'src/core/client_channel/client_channel_args.h',
@ -2560,7 +2578,6 @@ Pod::Spec.new do |s|
'src/core/ext/transport/chttp2/transport/bin_encoder.h',
'src/core/ext/transport/chttp2/transport/call_tracer_wrapper.h',
'src/core/ext/transport/chttp2/transport/chttp2_transport.h',
'src/core/ext/transport/chttp2/transport/context_list_entry.h',
'src/core/ext/transport/chttp2/transport/decode_huff.h',
'src/core/ext/transport/chttp2/transport/flow_control.h',
'src/core/ext/transport/chttp2/transport/frame.h',
@ -2579,8 +2596,10 @@ Pod::Spec.new do |s|
'src/core/ext/transport/chttp2/transport/hpack_parser_table.h',
'src/core/ext/transport/chttp2/transport/http2_settings.h',
'src/core/ext/transport/chttp2/transport/http2_status.h',
'src/core/ext/transport/chttp2/transport/http2_ztrace_collector.h',
'src/core/ext/transport/chttp2/transport/huffsyms.h',
'src/core/ext/transport/chttp2/transport/internal.h',
'src/core/ext/transport/chttp2/transport/internal_channel_arg_names.h',
'src/core/ext/transport/chttp2/transport/legacy_frame.h',
'src/core/ext/transport/chttp2/transport/ping_abuse_policy.h',
'src/core/ext/transport/chttp2/transport/ping_callbacks.h',
@ -3118,7 +3137,9 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/default_event_engine.h',
'src/core/lib/event_engine/default_event_engine_factory.h',
'src/core/lib/event_engine/event_engine_context.h',
'src/core/lib/event_engine/extensions/blocking_dns.h',
'src/core/lib/event_engine/extensions/can_track_errors.h',
'src/core/lib/event_engine/extensions/channelz.h',
'src/core/lib/event_engine/extensions/chaotic_good_extension.h',
'src/core/lib/event_engine/extensions/iomgr_compatible.h',
'src/core/lib/event_engine/extensions/supports_fd.h',
@ -3196,7 +3217,6 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/event_engine_shims/endpoint.h',
'src/core/lib/iomgr/event_engine_shims/tcp_client.h',
'src/core/lib/iomgr/exec_ctx.h',
'src/core/lib/iomgr/executor.h',
'src/core/lib/iomgr/internal_errqueue.h',
'src/core/lib/iomgr/iocp_windows.h',
'src/core/lib/iomgr/iomgr.h',
@ -3372,12 +3392,16 @@ Pod::Spec.new do |s|
'src/core/service_config/service_config_impl.h',
'src/core/service_config/service_config_parser.h',
'src/core/telemetry/call_tracer.h',
'src/core/telemetry/context_list_entry.h',
'src/core/telemetry/default_tcp_tracer.h',
'src/core/telemetry/histogram_view.h',
'src/core/telemetry/metrics.h',
'src/core/telemetry/stats.h',
'src/core/telemetry/stats_data.h',
'src/core/telemetry/tcp_tracer.h',
'src/core/transport/auth_context.h',
'src/core/transport/endpoint_transport.h',
'src/core/transport/endpoint_transport_client_channel_factory.h',
'src/core/tsi/alts/crypt/gsec.h',
'src/core/tsi/alts/frame_protector/alts_counter.h',
'src/core/tsi/alts/frame_protector/alts_crypter.h',
@ -3426,6 +3450,7 @@ Pod::Spec.new do |s|
'src/core/util/event_log.h',
'src/core/util/examine_stack.h',
'src/core/util/fork.h',
'src/core/util/function_signature.h',
'src/core/util/gcp_metadata_query.h',
'src/core/util/gethostname.h',
'src/core/util/glob.h',
@ -3462,6 +3487,7 @@ Pod::Spec.new do |s|
'src/core/util/ref_counted_ptr.h',
'src/core/util/ref_counted_string.h',
'src/core/util/ring_buffer.h',
'src/core/util/shared_bit_gen.h',
'src/core/util/single_set_ptr.h',
'src/core/util/sorted_pack.h',
'src/core/util/spinlock.h',

View File

@ -21,7 +21,7 @@
Pod::Spec.new do |s|
s.name = 'gRPC-ProtoRPC'
version = '1.72.1'
version = '1.73.1'
s.version = version
s.summary = 'RPC library for Protocol Buffers, based on gRPC'
s.homepage = 'https://grpc.io'

View File

@ -21,7 +21,7 @@
Pod::Spec.new do |s|
s.name = 'gRPC-RxLibrary'
version = '1.72.1'
version = '1.73.1'
s.version = version
s.summary = 'Reactive Extensions library for iOS/OSX.'
s.homepage = 'https://grpc.io'

View File

@ -20,7 +20,7 @@
Pod::Spec.new do |s|
s.name = 'gRPC'
version = '1.72.1'
version = '1.73.1'
s.version = version
s.summary = 'gRPC client library for iOS/OSX'
s.homepage = 'https://grpc.io'

View File

@ -16,11 +16,14 @@
#include <grpc/event_engine/endpoint_config.h>
#include <grpc/event_engine/extensible.h>
#include <grpc/event_engine/internal/write_event.h>
#include <grpc/event_engine/memory_allocator.h>
#include <grpc/event_engine/port.h>
#include <grpc/event_engine/slice_buffer.h>
#include <grpc/support/port_platform.h>
#include <bitset>
#include <initializer_list>
#include <vector>
#include "absl/functional/any_invocable.h"
@ -180,13 +183,26 @@ class EventEngine : public std::enable_shared_from_this<EventEngine>,
/// EventEngine Endpoint Read API call.
///
/// Passed as argument to an Endpoint \a Read
struct ReadArgs {
class ReadArgs final {
public:
ReadArgs() = default;
ReadArgs(const ReadArgs&) = delete;
ReadArgs& operator=(const ReadArgs&) = delete;
ReadArgs(ReadArgs&&) = default;
ReadArgs& operator=(ReadArgs&&) = default;
// A suggestion to the endpoint implementation to read at-least the
// specified number of bytes over the network connection before marking
// the endpoint read operation as complete. gRPC may use this argument
// to minimize the number of endpoint read API calls over the lifetime
// of a connection.
int64_t read_hint_bytes;
void set_read_hint_bytes(int64_t read_hint_bytes) {
read_hint_bytes_ = read_hint_bytes;
}
int64_t read_hint_bytes() const { return read_hint_bytes_; }
private:
int64_t read_hint_bytes_ = 1;
};
/// Reads data from the Endpoint.
///
@ -212,20 +228,110 @@ class EventEngine : public std::enable_shared_from_this<EventEngine>,
/// statuses to \a on_read. For example, callbacks might expect to receive
/// CANCELLED on endpoint shutdown.
virtual bool Read(absl::AnyInvocable<void(absl::Status)> on_read,
SliceBuffer* buffer, const ReadArgs* args) = 0;
SliceBuffer* buffer, ReadArgs args) = 0;
//// The set of write events that can be reported by an Endpoint.
using WriteEvent = ::grpc_event_engine::experimental::internal::WriteEvent;
/// An output WriteMetric consists of a key and a value.
/// The space of keys can be queried from the endpoint via the
/// \a AllWriteMetrics, \a GetMetricName and \a GetMetricKey APIs.
/// The value is an int64_t that is implementation-defined. Check with the
/// endpoint implementation documentation for the semantics of each metric.
struct WriteMetric {
size_t key;
int64_t value;
};
using WriteEventCallback = absl::AnyInvocable<void(
WriteEvent, absl::Time, std::vector<WriteMetric>) const>;
// A bitmask of the events that the caller is interested in.
// Each bit corresponds to an entry in WriteEvent.
using WriteEventSet = std::bitset<static_cast<int>(WriteEvent::kCount)>;
// A sink to receive write events.
// The requested metrics are the keys of the metrics that the caller is
// interested in. The on_event callback will be called on each event
// requested.
class WriteEventSink final {
public:
WriteEventSink(absl::Span<const size_t> requested_metrics,
std::initializer_list<WriteEvent> requested_events,
WriteEventCallback on_event)
: requested_metrics_(requested_metrics),
on_event_(std::move(on_event)) {
for (auto event : requested_events) {
requested_events_mask_.set(static_cast<int>(event));
}
}
absl::Span<const size_t> requested_metrics() const {
return requested_metrics_;
}
bool requested_event(WriteEvent event) const {
return requested_events_mask_.test(static_cast<int>(event));
}
WriteEventSet requested_events_mask() const {
return requested_events_mask_;
}
WriteEventCallback TakeEventCallback() { return std::move(on_event_); }
private:
absl::Span<const size_t> requested_metrics_;
WriteEventSet requested_events_mask_;
// The callback to be called on each event.
WriteEventCallback on_event_;
};
/// A struct representing optional arguments that may be provided to an
/// EventEngine Endpoint Write API call.
///
/// Passed as argument to an Endpoint \a Write
struct WriteArgs {
class WriteArgs final {
public:
WriteArgs() = default;
WriteArgs(const WriteArgs&) = delete;
WriteArgs& operator=(const WriteArgs&) = delete;
WriteArgs(WriteArgs&&) = default;
WriteArgs& operator=(WriteArgs&&) = default;
// A sink to receive write events.
std::optional<WriteEventSink> TakeMetricsSink() {
auto sink = std::move(metrics_sink_);
metrics_sink_.reset();
return sink;
}
bool has_metrics_sink() const { return metrics_sink_.has_value(); }
void set_metrics_sink(WriteEventSink sink) {
metrics_sink_ = std::move(sink);
}
// Represents private information that may be passed by gRPC for
// select endpoints expected to be used only within google.
void* google_specific = nullptr;
// TODO(ctiller): Remove this method once all callers are migrated to
// metrics sink.
void* GetDeprecatedAndDiscouragedGoogleSpecificPointer() {
return google_specific_;
}
void SetDeprecatedAndDiscouragedGoogleSpecificPointer(void* pointer) {
google_specific_ = pointer;
}
// A suggestion to the endpoint implementation to group data to be written
// into frames of the specified max_frame_size. gRPC may use this
// argument to dynamically control the max sizes of frames sent to a
// receiver in response to high receiver memory pressure.
int64_t max_frame_size;
int64_t max_frame_size() const { return max_frame_size_; }
void set_max_frame_size(int64_t max_frame_size) {
max_frame_size_ = max_frame_size;
}
private:
std::optional<WriteEventSink> metrics_sink_;
void* google_specific_ = nullptr;
int64_t max_frame_size_ = 1024 * 1024;
};
/// Writes data out on the connection.
///
@ -248,11 +354,22 @@ class EventEngine : public std::enable_shared_from_this<EventEngine>,
/// statuses to \a on_writable. For example, callbacks might expect to
/// receive CANCELLED on endpoint shutdown.
virtual bool Write(absl::AnyInvocable<void(absl::Status)> on_writable,
SliceBuffer* data, const WriteArgs* args) = 0;
SliceBuffer* data, WriteArgs args) = 0;
/// Returns an address in the format described in DNSResolver. The returned
/// values are expected to remain valid for the life of the Endpoint.
virtual const ResolvedAddress& GetPeerAddress() const = 0;
virtual const ResolvedAddress& GetLocalAddress() const = 0;
/// Returns the list of write metrics that the endpoint supports.
/// The keys are used to identify the metrics in the GetMetricName and
/// GetMetricKey APIs. The current value of the metric can be queried by
/// adding a WriteEventSink to the WriteArgs of a Write call.
virtual std::vector<size_t> AllWriteMetrics() = 0;
/// Returns the name of the write metric with the given key.
/// If the key is not found, returns std::nullopt.
virtual std::optional<absl::string_view> GetMetricName(size_t key) = 0;
/// Returns the key of the write metric with the given name.
/// If the name is not found, returns std::nullopt.
virtual std::optional<size_t> GetMetricKey(absl::string_view name) = 0;
};
/// Called when a new connection is established.
@ -334,7 +451,7 @@ class EventEngine : public std::enable_shared_from_this<EventEngine>,
/// when the object is destroyed and all pending callbacks will be called
/// shortly. If cancellation races with request completion, implementations
/// may choose to either cancel or satisfy the request.
class DNSResolver {
class DNSResolver : public Extensible {
public:
/// Optional configuration for DNSResolvers.
struct ResolverOptions {

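The hunks above move Endpoint::Read and Endpoint::Write from const ReadArgs* / const WriteArgs* parameters to move-only ReadArgs/WriteArgs passed by value, and add WriteEventSink for per-write telemetry. Below is a minimal sketch of how a caller might drive the new Write signature; the endpoint and buffer variables, the frame-size value, and the callback bodies are illustrative assumptions, not code from this change.

#include <vector>

#include <grpc/event_engine/event_engine.h>
#include "absl/status/status.h"
#include "absl/time/time.h"

using ::grpc_event_engine::experimental::EventEngine;
using ::grpc_event_engine::experimental::SliceBuffer;

// Sketch only: `endpoint` is assumed to be an already-connected Endpoint and
// `data` a SliceBuffer the caller has filled.
void WriteWithMetrics(EventEngine::Endpoint& endpoint, SliceBuffer& data) {
  EventEngine::Endpoint::WriteArgs args;
  args.set_max_frame_size(64 * 1024);  // hint: frame outgoing data at ~64 KiB
  // Ask to be told when this write is sent and acked; no per-event metric
  // keys are requested here (empty span).
  args.set_metrics_sink(EventEngine::Endpoint::WriteEventSink(
      /*requested_metrics=*/{},
      {EventEngine::Endpoint::WriteEvent::kSent,
       EventEngine::Endpoint::WriteEvent::kAcked},
      [](EventEngine::Endpoint::WriteEvent event, absl::Time when,
         std::vector<EventEngine::Endpoint::WriteMetric> /*metrics*/) {
        // Record `event` at `when` however the application sees fit.
      }));
  // WriteArgs is move-only under the new API, so it is passed by value.
  endpoint.Write([](absl::Status /*status*/) { /* write finished */ },
                 &data, std::move(args));
}
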
View File

@ -0,0 +1,34 @@
// Copyright 2022 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_EVENT_ENGINE_INTERNAL_WRITE_EVENT_H
#define GRPC_EVENT_ENGINE_INTERNAL_WRITE_EVENT_H
namespace grpc_event_engine::experimental::internal {
// Use of this enum via this name is internal to gRPC.
// API users should get this enumeration via the
// EventEngine::Endpoint::WriteEvent.
enum class WriteEvent {
kSendMsg,
kScheduled,
kSent,
kAcked,
kClosed,
kCount // Must be last.
};
} // namespace grpc_event_engine::experimental::internal
#endif // GRPC_EVENT_ENGINE_INTERNAL_WRITE_EVENT_H

View File

@ -51,11 +51,13 @@
* server will close the connection. Int valued, milliseconds. INT_MAX means
* unlimited. Defaults to INT_MAX. */
#define GRPC_ARG_MAX_CONNECTION_IDLE_MS "grpc.max_connection_idle_ms"
/** Maximum time that a channel may exist. Int valued, milliseconds.
* INT_MAX means unlimited. Defaults to INT_MAX. */
/** Maximum amount of time in milliseconds that a connection may exist before it
* will be gracefully shut down. Refer
* https://github.com/grpc/proposal/blob/master/A9-server-side-conn-mgt.md for
* more details. Int valued, defaults to INT_MAX (disabled). */
#define GRPC_ARG_MAX_CONNECTION_AGE_MS "grpc.max_connection_age_ms"
/** Grace period after the channel reaches its max age. Int valued,
milliseconds. INT_MAX means unlimited. Defaults to INT_MAX. */
/** Grace period in milliseconds after connection reaches its max age for
* outstanding RPCs to complete. Int valued, defaults to INT_MAX (disabled). */
#define GRPC_ARG_MAX_CONNECTION_AGE_GRACE_MS "grpc.max_connection_age_grace_ms"
/** Timeout after the last RPC finishes on the client channel at which the
* channel goes back into IDLE state. Int valued, milliseconds. INT_MAX means
@ -168,13 +170,19 @@
/** Secondary user agent: goes at the end of the user-agent metadata
sent on each request. A string. */
#define GRPC_ARG_SECONDARY_USER_AGENT_STRING "grpc.secondary_user_agent"
/** The minimum time between subsequent connection attempts, in ms. Defaults to
* 20 seconds. */
/** The minimum time between subsequent connection attempts, in ms. Refer to
* MIN_CONNECT_TIMEOUT from
* https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Defaults
* to 20 seconds. */
#define GRPC_ARG_MIN_RECONNECT_BACKOFF_MS "grpc.min_reconnect_backoff_ms"
/** The maximum time between subsequent connection attempts, in ms. Defaults to
* 120 seconds. */
/** The maximum time between subsequent connection attempts, in ms. Refer to
* MAX_BACKOFF from
* https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Defaults
* to 120 seconds. */
#define GRPC_ARG_MAX_RECONNECT_BACKOFF_MS "grpc.max_reconnect_backoff_ms"
/** The time between the first and second connection attempts, in ms. Defaults
/** The time between the first and second connection attempts, in ms. Refer to
* INITIAL_BACKOFF from
* https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Defaults
* to 1 second. */
#define GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS \
"grpc.initial_reconnect_backoff_ms"

View File

@ -41,8 +41,9 @@ extern "C" {
* Its value is an int from the \a grpc_compression_algorithm enum. */
#define GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM \
"grpc.default_compression_algorithm"
/** Default compression level for the channel.
* Its value is an int from the \a grpc_compression_level enum. */
/** Set the default compression level for the channel.
* Valid values are defined by the enum type grpc_compression_level, defaults to
* GRPC_COMPRESS_LEVEL_NONE. */
#define GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL "grpc.default_compression_level"
/** Compression algorithms supported by the channel.
* Its value is a bitset (an int). Bits correspond to algorithms in \a

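On the server side this default is usually set through ServerBuilder, which is expected to translate into the channel argument documented above; a brief sketch with an arbitrarily chosen algorithm and level:

#include <grpc/compression.h>
#include <grpcpp/server_builder.h>

// Ask the server to gzip responses at a high compression level by default.
void ConfigureDefaultCompression(grpc::ServerBuilder& builder) {
  builder.SetDefaultCompressionAlgorithm(GRPC_COMPRESS_GZIP);
  builder.SetDefaultCompressionLevel(GRPC_COMPRESS_LEVEL_HIGH);
}
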
View File

@ -73,7 +73,7 @@ struct grpc_slice {
} data;
};
#define GRPC_SLICE_BUFFER_INLINE_ELEMENTS 6
#define GRPC_SLICE_BUFFER_INLINE_ELEMENTS 3
/** Represents an expandable array of slices, to be interpreted as a
single item. */

View File

@ -22,6 +22,7 @@
#include <map>
#include <string>
#include <type_traits>
#include <utility>
#include <variant>
#include <vector>
@ -72,27 +73,8 @@ class Json {
json.value_ = NumberValue{std::move(str)};
return json;
}
static Json FromNumber(int32_t value) {
Json json;
json.value_ = NumberValue{absl::StrCat(value)};
return json;
}
static Json FromNumber(uint32_t value) {
Json json;
json.value_ = NumberValue{absl::StrCat(value)};
return json;
}
static Json FromNumber(int64_t value) {
Json json;
json.value_ = NumberValue{absl::StrCat(value)};
return json;
}
static Json FromNumber(uint64_t value) {
Json json;
json.value_ = NumberValue{absl::StrCat(value)};
return json;
}
static Json FromNumber(double value) {
template <typename T>
static std::enable_if_t<std::is_arithmetic_v<T>, Json> FromNumber(T value) {
Json json;
json.value_ = NumberValue{absl::StrCat(value)};
return json;

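The hunk above folds the separate int32/uint32/int64/uint64/double overloads into a single template constrained to arithmetic types. A standalone sketch of the same enable_if pattern, using a toy function rather than gRPC's internal Json class:

#include <string>
#include <type_traits>

#include "absl/strings/str_cat.h"

// One constrained template replaces a family of per-type overloads: any
// arithmetic type (integral or floating point) is stringified the same way,
// everything else is rejected at compile time.
template <typename T>
std::enable_if_t<std::is_arithmetic_v<T>, std::string> ToJsonNumber(T value) {
  return absl::StrCat(value);
}

static_assert(std::is_same_v<decltype(ToJsonNumber(42)), std::string>);
static_assert(std::is_same_v<decltype(ToJsonNumber(3.5)), std::string>);
// ToJsonNumber("not a number");  // would fail to compile
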
View File

@ -26,17 +26,20 @@
#define GRPC_DEPRECATED(reason)
#endif // __cplusplus >= 201402L
#ifndef GPR_DISABLE_ABSEIL_SYNC
/*
* Defines GPR_ABSEIL_SYNC to use synchronization features from Abseil
*
* You can opt for gRPC's native synchronization by enabling
* GPR_DISABLE_ABSEIL_SYNC. However, this flag is temporary and will be
* removed once the Abseil synchronization is stabilized.
* If you encounter any issues with this feature, please report them
* by filing a bug at https://github.com/grpc/grpc.
*/
#ifndef GPR_ABSEIL_SYNC
#if defined(__APPLE__)
// This is disabled on Apple platforms because macos/grpc_basictests_c_cpp
// fails with this. https://github.com/grpc/grpc/issues/23661
#else
#define GPR_ABSEIL_SYNC 1
#endif
#endif // GPR_ABSEIL_SYNC
#endif // GPR_DISABLE_ABSEIL_SYNC
/* Get windows.h included everywhere (we need it) */
#if defined(_WIN64) || defined(WIN64) || defined(_WIN32) || defined(WIN32)

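A small compile-time sketch of the opt-out described above; how the macro gets defined (here a plain #define, typically a -D flag from the build system) is an assumption, not something this diff prescribes.

// Keep gRPC's native synchronization instead of Abseil's.
#define GPR_DISABLE_ABSEIL_SYNC 1

#include <grpc/support/port_platform.h>

#if defined(GPR_ABSEIL_SYNC)
#error "GPR_ABSEIL_SYNC should remain undefined when the opt-out is set"
#endif
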
View File

@ -116,8 +116,8 @@ class CallbackUnaryHandler : public grpc::internal::MethodHandler {
// A callback that only contains a call to MaybeDone can be run as an
// inline callback regardless of whether or not OnDone is inlineable
// because if the actual OnDone callback needs to be scheduled, MaybeDone
// is responsible for dispatching to an executor thread if needed. Thus,
// when setting up the finish_tag_, we can set its own callback to
// is responsible for dispatching to an EventEngine thread if needed.
// Thus, when setting up the finish_tag_, we can set its own callback to
// inlineable.
finish_tag_.Set(
call_.call(),
@ -152,9 +152,9 @@ class CallbackUnaryHandler : public grpc::internal::MethodHandler {
this->Ref();
// The callback for this function should not be marked inline because it
// is directly invoking a user-controlled reaction
// (OnSendInitialMetadataDone). Thus it must be dispatched to an executor
// thread. However, any OnDone needed after that can be inlined because it
// is already running on an executor thread.
// (OnSendInitialMetadataDone). Thus it must be dispatched to an
// EventEngine thread. However, any OnDone needed after that can be
// inlined because it is already running on an EventEngine thread.
meta_tag_.Set(
call_.call(),
[this](bool ok) {
@ -340,7 +340,7 @@ class CallbackClientStreamingHandler : public grpc::internal::MethodHandler {
this->Ref();
// The callback for this function should not be inlined because it invokes
// a user-controlled reaction, but any resulting OnDone can be inlined in
// the executor to which this callback is dispatched.
// the EventEngine thread to which this callback is dispatched.
meta_tag_.Set(
call_.call(),
[this](bool ok) {
@ -380,7 +380,7 @@ class CallbackClientStreamingHandler : public grpc::internal::MethodHandler {
reactor_.store(reactor, std::memory_order_relaxed);
// The callback for this function should not be inlined because it invokes
// a user-controlled reaction, but any resulting OnDone can be inlined in
// the executor to which this callback is dispatched.
// the EventEngine thread to which this callback is dispatched.
read_tag_.Set(
call_.call(),
[this, reactor](bool ok) {
@ -544,7 +544,7 @@ class CallbackServerStreamingHandler : public grpc::internal::MethodHandler {
this->Ref();
// The callback for this function should not be inlined because it invokes
// a user-controlled reaction, but any resulting OnDone can be inlined in
// the executor to which this callback is dispatched.
// the EventEngine thread to which this callback is dispatched.
meta_tag_.Set(
call_.call(),
[this](bool ok) {
@ -607,7 +607,7 @@ class CallbackServerStreamingHandler : public grpc::internal::MethodHandler {
reactor_.store(reactor, std::memory_order_relaxed);
// The callback for this function should not be inlined because it invokes
// a user-controlled reaction, but any resulting OnDone can be inlined in
// the executor to which this callback is dispatched.
// the EventEngine thread to which this callback is dispatched.
write_tag_.Set(
call_.call(),
[this, reactor](bool ok) {
@ -756,7 +756,7 @@ class CallbackBidiHandler : public grpc::internal::MethodHandler {
this->Ref();
// The callback for this function should not be inlined because it invokes
// a user-controlled reaction, but any resulting OnDone can be inlined in
// the executor to which this callback is dispatched.
// the EventEngine thread to which this callback is dispatched.
meta_tag_.Set(
call_.call(),
[this](bool ok) {
@ -821,7 +821,7 @@ class CallbackBidiHandler : public grpc::internal::MethodHandler {
reactor_.store(reactor, std::memory_order_relaxed);
// The callbacks for these functions should not be inlined because they
// invoke user-controlled reactions, but any resulting OnDones can be
// inlined in the executor to which a callback is dispatched.
// inlined in the EventEngine thread to which a callback is dispatched.
write_tag_.Set(
call_.call(),
[this, reactor](bool ok) {

View File

@ -501,9 +501,9 @@ class ServerContextBase {
void OnCancel() override {}
void OnDone() override {}
// Override InternalInlineable for this class since its reactions are
// trivial and thus do not need to be run from the executor (triggering a
// thread hop). This should only be used by internal reactors (thus the
// name) and not by user application code.
// trivial and thus do not need to be run from the EventEngine (potentially
// triggering a thread hop). This should only be used by internal reactors
// (thus the name) and not by user application code.
bool InternalInlineable() override { return true; }
};

View File

@ -636,8 +636,8 @@ class ClientCallbackReaderWriterImpl
// like StartCall or RemoveHold. If this is the last operation or hold on this
// object, it will invoke the OnDone reaction. If MaybeFinish was called from
// a reaction, it can call OnDone directly. If not, it would need to schedule
// OnDone onto an executor thread to avoid the possibility of deadlocking with
// any locks in the user code that invoked it.
// OnDone onto an EventEngine thread to avoid the possibility of deadlocking
// with any locks in the user code that invoked it.
void MaybeFinish(bool from_reaction) {
if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub(
1, std::memory_order_acq_rel) == 1)) {

View File

@ -56,7 +56,7 @@ class ServerReactor {
virtual void OnCancel() = 0;
// The following is not API. It is for internal use only and specifies whether
// all reactions of this Reactor can be run without an extra executor
// all reactions of this Reactor can be run without extra EventEngine
// scheduling. This should only be used for internally-defined reactors with
// trivial reactions.
virtual bool InternalInlineable() { return false; }
@ -90,7 +90,7 @@ class ServerCallbackCall {
// advance (used for the ServerContext CompletionOp), and one for where we
// know the inlineability of the OnDone reaction. You should set the inline
// flag to true if either the Reactor is InternalInlineable() or if this
// callback is already being forced to run dispatched to an executor
// callback is already being forced to run dispatched to an EventEngine thread
// (typically because it contains additional work than just the MaybeDone).
void MaybeDone() {
@ -141,12 +141,12 @@ class ServerCallbackCall {
// ever invoked on a fully-Unref'fed ServerCallbackCall.
virtual void CallOnDone() = 0;
// If the OnDone reaction is inlineable, execute it inline. Otherwise send it
// to an executor.
// If the OnDone reaction is inlineable, execute it inline. Otherwise run it
// async on EventEngine.
void ScheduleOnDone(bool inline_ondone);
// If the OnCancel reaction is inlineable, execute it inline. Otherwise send
// it to an executor.
// If the OnCancel reaction is inlineable, execute it inline. Otherwise run it
// async on EventEngine.
void CallOnCancel(ServerReactor* reactor);
// Implement the cancellation constraint counter. Return true if OnCancel

View File

@ -19,9 +19,9 @@
#define GRPCPP_VERSION_INFO_H
#define GRPC_CPP_VERSION_MAJOR 1
#define GRPC_CPP_VERSION_MINOR 72
#define GRPC_CPP_VERSION_MINOR 73
#define GRPC_CPP_VERSION_PATCH 1
#define GRPC_CPP_VERSION_TAG ""
#define GRPC_CPP_VERSION_STRING "1.72.1"
#define GRPC_CPP_VERSION_STRING "1.73.1"
#endif // GRPCPP_VERSION_INFO_H
#endif // GRPCPP_VERSION_INFO_H

View File

@ -28,10 +28,14 @@ targets = (
'grpc_authorization_provider',
'gpr',
'upb_base_lib',
'upb_hash_lib',
'upb_lex_lib',
'upb_mem_lib',
'upb_message_lib',
'upb_mini_descriptor_lib',
'upb_mini_table_lib',
'upb_json_lib',
'upb_reflection_lib',
'upb_textformat_lib',
'upb_wire_lib',
'utf8_range_lib',

View File

@ -1316,7 +1316,7 @@ template <typename T>
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline OperationExecutor<
T>::~OperationExecutor() {
if (promise_data_ != nullptr) {
ops_->early_destroy(promise_data_);
if (ops_ != end_ops_) ops_->early_destroy(promise_data_);
gpr_free_aligned(promise_data_);
}
}
@ -1349,7 +1349,10 @@ OperationExecutor<T>::InitStep(T input, void* call_data) {
ops_->promise_init(promise_data_, Offset(call_data, ops_->call_offset),
ops_->channel_data, std::move(input));
if (auto* r = p.value_if_ready()) {
if (r->ok == nullptr) return std::move(*r);
if (r->ok == nullptr) {
ops_ = end_ops_;
return std::move(*r);
}
input = std::move(r->ok);
++ops_;
continue;
@ -1443,20 +1446,20 @@ constexpr bool MethodHasChannelAccess<R (T::*)()> = false;
template <typename T, typename R, typename A, typename C>
constexpr bool MethodHasChannelAccess<R (T::*)(A, C)> = true;
template <auto... Ts>
constexpr bool AnyMethodHasChannelAccess =
(MethodHasChannelAccess<decltype(Ts)> || ...);
template <typename... Ts>
constexpr bool AnyMethodHasChannelAccess = (MethodHasChannelAccess<Ts> || ...);
// Composite for a given channel type to determine if any of its interceptors
// fall into this category: later code should use this.
template <typename Derived>
inline constexpr bool CallHasChannelAccess() {
return AnyMethodHasChannelAccess<&Derived::Call::OnClientInitialMetadata,
&Derived::Call::OnClientToServerMessage,
&Derived::Call::OnServerInitialMetadata,
&Derived::Call::OnServerToClientMessage,
&Derived::Call::OnServerTrailingMetadata,
&Derived::Call::OnFinalize>;
return AnyMethodHasChannelAccess<
decltype(&Derived::Call::OnClientInitialMetadata),
decltype(&Derived::Call::OnClientToServerMessage),
decltype(&Derived::Call::OnServerInitialMetadata),
decltype(&Derived::Call::OnServerToClientMessage),
decltype(&Derived::Call::OnServerTrailingMetadata),
decltype(&Derived::Call::OnFinalize)>;
}
} // namespace filters_detail
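The replacement above switches AnyMethodHasChannelAccess from a pack of member-pointer values (template <auto...>) to a pack of their types, folded with ||. A self-contained sketch of that detection pattern, with toy call types standing in for the real filter interfaces:

#include <type_traits>

// Primary: by default a member function is assumed not to take a channel argument.
template <typename MethodPtr>
constexpr bool MethodHasChannelAccess = false;

// Specialization: R (T::*)(Arg, Channel) is considered to have channel access.
template <typename T, typename R, typename Arg, typename Channel>
constexpr bool MethodHasChannelAccess<R (T::*)(Arg, Channel)> = true;

// Fold over the decltypes of the member pointers we care about.
template <typename... MethodPtrs>
constexpr bool AnyMethodHasChannelAccess =
    (MethodHasChannelAccess<MethodPtrs> || ...);

struct NarrowCall {
  void OnMessage(int msg);                 // no channel parameter
};
struct WideCall {
  void OnMessage(int msg, void* channel);  // takes a channel-like argument
};

static_assert(!AnyMethodHasChannelAccess<decltype(&NarrowCall::OnMessage)>);
static_assert(AnyMethodHasChannelAccess<decltype(&WideCall::OnMessage)>);
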
@ -1667,7 +1670,9 @@ class CallFilters {
}
return FinishStep(executor_.Start(
&(stack_current_->stack->data_.*layout),
std::move(filters_->*input_location), filters_->call_data_));
std::move(filters_->*input_location),
filters_detail::Offset(filters_->call_data_,
stack_current_->call_data_offset)));
} else {
return FinishStep(executor_.Step(filters_->call_data_));
}
@ -1684,9 +1689,10 @@ class CallFilters {
(filters_->call_state_.*on_done)();
return ValueOrFailure<Output>{std::move(r->ok)};
}
return FinishStep(
executor_.Start(&(stack_current_->stack->data_.*layout),
std::move(r->ok), filters_->call_data_));
return FinishStep(executor_.Start(
&(stack_current_->stack->data_.*layout), std::move(r->ok),
filters_detail::Offset(filters_->call_data_,
stack_current_->call_data_offset)));
}
(filters_->call_state_.*on_done)();
filters_->PushServerTrailingMetadata(std::move(r->error));
@ -1724,7 +1730,9 @@ class CallFilters {
}
return FinishStep(executor_.Start(
&(stack_current_->stack->data_.*layout),
std::move(filters_->*input_location), filters_->call_data_));
std::move(filters_->*input_location),
filters_detail::Offset(filters_->call_data_,
stack_current_->call_data_offset)));
} else {
return FinishStep(executor_.Step(filters_->call_data_));
}
@ -1739,9 +1747,10 @@ class CallFilters {
if (stack_current_ == stack_end_) {
return NextMsg{std::move(r->ok), &filters_->call_state_};
}
return FinishStep(
executor_.Start(&(stack_current_->stack->data_.*layout),
std::move(r->ok), filters_->call_data_));
return FinishStep(executor_.Start(
&(stack_current_->stack->data_.*layout), std::move(r->ok),
filters_detail::Offset(filters_->call_data_,
stack_current_->call_data_offset)));
}
(filters_->call_state_.*on_done)();
filters_->PushServerTrailingMetadata(std::move(r->error));

View File

@ -144,6 +144,8 @@ class CallSpine final : public Party {
template <typename StatusType>
void CancelIfFailed(const StatusType& r) {
if (!IsStatusOk(r)) {
GRPC_TRACE_LOG(call_state, INFO)
<< "[call_state] spine " << this << " fails: " << r;
Cancel();
}
}

View File

@ -62,6 +62,16 @@ class ServerMetadataOrHandle {
return value_;
}
ServerMetadataHandle TakeMetadata() && {
CHECK(!ok());
return std::move(server_metadata_);
}
ValueType TakeValue() && {
CHECK(ok());
return std::move(value_);
}
private:
ServerMetadataOrHandle(ServerMetadataHandle server_metadata, ValueType value)
: server_metadata_(std::move(server_metadata)),
@ -85,6 +95,13 @@ struct FailureStatusCastImpl<ServerMetadataOrHandle<T>, ServerMetadataHandle&> {
}
};
template <>
struct FailureStatusCastImpl<ServerMetadataHandle, ServerMetadataHandle&> {
static ServerMetadataHandle Cast(ServerMetadataHandle& t) {
return std::move(t);
}
};
template <typename T>
inline bool IsStatusOk(const ServerMetadataOrHandle<T>& x) {
return x.ok();

View File

@ -29,7 +29,9 @@
#include "src/core/channelz/channelz.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/util/ref_counted_ptr.h"
#include "src/core/util/string.h"
#include "src/core/util/sync.h"
#include "src/core/util/time.h"
namespace grpc_core {
@ -52,41 +54,8 @@ ChannelTrace::TraceEvent::TraceEvent(Severity severity, const grpc_slice& data)
ChannelTrace::TraceEvent::~TraceEvent() { CSliceUnref(data_); }
namespace {
const char* SeverityString(ChannelTrace::Severity severity) {
switch (severity) {
case ChannelTrace::Severity::Info:
return "CT_INFO";
case ChannelTrace::Severity::Warning:
return "CT_WARNING";
case ChannelTrace::Severity::Error:
return "CT_ERROR";
default:
GPR_UNREACHABLE_CODE(return "CT_UNKNOWN");
}
}
} // anonymous namespace
Json ChannelTrace::TraceEvent::RenderTraceEvent() const {
char* description = grpc_slice_to_c_string(data_);
Json::Object object = {
{"description", Json::FromString(description)},
{"severity", Json::FromString(SeverityString(severity_))},
{"timestamp", Json::FromString(gpr_format_timespec(timestamp_))},
};
gpr_free(description);
if (referenced_entity_ != nullptr) {
const bool is_channel =
(referenced_entity_->type() == BaseNode::EntityType::kTopLevelChannel ||
referenced_entity_->type() == BaseNode::EntityType::kInternalChannel);
object[is_channel ? "channelRef" : "subchannelRef"] = Json::FromObject({
{(is_channel ? "channelId" : "subchannelId"),
Json::FromString(absl::StrCat(referenced_entity_->uuid()))},
});
}
return Json::FromObject(std::move(object));
RefCountedPtr<BaseNode> ChannelTrace::TraceEvent::referenced_entity() const {
return referenced_entity_;
}
//
@ -115,9 +84,8 @@ void ChannelTrace::AddTraceEventHelper(TraceEvent* new_trace_event) {
// first event case
if (head_trace_ == nullptr) {
head_trace_ = tail_trace_ = new_trace_event;
}
// regular event add case
else {
} else {
// regular event add case
tail_trace_->set_next(new_trace_event);
tail_trace_ = tail_trace_->next();
}
@ -151,6 +119,25 @@ void ChannelTrace::AddTraceEventWithReference(
new TraceEvent(severity, data, std::move(referenced_entity)));
}
std::string ChannelTrace::TraceEvent::description() const {
char* description = grpc_slice_to_c_string(data_);
std::string s(description);
gpr_free(description);
return s;
}
void ChannelTrace::ForEachTraceEventLocked(
absl::FunctionRef<void(gpr_timespec, Severity, std::string,
RefCountedPtr<BaseNode>)>
callback) const {
TraceEvent* it = head_trace_;
while (it != nullptr) {
callback(it->timestamp(), it->severity(), it->description(),
it->referenced_entity());
it = it->next();
}
}
Json ChannelTrace::RenderJson() const {
// Tracing is disabled if max_event_memory_ == 0.
if (max_event_memory_ == 0) {
@ -166,15 +153,36 @@ Json ChannelTrace::RenderJson() const {
Json::FromString(absl::StrCat(num_events_logged_));
}
// Only add in the event list if it is non-empty.
if (head_trace_ != nullptr) {
Json::Array array;
for (TraceEvent* it = head_trace_; it != nullptr; it = it->next()) {
array.emplace_back(it->RenderTraceEvent());
Json::Array array;
ForEachTraceEventLocked([&array](gpr_timespec timestamp, Severity severity,
std::string description,
RefCountedPtr<BaseNode> referenced_entity) {
Json::Object object = {
{"description", Json::FromString(description)},
{"severity", Json::FromString(SeverityString(severity))},
{"timestamp", Json::FromString(gpr_format_timespec(timestamp))},
};
if (referenced_entity != nullptr) {
const bool is_channel =
(referenced_entity->type() ==
BaseNode::EntityType::kTopLevelChannel ||
referenced_entity->type() == BaseNode::EntityType::kInternalChannel);
object[is_channel ? "channelRef" : "subchannelRef"] = Json::FromObject({
{(is_channel ? "channelId" : "subchannelId"),
Json::FromString(absl::StrCat(referenced_entity->uuid()))},
});
}
array.emplace_back(Json::FromObject(std::move(object)));
});
if (!array.empty()) {
object["events"] = Json::FromArray(std::move(array));
}
return Json::FromObject(std::move(object));
}
std::string ChannelTrace::creation_timestamp() const {
return gpr_format_timespec(time_created_);
}
} // namespace channelz
} // namespace grpc_core

View File

@ -24,6 +24,7 @@
#include <grpc/support/time.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
#include "absl/base/thread_annotations.h"
#include "src/core/util/json/json.h"
@ -54,6 +55,19 @@ class ChannelTrace {
Error
};
static const char* SeverityString(ChannelTrace::Severity severity) {
switch (severity) {
case ChannelTrace::Severity::Info:
return "CT_INFO";
case ChannelTrace::Severity::Warning:
return "CT_WARNING";
case ChannelTrace::Severity::Error:
return "CT_ERROR";
default:
GPR_UNREACHABLE_CODE(return "CT_UNKNOWN");
}
}
// Adds a new trace event to the tracing object
//
// NOTE: each ChannelTrace tracks the memory used by its list of trace
@ -82,6 +96,19 @@ class ChannelTrace {
// object may incorporate the json before rendering.
Json RenderJson() const;
void ForEachTraceEvent(
absl::FunctionRef<void(gpr_timespec, Severity, std::string,
RefCountedPtr<BaseNode>)>
callback) const {
MutexLock lock(&mu_);
ForEachTraceEventLocked(callback);
}
std::string creation_timestamp() const;
uint64_t num_events_logged() const {
MutexLock lock(&mu_);
return num_events_logged_;
}
private:
friend size_t testing::GetSizeofTraceEvent(void);
@ -99,15 +126,15 @@ class ChannelTrace {
~TraceEvent();
// Renders the data inside of this TraceEvent into a json object. This is
// used by the ChannelTrace, when it is rendering itself.
Json RenderTraceEvent() const;
// set and get for the next_ pointer.
TraceEvent* next() const { return next_; }
void set_next(TraceEvent* next) { next_ = next; }
size_t memory_usage() const { return memory_usage_; }
gpr_timespec timestamp() const { return timestamp_; }
Severity severity() const { return severity_; }
std::string description() const;
RefCountedPtr<BaseNode> referenced_entity() const;
private:
const gpr_timespec timestamp_;
@ -121,6 +148,10 @@ class ChannelTrace {
// Internal helper to add and link in a trace event
void AddTraceEventHelper(TraceEvent* new_trace_event);
void ForEachTraceEventLocked(
absl::FunctionRef<void(gpr_timespec, Severity, std::string,
RefCountedPtr<BaseNode>)>) const
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
const size_t max_event_memory_;
const gpr_timespec time_created_;

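With this refactor, RenderJson is built on the new ForEachTraceEvent/ForEachTraceEventLocked walk, and other consumers can iterate trace events directly. A hedged usage sketch; the include paths and the logging body are assumptions:

#include <string>

#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "src/core/channelz/channel_trace.h"
#include "src/core/util/string.h"  // gpr_format_timespec (path assumed)

namespace grpc_core {
namespace channelz {

// Log every recorded trace event in a human-readable form.
void LogTraceEvents(const ChannelTrace& trace) {
  trace.ForEachTraceEvent(
      [](gpr_timespec timestamp, ChannelTrace::Severity severity,
         std::string description, RefCountedPtr<BaseNode> referenced_entity) {
        std::string line = absl::StrCat(
            gpr_format_timespec(timestamp), " ",
            ChannelTrace::SeverityString(severity), ": ", description);
        if (referenced_entity != nullptr) {
          absl::StrAppend(&line, " (entity ", referenced_entity->uuid(), ")");
        }
        LOG(INFO) << line;
      });
}

}  // namespace channelz
}  // namespace grpc_core
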
View File

@ -25,6 +25,10 @@
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <initializer_list>
#include <limits>
#include <string>
#include <tuple>
#include "absl/log/check.h"
#include "absl/status/statusor.h"
@ -39,12 +43,123 @@
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/util/json/json_writer.h"
#include "src/core/util/string.h"
#include "src/core/util/time.h"
#include "src/core/util/uri.h"
#include "src/core/util/useful.h"
namespace grpc_core {
namespace channelz {
//
// DataSink
//
namespace {
class ChildObjectCollector {
public:
void Add(RefCountedPtr<BaseNode> node) {
child_objects_[node->type()].insert(node->uuid());
}
void Add(std::vector<RefCountedPtr<BaseNode>> nodes) {
for (auto& node : nodes) Add(std::move(node));
}
// Calls AddAdditionalInfo to export the collected child objects.
void Finalize(DataSink& sink) {
if (child_objects_.empty()) return;
Json::Object subobjects;
for (const auto& [type, child_objects] : child_objects_) {
std::string key;
switch (type) {
case BaseNode::EntityType::kTopLevelChannel:
case BaseNode::EntityType::kSubchannel:
case BaseNode::EntityType::kListenSocket:
case BaseNode::EntityType::kServer:
case BaseNode::EntityType::kInternalChannel: {
LOG(ERROR)
<< "Nodes of type " << BaseNode::EntityTypeString(type)
<< " not supported for child object collection in DataSink";
continue;
}
case BaseNode::EntityType::kSocket:
key = "subSockets";
break;
case BaseNode::EntityType::kCall:
key = "calls";
break;
}
Json::Array uuids;
uuids.reserve(child_objects.size());
for (int64_t uuid : child_objects) {
uuids.push_back(Json::FromNumber(uuid));
}
subobjects[key] = Json::FromArray(std::move(uuids));
}
sink.AddAdditionalInfo("childObjects", std::move(subobjects));
}
private:
std::map<BaseNode::EntityType, std::set<int64_t>> child_objects_;
};
class JsonDataSink final : public DataSink {
public:
explicit JsonDataSink(Json::Object& output) : output_(output) {
CHECK(output_.find("additionalInfo") == output_.end());
}
~JsonDataSink() {
collector_.Finalize(*this);
if (additional_info_ != nullptr) {
output_["additionalInfo"] =
Json::FromObject(std::move(*additional_info_));
}
}
void AddAdditionalInfo(absl::string_view name,
Json::Object additional_info) override {
if (additional_info_ == nullptr) {
additional_info_ = std::make_unique<Json::Object>();
}
additional_info_->emplace(name,
Json::FromObject(std::move(additional_info)));
}
void AddChildObjects(
std::vector<RefCountedPtr<BaseNode>> child_objects) override {
collector_.Add(std::move(child_objects));
}
private:
Json::Object& output_;
std::unique_ptr<Json::Object> additional_info_;
ChildObjectCollector collector_;
};
class ExplicitJsonDataSink final : public DataSink {
public:
void AddAdditionalInfo(absl::string_view name,
Json::Object additional_info) override {
additional_info_.emplace(name,
Json::FromObject(std::move(additional_info)));
}
void AddChildObjects(
std::vector<RefCountedPtr<BaseNode>> child_objects) override {
collector_.Add(std::move(child_objects));
}
Json::Object Finalize() {
collector_.Finalize(*this);
return std::move(additional_info_);
}
private:
Json::Object additional_info_;
ChildObjectCollector collector_;
};
} // namespace
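// Editor's illustrative sketch (not part of the diff): how an
// ExplicitJsonDataSink is typically driven. Data sources write fragments via
// AddAdditionalInfo(), and Finalize() folds any collected child-object uuids
// into the same object. The function and key names below are assumptions.
Json::Object ExampleCollectAdditionalInfo() {
  ExplicitJsonDataSink sink;
  Json::Object fragment;
  int64_t example_value = 42;
  fragment["exampleCounter"] = Json::FromNumber(example_value);
  sink.AddAdditionalInfo("exampleSource", std::move(fragment));
  // Result: {"exampleSource": {"exampleCounter": 42}}
  return sink.Finalize();
}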
//
// BaseNode
//
@ -55,13 +170,95 @@ BaseNode::BaseNode(EntityType type, std::string name)
ChannelzRegistry::Register(this);
}
BaseNode::~BaseNode() { ChannelzRegistry::Unregister(uuid_); }
void BaseNode::Orphaned() { ChannelzRegistry::Unregister(this); }
intptr_t BaseNode::UuidSlow() { return ChannelzRegistry::NumberNode(this); }
std::string BaseNode::RenderJsonString() {
Json json = RenderJson();
return JsonDump(json);
}
void BaseNode::PopulateJsonFromDataSources(Json::Object& json) {
JsonDataSink sink(json);
MutexLock lock(&data_sources_mu_);
for (DataSource* data_source : data_sources_) {
data_source->AddData(sink);
}
}
Json::Object BaseNode::AdditionalInfo() {
ExplicitJsonDataSink sink;
MutexLock lock(&data_sources_mu_);
for (DataSource* data_source : data_sources_) {
data_source->AddData(sink);
}
return sink.Finalize();
}
void BaseNode::RunZTrace(
absl::string_view name, Timestamp deadline,
std::map<std::string, std::string> args,
std::shared_ptr<grpc_event_engine::experimental::EventEngine> event_engine,
absl::AnyInvocable<void(Json)> callback) {
// Limit deadline to help contain potential resource exhaustion due to
// tracing.
deadline = std::min(deadline, Timestamp::Now() + Duration::Minutes(10));
auto fail = [&callback, event_engine](absl::Status status) {
event_engine->Run(
[callback = std::move(callback), status = std::move(status)]() mutable {
Json::Object object;
object["status"] = Json::FromString(status.ToString());
callback(Json::FromObject(std::move(object)));
});
};
std::unique_ptr<ZTrace> ztrace;
{
MutexLock lock(&data_sources_mu_);
for (auto* data_source : data_sources_) {
if (auto found_ztrace = data_source->GetZTrace(name);
found_ztrace != nullptr) {
if (ztrace == nullptr) {
ztrace = std::move(found_ztrace);
} else {
fail(absl::InternalError(
absl::StrCat("Ambiguous ztrace handler: ", name)));
return;
}
}
}
}
if (ztrace == nullptr) {
fail(absl::NotFoundError(absl::StrCat("ztrace not found: ", name)));
return;
}
ztrace->Run(deadline, std::move(args), event_engine, std::move(callback));
}
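// Editor's illustrative sketch (not part of the diff): a minimal ZTrace that
// completes immediately on the event engine; a real handler would collect
// data until `deadline` before invoking the callback. All names here are
// assumptions for illustration.
class ExampleZTrace final : public ZTrace {
 public:
  void Run(Timestamp /*deadline*/, std::map<std::string, std::string> /*args*/,
           std::shared_ptr<grpc_event_engine::experimental::EventEngine>
               event_engine,
           absl::AnyInvocable<void(Json)> callback) override {
    event_engine->Run([callback = std::move(callback)]() mutable {
      Json::Object result;
      result["status"] = Json::FromString("OK");
      callback(Json::FromObject(std::move(result)));
    });
  }
};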
//
// DataSource
//
DataSource::DataSource(RefCountedPtr<BaseNode> node) : node_(std::move(node)) {
if (node_ == nullptr) return;
MutexLock lock(&node_->data_sources_mu_);
node_->data_sources_.push_back(this);
}
DataSource::~DataSource() {
DCHECK(node_ == nullptr) << "DataSource must be ResetDataSource()'d in the "
"most derived class before destruction";
}
void DataSource::ResetDataSource() {
RefCountedPtr<BaseNode> node = std::move(node_);
if (node == nullptr) return;
MutexLock lock(&node->data_sources_mu_);
node->data_sources_.erase(
std::remove(node->data_sources_.begin(), node->data_sources_.end(), this),
node->data_sources_.end());
}
//
// CallCountingHelper
//
@ -80,24 +277,21 @@ void CallCountingHelper::RecordCallSucceeded() {
calls_succeeded_.fetch_add(1, std::memory_order_relaxed);
}
void CallCountingHelper::PopulateCallCounts(Json::Object* json) {
auto calls_started = calls_started_.load(std::memory_order_relaxed);
auto calls_succeeded = calls_succeeded_.load(std::memory_order_relaxed);
auto calls_failed = calls_failed_.load(std::memory_order_relaxed);
auto last_call_started_cycle =
last_call_started_cycle_.load(std::memory_order_relaxed);
//
// CallCounts
//
void CallCounts::PopulateJson(Json::Object& json) const {
if (calls_started != 0) {
(*json)["callsStarted"] = Json::FromString(absl::StrCat(calls_started));
gpr_timespec ts = gpr_convert_clock_type(
gpr_cycle_counter_to_time(last_call_started_cycle), GPR_CLOCK_REALTIME);
(*json)["lastCallStartedTimestamp"] =
Json::FromString(gpr_format_timespec(ts));
json["callsStarted"] = Json::FromString(absl::StrCat(calls_started));
json["lastCallStartedTimestamp"] =
Json::FromString(last_call_started_timestamp());
}
if (calls_succeeded != 0) {
(*json)["callsSucceeded"] = Json::FromString(absl::StrCat(calls_succeeded));
json["callsSucceeded"] = Json::FromString(absl::StrCat(calls_succeeded));
}
if (calls_failed != 0) {
(*json)["callsFailed"] = Json::FromString(absl::StrCat(calls_failed));
json["callsFailed"] = Json::FromString(absl::StrCat(calls_failed));
}
}
@ -121,33 +315,20 @@ void PerCpuCallCountingHelper::RecordCallSucceeded() {
std::memory_order_relaxed);
}
void PerCpuCallCountingHelper::PopulateCallCounts(Json::Object* json) {
int64_t calls_started = 0;
int64_t calls_succeeded = 0;
int64_t calls_failed = 0;
gpr_cycle_counter last_call_started_cycle = 0;
CallCounts PerCpuCallCountingHelper::GetCallCounts() const {
CallCounts call_counts;
for (const auto& cpu : per_cpu_data_) {
calls_started += cpu.calls_started.load(std::memory_order_relaxed);
calls_succeeded += cpu.calls_succeeded.load(std::memory_order_relaxed);
calls_failed += cpu.calls_failed.load(std::memory_order_relaxed);
last_call_started_cycle =
std::max(last_call_started_cycle,
call_counts.calls_started +=
cpu.calls_started.load(std::memory_order_relaxed);
call_counts.calls_succeeded +=
cpu.calls_succeeded.load(std::memory_order_relaxed);
call_counts.calls_failed +=
cpu.calls_failed.load(std::memory_order_relaxed);
call_counts.last_call_started_cycle =
std::max(call_counts.last_call_started_cycle,
cpu.last_call_started_cycle.load(std::memory_order_relaxed));
}
if (calls_started != 0) {
(*json)["callsStarted"] = Json::FromString(absl::StrCat(calls_started));
gpr_timespec ts = gpr_convert_clock_type(
gpr_cycle_counter_to_time(last_call_started_cycle), GPR_CLOCK_REALTIME);
(*json)["lastCallStartedTimestamp"] =
Json::FromString(gpr_format_timespec(ts));
}
if (calls_succeeded != 0) {
(*json)["callsSucceeded"] = Json::FromString(absl::StrCat(calls_succeeded));
}
if (calls_failed != 0) {
(*json)["callsFailed"] = Json::FromString(absl::StrCat(calls_failed));
}
return call_counts;
}
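// Editor's note (not part of the diff): callers snapshot the per-CPU atomics
// with GetCallCounts() and render only the non-zero fields, e.g.
//   Json::Object data;
//   server_node->GetCallCounts().PopulateJson(data);
// where `server_node` is an assumed ServerNode*; `data` may then contain
// "callsStarted", "callsSucceeded", "callsFailed" and
// "lastCallStartedTimestamp".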
//
@ -179,18 +360,48 @@ const char* ChannelNode::GetChannelConnectivityStateChangeString(
GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
Json ChannelNode::RenderJson() {
Json::Object data = {
{"target", Json::FromString(target_)},
};
namespace {
std::set<intptr_t> ChildIdSet(const BaseNode* parent,
BaseNode::EntityType type) {
std::set<intptr_t> ids;
auto [children, _] = ChannelzRegistry::GetChildrenOfType(
0, parent, type, std::numeric_limits<size_t>::max());
for (const auto& node : children) {
ids.insert(node->uuid());
}
return ids;
}
} // namespace
std::set<intptr_t> ChannelNode::child_channels() const {
return ChildIdSet(this, BaseNode::EntityType::kInternalChannel);
}
std::set<intptr_t> ChannelNode::child_subchannels() const {
return ChildIdSet(this, BaseNode::EntityType::kSubchannel);
}
std::optional<std::string> ChannelNode::connectivity_state() {
// Connectivity state.
// If low-order bit is on, then the field is set.
int state_field = connectivity_state_.load(std::memory_order_relaxed);
if ((state_field & 1) != 0) {
grpc_connectivity_state state =
static_cast<grpc_connectivity_state>(state_field >> 1);
return ConnectivityStateName(state);
}
return std::nullopt;
}
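// Editor's note (not part of the diff): SetConnectivityState() stores
// (state << 1) | 1, so the low-order bit marks "value has been set"; e.g.
// GRPC_CHANNEL_READY (2) is stored as 5, and (5 >> 1) == 2 recovers it above.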
Json ChannelNode::RenderJson() {
Json::Object data = {
{"target", Json::FromString(target_)},
};
if (auto cs = connectivity_state(); cs.has_value()) {
data["state"] = Json::FromObject({
{"state", Json::FromString(ConnectivityStateName(state))},
{"state", Json::FromString(cs.value())},
});
}
// Fill in the channel trace if applicable.
@ -199,7 +410,7 @@ Json ChannelNode::RenderJson() {
data["trace"] = std::move(trace_json);
}
// Ask CallCountingHelper to populate call count data.
call_counter_.PopulateCallCounts(&data);
call_counter_.GetCallCounts().PopulateJson(data);
// Construct outer object.
Json::Object json = {
{"ref", Json::FromObject({
@ -210,23 +421,25 @@ Json ChannelNode::RenderJson() {
// Template method. Child classes may override this to add their specific
// functionality.
PopulateChildRefs(&json);
PopulateJsonFromDataSources(json);
return Json::FromObject(std::move(json));
}
void ChannelNode::PopulateChildRefs(Json::Object* json) {
MutexLock lock(&child_mu_);
if (!child_subchannels_.empty()) {
auto child_subchannels = this->child_subchannels();
auto child_channels = this->child_channels();
if (!child_subchannels.empty()) {
Json::Array array;
for (intptr_t subchannel_uuid : child_subchannels_) {
for (intptr_t subchannel_uuid : child_subchannels) {
array.emplace_back(Json::FromObject({
{"subchannelId", Json::FromString(absl::StrCat(subchannel_uuid))},
}));
}
(*json)["subchannelRef"] = Json::FromArray(std::move(array));
}
if (!child_channels_.empty()) {
if (!child_channels.empty()) {
Json::Array array;
for (intptr_t channel_uuid : child_channels_) {
for (intptr_t channel_uuid : child_channels) {
array.emplace_back(Json::FromObject({
{"channelId", Json::FromString(absl::StrCat(channel_uuid))},
}));
@ -241,26 +454,6 @@ void ChannelNode::SetConnectivityState(grpc_connectivity_state state) {
connectivity_state_.store(state_field, std::memory_order_relaxed);
}
void ChannelNode::AddChildChannel(intptr_t child_uuid) {
MutexLock lock(&child_mu_);
child_channels_.insert(child_uuid);
}
void ChannelNode::RemoveChildChannel(intptr_t child_uuid) {
MutexLock lock(&child_mu_);
child_channels_.erase(child_uuid);
}
void ChannelNode::AddChildSubchannel(intptr_t child_uuid) {
MutexLock lock(&child_mu_);
child_subchannels_.insert(child_uuid);
}
void ChannelNode::RemoveChildSubchannel(intptr_t child_uuid) {
MutexLock lock(&child_mu_);
child_subchannels_.erase(child_uuid);
}
//
// SubchannelNode
//
@ -282,13 +475,17 @@ void SubchannelNode::SetChildSocket(RefCountedPtr<SocketNode> socket) {
child_socket_ = std::move(socket);
}
Json SubchannelNode::RenderJson() {
// Create and fill the data child.
std::string SubchannelNode::connectivity_state() const {
grpc_connectivity_state state =
connectivity_state_.load(std::memory_order_relaxed);
return ConnectivityStateName(state);
}
Json SubchannelNode::RenderJson() {
// Create and fill the data child.
Json::Object data = {
{"state", Json::FromObject({
{"state", Json::FromString(ConnectivityStateName(state))},
{"state", Json::FromString(connectivity_state())},
})},
{"target", Json::FromString(target_)},
};
@ -298,7 +495,7 @@ Json SubchannelNode::RenderJson() {
data["trace"] = std::move(trace_json);
}
// Ask CallCountingHelper to populate call count data.
call_counter_.PopulateCallCounts(&data);
call_counter_.GetCallCounts().PopulateJson(data);
// Construct top-level object.
Json::Object object{
{"ref", Json::FromObject({
@ -320,7 +517,8 @@ Json SubchannelNode::RenderJson() {
}),
});
}
return Json::FromObject(object);
PopulateJsonFromDataSources(object);
return Json::FromObject(std::move(object));
}
//
@ -332,51 +530,25 @@ ServerNode::ServerNode(size_t channel_tracer_max_nodes)
ServerNode::~ServerNode() {}
void ServerNode::AddChildSocket(RefCountedPtr<SocketNode> node) {
MutexLock lock(&child_mu_);
child_sockets_.insert(std::pair(node->uuid(), std::move(node)));
}
void ServerNode::RemoveChildSocket(intptr_t child_uuid) {
MutexLock lock(&child_mu_);
child_sockets_.erase(child_uuid);
}
void ServerNode::AddChildListenSocket(RefCountedPtr<ListenSocketNode> node) {
MutexLock lock(&child_mu_);
child_listen_sockets_.insert(std::pair(node->uuid(), std::move(node)));
}
void ServerNode::RemoveChildListenSocket(intptr_t child_uuid) {
MutexLock lock(&child_mu_);
child_listen_sockets_.erase(child_uuid);
}
std::string ServerNode::RenderServerSockets(intptr_t start_socket_id,
intptr_t max_results) {
CHECK_GE(start_socket_id, 0);
CHECK_GE(max_results, 0);
// If the user does not set max_results, we choose 500.
size_t pagination_limit = max_results == 0 ? 500 : max_results;
if (max_results == 0) max_results = 500;
Json::Object object;
{
MutexLock lock(&child_mu_);
size_t sockets_rendered = 0;
// Create list of socket refs.
Json::Array array;
auto it = child_sockets_.lower_bound(start_socket_id);
for (; it != child_sockets_.end() && sockets_rendered < pagination_limit;
++it, ++sockets_rendered) {
array.emplace_back(Json::FromObject({
{"socketId", Json::FromString(absl::StrCat(it->first))},
{"name", Json::FromString(it->second->name())},
}));
}
object["socketRef"] = Json::FromArray(std::move(array));
if (it == child_sockets_.end()) {
object["end"] = Json::FromBool(true);
}
auto [children, end] = ChannelzRegistry::GetChildrenOfType(
start_socket_id, this, BaseNode::EntityType::kSocket, max_results);
// Create list of socket refs.
Json::Array array;
for (const auto& child : children) {
array.emplace_back(Json::FromObject({
{"socketId", Json::FromString(absl::StrCat(child->uuid()))},
{"name", Json::FromString(child->name())},
}));
}
object["socketRef"] = Json::FromArray(std::move(array));
if (end) object["end"] = Json::FromBool(true);
return JsonDump(Json::FromObject(std::move(object)));
}
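// Editor's illustrative sketch (not part of the diff): abridged output shape
// of RenderServerSockets(), with assumed values:
//   {"socketRef": [{"socketId": "42", "name": "ipv4:127.0.0.1:50051"}],
//    "end": true}
// "end" is emitted only when the page reaches the last matching socket.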
@ -388,7 +560,7 @@ Json ServerNode::RenderJson() {
data["trace"] = std::move(trace_json);
}
// Ask CallCountingHelper to populate call count data.
call_counter_.PopulateCallCounts(&data);
call_counter_.GetCallCounts().PopulateJson(data);
// Construct top-level object.
Json::Object object = {
{"ref", Json::FromObject({
@ -397,22 +569,47 @@ Json ServerNode::RenderJson() {
{"data", Json::FromObject(std::move(data))},
};
// Render listen sockets.
{
MutexLock lock(&child_mu_);
if (!child_listen_sockets_.empty()) {
Json::Array array;
for (const auto& it : child_listen_sockets_) {
array.emplace_back(Json::FromObject({
{"socketId", Json::FromString(absl::StrCat(it.first))},
{"name", Json::FromString(it.second->name())},
}));
}
object["listenSocket"] = Json::FromArray(std::move(array));
auto [children, _] = ChannelzRegistry::GetChildrenOfType(
0, this, BaseNode::EntityType::kListenSocket,
std::numeric_limits<size_t>::max());
if (!children.empty()) {
Json::Array array;
for (const auto& child : children) {
array.emplace_back(Json::FromObject({
{"socketId", Json::FromString(absl::StrCat(child->uuid()))},
{"name", Json::FromString(child->name())},
}));
}
object["listenSocket"] = Json::FromArray(std::move(array));
}
PopulateJsonFromDataSources(object);
return Json::FromObject(std::move(object));
}
std::map<intptr_t, RefCountedPtr<ListenSocketNode>>
ServerNode::child_listen_sockets() const {
std::map<intptr_t, RefCountedPtr<ListenSocketNode>> result;
auto [children, _] = ChannelzRegistry::GetChildrenOfType(
0, this, BaseNode::EntityType::kListenSocket,
std::numeric_limits<size_t>::max());
for (const auto& child : children) {
result[child->uuid()] = child->RefAsSubclass<ListenSocketNode>();
}
return result;
}
std::map<intptr_t, RefCountedPtr<SocketNode>> ServerNode::child_sockets()
const {
std::map<intptr_t, RefCountedPtr<SocketNode>> result;
auto [children, _] = ChannelzRegistry::GetChildrenOfType(
0, this, BaseNode::EntityType::kSocket,
std::numeric_limits<size_t>::max());
for (const auto& child : children) {
result[child->uuid()] = child->RefAsSubclass<SocketNode>();
}
return result;
}
//
// SocketNode::Security::Tls
//
@ -644,6 +841,7 @@ Json SocketNode::RenderJson() {
}
PopulateSocketAddressJson(&object, "remote", remote_.c_str());
PopulateSocketAddressJson(&object, "local", local_.c_str());
PopulateJsonFromDataSources(object);
return Json::FromObject(std::move(object));
}
@ -652,7 +850,7 @@ Json SocketNode::RenderJson() {
//
ListenSocketNode::ListenSocketNode(std::string local_addr, std::string name)
: BaseNode(EntityType::kSocket, std::move(name)),
: BaseNode(EntityType::kListenSocket, std::move(name)),
local_addr_(std::move(local_addr)) {}
Json ListenSocketNode::RenderJson() {
@ -663,6 +861,21 @@ Json ListenSocketNode::RenderJson() {
})},
};
PopulateSocketAddressJson(&object, "local", local_addr_.c_str());
PopulateJsonFromDataSources(object);
return Json::FromObject(std::move(object));
}
//
// CallNode
//
Json CallNode::RenderJson() {
Json::Object object = {
{"ref", Json::FromObject({
{"callId", Json::FromString(absl::StrCat(uuid()))},
})},
};
PopulateJsonFromDataSources(object);
return Json::FromObject(std::move(object));
}

View File

@ -27,6 +27,7 @@
#include <atomic>
#include <cstdint>
#include <initializer_list>
#include <map>
#include <optional>
#include <set>
@ -34,18 +35,29 @@
#include <utility>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/string_view.h"
#include "src/core/channelz/channel_trace.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/util/dual_ref_counted.h"
#include "src/core/util/json/json.h"
#include "src/core/util/per_cpu.h"
#include "src/core/util/ref_counted.h"
#include "src/core/util/ref_counted_ptr.h"
#include "src/core/util/string.h"
#include "src/core/util/sync.h"
#include "src/core/util/time.h"
#include "src/core/util/time_precise.h"
#include "src/core/util/useful.h"
// Channel arg key for channelz node.
#define GRPC_ARG_CHANNELZ_CHANNEL_NODE "grpc.internal.channelz_channel_node"
#define GRPC_ARG_CHANNELZ_CHANNEL_NODE \
"grpc.internal.no_subchannel.channelz_channel_node"
// Channel arg key for the containing base node
#define GRPC_ARG_CHANNELZ_CONTAINING_BASE_NODE \
"grpc.internal.no_subchannel.channelz_containing_base_node"
// Channel arg key for indicating an internal channel.
#define GRPC_ARG_CHANNELZ_IS_INTERNAL_CHANNEL \
@ -67,6 +79,8 @@ namespace channelz {
class SocketNode;
class ListenSocketNode;
class DataSource;
class ZTrace;
namespace testing {
class CallCountingHelperPeer;
@ -74,7 +88,7 @@ class SubchannelNodePeer;
} // namespace testing
// base class for all channelz entities
class BaseNode : public RefCounted<BaseNode> {
class BaseNode : public DualRefCounted<BaseNode> {
public:
// There are only four high level channelz entities. However, to support
// GetTopChannelsRequest, we split the Channel entity into two different
@ -84,14 +98,58 @@ class BaseNode : public RefCounted<BaseNode> {
kInternalChannel,
kSubchannel,
kServer,
kListenSocket,
kSocket,
kCall,
};
static absl::string_view EntityTypeString(EntityType type) {
switch (type) {
case EntityType::kTopLevelChannel:
return "top_level_channel";
case EntityType::kInternalChannel:
return "internal_channel";
case EntityType::kSubchannel:
return "subchannel";
case EntityType::kServer:
return "server";
case EntityType::kListenSocket:
return "listen_socket";
case EntityType::kSocket:
return "socket";
case EntityType::kCall:
return "call";
}
return "unknown";
}
protected:
BaseNode(EntityType type, std::string name);
public:
~BaseNode() override;
void Orphaned() override;
bool HasParent(const BaseNode* parent) const {
MutexLock lock(&parent_mu_);
return parents_.find(parent) != parents_.end();
}
void AddParent(BaseNode* parent) {
MutexLock lock(&parent_mu_);
parents_.insert(parent->WeakRef());
}
void RemoveParent(BaseNode* parent) {
MutexLock lock(&parent_mu_);
parents_.erase(parent);
}
static absl::string_view ChannelArgName() {
return GRPC_ARG_CHANNELZ_CONTAINING_BASE_NODE;
}
static int ChannelArgsCompare(const BaseNode* a, const BaseNode* b) {
return QsortCompare(a, b);
}
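// Editor's illustrative sketch (not part of the diff): a parent node is
// typically threaded through channel args using the traits above, e.g.
//   args = args.SetObject(parent_node);
// so that children created under those args can later call AddParent() /
// RemoveParent() on it. The SetObject() call is an assumption here.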
// All children must implement this function.
virtual Json RenderJson() = 0;
@ -101,15 +159,107 @@ class BaseNode : public RefCounted<BaseNode> {
std::string RenderJsonString();
EntityType type() const { return type_; }
intptr_t uuid() const { return uuid_; }
intptr_t uuid() {
const intptr_t id = uuid_.load(std::memory_order_relaxed);
if (id > 0) return id;
return UuidSlow();
}
const std::string& name() const { return name_; }
void RunZTrace(absl::string_view name, Timestamp deadline,
std::map<std::string, std::string> args,
std::shared_ptr<grpc_event_engine::experimental::EventEngine>
event_engine,
absl::AnyInvocable<void(Json output)> callback);
Json::Object AdditionalInfo();
protected:
void PopulateJsonFromDataSources(Json::Object& json);
private:
// to allow the ChannelzRegistry to set uuid_ under its lock.
friend class ChannelzRegistry;
// allow data source to register/unregister itself
friend class DataSource;
using ParentSet = absl::flat_hash_set<WeakRefCountedPtr<BaseNode>,
WeakRefCountedPtrHash<BaseNode>,
WeakRefCountedPtrEq<BaseNode>>;
intptr_t UuidSlow();
const EntityType type_;
intptr_t uuid_;
uint64_t orphaned_index_ = 0; // updated by registry
std::atomic<intptr_t> uuid_;
std::string name_;
Mutex data_sources_mu_;
absl::InlinedVector<DataSource*, 3> data_sources_
ABSL_GUARDED_BY(data_sources_mu_);
BaseNode* prev_; // updated by registry
BaseNode* next_; // updated by registry
mutable Mutex parent_mu_;
ParentSet parents_ ABSL_GUARDED_BY(parent_mu_);
};
class ZTrace {
public:
virtual ~ZTrace() = default;
virtual void Run(Timestamp deadline, std::map<std::string, std::string> args,
std::shared_ptr<grpc_event_engine::experimental::EventEngine>
event_engine,
absl::AnyInvocable<void(Json)>) = 0;
};
class DataSink {
public:
virtual void AddAdditionalInfo(absl::string_view name,
Json::Object additional_info) = 0;
virtual void AddChildObjects(
std::vector<RefCountedPtr<BaseNode>> children) = 0;
protected:
~DataSink() = default;
};
class DataSource {
public:
explicit DataSource(RefCountedPtr<BaseNode> node);
// Add any relevant json fragments to the output.
// This method must not cause the DataSource to be deleted, or else there will
// be a deadlock.
virtual void AddData(DataSink&) {}
// If this data source exports some ztrace, return it here.
virtual std::unique_ptr<ZTrace> GetZTrace(absl::string_view /*name*/) {
return nullptr;
}
protected:
~DataSource();
RefCountedPtr<BaseNode> channelz_node() { return node_; }
// This method must be called in the most derived class's destructor.
// It removes this data source from the node's list of data sources.
// If it is not called, then the AddData() function pointer may be invalid
// when the node is queried.
void ResetDataSource();
private:
RefCountedPtr<BaseNode> node_;
};
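// Editor's illustrative sketch (not part of the diff): a minimal DataSource
// that attaches to a node and honors the ResetDataSource() contract in its
// destructor. The class and key names are assumptions for illustration.
class ExampleDataSource final : public DataSource {
 public:
  explicit ExampleDataSource(RefCountedPtr<BaseNode> node)
      : DataSource(std::move(node)) {}
  ~ExampleDataSource() { ResetDataSource(); }
  void AddData(DataSink& sink) override {
    Json::Object info;
    info["exampleField"] = Json::FromString("example");
    sink.AddAdditionalInfo("exampleDataSource", std::move(info));
  }
};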
struct CallCounts {
int64_t calls_started = 0;
int64_t calls_succeeded = 0;
int64_t calls_failed = 0;
gpr_cycle_counter last_call_started_cycle = 0;
std::string last_call_started_timestamp() const {
return gpr_format_timespec(
gpr_cycle_counter_to_time(last_call_started_cycle));
}
void PopulateJson(Json::Object& json) const;
};
// This class is a helper class for channelz entities that deal with Channels,
@ -124,8 +274,14 @@ class CallCountingHelper final {
void RecordCallFailed();
void RecordCallSucceeded();
// Common rendering of the call count data and last_call_started_timestamp.
void PopulateCallCounts(Json::Object* json);
CallCounts GetCallCounts() const {
return {
calls_started_.load(std::memory_order_relaxed),
calls_succeeded_.load(std::memory_order_relaxed),
calls_failed_.load(std::memory_order_relaxed),
last_call_started_cycle_.load(std::memory_order_relaxed),
};
}
private:
// testing peer friend.
@ -143,8 +299,7 @@ class PerCpuCallCountingHelper final {
void RecordCallFailed();
void RecordCallSucceeded();
// Common rendering of the call count data and last_call_started_timestamp.
void PopulateCallCounts(Json::Object* json);
CallCounts GetCallCounts() const;
private:
// testing peer friend.
@ -167,6 +322,11 @@ class ChannelNode final : public BaseNode {
ChannelNode(std::string target, size_t channel_tracer_max_nodes,
bool is_internal_channel);
void Orphaned() override {
channel_args_ = ChannelArgs();
BaseNode::Orphaned();
}
static absl::string_view ChannelArgName() {
return GRPC_ARG_CHANNELZ_CHANNEL_NODE;
}
@ -190,21 +350,22 @@ class ChannelNode final : public BaseNode {
trace_.AddTraceEventWithReference(severity, data,
std::move(referenced_channel));
}
void SetChannelArgs(const ChannelArgs& channel_args) {
channel_args_ = channel_args;
}
void RecordCallStarted() { call_counter_.RecordCallStarted(); }
void RecordCallFailed() { call_counter_.RecordCallFailed(); }
void RecordCallSucceeded() { call_counter_.RecordCallSucceeded(); }
void SetConnectivityState(grpc_connectivity_state state);
// TODO(roth): take in a RefCountedPtr to the child channel so we can retrieve
// the human-readable name.
void AddChildChannel(intptr_t child_uuid);
void RemoveChildChannel(intptr_t child_uuid);
// TODO(roth): take in a RefCountedPtr to the child subchannel so we can
// retrieve the human-readable name.
void AddChildSubchannel(intptr_t child_uuid);
void RemoveChildSubchannel(intptr_t child_uuid);
const std::string& target() const { return target_; }
std::optional<std::string> connectivity_state();
CallCounts GetCallCounts() const { return call_counter_.GetCallCounts(); }
std::set<intptr_t> child_channels() const;
std::set<intptr_t> child_subchannels() const;
const ChannelTrace& trace() const { return trace_; }
const ChannelArgs& channel_args() const { return channel_args_; }
private:
void PopulateChildRefs(Json::Object* json);
@ -212,14 +373,13 @@ class ChannelNode final : public BaseNode {
std::string target_;
CallCountingHelper call_counter_;
ChannelTrace trace_;
// TODO(ctiller): keeping channel args here can create odd circular references
// that are hard to reason about. Consider moving this to a DataSource.
ChannelArgs channel_args_;
// Least significant bit indicates whether the value is set. Remaining
// bits are a grpc_connectivity_state value.
std::atomic<int> connectivity_state_{0};
Mutex child_mu_; // Guards sets below.
std::set<intptr_t> child_channels_;
std::set<intptr_t> child_subchannels_;
};
// Handles channelz bookkeeping for subchannels
@ -228,6 +388,11 @@ class SubchannelNode final : public BaseNode {
SubchannelNode(std::string target_address, size_t channel_tracer_max_nodes);
~SubchannelNode() override;
void Orphaned() override {
channel_args_ = ChannelArgs();
BaseNode::Orphaned();
}
// Sets the subchannel's connectivity state without health checking.
void UpdateConnectivityState(grpc_connectivity_state state);
@ -242,6 +407,9 @@ class SubchannelNode final : public BaseNode {
void AddTraceEvent(ChannelTrace::Severity severity, const grpc_slice& data) {
trace_.AddTraceEvent(severity, data);
}
void SetChannelArgs(const ChannelArgs& channel_args) {
channel_args_ = channel_args;
}
void AddTraceEventWithReference(ChannelTrace::Severity severity,
const grpc_slice& data,
RefCountedPtr<BaseNode> referenced_channel) {
@ -252,16 +420,29 @@ class SubchannelNode final : public BaseNode {
void RecordCallFailed() { call_counter_.RecordCallFailed(); }
void RecordCallSucceeded() { call_counter_.RecordCallSucceeded(); }
const std::string& target() const { return target_; }
std::string connectivity_state() const;
CallCounts GetCallCounts() const { return call_counter_.GetCallCounts(); }
RefCountedPtr<SocketNode> child_socket() const {
MutexLock lock(&socket_mu_);
return child_socket_;
}
const ChannelTrace& trace() const { return trace_; }
const ChannelArgs& channel_args() const { return channel_args_; }
private:
// Allows the channel trace test to access trace_.
friend class testing::SubchannelNodePeer;
std::atomic<grpc_connectivity_state> connectivity_state_{GRPC_CHANNEL_IDLE};
Mutex socket_mu_;
mutable Mutex socket_mu_;
RefCountedPtr<SocketNode> child_socket_ ABSL_GUARDED_BY(socket_mu_);
std::string target_;
CallCountingHelper call_counter_;
ChannelTrace trace_;
// TODO(ctiller): keeping channel args here can create odd circular references
// that are hard to reason about. Consider moving this to a DataSource.
ChannelArgs channel_args_;
};
// Handles channelz bookkeeping for servers
@ -271,19 +452,16 @@ class ServerNode final : public BaseNode {
~ServerNode() override;
void Orphaned() override {
channel_args_ = ChannelArgs();
BaseNode::Orphaned();
}
Json RenderJson() override;
std::string RenderServerSockets(intptr_t start_socket_id,
intptr_t max_results);
void AddChildSocket(RefCountedPtr<SocketNode> node);
void RemoveChildSocket(intptr_t child_uuid);
void AddChildListenSocket(RefCountedPtr<ListenSocketNode> node);
void RemoveChildListenSocket(intptr_t child_uuid);
// proxy methods to composed classes.
void AddTraceEvent(ChannelTrace::Severity severity, const grpc_slice& data) {
trace_.AddTraceEvent(severity, data);
@ -294,16 +472,28 @@ class ServerNode final : public BaseNode {
trace_.AddTraceEventWithReference(severity, data,
std::move(referenced_channel));
}
void SetChannelArgs(const ChannelArgs& channel_args) {
channel_args_ = channel_args;
}
void RecordCallStarted() { call_counter_.RecordCallStarted(); }
void RecordCallFailed() { call_counter_.RecordCallFailed(); }
void RecordCallSucceeded() { call_counter_.RecordCallSucceeded(); }
CallCounts GetCallCounts() const { return call_counter_.GetCallCounts(); }
std::map<intptr_t, RefCountedPtr<ListenSocketNode>> child_listen_sockets()
const;
std::map<intptr_t, RefCountedPtr<SocketNode>> child_sockets() const;
const ChannelTrace& trace() const { return trace_; }
const ChannelArgs& channel_args() const { return channel_args_; }
private:
PerCpuCallCountingHelper call_counter_;
ChannelTrace trace_;
Mutex child_mu_; // Guards child maps below.
std::map<intptr_t, RefCountedPtr<SocketNode>> child_sockets_;
std::map<intptr_t, RefCountedPtr<ListenSocketNode>> child_listen_sockets_;
// TODO(ctiller): keeping channel args here can create odd circular references
// that are hard to reason about. Consider moving this to a DataSource.
ChannelArgs channel_args_;
};
#define GRPC_ARG_CHANNELZ_SECURITY "grpc.internal.channelz_security"
@ -368,7 +558,51 @@ class SocketNode final : public BaseNode {
const std::string& remote() { return remote_; }
int64_t streams_started() const {
return streams_started_.load(std::memory_order_relaxed);
}
int64_t streams_succeeded() const {
return streams_succeeded_.load(std::memory_order_relaxed);
}
int64_t streams_failed() const {
return streams_failed_.load(std::memory_order_relaxed);
}
int64_t messages_sent() const {
return messages_sent_.load(std::memory_order_relaxed);
}
int64_t messages_received() const {
return messages_received_.load(std::memory_order_relaxed);
}
int64_t keepalives_sent() const {
return keepalives_sent_.load(std::memory_order_relaxed);
}
auto last_local_stream_created_timestamp() const {
return CycleCounterToTimestamp(
last_local_stream_created_cycle_.load(std::memory_order_relaxed));
}
auto last_remote_stream_created_timestamp() const {
return CycleCounterToTimestamp(
last_remote_stream_created_cycle_.load(std::memory_order_relaxed));
}
auto last_message_sent_timestamp() const {
return CycleCounterToTimestamp(
last_message_sent_cycle_.load(std::memory_order_relaxed));
}
auto last_message_received_timestamp() const {
return CycleCounterToTimestamp(
last_message_received_cycle_.load(std::memory_order_relaxed));
}
const std::string& local() const { return local_; }
const std::string& remote() const { return remote_; }
RefCountedPtr<Security> security() const { return security_; }
private:
std::optional<std::string> CycleCounterToTimestamp(
gpr_cycle_counter cycle_counter) const {
return gpr_format_timespec(gpr_cycle_counter_to_time(cycle_counter));
}
std::atomic<int64_t> streams_started_{0};
std::atomic<int64_t> streams_succeeded_{0};
std::atomic<int64_t> streams_failed_{0};
@ -396,6 +630,14 @@ class ListenSocketNode final : public BaseNode {
std::string local_addr_;
};
class CallNode final : public BaseNode {
public:
explicit CallNode(std::string name)
: BaseNode(EntityType::kCall, std::move(name)) {}
Json RenderJson() override;
};
} // namespace channelz
} // namespace grpc_core

View File

@ -24,154 +24,356 @@
#include <grpc/support/string_util.h>
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <cstring>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "src/core/channelz/channelz.h"
#include "src/core/config/config_vars.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/util/json/json.h"
#include "src/core/util/json/json_reader.h"
#include "src/core/util/json/json_writer.h"
#include "src/core/util/shared_bit_gen.h"
#include "src/core/util/sync.h"
namespace grpc_core {
namespace channelz {
namespace {
template <typename T>
std::string RenderArray(std::tuple<T, bool> values_and_end,
const std::string& key) {
auto& [values, end] = values_and_end;
Json::Object object;
if (!values.empty()) {
// Create list of channels.
Json::Array array;
for (size_t i = 0; i < values.size(); ++i) {
array.emplace_back(values[i]->RenderJson());
}
object[key] = Json::FromArray(std::move(array));
}
if (end) {
object["end"] = Json::FromBool(true);
}
return JsonDump(Json::FromObject(std::move(object)));
}
const int kPaginationLimit = 100;
Json RemoveAdditionalInfo(const Json& json) {
switch (json.type()) {
case Json::Type::kArray: {
Json::Array out;
for (const auto& node : json.array()) {
out.emplace_back(RemoveAdditionalInfo(node));
}
return Json::FromArray(std::move(out));
} break;
case Json::Type::kObject: {
Json::Object out;
for (const auto& [key, value] : json.object()) {
if (key == "additionalInfo") continue;
out[key] = RemoveAdditionalInfo(value);
}
return Json::FromObject(std::move(out));
} break;
default:
return json;
}
}
} // anonymous namespace
// TODO(ctiller): Temporary hack to remove fields that are objectionable to the
// protobuf parser (because we've not published them in protobuf yet).
char* ApplyHacks(const std::string& json_str) {
return gpr_strdup(StripAdditionalInfoFromJson(json_str).c_str());
}
} // namespace
std::string StripAdditionalInfoFromJson(absl::string_view json_str) {
auto json = JsonParse(json_str);
if (!json.ok()) return std::string(json_str);
return JsonDump(RemoveAdditionalInfo(*json));
}
ChannelzRegistry* ChannelzRegistry::Default() {
static ChannelzRegistry* singleton = new ChannelzRegistry();
return singleton;
}
void ChannelzRegistry::InternalRegister(BaseNode* node) {
MutexLock lock(&mu_);
node->uuid_ = ++uuid_generator_;
node_map_[node->uuid_] = node;
}
void ChannelzRegistry::InternalUnregister(intptr_t uuid) {
CHECK_GE(uuid, 1);
MutexLock lock(&mu_);
CHECK(uuid <= uuid_generator_);
node_map_.erase(uuid);
}
RefCountedPtr<BaseNode> ChannelzRegistry::InternalGet(intptr_t uuid) {
MutexLock lock(&mu_);
if (uuid < 1 || uuid > uuid_generator_) {
return nullptr;
}
auto it = node_map_.find(uuid);
if (it == node_map_.end()) return nullptr;
// Found node. Return only if its refcount is not zero (i.e., when we
// know that there is no other thread about to destroy it).
BaseNode* node = it->second;
return node->RefIfNonZero();
}
std::string ChannelzRegistry::InternalGetTopChannels(
intptr_t start_channel_id) {
std::vector<RefCountedPtr<BaseNode>> top_level_channels;
RefCountedPtr<BaseNode> node_after_pagination_limit;
{
MutexLock lock(&mu_);
for (auto it = node_map_.lower_bound(start_channel_id);
it != node_map_.end(); ++it) {
BaseNode* node = it->second;
RefCountedPtr<BaseNode> node_ref;
if (node->type() == BaseNode::EntityType::kTopLevelChannel &&
(node_ref = node->RefIfNonZero()) != nullptr) {
// Check if we are over pagination limit to determine if we need to set
// the "end" element. If we don't go through this block, we know that
// when the loop terminates, we have at most kPaginationLimit entries.
// Note that because we have already increased this node's
// refcount, we need to decrease it, but we can't unref while
// holding the lock, because this may lead to a deadlock.
if (top_level_channels.size() == kPaginationLimit) {
node_after_pagination_limit = std::move(node_ref);
break;
}
top_level_channels.emplace_back(std::move(node_ref));
}
}
}
Json::Object object;
if (!top_level_channels.empty()) {
// Create list of channels.
Json::Array array;
for (size_t i = 0; i < top_level_channels.size(); ++i) {
array.emplace_back(top_level_channels[i]->RenderJson());
}
object["channel"] = Json::FromArray(std::move(array));
}
if (node_after_pagination_limit == nullptr) {
object["end"] = Json::FromBool(true);
}
return JsonDump(Json::FromObject(std::move(object)));
}
std::string ChannelzRegistry::InternalGetServers(intptr_t start_server_id) {
std::vector<RefCountedPtr<BaseNode>> servers;
RefCountedPtr<BaseNode> node_after_pagination_limit;
{
MutexLock lock(&mu_);
for (auto it = node_map_.lower_bound(start_server_id);
it != node_map_.end(); ++it) {
BaseNode* node = it->second;
RefCountedPtr<BaseNode> node_ref;
if (node->type() == BaseNode::EntityType::kServer &&
(node_ref = node->RefIfNonZero()) != nullptr) {
// Check if we are over pagination limit to determine if we need to set
// the "end" element. If we don't go through this block, we know that
// when the loop terminates, we have at most kPaginationLimit entries.
// Note that because we have already increased this node's
// refcount, we need to decrease it, but we can't unref while
// holding the lock, because this may lead to a deadlock.
if (servers.size() == kPaginationLimit) {
node_after_pagination_limit = std::move(node_ref);
break;
}
servers.emplace_back(std::move(node_ref));
}
}
}
Json::Object object;
if (!servers.empty()) {
// Create list of servers.
Json::Array array;
for (size_t i = 0; i < servers.size(); ++i) {
array.emplace_back(servers[i]->RenderJson());
}
object["server"] = Json::FromArray(std::move(array));
}
if (node_after_pagination_limit == nullptr) {
object["end"] = Json::FromBool(true);
}
return JsonDump(Json::FromObject(std::move(object)));
std::vector<WeakRefCountedPtr<BaseNode>>
ChannelzRegistry::InternalGetAllEntities() {
return std::get<0>(QueryNodes(
0, [](const BaseNode*) { return true; },
std::numeric_limits<size_t>::max()));
}
void ChannelzRegistry::InternalLogAllEntities() {
std::vector<RefCountedPtr<BaseNode>> nodes;
{
MutexLock lock(&mu_);
for (auto& p : node_map_) {
RefCountedPtr<BaseNode> node = p.second->RefIfNonZero();
if (node != nullptr) {
nodes.emplace_back(std::move(node));
for (const auto& p : InternalGetAllEntities()) {
std::string json = p->RenderJsonString();
LOG(INFO) << json;
}
}
std::string ChannelzRegistry::GetTopChannelsJson(intptr_t start_channel_id) {
return RenderArray(GetTopChannels(start_channel_id), "channel");
}
std::string ChannelzRegistry::GetServersJson(intptr_t start_server_id) {
return RenderArray(GetServers(start_server_id), "server");
}
void ChannelzRegistry::InternalRegister(BaseNode* node) {
DCHECK_EQ(node->uuid_, -1);
const size_t node_shard_index = NodeShardIndex(node);
NodeShard& node_shard = node_shards_[node_shard_index];
MutexLock lock(&node_shard.mu);
node_shard.nursery.AddToHead(node);
}
void ChannelzRegistry::InternalUnregister(BaseNode* node) {
const size_t node_shard_index = NodeShardIndex(node);
NodeShard& node_shard = node_shards_[node_shard_index];
node_shard.mu.Lock();
CHECK_EQ(node->orphaned_index_, 0u);
intptr_t uuid = node->uuid_.load(std::memory_order_relaxed);
NodeList& remove_list = uuid == -1 ? node_shard.nursery : node_shard.numbered;
remove_list.Remove(node);
if (max_orphaned_per_shard_ == 0) {
// We are not tracking orphaned nodes... remove from the index
// if necessary, then exit out.
node_shard.mu.Unlock();
if (uuid != -1) {
MutexLock lock(&index_mu_);
index_.erase(uuid);
}
return;
}
NodeList& add_list =
uuid != -1 ? node_shard.orphaned_numbered : node_shard.orphaned;
// Ref counting: once a node becomes orphaned we add a single weak ref to it.
// We hold that ref until it gets garbage collected later.
node->WeakRef().release();
node->orphaned_index_ = node_shard.next_orphan_index;
CHECK_GT(node->orphaned_index_, 0u);
++node_shard.next_orphan_index;
add_list.AddToHead(node);
if (node_shard.TotalOrphaned() <= max_orphaned_per_shard_) {
// Below recycling thresholds: just exit out
node_shard.mu.Unlock();
return;
}
CHECK_EQ(node_shard.TotalOrphaned(), max_orphaned_per_shard_ + 1);
NodeList* gc_list;
// Choose the oldest node to evict, regardless of whether it is numbered.
if (node_shard.orphaned.tail == nullptr) {
CHECK_NE(node_shard.orphaned_numbered.tail, nullptr);
gc_list = &node_shard.orphaned_numbered;
} else if (node_shard.orphaned_numbered.tail == nullptr) {
gc_list = &node_shard.orphaned;
} else if (node_shard.orphaned.tail->orphaned_index_ <
node_shard.orphaned_numbered.tail->orphaned_index_) {
gc_list = &node_shard.orphaned;
} else {
gc_list = &node_shard.orphaned_numbered;
}
auto* n = gc_list->tail;
CHECK_GT(n->orphaned_index_, 0u);
gc_list->Remove(n);
// Note: we capture the reference to n previously added here, and release
// it when this smart pointer is destroyed, outside of any locks.
WeakRefCountedPtr<BaseNode> gcd_node(n);
node_shard.mu.Unlock();
if (gc_list == &node_shard.orphaned_numbered) {
MutexLock lock(&index_mu_);
intptr_t uuid = n->uuid_.load(std::memory_order_relaxed);
index_.erase(uuid);
}
}
void ChannelzRegistry::LoadConfig() {
const auto max_orphaned = ConfigVars::Get().ChannelzMaxOrphanedNodes();
if (max_orphaned == 0) {
max_orphaned_per_shard_ = 0;
} else {
max_orphaned_per_shard_ = std::max<int>(max_orphaned / kNodeShards, 1);
}
}
std::tuple<std::vector<WeakRefCountedPtr<BaseNode>>, bool>
ChannelzRegistry::QueryNodes(
intptr_t start_node, absl::FunctionRef<bool(const BaseNode*)> discriminator,
size_t max_results) {
// Mitigate drain hotspotting by randomizing the drain order each query.
std::vector<size_t> nursery_visitation_order;
for (size_t i = 0; i < kNodeShards; ++i) {
nursery_visitation_order.push_back(i);
}
absl::c_shuffle(nursery_visitation_order, SharedBitGen());
// In the iteration below, even once we have max_results nodes, we need
// to find the next node in order to know if we've hit the end. If we get
// through the loop without returning, then we return end=true. But if we
// find a node to add after we already have max_results nodes, then we
// return with end=false before exiting the loop. However, in the latter
// case, we will have already increased the ref count of the next node,
// so we need to unref it, but we can't do that while holding the lock.
// So instead, we store it in node_after_end, which will be unreffed
// after releasing the lock.
WeakRefCountedPtr<BaseNode> node_after_end;
std::vector<WeakRefCountedPtr<BaseNode>> result;
MutexLock index_lock(&index_mu_);
for (auto it = index_.lower_bound(start_node); it != index_.end(); ++it) {
BaseNode* node = it->second;
if (!discriminator(node)) continue;
auto node_ref = node->WeakRefIfNonZero();
if (node_ref == nullptr) continue;
if (result.size() == max_results) {
node_after_end = std::move(node_ref);
return std::tuple(std::move(result), false);
}
result.emplace_back(std::move(node_ref));
}
for (auto nursery_index : nursery_visitation_order) {
NodeShard& node_shard = node_shards_[nursery_index];
MutexLock shard_lock(&node_shard.mu);
for (auto [nursery, numbered] :
{std::pair(&node_shard.nursery, &node_shard.numbered),
std::pair(&node_shard.orphaned, &node_shard.orphaned_numbered)}) {
if (nursery->head == nullptr) continue;
BaseNode* n = nursery->head;
while (n != nullptr) {
if (!discriminator(n)) {
n = n->next_;
continue;
}
auto node_ref = n->WeakRefIfNonZero();
if (node_ref == nullptr) {
n = n->next_;
continue;
}
BaseNode* next = n->next_;
nursery->Remove(n);
numbered->AddToHead(n);
n->uuid_ = uuid_generator_;
++uuid_generator_;
index_.emplace(n->uuid_, n);
if (n->uuid_ >= start_node) {
if (result.size() == max_results) {
node_after_end = std::move(node_ref);
return std::tuple(std::move(result), false);
}
result.emplace_back(std::move(node_ref));
}
n = next;
}
}
}
for (size_t i = 0; i < nodes.size(); ++i) {
std::string json = nodes[i]->RenderJsonString();
LOG(INFO) << json;
CHECK(node_after_end == nullptr);
return std::tuple(std::move(result), true);
}
WeakRefCountedPtr<BaseNode> ChannelzRegistry::InternalGet(intptr_t uuid) {
MutexLock index_lock(&index_mu_);
auto it = index_.find(uuid);
if (it == index_.end()) return nullptr;
BaseNode* node = it->second;
return node->WeakRefIfNonZero();
}
intptr_t ChannelzRegistry::InternalNumberNode(BaseNode* node) {
// node must be strongly owned still
node->AssertStronglyOwned();
const size_t node_shard_index = NodeShardIndex(node);
NodeShard& node_shard = node_shards_[node_shard_index];
MutexLock index_lock(&index_mu_);
MutexLock lock(&node_shard.mu);
intptr_t uuid = node->uuid_.load(std::memory_order_relaxed);
if (uuid != -1) return uuid;
uuid = uuid_generator_;
++uuid_generator_;
node->uuid_ = uuid;
if (node->orphaned_index_ > 0) {
node_shard.orphaned.Remove(node);
node_shard.orphaned_numbered.AddToHead(node);
} else {
node_shard.nursery.Remove(node);
node_shard.numbered.AddToHead(node);
}
index_.emplace(uuid, node);
return uuid;
}
bool ChannelzRegistry::NodeList::Holds(BaseNode* node) const {
BaseNode* n = head;
while (n != nullptr) {
if (n == node) return true;
n = n->next_;
}
return false;
}
void ChannelzRegistry::NodeList::AddToHead(BaseNode* node) {
DCHECK(!Holds(node));
++count;
if (head != nullptr) head->prev_ = node;
node->next_ = head;
node->prev_ = nullptr;
head = node;
if (tail == nullptr) tail = node;
DCHECK(Holds(node));
}
void ChannelzRegistry::NodeList::Remove(BaseNode* node) {
DCHECK(Holds(node));
DCHECK_GT(count, 0u);
--count;
if (node->prev_ == nullptr) {
head = node->next_;
if (head == nullptr) {
DCHECK_EQ(count, 0u);
tail = nullptr;
DCHECK(!Holds(node));
return;
}
} else {
node->prev_->next_ = node->next_;
}
if (node->next_ == nullptr) {
tail = node->prev_;
} else {
node->next_->prev_ = node->prev_;
}
DCHECK(!Holds(node));
}
void ChannelzRegistry::TestOnlyReset() {
auto* p = Default();
p->uuid_generator_ = 1;
p->LoadConfig();
std::vector<WeakRefCountedPtr<BaseNode>> free_nodes;
for (size_t i = 0; i < kNodeShards; i++) {
MutexLock lock(&p->node_shards_[i].mu);
CHECK(p->node_shards_[i].nursery.head == nullptr);
CHECK(p->node_shards_[i].numbered.head == nullptr);
while (p->node_shards_[i].orphaned.head != nullptr) {
free_nodes.emplace_back(p->node_shards_[i].orphaned.head);
p->node_shards_[i].orphaned.Remove(p->node_shards_[i].orphaned.head);
}
while (p->node_shards_[i].orphaned_numbered.head != nullptr) {
free_nodes.emplace_back(p->node_shards_[i].orphaned_numbered.head);
p->node_shards_[i].orphaned_numbered.Remove(
p->node_shards_[i].orphaned_numbered.head);
}
}
std::vector<NodeShard> replace_node_shards(kNodeShards);
replace_node_shards.swap(p->node_shards_);
MutexLock lock(&p->index_mu_);
p->index_.clear();
}
} // namespace channelz
@ -179,21 +381,22 @@ void ChannelzRegistry::InternalLogAllEntities() {
char* grpc_channelz_get_top_channels(intptr_t start_channel_id) {
grpc_core::ExecCtx exec_ctx;
return gpr_strdup(
grpc_core::channelz::ChannelzRegistry::GetTopChannels(start_channel_id)
return grpc_core::channelz::ApplyHacks(
grpc_core::channelz::ChannelzRegistry::GetTopChannelsJson(
start_channel_id)
.c_str());
}
char* grpc_channelz_get_servers(intptr_t start_server_id) {
grpc_core::ExecCtx exec_ctx;
return gpr_strdup(
grpc_core::channelz::ChannelzRegistry::GetServers(start_server_id)
return grpc_core::channelz::ApplyHacks(
grpc_core::channelz::ChannelzRegistry::GetServersJson(start_server_id)
.c_str());
}
char* grpc_channelz_get_server(intptr_t server_id) {
grpc_core::ExecCtx exec_ctx;
grpc_core::RefCountedPtr<grpc_core::channelz::BaseNode> server_node =
grpc_core::WeakRefCountedPtr<grpc_core::channelz::BaseNode> server_node =
grpc_core::channelz::ChannelzRegistry::Get(server_id);
if (server_node == nullptr ||
server_node->type() !=
@ -203,7 +406,7 @@ char* grpc_channelz_get_server(intptr_t server_id) {
grpc_core::Json json = grpc_core::Json::FromObject({
{"server", server_node->RenderJson()},
});
return gpr_strdup(grpc_core::JsonDump(json).c_str());
return grpc_core::channelz::ApplyHacks(grpc_core::JsonDump(json).c_str());
}
char* grpc_channelz_get_server_sockets(intptr_t server_id,
@ -211,7 +414,7 @@ char* grpc_channelz_get_server_sockets(intptr_t server_id,
intptr_t max_results) {
grpc_core::ExecCtx exec_ctx;
// Validate inputs before handing them off to the renderer.
grpc_core::RefCountedPtr<grpc_core::channelz::BaseNode> base_node =
grpc_core::WeakRefCountedPtr<grpc_core::channelz::BaseNode> base_node =
grpc_core::channelz::ChannelzRegistry::Get(server_id);
if (base_node == nullptr ||
base_node->type() != grpc_core::channelz::BaseNode::EntityType::kServer ||
@ -222,13 +425,13 @@ char* grpc_channelz_get_server_sockets(intptr_t server_id,
// actually a server node.
grpc_core::channelz::ServerNode* server_node =
static_cast<grpc_core::channelz::ServerNode*>(base_node.get());
return gpr_strdup(
return grpc_core::channelz::ApplyHacks(
server_node->RenderServerSockets(start_socket_id, max_results).c_str());
}
char* grpc_channelz_get_channel(intptr_t channel_id) {
grpc_core::ExecCtx exec_ctx;
grpc_core::RefCountedPtr<grpc_core::channelz::BaseNode> channel_node =
grpc_core::WeakRefCountedPtr<grpc_core::channelz::BaseNode> channel_node =
grpc_core::channelz::ChannelzRegistry::Get(channel_id);
if (channel_node == nullptr ||
(channel_node->type() !=
@ -240,12 +443,12 @@ char* grpc_channelz_get_channel(intptr_t channel_id) {
grpc_core::Json json = grpc_core::Json::FromObject({
{"channel", channel_node->RenderJson()},
});
return gpr_strdup(grpc_core::JsonDump(json).c_str());
return grpc_core::channelz::ApplyHacks(grpc_core::JsonDump(json).c_str());
}
char* grpc_channelz_get_subchannel(intptr_t subchannel_id) {
grpc_core::ExecCtx exec_ctx;
grpc_core::RefCountedPtr<grpc_core::channelz::BaseNode> subchannel_node =
grpc_core::WeakRefCountedPtr<grpc_core::channelz::BaseNode> subchannel_node =
grpc_core::channelz::ChannelzRegistry::Get(subchannel_id);
if (subchannel_node == nullptr ||
subchannel_node->type() !=
@ -255,20 +458,22 @@ char* grpc_channelz_get_subchannel(intptr_t subchannel_id) {
grpc_core::Json json = grpc_core::Json::FromObject({
{"subchannel", subchannel_node->RenderJson()},
});
return gpr_strdup(grpc_core::JsonDump(json).c_str());
return grpc_core::channelz::ApplyHacks(grpc_core::JsonDump(json).c_str());
}
char* grpc_channelz_get_socket(intptr_t socket_id) {
grpc_core::ExecCtx exec_ctx;
grpc_core::RefCountedPtr<grpc_core::channelz::BaseNode> socket_node =
grpc_core::WeakRefCountedPtr<grpc_core::channelz::BaseNode> socket_node =
grpc_core::channelz::ChannelzRegistry::Get(socket_id);
if (socket_node == nullptr ||
socket_node->type() !=
grpc_core::channelz::BaseNode::EntityType::kSocket) {
(socket_node->type() !=
grpc_core::channelz::BaseNode::EntityType::kSocket &&
socket_node->type() !=
grpc_core::channelz::BaseNode::EntityType::kListenSocket)) {
return nullptr;
}
grpc_core::Json json = grpc_core::Json::FromObject({
{"socket", socket_node->RenderJson()},
});
return gpr_strdup(grpc_core::JsonDump(json).c_str());
return grpc_core::channelz::ApplyHacks(grpc_core::JsonDump(json).c_str());
}

View File

@ -25,8 +25,10 @@
#include <map>
#include <string>
#include "absl/base/thread_annotations.h"
#include "absl/container/btree_map.h"
#include "absl/functional/function_ref.h"
#include "src/core/channelz/channelz.h"
#include "src/core/util/json/json_writer.h"
#include "src/core/util/ref_counted_ptr.h"
#include "src/core/util/sync.h"
@ -40,36 +42,134 @@ class ChannelzRegistry final {
static void Register(BaseNode* node) {
return Default()->InternalRegister(node);
}
static void Unregister(intptr_t uuid) { Default()->InternalUnregister(uuid); }
static RefCountedPtr<BaseNode> Get(intptr_t uuid) {
static void Unregister(BaseNode* node) {
Default()->InternalUnregister(node);
}
static WeakRefCountedPtr<BaseNode> Get(intptr_t uuid) {
return Default()->InternalGet(uuid);
}
static intptr_t NumberNode(BaseNode* node) {
return Default()->InternalNumberNode(node);
}
static WeakRefCountedPtr<SubchannelNode> GetSubchannel(intptr_t uuid) {
return Default()
->InternalGetTyped<SubchannelNode, BaseNode::EntityType::kSubchannel>(
uuid);
}
static WeakRefCountedPtr<ChannelNode> GetChannel(intptr_t uuid) {
auto node = Default()->InternalGet(uuid);
if (node == nullptr) return nullptr;
if (node->type() == BaseNode::EntityType::kTopLevelChannel) {
return node->WeakRefAsSubclass<ChannelNode>();
}
if (node->type() == BaseNode::EntityType::kInternalChannel) {
return node->WeakRefAsSubclass<ChannelNode>();
}
return nullptr;
}
static WeakRefCountedPtr<ServerNode> GetServer(intptr_t uuid) {
return Default()
->InternalGetTyped<ServerNode, BaseNode::EntityType::kServer>(uuid);
}
static WeakRefCountedPtr<SocketNode> GetSocket(intptr_t uuid) {
return Default()
->InternalGetTyped<SocketNode, BaseNode::EntityType::kSocket>(uuid);
}
// Returns the allocated JSON string that represents the proto
// GetTopChannelsResponse as per channelz.proto.
static std::string GetTopChannels(intptr_t start_channel_id) {
return Default()->InternalGetTopChannels(start_channel_id);
static auto GetTopChannels(intptr_t start_channel_id) {
return Default()
->InternalGetObjects<ChannelNode,
BaseNode::EntityType::kTopLevelChannel>(
start_channel_id);
}
static std::string GetTopChannelsJson(intptr_t start_channel_id);
static std::string GetServersJson(intptr_t start_server_id);
// Returns the allocated JSON string that represents the proto
// GetServersResponse as per channelz.proto.
static std::string GetServers(intptr_t start_server_id) {
return Default()->InternalGetServers(start_server_id);
static auto GetServers(intptr_t start_server_id) {
return Default()
->InternalGetObjects<ServerNode, BaseNode::EntityType::kServer>(
start_server_id);
}
static std::tuple<std::vector<WeakRefCountedPtr<BaseNode>>, bool>
GetChildrenOfType(intptr_t start_node, const BaseNode* parent,
BaseNode::EntityType type, size_t max_results) {
return Default()->InternalGetChildrenOfType(start_node, parent, type,
max_results);
}
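// Editor's illustrative sketch (not part of the diff): paginating over a
// server's child sockets; `server_node` is an assumed ServerNode*.
//   auto [children, end] = ChannelzRegistry::GetChildrenOfType(
//       /*start_node=*/0, server_node, BaseNode::EntityType::kSocket,
//       /*max_results=*/100);
//   for (const auto& child : children) LOG(INFO) << "socket " << child->uuid();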
// Test only helper function to dump the JSON representation to std out.
// This can aid in debugging channelz code.
static void LogAllEntities() { Default()->InternalLogAllEntities(); }
// Test only helper function to reset to initial state.
static void TestOnlyReset() {
auto* p = Default();
MutexLock lock(&p->mu_);
p->node_map_.clear();
p->uuid_generator_ = 0;
static std::vector<WeakRefCountedPtr<BaseNode>> GetAllEntities() {
return Default()->InternalGetAllEntities();
}
// Test only helper function to reset to initial state.
static void TestOnlyReset();
private:
ChannelzRegistry() { LoadConfig(); }
void LoadConfig();
// Takes a callable F: (RefCountedPtr<BaseNode>) -> bool, and returns a
// (BaseNode*) -> bool wrapper that skips objects whose refcount has already
// dropped to zero (returning true for them). Any refs taken during the
// iteration must be released outside the registry's locks.
template <typename F>
static auto CollectReferences(F fn) {
return [fn = std::move(fn)](BaseNode* n) {
auto node = n->RefIfNonZero();
if (node == nullptr) return true;
return fn(std::move(node));
};
}
struct NodeList {
BaseNode* head = nullptr;
BaseNode* tail = nullptr;
size_t count = 0;
bool Holds(BaseNode* node) const;
void AddToHead(BaseNode* node);
void Remove(BaseNode* node);
};
// Nodes move through up to four lists, depending on whether they have
// been assigned a uuid (we call this becoming "numbered") and whether
// they have been orphaned.
// The lists let us find un-numbered nodes when needed for queries, and
// the oldest orphaned node when needed for garbage collection.
// Nodes are organized into shards based on their pointer address. Each
// shard tracks its four lists independently - we strive to avoid
// cross-talk between shards, since the registry is a heavily shared
// global object.
struct alignas(GPR_CACHELINE_SIZE) NodeShard {
Mutex mu;
// Nursery nodes have no uuid and are not orphaned.
NodeList nursery ABSL_GUARDED_BY(mu);
// Numbered nodes have been assigned a uuid, and are not orphaned.
NodeList numbered ABSL_GUARDED_BY(mu);
// Orphaned nodes have no uuid, but have been orphaned.
NodeList orphaned ABSL_GUARDED_BY(mu);
// Finally, orphaned numbered nodes are orphaned, and have been assigned a
// uuid.
NodeList orphaned_numbered ABSL_GUARDED_BY(mu);
uint64_t next_orphan_index ABSL_GUARDED_BY(mu) = 1;
size_t TotalOrphaned() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu) {
return orphaned.count + orphaned_numbered.count;
}
};
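// Editor's note (not part of the diff): lifecycle summary. Register() places
// a node in `nursery`; the first uuid() lookup (NumberNode or a query) moves
// it to `numbered` and into the global index; Unregister() moves it to
// `orphaned` or `orphaned_numbered`, where it is retained until the shard
// exceeds max_orphaned_per_shard_ and its oldest orphan is evicted.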
// Returns the singleton instance of ChannelzRegistry.
static ChannelzRegistry* Default();
@ -78,23 +178,81 @@ class ChannelzRegistry final {
// Globally unregisters the object associated with the given uuid or node.
// Also sanity-checks that an object doesn't try to unregister the wrong type.
void InternalUnregister(intptr_t uuid);
void InternalUnregister(BaseNode* node);
intptr_t InternalNumberNode(BaseNode* node);
// if object with uuid has previously been registered as the correct type,
// returns the void* associated with that uuid. Else returns nullptr.
RefCountedPtr<BaseNode> InternalGet(intptr_t uuid);
WeakRefCountedPtr<BaseNode> InternalGet(intptr_t uuid);
std::string InternalGetTopChannels(intptr_t start_channel_id);
std::string InternalGetServers(intptr_t start_server_id);
// Generic query over nodes.
// This function takes care of all the gnarly locking, and allows high level
// code to request a start node and maximum number of results (for pagination
// purposes).
// `discriminator` allows callers to choose which nodes will be returned - if
// it returns true, the node is included in the result.
// `discriminator` *MUST NOT* ref the node, nor call into ChannelzRegistry via
// any code path (locks are held during the call).
std::tuple<std::vector<WeakRefCountedPtr<BaseNode>>, bool> QueryNodes(
intptr_t start_node,
absl::FunctionRef<bool(const BaseNode*)> discriminator,
size_t max_results);
std::tuple<std::vector<WeakRefCountedPtr<BaseNode>>, bool>
InternalGetChildrenOfType(intptr_t start_node, const BaseNode* parent,
BaseNode::EntityType type, size_t max_results) {
return QueryNodes(
start_node,
[type, parent](const BaseNode* n) {
return n->type() == type && n->HasParent(parent);
},
max_results);
}
template <typename T, BaseNode::EntityType entity_type>
WeakRefCountedPtr<T> InternalGetTyped(intptr_t uuid) {
WeakRefCountedPtr<BaseNode> node = InternalGet(uuid);
if (node == nullptr || node->type() != entity_type) {
return nullptr;
}
return node->WeakRefAsSubclass<T>();
}
template <typename T, BaseNode::EntityType entity_type>
std::tuple<std::vector<WeakRefCountedPtr<T>>, bool> InternalGetObjects(
intptr_t start_id) {
const int kPaginationLimit = 100;
std::vector<WeakRefCountedPtr<T>> top_level_channels;
const auto [nodes, end] = QueryNodes(
start_id,
[](const BaseNode* node) { return node->type() == entity_type; },
kPaginationLimit);
for (const auto& p : nodes) {
top_level_channels.emplace_back(p->template WeakRefAsSubclass<T>());
}
return std::tuple(std::move(top_level_channels), end);
}
void InternalLogAllEntities();
std::vector<WeakRefCountedPtr<BaseNode>> InternalGetAllEntities();
// protects members
Mutex mu_;
std::map<intptr_t, BaseNode*> node_map_ ABSL_GUARDED_BY(mu_);
intptr_t uuid_generator_ ABSL_GUARDED_BY(mu_) = 0;
static constexpr size_t kNodeShards = 63;
size_t NodeShardIndex(BaseNode* node) {
return absl::HashOf(node) % kNodeShards;
}
int64_t uuid_generator_{1};
std::vector<NodeShard> node_shards_{kNodeShards};
Mutex index_mu_;
absl::btree_map<intptr_t, BaseNode*> index_ ABSL_GUARDED_BY(index_mu_);
size_t max_orphaned_per_shard_;
};
// `additionalInfo` section is not yet in the protobuf format, so we
// provide a utility to strip it for compatibility.
std::string StripAdditionalInfoFromJson(absl::string_view json);
} // namespace channelz
} // namespace grpc_core
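As a side note, the pointer-hash sharding used by the registry above can be illustrated with a small standalone sketch (hypothetical types, not the gRPC code): each node is mapped to one of a fixed number of shards by hashing its address, and every shard owns its own mutex and lists, so registrations on different shards never contend.

#include <array>
#include <cstddef>
#include <functional>
#include <list>
#include <mutex>

struct Node {};  // stand-in for BaseNode

class ShardedNodeRegistry {
 public:
  static constexpr size_t kShards = 63;

  void Register(Node* node) {
    Shard& shard = shards_[ShardIndex(node)];
    std::lock_guard<std::mutex> lock(shard.mu);
    shard.nodes.push_back(node);
  }

  void Unregister(Node* node) {
    Shard& shard = shards_[ShardIndex(node)];
    std::lock_guard<std::mutex> lock(shard.mu);
    shard.nodes.remove(node);
  }

 private:
  struct Shard {
    std::mutex mu;
    std::list<Node*> nodes;  // the real code keeps four intrusive lists
  };

  static size_t ShardIndex(Node* node) {
    // The real code uses absl::HashOf(node) % kNodeShards.
    return std::hash<const void*>()(node) % kShards;
  }

  std::array<Shard, kShards> shards_;
};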

View File

@ -0,0 +1,315 @@
// Copyright 2025 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_CHANNELZ_ZTRACE_COLLECTOR_H
#define GRPC_SRC_CORE_CHANNELZ_ZTRACE_COLLECTOR_H
#include <grpc/support/time.h>
#include <memory>
#include <tuple>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "src/core/channelz/channelz.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/util/json/json_writer.h"
#include "src/core/util/single_set_ptr.h"
#include "src/core/util/string.h"
#include "src/core/util/sync.h"
#include "src/core/util/time.h"
#ifdef GRPC_NO_ZTRACE
namespace grpc_core::channelz {
namespace ztrace_collector_detail {
class ZTraceImpl final : public ZTrace {
public:
explicit ZTraceImpl() {}
void Run(Timestamp deadline, std::map<std::string, std::string> args,
std::shared_ptr<grpc_event_engine::experimental::EventEngine>
event_engine,
absl::AnyInvocable<void(Json)> callback) override {
event_engine->Run([callback = std::move(callback)]() mutable {
callback(Json::FromBool(false));
});
}
};
class StubImpl {
public:
template <typename T>
void Append(const T&) {}
std::unique_ptr<ZTrace> MakeZTrace() {
return std::make_unique<ZTraceImpl>();
}
};
} // namespace ztrace_collector_detail
template <typename...>
class ZTraceCollector : public ztrace_collector_detail::StubImpl {};
} // namespace grpc_core::channelz
#else
namespace grpc_core::channelz {
namespace ztrace_collector_detail {
template <typename T>
using Collection = std::deque<std::pair<gpr_cycle_counter, T> >;
template <typename T>
void AppendResults(const Collection<T>& data, Json::Array& results) {
for (const auto& value : data) {
Json::Object object;
object["timestamp"] =
Json::FromString(gpr_format_timespec(gpr_convert_clock_type(
gpr_cycle_counter_to_time(value.first), GPR_CLOCK_REALTIME)));
value.second.RenderJson(object);
results.emplace_back(Json::FromObject(std::move(object)));
}
}
template <typename Needle, typename... Haystack>
constexpr bool kIsElement = false;
template <typename Needle, typename... Haystack>
constexpr bool kIsElement<Needle, Needle, Haystack...> = true;
template <typename Needle, typename H, typename... Haystack>
constexpr bool kIsElement<Needle, H, Haystack...> =
kIsElement<Needle, Haystack...>;
} // namespace ztrace_collector_detail
inline std::optional<int64_t> IntFromArgs(
const std::map<std::string, std::string>& args, const std::string& name) {
auto it = args.find(name);
if (it == args.end()) return std::nullopt;
int64_t out;
if (!absl::SimpleAtoi(it->second, &out)) return std::nullopt;
return out;
}
// Generic collector infrastructure for ztrace queries.
// Abstracts away most of the ztrace requirements in an efficient manner,
// allowing system authors to concentrate on emitting useful data.
// If no trace is performed, overhead is one pointer and one relaxed atomic read
// per trace event.
//
// Two kinds of objects are required:
// 1. A `Config`
// - This type should be constructible with a std::map<std::string,
// std::string>
// and provides overall query configuration - the map can be used to pull
// predicates from the calling system.
// - Needs a `bool Finishes(T)` method for each Data type (see 2).
// This allows the config to terminate a query in the event of reaching
// some configured predicate.
// 2. N `Data` types
// - One for each kind of data captured in the trace
// - Allows avoiding e.g. variant<> data types; these are inefficient
// in this context because they force every recorded entry to use the
// same number of bytes whilst pending.
template <typename Config, typename... Data>
class ZTraceCollector {
public:
template <typename X>
void Append(X producer_or_value) {
GRPC_TRACE_LOG(ztrace, INFO) << "ZTRACE[" << this << "]: " << [&]() {
Json::Object obj;
if constexpr (ztrace_collector_detail::kIsElement<X, Data...>) {
producer_or_value.RenderJson(obj);
} else {
producer_or_value().RenderJson(obj);
}
return JsonDump(Json::FromObject(std::move(obj)));
}();
if (!impl_.is_set()) return;
if constexpr (ztrace_collector_detail::kIsElement<X, Data...>) {
AppendValue(std::move(producer_or_value));
} else {
AppendValue(producer_or_value());
}
}
std::unique_ptr<ZTrace> MakeZTrace() {
return std::make_unique<ZTraceImpl>(impl_.GetOrCreate());
}
private:
template <typename T>
using Collection = ztrace_collector_detail::Collection<T>;
struct Instance : public RefCounted<Instance> {
Instance(std::map<std::string, std::string> args,
std::shared_ptr<grpc_event_engine::experimental::EventEngine>
event_engine,
absl::AnyInvocable<void(Json)> done)
: memory_cap_(IntFromArgs(args, "memory_cap").value_or(1024 * 1024)),
config(args),
event_engine(std::move(event_engine)),
done(std::move(done)) {}
using Collections = std::tuple<Collection<Data>...>;
struct RemoveMostRecentState {
void (*enact)(Instance*) = nullptr;
gpr_cycle_counter most_recent =
std::numeric_limits<gpr_cycle_counter>::max();
};
template <typename T>
void Append(std::pair<gpr_cycle_counter, T> value) {
memory_used_ += value.second.MemoryUsage();
while (memory_used_ > memory_cap_) RemoveMostRecent();
std::get<Collection<T> >(data).push_back(std::move(value));
}
void RemoveMostRecent() {
RemoveMostRecentState state;
(UpdateRemoveMostRecentState<Data>(&state), ...);
CHECK(state.enact != nullptr);
state.enact(this);
++items_removed_;
}
template <typename T>
void UpdateRemoveMostRecentState(RemoveMostRecentState* state) {
auto& collection = std::get<Collection<T> >(data);
if (collection.empty()) return;
if (state->enact == nullptr ||
collection.front().first < state->most_recent) {
state->enact = +[](Instance* instance) {
auto& collection = std::get<Collection<T> >(instance->data);
const size_t ent_usage = collection.front().second.MemoryUsage();
CHECK_GE(instance->memory_used_, ent_usage);
instance->memory_used_ -= ent_usage;
collection.pop_front();
};
state->most_recent = collection.front().first;
}
}
void Finish(absl::Status status) {
event_engine->Run([data = std::move(data), done = std::move(done),
status = std::move(status), memory_used = memory_used_,
items_removed = items_removed_]() mutable {
Json::Array entries;
(ztrace_collector_detail::AppendResults(
std::get<Collection<Data> >(data), entries),
...);
Json::Object result;
result["entries"] = Json::FromArray(entries);
result["status"] = Json::FromString(status.ToString());
result["memory_used"] =
Json::FromNumber(static_cast<uint64_t>(memory_used));
result["items_removed"] = Json::FromNumber(items_removed);
done(Json::FromObject(std::move(result)));
});
}
size_t memory_used_ = 0;
size_t memory_cap_ = 0;
uint64_t items_removed_ = 0;
Config config;
const Timestamp start_time = Timestamp::Now();
std::shared_ptr<grpc_event_engine::experimental::EventEngine> event_engine;
grpc_event_engine::experimental::EventEngine::TaskHandle task_handle{
grpc_event_engine::experimental::EventEngine::TaskHandle::kInvalid};
Collections data;
absl::AnyInvocable<void(Json)> done;
};
struct Impl : public RefCounted<Impl> {
Mutex mu;
absl::flat_hash_set<RefCountedPtr<Instance> > instances ABSL_GUARDED_BY(mu);
};
class ZTraceImpl final : public ZTrace {
public:
explicit ZTraceImpl(RefCountedPtr<Impl> impl) : impl_(std::move(impl)) {}
void Run(Timestamp deadline, std::map<std::string, std::string> args,
std::shared_ptr<grpc_event_engine::experimental::EventEngine>
event_engine,
absl::AnyInvocable<void(Json)> callback) override {
auto instance = MakeRefCounted<Instance>(std::move(args), event_engine,
std::move(callback));
auto impl = std::move(impl_);
RefCountedPtr<Instance> oldest_instance;
MutexLock lock(&impl->mu);
if (impl->instances.size() > 20) {
// Eject oldest running trace
Timestamp oldest_time = Timestamp::InfFuture();
for (auto& instance : impl->instances) {
if (instance->start_time < oldest_time) {
oldest_time = instance->start_time;
oldest_instance = instance;
}
}
CHECK(oldest_instance != nullptr);
impl->instances.erase(oldest_instance);
oldest_instance->Finish(
absl::ResourceExhaustedError("Too many concurrent ztrace queries"));
}
instance->task_handle = event_engine->RunAfter(
deadline - Timestamp::Now(), [instance, impl]() {
bool finish;
{
MutexLock lock(&impl->mu);
finish = impl->instances.erase(instance);
}
if (finish) instance->Finish(absl::DeadlineExceededError(""));
});
impl->instances.insert(instance);
}
private:
RefCountedPtr<Impl> impl_;
};
template <typename T>
void AppendValue(T&& data) {
auto value = std::pair(gpr_get_cycle_counter(), std::forward<T>(data));
auto* impl = impl_.Get();
{
MutexLock lock(&impl->mu);
switch (impl->instances.size()) {
case 0:
return;
case 1: {
auto& instances = impl->instances;
auto& instance = *instances.begin();
const bool finishes = instance->config.Finishes(value.second);
instance->Append(std::move(value));
if (finishes) {
instance->Finish(absl::OkStatus());
instances.clear();
}
} break;
default: {
std::vector<RefCountedPtr<Instance> > finished;
for (auto& instance : impl->instances) {
const bool finishes = instance->config.Finishes(value.second);
instance->Append(value);
if (finishes) {
finished.push_back(instance);
}
}
for (const auto& instance : finished) {
instance->Finish(absl::OkStatus());
impl->instances.erase(instance);
}
}
}
}
}
SingleSetRefCountedPtr<Impl> impl_;
};
} // namespace grpc_core::channelz
#endif // GRPC_NO_ZTRACE
#endif // GRPC_SRC_CORE_CHANNELZ_ZTRACE_COLLECTOR_H
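To make the Config/Data contract above concrete, here is a hedged, self-contained sketch using hypothetical names (WriteEvent and WriteTraceConfig are illustrations, not gRPC types); the Json::Object parameter of RenderJson is replaced by a plain string map so the example stands alone.

#include <cstddef>
#include <cstdint>
#include <map>
#include <string>

// Hypothetical per-event data type: one record per transport write.
struct WriteEvent {
  size_t bytes;
  // Stand-in for RenderJson(Json::Object&) in the real collector.
  void RenderJson(std::map<std::string, std::string>& obj) const {
    obj["bytes"] = std::to_string(bytes);
  }
  size_t MemoryUsage() const { return sizeof(*this); }
};

// Hypothetical config: built from the query args, terminates the trace once
// a configured number of bytes has been observed.
struct WriteTraceConfig {
  explicit WriteTraceConfig(const std::map<std::string, std::string>& args) {
    auto it = args.find("stop_after_bytes");
    if (it != args.end()) stop_after_bytes = std::stoll(it->second);
  }
  bool Finishes(const WriteEvent& e) {
    seen += static_cast<int64_t>(e.bytes);
    return seen >= stop_after_bytes;
  }
  int64_t stop_after_bytes = 1 << 20;
  int64_t seen = 0;
};

// With these two types a transport could declare
//   ZTraceCollector<WriteTraceConfig, WriteEvent> write_trace_;
// and call write_trace_.Append(WriteEvent{n}) (or pass a lambda producing a
// WriteEvent) on every write; events are only materialized while a query is
// running.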

View File

@ -62,10 +62,15 @@ static grpc_core::Duration g_poll_interval =
static bool g_backup_polling_disabled;
void grpc_client_channel_global_init_backup_polling() {
#ifndef GRPC_DO_NOT_INSTANTIATE_POSIX_POLLER
// Disable backup polling if EventEngine is used everywhere.
g_backup_polling_disabled = grpc_core::IsEventEngineClientEnabled() &&
grpc_core::IsEventEngineListenerEnabled() &&
grpc_core::IsEventEngineDnsEnabled();
#else
// EventEngine polling not supported, keep using the backup poller.
g_backup_polling_disabled = false;
#endif
if (g_backup_polling_disabled) {
return;
}
@ -155,11 +160,21 @@ static void g_poller_init_locked() {
}
}
static bool g_can_poll_in_background() {
#ifndef GRPC_DO_NOT_INSTANTIATE_POSIX_POLLER
return grpc_iomgr_run_in_background();
#else
// No iomgr "event_engines" (not to be confused with the new EventEngine)
// are able to run in background.
return false;
#endif
}
void grpc_client_channel_start_backup_polling(
grpc_pollset_set* interested_parties) {
if (g_backup_polling_disabled ||
g_poll_interval == grpc_core::Duration::Zero() ||
grpc_iomgr_run_in_background()) {
g_can_poll_in_background()) {
return;
}
gpr_mu_lock(&g_poller_mu);
@ -179,7 +194,7 @@ void grpc_client_channel_stop_backup_polling(
grpc_pollset_set* interested_parties) {
if (g_backup_polling_disabled ||
g_poll_interval == grpc_core::Duration::Zero() ||
grpc_iomgr_run_in_background()) {
g_can_poll_in_background()) {
return;
}
grpc_pollset_set_del_pollset(interested_parties, g_poller->pollset);

View File

@ -228,21 +228,19 @@ class ClientChannel::SubchannelWrapper::WatcherWrapper
subchannel_wrapper_.reset(DEBUG_LOCATION, "WatcherWrapper");
}
void OnConnectivityStateChange(
RefCountedPtr<ConnectivityStateWatcherInterface> self,
grpc_connectivity_state state, const absl::Status& status) override {
void OnConnectivityStateChange(grpc_connectivity_state state,
const absl::Status& status) override {
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << subchannel_wrapper_->client_channel_.get()
<< ": connectivity change for subchannel wrapper "
<< subchannel_wrapper_.get() << " subchannel "
<< subchannel_wrapper_->subchannel_.get()
<< "; hopping into work_serializer";
self.release(); // Held by callback.
auto self = RefAsSubclass<WatcherWrapper>();
subchannel_wrapper_->client_channel_->work_serializer_->Run(
[this, state, status]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(
*subchannel_wrapper_->client_channel_->work_serializer_) {
ApplyUpdateInControlPlaneWorkSerializer(state, status);
Unref();
[self, state, status]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(
*self->subchannel_wrapper_->client_channel_->work_serializer_) {
self->ApplyUpdateInControlPlaneWorkSerializer(state, status);
});
}
@ -324,8 +322,7 @@ ClientChannel::SubchannelWrapper::SubchannelWrapper(
auto it =
client_channel_->subchannel_refcount_map_.find(subchannel_.get());
if (it == client_channel_->subchannel_refcount_map_.end()) {
client_channel_->channelz_node_->AddChildSubchannel(
subchannel_node->uuid());
subchannel_node->AddParent(client_channel_->channelz_node_);
it = client_channel_->subchannel_refcount_map_
.emplace(subchannel_.get(), 0)
.first;
@ -360,8 +357,8 @@ void ClientChannel::SubchannelWrapper::Orphaned() {
CHECK(it != self->client_channel_->subchannel_refcount_map_.end());
--it->second;
if (it->second == 0) {
self->client_channel_->channelz_node_->RemoveChildSubchannel(
subchannel_node->uuid());
subchannel_node->RemoveParent(
self->client_channel_->channelz_node_);
self->client_channel_->subchannel_refcount_map_.erase(it);
}
}
@ -530,7 +527,11 @@ RefCountedPtr<SubchannelPoolInterface> GetSubchannelPool(
if (args.GetBool(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL).value_or(false)) {
return MakeRefCounted<LocalSubchannelPool>();
}
return GlobalSubchannelPool::instance();
if (IsShardGlobalConnectionPoolEnabled()) {
return GlobalSubchannelPool::instance();
} else {
return LegacyGlobalSubchannelPool::instance();
}
}
} // namespace
@ -701,6 +702,7 @@ class ExternalStateWatcher : public RefCounted<ExternalStateWatcher> {
grpc_connectivity_state last_observed_state,
Timestamp deadline)
: channel_(std::move(channel)), cq_(cq), tag_(tag) {
grpc_cq_begin_op(cq, tag);
MutexLock lock(&mu_);
// Start watch. This inherits the ref from creation.
auto watcher =
@ -780,17 +782,14 @@ void ClientChannel::WatchConnectivityState(grpc_connectivity_state state,
}
void ClientChannel::AddConnectivityWatcher(
grpc_connectivity_state,
OrphanablePtr<AsyncConnectivityStateWatcherInterface>) {
Crash("not implemented");
// TODO(ctiller): to make this work, need to change WorkSerializer to use
// absl::AnyInvocable<> instead of std::function<>
// work_serializer_->Run(
// [self = RefAsSubclass<ClientChannel>(), initial_state,
// watcher = std::move(watcher)]()
// ABSL_EXCLUSIVE_LOCKS_REQUIRED(*work_serializer_) {
// self->state_tracker_.AddWatcher(initial_state, std::move(watcher));
// });
grpc_connectivity_state initial_state,
OrphanablePtr<AsyncConnectivityStateWatcherInterface> watcher) {
auto self = RefAsSubclass<ClientChannel>();
work_serializer_->Run(
[self, initial_state, watcher = std::move(watcher)]()
ABSL_EXCLUSIVE_LOCKS_REQUIRED(*self->work_serializer_) mutable {
self->state_tracker_.AddWatcher(initial_state, std::move(watcher));
});
}
void ClientChannel::RemoveConnectivityWatcher(
@ -1320,11 +1319,13 @@ void ClientChannel::UpdateStateLocked(grpc_connectivity_state state,
state_tracker_.SetState(state, status, reason);
if (channelz_node_ != nullptr) {
channelz_node_->SetConnectivityState(state);
channelz_node_->AddTraceEvent(
channelz::ChannelTrace::Severity::Info,
grpc_slice_from_static_string(
channelz::ChannelNode::GetChannelConnectivityStateChangeString(
state)));
std::string trace =
channelz::ChannelNode::GetChannelConnectivityStateChangeString(state);
if (!status.ok() || state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
absl::StrAppend(&trace, " status:", status.ToString());
}
channelz_node_->AddTraceEvent(channelz::ChannelTrace::Severity::Info,
grpc_slice_from_cpp_string(std::move(trace)));
}
}

View File

@ -501,7 +501,7 @@ class ClientChannelFilter::SubchannelWrapper final
if (subchannel_node != nullptr) {
auto it = chand_->subchannel_refcount_map_.find(subchannel_.get());
if (it == chand_->subchannel_refcount_map_.end()) {
chand_->channelz_node_->AddChildSubchannel(subchannel_node->uuid());
subchannel_node->AddParent(chand_->channelz_node_);
it = chand_->subchannel_refcount_map_.emplace(subchannel_.get(), 0)
.first;
}
@ -533,8 +533,7 @@ class ClientChannelFilter::SubchannelWrapper final
CHECK(it != chand_->subchannel_refcount_map_.end());
--it->second;
if (it->second == 0) {
chand_->channelz_node_->RemoveChildSubchannel(
subchannel_node->uuid());
subchannel_node->RemoveParent(chand_->channelz_node_);
chand_->subchannel_refcount_map_.erase(it);
}
}
@ -617,20 +616,18 @@ class ClientChannelFilter::SubchannelWrapper final
parent_.reset(DEBUG_LOCATION, "WatcherWrapper");
}
void OnConnectivityStateChange(
RefCountedPtr<ConnectivityStateWatcherInterface> self,
grpc_connectivity_state state, const absl::Status& status) override {
void OnConnectivityStateChange(grpc_connectivity_state state,
const absl::Status& status) override {
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << parent_->chand_
<< ": connectivity change for subchannel wrapper " << parent_.get()
<< " subchannel " << parent_->subchannel_.get()
<< "hopping into work_serializer";
self.release(); // Held by callback.
auto self = RefAsSubclass<WatcherWrapper>();
parent_->chand_->work_serializer_->Run(
[this, state, status]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(
*parent_->chand_->work_serializer_) {
ApplyUpdateInControlPlaneWorkSerializer(state, status);
Unref();
[self, state, status]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(
*self->parent_->chand_->work_serializer_) {
self->ApplyUpdateInControlPlaneWorkSerializer(state, status);
});
}
@ -1029,7 +1026,11 @@ RefCountedPtr<SubchannelPoolInterface> GetSubchannelPool(
if (args.GetBool(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL).value_or(false)) {
return MakeRefCounted<LocalSubchannelPool>();
}
return GlobalSubchannelPool::instance();
if (IsShardGlobalConnectionPoolEnabled()) {
return GlobalSubchannelPool::instance();
} else {
return LegacyGlobalSubchannelPool::instance();
}
}
} // namespace
@ -1551,11 +1552,13 @@ void ClientChannelFilter::UpdateStateLocked(grpc_connectivity_state state,
state_tracker_.SetState(state, status, reason);
if (channelz_node_ != nullptr) {
channelz_node_->SetConnectivityState(state);
channelz_node_->AddTraceEvent(
channelz::ChannelTrace::Severity::Info,
grpc_slice_from_static_string(
channelz::ChannelNode::GetChannelConnectivityStateChangeString(
state)));
std::string trace =
channelz::ChannelNode::GetChannelConnectivityStateChangeString(state);
if (!status.ok() || state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
absl::StrAppend(&trace, " status:", status.ToString());
}
channelz_node_->AddTraceEvent(channelz::ChannelTrace::Severity::Info,
grpc_slice_from_cpp_string(std::move(trace)));
}
}
@ -2428,9 +2431,7 @@ void ClientChannelFilter::LoadBalancedCall::RecordCallCompletion(
void ClientChannelFilter::LoadBalancedCall::RecordLatency() {
// Compute latency and report it to the tracer.
if (call_attempt_tracer() != nullptr) {
gpr_timespec latency =
gpr_cycle_counter_sub(gpr_get_cycle_counter(), lb_call_start_time_);
call_attempt_tracer()->RecordEnd(latency);
call_attempt_tracer()->RecordEnd();
}
}

View File

@ -438,8 +438,6 @@ class ClientChannelFilter::LoadBalancedCall
absl::AnyInvocable<void()> on_commit_;
gpr_cycle_counter lb_call_start_time_ = gpr_get_cycle_counter();
RefCountedPtr<ConnectedSubchannel> connected_subchannel_;
const BackendMetricData* backend_metric_data_ = nullptr;
std::unique_ptr<LoadBalancingPolicy::SubchannelCallTrackerInterface>

View File

@ -53,8 +53,6 @@ class SubchannelConnector : public InternallyRefCounted<SubchannelConnector> {
Transport* transport = nullptr;
// Channel args to be passed to filters.
ChannelArgs channel_args;
// Channelz socket node of the connected transport, if any.
RefCountedPtr<channelz::SocketNode> socket_node;
void Reset() {
if (transport != nullptr) {
@ -62,7 +60,6 @@ class SubchannelConnector : public InternallyRefCounted<SubchannelConnector> {
transport = nullptr;
}
channel_args = ChannelArgs();
socket_node.reset();
}
};

View File

@ -26,12 +26,13 @@
namespace grpc_core {
RefCountedPtr<GlobalSubchannelPool> GlobalSubchannelPool::instance() {
static GlobalSubchannelPool* p = new GlobalSubchannelPool();
return p->RefAsSubclass<GlobalSubchannelPool>();
RefCountedPtr<LegacyGlobalSubchannelPool>
LegacyGlobalSubchannelPool::instance() {
static LegacyGlobalSubchannelPool* p = new LegacyGlobalSubchannelPool();
return p->RefAsSubclass<LegacyGlobalSubchannelPool>();
}
RefCountedPtr<Subchannel> GlobalSubchannelPool::RegisterSubchannel(
RefCountedPtr<Subchannel> LegacyGlobalSubchannelPool::RegisterSubchannel(
const SubchannelKey& key, RefCountedPtr<Subchannel> constructed) {
MutexLock lock(&mu_);
auto it = subchannel_map_.find(key);
@ -43,8 +44,8 @@ RefCountedPtr<Subchannel> GlobalSubchannelPool::RegisterSubchannel(
return constructed;
}
void GlobalSubchannelPool::UnregisterSubchannel(const SubchannelKey& key,
Subchannel* subchannel) {
void LegacyGlobalSubchannelPool::UnregisterSubchannel(const SubchannelKey& key,
Subchannel* subchannel) {
MutexLock lock(&mu_);
auto it = subchannel_map_.find(key);
// delete only if key hasn't been re-registered to a different subchannel
@ -54,7 +55,7 @@ void GlobalSubchannelPool::UnregisterSubchannel(const SubchannelKey& key,
}
}
RefCountedPtr<Subchannel> GlobalSubchannelPool::FindSubchannel(
RefCountedPtr<Subchannel> LegacyGlobalSubchannelPool::FindSubchannel(
const SubchannelKey& key) {
MutexLock lock(&mu_);
auto it = subchannel_map_.find(key);
@ -62,4 +63,64 @@ RefCountedPtr<Subchannel> GlobalSubchannelPool::FindSubchannel(
return it->second->RefIfNonZero();
}
RefCountedPtr<GlobalSubchannelPool> GlobalSubchannelPool::instance() {
static GlobalSubchannelPool* p = new GlobalSubchannelPool();
return p->RefAsSubclass<GlobalSubchannelPool>();
}
RefCountedPtr<Subchannel> GlobalSubchannelPool::RegisterSubchannel(
const SubchannelKey& key, RefCountedPtr<Subchannel> constructed) {
auto shard_index = ShardIndex(key);
auto& write_shard = write_shards_[shard_index];
auto& read_shard = read_shards_[shard_index];
SubchannelMap old_map1;
SubchannelMap old_map2;
MutexLock lock(&write_shard.mu);
auto* existing = write_shard.map.Lookup(key);
if (existing != nullptr) return (*existing)->RefIfNonZero();
old_map1 = std::exchange(write_shard.map,
write_shard.map.Add(key, constructed->WeakRef()));
MutexLock lock_read(&read_shard.mu);
old_map2 = std::exchange(read_shard.map, write_shard.map);
return constructed;
}
void GlobalSubchannelPool::UnregisterSubchannel(const SubchannelKey& key,
Subchannel* subchannel) {
auto shard_index = ShardIndex(key);
auto& write_shard = write_shards_[shard_index];
auto& read_shard = read_shards_[shard_index];
SubchannelMap old_map1;
SubchannelMap old_map2;
MutexLock lock(&write_shard.mu);
auto* existing = write_shard.map.Lookup(key);
// delete only if key hasn't been re-registered to a different subchannel
// between strong-unreffing and unregistration of subchannel.
if (existing == nullptr || existing->get() != subchannel) return;
old_map1 = std::exchange(write_shard.map, write_shard.map.Remove(key));
MutexLock lock_read(&read_shard.mu);
old_map2 = std::exchange(read_shard.map, write_shard.map);
}
RefCountedPtr<Subchannel> GlobalSubchannelPool::FindSubchannel(
const SubchannelKey& key) {
auto shard_index = ShardIndex(key);
auto& read_shard = read_shards_[shard_index];
read_shard.mu.Lock();
auto map = read_shard.map;
read_shard.mu.Unlock();
auto* subchannel = map.Lookup(key);
if (subchannel == nullptr) return nullptr;
return (*subchannel)->RefIfNonZero();
}
size_t GlobalSubchannelPool::ShardIndex(const SubchannelKey& key) {
absl::string_view addr(key.address().addr, key.address().len);
return absl::HashOf(addr) % kShards;
}
GlobalSubchannelPool::GlobalSubchannelPool() = default;
GlobalSubchannelPool::~GlobalSubchannelPool() = default;
} // namespace grpc_core

View File

@ -32,10 +32,10 @@ namespace grpc_core {
// The global subchannel pool. It shares subchannels among channels. There
// should be only one instance of this class.
class GlobalSubchannelPool final : public SubchannelPoolInterface {
class LegacyGlobalSubchannelPool final : public SubchannelPoolInterface {
public:
// Gets the singleton instance.
static RefCountedPtr<GlobalSubchannelPool> instance();
static RefCountedPtr<LegacyGlobalSubchannelPool> instance();
// Implements interface methods.
RefCountedPtr<Subchannel> RegisterSubchannel(
@ -48,8 +48,8 @@ class GlobalSubchannelPool final : public SubchannelPoolInterface {
ABSL_LOCKS_EXCLUDED(mu_);
private:
GlobalSubchannelPool() {}
~GlobalSubchannelPool() override {}
LegacyGlobalSubchannelPool() {}
~LegacyGlobalSubchannelPool() override {}
// A map from subchannel key to subchannel.
std::map<SubchannelKey, Subchannel*> subchannel_map_ ABSL_GUARDED_BY(mu_);
@ -57,6 +57,39 @@ class GlobalSubchannelPool final : public SubchannelPoolInterface {
Mutex mu_;
};
// The global subchannel pool. It shares subchannels among channels. There
// should be only one instance of this class.
class GlobalSubchannelPool final : public SubchannelPoolInterface {
public:
// Gets the singleton instance.
static RefCountedPtr<GlobalSubchannelPool> instance();
// Implements interface methods.
RefCountedPtr<Subchannel> RegisterSubchannel(
const SubchannelKey& key, RefCountedPtr<Subchannel> constructed) override;
void UnregisterSubchannel(const SubchannelKey& key,
Subchannel* subchannel) override;
RefCountedPtr<Subchannel> FindSubchannel(const SubchannelKey& key) override;
private:
GlobalSubchannelPool();
~GlobalSubchannelPool() override;
static const size_t kShards = 127;
using SubchannelMap = AVL<SubchannelKey, WeakRefCountedPtr<Subchannel>>;
struct LockedMap {
Mutex mu;
SubchannelMap map ABSL_GUARDED_BY(mu);
};
using ShardedMap = std::array<LockedMap, kShards>;
static size_t ShardIndex(const SubchannelKey& key);
ShardedMap write_shards_;
ShardedMap read_shards_;
};
} // namespace grpc_core
#endif // GRPC_SRC_CORE_CLIENT_CHANNEL_GLOBAL_SUBCHANNEL_POOL_H
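The write-shard/read-shard split above can be summarized with a rough standalone sketch (hypothetical Key/Value types, std::map plus shared_ptr standing in for the immutable AVL): mutations rebuild the map under the write lock and then publish the new version to the read shard, so a lookup only holds the read lock long enough to grab a snapshot.

#include <map>
#include <memory>
#include <mutex>
#include <string>

// Stand-ins for SubchannelKey / Subchannel in this sketch.
using Key = std::string;
using Value = std::shared_ptr<int>;
using Map = std::map<Key, Value>;

struct Shard {
  std::mutex mu;
  std::shared_ptr<const Map> map = std::make_shared<Map>();
};

class ShardedPool {
 public:
  void Register(const Key& key, Value v) {
    std::lock_guard<std::mutex> write_lock(write_.mu);
    // Copy-on-write: rebuild the map from the current version, then publish
    // the new version to the read shard under its own lock.
    auto next = std::make_shared<Map>(*write_.map);
    (*next)[key] = std::move(v);
    write_.map = next;
    std::lock_guard<std::mutex> read_lock(read_.mu);
    read_.map = next;
  }

  Value Find(const Key& key) {
    std::shared_ptr<const Map> snapshot;
    {
      std::lock_guard<std::mutex> read_lock(read_.mu);
      snapshot = read_.map;  // only copy a handle while holding the lock
    }
    auto it = snapshot->find(key);
    if (it == snapshot->end()) return nullptr;
    return it->second;
  }

 private:
  // The real pool keeps kShards pairs of these, indexed by a hash of the
  // subchannel address; this sketch shows a single pair.
  Shard write_;
  Shard read_;
};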

View File

@ -452,10 +452,8 @@ void Subchannel::ConnectivityStateWatcherList::RemoveWatcherLocked(
void Subchannel::ConnectivityStateWatcherList::NotifyLocked(
grpc_connectivity_state state, const absl::Status& status) {
for (const auto& watcher : watchers_) {
subchannel_->work_serializer_.Run([watcher = watcher->Ref(), state,
status]() mutable {
auto* watcher_ptr = watcher.get();
watcher_ptr->OnConnectivityStateChange(std::move(watcher), state, status);
subchannel_->work_serializer_.Run([watcher, state, status]() {
watcher->OnConnectivityStateChange(state, status);
});
}
}
@ -551,6 +549,8 @@ Subchannel::Subchannel(SubchannelKey key,
channelz_node_->AddTraceEvent(
channelz::ChannelTrace::Severity::Info,
grpc_slice_from_static_string("subchannel created"));
channelz_node_->SetChannelArgs(args_);
args_ = args_.SetObject<channelz::BaseNode>(channelz_node_);
}
}
@ -612,10 +612,8 @@ void Subchannel::WatchConnectivityState(
grpc_pollset_set_add_pollset_set(pollset_set_, interested_parties);
}
work_serializer_.Run(
[watcher = watcher->Ref(), state = state_, status = status_]() mutable {
auto* watcher_ptr = watcher.get();
watcher_ptr->OnConnectivityStateChange(std::move(watcher), state,
status);
[watcher, state = state_, status = status_]() {
watcher->OnConnectivityStateChange(state, status);
},
DEBUG_LOCATION);
watcher_list_.AddWatcherLocked(std::move(watcher));
@ -792,7 +790,7 @@ void Subchannel::OnConnectingFinishedLocked(grpc_error_handle error) {
}
bool Subchannel::PublishTransportLocked() {
auto socket_node = std::move(connecting_result_.socket_node);
auto socket_node = connecting_result_.transport->GetSocketNode();
if (connecting_result_.transport->filter_stack_transport() != nullptr) {
// Construct channel stack.
// Builder takes ownership of transport.

View File

@ -168,14 +168,8 @@ class Subchannel final : public DualRefCounted<Subchannel> {
// Invoked whenever the subchannel's connectivity state changes.
// There will be only one invocation of this method on a given watcher
// instance at any given time.
// A ref to the watcher is passed in here so that the implementation
// can unref it in the appropriate synchronization context (e.g.,
// inside a WorkSerializer).
// TODO(roth): Figure out a cleaner way to guarantee that the ref is
// released in the right context.
virtual void OnConnectivityStateChange(
RefCountedPtr<ConnectivityStateWatcherInterface> self,
grpc_connectivity_state state, const absl::Status& status) = 0;
virtual void OnConnectivityStateChange(grpc_connectivity_state state,
const absl::Status& status) = 0;
virtual grpc_pollset_set* interested_parties() = 0;
};

View File

@ -38,13 +38,12 @@ SubchannelKey::SubchannelKey(const grpc_resolved_address& address,
const ChannelArgs& args)
: address_(address), args_(args) {}
bool SubchannelKey::operator<(const SubchannelKey& other) const {
if (address_.len < other.address_.len) return true;
if (address_.len > other.address_.len) return false;
int SubchannelKey::Compare(const SubchannelKey& other) const {
if (address_.len < other.address_.len) return -1;
if (address_.len > other.address_.len) return 1;
int r = memcmp(address_.addr, other.address_.addr, address_.len);
if (r < 0) return true;
if (r > 0) return false;
return args_ < other.args();
if (r != 0) return r;
return QsortCompare(args_, other.args_);
}
std::string SubchannelKey::ToString() const {

View File

@ -45,7 +45,17 @@ class SubchannelKey final {
SubchannelKey(SubchannelKey&& other) noexcept = default;
SubchannelKey& operator=(SubchannelKey&& other) noexcept = default;
bool operator<(const SubchannelKey& other) const;
bool operator<(const SubchannelKey& other) const {
return Compare(other) < 0;
}
bool operator>(const SubchannelKey& other) const {
return Compare(other) > 0;
}
bool operator==(const SubchannelKey& other) const {
return Compare(other) == 0;
}
int Compare(const SubchannelKey& other) const;
const grpc_resolved_address& address() const { return address_; }
const ChannelArgs& args() const { return args_; }

View File

@ -76,6 +76,11 @@ ABSL_FLAG(absl::optional<bool>, grpc_cpp_experimental_disable_reflection, {},
"EXPERIMENTAL. Only respected when there is a dependency on "
":grpc++_reflection. If true, no reflection server will be "
"automatically added.");
ABSL_FLAG(
absl::optional<int32_t>, grpc_channelz_max_orphaned_nodes, {},
"EXPERIMENTAL: If non-zero, extend the lifetime of channelz nodes past the "
"underlying object lifetime, up to this many nodes. The value may be "
"adjusted slightly to account for implementation limits.");
namespace grpc_core {
@ -84,6 +89,10 @@ ConfigVars::ConfigVars(const Overrides& overrides)
LoadConfig(FLAGS_grpc_client_channel_backup_poll_interval_ms,
"GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS",
overrides.client_channel_backup_poll_interval_ms, 5000)),
channelz_max_orphaned_nodes_(
LoadConfig(FLAGS_grpc_channelz_max_orphaned_nodes,
"GRPC_CHANNELZ_MAX_ORPHANED_NODES",
overrides.channelz_max_orphaned_nodes, 0)),
enable_fork_support_(LoadConfig(
FLAGS_grpc_enable_fork_support, "GRPC_ENABLE_FORK_SUPPORT",
overrides.enable_fork_support, GRPC_ENABLE_FORK_SUPPORT_DEFAULT)),
@ -146,7 +155,8 @@ std::string ConfigVars::ToString() const {
", not_use_system_ssl_roots: ", NotUseSystemSslRoots() ? "true" : "false",
", ssl_cipher_suites: ", "\"", absl::CEscape(SslCipherSuites()), "\"",
", cpp_experimental_disable_reflection: ",
CppExperimentalDisableReflection() ? "true" : "false");
CppExperimentalDisableReflection() ? "true" : "false",
", channelz_max_orphaned_nodes: ", ChannelzMaxOrphanedNodes());
}
} // namespace grpc_core

View File

@ -35,6 +35,7 @@ class GPR_DLL ConfigVars {
public:
struct Overrides {
absl::optional<int32_t> client_channel_backup_poll_interval_ms;
absl::optional<int32_t> channelz_max_orphaned_nodes;
absl::optional<bool> enable_fork_support;
absl::optional<bool> abort_on_leaks;
absl::optional<bool> not_use_system_ssl_roots;
@ -104,12 +105,19 @@ class GPR_DLL ConfigVars {
bool CppExperimentalDisableReflection() const {
return cpp_experimental_disable_reflection_;
}
// EXPERIMENTAL: If non-zero, extend the lifetime of channelz nodes past the
// underlying object lifetime, up to this many nodes. The value may be
// adjusted slightly to account for implementation limits.
int32_t ChannelzMaxOrphanedNodes() const {
return channelz_max_orphaned_nodes_;
}
private:
explicit ConfigVars(const Overrides& overrides);
static const ConfigVars& Load();
static std::atomic<ConfigVars*> config_vars_;
int32_t client_channel_backup_poll_interval_ms_;
int32_t channelz_max_orphaned_nodes_;
bool enable_fork_support_;
bool abort_on_leaks_;
bool not_use_system_ssl_roots_;

View File

@ -25,8 +25,10 @@
namespace grpc_core {
std::atomic<CoreConfiguration*> CoreConfiguration::config_{nullptr};
std::atomic<CoreConfiguration::RegisteredBuilder*> CoreConfiguration::builders_{
nullptr};
std::atomic<CoreConfiguration::RegisteredBuilder*>
CoreConfiguration::builders_[static_cast<size_t>(BuilderScope::kCount)]{
nullptr, nullptr};
std::atomic<bool> CoreConfiguration::has_config_ever_been_produced_{false};
void (*CoreConfiguration::default_builder_)(CoreConfiguration::Builder*);
CoreConfiguration::Builder::Builder() = default;
@ -46,18 +48,31 @@ CoreConfiguration::CoreConfiguration(Builder* builder)
lb_policy_registry_(builder->lb_policy_registry_.Build()),
proxy_mapper_registry_(builder->proxy_mapper_registry_.Build()),
certificate_provider_registry_(
builder->certificate_provider_registry_.Build()) {}
builder->certificate_provider_registry_.Build()),
endpoint_transport_registry_(
builder->endpoint_transport_registry_.Build()) {}
void CoreConfiguration::RegisterBuilder(
absl::AnyInvocable<void(Builder*)> builder) {
BuilderScope scope, absl::AnyInvocable<void(Builder*)> builder,
SourceLocation whence) {
CHECK(config_.load(std::memory_order_relaxed) == nullptr)
<< "CoreConfiguration was already instantiated before builder "
"registration was completed";
if (scope == BuilderScope::kPersistent) {
CHECK(!has_config_ever_been_produced_.load(std::memory_order_relaxed))
<< "Persistent builders cannot be registered after the first "
"CoreConfiguration has been produced";
}
CHECK_NE(scope, BuilderScope::kCount);
auto& head = builders_[static_cast<size_t>(scope)];
RegisteredBuilder* n = new RegisteredBuilder();
VLOG(4) << "Registering " << scope << " builder from " << whence.file() << ":"
<< whence.line();
n->builder = std::move(builder);
n->next = builders_.load(std::memory_order_relaxed);
while (!builders_.compare_exchange_weak(n->next, n, std::memory_order_acq_rel,
std::memory_order_relaxed)) {
n->whence = whence;
n->next = head.load(std::memory_order_relaxed);
while (!head.compare_exchange_weak(n->next, n, std::memory_order_acq_rel,
std::memory_order_relaxed)) {
}
CHECK(config_.load(std::memory_order_relaxed) == nullptr)
<< "CoreConfiguration was already instantiated before builder "
@ -65,6 +80,7 @@ void CoreConfiguration::RegisterBuilder(
}
const CoreConfiguration& CoreConfiguration::BuildNewAndMaybeSet() {
has_config_ever_been_produced_.store(true, std::memory_order_relaxed);
// Construct builder, pass it up to code that knows about build configuration
Builder builder;
// The linked list of builders stores things in reverse registration order.
@ -72,13 +88,21 @@ const CoreConfiguration& CoreConfiguration::BuildNewAndMaybeSet() {
// actually need to run things in forward registration order, so we iterate
// once over the linked list to build a vector of builders, and then iterate
// over said vector in reverse to actually run the builders.
// Note that we also iterate scopes in reverse order here too, so that when
// we run the builders in the reverse generated order we'll actually run
// persistent builders before ephemeral ones.
std::vector<RegisteredBuilder*> registered_builders;
for (RegisteredBuilder* b = builders_.load(std::memory_order_acquire);
b != nullptr; b = b->next) {
registered_builders.push_back(b);
for (auto scope : {BuilderScope::kEphemeral, BuilderScope::kPersistent}) {
for (RegisteredBuilder* b = builders_[static_cast<size_t>(scope)].load(
std::memory_order_acquire);
b != nullptr; b = b->next) {
registered_builders.push_back(b);
}
}
for (auto it = registered_builders.rbegin(); it != registered_builders.rend();
++it) {
VLOG(4) << "Running builder from " << (*it)->whence.file() << ":"
<< (*it)->whence.line();
(*it)->builder(&builder);
}
// Finally, call the built in configuration builder.
@ -100,7 +124,8 @@ const CoreConfiguration& CoreConfiguration::BuildNewAndMaybeSet() {
void CoreConfiguration::Reset() {
delete config_.exchange(nullptr, std::memory_order_acquire);
RegisteredBuilder* builder =
builders_.exchange(nullptr, std::memory_order_acquire);
builders_[static_cast<size_t>(BuilderScope::kEphemeral)].exchange(
nullptr, std::memory_order_acquire);
while (builder != nullptr) {
RegisteredBuilder* next = builder->next;
delete builder;
@ -108,4 +133,18 @@ void CoreConfiguration::Reset() {
}
}
void CoreConfiguration::
ResetEverythingIncludingPersistentBuildersAbsolutelyNotRecommended() {
has_config_ever_been_produced_.store(false, std::memory_order_relaxed);
RegisteredBuilder* builder =
builders_[static_cast<size_t>(BuilderScope::kPersistent)].exchange(
nullptr, std::memory_order_acquire);
while (builder != nullptr) {
RegisteredBuilder* next = builder->next;
delete builder;
builder = next;
}
Reset();
}
} // namespace grpc_core

View File

@ -16,6 +16,7 @@
#define GRPC_SRC_CORE_CONFIG_CORE_CONFIGURATION_H
#include <grpc/support/port_platform.h>
#include <sys/stat.h>
#include <atomic>
@ -30,6 +31,8 @@
#include "src/core/load_balancing/lb_policy_registry.h"
#include "src/core/resolver/resolver_registry.h"
#include "src/core/service_config/service_config_parser.h"
#include "src/core/transport/endpoint_transport.h"
#include "src/core/util/debug_location.h"
namespace grpc_core {
@ -40,6 +43,27 @@ class GRPC_DLL CoreConfiguration {
CoreConfiguration(const CoreConfiguration&) = delete;
CoreConfiguration& operator=(const CoreConfiguration&) = delete;
// BuilderScope is used to indicate whether a builder is persistent - these
// are builders that are used every time the configuration is built, or
// ephemeral - each time the configuration is built these are thrown away.
//
// Considerations for choosing persistent vs ephemeral:
// - For testing we want ephemeral builders, so the next test can throw away
// configuration.
// - For adapting gRPC to different environments we typically want persistent
// builders.
// - However, if the adaptation should run only once per process, then
// ephemeral is better.
//
// Builders are instantiated in scope order - persistent first, ephemeral
// second.
enum class BuilderScope {
kPersistent,
kEphemeral,
// Must be last, do not use as a scope.
kCount,
};
// Builder is passed to plugins, etc... at initialization time to collect
// their configuration and assemble the published CoreConfiguration.
class Builder {
@ -78,6 +102,10 @@ class GRPC_DLL CoreConfiguration {
return &certificate_provider_registry_;
}
EndpointTransportRegistry::Builder* endpoint_transport_registry() {
return &endpoint_transport_registry_;
}
private:
friend class CoreConfiguration;
@ -90,6 +118,7 @@ class GRPC_DLL CoreConfiguration {
LoadBalancingPolicyRegistry::Builder lb_policy_registry_;
ProxyMapperRegistry::Builder proxy_mapper_registry_;
CertificateProviderRegistry::Builder certificate_provider_registry_;
EndpointTransportRegistry::Builder endpoint_transport_registry_;
Builder();
CoreConfiguration* Build();
@ -99,6 +128,7 @@ class GRPC_DLL CoreConfiguration {
struct RegisteredBuilder {
absl::AnyInvocable<void(Builder*)> builder;
RegisteredBuilder* next;
SourceLocation whence;
};
// Temporarily replaces core configuration with what is built from the
@ -121,8 +151,10 @@ class GRPC_DLL CoreConfiguration {
// Backup current core configuration and replace/reset.
config_restore_ =
CoreConfiguration::config_.exchange(p, std::memory_order_acquire);
builders_restore_ = CoreConfiguration::builders_.exchange(
nullptr, std::memory_order_acquire);
builders_restore_ =
CoreConfiguration::builders_[static_cast<size_t>(
BuilderScope::kEphemeral)]
.exchange(nullptr, std::memory_order_acquire);
}
~WithSubstituteBuilder() {
@ -130,8 +162,10 @@ class GRPC_DLL CoreConfiguration {
Reset();
CHECK(CoreConfiguration::config_.exchange(
config_restore_, std::memory_order_acquire) == nullptr);
CHECK(CoreConfiguration::builders_.exchange(
builders_restore_, std::memory_order_acquire) == nullptr);
CHECK(CoreConfiguration::builders_[static_cast<size_t>(
BuilderScope::kEphemeral)]
.exchange(builders_restore_, std::memory_order_acquire) ==
nullptr);
}
private:
@ -153,13 +187,34 @@ class GRPC_DLL CoreConfiguration {
// Attach a registration function globally.
// Each registration function is called *in addition to*
// BuildCoreConfiguration for the default core configuration.
static void RegisterBuilder(absl::AnyInvocable<void(Builder*)> builder);
static void RegisterBuilder(BuilderScope scope,
absl::AnyInvocable<void(Builder*)> builder,
SourceLocation whence);
static void RegisterPersistentBuilder(
absl::AnyInvocable<void(Builder*)> builder, SourceLocation whence = {}) {
RegisterBuilder(BuilderScope::kPersistent, std::move(builder), whence);
}
static void RegisterEphemeralBuilder(
absl::AnyInvocable<void(Builder*)> builder, SourceLocation whence = {}) {
RegisterBuilder(BuilderScope::kEphemeral, std::move(builder), whence);
}
// Drop the core configuration. Users must ensure no other threads are
// accessing the configuration.
// Clears any dynamically registered builders.
// Clears any dynamically registered ephemeral builders.
static void Reset();
// Reset, but also reset persistent builders. This is not recommended, but
// is useful for tests that assume exactly the default open source
// configuration when running in other environments.
//
// TODO(ctiller, roth, yashkt): Remove the need for this method, and then
// move the legacy plugin registration mechanism to be a persistent builder.
static void
ResetEverythingIncludingPersistentBuildersAbsolutelyNotRecommended();
// Helper for tests: Reset the configuration, build a special one, run some
// code, and then reset the configuration again.
// Templatized to be sure no codegen in normal builds.
@ -206,6 +261,10 @@ class GRPC_DLL CoreConfiguration {
return certificate_provider_registry_;
}
const EndpointTransportRegistry& endpoint_transport_registry() const {
return endpoint_transport_registry_;
}
static void SetDefaultBuilder(void (*builder)(CoreConfiguration::Builder*)) {
default_builder_ = builder;
}
@ -219,8 +278,13 @@ class GRPC_DLL CoreConfiguration {
// The configuration
static std::atomic<CoreConfiguration*> config_;
// Has a configuration *ever* been produced - we verify this is false for
// persistent builders so that we can guarantee build-to-build consistency
// for these.
static std::atomic<bool> has_config_ever_been_produced_;
// Extra registered builders
static std::atomic<RegisteredBuilder*> builders_;
static std::atomic<RegisteredBuilder*>
builders_[static_cast<size_t>(BuilderScope::kCount)];
// Default builder
static void (*default_builder_)(CoreConfiguration::Builder*);
@ -233,8 +297,26 @@ class GRPC_DLL CoreConfiguration {
LoadBalancingPolicyRegistry lb_policy_registry_;
ProxyMapperRegistry proxy_mapper_registry_;
CertificateProviderRegistry certificate_provider_registry_;
EndpointTransportRegistry endpoint_transport_registry_;
};
template <typename Sink>
void AbslStringify(Sink& sink, CoreConfiguration::BuilderScope scope) {
switch (scope) {
case CoreConfiguration::BuilderScope::kPersistent:
sink.Append("Persistent");
break;
case CoreConfiguration::BuilderScope::kEphemeral:
sink.Append("Ephemeral");
break;
case CoreConfiguration::BuilderScope::kCount:
sink.Append("Count(");
sink.Append(std::to_string(static_cast<size_t>(scope)));
sink.Append(")");
break;
}
}
extern void BuildCoreConfiguration(CoreConfiguration::Builder* builder);
} // namespace grpc_core
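A brief usage sketch of the two scopes (the registration functions come from the header above; the lambdas and what they would register are hypothetical):

// Hypothetical initialization hook in an embedding application.
void RegisterMyGrpcCustomizations() {
  // Persistent: survives CoreConfiguration::Reset(), so it suits process-wide
  // environment adaptation. Must be registered before the first configuration
  // is ever built.
  grpc_core::CoreConfiguration::RegisterPersistentBuilder(
      [](grpc_core::CoreConfiguration::Builder* builder) {
        // e.g. register an environment-specific resolver or endpoint
        // transport on `builder` here.
      });
  // Ephemeral: discarded by CoreConfiguration::Reset(), which is what tests
  // want so the next test starts from a clean slate.
  grpc_core::CoreConfiguration::RegisterEphemeralBuilder(
      [](grpc_core::CoreConfiguration::Builder* builder) {
        // e.g. register a test-only channel filter on `builder` here.
      });
}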

View File

@ -151,8 +151,9 @@ std::map<std::string, std::string> AwsRequestSigner::GetSignedRequestHeaders() {
canonical_request_vector.emplace_back(query);
canonical_request_vector.emplace_back("\n");
// 4. CanonicalHeaders
std::string authority = url_.authority();
if (request_headers_.empty()) {
request_headers_.insert({"host", url_.authority()});
request_headers_.insert({"host", authority});
if (!token_.empty()) {
request_headers_.insert({"x-amz-security-token", token_});
}
@ -198,7 +199,7 @@ std::map<std::string, std::string> AwsRequestSigner::GetSignedRequestHeaders() {
string_to_sign_vector.emplace_back("\n");
// 3. CredentialScope
std::pair<absl::string_view, absl::string_view> host_parts =
absl::StrSplit(url_.authority(), absl::MaxSplits('.', 1));
absl::StrSplit(authority, absl::MaxSplits('.', 1));
std::string service_name(host_parts.first);
std::string credential_scope = absl::StrFormat(
"%s/%s/%s/aws4_request", request_date_short, region_, service_name);

View File

@ -146,8 +146,8 @@ UrlExternalAccountCredentials::RetrieveSubjectToken(
Timestamp deadline,
absl::AnyInvocable<void(absl::StatusOr<std::string>)> on_done) {
auto url_for_request =
URI::Create(url_.scheme(), url_.authority(), url_full_path_,
{} /* query params */, "" /* fragment */);
URI::Create(url_.scheme(), url_.user_info(), url_.host_port(),
url_full_path_, {} /* query params */, "" /* fragment */);
if (!url_for_request.ok()) {
return MakeOrphanable<NoOpFetchBody>(
event_engine(), std::move(on_done),

View File

@ -180,7 +180,7 @@ GcpServiceAccountIdentityCallCredentials::StartHttpRequest(
// channel. This would allow us to cancel an authentication query when under
// extreme memory pressure.
auto uri = URI::Create(
"http", "metadata.google.internal.",
"http", /*user_info=*/"", "metadata.google.internal.",
"/computeMetadata/v1/instance/service-accounts/default/identity",
{{"audience", audience_}}, /*fragment=*/"");
CHECK_OK(uri); // params are hardcoded

View File

@ -756,8 +756,8 @@ static void on_openid_config_retrieved(void* user_data,
// TODO(ctiller): Carry the resource_quota in ctx and share it with the host
// channel. This would allow us to cancel an authentication query when under
// extreme memory pressure.
uri = grpc_core::URI::Create("https", host, path, {} /* query params /*/,
"" /* fragment */);
uri = grpc_core::URI::Create("https", /*user_info=*/"", host, path,
{} /* query params /*/, "" /* fragment */);
if (!uri.ok()) {
goto error;
}
@ -886,8 +886,8 @@ static void retrieve_key_and_verify(verifier_cb_ctx* ctx) {
// TODO(ctiller): Carry the resource_quota in ctx and share it with the host
// channel. This would allow us to cancel an authentication query when under
// extreme memory pressure.
uri = grpc_core::URI::Create("https", host, path, {} /* query params */,
"" /* fragment */);
uri = grpc_core::URI::Create("https", /*user_info=*/"", host, path,
{} /* query params */, "" /* fragment */);
if (!uri.ok()) {
goto error;
}

View File

@ -314,7 +314,8 @@ class grpc_compute_engine_token_fetcher_credentials
// TODO(ctiller): Carry the memory quota in ctx and share it with the host
// channel. This would allow us to cancel an authentication query when under
// extreme memory pressure.
auto uri = grpc_core::URI::Create("http", GRPC_COMPUTE_ENGINE_METADATA_HOST,
auto uri = grpc_core::URI::Create("http", /*user_info=*/"",
GRPC_COMPUTE_ENGINE_METADATA_HOST,
GRPC_COMPUTE_ENGINE_METADATA_TOKEN_PATH,
{} /* query params */, "" /* fragment */);
CHECK(uri.ok()); // params are hardcoded
@ -372,7 +373,8 @@ grpc_google_refresh_token_credentials::StartHttpRequest(
// TODO(ctiller): Carry the memory quota in ctx and share it with the host
// channel. This would allow us to cancel an authentication query when under
// extreme memory pressure.
auto uri = grpc_core::URI::Create("https", GRPC_GOOGLE_OAUTH2_SERVICE_HOST,
auto uri = grpc_core::URI::Create("https", /*user_info=*/"",
GRPC_GOOGLE_OAUTH2_SERVICE_HOST,
GRPC_GOOGLE_OAUTH2_SERVICE_TOKEN_PATH,
{} /* query params */, "" /* fragment */);
CHECK(uri.ok()); // params are hardcoded

View File

@ -102,10 +102,11 @@ class grpc_alts_channel_security_connector final
static_cast<const grpc_alts_credentials*>(channel_creds());
const size_t user_specified_max_frame_size =
std::max(0, args.GetInt(GRPC_ARG_TSI_MAX_FRAME_SIZE).value_or(0));
CHECK(alts_tsi_handshaker_create(creds->options(), target_name_,
creds->handshaker_service_url(), true,
interested_parties, &handshaker,
user_specified_max_frame_size) == TSI_OK);
CHECK(alts_tsi_handshaker_create(
creds->options(), target_name_, creds->handshaker_service_url(),
true, interested_parties, &handshaker,
user_specified_max_frame_size,
args.GetOwnedString(GRPC_ARG_TRANSPORT_PROTOCOLS)) == TSI_OK);
handshake_manager->Add(
grpc_core::SecurityHandshakerCreate(handshaker, this, args));
}
@ -155,10 +156,10 @@ class grpc_alts_server_security_connector final
static_cast<const grpc_alts_server_credentials*>(server_creds());
size_t user_specified_max_frame_size =
std::max(0, args.GetInt(GRPC_ARG_TSI_MAX_FRAME_SIZE).value_or(0));
CHECK(alts_tsi_handshaker_create(creds->options(), nullptr,
creds->handshaker_service_url(), false,
interested_parties, &handshaker,
user_specified_max_frame_size) == TSI_OK);
CHECK(alts_tsi_handshaker_create(
creds->options(), nullptr, creds->handshaker_service_url(), false,
interested_parties, &handshaker, user_specified_max_frame_size,
args.GetOwnedString(GRPC_ARG_TRANSPORT_PROTOCOLS)) == TSI_OK);
handshake_manager->Add(
grpc_core::SecurityHandshakerCreate(handshaker, this, args));
}

View File

@ -217,9 +217,9 @@ static int is_metadata_server_reachable() {
detector.is_done = 0;
detector.success = 0;
memset(&request, 0, sizeof(grpc_http_request));
auto uri =
grpc_core::URI::Create("http", GRPC_COMPUTE_ENGINE_DETECTION_HOST, "/",
{} /* query params */, "" /* fragment */);
auto uri = grpc_core::URI::Create("http", /*user_info=*/"",
GRPC_COMPUTE_ENGINE_DETECTION_HOST, "/",
{} /* query params */, "" /* fragment */);
CHECK(uri.ok()); // params are hardcoded
auto http_request = grpc_core::HttpRequest::Get(
std::move(*uri), nullptr /* channel args */, &detector.pollent, &request,

View File

@ -278,8 +278,8 @@ grpc_ssl_server_credentials::~grpc_ssl_server_credentials() {
}
grpc_core::RefCountedPtr<grpc_server_security_connector>
grpc_ssl_server_credentials::create_security_connector(
const grpc_core::ChannelArgs& /* args */) {
return grpc_ssl_server_security_connector_create(this->Ref());
const grpc_core::ChannelArgs& args) {
return grpc_ssl_server_security_connector_create(this->Ref(), args);
}
grpc_core::UniqueTypeName grpc_ssl_server_credentials::Type() {

View File

@ -108,7 +108,8 @@ class grpc_ssl_channel_security_connector final
overridden_target_name_.empty() ? target_name_.c_str()
: overridden_target_name_.c_str(),
/*network_bio_buf_size=*/0,
/*ssl_bio_buf_size=*/0, &tsi_hs);
/*ssl_bio_buf_size=*/0,
args.GetOwnedString(GRPC_ARG_TRANSPORT_PROTOCOLS), &tsi_hs);
if (result != TSI_OK) {
LOG(ERROR) << "Handshaker creation failed with error "
<< tsi_result_to_string(result);
@ -198,7 +199,24 @@ class grpc_ssl_server_security_connector
return server_handshaker_factory_;
}
grpc_security_status InitializeHandshakerFactory() {
// Helper method to initialize the handshaker factory for all handshakers on
// the server side.
// - alpn_preferred_protocol_raw_list: an optional string holding the
// comma-separated, ordered list of protocols preferred for ALPN negotiation
// on this server.
//
// For the server handshaker, the preferred protocols given by the channel
// args override those chosen at handshaker factory creation. This is done
// once for all handshakes a server may perform, because it is unlikely that
// a server handshaker would change its protocol list per handshake.
//
// OpenSSL's mechanism for overriding the ALPN protocol selection is only
// available per SSL context, which makes it thread-unsafe. Introducing a lock
// in the server callback could make that alternative thread-safe, but it
// would add too much contention and hurt performance.
grpc_security_status InitializeHandshakerFactory(
std::optional<std::string> alpn_preferred_protocol_raw_list) {
if (has_cert_config_fetcher()) {
// Load initial credentials from certificate_config_fetcher:
if (!try_fetch_ssl_server_credentials()) {
@ -209,8 +227,17 @@ class grpc_ssl_server_security_connector
auto* server_credentials =
static_cast<const grpc_ssl_server_credentials*>(server_creds());
size_t num_alpn_protocols = 0;
const char** alpn_protocol_strings =
grpc_fill_alpn_protocol_strings(&num_alpn_protocols);
const char** alpn_protocol_strings = nullptr;
if (alpn_preferred_protocol_raw_list.has_value()) {
#if TSI_OPENSSL_ALPN_SUPPORT
alpn_protocol_strings = ParseAlpnStringIntoArray(
alpn_preferred_protocol_raw_list.value(), &num_alpn_protocols);
#endif // TSI_OPENSSL_ALPN_SUPPORT
}
if (alpn_protocol_strings == nullptr) {
alpn_protocol_strings =
grpc_fill_alpn_protocol_strings(&num_alpn_protocols);
}
tsi_ssl_server_handshaker_options options;
options.pem_key_cert_pairs =
server_credentials->config().pem_key_cert_pairs;
@ -277,9 +304,10 @@ class grpc_ssl_server_security_connector
}
private:
// Attempts to fetch the server certificate config if a callback is available.
// Current certificate config will continue to be used if the callback returns
// an error. Returns true if new credentials were successfully loaded.
// Attempts to fetch the server certificate config if a callback is
// available. Current certificate config will continue to be used if the
// callback returns an error. Returns true if new credentials were
// successfully loaded.
bool try_fetch_ssl_server_credentials() {
grpc_ssl_server_certificate_config* certificate_config = nullptr;
bool status;
@ -307,8 +335,8 @@ class grpc_ssl_server_security_connector
return status;
}
// Attempts to replace the server_handshaker_factory with a new factory using
// the provided grpc_ssl_server_certificate_config. Should new factory
// Attempts to replace the server_handshaker_factory with a new factory
// using the provided grpc_ssl_server_certificate_config. Should new factory
// creation fail, the existing factory will not be replaced. Returns true on
// success (new factory created).
bool try_replace_server_handshaker_factory(
@ -390,12 +418,14 @@ grpc_ssl_channel_security_connector_create(
grpc_core::RefCountedPtr<grpc_server_security_connector>
grpc_ssl_server_security_connector_create(
grpc_core::RefCountedPtr<grpc_server_credentials> server_credentials) {
grpc_core::RefCountedPtr<grpc_server_credentials> server_credentials,
const grpc_core::ChannelArgs& args) {
CHECK(server_credentials != nullptr);
grpc_core::RefCountedPtr<grpc_ssl_server_security_connector> c =
grpc_core::MakeRefCounted<grpc_ssl_server_security_connector>(
std::move(server_credentials));
const grpc_security_status retval = c->InitializeHandshakerFactory();
const grpc_security_status retval = c->InitializeHandshakerFactory(
args.GetOwnedString(GRPC_ARG_TRANSPORT_PROTOCOLS));
if (retval != GRPC_SECURITY_OK) {
return nullptr;
}
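With this change the server security connector picks up its ALPN preference list from the GRPC_ARG_TRANSPORT_PROTOCOLS channel arg instead of relying only on the built-in defaults. A minimal sketch of supplying that arg, assuming grpc_core::ChannelArgs::Set accepts a string value; the protocol list shown is illustrative only:
#include "src/core/lib/channel/channel_args.h"
grpc_core::ChannelArgs WithPreferredAlpn(const grpc_core::ChannelArgs& args) {
  // "h2,grpc-exp" is an illustrative ordered preference list, not a value
  // mandated by gRPC; when TSI_OPENSSL_ALPN_SUPPORT is enabled the connector
  // splits it on commas (see ParseAlpnStringIntoArray below).
  return args.Set(GRPC_ARG_TRANSPORT_PROTOCOLS, "h2,grpc-exp");
}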

View File

@ -76,6 +76,7 @@ struct grpc_ssl_server_config {
//
grpc_core::RefCountedPtr<grpc_server_security_connector>
grpc_ssl_server_security_connector_create(
grpc_core::RefCountedPtr<grpc_server_credentials> server_credentials);
grpc_core::RefCountedPtr<grpc_server_credentials> server_credentials,
const grpc_core::ChannelArgs& args);
#endif // GRPC_SRC_CORE_CREDENTIALS_TRANSPORT_SSL_SSL_SECURITY_CONNECTOR_H

View File

@ -200,6 +200,24 @@ const char** grpc_fill_alpn_protocol_strings(size_t* num_alpn_protocols) {
return alpn_protocol_strings;
}
const char** ParseAlpnStringIntoArray(absl::string_view preferred_protocols_raw,
size_t* num_alpn_protocols) {
CHECK_NE(num_alpn_protocols, nullptr);
std::vector<std::string> preferred_protocols;
preferred_protocols =
absl::StrSplit(preferred_protocols_raw, ',', absl::SkipWhitespace());
*num_alpn_protocols = preferred_protocols.size();
const char** alpn_protocol_strings = nullptr;
if (*num_alpn_protocols != 0) {
alpn_protocol_strings = static_cast<const char**>(
gpr_malloc(sizeof(const char*) * (*num_alpn_protocols)));
for (size_t i = 0; i < *num_alpn_protocols; i++) {
alpn_protocol_strings[i] = gpr_strdup(preferred_protocols[i].c_str());
}
}
return alpn_protocol_strings;
}
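A short usage sketch for the new helper; the cleanup shown is an assumption based on the gpr_malloc/gpr_strdup allocations above, not code taken from this diff:
size_t num_protocols = 0;
const char** protocols =
    ParseAlpnStringIntoArray("h2,http/1.1", &num_protocols);
// num_protocols == 2, protocols[0] == "h2", protocols[1] == "http/1.1".
// Assumed cleanup: each entry comes from gpr_strdup and the array itself from
// gpr_malloc, so both are released with gpr_free.
for (size_t i = 0; i < num_protocols; ++i) {
  gpr_free(const_cast<char*>(protocols[i]));
}
gpr_free(protocols);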
int grpc_ssl_host_matches_name(const tsi_peer* peer,
absl::string_view peer_name) {
absl::string_view allocated_name;

View File

@ -77,6 +77,11 @@ tsi_tls_version grpc_get_tsi_tls_version(grpc_tls_version tls_version);
// Return an array of strings containing alpn protocols.
const char** grpc_fill_alpn_protocol_strings(size_t* num_alpn_protocols);
// Parse a comma-separated list of protocol names into a const char** array
// that can be injected into the handshaker factory options.
const char** ParseAlpnStringIntoArray(absl::string_view preferred_protocols,
size_t* num_alpn_protocols);
// Initialize TSI SSL server/client handshaker factory.
grpc_security_status grpc_ssl_tsi_client_handshaker_factory_init(
tsi_ssl_pem_key_cert_pair* key_cert_pair, const char* pem_root_certs,

View File

@ -347,7 +347,8 @@ void TlsChannelSecurityConnector::add_handshakers(
overridden_target_name_.empty() ? target_name_.c_str()
: overridden_target_name_.c_str(),
/*network_bio_buf_size=*/0,
/*ssl_bio_buf_size=*/0, &tsi_hs);
/*ssl_bio_buf_size=*/0,
args.GetOwnedString(GRPC_ARG_TRANSPORT_PROTOCOLS), &tsi_hs);
if (result != TSI_OK) {
LOG(ERROR) << "Handshaker creation failed with error "
<< tsi_result_to_string(result);

View File

@ -50,6 +50,7 @@
#include "src/core/util/no_destruct.h"
#include "src/core/util/orphanable.h"
#include "src/core/util/per_cpu.h"
#include "src/core/util/shared_bit_gen.h"
#include "src/core/util/status_helper.h"
#include "src/core/util/sync.h"
@ -107,9 +108,11 @@ struct LegacyMaxAgeFilter::Config {
return absl::Uniform(bit_gen, min, max);
}
};
static NoDestruct<PerCpu<BitGen>> bit_gen(PerCpuOptions().SetMaxShards(8));
const double multiplier = bit_gen->this_cpu().MakeUniformDouble(
1.0 - kMaxConnectionAgeJitter, 1.0 + kMaxConnectionAgeJitter);
const double multiplier = []() {
SharedBitGen g;
return absl::Uniform(g, 1.0 - kMaxConnectionAgeJitter,
1.0 + kMaxConnectionAgeJitter);
}();
// GRPC_MILLIS_INF_FUTURE - 0.5 converts the value to float, so that result
// will not be cast to int implicitly before the comparison.
return Config{args_max_age * multiplier, args_max_idle * multiplier,

View File

@ -91,6 +91,20 @@ class ChannelCompression {
bool is_client, MessageHandle message, DecompressArgs args,
CallTracerInterface* call_tracer) const;
Json::Object ToJsonObject() const {
Json::Object object;
if (max_recv_size_.has_value()) {
object["maxRecvSize"] = Json::FromNumber(*max_recv_size_);
}
object["defaultCompressionAlgorithm"] = Json::FromString(
CompressionAlgorithmAsString(default_compression_algorithm_));
object["enabledCompressionAlgorithms"] = Json::FromString(
std::string(enabled_compression_algorithms_.ToString()));
object["enableCompression"] = Json::FromBool(enable_compression_);
object["enableDecompression"] = Json::FromBool(enable_decompression_);
return object;
}
private:
// Max receive message length, if set.
std::optional<uint32_t> max_recv_size_;
@ -106,7 +120,8 @@ class ChannelCompression {
};
class ClientCompressionFilter final
: public ImplementChannelFilter<ClientCompressionFilter> {
: public ImplementChannelFilter<ClientCompressionFilter>,
public channelz::DataSource {
public:
static const grpc_channel_filter kFilter;
@ -116,7 +131,14 @@ class ClientCompressionFilter final
const ChannelArgs& args, ChannelFilter::Args filter_args);
explicit ClientCompressionFilter(const ChannelArgs& args)
: compression_engine_(args) {}
: channelz::DataSource(args.GetObjectRef<channelz::BaseNode>()),
compression_engine_(args) {}
~ClientCompressionFilter() override { ResetDataSource(); }
void AddData(channelz::DataSink& sink) override {
sink.AddAdditionalInfo("clientCompressionFilter",
compression_engine_.ToJsonObject());
}
// Construct a promise for one call.
class Call {
@ -148,7 +170,8 @@ class ClientCompressionFilter final
};
class ServerCompressionFilter final
: public ImplementChannelFilter<ServerCompressionFilter> {
: public ImplementChannelFilter<ServerCompressionFilter>,
public channelz::DataSource {
public:
static const grpc_channel_filter kFilter;
@ -158,7 +181,14 @@ class ServerCompressionFilter final
const ChannelArgs& args, ChannelFilter::Args filter_args);
explicit ServerCompressionFilter(const ChannelArgs& args)
: compression_engine_(args) {}
: channelz::DataSource(args.GetObjectRef<channelz::BaseNode>()),
compression_engine_(args) {}
~ServerCompressionFilter() override { ResetDataSource(); }
void AddData(channelz::DataSink& sink) override {
sink.AddAdditionalInfo("serverCompressionFilter",
compression_engine_.ToJsonObject());
}
// Construct a promise for one call.
class Call {

View File

@ -153,7 +153,7 @@ void HttpServerFilter::Call::OnServerTrailingMetadata(ServerMetadata& md) {
absl::StatusOr<std::unique_ptr<HttpServerFilter>> HttpServerFilter::Create(
const ChannelArgs& args, ChannelFilter::Args) {
return std::make_unique<HttpServerFilter>(
args.GetBool(GRPC_ARG_SURFACE_USER_AGENT).value_or(true),
args, args.GetBool(GRPC_ARG_SURFACE_USER_AGENT).value_or(true),
args.GetBool(
GRPC_ARG_DO_NOT_USE_UNLESS_YOU_HAVE_PERMISSION_FROM_GRPC_TEAM_ALLOW_BROKEN_PUT_REQUESTS)
.value_or(false));

View File

@ -31,7 +31,8 @@
namespace grpc_core {
// Processes metadata on the server side for HTTP2 transports
class HttpServerFilter : public ImplementChannelFilter<HttpServerFilter> {
class HttpServerFilter : public ImplementChannelFilter<HttpServerFilter>,
public channelz::DataSource {
public:
static const grpc_channel_filter kFilter;
@ -40,9 +41,19 @@ class HttpServerFilter : public ImplementChannelFilter<HttpServerFilter> {
static absl::StatusOr<std::unique_ptr<HttpServerFilter>> Create(
const ChannelArgs& args, ChannelFilter::Args filter_args);
HttpServerFilter(bool surface_user_agent, bool allow_put_requests)
: surface_user_agent_(surface_user_agent),
HttpServerFilter(const ChannelArgs& args, bool surface_user_agent,
bool allow_put_requests)
: channelz::DataSource(args.GetObjectRef<channelz::BaseNode>()),
surface_user_agent_(surface_user_agent),
allow_put_requests_(allow_put_requests) {}
~HttpServerFilter() override { ResetDataSource(); }
void AddData(channelz::DataSink& sink) override {
Json::Object object;
object["surfaceUserAgent"] = Json::FromBool(surface_user_agent_);
object["allowPutRequests"] = Json::FromBool(allow_put_requests_);
sink.AddAdditionalInfo("httpServerFilter", object);
}
class Call {
public:

View File

@ -0,0 +1,40 @@
// Copyright 2025 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/config/core_configuration.h"
#include "src/core/ext/transport/chttp2/client/chttp2_connector.h"
#include "src/core/ext/transport/chttp2/server/chttp2_server.h"
#include "src/core/transport/endpoint_transport.h"
namespace grpc_core {
namespace {
class Chttp2Transport final : public EndpointTransport {
public:
absl::StatusOr<grpc_channel*> ChannelCreate(
std::string target, const ChannelArgs& args) override {
return CreateHttp2Channel(target, args);
}
absl::StatusOr<int> AddPort(Server* server, std::string addr,
const ChannelArgs& args) override {
return Chttp2ServerAddPort(server, addr.c_str(), args);
}
};
} // namespace
void RegisterChttp2Transport(CoreConfiguration::Builder* builder) {
builder->endpoint_transport_registry()->RegisterTransport(
"h2", std::make_unique<Chttp2Transport>());
}
} // namespace grpc_core
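The registration above is the whole integration surface for an endpoint transport: one object implementing ChannelCreate and AddPort, registered under a wire-protocol name. A hedged sketch of the same pattern for a hypothetical transport; the "h3-example" name and the unimplemented bodies are placeholders, not anything gRPC ships:
#include "src/core/config/core_configuration.h"
#include "src/core/transport/endpoint_transport.h"
namespace grpc_core {
class ExampleTransport final : public EndpointTransport {
 public:
  absl::StatusOr<grpc_channel*> ChannelCreate(
      std::string /*target*/, const ChannelArgs& /*args*/) override {
    // A real transport would construct and return its channel here.
    return absl::UnimplementedError("example transport: channels unsupported");
  }
  absl::StatusOr<int> AddPort(Server* /*server*/, std::string /*addr*/,
                              const ChannelArgs& /*args*/) override {
    // A real transport would bind the listener and return the bound port.
    return absl::UnimplementedError("example transport: listeners unsupported");
  }
};
void RegisterExampleTransport(CoreConfiguration::Builder* builder) {
  builder->endpoint_transport_registry()->RegisterTransport(
      "h3-example", std::make_unique<ExampleTransport>());
}
}  // namespace grpc_core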

View File

@ -64,6 +64,7 @@
#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/resolver/resolver_registry.h"
#include "src/core/transport/endpoint_transport_client_channel_factory.h"
#include "src/core/util/debug_location.h"
#include "src/core/util/orphanable.h"
#include "src/core/util/status_helper.h"
@ -145,8 +146,6 @@ void Chttp2Connector::OnHandshakeDone(absl::StatusOr<HandshakerArgs*> result) {
result_->transport = grpc_create_chttp2_transport(
(*result)->args, std::move((*result)->endpoint), true);
CHECK_NE(result_->transport, nullptr);
result_->socket_node =
grpc_chttp2_transport_get_socket_node(result_->transport);
result_->channel_args = std::move((*result)->args);
Ref().release(); // Ref held by OnReceiveSettings()
GRPC_CLOSURE_INIT(&on_receive_settings_, OnReceiveSettings, this,
@ -225,121 +224,21 @@ void Chttp2Connector::MaybeNotify(grpc_error_handle error) {
}
}
namespace {
class Chttp2SecureClientChannelFactory : public ClientChannelFactory {
public:
RefCountedPtr<Subchannel> CreateSubchannel(
const grpc_resolved_address& address, const ChannelArgs& args) override {
absl::StatusOr<ChannelArgs> new_args = GetSecureNamingChannelArgs(args);
if (!new_args.ok()) {
LOG(ERROR) << "Failed to create channel args during subchannel creation: "
<< new_args.status() << "; Got args: " << args.ToString();
return nullptr;
}
RefCountedPtr<Subchannel> s = Subchannel::Create(
MakeOrphanable<Chttp2Connector>(), address, *new_args);
return s;
absl::StatusOr<grpc_channel*> CreateHttp2Channel(std::string target,
const ChannelArgs& args) {
auto r = ChannelCreate(
target,
args.SetObject(EndpointTransportClientChannelFactory<Chttp2Connector>()),
GRPC_CLIENT_CHANNEL, nullptr);
if (r.ok()) {
return r->release()->c_ptr();
} else {
return r.status();
}
private:
static absl::StatusOr<ChannelArgs> GetSecureNamingChannelArgs(
ChannelArgs args) {
auto* channel_credentials = args.GetObject<grpc_channel_credentials>();
if (channel_credentials == nullptr) {
return absl::InternalError("channel credentials missing for channel");
}
// Make sure security connector does not already exist in args.
if (args.Contains(GRPC_ARG_SECURITY_CONNECTOR)) {
return absl::InternalError(
"security connector already present in channel args.");
}
// Find the authority to use in the security connector.
std::optional<std::string> authority =
args.GetOwnedString(GRPC_ARG_DEFAULT_AUTHORITY);
if (!authority.has_value()) {
return absl::InternalError("authority not present in channel args");
}
// Create the security connector using the credentials and target name.
RefCountedPtr<grpc_channel_security_connector>
subchannel_security_connector =
channel_credentials->create_security_connector(
/*call_creds=*/nullptr, authority->c_str(), &args);
if (subchannel_security_connector == nullptr) {
return absl::InternalError(absl::StrFormat(
"Failed to create subchannel for secure name '%s'", *authority));
}
return args.SetObject(std::move(subchannel_security_connector));
}
};
absl::StatusOr<RefCountedPtr<Channel>> CreateChannel(const char* target,
const ChannelArgs& args) {
if (target == nullptr) {
LOG(ERROR) << "cannot create channel with NULL target name";
return absl::InvalidArgumentError("channel target is NULL");
}
return ChannelCreate(target, args, GRPC_CLIENT_CHANNEL, nullptr);
}
} // namespace
} // namespace grpc_core
namespace {
grpc_core::Chttp2SecureClientChannelFactory* g_factory;
gpr_once g_factory_once = GPR_ONCE_INIT;
void FactoryInit() {
g_factory = new grpc_core::Chttp2SecureClientChannelFactory();
}
} // namespace
// Create a client channel:
// Asynchronously: - resolve target
// - connect to it (trying alternatives as presented)
// - perform handshakes
grpc_channel* grpc_channel_create(const char* target,
grpc_channel_credentials* creds,
const grpc_channel_args* c_args) {
grpc_core::ExecCtx exec_ctx;
GRPC_TRACE_LOG(api, INFO)
<< "grpc_channel_create(target=" << target << ", creds=" << (void*)creds
<< ", args=" << (void*)c_args << ")";
grpc_channel* channel = nullptr;
grpc_error_handle error;
if (creds != nullptr) {
// Add channel args containing the client channel factory and channel
// credentials.
gpr_once_init(&g_factory_once, FactoryInit);
grpc_core::ChannelArgs args =
creds->update_arguments(grpc_core::CoreConfiguration::Get()
.channel_args_preconditioning()
.PreconditionChannelArgs(c_args)
.SetObject(creds->Ref())
.SetObject(g_factory));
// Create channel.
auto r = grpc_core::CreateChannel(target, args);
if (r.ok()) {
channel = r->release()->c_ptr();
} else {
error = absl_status_to_grpc_error(r.status());
}
}
if (channel == nullptr) {
intptr_t integer;
grpc_status_code status = GRPC_STATUS_INTERNAL;
if (grpc_error_get_int(error, grpc_core::StatusIntProperty::kRpcStatus,
&integer)) {
status = static_cast<grpc_status_code>(integer);
}
channel = grpc_lame_client_channel_create(
target, status, "Failed to create client channel");
}
return channel;
}
#ifdef GPR_SUPPORT_CHANNELS_FROM_FD
grpc_channel* grpc_channel_create_from_fd(const char* target, int fd,
grpc_channel_credentials* creds,

View File

@ -72,6 +72,9 @@ class Chttp2Connector : public SubchannelConnector {
RefCountedPtr<HandshakeManager> handshake_mgr_;
};
absl::StatusOr<grpc_channel*> CreateHttp2Channel(std::string target,
const ChannelArgs& args);
} // namespace grpc_core
#endif // GRPC_SRC_CORE_EXT_TRANSPORT_CHTTP2_CLIENT_CHTTP2_CONNECTOR_H

File diff suppressed because it is too large

View File

@ -51,7 +51,7 @@ class ActiveConnectionTestPeer;
class HandshakingStateTestPeer;
} // namespace testing
// New ChttpServerListener used if experiment "server_listener" is enabled
// New Chttp2ServerListener
class NewChttp2ServerListener : public Server::ListenerInterface {
public:
using AcceptorPtr =
@ -255,6 +255,10 @@ class PassiveListenerImpl final : public PassiveListener {
};
} // namespace experimental
absl::StatusOr<int> Chttp2ServerAddPort(Server* server, const char* addr,
const ChannelArgs& args);
} // namespace grpc_core
#endif // GRPC_SRC_CORE_EXT_TRANSPORT_CHTTP2_SERVER_CHTTP2_SERVER_H

View File

@ -59,7 +59,6 @@
#include "src/core/call/metadata_info.h"
#include "src/core/config/config_vars.h"
#include "src/core/ext/transport/chttp2/transport/call_tracer_wrapper.h"
#include "src/core/ext/transport/chttp2/transport/context_list_entry.h"
#include "src/core/ext/transport/chttp2/transport/flow_control.h"
#include "src/core/ext/transport/chttp2/transport/frame_data.h"
#include "src/core/ext/transport/chttp2/transport/frame_goaway.h"
@ -77,6 +76,7 @@
#include "src/core/ext/transport/chttp2/transport/varint.h"
#include "src/core/ext/transport/chttp2/transport/write_size_policy.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/event_engine/extensions/channelz.h"
#include "src/core/lib/event_engine/extensions/tcp_trace.h"
#include "src/core/lib/event_engine/query_extensions.h"
#include "src/core/lib/experiments/experiments.h"
@ -102,18 +102,24 @@
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_framing_endpoint_extension.h"
#include "src/core/telemetry/call_tracer.h"
#include "src/core/telemetry/context_list_entry.h"
#include "src/core/telemetry/default_tcp_tracer.h"
#include "src/core/telemetry/stats.h"
#include "src/core/telemetry/stats_data.h"
#include "src/core/util/bitset.h"
#include "src/core/util/crash.h"
#include "src/core/util/debug_location.h"
#include "src/core/util/http_client/parser.h"
#include "src/core/util/notification.h"
#include "src/core/util/ref_counted.h"
#include "src/core/util/shared_bit_gen.h"
#include "src/core/util/status_helper.h"
#include "src/core/util/string.h"
#include "src/core/util/time.h"
#include "src/core/util/useful.h"
using grpc_core::Json;
#define DEFAULT_CONNECTION_WINDOW_TARGET (1024 * 1024)
#define MAX_WINDOW 0x7fffffffu
#define MAX_WRITE_BUFFER_SIZE (64 * 1024 * 1024)
@ -227,16 +233,6 @@ using EventEngine = ::grpc_event_engine::experimental::EventEngine;
using TaskHandle = ::grpc_event_engine::experimental::EventEngine::TaskHandle;
using grpc_core::http2::Http2ErrorCode;
grpc_core::CallTracerAnnotationInterface* ParentCallTracerIfSampled(
grpc_chttp2_stream* s) {
auto* parent_call_tracer =
s->arena->GetContext<grpc_core::CallTracerAnnotationInterface>();
if (parent_call_tracer == nullptr || !parent_call_tracer->IsSampled()) {
return nullptr;
}
return parent_call_tracer;
}
grpc_core::WriteTimestampsCallback g_write_timestamps_callback = nullptr;
grpc_core::CopyContextFn g_get_copied_context_fn = nullptr;
} // namespace
@ -398,6 +394,10 @@ grpc_chttp2_transport::~grpc_chttp2_transport() {
}
}
using grpc_event_engine::experimental::ChannelzExtension;
using grpc_event_engine::experimental::QueryExtension;
using grpc_event_engine::experimental::TcpTraceExtension;
static void read_channel_args(grpc_chttp2_transport* t,
const grpc_core::ChannelArgs& channel_args,
const bool is_client) {
@ -482,6 +482,15 @@ static void read_channel_args(grpc_chttp2_transport* t,
t->peer_string.as_string_view()),
channel_args
.GetObjectRef<grpc_core::channelz::SocketNode::Security>());
// Checks channelz_socket, so must be initialized after.
t->channelz_data_source =
std::make_unique<grpc_chttp2_transport::ChannelzDataSource>(t);
auto epte = QueryExtension<ChannelzExtension>(
grpc_event_engine::experimental::grpc_get_wrapped_event_engine_endpoint(
t->ep.get()));
if (epte != nullptr) {
epte->SetSocketNode(t->channelz_socket);
}
}
t->ack_pings = channel_args.GetBool("grpc.http2.ack_pings").value_or(true);
@ -571,6 +580,95 @@ static void init_keepalive_pings_if_enabled_locked(
}
}
void grpc_chttp2_transport::ChannelzDataSource::AddData(
grpc_core::channelz::DataSink& sink) {
grpc_core::Notification n;
transport_->event_engine->Run([t = transport_->Ref(), &n, &sink]() {
grpc_core::ExecCtx exec_ctx;
t->combiner->Run(
grpc_core::NewClosure([t, &n, &sink](grpc_error_handle) {
Json::Object http2_info;
http2_info["flowControl"] =
Json::FromObject(t->flow_control.stats().ToJsonObject());
Json::Object misc;
misc["maxRequestsPerRead"] =
Json::FromNumber(static_cast<int64_t>(t->max_requests_per_read));
misc["nextStreamId"] = Json::FromNumber(t->next_stream_id);
misc["lastNewStreamId"] = Json::FromNumber(t->last_new_stream_id);
misc["numIncomingStreamsBeforeSettingsAck"] =
Json::FromNumber(t->num_incoming_streams_before_settings_ack);
misc["pingAckCount"] =
Json::FromNumber(static_cast<int64_t>(t->ping_ack_count));
misc["allowTarpit"] = Json::FromBool(t->allow_tarpit);
if (t->allow_tarpit) {
misc["minTarpitDurationMs"] =
Json::FromNumber(t->min_tarpit_duration_ms);
misc["maxTarpitDurationMs"] =
Json::FromNumber(t->max_tarpit_duration_ms);
}
misc["keepaliveTime"] =
Json::FromString(t->keepalive_time.ToJsonString());
misc["nextAdjustedKeepaliveTimestamp"] =
Json::FromString((t->next_adjusted_keepalive_timestamp -
grpc_core::Timestamp::Now())
.ToJsonString());
misc["numMessagesInNextWrite"] =
Json::FromNumber(t->num_messages_in_next_write);
misc["numPendingInducedFrames"] =
Json::FromNumber(t->num_pending_induced_frames);
misc["writeBufferSize"] = Json::FromNumber(t->write_buffer_size);
misc["readingPausedOnPendingInducedFrames"] =
Json::FromBool(t->reading_paused_on_pending_induced_frames);
misc["enablePreferredRxCryptoFrameAdvertisement"] =
Json::FromBool(t->enable_preferred_rx_crypto_frame_advertisement);
misc["keepalivePermitWithoutCalls"] =
Json::FromBool(t->keepalive_permit_without_calls);
misc["bdpPingBlocked"] = Json::FromBool(t->bdp_ping_blocked);
misc["bdpPingStarted"] = Json::FromBool(t->bdp_ping_started);
misc["ackPings"] = Json::FromBool(t->ack_pings);
misc["keepaliveIncomingDataWanted"] =
Json::FromBool(t->keepalive_incoming_data_wanted);
misc["maxConcurrentStreamsOverloadProtection"] =
Json::FromBool(t->max_concurrent_streams_overload_protection);
misc["maxConcurrentStreamsRejectOnClient"] =
Json::FromBool(t->max_concurrent_streams_reject_on_client);
misc["pingOnRstStreamPercent"] =
Json::FromNumber(t->ping_on_rst_stream_percent);
misc["lastWindowUpdateAge"] = Json::FromString(
(grpc_core::Timestamp::Now() - t->last_window_update_time)
.ToJsonString());
http2_info["misc"] = Json::FromObject(std::move(misc));
http2_info["settings"] = Json::FromObject(t->settings.ToJsonObject());
sink.AddAdditionalInfo("http2", std::move(http2_info));
std::vector<grpc_core::RefCountedPtr<grpc_core::channelz::BaseNode>>
children;
children.reserve(t->stream_map.size());
for (auto [id, stream] : t->stream_map) {
if (stream->channelz_call_node == nullptr) {
stream->channelz_call_node =
grpc_core::MakeRefCounted<grpc_core::channelz::CallNode>(
absl::StrCat("chttp2 ",
t->is_client ? "client" : "server",
" stream ", stream->id));
}
children.push_back(stream->channelz_call_node);
}
sink.AddChildObjects(std::move(children));
n.Notify();
}),
absl::OkStatus());
});
n.WaitForNotification();
}
std::unique_ptr<grpc_core::channelz::ZTrace>
grpc_chttp2_transport::ChannelzDataSource::GetZTrace(absl::string_view name) {
if (name == "transport_frames") {
return transport_->http2_ztrace_collector.MakeZTrace();
}
return grpc_core::channelz::DataSource::GetZTrace(name);
}
// TODO(alishananda): add unit testing as part of chttp2 promise conversion work
void grpc_chttp2_transport::WriteSecurityFrame(grpc_core::SliceBuffer* data) {
grpc_core::ExecCtx exec_ctx;
@ -601,9 +699,6 @@ void grpc_chttp2_transport::WriteSecurityFrameLocked(
grpc_chttp2_initiate_write(this, GRPC_CHTTP2_INITIATE_WRITE_SEND_MESSAGE);
}
using grpc_event_engine::experimental::QueryExtension;
using grpc_event_engine::experimental::TcpTraceExtension;
grpc_chttp2_transport::grpc_chttp2_transport(
const grpc_core::ChannelArgs& channel_args,
grpc_core::OrphanablePtr<grpc_endpoint> endpoint, const bool is_client)
@ -639,7 +734,9 @@ grpc_chttp2_transport::grpc_chttp2_transport(
grpc_event_engine::experimental::grpc_get_wrapped_event_engine_endpoint(
ep.get()));
if (epte != nullptr) {
epte->InitializeAndReturnTcpTracer();
epte->SetTcpTracer(std::make_shared<grpc_core::DefaultTcpTracer>(
channel_args.GetObjectRef<
grpc_core::GlobalStatsPluginRegistry::StatsPluginGroup>()));
}
}
@ -1088,6 +1185,8 @@ static void write_action(grpc_chttp2_transport* t) {
<< (t->is_client ? "CLIENT" : "SERVER") << "[" << t << "]: Write "
<< t->outbuf.Length() << " bytes";
t->write_size_policy.BeginWrite(t->outbuf.Length());
t->http2_ztrace_collector.Append(grpc_core::H2BeginEndpointWrite{
static_cast<uint32_t>(t->outbuf.Length())});
grpc_endpoint_write(t->ep.get(), t->outbuf.c_slice_buffer(),
grpc_core::InitTransportClosure<write_action_end>(
t->Ref(), &t->write_action_end_locked),
@ -1369,22 +1468,12 @@ static void log_metadata(const grpc_metadata_batch* md_batch, uint32_t id,
}
static void trace_annotations(grpc_chttp2_stream* s) {
if (!grpc_core::IsCallTracerTransportFixEnabled()) {
if (s->parent_call_tracer != nullptr) {
s->parent_call_tracer->RecordAnnotation(
grpc_core::HttpAnnotation(grpc_core::HttpAnnotation::Type::kStart,
gpr_now(GPR_CLOCK_REALTIME))
.Add(s->t->flow_control.stats())
.Add(s->flow_control.stats()));
}
} else {
if (s->call_tracer != nullptr && s->call_tracer->IsSampled()) {
s->call_tracer->RecordAnnotation(
grpc_core::HttpAnnotation(grpc_core::HttpAnnotation::Type::kStart,
gpr_now(GPR_CLOCK_REALTIME))
.Add(s->t->flow_control.stats())
.Add(s->flow_control.stats()));
}
if (s->call_tracer != nullptr && s->call_tracer->IsSampled()) {
s->call_tracer->RecordAnnotation(
grpc_core::HttpAnnotation(grpc_core::HttpAnnotation::Type::kStart,
gpr_now(GPR_CLOCK_REALTIME))
.Add(s->t->flow_control.stats())
.Add(s->flow_control.stats()));
}
}
@ -1633,9 +1722,6 @@ static void perform_stream_op_locked(void* stream_op,
grpc_chttp2_transport* t = s->t.get();
s->traced = op->is_traced;
if (!grpc_core::IsCallTracerTransportFixEnabled()) {
s->parent_call_tracer = ParentCallTracerIfSampled(s);
}
// Some server filters populate CallTracerInterface in the context only after
// reading initial metadata. (Client-side population is done by
// client_channel filter.)
@ -1919,7 +2005,8 @@ class GracefulGoaway : public grpc_core::RefCounted<GracefulGoaway> {
// Graceful GOAWAYs require a NO_ERROR error code
grpc_chttp2_goaway_append(
(1u << 31) - 1, 0 /*NO_ERROR*/,
grpc_core::Slice::FromCopiedString(message_).TakeCSlice(), &t->qbuf);
grpc_core::Slice::FromCopiedString(message_).TakeCSlice(), &t->qbuf,
&t->http2_ztrace_collector);
t->keepalive_timeout =
std::min(t->keepalive_timeout, grpc_core::Duration::Seconds(20));
t->ping_timeout =
@ -1953,7 +2040,8 @@ class GracefulGoaway : public grpc_core::RefCounted<GracefulGoaway> {
t_->sent_goaway_state = GRPC_CHTTP2_FINAL_GOAWAY_SEND_SCHEDULED;
grpc_chttp2_goaway_append(
t_->last_new_stream_id, 0 /*NO_ERROR*/,
grpc_core::Slice::FromCopiedString(message_).TakeCSlice(), &t_->qbuf);
grpc_core::Slice::FromCopiedString(message_).TakeCSlice(), &t_->qbuf,
&t_->http2_ztrace_collector);
grpc_chttp2_initiate_write(t_.get(),
GRPC_CHTTP2_INITIATE_WRITE_GOAWAY_SENT);
}
@ -1999,9 +2087,10 @@ static void send_goaway(grpc_chttp2_transport* t, grpc_error_handle error,
<< ": Sending goaway last_new_stream_id=" << t->last_new_stream_id
<< " err=" << grpc_core::StatusToString(error);
t->sent_goaway_state = GRPC_CHTTP2_FINAL_GOAWAY_SEND_SCHEDULED;
grpc_chttp2_goaway_append(
t->last_new_stream_id, static_cast<uint32_t>(http_error),
grpc_slice_from_cpp_string(std::move(message)), &t->qbuf);
grpc_chttp2_goaway_append(t->last_new_stream_id,
static_cast<uint32_t>(http_error),
grpc_slice_from_cpp_string(std::move(message)),
&t->qbuf, &t->http2_ztrace_collector);
} else {
// Final GOAWAY has already been sent.
}
@ -2255,7 +2344,7 @@ namespace {
Duration TarpitDuration(grpc_chttp2_transport* t) {
return Duration::Milliseconds(absl::LogUniform<int>(
absl::BitGen(), t->min_tarpit_duration_ms, t->max_tarpit_duration_ms));
SharedBitGen(), t->min_tarpit_duration_ms, t->max_tarpit_duration_ms));
}
template <typename F>
@ -3332,13 +3421,6 @@ absl::string_view grpc_chttp2_transport::GetTransportName() const {
return "chttp2";
}
grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode>
grpc_chttp2_transport_get_socket_node(grpc_core::Transport* transport) {
grpc_chttp2_transport* t =
reinterpret_cast<grpc_chttp2_transport*>(transport);
return t->channelz_socket;
}
grpc_core::Transport* grpc_create_chttp2_transport(
const grpc_core::ChannelArgs& channel_args,
grpc_core::OrphanablePtr<grpc_endpoint> ep, const bool is_client) {

View File

@ -46,9 +46,6 @@ grpc_core::Transport* grpc_create_chttp2_transport(
const grpc_core::ChannelArgs& channel_args,
grpc_core::OrphanablePtr<grpc_endpoint> ep, bool is_client);
grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode>
grpc_chttp2_transport_get_socket_node(grpc_core::Transport* transport);
/// Takes ownership of \a read_buffer, which (if non-NULL) contains
/// leftover bytes previously read from the endpoint (e.g., by handshakers).
/// If non-null, \a notify_on_receive_settings will be scheduled when

View File

@ -302,6 +302,24 @@ class TransportFlowControl final {
double bdp_bw_est;
std::string ToString() const;
Json::Object ToJsonObject() {
Json::Object object;
object["targetWindow"] = Json::FromNumber(target_window);
object["targetFrameSize"] = Json::FromNumber(target_frame_size);
object["targetPreferredRxCryptoFrameSize"] =
Json::FromNumber(target_preferred_rx_crypto_frame_size);
object["ackedInitWindow"] = Json::FromNumber(acked_init_window);
object["queuedInitWindow"] = Json::FromNumber(queued_init_window);
object["sentInitWindow"] = Json::FromNumber(sent_init_window);
object["remoteWindow"] = Json::FromNumber(remote_window);
object["announcedWindow"] = Json::FromNumber(announced_window);
object["announcedStreamTotalOverIncomingWindow"] =
Json::FromNumber(announced_stream_total_over_incoming_window);
object["bdpAccumulator"] = Json::FromNumber(bdp_accumulator);
object["bdpEstimate"] = Json::FromNumber(bdp_estimate);
object["bdpBwEst"] = Json::FromNumber(bdp_bw_est);
return object;
}
};
Stats stats() const {

View File

@ -24,13 +24,21 @@
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/util/crash.h"
using grpc_core::http2::Http2ErrorCode;
using grpc_core::http2::Http2Status;
using grpc_core::http2::ValueOrHttp2Status;
namespace grpc_core {
namespace {
// TODO(tjagtap) TODO(akshitpatel): [PH2][P3] : Write micro benchmarks for
// framing code
// HTTP2 Frame Types
enum class FrameType : uint8_t {
kData = 0,
@ -73,6 +81,21 @@ uint32_t Read3b(const uint8_t* input) {
static_cast<uint32_t>(input[1]) << 8 | static_cast<uint32_t>(input[2]);
}
constexpr uint32_t k8BitMask = 0x7f;
void Write31bits(uint32_t x, uint8_t* output) {
output[0] = static_cast<uint8_t>(k8BitMask & (x >> 24));
output[1] = static_cast<uint8_t>(x >> 16);
output[2] = static_cast<uint8_t>(x >> 8);
output[3] = static_cast<uint8_t>(x);
}
uint32_t Read31bits(const uint8_t* input) {
return (k8BitMask & static_cast<uint32_t>(input[0])) << 24 |
static_cast<uint32_t>(input[1]) << 16 |
static_cast<uint32_t>(input[2]) << 8 | static_cast<uint32_t>(input[3]);
}
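A quick worked example of the 31-bit helpers above: the high (reserved) bit defined by RFC 9113 is masked away on both the write and read paths, so only the low 31 bits survive a round trip (values below are illustrative):
uint8_t buf[4];
Write31bits(0x80000001u, buf);  // reserved MSB is masked to 0 on serialization
uint32_t v = Read31bits(buf);   // and masked again when parsing
// v == 0x00000001u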
void Write4b(uint32_t x, uint8_t* output) {
output[0] = static_cast<uint8_t>(x >> 24);
output[1] = static_cast<uint8_t>(x >> 16);
@ -129,7 +152,7 @@ class SerializeExtraBytesRequired {
size_t operator()(const Http2WindowUpdateFrame&) { return 4; }
size_t operator()(const Http2SecurityFrame&) { return 0; }
size_t operator()(const Http2UnknownFrame&) { Crash("unreachable"); }
size_t operator()(const Http2EmptyFrame&) { Crash("unreachable"); }
size_t operator()(const Http2EmptyFrame&) { return 0; }
};
class SerializeHeaderAndPayload {
@ -215,8 +238,12 @@ class SerializeHeaderAndPayload {
Http2FrameHeader{static_cast<uint32_t>(8 + frame.debug_data.length()),
static_cast<uint8_t>(FrameType::kGoaway), 0, 0}
.Serialize(hdr_and_fixed_payload.begin());
Write4b(frame.last_stream_id,
hdr_and_fixed_payload.begin() + kFrameHeaderSize);
if (GPR_UNLIKELY(frame.last_stream_id > RFC9113::kMaxStreamId31Bit)) {
LOG(ERROR) << "Stream ID will be truncated. The MSB will be set to 0 "
<< frame.last_stream_id;
}
Write31bits(frame.last_stream_id,
hdr_and_fixed_payload.begin() + kFrameHeaderSize);
Write4b(frame.error_code,
hdr_and_fixed_payload.begin() + kFrameHeaderSize + 4);
out_.AppendIndexed(Slice(std::move(hdr_and_fixed_payload)));
@ -228,7 +255,12 @@ class SerializeHeaderAndPayload {
Http2FrameHeader{4, static_cast<uint8_t>(FrameType::kWindowUpdate), 0,
frame.stream_id}
.Serialize(hdr_and_payload.begin());
Write4b(frame.increment, hdr_and_payload.begin() + kFrameHeaderSize);
if (GPR_UNLIKELY(frame.increment > RFC9113::kMaxStreamId31Bit)) {
LOG(ERROR) << "Http2WindowUpdateFrame increment will be truncated to 31 "
"bits. The MSB will be set to 0 "
<< frame.increment;
}
Write31bits(frame.increment, hdr_and_payload.begin() + kFrameHeaderSize);
out_.AppendIndexed(Slice(std::move(hdr_and_payload)));
}
@ -250,109 +282,162 @@ class SerializeHeaderAndPayload {
MutableSlice extra_bytes_;
};
absl::Status StripPadding(SliceBuffer& payload) {
if (payload.Length() < 1) {
return absl::InternalError("padding flag set but no padding byte");
Http2Status StripPadding(const Http2FrameHeader& hdr, SliceBuffer& payload) {
if (GPR_UNLIKELY(payload.Length() < 1)) {
return Http2Status::Http2ConnectionError(
Http2ErrorCode::kProtocolError,
absl::StrCat(RFC9113::kFrameParserIncorrectPadding, hdr.ToString()));
}
const size_t payload_size = payload.Length();
uint8_t padding_bytes;
payload.MoveFirstNBytesIntoBuffer(1, &padding_bytes);
if (payload.Length() < padding_bytes) {
return absl::InternalError("padding flag set but not enough padding bytes");
if (GPR_UNLIKELY(payload_size <= padding_bytes)) {
return Http2Status::Http2ConnectionError(
Http2ErrorCode::kProtocolError,
absl::StrCat(RFC9113::kPaddingLengthLargerThanFrameLength,
hdr.ToString()));
}
// We don't check for padding being zero.
// No point checking bytes that will be discarded.
// RFC9113 : A receiver is not obligated to verify padding but MAY treat
// non-zero padding as a connection error of type PROTOCOL_ERROR.
payload.RemoveLastNBytes(padding_bytes);
return absl::OkStatus();
return Http2Status::Ok();
}
absl::StatusOr<Http2DataFrame> ParseDataFrame(const Http2FrameHeader& hdr,
ValueOrHttp2Status<Http2Frame> ParseDataFrame(const Http2FrameHeader& hdr,
SliceBuffer& payload) {
if (hdr.stream_id == 0) {
return absl::InternalError(
absl::StrCat("invalid stream id: ", hdr.ToString()));
if (GPR_UNLIKELY((hdr.stream_id % 2) == 0)) {
if ((hdr.stream_id == 0)) {
return Http2Status::Http2ConnectionError(
Http2ErrorCode::kProtocolError,
absl::StrCat(RFC9113::kDataStreamIdMustBeNonZero, hdr.ToString()));
} else {
return Http2Status::Http2ConnectionError(
Http2ErrorCode::kProtocolError,
absl::StrCat(RFC9113::kStreamIdMustBeOdd, hdr.ToString()));
}
}
if (hdr.flags & kFlagPadded) {
auto s = StripPadding(payload);
if (!s.ok()) return s;
Http2Status s = StripPadding(hdr, payload);
if (GPR_UNLIKELY(!s.IsOk())) {
return ValueOrHttp2Status<Http2Frame>(std::move(s));
}
}
return Http2DataFrame{hdr.stream_id, ExtractFlag(hdr.flags, kFlagEndStream),
std::move(payload)};
return ValueOrHttp2Status<Http2Frame>(
Http2DataFrame{hdr.stream_id, ExtractFlag(hdr.flags, kFlagEndStream),
std::move(payload)});
}
absl::StatusOr<Http2HeaderFrame> ParseHeaderFrame(const Http2FrameHeader& hdr,
SliceBuffer& payload) {
if (hdr.stream_id == 0) {
return absl::InternalError(
absl::StrCat("invalid stream id: ", hdr.ToString()));
ValueOrHttp2Status<Http2Frame> ParseHeaderFrame(const Http2FrameHeader& hdr,
SliceBuffer& payload) {
if (GPR_UNLIKELY((hdr.stream_id % 2) == 0)) {
if (hdr.stream_id == 0) {
return Http2Status::Http2ConnectionError(
Http2ErrorCode::kProtocolError,
absl::StrCat(RFC9113::kHeaderStreamIdMustBeNonZero, hdr.ToString()));
} else {
return Http2Status::Http2ConnectionError(
Http2ErrorCode::kProtocolError,
absl::StrCat(RFC9113::kStreamIdMustBeOdd, hdr.ToString()));
}
}
if (hdr.flags & kFlagPadded) {
auto s = StripPadding(payload);
if (!s.ok()) return s;
Http2Status s = StripPadding(hdr, payload);
if (GPR_UNLIKELY(!s.IsOk())) {
return ValueOrHttp2Status<Http2Frame>(std::move(s));
}
}
if (hdr.flags & kFlagPriority) {
if (payload.Length() < 5) {
return absl::InternalError(
absl::StrCat("invalid priority payload: ", hdr.ToString()));
if (GPR_UNLIKELY(hdr.flags & kFlagPriority)) {
if (GPR_UNLIKELY(payload.Length() < 5)) {
return Http2Status::Http2ConnectionError(
Http2ErrorCode::kProtocolError,
absl::StrCat(RFC9113::kIncorrectFrame, hdr.ToString()));
}
uint8_t trash[5];
payload.MoveFirstNBytesIntoBuffer(5, trash);
}
return Http2HeaderFrame{
return ValueOrHttp2Status<Http2Frame>(Http2HeaderFrame{
hdr.stream_id, ExtractFlag(hdr.flags, kFlagEndHeaders),
ExtractFlag(hdr.flags, kFlagEndStream), std::move(payload)};
ExtractFlag(hdr.flags, kFlagEndStream), std::move(payload)});
}
absl::StatusOr<Http2ContinuationFrame> ParseContinuationFrame(
ValueOrHttp2Status<Http2Frame> ParseContinuationFrame(
const Http2FrameHeader& hdr, SliceBuffer& payload) {
if (hdr.stream_id == 0) {
return absl::InternalError(
absl::StrCat("invalid stream id: ", hdr.ToString()));
if (GPR_UNLIKELY((hdr.stream_id % 2) == 0)) {
if (hdr.stream_id == 0) {
return Http2Status::Http2ConnectionError(
Http2ErrorCode::kProtocolError,
absl::StrCat(RFC9113::kContinuationStreamIdMustBeNonZero,
hdr.ToString()));
} else {
return Http2Status::Http2ConnectionError(
Http2ErrorCode::kProtocolError,
absl::StrCat(RFC9113::kStreamIdMustBeOdd, hdr.ToString()));
}
}
return Http2ContinuationFrame{hdr.stream_id,
ExtractFlag(hdr.flags, kFlagEndHeaders),
std::move(payload)};
return ValueOrHttp2Status<Http2Frame>(Http2ContinuationFrame{
hdr.stream_id, ExtractFlag(hdr.flags, kFlagEndHeaders),
std::move(payload)});
}
absl::StatusOr<Http2RstStreamFrame> ParseRstStreamFrame(
const Http2FrameHeader& hdr, SliceBuffer& payload) {
if (payload.Length() != 4) {
return absl::InternalError(
absl::StrCat("invalid rst stream payload: ", hdr.ToString()));
ValueOrHttp2Status<Http2Frame> ParseRstStreamFrame(const Http2FrameHeader& hdr,
SliceBuffer& payload) {
if (GPR_UNLIKELY(payload.Length() != 4)) {
return Http2Status::Http2ConnectionError(
Http2ErrorCode::kFrameSizeError,
absl::StrCat(RFC9113::kRstStreamLength4, hdr.ToString()));
}
if (hdr.stream_id == 0) {
return absl::InternalError(
absl::StrCat("invalid stream id: ", hdr.ToString()));
if (GPR_UNLIKELY((hdr.stream_id % 2) == 0)) {
if ((hdr.stream_id == 0)) {
return Http2Status::Http2ConnectionError(
Http2ErrorCode::kProtocolError,
absl::StrCat(RFC9113::kRstStreamStreamIdMustBeNonZero,
hdr.ToString()));
} else {
return Http2Status::Http2ConnectionError(
Http2ErrorCode::kProtocolError,
absl::StrCat(RFC9113::kStreamIdMustBeOdd, hdr.ToString()));
}
}
uint8_t buffer[4];
payload.CopyToBuffer(buffer);
return Http2RstStreamFrame{hdr.stream_id, Read4b(buffer)};
return ValueOrHttp2Status<Http2Frame>(
Http2RstStreamFrame{hdr.stream_id, Read4b(buffer)});
}
absl::StatusOr<Http2SettingsFrame> ParseSettingsFrame(
const Http2FrameHeader& hdr, SliceBuffer& payload) {
if (hdr.stream_id != 0) {
return absl::InternalError(
absl::StrCat("invalid stream id: ", hdr.ToString()));
}
if (hdr.flags == kFlagAck) {
if (payload.Length() != 0) {
return absl::InternalError(
absl::StrCat("invalid settings ack length: ", hdr.ToString()));
}
return Http2SettingsFrame{true, {}};
ValueOrHttp2Status<Http2Frame> ParseSettingsFrame(const Http2FrameHeader& hdr,
SliceBuffer& payload) {
if (GPR_UNLIKELY(hdr.stream_id != 0)) {
return Http2Status::Http2ConnectionError(
Http2ErrorCode::kProtocolError,
absl::StrCat(RFC9113::kSettingsStreamIdMustBeZero, hdr.ToString()));
}
if (payload.Length() % 6 != 0) {
return absl::InternalError(
absl::StrCat("invalid settings payload: ", hdr.ToString(),
" -- settings must be multiples of 6 bytes long"));
if (hdr.flags & kFlagAck) {
if (GPR_UNLIKELY(payload.Length() != 0)) {
return Http2Status::Http2ConnectionError(
Http2ErrorCode::kFrameSizeError,
absl::StrCat(RFC9113::kSettingsLength0, hdr.ToString()));
}
return ValueOrHttp2Status<Http2Frame>(Http2SettingsFrame{true, {}});
}
if (GPR_UNLIKELY(payload.Length() % 6 != 0)) {
return Http2Status::Http2ConnectionError(
Http2ErrorCode::kFrameSizeError,
absl::StrCat(RFC9113::kSettingsLength6x, hdr.ToString()));
}
Http2SettingsFrame frame{false, {}};
@ -364,19 +449,21 @@ absl::StatusOr<Http2SettingsFrame> ParseSettingsFrame(
Read4b(buffer + 2),
});
}
return std::move(frame);
return ValueOrHttp2Status<Http2Frame>(std::move(frame));
}
absl::StatusOr<Http2PingFrame> ParsePingFrame(const Http2FrameHeader& hdr,
ValueOrHttp2Status<Http2Frame> ParsePingFrame(const Http2FrameHeader& hdr,
SliceBuffer& payload) {
if (payload.Length() != 8) {
return absl::InternalError(
absl::StrCat("invalid ping payload: ", hdr.ToString()));
if (GPR_UNLIKELY(payload.Length() != 8)) {
return Http2Status::Http2ConnectionError(
Http2ErrorCode::kFrameSizeError,
absl::StrCat(RFC9113::kPingLength8, hdr.ToString()));
}
if (hdr.stream_id != 0) {
return absl::InternalError(
absl::StrCat("invalid ping stream id: ", hdr.ToString()));
if (GPR_UNLIKELY(hdr.stream_id != 0)) {
return Http2Status::Http2ConnectionError(
Http2ErrorCode::kProtocolError,
absl::StrCat(RFC9113::kPingStreamIdMustBeZero, hdr.ToString()));
}
// RFC9113 : Unused flags MUST be ignored on receipt and MUST be left unset
@ -386,49 +473,65 @@ absl::StatusOr<Http2PingFrame> ParsePingFrame(const Http2FrameHeader& hdr,
uint8_t buffer[8];
payload.CopyToBuffer(buffer);
return Http2PingFrame{ack, Read8b(buffer)};
return ValueOrHttp2Status<Http2Frame>(Http2PingFrame{ack, Read8b(buffer)});
}
absl::StatusOr<Http2GoawayFrame> ParseGoawayFrame(const Http2FrameHeader& hdr,
SliceBuffer& payload) {
if (payload.Length() < 8) {
return absl::InternalError(
absl::StrCat("invalid goaway payload: ", hdr.ToString(),
" -- must be at least 8 bytes"));
ValueOrHttp2Status<Http2Frame> ParseGoawayFrame(const Http2FrameHeader& hdr,
SliceBuffer& payload) {
if (GPR_UNLIKELY(payload.Length() < 8)) {
return Http2Status::Http2ConnectionError(
Http2ErrorCode::kFrameSizeError,
absl::StrCat(RFC9113::kGoAwayLength8, hdr.ToString()));
}
if (hdr.stream_id != 0) {
return absl::InternalError(
absl::StrCat("invalid goaway stream id: ", hdr.ToString()));
if (GPR_UNLIKELY(hdr.stream_id != 0)) {
return Http2Status::Http2ConnectionError(
Http2ErrorCode::kProtocolError,
absl::StrCat(RFC9113::kGoAwayStreamIdMustBeZero, hdr.ToString()));
}
uint8_t buffer[8];
payload.MoveFirstNBytesIntoBuffer(8, buffer);
return Http2GoawayFrame{Read4b(buffer), Read4b(buffer + 4),
payload.JoinIntoSlice()};
return ValueOrHttp2Status<Http2Frame>(Http2GoawayFrame{
/*Last-Stream-ID (31)*/ Read31bits(buffer),
/*Error Code (32)*/ Read4b(buffer + 4),
/*Additional Debug Data(variable)*/ payload.JoinIntoSlice()});
}
absl::StatusOr<Http2WindowUpdateFrame> ParseWindowUpdateFrame(
ValueOrHttp2Status<Http2Frame> ParseWindowUpdateFrame(
const Http2FrameHeader& hdr, SliceBuffer& payload) {
if (payload.Length() != 4) {
return absl::InternalError(
absl::StrCat("invalid window update payload: ", hdr.ToString(),
" -- must be 4 bytes"));
if (GPR_UNLIKELY(payload.Length() != 4)) {
return Http2Status::Http2ConnectionError(
Http2ErrorCode::kFrameSizeError,
absl::StrCat(RFC9113::kWindowUpdateLength4, hdr.ToString()));
}
if (hdr.flags != 0) {
return absl::InternalError(
absl::StrCat("invalid window update flags: ", hdr.ToString()));
if (GPR_UNLIKELY(hdr.stream_id > 0u && (hdr.stream_id % 2) == 0)) {
return Http2Status::Http2ConnectionError(
Http2ErrorCode::kProtocolError,
absl::StrCat(RFC9113::kStreamIdMustBeOdd, hdr.ToString()));
}
uint8_t buffer[4];
payload.CopyToBuffer(buffer);
return Http2WindowUpdateFrame{hdr.stream_id, Read4b(buffer)};
const uint32_t window_size_increment = Read31bits(buffer);
if (GPR_UNLIKELY(window_size_increment == 0)) {
if (hdr.stream_id == 0) {
return Http2Status::Http2ConnectionError(
Http2ErrorCode::kProtocolError,
absl::StrCat(RFC9113::kWindowSizeIncrement, hdr.ToString()));
} else {
return Http2Status::Http2StreamError(
Http2ErrorCode::kProtocolError,
absl::StrCat(RFC9113::kWindowSizeIncrement, hdr.ToString()));
}
}
return ValueOrHttp2Status<Http2Frame>(
Http2WindowUpdateFrame{hdr.stream_id, window_size_increment});
}
absl::StatusOr<Http2SecurityFrame> ParseSecurityFrame(
ValueOrHttp2Status<Http2Frame> ParseSecurityFrame(
const Http2FrameHeader& /*hdr*/, SliceBuffer& payload) {
return Http2SecurityFrame{std::move(payload)};
// TODO(tjagtap) : [PH2][P3] : Add validations
return ValueOrHttp2Status<Http2Frame>(Http2SecurityFrame{std::move(payload)});
}
} // namespace
@ -441,7 +544,11 @@ void Http2FrameHeader::Serialize(uint8_t* output) const {
}
Http2FrameHeader Http2FrameHeader::Parse(const uint8_t* input) {
return Http2FrameHeader{Read3b(input), input[3], input[4], Read4b(input + 5)};
return Http2FrameHeader{
/* Length(24) */ Read3b(input),
/* Type(8) */ input[3],
/* Flags(8) */ input[4],
/* Reserved(1), Stream Identifier(31) */ Read31bits(input + 5)};
}
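For concreteness, here is how a 9-byte wire header maps onto Http2FrameHeader with the annotated Parse above. The bytes are an illustrative PING ACK header, and the member names (length, type, flags, stream_id) are assumed from the constructor order used in Parse:
const uint8_t wire[9] = {0x00, 0x00, 0x08,         // Length(24) = 8
                         0x06,                     // Type(8) = PING
                         0x01,                     // Flags(8) = ACK
                         0x00, 0x00, 0x00, 0x00};  // Stream Identifier = 0
Http2FrameHeader hdr = Http2FrameHeader::Parse(wire);
// hdr.length == 8, hdr.type == 6, hdr.flags == 1, hdr.stream_id == 0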
namespace {
@ -493,9 +600,10 @@ void Serialize(absl::Span<Http2Frame> frames, SliceBuffer& out) {
}
}
absl::StatusOr<Http2Frame> ParseFramePayload(const Http2FrameHeader& hdr,
SliceBuffer payload) {
http2::ValueOrHttp2Status<Http2Frame> ParseFramePayload(
const Http2FrameHeader& hdr, SliceBuffer payload) {
CHECK(payload.Length() == hdr.length);
switch (static_cast<FrameType>(hdr.type)) {
case FrameType::kData:
return ParseDataFrame(hdr, payload);
@ -514,20 +622,20 @@ absl::StatusOr<Http2Frame> ParseFramePayload(const Http2FrameHeader& hdr,
case FrameType::kWindowUpdate:
return ParseWindowUpdateFrame(hdr, payload);
case FrameType::kPushPromise:
return absl::InternalError(
"push promise not supported (and SETTINGS_ENABLE_PUSH explicitly "
"disabled).");
return Http2Status::Http2ConnectionError(
Http2ErrorCode::kProtocolError,
absl::StrCat(RFC9113::kNoPushPromise, hdr.ToString()));
case FrameType::kCustomSecurity:
return ParseSecurityFrame(hdr, payload);
default:
return Http2UnknownFrame{};
return ValueOrHttp2Status<Http2Frame>(Http2UnknownFrame{});
}
}
GrpcMessageHeader ExtractGrpcHeader(SliceBuffer& payload) {
CHECK_GE(payload.Length(), kGrpcHeaderSizeInBytes);
uint8_t buffer[kGrpcHeaderSizeInBytes];
payload.MoveFirstNBytesIntoBuffer(kGrpcHeaderSizeInBytes, buffer);
payload.CopyFirstNBytesIntoBuffer(kGrpcHeaderSizeInBytes, buffer);
GrpcMessageHeader header;
header.flags = buffer[0];
header.length = Read4b(buffer + 1);

View File

@ -25,6 +25,7 @@
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "src/core/ext/transport/chttp2/transport/http2_status.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/slice/slice_buffer.h"
@ -156,8 +157,8 @@ struct Http2GoawayFrame {
// WINDOW_UPDATE frame
struct Http2WindowUpdateFrame {
uint32_t stream_id;
uint32_t increment;
uint32_t stream_id = 0;
uint32_t increment = 0;
bool operator==(const Http2WindowUpdateFrame& other) const {
return stream_id == other.stream_id && increment == other.increment;
@ -229,8 +230,8 @@ struct Http2FrameHeader {
// If a frame should simply be ignored, this function returns a
// Http2UnknownFrame.
// It is expected that hdr.length == payload.Length().
absl::StatusOr<Http2Frame> ParseFramePayload(const Http2FrameHeader& hdr,
SliceBuffer payload);
http2::ValueOrHttp2Status<Http2Frame> ParseFramePayload(
const Http2FrameHeader& hdr, SliceBuffer payload);
// Serialize frame and append to out, leaves frames in an unknown state (may
// move things out of frames)
@ -242,8 +243,8 @@ void Serialize(absl::Span<Http2Frame> frames, SliceBuffer& out);
constexpr uint8_t kGrpcHeaderSizeInBytes = 5;
struct GrpcMessageHeader {
uint8_t flags;
uint32_t length;
uint8_t flags = 0;
uint32_t length = 0;
};
// If the payload SliceBuffer is too small to hold a gRPC header, this function
@ -254,6 +255,77 @@ GrpcMessageHeader ExtractGrpcHeader(SliceBuffer& payload);
void AppendGrpcHeaderToSliceBuffer(SliceBuffer& payload, const uint8_t flags,
const uint32_t length);
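The 5-byte gRPC message header is one compression-flag byte followed by a 4-byte big-endian length. A minimal round-trip sketch using the two helpers declared above; how the header is paired with its payload here is an illustrative assumption:
grpc_core::SliceBuffer framed;
grpc_core::AppendGrpcHeaderToSliceBuffer(framed, /*flags=*/0, /*length=*/5);
framed.Append(grpc_core::Slice::FromCopiedString("hello"));
grpc_core::GrpcMessageHeader hdr = grpc_core::ExtractGrpcHeader(framed);
// hdr.flags == 0 and hdr.length == 5. After the change above, the header bytes
// are copied rather than consumed, so framed still starts with the 5-byte header.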
namespace RFC9113 {
// RFC9113: 5.1.1.
inline constexpr absl::string_view kStreamIdMustBeOdd =
"RFC9113: Streams initiated by a client MUST use odd-numbered stream "
"identifiers"; // gRPC streams are only initiated by a client.
// 6.
// Stream Identifier related errors
// Non-Zero Stream Identifier
inline constexpr absl::string_view kDataStreamIdMustBeNonZero =
"RFC9113: DATA frames MUST be associated with a stream";
inline constexpr absl::string_view kHeaderStreamIdMustBeNonZero =
"RFC9113: HEADERS frames MUST be associated with a stream";
inline constexpr absl::string_view kContinuationStreamIdMustBeNonZero =
"RFC9113: CONTINUATION frames MUST be associated with a stream";
inline constexpr absl::string_view kRstStreamStreamIdMustBeNonZero =
"RFC9113: RST_STREAM frames MUST be associated with a stream";
// Zero Stream Identifier
inline constexpr absl::string_view kPingStreamIdMustBeZero =
"RFC9113: If a PING frame is received with a Stream Identifier field "
"value other than 0x00, the recipient MUST respond with a connection error";
inline constexpr absl::string_view kGoAwayStreamIdMustBeZero =
"RFC9113: An endpoint MUST treat a GOAWAY frame with a stream identifier "
"other than 0x00 as a connection error";
inline constexpr absl::string_view kSettingsStreamIdMustBeZero =
"RFC9113: If an endpoint receives a SETTINGS frame whose Stream "
"Identifier field is anything other than 0x00, the endpoint MUST respond "
"with a connection error";
// Frame length related errors
inline constexpr absl::string_view kRstStreamLength4 =
"RFC9113: A RST_STREAM frame with a length other than 4 octets MUST be "
"treated as a connection error";
inline constexpr absl::string_view kSettingsLength0 =
"RFC9113: Receipt of a SETTINGS frame with the ACK flag set and a length "
"field value other than 0 MUST be treated as a connection error";
inline constexpr absl::string_view kSettingsLength6x =
"RFC9113: SETTINGS frame with a length other than a multiple of 6 octets "
"MUST be treated as a connection error";
inline constexpr absl::string_view kPingLength8 =
"RFC9113: Receipt of a PING frame with a length field value other than 8 "
"MUST be treated as a connection error";
inline constexpr absl::string_view kWindowUpdateLength4 =
"RFC9113: A WINDOW_UPDATE frame with a length other than 4 octets MUST be "
"treated as a connection error";
inline constexpr absl::string_view kWindowSizeIncrement =
"RFC9113: The legal range for the increment to the flow-control window is "
"1 to (2^31)-1";
inline constexpr absl::string_view kPaddingLengthLargerThanFrameLength =
"RFC9113: If the length of the padding is the length of the frame payload "
"or greater, the recipient MUST treat this as a connection error";
// Misc Errors
inline constexpr absl::string_view kNoPushPromise =
"RFC9113: PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH "
"setting of the "
"peer endpoint is set to 0";
inline constexpr absl::string_view kFrameParserIncorrectPadding =
"Incorrect length of padding in frame";
inline constexpr absl::string_view kIncorrectFrame = "Incorrect Frame";
inline constexpr absl::string_view kGoAwayLength8 =
"GOAWAY frame should have a Last-Stream-ID and Error Code making the "
"minimum length 8 octets";
// TODO(tjagtap) : [PH2][P2] : Take care that our transport class does not make
// stream id larger than this.
inline constexpr uint32_t kMaxStreamId31Bit = 0x7fffffffu;
} // namespace RFC9113
} // namespace grpc_core
#endif // GRPC_SRC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_H

View File

@ -55,6 +55,7 @@ absl::Status grpc_chttp2_data_parser_begin_frame(uint8_t flags,
void grpc_chttp2_encode_data(uint32_t id, grpc_slice_buffer* inbuf,
uint32_t write_bytes, int is_eof,
grpc_core::CallTracerInterface* call_tracer,
grpc_core::Http2ZTraceCollector* ztrace_collector,
grpc_slice_buffer* outbuf) {
grpc_slice hdr;
uint8_t* p;
@ -74,6 +75,9 @@ void grpc_chttp2_encode_data(uint32_t id, grpc_slice_buffer* inbuf,
*p++ = static_cast<uint8_t>(id);
grpc_slice_buffer_add(outbuf, hdr);
ztrace_collector->Append(
grpc_core::H2DataTrace<false>{id, is_eof != 0, write_bytes});
grpc_slice_buffer_move_first_no_ref(inbuf, write_bytes, outbuf);
grpc_core::global_stats().IncrementHttp2WriteDataFrameSize(write_bytes);
@ -148,6 +152,12 @@ grpc_error_handle grpc_chttp2_data_parser_parse(void* /*parser*/,
grpc_slice_buffer_add(&s->frame_storage, slice);
grpc_chttp2_maybe_complete_recv_message(t, s);
if (is_last) {
t->http2_ztrace_collector.Append(grpc_core::H2DataTrace<true>{
t->incoming_stream_id,
(t->incoming_frame_flags & GRPC_CHTTP2_DATA_FLAG_END_STREAM) != 0,
t->incoming_frame_size});
}
if (is_last && s->received_last_frame) {
grpc_chttp2_mark_stream_closed(
t, s, true, false,

View File

@ -26,6 +26,7 @@
#include <stdint.h>
#include "absl/status/status.h"
#include "src/core/ext/transport/chttp2/transport/http2_ztrace_collector.h"
#include "src/core/ext/transport/chttp2/transport/legacy_frame.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/promise/poll.h"
@ -49,6 +50,7 @@ grpc_error_handle grpc_chttp2_data_parser_parse(void* parser,
void grpc_chttp2_encode_data(uint32_t id, grpc_slice_buffer* inbuf,
uint32_t write_bytes, int is_eof,
grpc_core::CallTracerInterface* call_tracer,
grpc_core::Http2ZTraceCollector* ztrace_collector,
grpc_slice_buffer* outbuf);
grpc_core::Poll<grpc_error_handle> grpc_deframe_unprocessed_incoming_frames(

View File

@ -138,6 +138,11 @@ grpc_error_handle grpc_chttp2_goaway_parser_parse(void* parser,
p->debug_pos += static_cast<uint32_t>(end - cur);
p->state = GRPC_CHTTP2_GOAWAY_DEBUG;
if (is_last) {
t->http2_ztrace_collector.Append([p]() {
return grpc_core::H2GoAwayTrace<true>{
p->last_stream_id, p->error_code,
std::string(absl::string_view(p->debug_data, p->debug_length))};
});
grpc_chttp2_add_incoming_goaway(
t, p->error_code, p->last_stream_id,
absl::string_view(p->debug_data, p->debug_length));
@ -149,15 +154,22 @@ grpc_error_handle grpc_chttp2_goaway_parser_parse(void* parser,
GPR_UNREACHABLE_CODE(return GRPC_ERROR_CREATE("Should never reach here"));
}
void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
const grpc_slice& debug_data,
grpc_slice_buffer* slice_buffer) {
void grpc_chttp2_goaway_append(
uint32_t last_stream_id, uint32_t error_code, const grpc_slice& debug_data,
grpc_slice_buffer* slice_buffer,
grpc_core::Http2ZTraceCollector* ztrace_collector) {
grpc_slice header = GRPC_SLICE_MALLOC(9 + 4 + 4);
uint8_t* p = GRPC_SLICE_START_PTR(header);
uint32_t frame_length;
CHECK(GRPC_SLICE_LENGTH(debug_data) < UINT32_MAX - 4 - 4);
frame_length = 4 + 4 + static_cast<uint32_t> GRPC_SLICE_LENGTH(debug_data);
ztrace_collector->Append([last_stream_id, error_code, debug_data]() {
return grpc_core::H2GoAwayTrace<false>{
last_stream_id, error_code,
std::string(grpc_core::StringViewFromSlice(debug_data))};
});
// frame header: length
*p++ = static_cast<uint8_t>(frame_length >> 16);
*p++ = static_cast<uint8_t>(frame_length >> 8);

View File

@ -23,6 +23,7 @@
#include <grpc/support/port_platform.h>
#include <stdint.h>
#include "src/core/ext/transport/chttp2/transport/http2_ztrace_collector.h"
#include "src/core/ext/transport/chttp2/transport/legacy_frame.h"
#include "src/core/lib/iomgr/error.h"
@ -56,8 +57,9 @@ grpc_error_handle grpc_chttp2_goaway_parser_parse(void* parser,
const grpc_slice& slice,
int is_last);
void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
const grpc_slice& debug_data,
grpc_slice_buffer* slice_buffer);
void grpc_chttp2_goaway_append(
uint32_t last_stream_id, uint32_t error_code, const grpc_slice& debug_data,
grpc_slice_buffer* slice_buffer,
grpc_core::Http2ZTraceCollector* ztrace_collector);
#endif // GRPC_SRC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_GOAWAY_H

View File

@ -90,6 +90,8 @@ grpc_error_handle grpc_chttp2_ping_parser_parse(void* parser,
if (p->byte == 8) {
CHECK(is_last);
t->http2_ztrace_collector.Append(
grpc_core::H2PingTrace<true>{p->is_ack != 0, p->opaque_8bytes});
if (p->is_ack) {
GRPC_TRACE_LOG(http2_ping, INFO)
<< (t->is_client ? "CLIENT" : "SERVER") << "[" << t

View File

@ -36,17 +36,20 @@
#include "src/core/ext/transport/chttp2/transport/ping_callbacks.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/experiments/experiments.h"
#include "src/core/util/shared_bit_gen.h"
#include "src/core/util/status_helper.h"
using grpc_core::http2::Http2ErrorCode;
grpc_slice grpc_chttp2_rst_stream_create(
uint32_t id, uint32_t code, grpc_core::CallTracerInterface* call_tracer) {
uint32_t id, uint32_t code, grpc_core::CallTracerInterface* call_tracer,
grpc_core::Http2ZTraceCollector* ztrace_collector) {
static const size_t frame_size = 13;
grpc_slice slice = GRPC_SLICE_MALLOC(frame_size);
if (call_tracer != nullptr) {
call_tracer->RecordOutgoingBytes({frame_size, 0, 0});
}
ztrace_collector->Append(grpc_core::H2RstStreamTrace<false>{id, code});
uint8_t* p = GRPC_SLICE_START_PTR(slice);
// Frame size.
@ -75,8 +78,9 @@ void grpc_chttp2_add_rst_stream_to_next_write(
grpc_chttp2_transport* t, uint32_t id, uint32_t code,
grpc_core::CallTracerInterface* call_tracer) {
t->num_pending_induced_frames++;
grpc_slice_buffer_add(&t->qbuf,
grpc_chttp2_rst_stream_create(id, code, call_tracer));
grpc_slice_buffer_add(
&t->qbuf, grpc_chttp2_rst_stream_create(id, code, call_tracer,
&t->http2_ztrace_collector));
}
grpc_error_handle grpc_chttp2_rst_stream_parser_begin_frame(
@ -114,6 +118,8 @@ grpc_error_handle grpc_chttp2_rst_stream_parser_parse(void* parser,
((static_cast<uint32_t>(p->reason_bytes[1])) << 16) |
((static_cast<uint32_t>(p->reason_bytes[2])) << 8) |
((static_cast<uint32_t>(p->reason_bytes[3])));
t->http2_ztrace_collector.Append(
grpc_core::H2RstStreamTrace<true>{t->incoming_stream_id, reason});
GRPC_TRACE_LOG(http, INFO)
<< "[chttp2 transport=" << t << " stream=" << s
<< "] received RST_STREAM(reason=" << reason << ")";
@ -128,8 +134,9 @@ grpc_error_handle grpc_chttp2_rst_stream_parser_parse(void* parser,
grpc_core::StatusIntProperty::kHttp2Error,
static_cast<intptr_t>(reason));
}
grpc_core::SharedBitGen g;
if (!t->is_client &&
absl::Bernoulli(t->bitgen, t->ping_on_rst_stream_percent / 100.0)) {
absl::Bernoulli(g, t->ping_on_rst_stream_percent / 100.0)) {
++t->num_pending_induced_frames;
t->ping_callbacks.RequestPing();
grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING);

View File

@ -23,6 +23,7 @@
#include <grpc/support/port_platform.h>
#include <stdint.h>
#include "src/core/ext/transport/chttp2/transport/http2_ztrace_collector.h"
#include "src/core/ext/transport/chttp2/transport/legacy_frame.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/transport/transport.h"
@ -34,7 +35,8 @@ struct grpc_chttp2_rst_stream_parser {
};
grpc_slice grpc_chttp2_rst_stream_create(
uint32_t stream_id, uint32_t code,
grpc_core::CallTracerInterface* call_tracer);
grpc_core::CallTracerInterface* call_tracer,
grpc_core::Http2ZTraceCollector* ztrace_collector);
// Adds RST_STREAM frame to t->qbuf (buffer for the next write). Should be
// called when we want to add RST_STREAM and we are not in

View File

@ -31,6 +31,7 @@
#include "src/core/ext/transport/chttp2/transport/flow_control.h"
#include "src/core/ext/transport/chttp2/transport/frame_goaway.h"
#include "src/core/ext/transport/chttp2/transport/http2_settings.h"
#include "src/core/ext/transport/chttp2/transport/http2_ztrace_collector.h"
#include "src/core/ext/transport/chttp2/transport/internal.h"
#include "src/core/ext/transport/chttp2/transport/legacy_frame.h"
#include "src/core/lib/debug/trace.h"
@ -94,6 +95,8 @@ grpc_error_handle grpc_chttp2_settings_parser_parse(void* p,
const uint8_t* end = GRPC_SLICE_END_PTR(slice);
if (parser->is_ack) {
t->http2_ztrace_collector.Append(
grpc_core::H2SettingsTrace<true>{true, {}});
return absl::OkStatus();
}
@ -118,6 +121,20 @@ grpc_error_handle grpc_chttp2_settings_parser_parse(void* p,
grpc_core::global_stats()
.IncrementHttp2PreferredReceiveCryptoMessageSize(
target_settings->preferred_receive_crypto_message_size());
t->http2_ztrace_collector.Append([parser]() {
grpc_core::H2SettingsTrace<true> settings{false, {}};
// TODO(ctiller): produce actual wire settings here, not a
// diff. Likely this needs to wait for PH2 where we separate
// the parse loop from the application loop.
parser->incoming_settings->Diff(
false, *parser->target_settings,
[&settings](uint16_t key, uint32_t value) {
settings.settings.push_back({key, value});
});
return settings;
});
t->http2_ztrace_collector.Append(
[]() { return grpc_core::H2SettingsTrace<false>{true, {}}; });
*parser->target_settings = *parser->incoming_settings;
t->num_pending_induced_frames++;
grpc_slice_buffer_add(&t->qbuf, grpc_chttp2_settings_ack_create());
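The Diff call above visits each setting whose value differs between the two snapshots. A small sketch of the same callback shape, used here to collect the changed (key, value) pairs into a vector; this is illustrative only and assumes the Diff(bool, other, callback) signature shown in this hunk.

#include <cstdint>
#include <utility>
#include <vector>

// Sketch: gather only the settings that changed between two snapshots.
std::vector<std::pair<uint16_t, uint32_t>> ChangedSettingsSketch(
    grpc_core::Http2Settings& incoming, grpc_core::Http2Settings& current) {
  std::vector<std::pair<uint16_t, uint32_t>> out;
  // The first argument mirrors the call above; its exact meaning is assumed.
  incoming.Diff(false, current, [&out](uint16_t key, uint32_t value) {
    out.push_back({key, value});
  });
  return out;
}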
@ -198,7 +215,8 @@ grpc_error_handle grpc_chttp2_settings_parser_parse(void* p,
if (error != Http2ErrorCode::kNoError) {
grpc_chttp2_goaway_append(
t->last_new_stream_id, static_cast<uint32_t>(error),
grpc_slice_from_static_string("HTTP2 settings error"), &t->qbuf);
grpc_slice_from_static_string("HTTP2 settings error"), &t->qbuf,
&t->http2_ztrace_collector);
return GRPC_ERROR_CREATE(absl::StrFormat(
"invalid value %u passed for %s", parser->value,
grpc_core::Http2Settings::WireIdToName(parser->id).c_str()));

View File

@ -27,6 +27,7 @@
#include "absl/strings/str_format.h"
#include "src/core/ext/transport/chttp2/transport/call_tracer_wrapper.h"
#include "src/core/ext/transport/chttp2/transport/flow_control.h"
#include "src/core/ext/transport/chttp2/transport/http2_ztrace_collector.h"
#include "src/core/ext/transport/chttp2/transport/internal.h"
#include "src/core/ext/transport/chttp2/transport/stream_lists.h"
#include "src/core/telemetry/stats.h"
@ -101,6 +102,9 @@ grpc_error_handle grpc_chttp2_window_update_parser_parse(
}
CHECK(is_last);
t->http2_ztrace_collector.Append(grpc_core::H2WindowUpdateTrace<true>{
t->incoming_stream_id, received_update});
if (t->incoming_stream_id != 0) {
if (s != nullptr) {
grpc_core::Timestamp now = grpc_core::Timestamp::Now();

View File

@ -101,6 +101,13 @@ void HPackCompressor::Frame(const EncodeHeaderOptions& options,
FillHeader(grpc_slice_buffer_tiny_add(output, kHeadersFrameHeaderSize),
frame_type, options.stream_id, len, flags);
options.call_tracer->RecordOutgoingBytes({kHeadersFrameHeaderSize, 0, 0});
options.ztrace_collector->Append([&]() {
return H2HeaderTrace<false>{
options.stream_id, (flags & GRPC_CHTTP2_DATA_FLAG_END_HEADERS) != 0,
(flags & GRPC_CHTTP2_DATA_FLAG_END_STREAM) != 0,
frame_type == GRPC_CHTTP2_FRAME_CONTINUATION,
static_cast<uint32_t>(len)};
});
grpc_slice_buffer_move_first(raw.c_slice_buffer(), len, output);
frame_type = GRPC_CHTTP2_FRAME_CONTINUATION;

View File

@ -35,6 +35,7 @@
#include "src/core/call/metadata_compression_traits.h"
#include "src/core/ext/transport/chttp2/transport/hpack_constants.h"
#include "src/core/ext/transport/chttp2/transport/hpack_encoder_table.h"
#include "src/core/ext/transport/chttp2/transport/http2_ztrace_collector.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/transport/timeout_encoding.h"
@ -353,6 +354,7 @@ class HPackCompressor {
bool use_true_binary_metadata;
size_t max_frame_size;
CallTracerInterface* call_tracer;
Http2ZTraceCollector* ztrace_collector;
};
template <typename HeaderSet>

View File

@ -27,6 +27,7 @@
#include "absl/strings/string_view.h"
#include "src/core/ext/transport/chttp2/transport/frame.h"
#include "src/core/ext/transport/chttp2/transport/http2_status.h"
#include "src/core/util/json/json.h"
#include "src/core/util/useful.h"
namespace grpc_core {
@ -132,6 +133,22 @@ class Http2Settings {
bool operator!=(const Http2Settings& rhs) const { return !operator==(rhs); }
Json::Object ToJsonObject() const {
Json::Object object;
object["headerTableSize"] = Json::FromNumber(header_table_size());
object["maxConcurrentStreams"] = Json::FromNumber(max_concurrent_streams());
object["initialWindowSize"] = Json::FromNumber(initial_window_size());
object["maxFrameSize"] = Json::FromNumber(max_frame_size());
object["maxHeaderListSize"] = Json::FromNumber(max_header_list_size());
object["preferredReceiveCryptoMessageSize"] =
Json::FromNumber(preferred_receive_crypto_message_size());
object["enablePush"] = Json::FromBool(enable_push());
object["allowTrueBinaryMetadata"] =
Json::FromBool(allow_true_binary_metadata());
object["allowSecurityFrame"] = Json::FromBool(allow_security_frame());
return object;
}
private:
uint32_t header_table_size_ = 4096;
uint32_t max_concurrent_streams_ = 4294967295u;
@ -152,6 +169,15 @@ class Http2SettingsManager {
Http2Settings& mutable_peer() { return peer_; }
const Http2Settings& peer() const { return peer_; }
Json::Object ToJsonObject() const {
Json::Object object;
object["local"] = Json::FromObject(local_.ToJsonObject());
object["sent"] = Json::FromObject(sent_.ToJsonObject());
object["peer"] = Json::FromObject(peer_.ToJsonObject());
object["acked"] = Json::FromObject(acked_.ToJsonObject());
return object;
}
std::optional<Http2SettingsFrame> MaybeSendUpdate();
GRPC_MUST_USE_RESULT bool AckLastSend();
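A short usage sketch for the new JSON accessors: a channelz-style dump of the four settings snapshots. JsonDump is assumed to be the existing JSON writer helper under src/core/util/json; everything else mirrors the methods added above.

#include <string>

#include "src/core/util/json/json.h"         // Json::FromObject (shown above)
#include "src/core/util/json/json_writer.h"  // JsonDump (assumed location)

// Sketch: render the local/sent/peer/acked snapshots as a JSON string.
inline std::string SettingsDebugJsonSketch(
    const grpc_core::Http2SettingsManager& settings) {
  return grpc_core::JsonDump(
      grpc_core::Json::FromObject(settings.ToJsonObject()));
}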

View File

@ -19,13 +19,21 @@
#ifndef GRPC_SRC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_STATUS_H
#define GRPC_SRC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_STATUS_H
#include <grpc/support/port_platform.h>
#include <cstdint>
#include <string>
#include <variant>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/strings/str_cat.h"
#include "src/core/util/time.h"
namespace grpc_core {
namespace http2 {
// These error codes are as per RFC9113
// https://www.rfc-editor.org/rfc/rfc9113.html#name-error-codes
// The RFC tells us to use 32 bits, but since this is our internal
// representation, we can use a smaller value.
@ -46,6 +54,355 @@ enum class Http2ErrorCode : uint8_t {
kDoNotUse = 0xffu // Force use of a default clause
};
inline absl::StatusCode ErrorCodeToAbslStatusCode(
const Http2ErrorCode http2_code,
const Timestamp deadline = Timestamp::InfFuture()) {
switch (http2_code) {
case Http2ErrorCode::kNoError:
return absl::StatusCode::kOk;
case Http2ErrorCode::kEnhanceYourCalm:
return absl::StatusCode::kResourceExhausted;
case Http2ErrorCode::kInadequateSecurity:
return absl::StatusCode::kPermissionDenied;
case Http2ErrorCode::kRefusedStream:
return absl::StatusCode::kUnavailable;
case Http2ErrorCode::kCancel:
return (Timestamp::Now() > deadline) ? absl::StatusCode::kDeadlineExceeded
: absl::StatusCode::kCancelled;
default:
return absl::StatusCode::kInternal;
}
GPR_UNREACHABLE_CODE(return absl::StatusCode::kUnknown);
}
inline Http2ErrorCode AbslStatusCodeToErrorCode(const absl::StatusCode status) {
switch (status) {
case absl::StatusCode::kOk:
return Http2ErrorCode::kNoError;
case absl::StatusCode::kCancelled:
return Http2ErrorCode::kCancel;
case absl::StatusCode::kDeadlineExceeded:
return Http2ErrorCode::kCancel;
case absl::StatusCode::kResourceExhausted:
return Http2ErrorCode::kEnhanceYourCalm;
case absl::StatusCode::kPermissionDenied:
return Http2ErrorCode::kInadequateSecurity;
case absl::StatusCode::kUnavailable:
return Http2ErrorCode::kRefusedStream;
default:
return Http2ErrorCode::kInternalError;
};
}
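Note that these two helpers are deliberately lossy rather than a bijection: for example, kCancelled and kDeadlineExceeded both map to the HTTP/2 CANCEL code, and CANCEL only maps back to kDeadlineExceeded when the supplied deadline has already expired. A small illustrative round trip, using only the functions and types defined in this header:

#include "src/core/ext/transport/chttp2/transport/http2_status.h"

// Sketch: demonstrates the lossy round trip described above.
inline void ErrorCodeRoundTripSketch() {
  using grpc_core::http2::AbslStatusCodeToErrorCode;
  using grpc_core::http2::ErrorCodeToAbslStatusCode;
  using grpc_core::http2::Http2ErrorCode;

  Http2ErrorCode wire =
      AbslStatusCodeToErrorCode(absl::StatusCode::kDeadlineExceeded);
  // wire == Http2ErrorCode::kCancel.
  absl::StatusCode back = ErrorCodeToAbslStatusCode(
      wire, /*deadline=*/grpc_core::Timestamp::InfPast());
  // back == absl::StatusCode::kDeadlineExceeded because the deadline passed;
  // with the default InfFuture() deadline it would be kCancelled instead.
  (void)back;
}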
class GRPC_MUST_USE_RESULT Http2Status {
public:
// Classifying an error as a stream error or a connection error must be done
// at the time the Http2Status object is created. Once the Http2Status object
// is created, its Http2ErrorType is immutable. This is intentional.
enum class Http2ErrorType : uint8_t {
kOk = 0x0,
kConnectionError = 0x1,
kStreamError = 0x2,
};
static Http2Status Ok() { return Http2Status(); }
// To pass a message:
// Either pass an absl::string_view, which is then copied into a std::string,
// or pass a local std::string using std::move.
static Http2Status Http2ConnectionError(const Http2ErrorCode error_code,
std::string message) {
return Http2Status(error_code, Http2ErrorType::kConnectionError, message);
}
static Http2Status Http2StreamError(const Http2ErrorCode error_code,
std::string message) {
return Http2Status(error_code, Http2ErrorType::kStreamError, message);
}
static Http2Status AbslConnectionError(const absl::StatusCode code,
std::string message) {
return Http2Status(code, Http2ErrorType::kConnectionError, message);
}
static Http2Status AbslStreamError(const absl::StatusCode code,
std::string message) {
return Http2Status(code, Http2ErrorType::kStreamError, message);
}
GRPC_MUST_USE_RESULT Http2ErrorType GetType() const { return error_type_; }
// We only expect to use this in 2 places
// 1. To know what error code to send in a HTTP2 GOAWAY frame.
// 2. In tests
// Any other usage is strongly discouraged.
GRPC_MUST_USE_RESULT Http2ErrorCode GetConnectionErrorCode() const {
switch (error_type_) {
case Http2ErrorType::kOk:
CHECK(false);
case Http2ErrorType::kConnectionError:
return http2_code_;
case Http2ErrorType::kStreamError:
CHECK(false);
default:
CHECK(false);
}
GPR_UNREACHABLE_CODE(return http2_code_);
}
// We only expect to use this in 2 places
// 1. To know what error code to send in a HTTP2 RST_STREAM.
// 2. In tests
// Any other usage is strongly discouraged.
GRPC_MUST_USE_RESULT Http2ErrorCode GetStreamErrorCode() const {
switch (error_type_) {
case Http2ErrorType::kOk:
CHECK(false);
case Http2ErrorType::kConnectionError:
CHECK(false);
case Http2ErrorType::kStreamError:
return http2_code_;
default:
CHECK(false);
}
GPR_UNREACHABLE_CODE(return http2_code_);
}
// If an error code needs to be used along with promises, or passed out of the
// transport, this function should be used.
GRPC_MUST_USE_RESULT absl::Status GetAbslConnectionError() const {
switch (error_type_) {
case Http2ErrorType::kOk:
CHECK(false);
case Http2ErrorType::kConnectionError:
return AbslError();
case Http2ErrorType::kStreamError:
CHECK(false);
default:
CHECK(false);
}
GPR_UNREACHABLE_CODE(return absl::OkStatus());
}
// If an error code needs to be used along with promises, or passed out of the
// transport, this function should be used.
GRPC_MUST_USE_RESULT absl::Status GetAbslStreamError() const {
switch (error_type_) {
case Http2ErrorType::kOk:
CHECK(false);
case Http2ErrorType::kConnectionError:
CHECK(false);
case Http2ErrorType::kStreamError:
return AbslError();
default:
CHECK(false);
}
GPR_UNREACHABLE_CODE(return absl::OkStatus());
}
bool IsOk() const { return (http2_code_ == Http2ErrorCode::kNoError); }
std::string DebugString() const {
return absl::StrCat(DebugGetType(), ": {Error Code:", DebugGetCode(),
", Message:", message_, "}");
}
~Http2Status() = default;
template <typename Sink>
friend void AbslStringify(Sink& sink, const Http2Status& frame) {
sink.Append(frame.DebugString());
}
Http2Status(Http2Status&& move_status) = default;
// Our http2_code_ member is const, which makes assignment illegal.
Http2Status& operator=(Http2Status&& rhs) = delete;
Http2Status(const Http2Status&) = delete;
Http2Status& operator=(const Http2Status&) = delete;
private:
explicit Http2Status()
: http2_code_(Http2ErrorCode::kNoError),
error_type_(Http2ErrorType::kOk),
absl_code_(absl::StatusCode::kOk) {
Validate();
}
explicit Http2Status(const absl::StatusCode code, const Http2ErrorType type,
std::string& message)
: http2_code_((code == absl::StatusCode::kOk)
? Http2ErrorCode::kNoError
: Http2ErrorCode::kInternalError),
error_type_(type),
absl_code_(code),
message_(std::move(message)) {
Validate();
}
explicit Http2Status(const Http2ErrorCode code, const Http2ErrorType type,
std::string& message)
: http2_code_(code),
error_type_(type),
absl_code_(ErrorCodeToAbslStatusCode(http2_code_)),
message_(std::move(message)) {
Validate();
}
absl::Status AbslError() const {
if (IsOk()) return absl::OkStatus();
return absl::Status(absl_code_, message_);
}
void Validate() const {
DCHECK((http2_code_ == Http2ErrorCode::kNoError &&
error_type_ == Http2ErrorType::kOk &&
absl_code_ == absl::StatusCode::kOk) ||
(http2_code_ > Http2ErrorCode::kNoError &&
error_type_ > Http2ErrorType::kOk &&
absl_code_ != absl::StatusCode::kOk));
DCHECK((IsOk() && message_.empty()) || (!IsOk() && !message_.empty()));
}
std::string DebugGetType() const {
switch (error_type_) {
case Http2ErrorType::kOk:
return "Ok";
case Http2ErrorType::kConnectionError:
return "Connection Error";
case Http2ErrorType::kStreamError:
return "Stream Error";
default:
DCHECK(false);
}
GPR_UNREACHABLE_CODE(return "Invalid");
}
std::string DebugGetCode() const {
switch (http2_code_) {
case Http2ErrorCode::kNoError:
return "NO_ERROR";
case Http2ErrorCode::kProtocolError:
return "PROTOCOL_ERROR";
case Http2ErrorCode::kInternalError:
return "INTERNAL_ERROR";
case Http2ErrorCode::kFlowControlError:
return "FLOW_CONTROL_ERROR";
case Http2ErrorCode::kSettingsTimeout:
return "SETTINGS_TIMEOUT";
case Http2ErrorCode::kStreamClosed:
return "STREAM_CLOSED";
case Http2ErrorCode::kFrameSizeError:
return "FRAME_SIZE_ERROR";
case Http2ErrorCode::kRefusedStream:
return "REFUSED_STREAM";
case Http2ErrorCode::kCancel:
return "CANCEL";
case Http2ErrorCode::kCompressionError:
return "COMPRESSION_ERROR";
case Http2ErrorCode::kConnectError:
return "CONNECT_ERROR";
case Http2ErrorCode::kEnhanceYourCalm:
return "ENHANCE_YOUR_CALM";
case Http2ErrorCode::kInadequateSecurity:
return "INADEQUATE_SECURITY";
case Http2ErrorCode::kDoNotUse:
return "HTTP_1_1_REQUIRED";
default:
DCHECK(false);
}
GPR_UNREACHABLE_CODE(return "Invalid");
}
const Http2ErrorCode http2_code_;
const Http2ErrorType error_type_;
const absl::StatusCode absl_code_;
std::string message_;
};
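A hedged usage sketch of the class above: the parser function and its failure condition are hypothetical, while the Http2Status calls are the ones defined here. Classification as a stream or connection error happens at creation, and the matching code or absl::Status is extracted later.

#include "src/core/ext/transport/chttp2/transport/http2_status.h"

// Sketch only: intended creation/inspection flow for Http2Status.
inline grpc_core::http2::Http2Status CheckFrameLengthSketch(bool length_ok) {
  if (!length_ok) {
    return grpc_core::http2::Http2Status::Http2ConnectionError(
        grpc_core::http2::Http2ErrorCode::kFrameSizeError,
        "frame exceeds SETTINGS_MAX_FRAME_SIZE");
  }
  return grpc_core::http2::Http2Status::Ok();
}

inline void HandleStatusSketch() {
  auto status = CheckFrameLengthSketch(/*length_ok=*/false);
  if (status.GetType() ==
      grpc_core::http2::Http2Status::Http2ErrorType::kConnectionError) {
    // The GOAWAY path wants the raw HTTP/2 code; promise code wants absl.
    uint32_t goaway_code =
        static_cast<uint32_t>(status.GetConnectionErrorCode());
    absl::Status absl_error = status.GetAbslConnectionError();
    (void)goaway_code;
    (void)absl_error;
  }
}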
// We can add more methods and helpers as needed.
// This class is similar to ValueOrFailure, but is a more minimalist version.
// Reference :
// https://github.com/grpc/grpc/blob/master/src/core/lib/promise/status_flag.h
// A value if an operation was successful, or a Http2Status if not.
template <typename T>
class GRPC_MUST_USE_RESULT ValueOrHttp2Status {
public:
// NOLINTNEXTLINE(google-explicit-constructor)
ValueOrHttp2Status(T value) : value_(std::move(value)) {
DCHECK(std::holds_alternative<T>(value_));
}
// NOLINTNEXTLINE(google-explicit-constructor)
ValueOrHttp2Status(Http2Status status) : value_(std::move(status)) {
DCHECK(std::holds_alternative<Http2Status>(value_));
CHECK(std::get<Http2Status>(value_).GetType() !=
Http2Status::Http2ErrorType::kOk);
}
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION const T& value() const {
DCHECK(std::holds_alternative<T>(value_));
return std::get<T>(value_);
}
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION T& value() {
DCHECK(std::holds_alternative<T>(value_));
return std::get<T>(value_);
}
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION static Http2Status TakeStatus(
ValueOrHttp2Status<T>&& status) {
DCHECK(std::holds_alternative<Http2Status>(status.value_));
return std::move(std::get<Http2Status>(status.value_));
}
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION bool IsOk() const {
return std::holds_alternative<T>(value_);
}
GRPC_MUST_USE_RESULT Http2Status::Http2ErrorType GetErrorType() const {
DCHECK(std::holds_alternative<Http2Status>(value_));
return std::get<Http2Status>(value_).GetType();
}
GRPC_MUST_USE_RESULT Http2ErrorCode GetConnectionErrorCode() const {
DCHECK(std::holds_alternative<Http2Status>(value_));
return std::get<Http2Status>(value_).GetConnectionErrorCode();
}
GRPC_MUST_USE_RESULT Http2ErrorCode GetStreamErrorCode() const {
DCHECK(std::holds_alternative<Http2Status>(value_));
return std::get<Http2Status>(value_).GetStreamErrorCode();
}
GRPC_MUST_USE_RESULT absl::Status GetAbslConnectionError() const {
DCHECK(std::holds_alternative<Http2Status>(value_));
return std::get<Http2Status>(value_).GetAbslConnectionError();
}
GRPC_MUST_USE_RESULT absl::Status GetAbslStreamError() const {
DCHECK(std::holds_alternative<Http2Status>(value_));
return std::get<Http2Status>(value_).GetAbslStreamError();
}
std::string DebugString() const {
DCHECK(std::holds_alternative<Http2Status>(value_));
return std::get<Http2Status>(value_).DebugString();
}
private:
std::variant<T, Http2Status> value_;
};
template <typename T>
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION inline T TakeValue(
ValueOrHttp2Status<T>&& value) {
return std::move(value.value());
}
} // namespace http2
} // namespace grpc_core
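And a companion sketch for ValueOrHttp2Status<T>, following the consume pattern the class is designed for. ParsedPing here is a stand-in payload type, not a real frame struct.

#include <cstdint>
#include <utility>

#include "src/core/ext/transport/chttp2/transport/http2_status.h"

// Sketch only: take the value on success, or move the status out on failure.
struct ParsedPing {
  bool ack;
  uint64_t opaque;
};

inline grpc_core::http2::Http2Status ConsumePingSketch(
    grpc_core::http2::ValueOrHttp2Status<ParsedPing> result) {
  if (!result.IsOk()) {
    // Propagate the original stream/connection classification unchanged.
    return grpc_core::http2::ValueOrHttp2Status<ParsedPing>::TakeStatus(
        std::move(result));
  }
  ParsedPing ping = grpc_core::http2::TakeValue(std::move(result));
  (void)ping;
  return grpc_core::http2::Http2Status::Ok();
}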

View File

@ -0,0 +1,252 @@
// Copyright 2025 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_ZTRACE_COLLECTOR_H
#define GRPC_SRC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_ZTRACE_COLLECTOR_H
#include <cstdint>
#include <map>
#include <string>
#include "src/core/channelz/ztrace_collector.h"
#include "src/core/ext/transport/chttp2/transport/frame.h"
namespace grpc_core {
namespace http2_ztrace_collector_detail {
class Config {
public:
explicit Config(std::map<std::string, std::string>) {}
template <typename T>
bool Finishes(const T&) {
return false;
}
};
} // namespace http2_ztrace_collector_detail
template <bool kRead>
struct H2DataTrace {
uint32_t stream_id;
bool end_stream;
uint32_t payload_length;
size_t MemoryUsage() const { return sizeof(*this); }
void RenderJson(Json::Object& json) const {
json["read"] = Json::FromBool(kRead);
json["frame_type"] = Json::FromString("DATA");
json["stream_id"] = Json::FromNumber(stream_id);
json["end_stream"] = Json::FromBool(end_stream);
json["payload_length"] = Json::FromNumber(payload_length);
}
};
template <bool kRead>
struct H2HeaderTrace {
uint32_t stream_id;
bool end_headers;
bool end_stream;
bool continuation;
uint32_t payload_length;
size_t MemoryUsage() const { return sizeof(*this); }
void RenderJson(Json::Object& json) const {
json["read"] = Json::FromBool(kRead);
json["frame_type"] = continuation ? Json::FromString("CONTINUATION")
: Json::FromString("HEADERS");
json["stream_id"] = Json::FromNumber(stream_id);
json["end_headers"] = Json::FromBool(end_headers);
json["end_stream"] = Json::FromBool(end_stream);
json["payload_length"] = Json::FromNumber(payload_length);
}
};
template <bool kRead>
struct H2RstStreamTrace {
uint32_t stream_id;
uint32_t error_code;
size_t MemoryUsage() const { return sizeof(*this); }
void RenderJson(Json::Object& json) const {
json["read"] = Json::FromBool(kRead);
json["frame_type"] = Json::FromString("RST_STREAM");
json["stream_id"] = Json::FromNumber(stream_id);
json["error_code"] = Json::FromNumber(error_code);
}
};
template <bool kRead>
struct H2SettingsTrace {
bool ack;
std::vector<Http2SettingsFrame::Setting> settings;
size_t MemoryUsage() const {
return sizeof(*this) +
sizeof(Http2SettingsFrame::Setting) * settings.size();
}
void RenderJson(Json::Object& json) const {
json["read"] = Json::FromBool(kRead);
json["frame_type"] = Json::FromString("SETTINGS");
json["ack"] = Json::FromBool(ack);
Json::Array settings_array;
for (const auto& setting : settings) {
Json::Object setting_object;
setting_object["id"] = Json::FromNumber(setting.id);
setting_object["value"] = Json::FromNumber(setting.value);
settings_array.push_back(Json::FromObject(std::move(setting_object)));
}
json["settings"] = Json::FromArray(std::move(settings_array));
}
};
template <bool kRead>
struct H2PingTrace {
bool ack;
uint64_t opaque;
size_t MemoryUsage() const { return sizeof(*this); }
void RenderJson(Json::Object& json) const {
json["read"] = Json::FromBool(kRead);
json["frame_type"] = Json::FromString("PING");
json["ack"] = Json::FromBool(ack);
json["opaque"] = Json::FromNumber(opaque);
}
};
template <bool kRead>
struct H2GoAwayTrace {
uint32_t last_stream_id;
uint32_t error_code;
std::string debug_data;
size_t MemoryUsage() const { return sizeof(*this) + debug_data.size(); }
void RenderJson(Json::Object& json) const {
json["read"] = Json::FromBool(kRead);
json["frame_type"] = Json::FromString("GOAWAY");
json["last_stream_id"] = Json::FromNumber(last_stream_id);
json["error_code"] = Json::FromNumber(error_code);
json["debug_data"] = Json::FromString(debug_data);
}
};
template <bool kRead>
struct H2WindowUpdateTrace {
uint32_t stream_id;
uint32_t window_size_increment;
size_t MemoryUsage() const { return sizeof(*this); }
void RenderJson(Json::Object& json) const {
json["read"] = Json::FromBool(kRead);
json["frame_type"] = Json::FromString("WINDOW_UPDATE");
json["stream_id"] = Json::FromNumber(stream_id);
json["window_size_increment"] = Json::FromNumber(window_size_increment);
}
};
template <bool kRead>
struct H2SecurityTrace {
uint32_t payload_length;
size_t MemoryUsage() const { return sizeof(*this); }
void RenderJson(Json::Object& json) const {
json["read"] = Json::FromBool(kRead);
json["frame_type"] = Json::FromString("SECURITY");
json["payload_length"] = Json::FromNumber(payload_length);
}
};
struct H2UnknownFrameTrace {
uint8_t type;
uint8_t flags;
uint32_t stream_id;
uint32_t payload_length;
size_t MemoryUsage() const { return sizeof(*this); }
void RenderJson(Json::Object& json) const {
json["frame_type"] = Json::FromString("UNKNOWN");
json["type"] = Json::FromNumber(type);
json["flags"] = Json::FromNumber(flags);
json["stream_id"] = Json::FromNumber(stream_id);
json["payload_length"] = Json::FromNumber(payload_length);
}
};
struct H2FlowControlStall {
int64_t transport_window;
int64_t stream_window;
uint32_t stream_id;
size_t MemoryUsage() const { return sizeof(*this); }
void RenderJson(Json::Object& json) const {
json["metadata_type"] = Json::FromString("FLOW_CONTROL_STALL");
json["transport_window"] = Json::FromNumber(transport_window);
json["stream_window"] = Json::FromNumber(stream_window);
json["stream_id"] = Json::FromNumber(stream_id);
}
};
struct H2BeginWriteCycle {
uint32_t target_size;
size_t MemoryUsage() const { return sizeof(*this); }
void RenderJson(Json::Object& json) const {
json["metadata_type"] = Json::FromString("BEGIN_WRITE_CYCLE");
json["target_size"] = Json::FromNumber(target_size);
}
};
struct H2BeginEndpointWrite {
uint32_t write_size;
size_t MemoryUsage() const { return sizeof(*this); }
void RenderJson(Json::Object& json) const {
json["metadata_type"] = Json::FromString("BEGIN_ENDPOINT_WRITE");
json["write_size"] = Json::FromNumber(write_size);
}
};
struct H2EndWriteCycle {
size_t MemoryUsage() const { return sizeof(*this); }
void RenderJson(Json::Object& json) const {
json["metadata_type"] = Json::FromString("END_WRITE_CYCLE");
}
};
using Http2ZTraceCollector = channelz::ZTraceCollector<
http2_ztrace_collector_detail::Config, H2DataTrace<false>,
H2HeaderTrace<false>, H2RstStreamTrace<false>, H2SettingsTrace<false>,
H2PingTrace<false>, H2GoAwayTrace<false>, H2WindowUpdateTrace<false>,
H2SecurityTrace<false>, H2DataTrace<true>, H2HeaderTrace<true>,
H2RstStreamTrace<true>, H2SettingsTrace<true>, H2PingTrace<true>,
H2GoAwayTrace<true>, H2WindowUpdateTrace<true>, H2SecurityTrace<true>,
H2UnknownFrameTrace, H2FlowControlStall, H2BeginWriteCycle, H2EndWriteCycle,
H2BeginEndpointWrite>;
} // namespace grpc_core
#endif // GRPC_SRC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_ZTRACE_COLLECTOR_H
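Two things are worth noting for anyone extending this collector. First, Append accepts either a ready-made trace object or a callable that builds one, so the expensive cases (for example copying GOAWAY debug data) are only paid when a ztrace is actually being collected. Second, a new trace type only needs MemoryUsage() and RenderJson(), plus an entry in the ZTraceCollector type list above. A hypothetical trace following that pattern (illustrative only, not part of the change):

// Sketch only: a hypothetical trace type in the same shape as those above.
// It would also need to be appended to the Http2ZTraceCollector type list.
struct H2KeepaliveTimeoutTrace {
  uint32_t last_stream_id;

  size_t MemoryUsage() const { return sizeof(*this); }
  void RenderJson(grpc_core::Json::Object& json) const {
    json["metadata_type"] = grpc_core::Json::FromString("KEEPALIVE_TIMEOUT");
    json["last_stream_id"] = grpc_core::Json::FromNumber(last_stream_id);
  }
};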

View File

@ -41,7 +41,6 @@
#include "src/core/call/metadata_batch.h"
#include "src/core/channelz/channelz.h"
#include "src/core/ext/transport/chttp2/transport/call_tracer_wrapper.h"
#include "src/core/ext/transport/chttp2/transport/context_list_entry.h"
#include "src/core/ext/transport/chttp2/transport/flow_control.h"
#include "src/core/ext/transport/chttp2/transport/frame_goaway.h"
#include "src/core/ext/transport/chttp2/transport/frame_ping.h"
@ -52,6 +51,8 @@
#include "src/core/ext/transport/chttp2/transport/hpack_encoder.h"
#include "src/core/ext/transport/chttp2/transport/hpack_parser.h"
#include "src/core/ext/transport/chttp2/transport/http2_settings.h"
#include "src/core/ext/transport/chttp2/transport/http2_ztrace_collector.h"
#include "src/core/ext/transport/chttp2/transport/internal_channel_arg_names.h"
#include "src/core/ext/transport/chttp2/transport/legacy_frame.h"
#include "src/core/ext/transport/chttp2/transport/ping_abuse_policy.h"
#include "src/core/ext/transport/chttp2/transport/ping_callbacks.h"
@ -73,6 +74,8 @@
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_framing_endpoint_extension.h"
#include "src/core/telemetry/call_tracer.h"
#include "src/core/telemetry/context_list_entry.h"
#include "src/core/telemetry/stats.h"
#include "src/core/util/bitset.h"
#include "src/core/util/debug_location.h"
#include "src/core/util/ref_counted.h"
@ -229,6 +232,21 @@ struct grpc_chttp2_transport final : public grpc_core::FilterStackTransport,
bool is_client);
~grpc_chttp2_transport() override;
class ChannelzDataSource final : public grpc_core::channelz::DataSource {
public:
explicit ChannelzDataSource(grpc_chttp2_transport* transport)
: grpc_core::channelz::DataSource(transport->channelz_socket),
transport_(transport) {}
~ChannelzDataSource() { ResetDataSource(); }
void AddData(grpc_core::channelz::DataSink& sink) override;
std::unique_ptr<grpc_core::channelz::ZTrace> GetZTrace(
absl::string_view name) override;
private:
grpc_chttp2_transport* transport_;
};
void Orphan() override;
grpc_core::RefCountedPtr<grpc_chttp2_transport> Ref() {
@ -250,6 +268,10 @@ struct grpc_chttp2_transport final : public grpc_core::FilterStackTransport,
grpc_core::ServerTransport* server_transport() override { return nullptr; }
absl::string_view GetTransportName() const override;
grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode> GetSocketNode()
const override {
return channelz_socket;
}
void InitStream(grpc_stream* gs, grpc_stream_refcount* refcount,
const void* server_data, grpc_core::Arena* arena) override;
void SetPollset(grpc_stream* stream, grpc_pollset* pollset) override;
@ -275,7 +297,6 @@ struct grpc_chttp2_transport final : public grpc_core::FilterStackTransport,
std::shared_ptr<grpc_event_engine::experimental::EventEngine> event_engine;
grpc_core::Combiner* combiner;
absl::BitGen bitgen;
// On the client side, when the transport is first created, the
// endpoint will already have been added to this pollset_set, and it
@ -471,7 +492,6 @@ struct grpc_chttp2_transport final : public grpc_core::FilterStackTransport,
grpc_event_engine::experimental::EventEngine::TaskHandle
keepalive_ping_timer_handle =
grpc_event_engine::experimental::EventEngine::TaskHandle::kInvalid;
;
/// time duration in between pings
grpc_core::Duration keepalive_time;
/// Tracks any adjustments to the absolute timestamp of the next keepalive
@ -488,6 +508,7 @@ struct grpc_chttp2_transport final : public grpc_core::FilterStackTransport,
uint32_t max_header_list_size_soft_limit = 0;
grpc_core::ContextList* context_list = nullptr;
grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode> channelz_socket;
std::unique_ptr<ChannelzDataSource> channelz_data_source;
uint32_t num_messages_in_next_write = 0;
/// The number of pending induced frames (SETTINGS_ACK, PINGS_ACK and
/// RST_STREAM) in the outgoing buffer (t->qbuf). If this number goes beyond
@ -560,6 +581,9 @@ struct grpc_chttp2_transport final : public grpc_core::FilterStackTransport,
grpc_core::Timestamp last_window_update_time =
grpc_core::Timestamp::InfPast();
grpc_core::Http2StatsCollector http2_stats;
grpc_core::Http2ZTraceCollector http2_ztrace_collector;
GPR_NO_UNIQUE_ADDRESS grpc_core::latent_see::Flow write_flow;
};
@ -670,6 +694,8 @@ struct grpc_chttp2_stream {
int64_t write_counter = 0;
grpc_core::Chttp2CallTracerWrapper call_tracer_wrapper;
// null by default, set by the transport data source upon first query
grpc_core::RefCountedPtr<grpc_core::channelz::CallNode> channelz_call_node;
// TODO(yashykt): Remove call_tracer field after transition to call v3. (See
// https://github.com/grpc/grpc/pull/38729 for more information.)
@ -707,8 +733,6 @@ struct grpc_chttp2_stream {
grpc_core::Timestamp::InfPast();
};
#define GRPC_ARG_PING_TIMEOUT_MS "grpc.http2.ping_timeout_ms"
// EXPERIMENTAL: provide protection against overloading a server with too many
// requests: wait for streams to be deallocated before they stop counting
// against MAX_CONCURRENT_STREAMS

View File

@ -0,0 +1,24 @@
//
//
// Copyright 2025 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//
#ifndef GRPC_SRC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INTERNAL_CHANNEL_ARG_NAMES_H
#define GRPC_SRC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INTERNAL_CHANNEL_ARG_NAMES_H
#define GRPC_ARG_PING_TIMEOUT_MS "grpc.http2.ping_timeout_ms"
#endif // GRPC_SRC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INTERNAL_CHANNEL_ARG_NAMES_H
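Since GRPC_ARG_PING_TIMEOUT_MS now lives in its own header, here is a minimal sketch of how an integer channel arg like this one is typically supplied; the 20000 ms value is purely illustrative.

#include "src/core/ext/transport/chttp2/transport/internal_channel_arg_names.h"
#include "src/core/lib/channel/channel_args.h"

// Sketch: set the HTTP/2 ping timeout to 20 seconds via channel args.
inline grpc_core::ChannelArgs WithPingTimeoutSketch(
    const grpc_core::ChannelArgs& args) {
  return args.Set(GRPC_ARG_PING_TIMEOUT_MS, 20000);
}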

View File

@ -55,6 +55,7 @@
#include "src/core/ext/transport/chttp2/transport/hpack_parser_table.h"
#include "src/core/ext/transport/chttp2/transport/http2_settings.h"
#include "src/core/ext/transport/chttp2/transport/http2_status.h"
#include "src/core/ext/transport/chttp2/transport/http2_ztrace_collector.h"
#include "src/core/ext/transport/chttp2/transport/internal.h"
#include "src/core/ext/transport/chttp2/transport/legacy_frame.h"
#include "src/core/ext/transport/chttp2/transport/ping_rate_policy.h"
@ -73,6 +74,7 @@
#include "src/core/telemetry/stats_data.h"
#include "src/core/util/random_early_detection.h"
#include "src/core/util/ref_counted_ptr.h"
#include "src/core/util/shared_bit_gen.h"
#include "src/core/util/status_helper.h"
using grpc_core::HPackParser;
@ -520,6 +522,12 @@ static grpc_error_handle init_header_skip_frame_parser(
static grpc_error_handle init_non_header_skip_frame_parser(
grpc_chttp2_transport* t) {
t->http2_ztrace_collector.Append(grpc_core::H2UnknownFrameTrace{
t->incoming_frame_type,
t->incoming_frame_flags,
t->incoming_stream_id,
t->incoming_frame_size,
});
t->parser =
grpc_chttp2_transport::Parser{"skip_parser", skip_parser, nullptr};
return absl::OkStatus();
@ -623,6 +631,7 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
t->ping_rate_policy.ReceivedDataFrame();
grpc_core::SharedBitGen g;
// could be a new grpc_chttp2_stream or an existing grpc_chttp2_stream
s = grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
if (s == nullptr) {
@ -656,10 +665,10 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
t->settings.acked().max_concurrent_streams())) {
++t->num_pending_induced_frames;
grpc_slice_buffer_add(
&t->qbuf,
grpc_chttp2_rst_stream_create(
t->incoming_stream_id,
static_cast<uint32_t>(Http2ErrorCode::kRefusedStream), nullptr));
&t->qbuf, grpc_chttp2_rst_stream_create(
t->incoming_stream_id,
static_cast<uint32_t>(Http2ErrorCode::kRefusedStream),
nullptr, &t->http2_ztrace_collector));
grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM);
return init_header_skip_frame_parser(t, priority_type, is_eoh);
} else if (grpc_core::IsRqFastRejectEnabled() &&
@ -672,7 +681,7 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
&t->qbuf, grpc_chttp2_rst_stream_create(
t->incoming_stream_id,
static_cast<uint32_t>(Http2ErrorCode::kEnhanceYourCalm),
nullptr));
nullptr, &t->http2_ztrace_collector));
grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM);
return init_header_skip_frame_parser(t, priority_type, is_eoh);
} else if (GPR_UNLIKELY(
@ -683,10 +692,10 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
// by refusing this stream.
++t->num_pending_induced_frames;
grpc_slice_buffer_add(
&t->qbuf,
grpc_chttp2_rst_stream_create(
t->incoming_stream_id,
static_cast<uint32_t>(Http2ErrorCode::kRefusedStream), nullptr));
&t->qbuf, grpc_chttp2_rst_stream_create(
t->incoming_stream_id,
static_cast<uint32_t>(Http2ErrorCode::kRefusedStream),
nullptr, &t->http2_ztrace_collector));
grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM);
return init_header_skip_frame_parser(t, priority_type, is_eoh);
} else if (GPR_UNLIKELY(t->stream_map.size() >=
@ -694,16 +703,16 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
grpc_core::RandomEarlyDetection(
t->settings.local().max_concurrent_streams(),
t->settings.acked().max_concurrent_streams())
.Reject(t->stream_map.size(), t->bitgen))) {
.Reject(t->stream_map.size(), g))) {
// We are under the limit of max concurrent streams for the current
// setting, but are over the next value that will be advertised.
// Apply some backpressure by randomly not accepting new streams.
++t->num_pending_induced_frames;
grpc_slice_buffer_add(
&t->qbuf,
grpc_chttp2_rst_stream_create(
t->incoming_stream_id,
static_cast<uint32_t>(Http2ErrorCode::kRefusedStream), nullptr));
&t->qbuf, grpc_chttp2_rst_stream_create(
t->incoming_stream_id,
static_cast<uint32_t>(Http2ErrorCode::kRefusedStream),
nullptr, &t->http2_ztrace_collector));
grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM);
return init_header_skip_frame_parser(t, priority_type, is_eoh);
} else if (t->sent_goaway_state == GRPC_CHTTP2_FINAL_GOAWAY_SENT ||
@ -730,7 +739,7 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
&t->qbuf, grpc_chttp2_rst_stream_create(
t->incoming_stream_id,
static_cast<uint32_t>(Http2ErrorCode::kEnhanceYourCalm),
nullptr));
nullptr, &t->http2_ztrace_collector));
grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM);
t->last_new_stream_id = t->incoming_stream_id;
return init_header_skip_frame_parser(t, priority_type, is_eoh);
@ -972,17 +981,23 @@ grpc_error_handle grpc_chttp2_header_parser_parse(void* hpack_parser,
if (s != nullptr) {
s->call_tracer_wrapper.RecordIncomingBytes(
{0, 0, GRPC_SLICE_LENGTH(slice)});
call_tracer =
grpc_core::IsCallTracerTransportFixEnabled()
? s->call_tracer
: s->arena->GetContext<grpc_core::CallTracerAnnotationInterface>();
call_tracer = s->call_tracer;
}
grpc_error_handle error = parser->Parse(
slice, is_last != 0, absl::BitGenRef(t->bitgen), call_tracer);
grpc_core::SharedBitGen g;
grpc_error_handle error =
parser->Parse(slice, is_last != 0, absl::BitGenRef(g), call_tracer);
if (!error.ok()) {
return error;
}
if (is_last) {
t->http2_ztrace_collector.Append([t]() {
return grpc_core::H2HeaderTrace<true>{
t->incoming_stream_id,
(t->incoming_frame_flags & GRPC_CHTTP2_DATA_FLAG_END_HEADERS) != 0,
(t->incoming_frame_flags & GRPC_CHTTP2_DATA_FLAG_END_STREAM) != 0,
t->incoming_frame_type == GRPC_CHTTP2_FRAME_CONTINUATION,
t->incoming_frame_size};
});
// need to check for null stream: this can occur if we receive an invalid
// stream id on a header
if (s != nullptr) {

View File

@ -25,12 +25,6 @@
#include "src/core/lib/experiments/experiments.h"
#include "src/core/util/match.h"
// How many pings do we allow to be inflight at any given time?
// In older versions of gRPC this was implicitly 1.
// With the multiping experiment we allow this to rise to 100 by default.
// TODO(ctiller): consider making this public API
#define GRPC_ARG_HTTP2_MAX_INFLIGHT_PINGS "grpc.http2.max_inflight_pings"
namespace grpc_core {
namespace {
@ -64,9 +58,16 @@ void Chttp2PingRatePolicy::SetDefaults(const ChannelArgs& args) {
Chttp2PingRatePolicy::RequestSendPingResult
Chttp2PingRatePolicy::RequestSendPing(Duration next_allowed_ping_interval,
size_t inflight_pings) const {
if (max_inflight_pings_ > 0 &&
inflight_pings > static_cast<size_t>(max_inflight_pings_)) {
return TooManyRecentPings{};
if (max_inflight_pings_ > 0) {
if (!IsMaxInflightPingsStrictLimitEnabled()) {
if (inflight_pings > static_cast<size_t>(max_inflight_pings_)) {
return TooManyRecentPings{};
}
} else {
if (inflight_pings >= static_cast<size_t>(max_inflight_pings_)) {
return TooManyRecentPings{};
}
}
}
const Timestamp next_allowed_ping =
last_ping_sent_time_ + next_allowed_ping_interval;
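The behavioral difference introduced here is the comparison operator: with the strict-limit experiment enabled, a max_inflight_pings_ of N allows at most N pings in flight (>=), whereas the legacy path effectively allowed N+1 (>). A tiny worked example with max_inflight_pings_ == 1 and one ping already outstanding (inflight_pings == 1): the legacy check 1 > 1 is false, so a second ping may be sent, while the strict check 1 >= 1 is true and the request is rejected with TooManyRecentPings. The comparison is sketched below with plain constants, not gRPC API calls.

// Sketch of the two comparisons with max_inflight_pings_ == 1 and one ping
// already in flight (illustrative constants only).
constexpr unsigned kMaxInflightSketch = 1;
constexpr unsigned kInflightSketch = 1;
static_assert(!(kInflightSketch > kMaxInflightSketch),
              "legacy limit: a second ping would still be allowed");
static_assert(kInflightSketch >= kMaxInflightSketch,
              "strict limit: the second ping is rejected");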

View File

@ -27,6 +27,12 @@
namespace grpc_core {
// How many pings do we allow to be inflight at any given time?
// In older versions of gRPC this was implicitly 1.
// With the multiping experiment we allow this to rise to 100 by default.
// TODO(ctiller): consider making this public API
#define GRPC_ARG_HTTP2_MAX_INFLIGHT_PINGS "grpc.http2.max_inflight_pings"
class Chttp2PingRatePolicy {
public:
explicit Chttp2PingRatePolicy(const ChannelArgs& args, bool is_client);
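With the macro now exported from this header, a test or transport-setup sketch can cap inflight pings when constructing the policy. That the constructor reads this arg from the ChannelArgs it is given is assumed here from the constructor signature; the value 2 is illustrative.

#include "src/core/ext/transport/chttp2/transport/ping_rate_policy.h"
#include "src/core/lib/channel/channel_args.h"

// Sketch: construct a client-side ping rate policy with a custom inflight cap.
inline grpc_core::Chttp2PingRatePolicy MakePingPolicySketch() {
  grpc_core::ChannelArgs args =
      grpc_core::ChannelArgs().Set(GRPC_ARG_HTTP2_MAX_INFLIGHT_PINGS, 2);
  return grpc_core::Chttp2PingRatePolicy(args, /*is_client=*/true);
}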

View File

@ -38,7 +38,6 @@
#include "src/core/channelz/channelz.h"
#include "src/core/ext/transport/chttp2/transport/call_tracer_wrapper.h"
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/ext/transport/chttp2/transport/context_list_entry.h"
#include "src/core/ext/transport/chttp2/transport/flow_control.h"
#include "src/core/ext/transport/chttp2/transport/frame_data.h"
#include "src/core/ext/transport/chttp2/transport/frame_ping.h"
@ -48,6 +47,7 @@
#include "src/core/ext/transport/chttp2/transport/hpack_encoder.h"
#include "src/core/ext/transport/chttp2/transport/http2_settings.h"
#include "src/core/ext/transport/chttp2/transport/http2_status.h"
#include "src/core/ext/transport/chttp2/transport/http2_ztrace_collector.h"
#include "src/core/ext/transport/chttp2/transport/internal.h"
#include "src/core/ext/transport/chttp2/transport/legacy_frame.h"
#include "src/core/ext/transport/chttp2/transport/ping_callbacks.h"
@ -64,11 +64,13 @@
#include "src/core/lib/transport/bdp_estimator.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/telemetry/call_tracer.h"
#include "src/core/telemetry/context_list_entry.h"
#include "src/core/telemetry/stats.h"
#include "src/core/telemetry/stats_data.h"
#include "src/core/util/match.h"
#include "src/core/util/ref_counted.h"
#include "src/core/util/ref_counted_ptr.h"
#include "src/core/util/shared_bit_gen.h"
#include "src/core/util/time.h"
#include "src/core/util/useful.h"
@ -125,7 +127,10 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
t->ping_callbacks.pings_inflight()),
[t](grpc_core::Chttp2PingRatePolicy::SendGranted) {
t->ping_rate_policy.SentPing();
const uint64_t id = t->ping_callbacks.StartPing(t->bitgen);
grpc_core::SharedBitGen g;
const uint64_t id = t->ping_callbacks.StartPing(g);
t->http2_ztrace_collector.Append(
grpc_core::H2PingTrace<false>{false, id});
grpc_slice_buffer_add(t->outbuf.c_slice_buffer(),
grpc_chttp2_ping_create(false, id));
t->keepalive_incoming_data_wanted = true;
@ -253,13 +258,16 @@ namespace {
class WriteContext {
public:
explicit WriteContext(grpc_chttp2_transport* t) : t_(t) {
grpc_core::global_stats().IncrementHttp2WritesBegun();
grpc_core::global_stats().IncrementHttp2WriteTargetSize(target_write_size_);
t->http2_stats.IncrementHttp2WritesBegun();
t->http2_stats.IncrementHttp2WriteTargetSize(target_write_size_);
}
void FlushSettings() {
auto update = t_->settings.MaybeSendUpdate();
if (update.has_value()) {
t_->http2_ztrace_collector.Append([&update]() {
return grpc_core::H2SettingsTrace<false>{false, update->settings};
});
grpc_core::Http2Frame frame(std::move(*update));
Serialize(absl::Span<grpc_core::Http2Frame>(&frame, 1), t_->outbuf);
if (t_->keepalive_timeout != grpc_core::Duration::Infinity()) {
@ -290,6 +298,8 @@ class WriteContext {
uint32_t transport_announce = t_->flow_control.MaybeSendUpdate(
t_->outbuf.c_slice_buffer()->count > 0);
if (transport_announce) {
t_->http2_ztrace_collector.Append(
grpc_core::H2WindowUpdateTrace<false>{0, transport_announce});
grpc_slice_buffer_add(
t_->outbuf.c_slice_buffer(),
grpc_chttp2_window_update_create(0, transport_announce, nullptr));
@ -303,6 +313,8 @@ class WriteContext {
// delayed by crypto operations.
target_write_size_ = 0;
for (size_t i = 0; i < t_->ping_ack_count; i++) {
t_->http2_ztrace_collector.Append(
grpc_core::H2PingTrace<false>{true, t_->ping_acks[i]});
grpc_slice_buffer_add(t_->outbuf.c_slice_buffer(),
grpc_chttp2_ping_create(true, t_->ping_acks[i]));
}
@ -386,11 +398,14 @@ class DataSendContext {
}
uint32_t max_outgoing() const {
return grpc_core::Clamp<uint32_t>(
return grpc_core::Clamp<int64_t>(
std::min<int64_t>(
{t_->settings.peer().max_frame_size(), stream_remote_window(),
t_->flow_control.remote_window(),
static_cast<int64_t>(write_context_->target_write_size())}),
static_cast<int64_t>(write_context_->target_write_size()) -
(grpc_core::IsChttp2BoundWriteSizeEnabled()
? static_cast<int64_t>(t_->outbuf.Length())
: static_cast<int64_t>(0))}),
0, std::numeric_limits<uint32_t>::max());
}
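A worked example of the new clamp under the bound-write-size experiment: with a peer max_frame_size of 16384, a stream remote window of 10000, a transport remote window of 50000, a target write size of 65536 and 60000 bytes already in outbuf, the candidates are {16384, 10000, 50000, 65536 - 60000 = 5536}, so max_outgoing() returns 5536. If outbuf had already grown past the target, the subtraction would go negative and the Clamp to [0, UINT32_MAX] yields 0, so the write does not overshoot the target. The arithmetic is sketched below with plain integers (no gRPC APIs).

#include <algorithm>
#include <cstdint>

// Sketch of the max_outgoing() arithmetic above with illustrative numbers.
inline int64_t MaxOutgoingSketch() {
  const int64_t max_frame_size = 16384;
  const int64_t stream_remote_window = 10000;
  const int64_t transport_remote_window = 50000;
  const int64_t target_write_size = 65536;
  const int64_t already_buffered = 60000;
  const int64_t candidate =
      std::min({max_frame_size, stream_remote_window, transport_remote_window,
                target_write_size - already_buffered});
  return std::max<int64_t>(0, candidate);  // lower bound of the Clamp
}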
@ -405,6 +420,7 @@ class DataSendContext {
s_->send_trailing_metadata->empty();
grpc_chttp2_encode_data(s_->id, &s_->flow_controlled_buffer, send_bytes,
is_last_frame_, &s_->call_tracer_wrapper,
&t_->http2_ztrace_collector,
t_->outbuf.c_slice_buffer());
sfc_upd_.SentData(send_bytes);
s_->sending_bytes += send_bytes;
@ -463,7 +479,7 @@ class StreamWriteContext {
t_->settings.peer()
.allow_true_binary_metadata(), // use_true_binary_metadata
t_->settings.peer().max_frame_size(), // max_frame_size
&s_->call_tracer_wrapper},
&s_->call_tracer_wrapper, &t_->http2_ztrace_collector},
*s_->send_initial_metadata, t_->outbuf.c_slice_buffer());
grpc_chttp2_reset_ping_clock(t_);
write_context_->IncInitialMetadataWrites();
@ -475,30 +491,16 @@ class StreamWriteContext {
grpc_chttp2_complete_closure_step(t_, &s_->send_initial_metadata_finished,
absl::OkStatus(),
"send_initial_metadata_finished");
if (!grpc_core::IsCallTracerTransportFixEnabled()) {
if (s_->parent_call_tracer != nullptr) {
grpc_core::HttpAnnotation::WriteStats write_stats;
write_stats.target_write_size = write_context_->target_write_size();
s_->parent_call_tracer->RecordAnnotation(
grpc_core::HttpAnnotation(
grpc_core::HttpAnnotation::Type::kHeadWritten,
gpr_now(GPR_CLOCK_REALTIME))
.Add(s_->t->flow_control.stats())
.Add(s_->flow_control.stats())
.Add(write_stats));
}
} else {
if (s_->call_tracer != nullptr && s_->call_tracer->IsSampled()) {
grpc_core::HttpAnnotation::WriteStats write_stats;
write_stats.target_write_size = write_context_->target_write_size();
s_->call_tracer->RecordAnnotation(
grpc_core::HttpAnnotation(
grpc_core::HttpAnnotation::Type::kHeadWritten,
gpr_now(GPR_CLOCK_REALTIME))
.Add(s_->t->flow_control.stats())
.Add(s_->flow_control.stats())
.Add(write_stats));
}
if (s_->call_tracer != nullptr && s_->call_tracer->IsSampled()) {
grpc_core::HttpAnnotation::WriteStats write_stats;
write_stats.target_write_size = write_context_->target_write_size();
s_->call_tracer->RecordAnnotation(
grpc_core::HttpAnnotation(
grpc_core::HttpAnnotation::Type::kHeadWritten,
gpr_now(GPR_CLOCK_REALTIME))
.Add(s_->t->flow_control.stats())
.Add(s_->flow_control.stats())
.Add(write_stats));
}
}
@ -509,6 +511,8 @@ class StreamWriteContext {
const uint32_t stream_announce = s_->flow_control.MaybeSendUpdate();
if (stream_announce == 0) return;
t_->http2_ztrace_collector.Append(
grpc_core::H2WindowUpdateTrace<false>{s_->id, stream_announce});
grpc_slice_buffer_add(
t_->outbuf.c_slice_buffer(),
grpc_chttp2_window_update_create(s_->id, stream_announce,
@ -528,13 +532,23 @@ class StreamWriteContext {
if (!data_send_context.AnyOutgoing()) {
if (t_->flow_control.remote_window() <= 0) {
t_->http2_ztrace_collector.Append(grpc_core::H2FlowControlStall{
t_->flow_control.remote_window(),
data_send_context.stream_remote_window(), s_->id});
grpc_core::global_stats().IncrementHttp2TransportStalls();
report_stall(t_, s_, "transport");
grpc_chttp2_list_add_stalled_by_transport(t_, s_);
} else if (data_send_context.stream_remote_window() <= 0) {
t_->http2_ztrace_collector.Append(grpc_core::H2FlowControlStall{
t_->flow_control.remote_window(),
data_send_context.stream_remote_window(), s_->id});
grpc_core::global_stats().IncrementHttp2StreamStalls();
report_stall(t_, s_, "stream");
grpc_chttp2_list_add_stalled_by_stream(t_, s_);
} else if (grpc_core::IsChttp2BoundWriteSizeEnabled()) {
GRPC_CHTTP2_STREAM_REF(s_, "chttp2_writing:fork");
grpc_chttp2_list_add_writable_stream(t_, s_);
stream_became_writable_ = true;
}
return; // early out: nothing to do
}
@ -566,12 +580,14 @@ class StreamWriteContext {
if (s_->send_trailing_metadata->empty()) {
grpc_chttp2_encode_data(s_->id, &s_->flow_controlled_buffer, 0, true,
&s_->call_tracer_wrapper,
&t_->http2_ztrace_collector,
t_->outbuf.c_slice_buffer());
} else {
t_->hpack_compressor.EncodeHeaders(
grpc_core::HPackCompressor::EncodeHeaderOptions{
s_->id, true, t_->settings.peer().allow_true_binary_metadata(),
t_->settings.peer().max_frame_size(), &s_->call_tracer_wrapper},
t_->settings.peer().max_frame_size(), &s_->call_tracer_wrapper,
&t_->http2_ztrace_collector},
*s_->send_trailing_metadata, t_->outbuf.c_slice_buffer());
}
write_context_->IncTrailingMetadataWrites();
@ -636,26 +652,16 @@ class StreamWriteContext {
t_->outbuf.c_slice_buffer(),
grpc_chttp2_rst_stream_create(
s_->id, static_cast<uint32_t>(Http2ErrorCode::kNoError),
&s_->call_tracer_wrapper));
&s_->call_tracer_wrapper, &t_->http2_ztrace_collector));
}
grpc_chttp2_mark_stream_closed(t_, s_, !t_->is_client, true,
absl::OkStatus());
if (!grpc_core::IsCallTracerTransportFixEnabled()) {
if (s_->parent_call_tracer != nullptr) {
s_->parent_call_tracer->RecordAnnotation(
grpc_core::HttpAnnotation(grpc_core::HttpAnnotation::Type::kEnd,
gpr_now(GPR_CLOCK_REALTIME))
.Add(s_->t->flow_control.stats())
.Add(s_->flow_control.stats()));
}
} else {
if (s_->call_tracer != nullptr && s_->call_tracer->IsSampled()) {
s_->call_tracer->RecordAnnotation(
grpc_core::HttpAnnotation(grpc_core::HttpAnnotation::Type::kEnd,
gpr_now(GPR_CLOCK_REALTIME))
.Add(s_->t->flow_control.stats())
.Add(s_->flow_control.stats()));
}
if (s_->call_tracer != nullptr && s_->call_tracer->IsSampled()) {
s_->call_tracer->RecordAnnotation(
grpc_core::HttpAnnotation(grpc_core::HttpAnnotation::Type::kEnd,
gpr_now(GPR_CLOCK_REALTIME))
.Add(s_->t->flow_control.stats())
.Add(s_->flow_control.stats()));
}
}
@ -672,6 +678,10 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
int64_t outbuf_relative_start_pos = 0;
WriteContext ctx(t);
t->http2_ztrace_collector.Append(grpc_core::H2BeginWriteCycle{
static_cast<uint32_t>(ctx.target_write_size())});
ctx.FlushSettings();
ctx.FlushPingAcks();
ctx.FlushQueuedBuffers();
@ -734,6 +744,7 @@ void grpc_chttp2_end_write(grpc_chttp2_transport* t, grpc_error_handle error) {
grpc_chttp2_stream* s;
t->write_flow.End();
t->http2_ztrace_collector.Append(grpc_core::H2EndWriteCycle{});
if (t->channelz_socket != nullptr) {
t->channelz_socket->RecordMessagesSent(t->num_messages_in_next_write);

View File

@ -73,6 +73,9 @@ class InprocServerTransport final : public ServerTransport {
ClientTransport* client_transport() override { return nullptr; }
ServerTransport* server_transport() override { return this; }
absl::string_view GetTransportName() const override { return "inproc"; }
RefCountedPtr<channelz::SocketNode> GetSocketNode() const override {
return nullptr;
}
void SetPollset(grpc_stream*, grpc_pollset*) override {}
void SetPollsetSet(grpc_stream*, grpc_pollset_set*) override {}
void PerformOp(grpc_transport_op* op) override {
@ -212,6 +215,9 @@ class InprocClientTransport final : public ClientTransport {
ClientTransport* client_transport() override { return this; }
ServerTransport* server_transport() override { return nullptr; }
absl::string_view GetTransportName() const override { return "inproc"; }
RefCountedPtr<channelz::SocketNode> GetSocketNode() const override {
return nullptr;
}
void SetPollset(grpc_stream*, grpc_pollset*) override {}
void SetPollsetSet(grpc_stream*, grpc_pollset_set*) override {}
void PerformOp(grpc_transport_op*) override { Crash("unimplemented"); }
@ -258,8 +264,7 @@ RefCountedPtr<Channel> MakeInprocChannel(Server* server,
server->SetupTransport(server_transport.get(), nullptr,
server->channel_args()
.Remove(GRPC_ARG_MAX_CONNECTION_IDLE_MS)
.Remove(GRPC_ARG_MAX_CONNECTION_AGE_MS),
nullptr);
.Remove(GRPC_ARG_MAX_CONNECTION_AGE_MS));
if (!error.ok()) {
return MakeLameChannel("Failed to create server channel", std::move(error));
}

View File

@ -124,6 +124,10 @@ struct inproc_transport final : public grpc_core::FilterStackTransport {
void SetPollsetSet(grpc_stream* stream,
grpc_pollset_set* pollset_set) override;
void PerformOp(grpc_transport_op* op) override;
grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode> GetSocketNode()
const override {
return nullptr;
}
size_t SizeOfStream() const override;
bool HackyDisableStreamOpBatchCoalescingInConnectedChannel() const override {
@ -1264,8 +1268,8 @@ grpc_channel* grpc_legacy_inproc_channel_create(grpc_server* server,
inproc_transports_create(&server_transport, &client_transport);
// TODO(ncteisen): design and support channelz GetSocket for inproc.
grpc_error_handle error = core_server->SetupTransport(
server_transport, nullptr, server_args, nullptr);
grpc_error_handle error =
core_server->SetupTransport(server_transport, nullptr, server_args);
grpc_channel* channel = nullptr;
if (error.ok()) {
auto new_channel = grpc_core::ChannelCreate(

View File

@ -698,6 +698,10 @@ UPB_INLINE envoy_admin_v3_SubjectAlternateName_name_oneofcases envoy_admin_v3_Su
return (envoy_admin_v3_SubjectAlternateName_name_oneofcases)upb_Message_WhichOneofFieldNumber(
UPB_UPCAST(msg), &field);
}
UPB_INLINE void envoy_admin_v3_SubjectAlternateName_clear_name(envoy_admin_v3_SubjectAlternateName* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(12, 16), -9, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
upb_Message_ClearOneof(UPB_UPCAST(msg), &envoy__admin__v3__SubjectAlternateName_msg_init, &field);
}
UPB_INLINE void envoy_admin_v3_SubjectAlternateName_clear_dns(envoy_admin_v3_SubjectAlternateName* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(12, 16), -9, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);

View File

@ -127,6 +127,10 @@ UPB_INLINE envoy_config_accesslog_v3_AccessLog_config_type_oneofcases envoy_conf
return (envoy_config_accesslog_v3_AccessLog_config_type_oneofcases)upb_Message_WhichOneofFieldNumber(
UPB_UPCAST(msg), &field);
}
UPB_INLINE void envoy_config_accesslog_v3_AccessLog_clear_config_type(envoy_config_accesslog_v3_AccessLog* msg) {
const upb_MiniTableField field = {4, UPB_SIZE(20, 40), UPB_SIZE(-17, -13), 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearOneof(UPB_UPCAST(msg), &envoy__config__accesslog__v3__AccessLog_msg_init, &field);
}
UPB_INLINE void envoy_config_accesslog_v3_AccessLog_clear_name(envoy_config_accesslog_v3_AccessLog* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(24, 16), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
@ -262,6 +266,10 @@ UPB_INLINE envoy_config_accesslog_v3_AccessLogFilter_filter_specifier_oneofcases
return (envoy_config_accesslog_v3_AccessLogFilter_filter_specifier_oneofcases)upb_Message_WhichOneofFieldNumber(
UPB_UPCAST(msg), &field);
}
UPB_INLINE void envoy_config_accesslog_v3_AccessLogFilter_clear_filter_specifier(envoy_config_accesslog_v3_AccessLogFilter* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(12, 16), -9, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearOneof(UPB_UPCAST(msg), &envoy__config__accesslog__v3__AccessLogFilter_msg_init, &field);
}
UPB_INLINE void envoy_config_accesslog_v3_AccessLogFilter_clear_status_code_filter(envoy_config_accesslog_v3_AccessLogFilter* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(12, 16), -9, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
@ -1797,6 +1805,10 @@ UPB_INLINE envoy_config_accesslog_v3_ExtensionFilter_config_type_oneofcases envo
return (envoy_config_accesslog_v3_ExtensionFilter_config_type_oneofcases)upb_Message_WhichOneofFieldNumber(
UPB_UPCAST(msg), &field);
}
UPB_INLINE void envoy_config_accesslog_v3_ExtensionFilter_clear_config_type(envoy_config_accesslog_v3_ExtensionFilter* msg) {
const upb_MiniTableField field = {3, UPB_SIZE(12, 32), -9, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearOneof(UPB_UPCAST(msg), &envoy__config__accesslog__v3__ExtensionFilter_msg_init, &field);
}
UPB_INLINE void envoy_config_accesslog_v3_ExtensionFilter_clear_name(envoy_config_accesslog_v3_ExtensionFilter* msg) {
const upb_MiniTableField field = {1, 16, 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);

View File

@ -52,7 +52,6 @@ typedef struct envoy_config_bootstrap_v3_Bootstrap_ApplicationLogConfig { upb_Me
typedef struct envoy_config_bootstrap_v3_Bootstrap_ApplicationLogConfig_LogFormat { upb_Message UPB_PRIVATE(base); } envoy_config_bootstrap_v3_Bootstrap_ApplicationLogConfig_LogFormat;
typedef struct envoy_config_bootstrap_v3_Bootstrap_DeferredStatOptions { upb_Message UPB_PRIVATE(base); } envoy_config_bootstrap_v3_Bootstrap_DeferredStatOptions;
typedef struct envoy_config_bootstrap_v3_Bootstrap_GrpcAsyncClientManagerConfig { upb_Message UPB_PRIVATE(base); } envoy_config_bootstrap_v3_Bootstrap_GrpcAsyncClientManagerConfig;
typedef struct envoy_config_bootstrap_v3_Bootstrap_CertificateProviderInstancesEntry { upb_Message UPB_PRIVATE(base); } envoy_config_bootstrap_v3_Bootstrap_CertificateProviderInstancesEntry;
typedef struct envoy_config_bootstrap_v3_Admin { upb_Message UPB_PRIVATE(base); } envoy_config_bootstrap_v3_Admin;
typedef struct envoy_config_bootstrap_v3_ClusterManager { upb_Message UPB_PRIVATE(base); } envoy_config_bootstrap_v3_ClusterManager;
typedef struct envoy_config_bootstrap_v3_ClusterManager_OutlierDetection { upb_Message UPB_PRIVATE(base); } envoy_config_bootstrap_v3_ClusterManager_OutlierDetection;
@ -152,6 +151,10 @@ UPB_INLINE envoy_config_bootstrap_v3_Bootstrap_stats_flush_oneofcases envoy_conf
return (envoy_config_bootstrap_v3_Bootstrap_stats_flush_oneofcases)upb_Message_WhichOneofFieldNumber(
UPB_UPCAST(msg), &field);
}
UPB_INLINE void envoy_config_bootstrap_v3_Bootstrap_clear_stats_flush(envoy_config_bootstrap_v3_Bootstrap* msg) {
const upb_MiniTableField field = {29, 14, UPB_SIZE(-145, -17), kUpb_NoSub, 8, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_1Byte << kUpb_FieldRep_Shift)};
upb_Message_ClearOneof(UPB_UPCAST(msg), &envoy__config__bootstrap__v3__Bootstrap_msg_init, &field);
}
UPB_INLINE void envoy_config_bootstrap_v3_Bootstrap_clear_node(envoy_config_bootstrap_v3_Bootstrap* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(16, 88), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
@ -1992,6 +1995,10 @@ UPB_INLINE envoy_config_bootstrap_v3_Bootstrap_ApplicationLogConfig_LogFormat_lo
return (envoy_config_bootstrap_v3_Bootstrap_ApplicationLogConfig_LogFormat_log_format_oneofcases)upb_Message_WhichOneofFieldNumber(
UPB_UPCAST(msg), &field);
}
UPB_INLINE void envoy_config_bootstrap_v3_Bootstrap_ApplicationLogConfig_LogFormat_clear_log_format(envoy_config_bootstrap_v3_Bootstrap_ApplicationLogConfig_LogFormat* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(12, 16), -9, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearOneof(UPB_UPCAST(msg), &envoy__config__bootstrap__v3__Bootstrap__ApplicationLogConfig__LogFormat_msg_init, &field);
}
UPB_INLINE void envoy_config_bootstrap_v3_Bootstrap_ApplicationLogConfig_LogFormat_clear_json_format(envoy_config_bootstrap_v3_Bootstrap_ApplicationLogConfig_LogFormat* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(12, 16), -9, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
@ -2166,27 +2173,6 @@ UPB_INLINE struct google_protobuf_Duration* envoy_config_bootstrap_v3_Bootstrap_
return sub;
}
/* envoy.config.bootstrap.v3.Bootstrap.CertificateProviderInstancesEntry */
UPB_INLINE upb_StringView envoy_config_bootstrap_v3_Bootstrap_CertificateProviderInstancesEntry_key(const envoy_config_bootstrap_v3_Bootstrap_CertificateProviderInstancesEntry* msg) {
upb_StringView ret;
_upb_msg_map_key(msg, &ret, 0);
return ret;
}
UPB_INLINE const struct envoy_config_core_v3_TypedExtensionConfig* envoy_config_bootstrap_v3_Bootstrap_CertificateProviderInstancesEntry_value(const envoy_config_bootstrap_v3_Bootstrap_CertificateProviderInstancesEntry* msg) {
struct envoy_config_core_v3_TypedExtensionConfig* ret;
_upb_msg_map_value(msg, &ret, sizeof(ret));
return ret;
}
UPB_INLINE bool envoy_config_bootstrap_v3_Bootstrap_CertificateProviderInstancesEntry_has_value(const envoy_config_bootstrap_v3_Bootstrap_CertificateProviderInstancesEntry* msg) {
const upb_MiniTableField field = {2, 32, 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
return upb_Message_HasBaseField(UPB_UPCAST(msg), &field);
}
UPB_INLINE void envoy_config_bootstrap_v3_Bootstrap_CertificateProviderInstancesEntry_set_value(envoy_config_bootstrap_v3_Bootstrap_CertificateProviderInstancesEntry *msg, struct envoy_config_core_v3_TypedExtensionConfig* value) {
_upb_msg_map_set_value(msg, &value, sizeof(struct envoy_config_core_v3_TypedExtensionConfig*));
}
/* envoy.config.bootstrap.v3.Admin */
UPB_INLINE envoy_config_bootstrap_v3_Admin* envoy_config_bootstrap_v3_Admin_new(upb_Arena* arena) {
@ -3380,6 +3366,10 @@ UPB_INLINE envoy_config_bootstrap_v3_RuntimeLayer_layer_specifier_oneofcases env
return (envoy_config_bootstrap_v3_RuntimeLayer_layer_specifier_oneofcases)upb_Message_WhichOneofFieldNumber(
UPB_UPCAST(msg), &field);
}
UPB_INLINE void envoy_config_bootstrap_v3_RuntimeLayer_clear_layer_specifier(envoy_config_bootstrap_v3_RuntimeLayer* msg) {
const upb_MiniTableField field = {2, UPB_SIZE(12, 32), -9, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearOneof(UPB_UPCAST(msg), &envoy__config__bootstrap__v3__RuntimeLayer_msg_init, &field);
}
UPB_INLINE void envoy_config_bootstrap_v3_RuntimeLayer_clear_name(envoy_config_bootstrap_v3_RuntimeLayer* msg) {
const upb_MiniTableField field = {1, 16, 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);

View File

@ -64,7 +64,6 @@ typedef struct envoy_config_cluster_v3_Cluster_CommonLbConfig_LocalityWeightedLb
typedef struct envoy_config_cluster_v3_Cluster_CommonLbConfig_ConsistentHashingLbConfig { upb_Message UPB_PRIVATE(base); } envoy_config_cluster_v3_Cluster_CommonLbConfig_ConsistentHashingLbConfig;
typedef struct envoy_config_cluster_v3_Cluster_RefreshRate { upb_Message UPB_PRIVATE(base); } envoy_config_cluster_v3_Cluster_RefreshRate;
typedef struct envoy_config_cluster_v3_Cluster_PreconnectPolicy { upb_Message UPB_PRIVATE(base); } envoy_config_cluster_v3_Cluster_PreconnectPolicy;
typedef struct envoy_config_cluster_v3_Cluster_TypedExtensionProtocolOptionsEntry { upb_Message UPB_PRIVATE(base); } envoy_config_cluster_v3_Cluster_TypedExtensionProtocolOptionsEntry;
typedef struct envoy_config_cluster_v3_LoadBalancingPolicy { upb_Message UPB_PRIVATE(base); } envoy_config_cluster_v3_LoadBalancingPolicy;
typedef struct envoy_config_cluster_v3_LoadBalancingPolicy_Policy { upb_Message UPB_PRIVATE(base); } envoy_config_cluster_v3_LoadBalancingPolicy_Policy;
typedef struct envoy_config_cluster_v3_UpstreamConnectionOptions { upb_Message UPB_PRIVATE(base); } envoy_config_cluster_v3_UpstreamConnectionOptions;
@ -277,6 +276,10 @@ UPB_INLINE envoy_config_cluster_v3_Cluster_cluster_discovery_type_oneofcases env
return (envoy_config_cluster_v3_Cluster_cluster_discovery_type_oneofcases)upb_Message_WhichOneofFieldNumber(
UPB_UPCAST(msg), &field);
}
UPB_INLINE void envoy_config_cluster_v3_Cluster_clear_cluster_discovery_type(envoy_config_cluster_v3_Cluster* msg) {
const upb_MiniTableField field = {2, UPB_SIZE(176, 352), UPB_SIZE(-173, -33), kUpb_NoSub, 5, (int)kUpb_FieldMode_Scalar | (int)kUpb_LabelFlags_IsAlternate | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)};
upb_Message_ClearOneof(UPB_UPCAST(msg), &envoy__config__cluster__v3__Cluster_msg_init, &field);
}
typedef enum {
envoy_config_cluster_v3_Cluster_lb_config_ring_hash_lb_config = 23,
envoy_config_cluster_v3_Cluster_lb_config_maglev_lb_config = 52,
@ -290,6 +293,10 @@ UPB_INLINE envoy_config_cluster_v3_Cluster_lb_config_oneofcases envoy_config_clu
return (envoy_config_cluster_v3_Cluster_lb_config_oneofcases)upb_Message_WhichOneofFieldNumber(
UPB_UPCAST(msg), &field);
}
UPB_INLINE void envoy_config_cluster_v3_Cluster_clear_lb_config(envoy_config_cluster_v3_Cluster* msg) {
const upb_MiniTableField field = {23, UPB_SIZE(184, 360), UPB_SIZE(-181, -37), 14, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearOneof(UPB_UPCAST(msg), &envoy__config__cluster__v3__Cluster_msg_init, &field);
}
UPB_INLINE void envoy_config_cluster_v3_Cluster_clear_name(envoy_config_cluster_v3_Cluster* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(188, 40), 0, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
@ -3315,6 +3322,10 @@ UPB_INLINE envoy_config_cluster_v3_Cluster_CommonLbConfig_locality_config_specif
return (envoy_config_cluster_v3_Cluster_CommonLbConfig_locality_config_specifier_oneofcases)upb_Message_WhichOneofFieldNumber(
UPB_UPCAST(msg), &field);
}
UPB_INLINE void envoy_config_cluster_v3_Cluster_CommonLbConfig_clear_locality_config_specifier(envoy_config_cluster_v3_Cluster_CommonLbConfig* msg) {
const upb_MiniTableField field = {2, UPB_SIZE(32, 48), UPB_SIZE(-29, -13), 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearOneof(UPB_UPCAST(msg), &envoy__config__cluster__v3__Cluster__CommonLbConfig_msg_init, &field);
}
UPB_INLINE void envoy_config_cluster_v3_Cluster_CommonLbConfig_clear_healthy_panic_threshold(envoy_config_cluster_v3_Cluster_CommonLbConfig* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(12, 16), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
@ -3961,27 +3972,6 @@ UPB_INLINE struct google_protobuf_DoubleValue* envoy_config_cluster_v3_Cluster_P
return sub;
}
/* envoy.config.cluster.v3.Cluster.TypedExtensionProtocolOptionsEntry */
UPB_INLINE upb_StringView envoy_config_cluster_v3_Cluster_TypedExtensionProtocolOptionsEntry_key(const envoy_config_cluster_v3_Cluster_TypedExtensionProtocolOptionsEntry* msg) {
upb_StringView ret;
_upb_msg_map_key(msg, &ret, 0);
return ret;
}
UPB_INLINE const struct google_protobuf_Any* envoy_config_cluster_v3_Cluster_TypedExtensionProtocolOptionsEntry_value(const envoy_config_cluster_v3_Cluster_TypedExtensionProtocolOptionsEntry* msg) {
struct google_protobuf_Any* ret;
_upb_msg_map_value(msg, &ret, sizeof(ret));
return ret;
}
UPB_INLINE bool envoy_config_cluster_v3_Cluster_TypedExtensionProtocolOptionsEntry_has_value(const envoy_config_cluster_v3_Cluster_TypedExtensionProtocolOptionsEntry* msg) {
const upb_MiniTableField field = {2, 32, 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
return upb_Message_HasBaseField(UPB_UPCAST(msg), &field);
}
UPB_INLINE void envoy_config_cluster_v3_Cluster_TypedExtensionProtocolOptionsEntry_set_value(envoy_config_cluster_v3_Cluster_TypedExtensionProtocolOptionsEntry *msg, struct google_protobuf_Any* value) {
_upb_msg_map_set_value(msg, &value, sizeof(struct google_protobuf_Any*));
}
/* envoy.config.cluster.v3.LoadBalancingPolicy */
UPB_INLINE envoy_config_cluster_v3_LoadBalancingPolicy* envoy_config_cluster_v3_LoadBalancingPolicy_new(upb_Arena* arena) {

View File

@ -35,7 +35,6 @@ typedef struct envoy_config_common_matcher_v3_Matcher_MatcherList_Predicate_Pred
typedef struct envoy_config_common_matcher_v3_Matcher_MatcherList_FieldMatcher { upb_Message UPB_PRIVATE(base); } envoy_config_common_matcher_v3_Matcher_MatcherList_FieldMatcher;
typedef struct envoy_config_common_matcher_v3_Matcher_MatcherTree { upb_Message UPB_PRIVATE(base); } envoy_config_common_matcher_v3_Matcher_MatcherTree;
typedef struct envoy_config_common_matcher_v3_Matcher_MatcherTree_MatchMap { upb_Message UPB_PRIVATE(base); } envoy_config_common_matcher_v3_Matcher_MatcherTree_MatchMap;
typedef struct envoy_config_common_matcher_v3_Matcher_MatcherTree_MatchMap_MapEntry { upb_Message UPB_PRIVATE(base); } envoy_config_common_matcher_v3_Matcher_MatcherTree_MatchMap_MapEntry;
typedef struct envoy_config_common_matcher_v3_MatchPredicate { upb_Message UPB_PRIVATE(base); } envoy_config_common_matcher_v3_MatchPredicate;
typedef struct envoy_config_common_matcher_v3_MatchPredicate_MatchSet { upb_Message UPB_PRIVATE(base); } envoy_config_common_matcher_v3_MatchPredicate_MatchSet;
typedef struct envoy_config_common_matcher_v3_HttpHeadersMatch { upb_Message UPB_PRIVATE(base); } envoy_config_common_matcher_v3_HttpHeadersMatch;
@ -93,6 +92,10 @@ UPB_INLINE envoy_config_common_matcher_v3_Matcher_matcher_type_oneofcases envoy_
return (envoy_config_common_matcher_v3_Matcher_matcher_type_oneofcases)upb_Message_WhichOneofFieldNumber(
UPB_UPCAST(msg), &field);
}
UPB_INLINE void envoy_config_common_matcher_v3_Matcher_clear_matcher_type(envoy_config_common_matcher_v3_Matcher* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(20, 24), UPB_SIZE(-17, -13), 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearOneof(UPB_UPCAST(msg), &envoy__config__common__matcher__v3__Matcher_msg_init, &field);
}
UPB_INLINE void envoy_config_common_matcher_v3_Matcher_clear_matcher_list(envoy_config_common_matcher_v3_Matcher* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(20, 24), UPB_SIZE(-17, -13), 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
@ -231,6 +234,10 @@ UPB_INLINE envoy_config_common_matcher_v3_Matcher_OnMatch_on_match_oneofcases en
return (envoy_config_common_matcher_v3_Matcher_OnMatch_on_match_oneofcases)upb_Message_WhichOneofFieldNumber(
UPB_UPCAST(msg), &field);
}
UPB_INLINE void envoy_config_common_matcher_v3_Matcher_OnMatch_clear_on_match(envoy_config_common_matcher_v3_Matcher_OnMatch* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(12, 16), -9, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearOneof(UPB_UPCAST(msg), &envoy__config__common__matcher__v3__Matcher__OnMatch_msg_init, &field);
}
UPB_INLINE void envoy_config_common_matcher_v3_Matcher_OnMatch_clear_matcher(envoy_config_common_matcher_v3_Matcher_OnMatch* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(12, 16), -9, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
@ -446,6 +453,10 @@ UPB_INLINE envoy_config_common_matcher_v3_Matcher_MatcherList_Predicate_match_ty
return (envoy_config_common_matcher_v3_Matcher_MatcherList_Predicate_match_type_oneofcases)upb_Message_WhichOneofFieldNumber(
UPB_UPCAST(msg), &field);
}
UPB_INLINE void envoy_config_common_matcher_v3_Matcher_MatcherList_Predicate_clear_match_type(envoy_config_common_matcher_v3_Matcher_MatcherList_Predicate* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(12, 16), -9, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearOneof(UPB_UPCAST(msg), &envoy__config__common__matcher__v3__Matcher__MatcherList__Predicate_msg_init, &field);
}
UPB_INLINE void envoy_config_common_matcher_v3_Matcher_MatcherList_Predicate_clear_single_predicate(envoy_config_common_matcher_v3_Matcher_MatcherList_Predicate* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(12, 16), -9, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
@ -614,6 +625,10 @@ UPB_INLINE envoy_config_common_matcher_v3_Matcher_MatcherList_Predicate_SinglePr
return (envoy_config_common_matcher_v3_Matcher_MatcherList_Predicate_SinglePredicate_matcher_oneofcases)upb_Message_WhichOneofFieldNumber(
UPB_UPCAST(msg), &field);
}
UPB_INLINE void envoy_config_common_matcher_v3_Matcher_MatcherList_Predicate_SinglePredicate_clear_matcher(envoy_config_common_matcher_v3_Matcher_MatcherList_Predicate_SinglePredicate* msg) {
const upb_MiniTableField field = {2, UPB_SIZE(20, 24), UPB_SIZE(-17, -13), 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearOneof(UPB_UPCAST(msg), &envoy__config__common__matcher__v3__Matcher__MatcherList__Predicate__SinglePredicate_msg_init, &field);
}
UPB_INLINE void envoy_config_common_matcher_v3_Matcher_MatcherList_Predicate_SinglePredicate_clear_input(envoy_config_common_matcher_v3_Matcher_MatcherList_Predicate_SinglePredicate* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(12, 16), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
@ -956,6 +971,10 @@ UPB_INLINE envoy_config_common_matcher_v3_Matcher_MatcherTree_tree_type_oneofcas
return (envoy_config_common_matcher_v3_Matcher_MatcherTree_tree_type_oneofcases)upb_Message_WhichOneofFieldNumber(
UPB_UPCAST(msg), &field);
}
UPB_INLINE void envoy_config_common_matcher_v3_Matcher_MatcherTree_clear_tree_type(envoy_config_common_matcher_v3_Matcher_MatcherTree* msg) {
const upb_MiniTableField field = {2, UPB_SIZE(20, 24), UPB_SIZE(-17, -13), 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearOneof(UPB_UPCAST(msg), &envoy__config__common__matcher__v3__Matcher__MatcherTree_msg_init, &field);
}
UPB_INLINE void envoy_config_common_matcher_v3_Matcher_MatcherTree_clear_input(envoy_config_common_matcher_v3_Matcher_MatcherTree* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(12, 16), 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
@ -1180,27 +1199,6 @@ UPB_INLINE bool envoy_config_common_matcher_v3_Matcher_MatcherTree_MatchMap_map_
return _upb_Map_Delete(map, &key, 0, NULL);
}
/* envoy.config.common.matcher.v3.Matcher.MatcherTree.MatchMap.MapEntry */
UPB_INLINE upb_StringView envoy_config_common_matcher_v3_Matcher_MatcherTree_MatchMap_MapEntry_key(const envoy_config_common_matcher_v3_Matcher_MatcherTree_MatchMap_MapEntry* msg) {
upb_StringView ret;
_upb_msg_map_key(msg, &ret, 0);
return ret;
}
UPB_INLINE const envoy_config_common_matcher_v3_Matcher_OnMatch* envoy_config_common_matcher_v3_Matcher_MatcherTree_MatchMap_MapEntry_value(const envoy_config_common_matcher_v3_Matcher_MatcherTree_MatchMap_MapEntry* msg) {
envoy_config_common_matcher_v3_Matcher_OnMatch* ret;
_upb_msg_map_value(msg, &ret, sizeof(ret));
return ret;
}
UPB_INLINE bool envoy_config_common_matcher_v3_Matcher_MatcherTree_MatchMap_MapEntry_has_value(const envoy_config_common_matcher_v3_Matcher_MatcherTree_MatchMap_MapEntry* msg) {
const upb_MiniTableField field = {2, 32, 64, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
return upb_Message_HasBaseField(UPB_UPCAST(msg), &field);
}
UPB_INLINE void envoy_config_common_matcher_v3_Matcher_MatcherTree_MatchMap_MapEntry_set_value(envoy_config_common_matcher_v3_Matcher_MatcherTree_MatchMap_MapEntry *msg, envoy_config_common_matcher_v3_Matcher_OnMatch* value) {
_upb_msg_map_set_value(msg, &value, sizeof(envoy_config_common_matcher_v3_Matcher_OnMatch*));
}
/* envoy.config.common.matcher.v3.MatchPredicate */
UPB_INLINE envoy_config_common_matcher_v3_MatchPredicate* envoy_config_common_matcher_v3_MatchPredicate_new(upb_Arena* arena) {
@ -1255,6 +1253,10 @@ UPB_INLINE envoy_config_common_matcher_v3_MatchPredicate_rule_oneofcases envoy_c
return (envoy_config_common_matcher_v3_MatchPredicate_rule_oneofcases)upb_Message_WhichOneofFieldNumber(
UPB_UPCAST(msg), &field);
}
UPB_INLINE void envoy_config_common_matcher_v3_MatchPredicate_clear_rule(envoy_config_common_matcher_v3_MatchPredicate* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(12, 16), -9, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearOneof(UPB_UPCAST(msg), &envoy__config__common__matcher__v3__MatchPredicate_msg_init, &field);
}
UPB_INLINE void envoy_config_common_matcher_v3_MatchPredicate_clear_or_match(envoy_config_common_matcher_v3_MatchPredicate* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(12, 16), -9, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
@ -1924,6 +1926,10 @@ UPB_INLINE envoy_config_common_matcher_v3_HttpGenericBodyMatch_GenericTextMatch_
return (envoy_config_common_matcher_v3_HttpGenericBodyMatch_GenericTextMatch_rule_oneofcases)upb_Message_WhichOneofFieldNumber(
UPB_UPCAST(msg), &field);
}
UPB_INLINE void envoy_config_common_matcher_v3_HttpGenericBodyMatch_GenericTextMatch_clear_rule(envoy_config_common_matcher_v3_HttpGenericBodyMatch_GenericTextMatch* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(12, 16), -9, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
upb_Message_ClearOneof(UPB_UPCAST(msg), &envoy__config__common__matcher__v3__HttpGenericBodyMatch__GenericTextMatch_msg_init, &field);
}
UPB_INLINE void envoy_config_common_matcher_v3_HttpGenericBodyMatch_GenericTextMatch_clear_string_match(envoy_config_common_matcher_v3_HttpGenericBodyMatch_GenericTextMatch* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(12, 16), -9, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);

View File

@ -164,6 +164,10 @@ UPB_INLINE envoy_config_core_v3_EnvoyInternalAddress_address_name_specifier_oneo
return (envoy_config_core_v3_EnvoyInternalAddress_address_name_specifier_oneofcases)upb_Message_WhichOneofFieldNumber(
UPB_UPCAST(msg), &field);
}
UPB_INLINE void envoy_config_core_v3_EnvoyInternalAddress_clear_address_name_specifier(envoy_config_core_v3_EnvoyInternalAddress* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(20, 32), -9, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
upb_Message_ClearOneof(UPB_UPCAST(msg), &envoy__config__core__v3__EnvoyInternalAddress_msg_init, &field);
}
UPB_INLINE void envoy_config_core_v3_EnvoyInternalAddress_clear_server_listener_name(envoy_config_core_v3_EnvoyInternalAddress* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(20, 32), -9, kUpb_NoSub, 9, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)};
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
@ -248,6 +252,10 @@ UPB_INLINE envoy_config_core_v3_SocketAddress_port_specifier_oneofcases envoy_co
return (envoy_config_core_v3_SocketAddress_port_specifier_oneofcases)upb_Message_WhichOneofFieldNumber(
UPB_UPCAST(msg), &field);
}
UPB_INLINE void envoy_config_core_v3_SocketAddress_clear_port_specifier(envoy_config_core_v3_SocketAddress* msg) {
const upb_MiniTableField field = {3, UPB_SIZE(36, 56), -17, kUpb_NoSub, 13, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)};
upb_Message_ClearOneof(UPB_UPCAST(msg), &envoy__config__core__v3__SocketAddress_msg_init, &field);
}
UPB_INLINE void envoy_config_core_v3_SocketAddress_clear_protocol(envoy_config_core_v3_SocketAddress* msg) {
const upb_MiniTableField field = {1, 12, 0, kUpb_NoSub, 5, (int)kUpb_FieldMode_Scalar | (int)kUpb_LabelFlags_IsAlternate | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)};
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
@ -956,6 +964,10 @@ UPB_INLINE envoy_config_core_v3_Address_address_oneofcases envoy_config_core_v3_
return (envoy_config_core_v3_Address_address_oneofcases)upb_Message_WhichOneofFieldNumber(
UPB_UPCAST(msg), &field);
}
UPB_INLINE void envoy_config_core_v3_Address_clear_address(envoy_config_core_v3_Address* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(12, 16), -9, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearOneof(UPB_UPCAST(msg), &envoy__config__core__v3__Address_msg_init, &field);
}
UPB_INLINE void envoy_config_core_v3_Address_clear_socket_address(envoy_config_core_v3_Address* msg) {
const upb_MiniTableField field = {1, UPB_SIZE(12, 16), -9, 0, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)};
upb_Message_ClearBaseField(UPB_UPCAST(msg), &field);
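The core address header gets the same treatment; a short sketch of checking which `address` member is set before clearing it. The `_address_case` query, its `_NOT_SET` enumerator, and the `_new` constructor follow upb's usual oneof codegen naming and are assumptions here, since only the `oneofcases` enum and the new clear accessor appear in this hunk:

upb_Arena* arena = upb_Arena_New();
envoy_config_core_v3_Address* addr =
    envoy_config_core_v3_Address_new(arena);           /* assumed generated constructor */
if (envoy_config_core_v3_Address_address_case(addr) !=
    envoy_config_core_v3_Address_address_NOT_SET) {    /* assumed case query + enumerator */
  /* New accessor from this change: resets the active oneof member. */
  envoy_config_core_v3_Address_clear_address(addr);
}
upb_Arena_Free(arena);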

Some files were not shown because too many files have changed in this diff.