diff --git a/internal/transport/keepalive_test.go b/internal/transport/keepalive_test.go
new file mode 100644
index 000000000..213186976
--- /dev/null
+++ b/internal/transport/keepalive_test.go
@@ -0,0 +1,606 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// This file contains tests related to the following proposals:
+// https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md
+// https://github.com/grpc/proposal/blob/master/A9-server-side-conn-mgt.md
+// https://github.com/grpc/proposal/blob/master/A18-tcp-user-timeout.md
+package transport
+
+import (
+	"context"
+	"io"
+	"net"
+	"testing"
+	"time"
+
+	"golang.org/x/net/http2"
+	"google.golang.org/grpc/internal/syscall"
+	"google.golang.org/grpc/keepalive"
+)
+
+// TestMaxConnectionIdle tests that a server will send GoAway to an idle
+// client. An idle client is one who doesn't make any RPC calls for a duration
+// of MaxConnectionIdle time.
+func TestMaxConnectionIdle(t *testing.T) {
+	serverConfig := &ServerConfig{
+		KeepaliveParams: keepalive.ServerParameters{
+			MaxConnectionIdle: 2 * time.Second,
+		},
+	}
+	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
+	defer func() {
+		client.Close()
+		server.stop()
+		cancel()
+	}()
+
+	stream, err := client.NewStream(context.Background(), &CallHdr{})
+	if err != nil {
+		t.Fatalf("client.NewStream() failed: %v", err)
+	}
+	client.CloseStream(stream, io.EOF)
+
+	// Wait for the server's MaxConnectionIdle timeout to kick in, and for it
+	// to send a GoAway.
+	timeout := time.NewTimer(time.Second * 4)
+	select {
+	case <-client.Error():
+		if !timeout.Stop() {
+			<-timeout.C
+		}
+		if reason := client.GetGoAwayReason(); reason != GoAwayNoReason {
+			t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayNoReason)
+		}
+	case <-timeout.C:
+		t.Fatalf("MaxConnectionIdle timeout expired, expected a GoAway from the server.")
+	}
+}
+
+// TestMaxConnectionIdleBusyClient tests that a server will not send GoAway to
+// a busy client.
+func TestMaxConnectionIdleBusyClient(t *testing.T) {
+	serverConfig := &ServerConfig{
+		KeepaliveParams: keepalive.ServerParameters{
+			MaxConnectionIdle: 2 * time.Second,
+		},
+	}
+	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
+	defer func() {
+		client.Close()
+		server.stop()
+		cancel()
+	}()
+
+	_, err := client.NewStream(context.Background(), &CallHdr{})
+	if err != nil {
+		t.Fatalf("client.NewStream() failed: %v", err)
+	}
+
+	// Wait for double the MaxConnectionIdle time to make sure the server does
+	// not send a GoAway, as the client has an open stream.
+	timeout := time.NewTimer(time.Second * 4)
+	select {
+	case <-client.GoAway():
+		if !timeout.Stop() {
+			<-timeout.C
+		}
+		t.Fatalf("A non-idle client received a GoAway.")
+	case <-timeout.C:
+	}
+}
+
+// TestMaxConnectionAge tests that a server will send GoAway after a duration
+// of MaxConnectionAge.
+func TestMaxConnectionAge(t *testing.T) {
+	serverConfig := &ServerConfig{
+		KeepaliveParams: keepalive.ServerParameters{
+			MaxConnectionAge:      1 * time.Second,
+			MaxConnectionAgeGrace: 1 * time.Second,
+		},
+	}
+	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
+	defer func() {
+		client.Close()
+		server.stop()
+		cancel()
+	}()
+
+	_, err := client.NewStream(context.Background(), &CallHdr{})
+	if err != nil {
+		t.Fatalf("client.NewStream() failed: %v", err)
+	}
+
+	// Wait for the server's MaxConnectionAge timeout to kick in, and for it
+	// to send a GoAway.
+	timeout := time.NewTimer(4 * time.Second)
+	select {
+	case <-client.Error():
+		if !timeout.Stop() {
+			<-timeout.C
+		}
+		if reason := client.GetGoAwayReason(); reason != GoAwayNoReason {
+			t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayNoReason)
+		}
+	case <-timeout.C:
+		t.Fatalf("MaxConnectionAge timeout expired, expected a GoAway from the server.")
+	}
+}
+
+const (
+	defaultWriteBufSize = 32 * 1024
+	defaultReadBufSize  = 32 * 1024
+)
+
+// TestKeepaliveServerClosesUnresponsiveClient tests that a server closes
+// the connection with a client that doesn't respond to keepalive pings.
+//
+// This test creates a regular net.Conn connection to the server and sends the
+// clientPreface and the initial Settings frame, and then remains unresponsive.
+func TestKeepaliveServerClosesUnresponsiveClient(t *testing.T) {
+	serverConfig := &ServerConfig{
+		KeepaliveParams: keepalive.ServerParameters{
+			Time:    1 * time.Second,
+			Timeout: 1 * time.Second,
+		},
+	}
+	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
+	defer func() {
+		client.Close()
+		server.stop()
+		cancel()
+	}()
+
+	addr := server.addr()
+	conn, err := net.Dial("tcp", addr)
+	if err != nil {
+		t.Fatalf("net.Dial(tcp, %v) failed: %v", addr, err)
+	}
+	defer conn.Close()
+
+	if n, err := conn.Write(clientPreface); err != nil || n != len(clientPreface) {
+		t.Fatalf("conn.Write(clientPreface) failed: n=%v, err=%v", n, err)
+	}
+	framer := newFramer(conn, defaultWriteBufSize, defaultReadBufSize, 0)
+	if err := framer.fr.WriteSettings(http2.Setting{}); err != nil {
+		t.Fatal("framer.WriteSettings(http2.Setting{}) failed:", err)
+	}
+	framer.writer.Flush()
+
+	// We read from the net.Conn till we get an error, which is expected when
+	// the server closes the connection as part of the keepalive logic.
+	errCh := make(chan error)
+	go func() {
+		b := make([]byte, 24)
+		for {
+			if _, err = conn.Read(b); err != nil {
+				errCh <- err
+				return
+			}
+		}
+	}()
+
+	// The server waits for a duration of KeepaliveParams.Time before sending
+	// out a ping, and then waits for KeepaliveParams.Timeout for a ping ack.
+	timeout := time.NewTimer(4 * time.Second)
+	select {
+	case err := <-errCh:
+		if err != io.EOF {
+			t.Fatalf("client.Read(_) = _,%v, want io.EOF", err)
+
+		}
+	case <-timeout.C:
+		t.Fatalf("keepalive timeout expired, server should have closed the connection.")
+	}
+}
+
+// TestKeepaliveServerWithResponsiveClient tests that a server doesn't close
+// the connection with a client that responds to keepalive pings.
+func TestKeepaliveServerWithResponsiveClient(t *testing.T) {
+	serverConfig := &ServerConfig{
+		KeepaliveParams: keepalive.ServerParameters{
+			Time:    1 * time.Second,
+			Timeout: 1 * time.Second,
+		},
+	}
+	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
+	defer func() {
+		client.Close()
+		server.stop()
+		cancel()
+	}()
+
+	// Give keepalive logic some time by sleeping.
+	time.Sleep(4 * time.Second)
+
+	// Make sure the client transport is healthy.
+	if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil {
+		t.Fatalf("client.NewStream() failed: %v", err)
+	}
+}
+
+// TestKeepaliveClientClosesUnresponsiveServer creates a server which does not
+// respond to keepalive pings, and makes sure that the client closes the
+// transport once the keepalive logic kicks in. Here, we set the
+// `PermitWithoutStream` parameter to true which ensures that the keepalive
+// logic is running even without any active streams.
+func TestKeepaliveClientClosesUnresponsiveServer(t *testing.T) {
+	connCh := make(chan net.Conn, 1)
+	client, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{
+		Time:                1 * time.Second,
+		Timeout:             1 * time.Second,
+		PermitWithoutStream: true,
+	}}, connCh)
+	defer cancel()
+	defer client.Close()
+
+	conn, ok := <-connCh
+	if !ok {
+		t.Fatalf("Server didn't return connection object")
+	}
+	defer conn.Close()
+
+	// Sleep for keepalive to close the connection.
+	time.Sleep(4 * time.Second)
+
+	// Make sure the client transport is not healthy.
+	if _, err := client.NewStream(context.Background(), &CallHdr{}); err == nil {
+		t.Fatal("client.NewStream() should have failed, but succeeded")
+	}
+}
+
+// TestKeepaliveClientOpenWithUnresponsiveServer creates a server which does
+// not respond to keepalive pings, and makes sure that the client does not
+// close the transport. Here, we do not set the `PermitWithoutStream`
+// parameter, so the keepalive logic stays dormant when there are no active
+// streams, and the transport therefore stays open.
+func TestKeepaliveClientOpenWithUnresponsiveServer(t *testing.T) {
+	connCh := make(chan net.Conn, 1)
+	client, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{
+		Time:    1 * time.Second,
+		Timeout: 1 * time.Second,
+	}}, connCh)
+	defer cancel()
+	defer client.Close()
+
+	conn, ok := <-connCh
+	if !ok {
+		t.Fatalf("Server didn't return connection object")
+	}
+	defer conn.Close()
+
+	// Give keepalive some time.
+	time.Sleep(4 * time.Second)
+
+	// Make sure the client transport is healthy.
+	if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil {
+		t.Fatalf("client.NewStream() failed: %v", err)
+	}
+}
+
+// TestKeepaliveClientClosesWithActiveStreams creates a server which does not
+// respond to keepalive pings, and makes sure that the client closes the
+// transport even when there is an active stream.
+func TestKeepaliveClientClosesWithActiveStreams(t *testing.T) {
+	connCh := make(chan net.Conn, 1)
+	client, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{
+		Time:    1 * time.Second,
+		Timeout: 1 * time.Second,
+	}}, connCh)
+	defer cancel()
+	defer client.Close()
+
+	conn, ok := <-connCh
+	if !ok {
+		t.Fatalf("Server didn't return connection object")
+	}
+	defer conn.Close()
+
+	// Create a stream, but send no data on it.
+	if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil {
+		t.Fatalf("client.NewStream() failed: %v", err)
+	}
+
+	// Give keepalive some time.
+	time.Sleep(4 * time.Second)
+
+	// Make sure the client transport is not healthy.
+	if _, err := client.NewStream(context.Background(), &CallHdr{}); err == nil {
+		t.Fatal("client.NewStream() should have failed, but succeeded")
+	}
+}
+
+// TestKeepaliveClientStaysHealthyWithResponsiveServer creates a server which
+// responds to keepalive pings, and makes sure that a client transport stays
+// healthy without any active streams.
+func TestKeepaliveClientStaysHealthyWithResponsiveServer(t *testing.T) {
+	server, client, cancel := setUpWithOptions(t, 0, &ServerConfig{}, normal, ConnectOptions{
+		KeepaliveParams: keepalive.ClientParameters{
+			Time:                1 * time.Second,
+			Timeout:             1 * time.Second,
+			PermitWithoutStream: true,
+		}})
+	defer func() {
+		client.Close()
+		server.stop()
+		cancel()
+	}()
+
+	// Give keepalive some time.
+	time.Sleep(4 * time.Second)
+
+	// Make sure the client transport is healthy.
+	if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil {
+		t.Fatalf("client.NewStream() failed: %v", err)
+	}
+}
+
+// TestKeepaliveServerEnforcementWithAbusiveClientNoRPC verifies that the
+// server closes a client transport when it sends too many keepalive pings
+// (when there are no active streams), based on the configured
+// EnforcementPolicy.
+func TestKeepaliveServerEnforcementWithAbusiveClientNoRPC(t *testing.T) {
+	serverConfig := &ServerConfig{
+		KeepalivePolicy: keepalive.EnforcementPolicy{
+			MinTime: 2 * time.Second,
+		},
+	}
+	clientOptions := ConnectOptions{
+		KeepaliveParams: keepalive.ClientParameters{
+			Time:                50 * time.Millisecond,
+			Timeout:             1 * time.Second,
+			PermitWithoutStream: true,
+		},
+	}
+	server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions)
+	defer func() {
+		client.Close()
+		server.stop()
+		cancel()
+	}()
+
+	timeout := time.NewTimer(4 * time.Second)
+	select {
+	case <-client.Error():
+		if !timeout.Stop() {
+			<-timeout.C
+		}
+		if reason := client.GetGoAwayReason(); reason != GoAwayTooManyPings {
+			t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayTooManyPings)
+		}
+	case <-timeout.C:
+		t.Fatalf("client transport still healthy; expected GoAway from the server.")
+	}
+
+	// Make sure the client transport is not healthy.
+	if _, err := client.NewStream(context.Background(), &CallHdr{}); err == nil {
+		t.Fatal("client.NewStream() should have failed, but succeeded")
+	}
+}
+
+// TestKeepaliveServerEnforcementWithAbusiveClientWithRPC verifies that the
+// server closes a client transport when it sends too many keepalive pings
+// (even when there is an active stream), based on the configured
+// EnforcementPolicy.
+func TestKeepaliveServerEnforcementWithAbusiveClientWithRPC(t *testing.T) {
+	serverConfig := &ServerConfig{
+		KeepalivePolicy: keepalive.EnforcementPolicy{
+			MinTime: 2 * time.Second,
+		},
+	}
+	clientOptions := ConnectOptions{
+		KeepaliveParams: keepalive.ClientParameters{
+			Time:    50 * time.Millisecond,
+			Timeout: 1 * time.Second,
+		},
+	}
+	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, clientOptions)
+	defer func() {
+		client.Close()
+		server.stop()
+		cancel()
+	}()
+
+	if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil {
+		t.Fatalf("client.NewStream() failed: %v", err)
+	}
+
+	timeout := time.NewTimer(4 * time.Second)
+	select {
+	case <-client.Error():
+		if !timeout.Stop() {
+			<-timeout.C
+		}
+		if reason := client.GetGoAwayReason(); reason != GoAwayTooManyPings {
+			t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayTooManyPings)
+		}
+	case <-timeout.C:
+		t.Fatalf("client transport still healthy; expected GoAway from the server.")
+	}
+
+	// Make sure the client transport is not healthy.
+	if _, err := client.NewStream(context.Background(), &CallHdr{}); err == nil {
+		t.Fatal("client.NewStream() should have failed, but succeeded")
+	}
+}
+
+// TestKeepaliveServerEnforcementWithObeyingClientNoRPC verifies that the
+// server does not close a client transport (with no active streams) which
+// sends keepalive pings in accordance with the configured keepalive
+// EnforcementPolicy.
+func TestKeepaliveServerEnforcementWithObeyingClientNoRPC(t *testing.T) {
+	serverConfig := &ServerConfig{
+		KeepalivePolicy: keepalive.EnforcementPolicy{
+			MinTime:             100 * time.Millisecond,
+			PermitWithoutStream: true,
+		},
+	}
+	clientOptions := ConnectOptions{
+		KeepaliveParams: keepalive.ClientParameters{
+			Time:                101 * time.Millisecond,
+			Timeout:             1 * time.Second,
+			PermitWithoutStream: true,
+		},
+	}
+	server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions)
+	defer func() {
+		client.Close()
+		server.stop()
+		cancel()
+	}()
+
+	// Give keepalive enough time.
+	time.Sleep(3 * time.Second)
+
+	// Make sure the client transport is healthy.
+	if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil {
+		t.Fatalf("client.NewStream() failed: %v", err)
+	}
+}
+
+// TestKeepaliveServerEnforcementWithObeyingClientWithRPC verifies that the
+// server does not close a client transport (with active streams) which
+// sends keepalive pings in accordance with the configured keepalive
+// EnforcementPolicy.
+func TestKeepaliveServerEnforcementWithObeyingClientWithRPC(t *testing.T) {
+	serverConfig := &ServerConfig{
+		KeepalivePolicy: keepalive.EnforcementPolicy{
+			MinTime: 100 * time.Millisecond,
+		},
+	}
+	clientOptions := ConnectOptions{
+		KeepaliveParams: keepalive.ClientParameters{
+			Time:    101 * time.Millisecond,
+			Timeout: 1 * time.Second,
+		},
+	}
+	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, clientOptions)
+	defer func() {
+		client.Close()
+		server.stop()
+		cancel()
+	}()
+
+	if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil {
+		t.Fatalf("client.NewStream() failed: %v", err)
+	}
+
+	// Give keepalive enough time.
+	time.Sleep(3 * time.Second)
+
+	// Make sure the client transport is healthy.
+	if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil {
+		t.Fatalf("client.NewStream() failed: %v", err)
+	}
+}
+
+// TestKeepaliveServerEnforcementWithDormantKeepaliveOnClient verifies that the
+// server does not close a client transport that has been configured to send
+// more pings than allowed by the server's EnforcementPolicy. This client
+// transport does not have any active streams and `PermitWithoutStream` is set
+// to false. This should ensure that the keepalive functionality on the client
+// side enters a dormant state.
+func TestKeepaliveServerEnforcementWithDormantKeepaliveOnClient(t *testing.T) {
+	serverConfig := &ServerConfig{
+		KeepalivePolicy: keepalive.EnforcementPolicy{
+			MinTime: 2 * time.Second,
+		},
+	}
+	clientOptions := ConnectOptions{
+		KeepaliveParams: keepalive.ClientParameters{
+			Time:    50 * time.Millisecond,
+			Timeout: 1 * time.Second,
+		},
+	}
+	server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions)
+	defer func() {
+		client.Close()
+		server.stop()
+		cancel()
+	}()
+
+	// No active streams on the client. Give keepalive enough time.
+	time.Sleep(5 * time.Second)
+
+	// Make sure the client transport is healthy.
+	if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil {
+		t.Fatalf("client.NewStream() failed: %v", err)
+	}
+}
+
+// TestTCPUserTimeout tests that the TCP_USER_TIMEOUT socket option is set to
+// the keepalive timeout, as detailed in proposal A18.
+func TestTCPUserTimeout(t *testing.T) {
+	tests := []struct {
+		time        time.Duration
+		timeout     time.Duration
+		wantTimeout time.Duration
+	}{
+		{
+			10 * time.Second,
+			10 * time.Second,
+			10 * 1000 * time.Millisecond,
+		},
+		{
+			0,
+			0,
+			0,
+		},
+	}
+	for _, tt := range tests {
+		server, client, cancel := setUpWithOptions(
+			t,
+			0,
+			&ServerConfig{
+				KeepaliveParams: keepalive.ServerParameters{
+					Time:    tt.timeout,
+					Timeout: tt.timeout,
+				},
+			},
+			normal,
+			ConnectOptions{
+				KeepaliveParams: keepalive.ClientParameters{
+					Time:    tt.time,
+					Timeout: tt.timeout,
+				},
+			},
+		)
+		defer func() {
+			client.Close()
+			server.stop()
+			cancel()
+		}()
+
+		stream, err := client.NewStream(context.Background(), &CallHdr{})
+		if err != nil {
+			t.Fatalf("client.NewStream() failed: %v", err)
+		}
+		client.CloseStream(stream, io.EOF)
+
+		opt, err := syscall.GetTCPUserTimeout(client.conn)
+		if err != nil {
+			t.Fatalf("syscall.GetTCPUserTimeout() failed: %v", err)
+		}
+		if opt < 0 {
+			t.Skipf("skipping test on unsupported environment")
+		}
+		if gotTimeout := time.Duration(opt) * time.Millisecond; gotTimeout != tt.wantTimeout {
+			t.Fatalf("syscall.GetTCPUserTimeout() = %d, want %d", gotTimeout, tt.wantTimeout)
+		}
+	}
+}
diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go
index 067d588c0..c5ee74848 100644
--- a/internal/transport/transport_test.go
+++ b/internal/transport/transport_test.go
@@ -38,9 +38,7 @@ import (
 	"golang.org/x/net/http2/hpack"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/internal/leakcheck"
-	"google.golang.org/grpc/internal/syscall"
 	"google.golang.org/grpc/internal/testutils"
-	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/status"
 )
 
@@ -504,515 +502,6 @@ func TestInflightStreamClosing(t *testing.T) {
 	}
 }
 
-// TestMaxConnectionIdle tests that a server will send GoAway to an idle
-// client. An idle client is one who doesn't make any RPC calls for a duration
-// of MaxConnectionIdle time.
-func TestMaxConnectionIdle(t *testing.T) { - serverConfig := &ServerConfig{ - KeepaliveParams: keepalive.ServerParameters{ - MaxConnectionIdle: 2 * time.Second, - }, - } - server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) - defer func() { - client.Close() - server.stop() - cancel() - }() - - stream, err := client.NewStream(context.Background(), &CallHdr{}) - if err != nil { - t.Fatalf("client.NewStream() failed: %v", err) - } - client.CloseStream(stream, io.EOF) - - // Wait for the server's MaxConnectionIdle timeout to kick in, and for it - // to send a GoAway. - timeout := time.NewTimer(time.Second * 4) - select { - case <-client.Error(): - if !timeout.Stop() { - <-timeout.C - } - if reason := client.GetGoAwayReason(); reason != GoAwayNoReason { - t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayNoReason) - } - case <-timeout.C: - t.Fatalf("MaxConnectionIdle timeout expired, expected a GoAway from the server.") - } -} - -// TestMaxConenctionIdleBusyClient tests that a server will not send GoAway to -// a busy client. -func TestMaxConnectionIdleBusyClient(t *testing.T) { - serverConfig := &ServerConfig{ - KeepaliveParams: keepalive.ServerParameters{ - MaxConnectionIdle: 2 * time.Second, - }, - } - server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) - defer func() { - client.Close() - server.stop() - cancel() - }() - - _, err := client.NewStream(context.Background(), &CallHdr{}) - if err != nil { - t.Fatalf("client.NewStream() failed: %v", err) - } - - // Wait for double the MaxConnectionIdle time to make sure the server does - // not send a GoAway, as the client has an open stream. - timeout := time.NewTimer(time.Second * 4) - select { - case <-client.GoAway(): - if !timeout.Stop() { - <-timeout.C - } - t.Fatalf("A non-idle client received a GoAway.") - case <-timeout.C: - } -} - -// TestMaxConnectionAge tests that a server will send GoAway after a duration -// of MaxConnectionAge. -func TestMaxConnectionAge(t *testing.T) { - serverConfig := &ServerConfig{ - KeepaliveParams: keepalive.ServerParameters{ - MaxConnectionAge: 1 * time.Second, - MaxConnectionAgeGrace: 1 * time.Second, - }, - } - server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) - defer func() { - client.Close() - server.stop() - cancel() - }() - - _, err := client.NewStream(context.Background(), &CallHdr{}) - if err != nil { - t.Fatalf("client.NewStream() failed: %v", err) - } - - // Wait for the server's MaxConnectionAge timeout to kick in, and for it - // to send a GoAway. - timeout := time.NewTimer(4 * time.Second) - select { - case <-client.Error(): - if !timeout.Stop() { - <-timeout.C - } - if reason := client.GetGoAwayReason(); reason != GoAwayNoReason { - t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayNoReason) - } - case <-timeout.C: - t.Fatalf("MaxConnectionAge timeout expired, expected a GoAway from the server.") - } -} - -const ( - defaultWriteBufSize = 32 * 1024 - defaultReadBufSize = 32 * 1024 -) - -// TestKeepaliveServerClosesUnresponsiveClient tests that a server closes -// the connection with a client that doesn't respond to keepalive pings. -// -// This test creates a regular net.Conn connection to the server and sends the -// clientPreface and the initial Settings frame, and then remains unresponsive. 
-func TestKeepaliveServerClosesUnresponsiveClient(t *testing.T) { - serverConfig := &ServerConfig{ - KeepaliveParams: keepalive.ServerParameters{ - Time: 1 * time.Second, - Timeout: 1 * time.Second, - }, - } - server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) - defer func() { - client.Close() - server.stop() - cancel() - }() - - addr := server.addr() - conn, err := net.Dial("tcp", addr) - if err != nil { - t.Fatalf("net.Dial(tcp, %v) failed: %v", addr, err) - } - defer conn.Close() - - if n, err := conn.Write(clientPreface); err != nil || n != len(clientPreface) { - t.Fatalf("conn.Write(clientPreface) failed: n=%v, err=%v", n, err) - } - framer := newFramer(conn, defaultWriteBufSize, defaultReadBufSize, 0) - if err := framer.fr.WriteSettings(http2.Setting{}); err != nil { - t.Fatal("framer.WriteSettings(http2.Setting{}) failed:", err) - } - framer.writer.Flush() - - // We read from the net.Conn till we get an error, which is expected when - // the server closes the connection as part of the keepalive logic. - errCh := make(chan error) - go func() { - b := make([]byte, 24) - for { - if _, err = conn.Read(b); err != nil { - errCh <- err - return - } - } - }() - - // Server waits for KeepaliveParams.Time seconds before sending out a ping, - // and then waits for KeepaliveParams.Timeout for a ping ack. - timeout := time.NewTimer(4 * time.Second) - select { - case err := <-errCh: - if err != io.EOF { - t.Fatalf("client.Read(_) = _,%v, want io.EOF", err) - - } - case <-timeout.C: - t.Fatalf("keepalive timeout expired, server should have closed the connection.") - } -} - -// TestKeepaliveServerWithResponsiveClient tests that a server doesn't close -// the connection with a client that responds to keepalive pings. -func TestKeepaliveServerWithResponsiveClient(t *testing.T) { - serverConfig := &ServerConfig{ - KeepaliveParams: keepalive.ServerParameters{ - Time: 1 * time.Second, - Timeout: 1 * time.Second, - }, - } - server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) - defer func() { - client.Close() - server.stop() - cancel() - }() - - // Give keepalive logic some time by sleeping. - time.Sleep(4 * time.Second) - - // Make sure the client transport is healthy. - if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) - } -} - -// TestKeepaliveClientClosesUnresponsiveServer creates a server which does not -// respond to keepalive pings, and makes sure that the client closes the -// transport once the keepalive logic kicks in. Here, we set the -// `PermitWithoutStream` parameter to true which ensures that the keepalive -// logic is running even without any active streams. -func TestKeepaliveClientClosesUnresponsiveServer(t *testing.T) { - connCh := make(chan net.Conn, 1) - client, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{ - Time: 1 * time.Second, - Timeout: 1 * time.Second, - PermitWithoutStream: true, - }}, connCh) - defer cancel() - defer client.Close() - - conn, ok := <-connCh - if !ok { - t.Fatalf("Server didn't return connection object") - } - defer conn.Close() - - // Sleep for keepalive to close the connection. - time.Sleep(4 * time.Second) - - // Make sure the client transport is not healthy. 
- if _, err := client.NewStream(context.Background(), &CallHdr{}); err == nil { - t.Fatal("client.NewStream() should have failed, but succeeded") - } -} - -// TestKeepaliveClientOpenWithUnresponsiveServer creates a server which does -// not respond to keepalive pings, and makes sure that the client does not -// close the transport. Here, we do not set the `PermitWithoutStream` parameter -// to true which ensures that the keepalive logic is turned off without any -// active streams, and therefore the transport stays open. -func TestKeepaliveClientOpenWithUnresponsiveServer(t *testing.T) { - connCh := make(chan net.Conn, 1) - client, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{ - Time: 1 * time.Second, - Timeout: 1 * time.Second, - }}, connCh) - defer cancel() - defer client.Close() - - conn, ok := <-connCh - if !ok { - t.Fatalf("Server didn't return connection object") - } - defer conn.Close() - - // Give keepalive some time. - time.Sleep(4 * time.Second) - - // Make sure the client transport is healthy. - if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) - } -} - -// TestKeepaliveClientClosesWithActiveStreams creates a server which does not -// respond to keepalive pings, and makes sure that the client closes the -// transport even when there is an active stream. -func TestKeepaliveClientClosesWithActiveStreams(t *testing.T) { - connCh := make(chan net.Conn, 1) - client, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{ - Time: 1 * time.Second, - Timeout: 1 * time.Second, - }}, connCh) - defer cancel() - defer client.Close() - - conn, ok := <-connCh - if !ok { - t.Fatalf("Server didn't return connection object") - } - defer conn.Close() - - // Create a stream, but send no data on it. - if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) - } - - // Give keepalive some time. - time.Sleep(4 * time.Second) - - // Make sure the client transport is not healthy. - if _, err := client.NewStream(context.Background(), &CallHdr{}); err == nil { - t.Fatal("client.NewStream() should have failed, but succeeded") - } -} - -// TestKeepaliveClientStaysHealthyWithResponsiveServer creates a server which -// responds to keepalive pings, and makes sure than a client transport stays -// healthy without any active streams. -func TestKeepaliveClientStaysHealthyWithResponsiveServer(t *testing.T) { - server, client, cancel := setUpWithOptions(t, 0, &ServerConfig{}, normal, ConnectOptions{ - KeepaliveParams: keepalive.ClientParameters{ - Time: 1 * time.Second, - Timeout: 1 * time.Second, - PermitWithoutStream: true, - }}) - defer func() { - client.Close() - server.stop() - cancel() - }() - - // Give keepalive some time. - time.Sleep(4 * time.Second) - - // Make sure the client transport is healthy. - if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) - } -} - -// TestKeepaliveServerEnforcementWithAbusiveClientNoRPC verifies that the -// server closes a client transport when it sends too many keepalive pings -// (when there are no active streams), based on the configured -// EnforcementPolicy. 
-func TestKeepaliveServerEnforcementWithAbusiveClientNoRPC(t *testing.T) { - serverConfig := &ServerConfig{ - KeepalivePolicy: keepalive.EnforcementPolicy{ - MinTime: 2 * time.Second, - }, - } - clientOptions := ConnectOptions{ - KeepaliveParams: keepalive.ClientParameters{ - Time: 50 * time.Millisecond, - Timeout: 1 * time.Second, - PermitWithoutStream: true, - }, - } - server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions) - defer func() { - client.Close() - server.stop() - cancel() - }() - - timeout := time.NewTimer(4 * time.Second) - select { - case <-client.Error(): - if !timeout.Stop() { - <-timeout.C - } - if reason := client.GetGoAwayReason(); reason != GoAwayTooManyPings { - t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayTooManyPings) - } - case <-timeout.C: - t.Fatalf("client transport still healthy; expected GoAway from the server.") - } - - // Make sure the client transport is not healthy. - if _, err := client.NewStream(context.Background(), &CallHdr{}); err == nil { - t.Fatal("client.NewStream() should have failed, but succeeded") - } -} - -// TestKeepaliveServerEnforcementWithAbusiveClientWithRPC verifies that the -// server closes a client transport when it sends too many keepalive pings -// (even when there is an active stream), based on the configured -// EnforcementPolicy. -func TestKeepaliveServerEnforcementWithAbusiveClientWithRPC(t *testing.T) { - serverConfig := &ServerConfig{ - KeepalivePolicy: keepalive.EnforcementPolicy{ - MinTime: 2 * time.Second, - }, - } - clientOptions := ConnectOptions{ - KeepaliveParams: keepalive.ClientParameters{ - Time: 50 * time.Millisecond, - Timeout: 1 * time.Second, - }, - } - server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, clientOptions) - defer func() { - client.Close() - server.stop() - cancel() - }() - - if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) - } - - timeout := time.NewTimer(4 * time.Second) - select { - case <-client.Error(): - if !timeout.Stop() { - <-timeout.C - } - if reason := client.GetGoAwayReason(); reason != GoAwayTooManyPings { - t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayTooManyPings) - } - case <-timeout.C: - t.Fatalf("client transport still healthy; expected GoAway from the server.") - } - - // Make sure the client transport is not healthy. - if _, err := client.NewStream(context.Background(), &CallHdr{}); err == nil { - t.Fatal("client.NewStream() should have failed, but succeeded") - } -} - -// TestKeepaliveServerEnforcementWithObeyingClientNoRPC verifies that the -// server does not close a client transport (with no active streams) which -// sends keepalive pings in accordance to the configured keepalive -// EnforcementPolicy. -func TestKeepaliveServerEnforcementWithObeyingClientNoRPC(t *testing.T) { - serverConfig := &ServerConfig{ - KeepalivePolicy: keepalive.EnforcementPolicy{ - MinTime: 100 * time.Millisecond, - PermitWithoutStream: true, - }, - } - clientOptions := ConnectOptions{ - KeepaliveParams: keepalive.ClientParameters{ - Time: 101 * time.Millisecond, - Timeout: 1 * time.Second, - PermitWithoutStream: true, - }, - } - server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions) - defer func() { - client.Close() - server.stop() - cancel() - }() - - // Give keepalive enough time. - time.Sleep(3 * time.Second) - - // Make sure the client transport is healthy. 
- if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) - } -} - -// TestKeepaliveServerEnforcementWithObeyingClientWithRPC verifies that the -// server does not close a client transport (with active streams) which -// sends keepalive pings in accordance to the configured keepalive -// EnforcementPolicy. -func TestKeepaliveServerEnforcementWithObeyingClientWithRPC(t *testing.T) { - serverConfig := &ServerConfig{ - KeepalivePolicy: keepalive.EnforcementPolicy{ - MinTime: 100 * time.Millisecond, - }, - } - clientOptions := ConnectOptions{ - KeepaliveParams: keepalive.ClientParameters{ - Time: 101 * time.Millisecond, - Timeout: 1 * time.Second, - }, - } - server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, clientOptions) - defer func() { - client.Close() - server.stop() - cancel() - }() - - if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) - } - - // Give keepalive enough time. - time.Sleep(3 * time.Second) - - // Make sure the client transport is healthy. - if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) - } -} - -// TestKeepaliveServerEnforcementWithDormantKeepaliveOnClient verifies that the -// server does not closes a client transport, which has been configured to send -// more pings than allowed by the server's EnforcementPolicy. This client -// transport does not have any active streams and `PermitWithoutStream` is set -// to false. This should ensure that the keepalive functionality on the client -// side enters a dormant state. -func TestKeepaliveServerEnforcementWithDormantKeepaliveOnClient(t *testing.T) { - serverConfig := &ServerConfig{ - KeepalivePolicy: keepalive.EnforcementPolicy{ - MinTime: 2 * time.Second, - }, - } - clientOptions := ConnectOptions{ - KeepaliveParams: keepalive.ClientParameters{ - Time: 50 * time.Millisecond, - Timeout: 1 * time.Second, - }, - } - server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions) - defer func() { - client.Close() - server.stop() - cancel() - }() - - // No active streams on the client. Give keepalive enough time. - time.Sleep(5 * time.Second) - - // Make sure the client transport is healthy. 
- if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) - } -} - func TestClientSendAndReceive(t *testing.T) { server, ct, cancel := setUp(t, 0, math.MaxUint32, normal) defer cancel() @@ -2320,61 +1809,3 @@ func TestHeaderTblSize(t *testing.T) { t.Fatalf("expected len(limits) = 2 within 10s, got != 2") } } - -// TestTCPUserTimeout tests that the TCP_USER_TIMEOUT socket option is set to the -// keepalive timeout, as detailed in proposal A18 -func TestTCPUserTimeout(t *testing.T) { - tests := []struct { - time time.Duration - timeout time.Duration - }{ - { - 10 * time.Second, - 10 * time.Second, - }, - { - 0, - 0, - }, - } - for _, tt := range tests { - server, client, cancel := setUpWithOptions( - t, - 0, - &ServerConfig{ - KeepaliveParams: keepalive.ServerParameters{ - Time: tt.timeout, - Timeout: tt.timeout, - }, - }, - normal, - ConnectOptions{ - KeepaliveParams: keepalive.ClientParameters{ - Time: tt.time, - Timeout: tt.timeout, - }, - }, - ) - defer cancel() - defer server.stop() - defer client.Close() - - stream, err := client.NewStream(context.Background(), &CallHdr{}) - if err != nil { - t.Fatalf("Client failed to create RPC request: %v", err) - } - client.closeStream(stream, io.EOF, true, http2.ErrCodeCancel, nil, nil, false) - - opt, err := syscall.GetTCPUserTimeout(client.conn) - if err != nil { - t.Fatalf("GetTCPUserTimeout error: %v", err) - } - if opt < 0 { - t.Skipf("skipping test on unsupported environment") - } - if timeoutMS := int(tt.timeout / time.Millisecond); timeoutMS != opt { - t.Fatalf("wrong TCP_USER_TIMEOUT set on conn. expected %d. got %d", - timeoutMS, opt) - } - } -}