benchmark: Add support for Poisson load in benchmark client (#6378)

Zach Reyes, 2023-06-22 14:24:52 -04:00, committed by GitHub
parent dd350d02da
commit a9c79427b1
2 changed files with 105 additions and 45 deletions

benchmark/worker/benchmark_client.go

@@ -32,6 +32,7 @@ import (
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/credentials/insecure"
+	"google.golang.org/grpc/internal/grpcrand"
 	"google.golang.org/grpc/internal/syscall"
 	"google.golang.org/grpc/status"
 	"google.golang.org/grpc/testdata"
@@ -185,11 +186,21 @@ func performRPCs(config *testpb.ClientConfig, conns []*grpc.ClientConn, bc *benc
 		}
 	}
 
-	// TODO add open loop distribution.
-	switch config.LoadParams.Load.(type) {
+	// If set, perform an open loop, if not perform a closed loop. An open loop
+	// asynchronously starts RPCs based on random start times derived from a
+	// Poisson distribution. A closed loop performs RPCs in a blocking manner,
+	// and runs the next RPC after the previous RPC completes and returns.
+	var poissonLambda *float64
+	switch t := config.LoadParams.Load.(type) {
 	case *testpb.LoadParams_ClosedLoop:
 	case *testpb.LoadParams_Poisson:
-		return status.Errorf(codes.Unimplemented, "unsupported load params: %v", config.LoadParams)
+		if t.Poisson == nil {
+			return status.Errorf(codes.InvalidArgument, "poisson is nil, needs to be set")
+		}
+		if t.Poisson.OfferedLoad <= 0 {
+			return status.Errorf(codes.InvalidArgument, "poisson.offered is <= 0: %v, needs to be >0", t.Poisson.OfferedLoad)
+		}
+		poissonLambda = &t.Poisson.OfferedLoad
 	default:
 		return status.Errorf(codes.InvalidArgument, "unknown load params: %v", config.LoadParams)
 	}
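The OfferedLoad value is the Poisson rate lambda in RPCs per second; the client realizes it by spacing RPC starts with exponentially distributed gaps of mean 1/lambda seconds, which is what makes the arrival counts in any window Poisson. A minimal standalone sketch of that sampling, using the standard library's math/rand in place of the internal grpcrand wrapper (the constants are illustrative):

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	const lambda = 100.0 // assumed offered load: 100 RPCs/sec
	const n = 200000
	var sum float64
	for i := 0; i < n; i++ {
		// rand.ExpFloat64 draws from Exp(1); dividing by lambda rescales
		// the gap to rate lambda, i.e. mean 1/lambda seconds.
		sum += rand.ExpFloat64() / lambda
	}
	fmt.Printf("mean gap %.5fs, expected %.5fs\n", sum/float64(n), 1/lambda)
}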
@@ -198,11 +209,9 @@ func performRPCs(config *testpb.ClientConfig, conns []*grpc.ClientConn, bc *benc
 
 	switch config.RpcType {
 	case testpb.RpcType_UNARY:
-		bc.doCloseLoopUnary(conns, rpcCountPerConn, payloadReqSize, payloadRespSize)
-		// TODO open loop.
+		bc.unaryLoop(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, poissonLambda)
 	case testpb.RpcType_STREAMING:
-		bc.doCloseLoopStreaming(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, payloadType)
-		// TODO open loop.
+		bc.streamingLoop(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, payloadType, poissonLambda)
 	default:
 		return status.Errorf(codes.InvalidArgument, "unknown rpc type: %v", config.RpcType)
 	}
@@ -246,7 +255,7 @@ func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error)
 	return bc, nil
 }
 
-func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int) {
+func (bc *benchmarkClient) unaryLoop(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, poissonLambda *float64) {
 	for ic, conn := range conns {
 		client := testgrpc.NewBenchmarkServiceClient(conn)
 		// For each connection, create rpcCountPerConn goroutines to do rpc.
@@ -260,36 +269,44 @@ func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPe
 			// Now relying on worker client to reserve time to do warm up.
 			// The worker client needs to wait for some time after client is created,
 			// before starting benchmark.
-			done := make(chan bool)
-			for {
-				go func() {
-					start := time.Now()
-					if err := benchmark.DoUnaryCall(client, reqSize, respSize); err != nil {
-						select {
-						case <-bc.stop:
-						case done <- false:
-						}
-						return
-					}
-					elapse := time.Since(start)
-					bc.lockingHistograms[idx].add(int64(elapse))
-					select {
-					case <-bc.stop:
-					case done <- true:
-					}
-				}()
-				select {
-				case <-bc.stop:
-					return
-				case <-done:
-				}
-			}
+			if poissonLambda == nil { // Closed loop.
+				done := make(chan bool)
+				for {
+					go func() {
+						start := time.Now()
+						if err := benchmark.DoUnaryCall(client, reqSize, respSize); err != nil {
+							select {
+							case <-bc.stop:
+							case done <- false:
+							}
+							return
+						}
+						elapse := time.Since(start)
+						bc.lockingHistograms[idx].add(int64(elapse))
+						select {
+						case <-bc.stop:
+						case done <- true:
+						}
+					}()
+					select {
+					case <-bc.stop:
+						return
+					case <-done:
+					}
+				}
+			} else { // Open loop.
+				timeBetweenRPCs := time.Duration((grpcrand.ExpFloat64() / *poissonLambda) * float64(time.Second))
+				time.AfterFunc(timeBetweenRPCs, func() {
+					bc.poissonUnary(client, idx, reqSize, respSize, *poissonLambda)
+				})
+			}
 		}(idx)
 	}
 }
 
-func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, payloadType string) {
+func (bc *benchmarkClient) streamingLoop(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, payloadType string, poissonLambda *float64) {
 	var doRPC func(testgrpc.BenchmarkService_StreamingCallClient, int, int) error
 	if payloadType == "bytebuf" {
 		doRPC = benchmark.DoByteBufStreamingRoundTrip
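In the open-loop arm, each worker slot arms a one-shot timer instead of entering the blocking for-loop, so the launch schedule is decoupled from RPC latency. A rough standalone sketch of that first arming step (the slot count and the printed body are illustrative, not from the patch):

package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"
)

func main() {
	const lambda = 5.0 // assumed per-slot offered load, RPCs/sec
	var wg sync.WaitGroup
	for idx := 0; idx < 3; idx++ { // stands in for rpcCountPerConn slots
		wg.Add(1)
		idx := idx // capture loop variable for the timer callback
		// Schedule this slot's first RPC at an exponential offset, as
		// unaryLoop's open-loop branch does with grpcrand.ExpFloat64.
		gap := time.Duration((rand.ExpFloat64() / lambda) * float64(time.Second))
		time.AfterFunc(gap, func() {
			defer wg.Done()
			fmt.Printf("slot %d fires after %v\n", idx, gap)
		})
	}
	wg.Wait()
}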
@@ -304,33 +321,69 @@ func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCou
 			if err != nil {
 				logger.Fatalf("%v.StreamingCall(_) = _, %v", c, err)
 			}
-			// Create histogram for each goroutine.
 			idx := ic*rpcCountPerConn + j
 			bc.lockingHistograms[idx].histogram = stats.NewHistogram(bc.histogramOptions)
-			// Start goroutine on the created mutex and histogram.
-			go func(idx int) {
-				// TODO: do warm up if necessary.
-				// Now relying on worker client to reserve time to do warm up.
-				// The worker client needs to wait for some time after client is created,
-				// before starting benchmark.
-				for {
-					start := time.Now()
-					if err := doRPC(stream, reqSize, respSize); err != nil {
-						return
-					}
-					elapse := time.Since(start)
-					bc.lockingHistograms[idx].add(int64(elapse))
-					select {
-					case <-bc.stop:
-						return
-					default:
-					}
-				}
-			}(idx)
+			if poissonLambda == nil { // Closed loop.
+				// Start goroutine on the created mutex and histogram.
+				go func(idx int) {
+					// TODO: do warm up if necessary.
+					// Now relying on worker client to reserve time to do warm up.
+					// The worker client needs to wait for some time after client is created,
+					// before starting benchmark.
+					for {
+						start := time.Now()
+						if err := doRPC(stream, reqSize, respSize); err != nil {
+							return
+						}
+						elapse := time.Since(start)
+						bc.lockingHistograms[idx].add(int64(elapse))
+						select {
+						case <-bc.stop:
+							return
+						default:
+						}
+					}
+				}(idx)
+			} else { // Open loop.
+				timeBetweenRPCs := time.Duration((grpcrand.ExpFloat64() / *poissonLambda) * float64(time.Second))
+				time.AfterFunc(timeBetweenRPCs, func() {
+					bc.poissonStreaming(stream, idx, reqSize, respSize, *poissonLambda, doRPC)
+				})
+			}
 		}
 	}
 }
 
+func (bc *benchmarkClient) poissonUnary(client testgrpc.BenchmarkServiceClient, idx int, reqSize int, respSize int, lambda float64) {
+	go func() {
+		start := time.Now()
+		if err := benchmark.DoUnaryCall(client, reqSize, respSize); err != nil {
+			return
+		}
+		elapse := time.Since(start)
+		bc.lockingHistograms[idx].add(int64(elapse))
+	}()
+	timeBetweenRPCs := time.Duration((grpcrand.ExpFloat64() / lambda) * float64(time.Second))
+	time.AfterFunc(timeBetweenRPCs, func() {
+		bc.poissonUnary(client, idx, reqSize, respSize, lambda)
+	})
+}
+
+func (bc *benchmarkClient) poissonStreaming(stream testgrpc.BenchmarkService_StreamingCallClient, idx int, reqSize int, respSize int, lambda float64, doRPC func(testgrpc.BenchmarkService_StreamingCallClient, int, int) error) {
+	go func() {
+		start := time.Now()
+		if err := doRPC(stream, reqSize, respSize); err != nil {
+			return
+		}
+		elapse := time.Since(start)
+		bc.lockingHistograms[idx].add(int64(elapse))
+	}()
+	timeBetweenRPCs := time.Duration((grpcrand.ExpFloat64() / lambda) * float64(time.Second))
+	time.AfterFunc(timeBetweenRPCs, func() {
+		bc.poissonStreaming(stream, idx, reqSize, respSize, lambda, doRPC)
+	})
+}
+
 // getStats returns the stats for benchmark client.
 // It resets lastResetTime and all histograms if argument reset is true.
 func (bc *benchmarkClient) getStats(reset bool) *testpb.ClientStats {
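poissonUnary and poissonStreaming re-arm the next timer immediately rather than after the RPC returns, so a slow call cannot push back later arrivals; the self-call also cannot grow the stack, because each step runs on a fresh timer goroutine. A standalone sketch of the same pattern (the poissonLoop name and the stop channel are assumptions; the patch instead stops when the worker tears the benchmark client down):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// poissonLoop generalizes the patch's rescheduling pattern: fire runs
// asynchronously while the next arrival is armed right away.
func poissonLoop(lambda float64, fire func(), stop <-chan struct{}) {
	go fire()
	gap := time.Duration((rand.ExpFloat64() / lambda) * float64(time.Second))
	time.AfterFunc(gap, func() {
		select {
		case <-stop: // assumed shutdown hook, not in the patch
		default:
			poissonLoop(lambda, fire, stop)
		}
	})
}

func main() {
	stop := make(chan struct{})
	poissonLoop(20, func() { fmt.Println("rpc at", time.Now().Format("15:04:05.000")) }, stop)
	time.Sleep(300 * time.Millisecond)
	close(stop)
}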

internal/grpcrand/grpcrand.go

@@ -80,6 +80,13 @@ func Uint32() uint32 {
 	return r.Uint32()
 }
 
+// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source.
+func ExpFloat64() float64 {
+	mu.Lock()
+	defer mu.Unlock()
+	return r.ExpFloat64()
+}
+
 // Shuffle implements rand.Shuffle on the grpcrand global source.
 var Shuffle = func(n int, f func(int, int)) {
 	mu.Lock()
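The mutex matters because a *rand.Rand is not safe for concurrent use, and many benchmark goroutines draw gaps at once. A minimal mirror of the wrapper's pattern (the names and fixed seed are for the sketch only):

package main

import (
	"math/rand"
	"sync"
)

var (
	mu sync.Mutex
	r  = rand.New(rand.NewSource(42)) // one seeded source behind a lock
)

// expFloat64 mirrors grpcrand.ExpFloat64: serialize access to r.
func expFloat64() float64 {
	mu.Lock()
	defer mu.Unlock()
	return r.ExpFloat64()
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = expFloat64() // safe to call from many goroutines
		}()
	}
	wg.Wait()
}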