Merge pull request #85152 from mikedanese/tokbench

report cache misses in cached token authenticator benchmark

Kubernetes-commit: 570572b38773829e2841033967f7f7364f56206e
This commit is contained in:
Kubernetes Publisher 2019-11-13 14:51:08 -08:00
commit 5ec070f50a
1 changed file with 32 additions and 22 deletions

View File

@@ -26,6 +26,7 @@ import (
mathrand "math/rand"
"reflect"
"sync"
"sync/atomic"
"testing"
"time"
@@ -194,6 +195,11 @@ func newSingleBenchmark(tokens, threads int) *singleBenchmark {
// singleBenchmark collects all the state needed to run a benchmark. The
// question this benchmark answers is, "what's the average latency added by the
// cache for N concurrent tokens?"
//
// Given the size of the key range constructed by this test, the default go
// benchtime of 1 second is often inadequate to test caching and expiration
// behavior. A benchtime of 10 to 30 seconds is adequate to stress these
// code paths.
type singleBenchmark struct {
threadCount int
// These token.* variables are set by makeTokens()
@@ -204,11 +210,6 @@ type singleBenchmark struct {
tokenToAuds map[string]authenticator.Audiences
// a list makes it easy to select a random one
tokens []string
// Simulate slowness, qps limit, external service limitation, etc
chokepoint chan struct{}
b *testing.B
}
func (s *singleBenchmark) makeTokens() {
@@ -228,12 +229,12 @@ func (s *singleBenchmark) makeTokens() {
for i := 0; i < mathrand.Intn(4); i++ {
auds = append(auds, string(uuid.NewUUID()))
}
choice := mathrand.Intn(1000)
choice := mathrand.Float64()
switch {
case choice < 900:
case choice < 0.9:
r.ok = true
r.err = nil
case choice < 990:
case choice < 0.99:
r.ok = false
r.err = nil
default:
@@ -249,9 +250,6 @@ func (s *singleBenchmark) makeTokens() {
}
func (s *singleBenchmark) lookup(ctx context.Context, token string) (*authenticator.Response, bool, error) {
s.chokepoint <- struct{}{}
defer func() { <-s.chokepoint }()
time.Sleep(1 * time.Millisecond)
r, ok := s.tokenToResponse[token]
if !ok {
panic("test setup problem")
@@ -272,29 +270,41 @@ func (s *singleBenchmark) run(b *testing.B) {
}
func (s *singleBenchmark) bench(b *testing.B) {
s.b = b
// Simulate slowness, qps limit, external service limitation, etc
const maxInFlight = 40
chokepoint := make(chan struct{}, maxInFlight)
// lookup count
var lookups uint64
a := newWithClock(
authenticator.TokenFunc(s.lookup),
authenticator.TokenFunc(func(ctx context.Context, token string) (*authenticator.Response, bool, error) {
atomic.AddUint64(&lookups, 1)
chokepoint <- struct{}{}
defer func() { <-chokepoint }()
time.Sleep(1 * time.Millisecond)
return s.lookup(ctx, token)
}),
true,
4*time.Second,
500*time.Millisecond,
utilclock.RealClock{},
)
const maxInFlight = 40
s.chokepoint = make(chan struct{}, maxInFlight)
s.b.ResetTimer()
b.ResetTimer()
b.SetParallelism(s.threadCount)
b.RunParallel(func(pb *testing.PB) {
r := mathrand.New(mathrand.NewSource(mathrand.Int63()))
for pb.Next() {
// some problems appear with random
// access, some appear with many
// requests for a single entry, so we
// do both.
s.doAuthForTokenN(r.Intn(len(s.tokens)), a)
// some problems appear with random access, some appear with many
// requests for a single entry, so we do both.
s.doAuthForTokenN(r.Intn(s.tokenCount), a)
s.doAuthForTokenN(0, a)
}
})
b.StopTimer()
b.ReportMetric(float64(lookups)/float64(b.N), "lookups/op")
}