flowcontrol: make TestConfigConsumer less time-consuming

Signed-off-by: Adhityaa Chandrasekar <adtac@google.com>

Kubernetes-commit: c1f3e90969d38f5b8410b67f0640d5df0d098e6b
Author: Adhityaa Chandrasekar
Date:   2021-02-02 11:20:18 +00:00
Committer: Kubernetes Publisher
parent 0d37c2610c
commit ec9a97b26e
3 changed files with 59 additions and 23 deletions
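
The change is mechanical: each previously unconditional t.Logf call in these
fuzz-style tests is wrapped in an "if testDebugLogs" guard, and the outer trial
count in TestConfigConsumer is halved from 20 to 10. t.Logf always formats its
arguments and buffers the output (it is only displayed on failure or under -v),
so guarding at the call site skips both the formatting and the buffering. A
minimal standalone sketch of the pattern follows; only the testDebugLogs
variable comes from this commit, the package name and test body are
illustrative:

package flowcontrolsketch // hypothetical package, for illustration only

import "testing"

// Debug logging is off by default; flip to true locally when a failing
// trial needs the verbose trace.
var testDebugLogs = false

func TestSketch(t *testing.T) {
	for i := 0; i < 1000; i++ {
		result := i * i // stand-in for the real per-iteration work
		if testDebugLogs {
			// Guarding at the call site (rather than inside a logging
			// helper) also skips evaluation of expensive arguments, such
			// as the fcfmt.Fmt(...) pretty-printers used in the real tests.
			t.Logf("iteration %d => %v", i, result)
		}
	}
}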

=== File 1 of 3 ===

@@ -41,6 +41,10 @@ import (
 	"k8s.io/klog/v2"
 )
 
+// Some tests print a lot of debug logs, which slows them down
+// considerably and can even cause them to time out.
+var testDebugLogs = false
+
 func TestMain(m *testing.M) {
 	klog.InitFlags(nil)
 	os.Exit(m.Run())
@@ -138,7 +142,9 @@ func (cqs *ctlrTestQueueSet) StartRequest(ctx context.Context, hashValue uint64,
 	cqs.cts.lock.Lock()
 	defer cqs.cts.lock.Unlock()
 	cqs.countActive++
-	cqs.cts.t.Logf("Queued %q %#+v %#+v for %p QS=%s, countActive:=%d", fsName, descr1, descr2, cqs, cqs.qc.Name, cqs.countActive)
+	if testDebugLogs {
+		cqs.cts.t.Logf("Queued %q %#+v %#+v for %p QS=%s, countActive:=%d", fsName, descr1, descr2, cqs, cqs.qc.Name, cqs.countActive)
+	}
 	return &ctlrTestRequest{cqs, cqs.qc.Name, descr1, descr2}, false
 }
@@ -147,7 +153,9 @@ func (ctr *ctlrTestRequest) Finish(execute func()) bool {
 	ctr.cqs.cts.lock.Lock()
 	defer ctr.cqs.cts.lock.Unlock()
 	ctr.cqs.countActive--
-	ctr.cqs.cts.t.Logf("Finished %#+v %#+v for %p QS=%s, countActive:=%d", ctr.descr1, ctr.descr2, ctr.cqs, ctr.cqs.qc.Name, ctr.cqs.countActive)
+	if testDebugLogs {
+		ctr.cqs.cts.t.Logf("Finished %#+v %#+v for %p QS=%s, countActive:=%d", ctr.descr1, ctr.descr2, ctr.cqs, ctr.cqs.qc.Name, ctr.cqs.countActive)
+	}
 	return ctr.cqs.countActive == 0
 }
@@ -182,7 +190,9 @@ func (cts *ctlrTestState) addHeldRequest(plName string, rd RequestDigest, finish
 	hrs := cts.heldRequestsMap[plName]
 	hrs = append(hrs, heldRequest{rd, finishCh})
 	cts.heldRequestsMap[plName] = hrs
-	cts.t.Logf("Holding %#+v for %s, count:=%d", rd, plName, len(hrs))
+	if testDebugLogs {
+		cts.t.Logf("Holding %#+v for %s, count:=%d", rd, plName, len(hrs))
+	}
 }
 
 func (cts *ctlrTestState) popHeldRequest() (plName string, hr *heldRequest, nCount int) {
@@ -227,7 +237,7 @@ var mandQueueSetNames, exclQueueSetNames = func() (sets.String, sets.String) {
 
 func TestConfigConsumer(t *testing.T) {
 	rngOuter := rand.New(rand.NewSource(1234567890123456789))
-	for i := 1; i <= 20; i++ {
+	for i := 1; i <= 10; i++ {
 		rng := rand.New(rand.NewSource(int64(rngOuter.Uint64())))
 		t.Run(fmt.Sprintf("trial%d:", i), func(t *testing.T) {
 			clientset := clientsetfake.NewSimpleClientset()
@@ -254,8 +264,10 @@ func TestConfigConsumer(t *testing.T) {
 			_, _, desiredPLNames, newBadPLNames := genPLs(rng, trialStep, persistingPLNames, 0)
 			_, _, newFTRs, newCatchAlls := genFSs(t, rng, trialStep, desiredPLNames, newBadPLNames, 0)
 			for j := 0; ; {
-				t.Logf("For %s, desiredPLNames=%#+v", trialStep, desiredPLNames)
-				t.Logf("For %s, newFTRs=%#+v", trialStep, newFTRs)
+				if testDebugLogs {
+					t.Logf("For %s, desiredPLNames=%#+v", trialStep, desiredPLNames)
+					t.Logf("For %s, newFTRs=%#+v", trialStep, newFTRs)
+				}
 				// Check that the latest digestion did the right thing
 				nextPLNames := sets.NewString()
 				for oldPLName := range persistingPLNames {
@@ -277,7 +289,9 @@ func TestConfigConsumer(t *testing.T) {
 				}
 				for plName, hr, nCount := cts.popHeldRequest(); hr != nil; plName, hr, nCount = cts.popHeldRequest() {
 					desired := desiredPLNames.Has(plName) || mandPLs[plName] != nil
-					t.Logf("Releasing held request %#+v, desired=%v, plName=%s, count:=%d", hr.rd, desired, plName, nCount)
+					if testDebugLogs {
+						t.Logf("Releasing held request %#+v, desired=%v, plName=%s, count:=%d", hr.rd, desired, plName, nCount)
+					}
 					close(hr.finishCh)
 				}
 				cts.requestWG.Wait()
@@ -299,17 +313,21 @@ func TestConfigConsumer(t *testing.T) {
 				newPLs, _, desiredPLNames, newBadPLNames = genPLs(rng, trialStep, persistingPLNames, 1+rng.Intn(4))
 				newFSs, _, newFTRs, newCatchAlls = genFSs(t, rng, trialStep, desiredPLNames, newBadPLNames, 1+rng.Intn(6))
-				for _, newPL := range newPLs {
-					t.Logf("For %s, digesting newPL=%s", trialStep, fcfmt.Fmt(newPL))
-				}
-				for _, newFS := range newFSs {
-					t.Logf("For %s, digesting newFS=%s", trialStep, fcfmt.Fmt(newFS))
-				}
+				if testDebugLogs {
+					for _, newPL := range newPLs {
+						t.Logf("For %s, digesting newPL=%s", trialStep, fcfmt.Fmt(newPL))
+					}
+					for _, newFS := range newFSs {
+						t.Logf("For %s, digesting newFS=%s", trialStep, fcfmt.Fmt(newFS))
+					}
+				}
 				_ = ctlr.lockAndDigestConfigObjects(newPLs, newFSs)
 			}
 			for plName, hr, nCount := cts.popHeldRequest(); hr != nil; plName, hr, nCount = cts.popHeldRequest() {
-				desired := desiredPLNames.Has(plName) || mandPLs[plName] != nil
-				t.Logf("Releasing held request %#+v, desired=%v, plName=%s, count:=%d", hr.rd, desired, plName, nCount)
+				if testDebugLogs {
+					desired := desiredPLNames.Has(plName) || mandPLs[plName] != nil
+					t.Logf("Releasing held request %#+v, desired=%v, plName=%s, count:=%d", hr.rd, desired, plName, nCount)
+				}
 				close(hr.finishCh)
 			}
 			cts.requestWG.Wait()
@@ -434,7 +452,9 @@ func checkNewFS(cts *ctlrTestState, rng *rand.Rand, trialName string, ftr *fsTes
 			expectedMatch := matches && ftr.wellFormed && (fsPrecedes(fs, catchAlls[isResource]) || fs.Name == catchAlls[isResource].Name)
 			ctlr.Handle(ctx, rdu, func(matchFS *flowcontrol.FlowSchema, matchPL *flowcontrol.PriorityLevelConfiguration) {
 				matchIsExempt := matchPL.Spec.Type == flowcontrol.PriorityLevelEnablementExempt
-				t.Logf("Considering FlowSchema %s, expectedMatch=%v, isResource=%v: Handle(%#+v) => note(fs=%s, pl=%s, isExempt=%v)", fs.Name, expectedMatch, isResource, rdu, matchFS.Name, matchPL.Name, matchIsExempt)
+				if testDebugLogs {
+					t.Logf("Considering FlowSchema %s, expectedMatch=%v, isResource=%v: Handle(%#+v) => note(fs=%s, pl=%s, isExempt=%v)", fs.Name, expectedMatch, isResource, rdu, matchFS.Name, matchPL.Name, matchIsExempt)
+				}
 				if a := matchFS.Name == fs.Name; expectedMatch != a {
 					t.Errorf("Fail at %s/%s: rd=%#+v, expectedMatch=%v, actualMatch=%v, matchFSName=%q, catchAlls=%#+v", trialName, fs.Name, rdu, expectedMatch, a, matchFS.Name, catchAlls)
 				}
@@ -451,7 +471,9 @@ func checkNewFS(cts *ctlrTestState, rng *rand.Rand, trialName string, ftr *fsTes
 			cts.requestWG.Done()
 		}(matches, isResource, rdu)
 		if rng.Float32() < 0.8 {
-			t.Logf("Immediate request %#+v, plName=%s", rdu, expectedPLName)
+			if testDebugLogs {
+				t.Logf("Immediate request %#+v, plName=%s", rdu, expectedPLName)
+			}
 			close(finishCh)
 		} else {
 			cts.addHeldRequest(expectedPLName, rdu, finishCh)
@@ -511,7 +533,9 @@ func genFSs(t *testing.T, rng *rand.Rand, trial string, goodPLNames, badPLNames
 				catchAlls[true] = ftr.fs
 			}
 		}
-		t.Logf("For trial %s, adding wf=%v FlowSchema %s", trial, ftr.wellFormed, fcfmt.Fmt(ftr.fs))
+		if testDebugLogs {
+			t.Logf("For trial %s, adding wf=%v FlowSchema %s", trial, ftr.wellFormed, fcfmt.Fmt(ftr.fs))
+		}
 	}
 	if n == 0 || rng.Float32() < 0.5 {
 		add(mandFTRCatchAll)
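
A design note on the switch itself: testDebugLogs is a plain package-level
variable, so re-enabling the logs means editing the source and recompiling. If
that became a frequent need, the variable could instead be bound to a test flag
in the TestMain shown in the first hunk above. A hedged sketch of that
alternative (not part of this commit; it additionally requires importing the
flag package):

func TestMain(m *testing.M) {
	klog.InitFlags(nil)
	// Hypothetical: flags registered before m.Run() are picked up when the
	// testing package parses the command line, so this could be enabled with:
	//   go test ./... -args -test-debug-logs
	flag.BoolVar(&testDebugLogs, "test-debug-logs", false, "enable verbose debug logging in tests")
	os.Exit(m.Run())
}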

=== File 2 of 3 ===

@@ -251,7 +251,9 @@ func genFS(t *testing.T, rng *rand.Rand, name string, mayMatchClusterScope bool,
 		ftr.addDigests(skippingRDigests, false)
 		ftr.addDigests(skippingNDigests, false)
 	}
-	t.Logf("Returning name=%s, plRef=%q, wellFormed=%v, matchesAllResourceRequests=%v, matchesAllNonResourceRequests=%v for mayMatchClusterScope=%v", fs.Name, fs.Spec.PriorityLevelConfiguration.Name, ftr.wellFormed, ftr.matchesAllResourceRequests, ftr.matchesAllNonResourceRequests, mayMatchClusterScope)
+	if testDebugLogs {
+		t.Logf("Returning name=%s, plRef=%q, wellFormed=%v, matchesAllResourceRequests=%v, matchesAllNonResourceRequests=%v for mayMatchClusterScope=%v", fs.Name, fs.Spec.PriorityLevelConfiguration.Name, ftr.wellFormed, ftr.matchesAllResourceRequests, ftr.matchesAllNonResourceRequests, mayMatchClusterScope)
+	}
 	return ftr
 }
@@ -342,7 +344,9 @@ func genPolicyRuleWithSubjects(t *testing.T, rng *rand.Rand, pfx string, mayMatc
 		_, _, skippingNRIs = genNonResourceRule(rng, pfx+"-o", false, someMatchesAllNonResourceRequests)
 	}
 	rule := flowcontrol.PolicyRulesWithSubjects{subjects, resourceRules, nonResourceRules}
-	t.Logf("For pfx=%s, mayMatchClusterScope=%v, someMatchesAllResourceRequests=%v, someMatchesAllNonResourceRequests=%v, marr=%v, manrr=%v: generated prws=%s, mu=%s, su=%s, mrr=%s, mnr=%s, srr=%s, snr=%s", pfx, mayMatchClusterScope, someMatchesAllResourceRequests, someMatchesAllNonResourceRequests, matchAllResourceRequests, matchAllNonResourceRequests, fcfmt.Fmt(rule), fcfmt.Fmt(matchingUIs), fcfmt.Fmt(skippingUIs), fcfmt.Fmt(matchingRRIs), fcfmt.Fmt(matchingNRIs), fcfmt.Fmt(skippingRRIs), fcfmt.Fmt(skippingNRIs))
+	if testDebugLogs {
+		t.Logf("For pfx=%s, mayMatchClusterScope=%v, someMatchesAllResourceRequests=%v, someMatchesAllNonResourceRequests=%v, marr=%v, manrr=%v: generated prws=%s, mu=%s, su=%s, mrr=%s, mnr=%s, srr=%s, snr=%s", pfx, mayMatchClusterScope, someMatchesAllResourceRequests, someMatchesAllNonResourceRequests, matchAllResourceRequests, matchAllNonResourceRequests, fcfmt.Fmt(rule), fcfmt.Fmt(matchingUIs), fcfmt.Fmt(skippingUIs), fcfmt.Fmt(matchingRRIs), fcfmt.Fmt(matchingNRIs), fcfmt.Fmt(skippingRRIs), fcfmt.Fmt(skippingNRIs))
+	}
 	matchingRDigests := cross(matchingUIs, matchingRRIs)
 	skippingRDigests := append(append(cross(matchingUIs, skippingRRIs),
 		cross(skippingUIs, matchingRRIs)...),
@@ -379,12 +383,16 @@ func shuffleAndTakeDigests(t *testing.T, rng *rand.Rand, rule *flowcontrol.Polic
 		if rule != nil {
 			thisMatches := matchesPolicyRule(digest, rule)
 			if toMatch {
-				t.Logf("Added matching digest %#+v", digest)
+				if testDebugLogs {
+					t.Logf("Added matching digest %#+v", digest)
+				}
 				if !thisMatches {
 					t.Errorf("Fail in check: rule %s does not match digest %#+v", fcfmt.Fmt(rule), digest)
 				}
 			} else {
-				t.Logf("Added skipping digest %#+v", digest)
+				if testDebugLogs {
+					t.Logf("Added skipping digest %#+v", digest)
+				}
 				if thisMatches {
 					t.Errorf("Fail in check: rule %s matches digest %#+v", fcfmt.Fmt(rule), digest)
 				}

=== File 3 of 3 ===

@@ -46,7 +46,9 @@ func TestMatching(t *testing.T) {
 
 func checkFTR(t *testing.T, ftr *fsTestingRecord) {
 	for expectMatch, digests1 := range ftr.digests {
-		t.Logf("%s.digests[%v] = %#+v", ftr.fs.Name, expectMatch, digests1)
+		if testDebugLogs {
+			t.Logf("%s.digests[%v] = %#+v", ftr.fs.Name, expectMatch, digests1)
+		}
 		for _, digests2 := range digests1 {
 			for _, digest := range digests2 {
 				actualMatch := matchesFlowSchema(digest, ftr.fs)
@@ -66,7 +68,9 @@ func TestPolicyRules(t *testing.T) {
 		r := rng.Float32()
 		n := rng.Float32()
 		policyRule, matchingRDigests, matchingNDigests, skippingRDigests, skippingNDigests := genPolicyRuleWithSubjects(t, rng, fmt.Sprintf("t%d", i), rng.Float32() < 0.2, r < 0.10, n < 0.10, r < 0.05, n < 0.05)
-		t.Logf("policyRule=%s, mrd=%#+v, mnd=%#+v, srd=%#+v, snd=%#+v", fcfmt.Fmt(policyRule), matchingRDigests, matchingNDigests, skippingRDigests, skippingNDigests)
+		if testDebugLogs {
+			t.Logf("policyRule=%s, mrd=%#+v, mnd=%#+v, srd=%#+v, snd=%#+v", fcfmt.Fmt(policyRule), matchingRDigests, matchingNDigests, skippingRDigests, skippingNDigests)
+		}
 		for _, digest := range append(matchingRDigests, matchingNDigests...) {
 			if !matchesPolicyRule(digest, &policyRule) {
 				t.Errorf("Fail: expected %s to match %#+v but it did not", fcfmt.Fmt(policyRule), digest)