Deprecate MPICFullResults feature flag (#8169)
Fixes https://github.com/letsencrypt/boulder/issues/8121
parent b6887a945e
commit 01a299cd0f
@@ -23,6 +23,7 @@ type Config struct {
 	InsertAuthzsIndividually bool
 	EnforceMultiCAA          bool
 	EnforceMPIC              bool
+	MPICFullResults          bool
 	UnsplitIssuance          bool
 
 	// ServeRenewalInfo exposes the renewalInfo endpoint in the directory and for
@@ -81,11 +82,6 @@ type Config struct {
 	// removing pending authz reuse.
 	NoPendingAuthzReuse bool
 
-	// MPICFullResults causes the VA to wait for all remote (MPIC) results, rather
-	// than cancelling outstanding requests after enough successes or failures for
-	// the result to be determined.
-	MPICFullResults bool
-
 	// StoreARIReplacesInOrders causes the SA to store and retrieve the optional
 	// ARI replaces field in the orders table.
 	StoreARIReplacesInOrders bool
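Taken together, these two hunks move MPICFullResults from the documented, active section of the Config struct into the group of deprecated flags at its top: the field survives so configs that still set it keep parsing, but nothing reads it anymore. A minimal sketch of that pattern — not Boulder's actual features package (presumably features/features.go), just the shape of it:

package features

import "sync"

type Config struct {
	// Deprecated flags: retained so existing configs still unmarshal,
	// but no code consults them anymore.
	MPICFullResults bool

	// Active flags.
	DOH bool
}

var (
	mu     sync.RWMutex
	global Config
)

// Set replaces the process-wide feature configuration.
func Set(c Config) {
	mu.Lock()
	defer mu.Unlock()
	global = c
}

// Get returns a copy of the current feature configuration.
func Get() Config {
	mu.RLock()
	defer mu.RUnlock()
	return global
}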
@@ -38,8 +38,7 @@
 		}
 	},
 	"features": {
-		"DOH": true,
-		"MPICFullResults": true
+		"DOH": true
 	},
 	"remoteVAs": [
 		{
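Because the deprecated field stays in the struct, deleting the key from test/config-next is cleanup rather than a requirement: a config that still sets it decodes without error, the value just has no effect. A self-contained illustration, using a hypothetical Features type rather than Boulder's real config loader:

package main

import (
	"encoding/json"
	"fmt"
)

type Features struct {
	DOH             bool
	MPICFullResults bool // deprecated: parsed, never read
}

func main() {
	// A stale config that still sets the deprecated flag.
	raw := []byte(`{"DOH": true, "MPICFullResults": true}`)
	var f Features
	if err := json.Unmarshal(raw, &f); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", f) // {DOH:true MPICFullResults:true}
}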
@@ -32,46 +32,17 @@ func collectUserAgentsFromDNSRequests(requests []challtestsrvclient.DNSRequest)
 func assertUserAgentsLength(t *testing.T, got []string, checkType string) {
 	t.Helper()
 
-	if os.Getenv("BOULDER_CONFIG_DIR") != "test/config-next" {
-		// We only need 3 checks if the MPICFullResults feature-flag is not
-		// enabled.
-		//
-		// TODO(#8121): Remove this once MPICFullResults has been defaulted to
-		// true.
-		if len(got) != 4 && len(got) != 3 {
-			t.Errorf("During %s, expected 3 or 4 User-Agents, got %d", checkType, len(got))
-		}
-	} else {
-		if len(got) != 4 {
-			t.Errorf("During %s, expected 4 User-Agents, got %d", checkType, len(got))
-		}
+	if len(got) != 4 {
+		t.Errorf("During %s, expected 4 User-Agents, got %d", checkType, len(got))
 	}
 }
 
 func assertExpectedUserAgents(t *testing.T, got []string, checkType string) {
 	t.Helper()
 
-	if os.Getenv("BOULDER_CONFIG_DIR") != "test/config-next" {
-		// One User-Agent may be missing if the MPICFullResults feature-flag is
-		// not enabled. This will need to be modified to 2 if we have not
-		// removed this feature-flag by the time we get to 6+ perspectives.
-		//
-		// TODO(#8121): Remove this once MPICFullResults has been defaulted to
-		// true.
-		var alreadySkippedOne bool
-		for _, ua := range expectedUserAgents {
-			if !slices.Contains(got, ua) {
-				if alreadySkippedOne {
-					t.Errorf("During %s, missing more than 1 User-Agent in %s (got %v)", checkType, expectedUserAgents, got)
-				}
-				alreadySkippedOne = true
-			}
-		}
-	} else {
-		for _, ua := range expectedUserAgents {
-			if !slices.Contains(got, ua) {
-				t.Errorf("During %s, expected User-Agent %q in %s (got %v)", checkType, ua, expectedUserAgents, got)
-			}
+	for _, ua := range expectedUserAgents {
+		if !slices.Contains(got, ua) {
+			t.Errorf("During %s, expected User-Agent %q in %s (got %v)", checkType, ua, expectedUserAgents, got)
 		}
 	}
 }
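With the flag gone, the expectations become unconditional: this integration environment runs one primary VA plus three remote perspectives, and since every perspective now always completes its check, each validation should yield exactly four User-Agents, one per perspective. The 3-or-4 special case existed only because early cancellation could cut off the slowest remote before it issued its request.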
@@ -129,15 +100,8 @@ func TestMPICTLSALPN01(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	if os.Getenv("BOULDER_CONFIG_DIR") == "test/config-next" && len(validationEvents) != 4 {
+	if len(validationEvents) != 4 {
 		t.Errorf("expected 4 validation events got %d", len(validationEvents))
-	} else if len(validationEvents) != 3 && len(validationEvents) != 4 {
-		// We only need 3 checks if the MPICFullResults feature-flag is not
-		// enabled.
-		//
-		// TODO(#8121): Remove this once MPICFullResults has been defaulted to
-		// true.
-		t.Errorf("expected 3 or 4 validation events got %d", len(validationEvents))
 	}
 
 	dnsEvents, err := testSrvClient.DNSRequestHistory(domain)
va/va.go
@@ -25,7 +25,6 @@ import (
 	"github.com/letsencrypt/boulder/core"
 	corepb "github.com/letsencrypt/boulder/core/proto"
 	berrors "github.com/letsencrypt/boulder/errors"
-	"github.com/letsencrypt/boulder/features"
 	bgrpc "github.com/letsencrypt/boulder/grpc"
 	"github.com/letsencrypt/boulder/identifier"
 	blog "github.com/letsencrypt/boulder/log"
@@ -610,18 +609,6 @@ func (va *ValidationAuthorityImpl) doRemoteOperation(ctx context.Context, op rem
 			firstProb = currProb
 		}
 
-		if !features.Get().MPICFullResults {
-			// To respond faster, if we get enough successes or too many failures, we cancel remaining RPCs.
-			// Finish the loop to collect remaining responses into `failed` so we can rely on having a response
-			// for every request we made.
-			if len(passed) >= required && len(passedRIRs) >= requiredRIRs {
-				cancel()
-			}
-			if len(failed) > va.maxRemoteFailures {
-				cancel()
-			}
-		}
-
 		// Once all the VAs have returned a result, break the loop.
 		if len(passed)+len(failed) >= remoteVACount {
 			break
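This hunk is the behavioral core of the change. The removed branch implemented early return: once enough perspectives had passed (len(passed) >= required, with sufficient RIR diversity) or too many had failed (len(failed) > va.maxRemoteFailures), the VA cancelled the outstanding remote RPCs. With the branch gone, doRemoteOperation always drains a response from every remote perspective before deciding. A standalone sketch of the always-drain strategy, using hypothetical names (result, collectAll) rather than Boulder's types:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

type result struct{ passed bool }

// collectAll mirrors the post-change behavior: it drains one response per
// probe and only then evaluates quorum, so the slowest perspective always
// gets to report.
func collectAll(results <-chan result, n, required, maxFailures int) bool {
	passed, failed := 0, 0
	for i := 0; i < n; i++ {
		if r := <-results; r.passed {
			passed++
		} else {
			failed++
		}
	}
	return passed >= required && failed <= maxFailures
}

func main() {
	const n, required, maxFailures = 3, 2, 1
	results := make(chan result, n)
	for i := 0; i < n; i++ {
		go func() {
			time.Sleep(time.Duration(rand.Intn(50)) * time.Millisecond) // simulated remote RPC
			results <- result{passed: true}
		}()
	}
	fmt.Println("corroborated:", collectAll(results, n, required, maxFailures))
}

The trade-off is latency for completeness: every validation now records a result from every perspective, at the cost of waiting for the slowest one.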
va/va_test.go
@@ -12,7 +12,6 @@ import (
 	"net/http/httptest"
 	"net/netip"
 	"os"
-	"strconv"
 	"strings"
 	"sync"
 	"syscall"
@@ -237,11 +236,6 @@ type multiSrv struct {
 	allowedUAs map[string]bool
 }
 
-const (
-	slowUA                = "slow"
-	slowRemoteSleepMillis = 100
-)
-
 func httpMultiSrv(t *testing.T, token string, allowedUAs map[string]bool) *multiSrv {
 	t.Helper()
 	m := http.NewServeMux()
@@ -250,9 +244,6 @@ func httpMultiSrv(t *testing.T, token string, allowedUAs map[string]bool) *multi
 	ms := &multiSrv{server, sync.Mutex{}, allowedUAs}
 
 	m.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
-		if r.UserAgent() == slowUA {
-			time.Sleep(slowRemoteSleepMillis * time.Millisecond)
-		}
 		ms.mu.Lock()
 		defer ms.mu.Unlock()
 		if ms.allowedUAs[r.UserAgent()] {
@@ -645,7 +636,7 @@ func TestMultiVA(t *testing.T) {
 		},
 		{
 			// With the local and remote VAs seeing diff problems, we expect a problem.
-			Name: "Local and remote VA differential, full results, enforce multi VA",
+			Name: "Local and remote VA differential",
 			Remotes: []remoteConf{
 				{ua: fail, rir: arin},
 				{ua: fail, rir: ripe},
@@ -689,143 +680,6 @@ func TestMultiVA(t *testing.T) {
 	}
 }
 
-func TestMultiVAEarlyReturn(t *testing.T) {
-	// TODO(#7809): Make this test parallel when it no longer manipulates the
-	// MPICFullResults feature flag.
-
-	testCases := []struct {
-		name              string
-		remoteConfs       []remoteConf
-		wantCorroboration bool
-		wantEarlyReturn   bool
-	}{
-		{
-			name: "Early return when 2/3 pass",
-			remoteConfs: []remoteConf{
-				{ua: pass, rir: arin},
-				{ua: pass, rir: ripe},
-				{ua: slowUA, rir: apnic},
-			},
-			wantCorroboration: true,
-			wantEarlyReturn:   true,
-		},
-		{
-			name: "Early return when 2/3 fail",
-			remoteConfs: []remoteConf{
-				{ua: fail, rir: arin},
-				{ua: fail, rir: ripe},
-				{ua: slowUA, rir: apnic},
-			},
-			wantCorroboration: false,
-			wantEarlyReturn:   true,
-		},
-		{
-			name: "Slow return when first 2/3 are inconclusive",
-			remoteConfs: []remoteConf{
-				{ua: pass, rir: arin},
-				{ua: fail, rir: ripe},
-				{ua: slowUA, rir: apnic},
-			},
-			wantCorroboration: false,
-			wantEarlyReturn:   false,
-		},
-		{
-			name: "Early return when 4/6 pass",
-			remoteConfs: []remoteConf{
-				{ua: pass, rir: arin},
-				{ua: pass, rir: ripe},
-				{ua: pass, rir: apnic},
-				{ua: pass, rir: arin},
-				{ua: fail, rir: ripe},
-				{ua: slowUA, rir: apnic},
-			},
-			wantCorroboration: true,
-			wantEarlyReturn:   true,
-		},
-		{
-			name: "Early return when 4/6 fail",
-			remoteConfs: []remoteConf{
-				{ua: pass, rir: arin},
-				{ua: fail, rir: ripe},
-				{ua: fail, rir: apnic},
-				{ua: fail, rir: arin},
-				{ua: fail, rir: ripe},
-				{ua: slowUA, rir: apnic},
-			},
-			wantCorroboration: false,
-			wantEarlyReturn:   true,
-		},
-		{
-			name: "Slow return when first 5/6 are inconclusive",
-			remoteConfs: []remoteConf{
-				{ua: pass, rir: arin},
-				{ua: pass, rir: ripe},
-				{ua: pass, rir: apnic},
-				{ua: fail, rir: arin},
-				{ua: fail, rir: ripe},
-				{ua: slowUA, rir: apnic},
-			},
-			wantCorroboration: false,
-			wantEarlyReturn:   false,
-		},
-	}
-
-	for _, mpicFullResults := range []bool{false, true} {
-		for _, tc := range testCases {
-			t.Run(fmt.Sprintf("%s_(MPICFullResults: %s)", tc.name, strconv.FormatBool(mpicFullResults)), func(t *testing.T) {
-				// TODO(#7809): Make this test parallel when it no longer manipulates the
-				// MPICFullResults feature flag.
-
-				// Configure one test server per test case so that all tests can run in parallel.
-				ms := httpMultiSrv(t, expectedToken, map[string]bool{pass: true, fail: false})
-				defer ms.Close()
-
-				localVA, _ := setupWithRemotes(ms.Server, pass, tc.remoteConfs, nil)
-
-				features.Set(features.Config{MPICFullResults: mpicFullResults})
-				defer features.Reset()
-
-				// Perform all validations
-				start := time.Now()
-				req := createValidationRequest(identifier.NewDNS("localhost"), core.ChallengeTypeHTTP01)
-				res, _ := localVA.DoDCV(ctx, req)
-
-				if tc.wantCorroboration {
-					if res.Problem != nil {
-						t.Errorf("expected corroboration, but got prob %s", res.Problem)
-					}
-				} else {
-					if res.Problem == nil {
-						t.Error("expected prob from PerformValidation, got nil")
-					}
-				}
-
-				elapsed := time.Since(start).Round(time.Millisecond).Milliseconds()
-
-				if tc.wantEarlyReturn && !mpicFullResults {
-					// The slow UA should sleep for `slowRemoteSleepMillis`. But the first remote
-					// VA should fail quickly and the early-return code should cause the overall
-					// validation to return a prob quickly (i.e. in less than half of
-					// `slowRemoteSleepMillis`).
-					if elapsed > slowRemoteSleepMillis/2 {
-						t.Errorf(
-							"Expected an early return from PerformValidation in < %d ms, took %d ms",
-							slowRemoteSleepMillis/2, elapsed)
-					}
-				} else {
-					// The VA will have to wait for all of the results, because the fast
-					// results aren't sufficient to determine (non)corroboration.
-					if elapsed < slowRemoteSleepMillis {
-						t.Errorf(
-							"Expected a slow return from PerformValidation in >= %d ms, took %d ms",
-							slowRemoteSleepMillis, elapsed)
-					}
-				}
-			})
-		}
-	}
-}
-
 func TestMultiVAPolicy(t *testing.T) {
 	t.Parallel()
 