Support new Google CT Policy (#6082)
Add a new code path to the ctpolicy package which enforces Chrome's new CT Policy, which requires that SCTs come from logs run by two different operators, rather than one Google and one non-Google log. To achieve this, invert the "race" logic: rather than assuming we always have two groups, and racing the logs within each group against each other, we now race the various groups against each other, and pick just one arbitrary log from each group to attempt submission to. Ensure that the new code path does the right thing by adding a new zlint which checks that the two SCTs embedded in a certificate come from logs run by different operators. To support this lint, which needs to have a canonical mapping from logs to their operators, import the Chrome CT Log List JSON Schema and autogenerate Go structs from it so that we can parse a real CT Log List. Also add flags to all services which run these lints (the CA and cert-checker) to let them load a CT Log List from disk and provide it to the lint. Finally, since we now have the ability to load a CT Log List file anyway, use this capability to simplify configuration of the RA. Rather than listing all of the details for each log we're willing to submit to, simply list the names (technically, Descriptions) of each log, and look up the rest of the details from the log list file. To support this change, SRE will need to deploy log list files (the real Chrome log list for prod, and a custom log list for staging) and then update the configuration of the RA, CA, and cert-checker. Once that transition is complete, the deletion TODOs left behind by this change will be able to be completed, removing the old RA configuration and old ctpolicy race logic. Part of #5938
This commit is contained in:
parent
3e01ffe8bf
commit
11544756bb
|
@ -15,6 +15,7 @@ import (
|
|||
"github.com/letsencrypt/boulder/ca"
|
||||
capb "github.com/letsencrypt/boulder/ca/proto"
|
||||
"github.com/letsencrypt/boulder/cmd"
|
||||
"github.com/letsencrypt/boulder/ctpolicy/loglist"
|
||||
"github.com/letsencrypt/boulder/features"
|
||||
"github.com/letsencrypt/boulder/goodkey"
|
||||
bgrpc "github.com/letsencrypt/boulder/grpc"
|
||||
|
@ -84,6 +85,11 @@ type Config struct {
|
|||
// allowed to request ECDSA issuance
|
||||
ECDSAAllowListFilename string
|
||||
|
||||
// CTLogListFile is the path to a JSON file on disk containing the set of
|
||||
// all logs trusted by Chrome. The file must match the v3 log list schema:
|
||||
// https://www.gstatic.com/ct/log_list/v3/log_list_schema.json
|
||||
CTLogListFile string
|
||||
|
||||
Features map[string]bool
|
||||
}
|
||||
|
||||
|
@ -189,6 +195,13 @@ func main() {
|
|||
err = pa.SetHostnamePolicyFile(c.CA.HostnamePolicyFile)
|
||||
cmd.FailOnError(err, "Couldn't load hostname policy file")
|
||||
|
||||
// Do this before creating the issuers to ensure the log list is loaded before
|
||||
// the linters are initialized.
|
||||
if c.CA.CTLogListFile != "" {
|
||||
err = loglist.InitLintList(c.CA.CTLogListFile)
|
||||
cmd.FailOnError(err, "Failed to load CT Log List")
|
||||
}
|
||||
|
||||
var boulderIssuers []*issuance.Issuer
|
||||
boulderIssuers, err = loadBoulderIssuers(c.CA.Issuance.Profile, c.CA.Issuance.Issuers, c.CA.Issuance.IgnoredLints)
|
||||
cmd.FailOnError(err, "Couldn't load issuers")
|
||||
|
|
|
@ -2,7 +2,6 @@ package notmain
|
|||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
|
@ -15,6 +14,7 @@ import (
|
|||
"github.com/letsencrypt/boulder/cmd"
|
||||
"github.com/letsencrypt/boulder/ctpolicy"
|
||||
"github.com/letsencrypt/boulder/ctpolicy/ctconfig"
|
||||
"github.com/letsencrypt/boulder/ctpolicy/loglist"
|
||||
"github.com/letsencrypt/boulder/features"
|
||||
"github.com/letsencrypt/boulder/goodkey"
|
||||
bgrpc "github.com/letsencrypt/boulder/grpc"
|
||||
|
@ -72,7 +72,18 @@ type Config struct {
|
|||
// in a group and the first SCT returned will be used. This allows
|
||||
// us to comply with Chrome CT policy which requires one SCT from a
|
||||
// Google log and one SCT from any other log included in their policy.
|
||||
// DEPRECATED: Use CTLogs instead.
|
||||
// TODO(#5938): Remove this.
|
||||
CTLogGroups2 []ctconfig.CTGroup
|
||||
// CTLogs contains groupings of CT logs organized by what organization
|
||||
// operates them. When we submit precerts to logs in order to get SCTs, we
|
||||
// will submit the cert to one randomly-chosen log from each group, and use
|
||||
// the SCTs from the first two groups which reply. This allows us to comply
|
||||
// with various CT policies that require (for certs with short lifetimes
|
||||
// like ours) two SCTs from logs run by different operators. It also holds
|
||||
// a `Stagger` value controlling how long we wait for one operator group
|
||||
// to respond before trying a different one.
|
||||
CTLogs ctconfig.CTConfig
|
||||
// InformationalCTLogs are a set of CT logs we will always submit to
|
||||
// but won't ever use the SCTs from. This may be because we want to
|
||||
// test them or because they are not yet approved by a browser/root
|
||||
|
@ -161,7 +172,6 @@ func main() {
|
|||
cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to SA")
|
||||
sac := sapb.NewStorageAuthorityClient(saConn)
|
||||
|
||||
var ctp *ctpolicy.CTPolicy
|
||||
conn, err := bgrpc.ClientSetup(c.RA.PublisherService, tlsConfig, clientMetrics, clk)
|
||||
cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to Publisher")
|
||||
pubc := pubpb.NewPublisherClient(conn)
|
||||
|
@ -181,27 +191,44 @@ func main() {
|
|||
}
|
||||
|
||||
// Boulder's components assume that there will always be CT logs configured.
|
||||
// Issuing a certificate without SCTs embedded is a miss-issuance event in the
|
||||
// environment Boulder is built for. Exit early if there is no CTLogGroups2
|
||||
// configured.
|
||||
if len(c.RA.CTLogGroups2) == 0 {
|
||||
cmd.Fail("CTLogGroups2 must not be empty")
|
||||
}
|
||||
|
||||
for i, g := range c.RA.CTLogGroups2 {
|
||||
// Exit early if any of the log groups specify no logs
|
||||
if len(g.Logs) == 0 {
|
||||
cmd.Fail(
|
||||
fmt.Sprintf("CTLogGroups2 index %d specifies no logs", i))
|
||||
}
|
||||
for _, l := range g.Logs {
|
||||
if l.TemporalSet != nil {
|
||||
err := l.Setup()
|
||||
cmd.FailOnError(err, "Failed to setup a temporal log set")
|
||||
// Issuing a certificate without SCTs embedded is a misissuance event as per
|
||||
// our CPS 4.4.2, which declares we will always include at least two SCTs.
|
||||
// Exit early if no groups are configured.
|
||||
var ctp *ctpolicy.CTPolicy
|
||||
if len(c.RA.CTLogGroups2) != 0 && len(c.RA.CTLogs.SCTLogs) != 0 {
|
||||
cmd.Fail("Configure only CTLogGroups2 or CTLogs, not both")
|
||||
} else if len(c.RA.CTLogGroups2) > 0 {
|
||||
for _, g := range c.RA.CTLogGroups2 {
|
||||
// Exit early if any of the log groups specify no logs
|
||||
if len(g.Logs) == 0 {
|
||||
cmd.Fail("Encountered empty CT log group")
|
||||
}
|
||||
for _, l := range g.Logs {
|
||||
if l.TemporalSet != nil {
|
||||
err := l.Setup()
|
||||
cmd.FailOnError(err, "Failed to setup a temporal log set")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ctp = ctpolicy.New(pubc, c.RA.CTLogGroups2, c.RA.InformationalCTLogs, nil, nil, nil, c.RA.CTLogs.Stagger.Duration, logger, scope)
|
||||
} else if len(c.RA.CTLogs.SCTLogs) > 0 {
|
||||
allLogs, err := loglist.New(c.RA.CTLogs.LogListFile)
|
||||
cmd.FailOnError(err, "Failed to parse log list")
|
||||
|
||||
sctLogs, err := allLogs.SubsetForPurpose(c.RA.CTLogs.SCTLogs, loglist.Issuance)
|
||||
cmd.FailOnError(err, "Failed to load SCT logs")
|
||||
|
||||
infoLogs, err := allLogs.SubsetForPurpose(c.RA.CTLogs.InfoLogs, loglist.Informational)
|
||||
cmd.FailOnError(err, "Failed to load informational logs")
|
||||
|
||||
finalLogs, err := allLogs.SubsetForPurpose(c.RA.CTLogs.FinalLogs, loglist.Informational)
|
||||
cmd.FailOnError(err, "Failed to load final logs")
|
||||
|
||||
ctp = ctpolicy.New(pubc, nil, nil, sctLogs, infoLogs, finalLogs, c.RA.CTLogs.Stagger.Duration, logger, scope)
|
||||
} else {
|
||||
cmd.Fail("Must configure either CTLogGroups2 or CTLogs")
|
||||
}
|
||||
ctp = ctpolicy.New(pubc, c.RA.CTLogGroups2, c.RA.InformationalCTLogs, logger, scope)
|
||||
|
||||
// Baseline Requirements v1.8.1 section 4.2.1: "any reused data, document,
|
||||
// or completed validation MUST be obtained no more than 398 days prior
|
||||
|
|
|
@ -25,9 +25,11 @@ import (
|
|||
|
||||
"github.com/letsencrypt/boulder/cmd"
|
||||
"github.com/letsencrypt/boulder/core"
|
||||
"github.com/letsencrypt/boulder/ctpolicy/loglist"
|
||||
"github.com/letsencrypt/boulder/features"
|
||||
"github.com/letsencrypt/boulder/goodkey"
|
||||
"github.com/letsencrypt/boulder/identifier"
|
||||
_ "github.com/letsencrypt/boulder/linter"
|
||||
blog "github.com/letsencrypt/boulder/log"
|
||||
"github.com/letsencrypt/boulder/policy"
|
||||
"github.com/letsencrypt/boulder/sa"
|
||||
|
@ -352,7 +354,13 @@ type Config struct {
|
|||
// IgnoredLints is a list of zlint names. Any lint results from a lint in
|
||||
// the IgnoredLists list are ignored regardless of LintStatus level.
|
||||
IgnoredLints []string
|
||||
Features map[string]bool
|
||||
|
||||
// CTLogListFile is the path to a JSON file on disk containing the set of
|
||||
// all logs trusted by Chrome. The file must match the v3 log list schema:
|
||||
// https://www.gstatic.com/ct/log_list/v3/log_list_schema.json
|
||||
CTLogListFile string
|
||||
|
||||
Features map[string]bool
|
||||
}
|
||||
PA cmd.PAConfig
|
||||
Syslog cmd.SyslogConfig
|
||||
|
@ -425,6 +433,11 @@ func main() {
|
|||
err = pa.SetHostnamePolicyFile(config.CertChecker.HostnamePolicyFile)
|
||||
cmd.FailOnError(err, "Failed to load HostnamePolicyFile")
|
||||
|
||||
if config.CertChecker.CTLogListFile != "" {
|
||||
err = loglist.InitLintList(config.CertChecker.CTLogListFile)
|
||||
cmd.FailOnError(err, "Failed to load CT Log List")
|
||||
}
|
||||
|
||||
checker := newChecker(
|
||||
saDbMap,
|
||||
cmd.Clock(),
|
||||
|
|
|
@ -26,6 +26,7 @@ import (
|
|||
"github.com/jmhodges/clock"
|
||||
|
||||
"github.com/letsencrypt/boulder/core"
|
||||
"github.com/letsencrypt/boulder/ctpolicy/loglist"
|
||||
"github.com/letsencrypt/boulder/goodkey"
|
||||
blog "github.com/letsencrypt/boulder/log"
|
||||
"github.com/letsencrypt/boulder/metrics"
|
||||
|
@ -526,6 +527,8 @@ func TestIgnoredLint(t *testing.T) {
|
|||
saCleanup()
|
||||
}()
|
||||
|
||||
err = loglist.InitLintList("../../test/ct-test-srv/log_list.json")
|
||||
test.AssertNotError(t, err, "failed to load ct log list")
|
||||
testKey, _ := rsa.GenerateKey(rand.Reader, 2048)
|
||||
checker := newChecker(saDbMap, clock.NewFake(), pa, kp, time.Hour, testValidityDurations)
|
||||
serial := big.NewInt(1337)
|
||||
|
@ -582,6 +585,7 @@ func TestIgnoredLint(t *testing.T) {
|
|||
"zlint error: e_sub_cert_aia_does_not_contain_ocsp_url",
|
||||
"zlint info: n_subject_common_name_included",
|
||||
"zlint info: w_ct_sct_policy_count_unsatisfied Certificate had 0 embedded SCTs. Browser policy may require 2 for this certificate.",
|
||||
"zlint error: e_scts_from_same_operator Certificate had too few embedded SCTs; browser policy requires 2.",
|
||||
}
|
||||
sort.Strings(expectedProblems)
|
||||
|
||||
|
@ -589,7 +593,7 @@ func TestIgnoredLint(t *testing.T) {
|
|||
// expected zlint problems.
|
||||
_, problems := checker.checkCert(cert, nil)
|
||||
sort.Strings(problems)
|
||||
test.Assert(t, reflect.DeepEqual(problems, expectedProblems), "problems did not match expected")
|
||||
test.AssertDeepEquals(t, problems, expectedProblems)
|
||||
|
||||
// Check the certificate again with an ignore map that excludes the affected
|
||||
// lints. This should return no problems.
|
||||
|
@ -597,6 +601,7 @@ func TestIgnoredLint(t *testing.T) {
|
|||
"e_sub_cert_aia_does_not_contain_ocsp_url": true,
|
||||
"n_subject_common_name_included": true,
|
||||
"w_ct_sct_policy_count_unsatisfied": true,
|
||||
"e_scts_from_same_operator": true,
|
||||
})
|
||||
test.AssertEquals(t, len(problems), 0)
|
||||
}
|
||||
|
|
|
@ -33,8 +33,7 @@ func (ts *TemporalSet) Setup() error {
|
|||
return errors.New("temporal set contains no shards")
|
||||
}
|
||||
for i := range ts.Shards {
|
||||
if ts.Shards[i].WindowEnd.Before(ts.Shards[i].WindowStart) ||
|
||||
ts.Shards[i].WindowEnd.Equal(ts.Shards[i].WindowStart) {
|
||||
if !ts.Shards[i].WindowEnd.After(ts.Shards[i].WindowStart) {
|
||||
return errors.New("WindowStart must be before WindowEnd")
|
||||
}
|
||||
}
|
||||
|
@ -81,10 +80,46 @@ func (ld LogDescription) Info(exp time.Time) (string, string, error) {
|
|||
return shard.URI, shard.Key, nil
|
||||
}
|
||||
|
||||
// CTGroup represents a group of CT Logs. Although capable of holding logs
|
||||
// grouped by any arbitrary feature, is today primarily used to hold logs which
|
||||
// are all operated by the same legal entity.
|
||||
type CTGroup struct {
|
||||
Name string
|
||||
Logs []LogDescription
|
||||
// How long to wait for one log to accept a certificate before moving on to
|
||||
// the next.
|
||||
// TODO(#5938): Remove this when CTLogGroups2 is removed from the RA.
|
||||
Stagger cmd.ConfigDuration
|
||||
}
|
||||
|
||||
// CTConfig is the top-level config object expected to be embedded in an
|
||||
// executable's JSON config struct.
|
||||
type CTConfig struct {
|
||||
// Stagger is duration (e.g. "200ms") indicating how long to wait for a log
|
||||
// from one operator group to accept a certificate before attempting
|
||||
// submission to a log run by a different operator instead.
|
||||
Stagger cmd.ConfigDuration
|
||||
// LogListFile is a path to a JSON log list file. The file must match Chrome's
|
||||
// schema: https://www.gstatic.com/ct/log_list/v3/log_list_schema.json
|
||||
LogListFile string
|
||||
// SCTLogs is a list of CT log names to submit precerts to in order to get SCTs.
|
||||
SCTLogs []string
|
||||
// InfoLogs is a list of CT log names to submit precerts to on a best-effort
|
||||
// basis. Logs are included here for the sake of wider distribution of our
|
||||
// precerts, and to exercise logs that in the qualification process.
|
||||
InfoLogs []string
|
||||
// FinalLogs is a list of CT log names to submit final certificates to.
|
||||
// This may include duplicates from the lists above, to submit both precerts
|
||||
// and final certs to the same log.
|
||||
FinalLogs []string
|
||||
}
|
||||
|
||||
// LogID holds enough information to uniquely identify a CT Log: its log_id
|
||||
// (the base64-encoding of the SHA-256 hash of its public key) and its human-
|
||||
// readable name/description. This is used to extract other log parameters
|
||||
// (such as its URL and public key) from the Chrome Log List.
|
||||
type LogID struct {
|
||||
Name string
|
||||
ID string
|
||||
SubmitFinal bool
|
||||
}
|
||||
|
|
|
@ -3,12 +3,15 @@ package ctpolicy
|
|||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/letsencrypt/boulder/canceled"
|
||||
"github.com/letsencrypt/boulder/core"
|
||||
"github.com/letsencrypt/boulder/ctpolicy/ctconfig"
|
||||
"github.com/letsencrypt/boulder/ctpolicy/loglist"
|
||||
berrors "github.com/letsencrypt/boulder/errors"
|
||||
blog "github.com/letsencrypt/boulder/log"
|
||||
pubpb "github.com/letsencrypt/boulder/publisher/proto"
|
||||
|
@ -18,33 +21,43 @@ import (
|
|||
// CTPolicy is used to hold information about SCTs required from various
|
||||
// groupings
|
||||
type CTPolicy struct {
|
||||
pub pubpb.PublisherClient
|
||||
pub pubpb.PublisherClient
|
||||
// TODO(#5938): Remove groups, informational, and final
|
||||
groups []ctconfig.CTGroup
|
||||
informational []ctconfig.LogDescription
|
||||
finalLogs []ctconfig.LogDescription
|
||||
log blog.Logger
|
||||
final []ctconfig.LogDescription
|
||||
sctLogs loglist.List
|
||||
infoLogs loglist.List
|
||||
finalLogs loglist.List
|
||||
stagger time.Duration
|
||||
|
||||
log blog.Logger
|
||||
winnerCounter *prometheus.CounterVec
|
||||
}
|
||||
|
||||
// New creates a new CTPolicy struct
|
||||
func New(pub pubpb.PublisherClient,
|
||||
func New(
|
||||
pub pubpb.PublisherClient,
|
||||
groups []ctconfig.CTGroup,
|
||||
informational []ctconfig.LogDescription,
|
||||
sctLogs loglist.List,
|
||||
infoLogs loglist.List,
|
||||
finalLogs loglist.List,
|
||||
stagger time.Duration,
|
||||
log blog.Logger,
|
||||
stats prometheus.Registerer,
|
||||
) *CTPolicy {
|
||||
var finalLogs []ctconfig.LogDescription
|
||||
var final []ctconfig.LogDescription
|
||||
for _, group := range groups {
|
||||
for _, log := range group.Logs {
|
||||
if log.SubmitFinalCert {
|
||||
finalLogs = append(finalLogs, log)
|
||||
final = append(final, log)
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, log := range informational {
|
||||
if log.SubmitFinalCert {
|
||||
finalLogs = append(finalLogs, log)
|
||||
final = append(final, log)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -61,7 +74,11 @@ func New(pub pubpb.PublisherClient,
|
|||
pub: pub,
|
||||
groups: groups,
|
||||
informational: informational,
|
||||
final: final,
|
||||
sctLogs: sctLogs,
|
||||
infoLogs: infoLogs,
|
||||
finalLogs: finalLogs,
|
||||
stagger: stagger,
|
||||
log: log,
|
||||
winnerCounter: winnerCounter,
|
||||
}
|
||||
|
@ -77,6 +94,7 @@ type result struct {
|
|||
// once it has the first SCT it cancels all of the other submissions and returns.
|
||||
// It allows up to len(group)-1 of the submissions to fail as we only care about
|
||||
// getting a single SCT.
|
||||
// TODO(#5938): Remove this when it becomes dead code.
|
||||
func (ctp *CTPolicy) race(ctx context.Context, cert core.CertDER, group ctconfig.CTGroup, expiration time.Time) ([]byte, error) {
|
||||
results := make(chan result, len(group.Logs))
|
||||
isPrecert := true
|
||||
|
@ -137,9 +155,24 @@ func (ctp *CTPolicy) race(ctx context.Context, cert core.CertDER, group ctconfig
|
|||
return nil, errors.New("all submissions failed")
|
||||
}
|
||||
|
||||
// GetSCTs attempts to retrieve a SCT from each configured grouping of logs and returns
|
||||
// the set of SCTs to the caller.
|
||||
// GetSCTs attempts to retrieve two SCTs from the configured log groups and
|
||||
// returns the set of SCTs to the caller.
|
||||
func (ctp *CTPolicy) GetSCTs(ctx context.Context, cert core.CertDER, expiration time.Time) (core.SCTDERs, error) {
|
||||
if len(ctp.sctLogs) != 0 {
|
||||
return ctp.getOperatorSCTs(ctx, cert, expiration)
|
||||
}
|
||||
return ctp.getGoogleSCTs(ctx, cert, expiration)
|
||||
}
|
||||
|
||||
// getGoogleSCTs retrieves exactly one SCT from each of the configured log
|
||||
// groups. It expects that there are exactly 2 such groups, and that one of
|
||||
// those groups contains only logs operated by Google. As such, it enforces
|
||||
// Google's *old* CT Policy, which required that certs have two SCTs, one of
|
||||
// which was from a Google log.
|
||||
// DEPRECATED: Google no longer enforces the "one Google, one non-Google" log
|
||||
// policy. Use getOperatorSCTs instead.
|
||||
// TODO(#5938): Remove this after the configured groups have been rearranged.
|
||||
func (ctp *CTPolicy) getGoogleSCTs(ctx context.Context, cert core.CertDER, expiration time.Time) (core.SCTDERs, error) {
|
||||
results := make(chan result, len(ctp.groups))
|
||||
subCtx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
@ -153,29 +186,8 @@ func (ctp *CTPolicy) GetSCTs(ctx context.Context, cert core.CertDER, expiration
|
|||
results <- result{sct: sct}
|
||||
}(i, g)
|
||||
}
|
||||
isPrecert := true
|
||||
for _, log := range ctp.informational {
|
||||
go func(l ctconfig.LogDescription) {
|
||||
// We use a context.Background() here instead of subCtx because these
|
||||
// submissions are running in a goroutine and we don't want them to be
|
||||
// cancelled when the caller of CTPolicy.GetSCTs returns and cancels
|
||||
// its RPC context.
|
||||
uri, key, err := l.Info(expiration)
|
||||
if err != nil {
|
||||
ctp.log.Errf("unable to get log info: %s", err)
|
||||
return
|
||||
}
|
||||
_, err = ctp.pub.SubmitToSingleCTWithResult(context.Background(), &pubpb.Request{
|
||||
LogURL: uri,
|
||||
LogPublicKey: key,
|
||||
Der: cert,
|
||||
Precert: isPrecert,
|
||||
})
|
||||
if err != nil {
|
||||
ctp.log.Warningf("ct submission to informational log %q failed: %s", uri, err)
|
||||
}
|
||||
}(log)
|
||||
}
|
||||
|
||||
go ctp.submitPrecertInformational(cert, expiration)
|
||||
|
||||
var ret core.SCTDERs
|
||||
for i := 0; i < len(ctp.groups); i++ {
|
||||
|
@ -191,10 +203,173 @@ func (ctp *CTPolicy) GetSCTs(ctx context.Context, cert core.CertDER, expiration
|
|||
return ret, nil
|
||||
}
|
||||
|
||||
// getOperatorSCTs retrieves exactly two SCTs from the total collection of
|
||||
// configured log groups, with at most one SCT coming from each group. It
|
||||
// expects that all logs run by a single operator (e.g. Google) are in the same
|
||||
// group, to guarantee that SCTs from logs in different groups do not end up
|
||||
// coming from the same operator. As such, it enforces Google's current CT
|
||||
// Policy, which requires that certs have two SCTs from logs run by different
|
||||
// operators.
|
||||
// TODO(#5938): Inline this into GetSCTs when getGoogleSCTs is removed.
|
||||
func (ctp *CTPolicy) getOperatorSCTs(ctx context.Context, cert core.CertDER, expiration time.Time) (core.SCTDERs, error) {
|
||||
// We'll cancel this sub-context when we have the two SCTs we need, to cause
|
||||
// any other ongoing submission attempts to quit.
|
||||
subCtx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
// This closure will be called in parallel once for each operator group.
|
||||
getOne := func(i int, g string) ([]byte, error) {
|
||||
// Sleep a little bit to stagger our requests to the later groups. Use `i-1`
|
||||
// to compute the stagger duration so that the first two groups (indices 0
|
||||
// and 1) get negative or zero (i.e. instant) sleep durations. If the
|
||||
// context gets cancelled (most likely because two logs from other operator
|
||||
// groups returned SCTs already) before the sleep is complete, quit instead.
|
||||
select {
|
||||
case <-subCtx.Done():
|
||||
return nil, subCtx.Err()
|
||||
case <-time.After(time.Duration(i-1) * ctp.stagger):
|
||||
}
|
||||
|
||||
// Pick a random log from among those in the group. In practice, very few
|
||||
// operator groups have more than one log, so this loses little flexibility.
|
||||
uri, key, err := ctp.sctLogs.PickOne(g, expiration)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to get log info: %w", err)
|
||||
}
|
||||
|
||||
sct, err := ctp.pub.SubmitToSingleCTWithResult(ctx, &pubpb.Request{
|
||||
LogURL: uri,
|
||||
LogPublicKey: key,
|
||||
Der: cert,
|
||||
Precert: true,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("ct submission to %q (%q) failed: %w", g, uri, err)
|
||||
}
|
||||
|
||||
return sct.Sct, nil
|
||||
}
|
||||
|
||||
// Ensure that this channel has a buffer equal to the number of goroutines
|
||||
// we're kicking off, so that they're all guaranteed to be able to write to
|
||||
// it and exit without blocking and leaking.
|
||||
results := make(chan result, len(ctp.sctLogs))
|
||||
|
||||
// Kick off a collection of goroutines to try to submit the precert to each
|
||||
// log operator group. Randomize the order of the groups so that we're not
|
||||
// always trying to submit to the same two operators.
|
||||
for i, group := range ctp.sctLogs.Permute() {
|
||||
go func(i int, g string) {
|
||||
sctDER, err := getOne(i, g)
|
||||
results <- result{sct: sctDER, err: err}
|
||||
}(i, group)
|
||||
}
|
||||
|
||||
go ctp.submitPrecertInformational(cert, expiration)
|
||||
|
||||
// Finally, collect SCTs and/or errors from our results channel.
|
||||
scts := make(core.SCTDERs, 0)
|
||||
errs := make([]string, 0)
|
||||
for i := 0; i < len(ctp.sctLogs); i++ {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
// We timed out (the calling function returned and canceled our context)
|
||||
// before getting two SCTs.
|
||||
return nil, berrors.MissingSCTsError("failed to get 2 SCTs before ctx finished: %s", ctx.Err())
|
||||
case res := <-results:
|
||||
if res.err != nil {
|
||||
errs = append(errs, res.err.Error())
|
||||
continue
|
||||
}
|
||||
scts = append(scts, res.sct)
|
||||
if len(scts) >= 2 {
|
||||
return scts, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If we made it to the end of that loop, that means we never got two SCTs
|
||||
// to return. Error out instead.
|
||||
return nil, berrors.MissingSCTsError("failed to get 2 SCTs, got error(s): %s", strings.Join(errs, "; "))
|
||||
}
|
||||
|
||||
// submitAllBestEffort submits the given certificate or precertificate to every
|
||||
// log ("informational" for precerts, "final" for certs) configured in the policy.
|
||||
// It neither waits for these submission to complete, nor tracks their success.
|
||||
func (ctp *CTPolicy) submitAllBestEffort(blob core.CertDER, precert bool, expiry time.Time) {
|
||||
logs := ctp.finalLogs
|
||||
if precert {
|
||||
logs = ctp.infoLogs
|
||||
}
|
||||
|
||||
for _, group := range logs {
|
||||
for _, log := range group {
|
||||
if log.StartInclusive.After(expiry) || log.EndExclusive.Equal(expiry) || log.EndExclusive.Before(expiry) {
|
||||
continue
|
||||
}
|
||||
|
||||
go func(log loglist.Log) {
|
||||
_, err := ctp.pub.SubmitToSingleCTWithResult(
|
||||
context.Background(),
|
||||
&pubpb.Request{
|
||||
LogURL: log.Url,
|
||||
LogPublicKey: log.Key,
|
||||
Der: blob,
|
||||
Precert: precert,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
ctp.log.Warningf("ct submission of cert to log %q failed: %s", log.Url, err)
|
||||
}
|
||||
}(log)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// submitPrecertInformational submits precertificates to any configured
|
||||
// "informational" logs, but does not care about success or returned SCTs.
|
||||
func (ctp *CTPolicy) submitPrecertInformational(cert core.CertDER, expiration time.Time) {
|
||||
if len(ctp.sctLogs) != 0 {
|
||||
ctp.submitAllBestEffort(cert, true, expiration)
|
||||
return
|
||||
}
|
||||
|
||||
// TODO(#5938): Remove this when it becomes dead code.
|
||||
for _, log := range ctp.informational {
|
||||
go func(l ctconfig.LogDescription) {
|
||||
// We use a context.Background() here instead of a context from the parent
|
||||
// because these submissions are running in a goroutine and we don't want
|
||||
// them to be cancelled when the caller of CTPolicy.GetSCTs returns and
|
||||
// cancels its RPC context.
|
||||
uri, key, err := l.Info(expiration)
|
||||
if err != nil {
|
||||
ctp.log.Errf("unable to get log info: %s", err)
|
||||
return
|
||||
}
|
||||
_, err = ctp.pub.SubmitToSingleCTWithResult(context.Background(), &pubpb.Request{
|
||||
LogURL: uri,
|
||||
LogPublicKey: key,
|
||||
Der: cert,
|
||||
Precert: true,
|
||||
})
|
||||
if err != nil {
|
||||
ctp.log.Warningf("ct submission to informational log %q failed: %s", uri, err)
|
||||
}
|
||||
}(log)
|
||||
}
|
||||
}
|
||||
|
||||
// SubmitFinalCert submits finalized certificates created from precertificates
|
||||
// to any configured logs
|
||||
func (ctp *CTPolicy) SubmitFinalCert(cert []byte, expiration time.Time) {
|
||||
for _, log := range ctp.finalLogs {
|
||||
// to any configured "final" logs, but does not care about success.
|
||||
func (ctp *CTPolicy) SubmitFinalCert(cert core.CertDER, expiration time.Time) {
|
||||
if len(ctp.sctLogs) != 0 {
|
||||
ctp.submitAllBestEffort(cert, false, expiration)
|
||||
return
|
||||
}
|
||||
|
||||
// TODO(#5938): Remove this when it becomes dead code.
|
||||
for _, log := range ctp.final {
|
||||
go func(l ctconfig.LogDescription) {
|
||||
uri, key, err := l.Info(expiration)
|
||||
if err != nil {
|
||||
|
|
|
@ -4,12 +4,14 @@ import (
|
|||
"context"
|
||||
"errors"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/letsencrypt/boulder/cmd"
|
||||
"github.com/letsencrypt/boulder/core"
|
||||
"github.com/letsencrypt/boulder/ctpolicy/ctconfig"
|
||||
"github.com/letsencrypt/boulder/ctpolicy/loglist"
|
||||
berrors "github.com/letsencrypt/boulder/errors"
|
||||
blog "github.com/letsencrypt/boulder/log"
|
||||
"github.com/letsencrypt/boulder/metrics"
|
||||
|
@ -31,7 +33,7 @@ func (mp *alwaysFail) SubmitToSingleCTWithResult(_ context.Context, _ *pubpb.Req
|
|||
return nil, errors.New("BAD")
|
||||
}
|
||||
|
||||
func TestGetSCTs(t *testing.T) {
|
||||
func TestGetGoogleSCTs(t *testing.T) {
|
||||
expired, cancel := context.WithDeadline(context.Background(), time.Now())
|
||||
defer cancel()
|
||||
missingSCTErr := berrors.MissingSCTs
|
||||
|
@ -115,7 +117,7 @@ func TestGetSCTs(t *testing.T) {
|
|||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
ctp := New(tc.mock, tc.groups, nil, blog.NewMock(), metrics.NoopRegisterer)
|
||||
ctp := New(tc.mock, tc.groups, nil, nil, nil, nil, 0, blog.NewMock(), metrics.NoopRegisterer)
|
||||
ret, err := ctp.GetSCTs(tc.ctx, []byte{0}, time.Time{})
|
||||
if tc.result != nil {
|
||||
test.AssertDeepEquals(t, ret, tc.result)
|
||||
|
@ -131,6 +133,94 @@ func TestGetSCTs(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGetOperatorSCTs(t *testing.T) {
|
||||
expired, cancel := context.WithDeadline(context.Background(), time.Now())
|
||||
defer cancel()
|
||||
missingSCTErr := berrors.MissingSCTs
|
||||
testCases := []struct {
|
||||
name string
|
||||
mock pubpb.PublisherClient
|
||||
groups loglist.List
|
||||
ctx context.Context
|
||||
result core.SCTDERs
|
||||
expectErr string
|
||||
berrorType *berrors.ErrorType
|
||||
}{
|
||||
{
|
||||
name: "basic success case",
|
||||
mock: &mockPub{},
|
||||
groups: loglist.List{
|
||||
"OperA": {
|
||||
"LogA1": {Url: "UrlA1", Key: "KeyA1"},
|
||||
"LogA2": {Url: "UrlA2", Key: "KeyA2"},
|
||||
},
|
||||
"OperB": {
|
||||
"LogB1": {Url: "UrlB1", Key: "KeyB1"},
|
||||
},
|
||||
"OperC": {
|
||||
"LogC1": {Url: "UrlC1", Key: "KeyC1"},
|
||||
},
|
||||
},
|
||||
ctx: context.Background(),
|
||||
result: core.SCTDERs{[]byte{0}, []byte{0}},
|
||||
},
|
||||
{
|
||||
name: "basic failure case",
|
||||
mock: &alwaysFail{},
|
||||
groups: loglist.List{
|
||||
"OperA": {
|
||||
"LogA1": {Url: "UrlA1", Key: "KeyA1"},
|
||||
"LogA2": {Url: "UrlA2", Key: "KeyA2"},
|
||||
},
|
||||
"OperB": {
|
||||
"LogB1": {Url: "UrlB1", Key: "KeyB1"},
|
||||
},
|
||||
"OperC": {
|
||||
"LogC1": {Url: "UrlC1", Key: "KeyC1"},
|
||||
},
|
||||
},
|
||||
ctx: context.Background(),
|
||||
expectErr: "failed to get 2 SCTs, got error(s):",
|
||||
berrorType: &missingSCTErr,
|
||||
},
|
||||
{
|
||||
name: "parent context timeout failure case",
|
||||
mock: &alwaysFail{},
|
||||
groups: loglist.List{
|
||||
"OperA": {
|
||||
"LogA1": {Url: "UrlA1", Key: "KeyA1"},
|
||||
"LogA2": {Url: "UrlA2", Key: "KeyA2"},
|
||||
},
|
||||
"OperB": {
|
||||
"LogB1": {Url: "UrlB1", Key: "KeyB1"},
|
||||
},
|
||||
"OperC": {
|
||||
"LogC1": {Url: "UrlC1", Key: "KeyC1"},
|
||||
},
|
||||
},
|
||||
ctx: expired,
|
||||
expectErr: "failed to get 2 SCTs before ctx finished",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
ctp := New(tc.mock, nil, nil, tc.groups, nil, nil, 0, blog.NewMock(), metrics.NoopRegisterer)
|
||||
ret, err := ctp.GetSCTs(tc.ctx, []byte{0}, time.Time{})
|
||||
if tc.result != nil {
|
||||
test.AssertDeepEquals(t, ret, tc.result)
|
||||
} else if tc.expectErr != "" {
|
||||
if !strings.Contains(err.Error(), tc.expectErr) {
|
||||
t.Errorf("Error %q did not match expected %q", err, tc.expectErr)
|
||||
}
|
||||
if tc.berrorType != nil {
|
||||
test.AssertErrorIs(t, err, *tc.berrorType)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// failOne is a test double which (judging by its name and field — its methods
// are defined below, outside this view) fails submissions directed at a single
// log, identified by URL, while letting others proceed. NOTE(review): confirm
// against the method definitions.
type failOne struct {
	// badURL is the URL of the log whose submissions should fail.
	badURL string
}
|
||||
|
@ -165,7 +255,7 @@ func TestGetSCTsMetrics(t *testing.T) {
|
|||
{URI: "ghi", Key: "jkl"},
|
||||
},
|
||||
},
|
||||
}, nil, blog.NewMock(), metrics.NoopRegisterer)
|
||||
}, nil, nil, nil, nil, 0, blog.NewMock(), metrics.NoopRegisterer)
|
||||
_, err := ctp.GetSCTs(context.Background(), []byte{0}, time.Time{})
|
||||
test.AssertNotError(t, err, "GetSCTs failed")
|
||||
test.AssertMetricWithLabelsEquals(t, ctp.winnerCounter, prometheus.Labels{"log": "ghi", "group": "a"}, 1)
|
||||
|
@ -182,7 +272,7 @@ func TestGetSCTsFailMetrics(t *testing.T) {
|
|||
{URI: "abc", Key: "def"},
|
||||
},
|
||||
},
|
||||
}, nil, blog.NewMock(), metrics.NoopRegisterer)
|
||||
}, nil, nil, nil, nil, 0, blog.NewMock(), metrics.NoopRegisterer)
|
||||
_, err := ctp.GetSCTs(context.Background(), []byte{0}, time.Time{})
|
||||
if err == nil {
|
||||
t.Fatal("GetSCTs should have failed")
|
||||
|
@ -200,7 +290,7 @@ func TestGetSCTsFailMetrics(t *testing.T) {
|
|||
{URI: "abc", Key: "def"},
|
||||
},
|
||||
},
|
||||
}, nil, blog.NewMock(), metrics.NoopRegisterer)
|
||||
}, nil, nil, nil, nil, 0, blog.NewMock(), metrics.NoopRegisterer)
|
||||
_, err = ctp.GetSCTs(ctx, []byte{0}, time.Time{})
|
||||
if err == nil {
|
||||
t.Fatal("GetSCTs should have failed")
|
||||
|
@ -229,7 +319,7 @@ func TestStagger(t *testing.T) {
|
|||
{URI: "ghi", Key: "jkl"},
|
||||
},
|
||||
},
|
||||
}, nil, blog.NewMock(), metrics.NoopRegisterer)
|
||||
}, nil, nil, nil, nil, 0, blog.NewMock(), metrics.NoopRegisterer)
|
||||
_, err := ctp.GetSCTs(context.Background(), []byte{0}, time.Time{})
|
||||
test.AssertNotError(t, err, "GetSCTs failed")
|
||||
if countingPub.count != 1 {
|
||||
|
|
|
@ -0,0 +1,42 @@
|
|||
package loglist
|
||||
|
||||
import "sync"
|
||||
|
||||
// lintlist caches the log list used by the e_scts_from_same_operator lint.
// The embedded sync.Once guarantees the list/err fields are written at most
// once (by InitLintList) and are safe to read afterwards.
var lintlist struct {
	sync.Once
	// list is the cached, Validation-filtered log list; zero until Do runs.
	list List
	// err records any failure during initialization; sticky across calls.
	err error
}
|
||||
|
||||
// InitLintList creates and stores a loglist intended for linting (i.e. with
// purpose Validation). We have to store this in a global because the zlint
// framework doesn't (yet) support configuration, so the e_scts_from_same_operator
// lint cannot load a log list on its own. Instead, we have the CA call this
// initialization function at startup, and have the lint call the getter below
// to get access to the cached list.
//
// Note: because of the sync.Once, only the first call does any work; later
// calls (even with a different path) simply return the first call's error.
func InitLintList(path string) error {
	lintlist.Do(func() {
		// Load the full list from disk.
		l, err := New(path)
		if err != nil {
			lintlist.err = err
			return
		}

		// Restrict it to logs whose SCTs Chrome will trust (Usable/Readonly).
		l, err = l.forPurpose(Validation)
		if err != nil {
			lintlist.err = err
			return
		}

		lintlist.list = l
	})

	return lintlist.err
}
|
||||
|
||||
// GetLintList returns the log list initialized by InitLintList. This must
// only be called after InitLintList has been called on the same (or parent)
// goroutine. If InitLintList failed (or was never called), the returned List
// is nil/empty.
func GetLintList() List {
	return lintlist.list
}
|
|
@ -0,0 +1,318 @@
|
|||
package loglist
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/letsencrypt/boulder/ctpolicy/loglist/schema"
|
||||
)
|
||||
|
||||
// purpose is the use to which a log list will be put. This type exists to allow
// the following consts to be declared for use by LogList consumers. It is
// unexported so callers can only use the three predeclared values below.
type purpose string

// Issuance means that the new log list should only contain Usable logs, which
// can issue SCTs that will be trusted by all Chrome clients.
const Issuance purpose = "scts"

// Informational means that the new log list can contain Usable, Qualified, and
// Pending logs, which will all accept submissions but not necessarily be
// trusted by Chrome clients.
const Informational purpose = "info"

// Validation means that the new log list should only contain Usable and
// Readonly logs, whose SCTs will be trusted by all Chrome clients but aren't
// necessarily still issuing SCTs today.
const Validation purpose = "lint"
|
||||
|
||||
// List represents a list of logs, grouped by their operator, arranged by
// the "v3" schema as published by Chrome:
// https://www.gstatic.com/ct/log_list/v3/log_list_schema.json
// It exports no fields so that consumers don't have to deal with the terrible
// autogenerated names of the structs it wraps. Keys are operator names.
type List map[string]OperatorGroup

// OperatorGroup represents a group of logs which are all run by the same
// operator organization. It provides constant-time lookup of logs within the
// group by their unique ID (the base64 SHA-256 of the log's public key).
type OperatorGroup map[string]Log

// Log represents a single log run by an operator. It contains just the info
// necessary to contact a log, and to determine whether that log will accept
// the submission of a certificate with a given expiration.
type Log struct {
	// Name is the log's human-readable Description from the log list.
	Name string
	// Url is the base URL of the log's RFC 6962 HTTP API.
	Url string
	// Key is the log's base64-encoded public key.
	Key string
	// StartInclusive/EndExclusive bound the certificate expiry times this
	// log accepts; both are zero for non-temporally-sharded logs.
	StartInclusive time.Time
	EndExclusive   time.Time
	// State is the log's status (usable, retired, etc.) per the list.
	State state
}
|
||||
|
||||
// state is an enum representing the various states a CT log can be in. Only
// pending, qualified, and usable logs can be submitted to. Only usable and
// readonly logs are trusted by Chrome.
type state int

const (
	// unknown is the zero value, used when the log list omits a state.
	unknown state = iota
	pending
	qualified
	usable
	readonly
	retired
	rejected
)
|
||||
|
||||
func stateFromState(s *schema.LogListSchemaJsonOperatorsElemLogsElemState) state {
|
||||
if s.Rejected != nil {
|
||||
return rejected
|
||||
} else if s.Retired != nil {
|
||||
return retired
|
||||
} else if s.Readonly != nil {
|
||||
return readonly
|
||||
} else if s.Pending != nil {
|
||||
return pending
|
||||
} else if s.Qualified != nil {
|
||||
return qualified
|
||||
} else if s.Usable != nil {
|
||||
return usable
|
||||
}
|
||||
return unknown
|
||||
}
|
||||
|
||||
// usableForPurpose returns true if the log state is acceptable for the given
|
||||
// log list purpose, and false otherwise.
|
||||
func usableForPurpose(s state, p purpose) bool {
|
||||
switch p {
|
||||
case Issuance:
|
||||
return s == usable
|
||||
case Informational:
|
||||
return s == usable || s == qualified || s == pending
|
||||
case Validation:
|
||||
return s == usable || s == readonly
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// New returns a LogList of all operators and all logs parsed from the file at
|
||||
// the given path. The file must conform to the JSON Schema published by Google:
|
||||
// https://www.gstatic.com/ct/log_list/v3/log_list_schema.json
|
||||
func New(path string) (List, error) {
|
||||
file, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read CT Log List: %w", err)
|
||||
}
|
||||
|
||||
return newHelper(file)
|
||||
}
|
||||
|
||||
// newHelper is a helper to allow the core logic of `New()` to be unit tested
// without having to write files to disk. It parses the given bytes as a
// Chrome v3-schema log list and flattens them into our List representation.
func newHelper(file []byte) (List, error) {
	var parsed schema.LogListSchemaJson
	err := json.Unmarshal(file, &parsed)
	if err != nil {
		return nil, fmt.Errorf("failed to parse CT Log List: %w", err)
	}

	result := make(List)
	for _, op := range parsed.Operators {
		group := make(OperatorGroup)
		for _, log := range op.Logs {
			// Description is optional in the schema; fall back to "" when
			// it is absent.
			var name string
			if log.Description != nil {
				name = *log.Description
			}

			info := Log{
				Name:  name,
				Url:   log.Url,
				Key:   log.Key,
				State: stateFromState(log.State),
			}

			// Temporal intervals are optional; logs without one are left
			// with zero StartInclusive/EndExclusive, which PickOne treats
			// as "accepts any expiry".
			if log.TemporalInterval != nil {
				startInclusive, err := time.Parse(time.RFC3339, log.TemporalInterval.StartInclusive)
				if err != nil {
					return nil, fmt.Errorf("failed to parse log %q start timestamp: %w", log.Url, err)
				}

				endExclusive, err := time.Parse(time.RFC3339, log.TemporalInterval.EndExclusive)
				if err != nil {
					return nil, fmt.Errorf("failed to parse log %q end timestamp: %w", log.Url, err)
				}

				info.StartInclusive = startInclusive
				info.EndExclusive = endExclusive
			}

			// Logs are keyed by their RFC 6962 LogID within the group.
			group[log.LogId] = info
		}
		result[op.Name] = group
	}

	return result, nil
}
|
||||
|
||||
// SubsetForPurpose returns a new log list containing only those logs whose
|
||||
// names match those in the given list, and whose state is acceptable for the
|
||||
// given purpose. It returns an error if any of the given names are not found
|
||||
// in the starting list, or if the resulting list is too small to satisfy the
|
||||
// Chrome "two operators" policy.
|
||||
func (ll List) SubsetForPurpose(names []string, p purpose) (List, error) {
|
||||
sub, err := ll.subset(names)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res, err := sub.forPurpose(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// subset returns a new log list containing only those logs whose names match
|
||||
// those in the given list. It returns an error if any of the given names are
|
||||
// not found.
|
||||
func (ll List) subset(names []string) (List, error) {
|
||||
remaining := make(map[string]struct{}, len(names))
|
||||
for _, name := range names {
|
||||
remaining[name] = struct{}{}
|
||||
}
|
||||
|
||||
newList := make(List)
|
||||
for operator, group := range ll {
|
||||
newGroup := make(OperatorGroup)
|
||||
for id, log := range group {
|
||||
if _, found := remaining[log.Name]; !found {
|
||||
continue
|
||||
}
|
||||
|
||||
newLog := Log{
|
||||
Name: log.Name,
|
||||
Url: log.Url,
|
||||
Key: log.Key,
|
||||
State: log.State,
|
||||
StartInclusive: log.StartInclusive,
|
||||
EndExclusive: log.EndExclusive,
|
||||
}
|
||||
|
||||
newGroup[id] = newLog
|
||||
delete(remaining, newLog.Name)
|
||||
}
|
||||
if len(newGroup) > 0 {
|
||||
newList[operator] = newGroup
|
||||
}
|
||||
}
|
||||
|
||||
if len(remaining) > 0 {
|
||||
missed := make([]string, len(remaining))
|
||||
for name := range remaining {
|
||||
missed = append(missed, fmt.Sprintf("%q", name))
|
||||
}
|
||||
return nil, fmt.Errorf("failed to find logs matching name(s): %s", strings.Join(missed, ", "))
|
||||
}
|
||||
|
||||
return newList, nil
|
||||
}
|
||||
|
||||
// forPurpose returns a new log list containing only those logs whose states are
|
||||
// acceptable for the given purpose. It returns an error if the purpose is
|
||||
// Issuance or Validation and the set of remaining logs is too small to satisfy
|
||||
// the Google "two operators" log policy.
|
||||
func (ll List) forPurpose(p purpose) (List, error) {
|
||||
newList := make(List)
|
||||
for operator, group := range ll {
|
||||
newGroup := make(OperatorGroup)
|
||||
for id, log := range group {
|
||||
if !usableForPurpose(log.State, p) {
|
||||
continue
|
||||
}
|
||||
|
||||
newLog := Log{
|
||||
Name: log.Name,
|
||||
Url: log.Url,
|
||||
Key: log.Key,
|
||||
State: log.State,
|
||||
StartInclusive: log.StartInclusive,
|
||||
EndExclusive: log.EndExclusive,
|
||||
}
|
||||
|
||||
newGroup[id] = newLog
|
||||
}
|
||||
if len(newGroup) > 0 {
|
||||
newList[operator] = newGroup
|
||||
}
|
||||
}
|
||||
|
||||
if len(newList) < 2 && p != Informational {
|
||||
return nil, errors.New("log list does not have enough groups to satisfy Chrome policy")
|
||||
}
|
||||
|
||||
return newList, nil
|
||||
}
|
||||
|
||||
// OperatorForLogID returns the Name of the Group containing the Log with the
|
||||
// given ID, or an error if no such log/group can be found.
|
||||
func (ll List) OperatorForLogID(logID string) (string, error) {
|
||||
for op, group := range ll {
|
||||
if _, found := group[logID]; found {
|
||||
return op, nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("no log with ID %q found", logID)
|
||||
}
|
||||
|
||||
// Permute returns the list of operator group names in a randomized order.
|
||||
func (ll List) Permute() []string {
|
||||
keys := make([]string, 0, len(ll))
|
||||
for k := range ll {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
|
||||
result := make([]string, len(ll))
|
||||
for i, j := range rand.Perm(len(ll)) {
|
||||
result[i] = keys[j]
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// PickOne returns the URI and Public Key of a single randomly-selected log
|
||||
// which is run by the given operator and whose temporal interval includes the
|
||||
// given expiry time. It returns an error if no such log can be found.
|
||||
func (ll List) PickOne(operator string, expiry time.Time) (string, string, error) {
|
||||
group, ok := ll[operator]
|
||||
if !ok {
|
||||
return "", "", fmt.Errorf("no log operator group named %q", operator)
|
||||
}
|
||||
|
||||
candidates := make([]Log, 0)
|
||||
for _, log := range group {
|
||||
if log.StartInclusive.IsZero() || log.EndExclusive.IsZero() {
|
||||
candidates = append(candidates, log)
|
||||
continue
|
||||
}
|
||||
|
||||
if (log.StartInclusive.Equal(expiry) || log.StartInclusive.Before(expiry)) && log.EndExclusive.After(expiry) {
|
||||
candidates = append(candidates, log)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure rand.Intn below won't panic.
|
||||
if len(candidates) < 1 {
|
||||
return "", "", fmt.Errorf("no log found for group %q and expiry %s", operator, expiry)
|
||||
}
|
||||
|
||||
log := candidates[rand.Intn(len(candidates))]
|
||||
return log.Url, log.Key, nil
|
||||
}
|
|
@ -0,0 +1,208 @@
|
|||
package loglist
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/letsencrypt/boulder/test"
|
||||
)
|
||||
|
||||
// TestNew is currently empty. NOTE(review): New's file-reading path has no
// direct coverage; the parsing logic is exercised only via the other tests'
// use of hand-built List values — consider adding a fixture-file test here.
func TestNew(t *testing.T) {

}
|
||||
|
||||
// TestSubset exercises List.subset: nil/empty name lists yield empty results,
// unknown names produce an error, and known names produce a filtered list
// that drops operators with no matching logs.
func TestSubset(t *testing.T) {
	input := List{
		"Operator A": {
			"ID A1": Log{Name: "Log A1"},
			"ID A2": Log{Name: "Log A2"},
		},
		"Operator B": {
			"ID B1": Log{Name: "Log B1"},
			"ID B2": Log{Name: "Log B2"},
		},
		"Operator C": {
			"ID C1": Log{Name: "Log C1"},
			"ID C2": Log{Name: "Log C2"},
		},
	}

	actual, err := input.subset(nil)
	test.AssertNotError(t, err, "nil names should not error")
	test.AssertEquals(t, len(actual), 0)

	actual, err = input.subset([]string{})
	test.AssertNotError(t, err, "empty names should not error")
	test.AssertEquals(t, len(actual), 0)

	actual, err = input.subset([]string{"Other Log"})
	test.AssertError(t, err, "wrong name should result in error")
	test.AssertEquals(t, len(actual), 0)

	// "Operator C" is omitted entirely: none of its logs were requested.
	// "Log B2" is likewise omitted from Operator B's group.
	expected := List{
		"Operator A": {
			"ID A1": Log{Name: "Log A1"},
			"ID A2": Log{Name: "Log A2"},
		},
		"Operator B": {
			"ID B1": Log{Name: "Log B1"},
		},
	}
	actual, err = input.subset([]string{"Log B1", "Log A1", "Log A2"})
	test.AssertNotError(t, err, "normal usage should not error")
	test.AssertDeepEquals(t, actual, expected)
}
|
||||
|
||||
// TestForPurpose exercises List.forPurpose for each of the three purposes:
// Issuance keeps only usable logs (and errors when fewer than two operator
// groups survive), Validation keeps usable+readonly, and Informational keeps
// usable+qualified+pending and is exempt from the two-group requirement.
func TestForPurpose(t *testing.T) {
	input := List{
		"Operator A": {
			"ID A1": Log{Name: "Log A1", State: usable},
			"ID A2": Log{Name: "Log A2", State: rejected},
		},
		"Operator B": {
			"ID B1": Log{Name: "Log B1", State: usable},
			"ID B2": Log{Name: "Log B2", State: retired},
		},
		"Operator C": {
			"ID C1": Log{Name: "Log C1", State: pending},
			"ID C2": Log{Name: "Log C2", State: readonly},
		},
	}
	// Issuance: only the two usable logs remain, in two distinct groups.
	expected := List{
		"Operator A": {
			"ID A1": Log{Name: "Log A1", State: usable},
		},
		"Operator B": {
			"ID B1": Log{Name: "Log B1", State: usable},
		},
	}
	actual, err := input.forPurpose(Issuance)
	test.AssertNotError(t, err, "should have two acceptable logs")
	test.AssertDeepEquals(t, actual, expected)

	// With Log B1 downgraded to qualified, only one group survives for
	// Issuance, violating the two-operator policy.
	input = List{
		"Operator A": {
			"ID A1": Log{Name: "Log A1", State: usable},
			"ID A2": Log{Name: "Log A2", State: rejected},
		},
		"Operator B": {
			"ID B1": Log{Name: "Log B1", State: qualified},
			"ID B2": Log{Name: "Log B2", State: retired},
		},
		"Operator C": {
			"ID C1": Log{Name: "Log C1", State: pending},
			"ID C2": Log{Name: "Log C2", State: readonly},
		},
	}
	_, err = input.forPurpose(Issuance)
	test.AssertError(t, err, "should only have one acceptable log")

	// Validation accepts usable and readonly logs.
	expected = List{
		"Operator A": {
			"ID A1": Log{Name: "Log A1", State: usable},
		},
		"Operator C": {
			"ID C2": Log{Name: "Log C2", State: readonly},
		},
	}
	actual, err = input.forPurpose(Validation)
	test.AssertNotError(t, err, "should have two acceptable logs")
	test.AssertDeepEquals(t, actual, expected)

	// Informational accepts usable, qualified, and pending logs.
	expected = List{
		"Operator A": {
			"ID A1": Log{Name: "Log A1", State: usable},
		},
		"Operator B": {
			"ID B1": Log{Name: "Log B1", State: qualified},
		},
		"Operator C": {
			"ID C1": Log{Name: "Log C1", State: pending},
		},
	}
	actual, err = input.forPurpose(Informational)
	test.AssertNotError(t, err, "should have three acceptable logs")
	test.AssertDeepEquals(t, actual, expected)
}
|
||||
|
||||
// TestOperatorForLogID checks that a log ID maps back to its operator's name,
// and that an unknown ID results in an error.
func TestOperatorForLogID(t *testing.T) {
	input := List{
		"Operator A": {
			"ID A1": Log{Name: "Log A1", State: usable},
		},
		"Operator B": {
			"ID B1": Log{Name: "Log B1", State: qualified},
		},
	}

	actual, err := input.OperatorForLogID("ID B1")
	test.AssertNotError(t, err, "should have found log")
	test.AssertEquals(t, actual, "Operator B")

	_, err = input.OperatorForLogID("Other ID")
	test.AssertError(t, err, "should not have found log")
}
|
||||
|
||||
// TestPermute checks that Permute returns every operator name exactly once.
// Because the order is randomized, only membership and length are asserted.
func TestPermute(t *testing.T) {
	input := List{
		"Operator A": {
			"ID A1": Log{Name: "Log A1", State: usable},
			"ID A2": Log{Name: "Log A2", State: rejected},
		},
		"Operator B": {
			"ID B1": Log{Name: "Log B1", State: qualified},
			"ID B2": Log{Name: "Log B2", State: retired},
		},
		"Operator C": {
			"ID C1": Log{Name: "Log C1", State: pending},
			"ID C2": Log{Name: "Log C2", State: readonly},
		},
	}

	actual := input.Permute()
	test.AssertEquals(t, len(actual), 3)
	test.AssertSliceContains(t, actual, "Operator A")
	test.AssertSliceContains(t, actual, "Operator B")
	test.AssertSliceContains(t, actual, "Operator C")
}
|
||||
|
||||
// TestPickOne covers PickOne's three failure/success axes: unknown operator,
// expiry outside every log's temporal interval (including the exclusive end
// bound), and random selection among multiple interval-matching logs.
func TestPickOne(t *testing.T) {
	date0 := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)
	date1 := time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC)
	date2 := time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC)

	input := List{
		"Operator A": {
			"ID A1": Log{Name: "Log A1"},
		},
	}
	_, _, err := input.PickOne("Operator B", date0)
	test.AssertError(t, err, "should have failed to find operator")

	input = List{
		"Operator A": {
			"ID A1": Log{Name: "Log A1", StartInclusive: date0, EndExclusive: date1},
		},
	}
	// date2 is past the interval; date1 equals the exclusive end, so both
	// must fail. date0 is the inclusive start, so it (and any later time
	// inside the interval) must succeed.
	_, _, err = input.PickOne("Operator A", date2)
	test.AssertError(t, err, "should have failed to find log")
	_, _, err = input.PickOne("Operator A", date1)
	test.AssertError(t, err, "should have failed to find log")
	_, _, err = input.PickOne("Operator A", date0)
	test.AssertNotError(t, err, "should have found a log")
	_, _, err = input.PickOne("Operator A", date0.Add(time.Hour))
	test.AssertNotError(t, err, "should have found a log")

	// With two logs covering the queried interval, the result must be one
	// of the two matching URL/key pairs (the choice is random).
	input = List{
		"Operator A": {
			"ID A1": Log{Name: "Log A1", StartInclusive: date0, EndExclusive: date1, Key: "KA1", Url: "UA1"},
			"ID A2": Log{Name: "Log A2", StartInclusive: date1, EndExclusive: date2, Key: "KA2", Url: "UA2"},
			"ID B1": Log{Name: "Log B1", StartInclusive: date0, EndExclusive: date1, Key: "KB1", Url: "UB1"},
			"ID B2": Log{Name: "Log B2", StartInclusive: date1, EndExclusive: date2, Key: "KB2", Url: "UB2"},
		},
	}
	url, key, err := input.PickOne("Operator A", date0.Add(time.Hour))
	test.AssertNotError(t, err, "should have found a log")
	test.AssertSliceContains(t, []string{"UA1", "UB1"}, url)
	test.AssertSliceContains(t, []string{"KA1", "KB1"}, key)
}
|
|
@ -0,0 +1,280 @@
|
|||
{
|
||||
"type": "object",
|
||||
"id": "https://www.gstatic.com/ct/log_list/v3/log_list_schema.json",
|
||||
"$schema": "http://json-schema.org/draft-07/schema",
|
||||
"required": [
|
||||
"operators"
|
||||
],
|
||||
"definitions": {
|
||||
"state": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"timestamp": {
|
||||
"description": "The time at which the log entered this state.",
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"examples": [
|
||||
"2018-01-01T00:00:00Z"
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"timestamp"
|
||||
]
|
||||
}
|
||||
},
|
||||
"properties": {
|
||||
"version": {
|
||||
"type": "string",
|
||||
"title": "Version of this log list",
|
||||
"description": "The version will change whenever a change is made to any part of this log list.",
|
||||
"examples": [
|
||||
"1",
|
||||
"1.0.0",
|
||||
"1.0.0b"
|
||||
]
|
||||
},
|
||||
"log_list_timestamp": {
|
||||
"description": "The time at which this version of the log list was published.",
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"examples": [
|
||||
"2018-01-01T00:00:00Z"
|
||||
]
|
||||
},
|
||||
"operators": {
|
||||
"title": "CT log operators",
|
||||
"description": "People/organizations that run Certificate Transparency logs.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"name",
|
||||
"email",
|
||||
"logs"
|
||||
],
|
||||
"properties": {
|
||||
"name": {
|
||||
"title": "Name of this log operator",
|
||||
"type": "string"
|
||||
},
|
||||
"email": {
|
||||
"title": "CT log operator email addresses",
|
||||
"description": "The log operator can be contacted using any of these email addresses.",
|
||||
"type": "array",
|
||||
"minItems": 1,
|
||||
"uniqueItems": true,
|
||||
"items": {
|
||||
"type": "string",
|
||||
"format": "email"
|
||||
}
|
||||
},
|
||||
"logs": {
|
||||
"description": "Details of Certificate Transparency logs run by this operator.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"key",
|
||||
"log_id",
|
||||
"mmd",
|
||||
"url"
|
||||
],
|
||||
"properties": {
|
||||
"description": {
|
||||
"title": "Description of the CT log",
|
||||
"description": "A human-readable description that can be used to identify this log.",
|
||||
"type": "string"
|
||||
},
|
||||
"key": {
|
||||
"title": "The public key of the CT log",
|
||||
"description": "The log's public key as a DER-encoded ASN.1 SubjectPublicKeyInfo structure, then encoded as base64 (https://tools.ietf.org/html/rfc5280#section-4.1.2.7).",
|
||||
"type": "string"
|
||||
},
|
||||
"log_id": {
|
||||
"title": "The SHA-256 hash of the CT log's public key, base64-encoded",
|
||||
"description": "This is the LogID found in SCTs issued by this log (https://tools.ietf.org/html/rfc6962#section-3.2).",
|
||||
"type": "string",
|
||||
"minLength": 44,
|
||||
"maxLength": 44
|
||||
},
|
||||
"mmd": {
|
||||
"title": "The Maximum Merge Delay, in seconds",
|
||||
"description": "The CT log should not take longer than this to incorporate a certificate (https://tools.ietf.org/html/rfc6962#section-3).",
|
||||
"type": "number",
|
||||
"minimum": 1,
|
||||
"default": 86400
|
||||
},
|
||||
"url": {
|
||||
"title": "The base URL of the CT log's HTTP API",
|
||||
"description": "The API endpoints are defined in https://tools.ietf.org/html/rfc6962#section-4.",
|
||||
"type": "string",
|
||||
"format": "uri",
|
||||
"examples": [
|
||||
"https://ct.googleapis.com/pilot/"
|
||||
]
|
||||
},
|
||||
"dns": {
|
||||
"title": "The domain name of the CT log's DNS API",
|
||||
"description": "The API endpoints are defined in https://github.com/google/certificate-transparency-rfcs/blob/master/dns/draft-ct-over-dns.md.",
|
||||
"type": "string",
|
||||
"format": "hostname",
|
||||
"examples": [
|
||||
"pilot.ct.googleapis.com"
|
||||
]
|
||||
},
|
||||
"temporal_interval": {
|
||||
"description": "The log will only accept certificates that expire (have a NotAfter date) between these dates.",
|
||||
"type": "object",
|
||||
"required": [
|
||||
"start_inclusive",
|
||||
"end_exclusive"
|
||||
],
|
||||
"properties": {
|
||||
"start_inclusive": {
|
||||
"description": "All certificates must expire on this date or later.",
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"examples": [
|
||||
"2018-01-01T00:00:00Z"
|
||||
]
|
||||
},
|
||||
"end_exclusive": {
|
||||
"description": "All certificates must expire before this date.",
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"examples": [
|
||||
"2019-01-01T00:00:00Z"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"log_type": {
|
||||
"description": "The purpose of this log, e.g. test.",
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"prod",
|
||||
"test"
|
||||
]
|
||||
},
|
||||
"state": {
|
||||
"title": "The state of the log from the log list distributor's perspective.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"pending": {
|
||||
"$ref": "#/definitions/state"
|
||||
},
|
||||
"qualified": {
|
||||
"$ref": "#/definitions/state"
|
||||
},
|
||||
"usable": {
|
||||
"$ref": "#/definitions/state"
|
||||
},
|
||||
"readonly": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/state"
|
||||
},
|
||||
{
|
||||
"required": [
|
||||
"final_tree_head"
|
||||
],
|
||||
"properties": {
|
||||
"final_tree_head": {
|
||||
"description": "The tree head (tree size and root hash) at which the log was made read-only.",
|
||||
"type": "object",
|
||||
"required": [
|
||||
"tree_size",
|
||||
"sha256_root_hash"
|
||||
],
|
||||
"properties": {
|
||||
"tree_size": {
|
||||
"type": "number",
|
||||
"minimum": 0
|
||||
},
|
||||
"sha256_root_hash": {
|
||||
"type": "string",
|
||||
"minLength": 44,
|
||||
"maxLength": 44
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"retired": {
|
||||
"$ref": "#/definitions/state"
|
||||
},
|
||||
"rejected": {
|
||||
"$ref": "#/definitions/state"
|
||||
}
|
||||
},
|
||||
"oneOf": [
|
||||
{
|
||||
"required": [
|
||||
"pending"
|
||||
]
|
||||
},
|
||||
{
|
||||
"required": [
|
||||
"qualified"
|
||||
]
|
||||
},
|
||||
{
|
||||
"required": [
|
||||
"usable"
|
||||
]
|
||||
},
|
||||
{
|
||||
"required": [
|
||||
"readonly"
|
||||
]
|
||||
},
|
||||
{
|
||||
"required": [
|
||||
"retired"
|
||||
]
|
||||
},
|
||||
{
|
||||
"required": [
|
||||
"rejected"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"previous_operators": {
|
||||
"title": "Previous operators that ran this log in the past, if any.",
|
||||
"description": "If the log has changed operators, this will contain a list of the previous operators, along with the timestamp when they stopped operating the log.",
|
||||
"type": "array",
|
||||
"uniqueItems": true,
|
||||
"items": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"name",
|
||||
"end_time"
|
||||
],
|
||||
"properties": {
|
||||
"name": {
|
||||
"title": "Name of the log operator",
|
||||
"type": "string"
|
||||
},
|
||||
"end_time": {
|
||||
"description": "The time at which this operator stopped operating this log.",
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"examples": [
|
||||
"2018-01-01T00:00:00Z"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,269 @@
|
|||
// Code generated by github.com/atombender/go-jsonschema, DO NOT EDIT.
|
||||
|
||||
package schema
|
||||
|
||||
import "fmt"
|
||||
import "encoding/json"
|
||||
import "reflect"
|
||||
|
||||
type LogListSchemaJson struct {
|
||||
// The time at which this version of the log list was published.
|
||||
LogListTimestamp *string `json:"log_list_timestamp,omitempty"`
|
||||
|
||||
// People/organizations that run Certificate Transparency logs.
|
||||
Operators []LogListSchemaJsonOperatorsElem `json:"operators"`
|
||||
|
||||
// The version will change whenever a change is made to any part of this log list.
|
||||
Version *string `json:"version,omitempty"`
|
||||
}
|
||||
|
||||
type LogListSchemaJsonOperatorsElem struct {
|
||||
// The log operator can be contacted using any of these email addresses.
|
||||
Email []string `json:"email"`
|
||||
|
||||
// Details of Certificate Transparency logs run by this operator.
|
||||
Logs []LogListSchemaJsonOperatorsElemLogsElem `json:"logs"`
|
||||
|
||||
// Name corresponds to the JSON schema field "name".
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type LogListSchemaJsonOperatorsElemLogsElem struct {
|
||||
// A human-readable description that can be used to identify this log.
|
||||
Description *string `json:"description,omitempty"`
|
||||
|
||||
// The API endpoints are defined in
|
||||
// https://github.com/google/certificate-transparency-rfcs/blob/master/dns/draft-ct-over-dns.md.
|
||||
Dns *string `json:"dns,omitempty"`
|
||||
|
||||
// The log's public key as a DER-encoded ASN.1 SubjectPublicKeyInfo structure,
|
||||
// then encoded as base64 (https://tools.ietf.org/html/rfc5280#section-4.1.2.7).
|
||||
Key string `json:"key"`
|
||||
|
||||
// This is the LogID found in SCTs issued by this log
|
||||
// (https://tools.ietf.org/html/rfc6962#section-3.2).
|
||||
LogId string `json:"log_id"`
|
||||
|
||||
// The purpose of this log, e.g. test.
|
||||
LogType *LogListSchemaJsonOperatorsElemLogsElemLogType `json:"log_type,omitempty"`
|
||||
|
||||
// The CT log should not take longer than this to incorporate a certificate
|
||||
// (https://tools.ietf.org/html/rfc6962#section-3).
|
||||
Mmd float64 `json:"mmd"`
|
||||
|
||||
// If the log has changed operators, this will contain a list of the previous
|
||||
// operators, along with the timestamp when they stopped operating the log.
|
||||
PreviousOperators []LogListSchemaJsonOperatorsElemLogsElemPreviousOperatorsElem `json:"previous_operators,omitempty"`
|
||||
|
||||
// State corresponds to the JSON schema field "state".
|
||||
State *LogListSchemaJsonOperatorsElemLogsElemState `json:"state,omitempty"`
|
||||
|
||||
// The log will only accept certificates that expire (have a NotAfter date)
|
||||
// between these dates.
|
||||
TemporalInterval *LogListSchemaJsonOperatorsElemLogsElemTemporalInterval `json:"temporal_interval,omitempty"`
|
||||
|
||||
// The API endpoints are defined in https://tools.ietf.org/html/rfc6962#section-4.
|
||||
Url string `json:"url"`
|
||||
}
|
||||
|
||||
type LogListSchemaJsonOperatorsElemLogsElemLogType string
|
||||
|
||||
const LogListSchemaJsonOperatorsElemLogsElemLogTypeProd LogListSchemaJsonOperatorsElemLogsElemLogType = "prod"
|
||||
const LogListSchemaJsonOperatorsElemLogsElemLogTypeTest LogListSchemaJsonOperatorsElemLogsElemLogType = "test"
|
||||
|
||||
type LogListSchemaJsonOperatorsElemLogsElemPreviousOperatorsElem struct {
|
||||
// The time at which this operator stopped operating this log.
|
||||
EndTime string `json:"end_time"`
|
||||
|
||||
// Name corresponds to the JSON schema field "name".
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type LogListSchemaJsonOperatorsElemLogsElemState struct {
|
||||
// Pending corresponds to the JSON schema field "pending".
|
||||
Pending *State `json:"pending,omitempty"`
|
||||
|
||||
// Qualified corresponds to the JSON schema field "qualified".
|
||||
Qualified *State `json:"qualified,omitempty"`
|
||||
|
||||
// Readonly corresponds to the JSON schema field "readonly".
|
||||
Readonly interface{} `json:"readonly,omitempty"`
|
||||
|
||||
// Rejected corresponds to the JSON schema field "rejected".
|
||||
Rejected *State `json:"rejected,omitempty"`
|
||||
|
||||
// Retired corresponds to the JSON schema field "retired".
|
||||
Retired *State `json:"retired,omitempty"`
|
||||
|
||||
// Usable corresponds to the JSON schema field "usable".
|
||||
Usable *State `json:"usable,omitempty"`
|
||||
}
|
||||
|
||||
// The log will only accept certificates that expire (have a NotAfter date) between
|
||||
// these dates.
|
||||
type LogListSchemaJsonOperatorsElemLogsElemTemporalInterval struct {
|
||||
// All certificates must expire before this date.
|
||||
EndExclusive string `json:"end_exclusive"`
|
||||
|
||||
// All certificates must expire on this date or later.
|
||||
StartInclusive string `json:"start_inclusive"`
|
||||
}
|
||||
|
||||
type State struct {
|
||||
// The time at which the log entered this state.
|
||||
Timestamp string `json:"timestamp"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (j *LogListSchemaJsonOperatorsElemLogsElemPreviousOperatorsElem) UnmarshalJSON(b []byte) error {
|
||||
var raw map[string]interface{}
|
||||
if err := json.Unmarshal(b, &raw); err != nil {
|
||||
return err
|
||||
}
|
||||
if v, ok := raw["end_time"]; !ok || v == nil {
|
||||
return fmt.Errorf("field end_time: required")
|
||||
}
|
||||
if v, ok := raw["name"]; !ok || v == nil {
|
||||
return fmt.Errorf("field name: required")
|
||||
}
|
||||
type Plain LogListSchemaJsonOperatorsElemLogsElemPreviousOperatorsElem
|
||||
var plain Plain
|
||||
if err := json.Unmarshal(b, &plain); err != nil {
|
||||
return err
|
||||
}
|
||||
*j = LogListSchemaJsonOperatorsElemLogsElemPreviousOperatorsElem(plain)
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (j *LogListSchemaJsonOperatorsElemLogsElemTemporalInterval) UnmarshalJSON(b []byte) error {
|
||||
var raw map[string]interface{}
|
||||
if err := json.Unmarshal(b, &raw); err != nil {
|
||||
return err
|
||||
}
|
||||
if v, ok := raw["end_exclusive"]; !ok || v == nil {
|
||||
return fmt.Errorf("field end_exclusive: required")
|
||||
}
|
||||
if v, ok := raw["start_inclusive"]; !ok || v == nil {
|
||||
return fmt.Errorf("field start_inclusive: required")
|
||||
}
|
||||
type Plain LogListSchemaJsonOperatorsElemLogsElemTemporalInterval
|
||||
var plain Plain
|
||||
if err := json.Unmarshal(b, &plain); err != nil {
|
||||
return err
|
||||
}
|
||||
*j = LogListSchemaJsonOperatorsElemLogsElemTemporalInterval(plain)
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (j *LogListSchemaJsonOperatorsElemLogsElemLogType) UnmarshalJSON(b []byte) error {
|
||||
var v string
|
||||
if err := json.Unmarshal(b, &v); err != nil {
|
||||
return err
|
||||
}
|
||||
var ok bool
|
||||
for _, expected := range enumValues_LogListSchemaJsonOperatorsElemLogsElemLogType {
|
||||
if reflect.DeepEqual(v, expected) {
|
||||
ok = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_LogListSchemaJsonOperatorsElemLogsElemLogType, v)
|
||||
}
|
||||
*j = LogListSchemaJsonOperatorsElemLogsElemLogType(v)
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (j *LogListSchemaJsonOperatorsElemLogsElem) UnmarshalJSON(b []byte) error {
|
||||
var raw map[string]interface{}
|
||||
if err := json.Unmarshal(b, &raw); err != nil {
|
||||
return err
|
||||
}
|
||||
if v, ok := raw["key"]; !ok || v == nil {
|
||||
return fmt.Errorf("field key: required")
|
||||
}
|
||||
if v, ok := raw["log_id"]; !ok || v == nil {
|
||||
return fmt.Errorf("field log_id: required")
|
||||
}
|
||||
if v, ok := raw["url"]; !ok || v == nil {
|
||||
return fmt.Errorf("field url: required")
|
||||
}
|
||||
type Plain LogListSchemaJsonOperatorsElemLogsElem
|
||||
var plain Plain
|
||||
if err := json.Unmarshal(b, &plain); err != nil {
|
||||
return err
|
||||
}
|
||||
if v, ok := raw["mmd"]; !ok || v == nil {
|
||||
plain.Mmd = 86400
|
||||
}
|
||||
*j = LogListSchemaJsonOperatorsElemLogsElem(plain)
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (j *State) UnmarshalJSON(b []byte) error {
|
||||
var raw map[string]interface{}
|
||||
if err := json.Unmarshal(b, &raw); err != nil {
|
||||
return err
|
||||
}
|
||||
if v, ok := raw["timestamp"]; !ok || v == nil {
|
||||
return fmt.Errorf("field timestamp: required")
|
||||
}
|
||||
type Plain State
|
||||
var plain Plain
|
||||
if err := json.Unmarshal(b, &plain); err != nil {
|
||||
return err
|
||||
}
|
||||
*j = State(plain)
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (j *LogListSchemaJsonOperatorsElem) UnmarshalJSON(b []byte) error {
|
||||
var raw map[string]interface{}
|
||||
if err := json.Unmarshal(b, &raw); err != nil {
|
||||
return err
|
||||
}
|
||||
if v, ok := raw["email"]; !ok || v == nil {
|
||||
return fmt.Errorf("field email: required")
|
||||
}
|
||||
if v, ok := raw["logs"]; !ok || v == nil {
|
||||
return fmt.Errorf("field logs: required")
|
||||
}
|
||||
if v, ok := raw["name"]; !ok || v == nil {
|
||||
return fmt.Errorf("field name: required")
|
||||
}
|
||||
type Plain LogListSchemaJsonOperatorsElem
|
||||
var plain Plain
|
||||
if err := json.Unmarshal(b, &plain); err != nil {
|
||||
return err
|
||||
}
|
||||
*j = LogListSchemaJsonOperatorsElem(plain)
|
||||
return nil
|
||||
}
|
||||
|
||||
var enumValues_LogListSchemaJsonOperatorsElemLogsElemLogType = []interface{}{
|
||||
"prod",
|
||||
"test",
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (j *LogListSchemaJson) UnmarshalJSON(b []byte) error {
|
||||
var raw map[string]interface{}
|
||||
if err := json.Unmarshal(b, &raw); err != nil {
|
||||
return err
|
||||
}
|
||||
if v, ok := raw["operators"]; !ok || v == nil {
|
||||
return fmt.Errorf("field operators: required")
|
||||
}
|
||||
type Plain LogListSchemaJson
|
||||
var plain Plain
|
||||
if err := json.Unmarshal(b, &plain); err != nil {
|
||||
return err
|
||||
}
|
||||
*j = LogListSchemaJson(plain)
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,24 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
# This script updates the log list JSON Schema and the Go structs generated
|
||||
# from that schema.
|
||||
|
||||
# It is not intended to be run on a regular basis; we do not expect the JSON
|
||||
# Schema to change. It is retained here for historical purposes, so that if/when
|
||||
# the schema does change, or the ecosystem moves to a v4 version of the schema,
|
||||
# regenerating these files will be quick and easy.
|
||||
|
||||
# This script expects github.com/atombender/go-jsonschema to be installed:
|
||||
if ! command -v gojsonschema
|
||||
then
|
||||
echo "Install gojsonschema, then re-run this script:"
|
||||
echo "go install github.com/atombender/go-jsonschema/cmd/gojsonschema@latest"
|
||||
fi
|
||||
|
||||
this_dir=$(dirname $(readlink -f "${0}"))
|
||||
|
||||
curl https://www.gstatic.com/ct/log_list/v3/log_list_schema.json >| "${this_dir}"/log_list_schema.json
|
||||
|
||||
gojsonschema -p schema "${this_dir}"/log_list_schema.json >| "${this_dir}"/schema.go
|
|
@ -11,6 +11,7 @@ import (
|
|||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
|
@ -23,6 +24,7 @@ import (
|
|||
"github.com/jmhodges/clock"
|
||||
"github.com/letsencrypt/boulder/cmd"
|
||||
"github.com/letsencrypt/boulder/core"
|
||||
"github.com/letsencrypt/boulder/ctpolicy/loglist"
|
||||
"github.com/letsencrypt/boulder/linter"
|
||||
"github.com/letsencrypt/boulder/policyasn1"
|
||||
"github.com/letsencrypt/boulder/test"
|
||||
|
@ -539,7 +541,11 @@ func TestIssue(t *testing.T) {
|
|||
linter, err := linter.New(
|
||||
issuerCert.Certificate,
|
||||
issuerSigner,
|
||||
[]string{"w_ct_sct_policy_count_unsatisfied", "n_subject_common_name_included"},
|
||||
[]string{
|
||||
"w_ct_sct_policy_count_unsatisfied",
|
||||
"e_scts_from_same_operator",
|
||||
"n_subject_common_name_included",
|
||||
},
|
||||
)
|
||||
test.AssertNotError(t, err, "failed to create linter")
|
||||
signer, err := NewIssuer(issuerCert, issuerSigner, defaultProfile(), linter, fc)
|
||||
|
@ -575,7 +581,10 @@ func TestIssueRSA(t *testing.T) {
|
|||
linter, err := linter.New(
|
||||
issuerCert.Certificate,
|
||||
issuerSigner,
|
||||
[]string{"w_ct_sct_policy_count_unsatisfied"},
|
||||
[]string{
|
||||
"w_ct_sct_policy_count_unsatisfied",
|
||||
"e_scts_from_same_operator",
|
||||
},
|
||||
)
|
||||
test.AssertNotError(t, err, "failed to create linter")
|
||||
signer, err := NewIssuer(issuerCert, issuerSigner, defaultProfile(), linter, fc)
|
||||
|
@ -606,7 +615,10 @@ func TestIssueCTPoison(t *testing.T) {
|
|||
linter, err := linter.New(
|
||||
issuerCert.Certificate,
|
||||
issuerSigner,
|
||||
[]string{"w_ct_sct_policy_count_unsatisfied"},
|
||||
[]string{
|
||||
"w_ct_sct_policy_count_unsatisfied",
|
||||
"e_scts_from_same_operator",
|
||||
},
|
||||
)
|
||||
test.AssertNotError(t, err, "failed to create linter")
|
||||
signer, err := NewIssuer(issuerCert, issuerSigner, defaultProfile(), linter, fc)
|
||||
|
@ -635,22 +647,35 @@ func TestIssueCTPoison(t *testing.T) {
|
|||
func TestIssueSCTList(t *testing.T) {
|
||||
fc := clock.NewFake()
|
||||
fc.Set(time.Now())
|
||||
err := loglist.InitLintList("../test/ct-test-srv/log_list.json")
|
||||
test.AssertNotError(t, err, "failed to load log list")
|
||||
linter, err := linter.New(
|
||||
issuerCert.Certificate,
|
||||
issuerSigner,
|
||||
[]string{"w_ct_sct_policy_count_unsatisfied"},
|
||||
[]string{},
|
||||
)
|
||||
test.AssertNotError(t, err, "failed to create linter")
|
||||
signer, err := NewIssuer(issuerCert, issuerSigner, defaultProfile(), linter, fc)
|
||||
test.AssertNotError(t, err, "NewIssuer failed")
|
||||
pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
test.AssertNotError(t, err, "failed to generate test key")
|
||||
logID1, err := base64.StdEncoding.DecodeString("OJiMlNA1mMOTLd/pI7q68npCDrlsQeFaqAwasPwEvQM=")
|
||||
test.AssertNotError(t, err, "failed to decode ct log ID")
|
||||
logID2, err := base64.StdEncoding.DecodeString("UtToynGEyMkkXDMQei8Ll54oMwWHI0IieDEKs12/Td4=")
|
||||
test.AssertNotError(t, err, "failed to decode ct log ID")
|
||||
certBytes, err := signer.Issue(&IssuanceRequest{
|
||||
PublicKey: pk.Public(),
|
||||
Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9},
|
||||
DNSNames: []string{"example.com"},
|
||||
SCTList: []ct.SignedCertificateTimestamp{
|
||||
{},
|
||||
{
|
||||
SCTVersion: ct.V1,
|
||||
LogID: ct.LogID{KeyID: *(*[32]byte)(logID1)},
|
||||
},
|
||||
{
|
||||
SCTVersion: ct.V1,
|
||||
LogID: ct.LogID{KeyID: *(*[32]byte)(logID2)},
|
||||
},
|
||||
},
|
||||
NotBefore: fc.Now(),
|
||||
NotAfter: fc.Now().Add(time.Hour - time.Second),
|
||||
|
@ -664,8 +689,15 @@ func TestIssueSCTList(t *testing.T) {
|
|||
test.AssertDeepEquals(t, cert.PublicKey, pk.Public())
|
||||
test.AssertEquals(t, len(cert.Extensions), 9) // Constraints, KU, EKU, SKID, AKID, AIA, SAN, Policies, SCT list
|
||||
test.AssertDeepEquals(t, cert.Extensions[8], pkix.Extension{
|
||||
Id: sctListOID,
|
||||
Value: []byte{4, 51, 0, 49, 0, 47, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||
Id: sctListOID,
|
||||
Value: []byte{
|
||||
4, 100, 0, 98, 0, 47, 0, 56, 152, 140, 148, 208, 53, 152, 195, 147, 45,
|
||||
223, 233, 35, 186, 186, 242, 122, 66, 14, 185, 108, 65, 225, 90, 168, 12,
|
||||
26, 176, 252, 4, 189, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 47,
|
||||
0, 82, 212, 232, 202, 113, 132, 200, 201, 36, 92, 51, 16, 122, 47, 11,
|
||||
151, 158, 40, 51, 5, 135, 35, 66, 34, 120, 49, 10, 179, 93, 191, 77, 222,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -675,7 +707,10 @@ func TestIssueMustStaple(t *testing.T) {
|
|||
linter, err := linter.New(
|
||||
issuerCert.Certificate,
|
||||
issuerSigner,
|
||||
[]string{"w_ct_sct_policy_count_unsatisfied"},
|
||||
[]string{
|
||||
"w_ct_sct_policy_count_unsatisfied",
|
||||
"e_scts_from_same_operator",
|
||||
},
|
||||
)
|
||||
test.AssertNotError(t, err, "failed to create linter")
|
||||
signer, err := NewIssuer(issuerCert, issuerSigner, defaultProfile(), linter, fc)
|
||||
|
@ -718,7 +753,7 @@ func TestIssueBadLint(t *testing.T) {
|
|||
NotAfter: fc.Now().Add(time.Hour - time.Second),
|
||||
})
|
||||
test.AssertError(t, err, "Issue didn't fail")
|
||||
test.AssertEquals(t, err.Error(), "tbsCertificate linting failed: failed lints: w_ct_sct_policy_count_unsatisfied")
|
||||
test.AssertContains(t, err.Error(), "tbsCertificate linting failed: failed lints")
|
||||
}
|
||||
|
||||
func TestLoadChain_Valid(t *testing.T) {
|
||||
|
|
|
@ -180,7 +180,7 @@ func check(lintCert *zlintx509.Certificate, lints lint.Registry) error {
|
|||
var failedLints []string
|
||||
for lintName, result := range lintRes.Results {
|
||||
if result.Status > lint.Pass {
|
||||
failedLints = append(failedLints, lintName)
|
||||
failedLints = append(failedLints, fmt.Sprintf("%s (%s)", lintName, result.Details))
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("failed lints: %s", strings.Join(failedLints, ", "))
|
||||
|
|
|
@ -18,6 +18,7 @@ const (
|
|||
LetsEncryptCPSIntermediate lint.LintSource = "LECPSIntermediate"
|
||||
LetsEncryptCPSRoot lint.LintSource = "LECPSRoot"
|
||||
LetsEncryptCPSSubscriber lint.LintSource = "LECPSSubscriber"
|
||||
ChromeCTPolicy lint.LintSource = "ChromeCT"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
|
@ -0,0 +1,86 @@
|
|||
package subscriber
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/zmap/zcrypto/x509"
|
||||
"github.com/zmap/zcrypto/x509/ct"
|
||||
"github.com/zmap/zlint/v3/lint"
|
||||
"github.com/zmap/zlint/v3/util"
|
||||
|
||||
"github.com/letsencrypt/boulder/ctpolicy/loglist"
|
||||
"github.com/letsencrypt/boulder/linter/lints"
|
||||
)
|
||||
|
||||
type sctsFromSameOperator struct {
|
||||
logList loglist.List
|
||||
}
|
||||
|
||||
func init() {
|
||||
lint.RegisterLint(&lint.Lint{
|
||||
Name: "e_scts_from_same_operator",
|
||||
Description: "Let's Encrypt Subscriber Certificates have two SCTs from logs run by different operators",
|
||||
Citation: "Chrome CT Policy",
|
||||
Source: lints.ChromeCTPolicy,
|
||||
EffectiveDate: time.Date(2022, time.April, 15, 0, 0, 0, 0, time.UTC),
|
||||
Lint: NewSCTsFromSameOperator,
|
||||
})
|
||||
}
|
||||
|
||||
func NewSCTsFromSameOperator() lint.LintInterface {
|
||||
return &sctsFromSameOperator{logList: loglist.GetLintList()}
|
||||
}
|
||||
|
||||
func (l *sctsFromSameOperator) CheckApplies(c *x509.Certificate) bool {
|
||||
return util.IsSubscriberCert(c) && !util.IsExtInCert(c, util.CtPoisonOID)
|
||||
}
|
||||
|
||||
func (l *sctsFromSameOperator) Execute(c *x509.Certificate) *lint.LintResult {
|
||||
if len(l.logList) == 0 {
|
||||
return &lint.LintResult{
|
||||
Status: lint.NE,
|
||||
Details: "Failed to load log list, unable to check Certificate SCTs.",
|
||||
}
|
||||
}
|
||||
|
||||
if len(c.SignedCertificateTimestampList) < 2 {
|
||||
return &lint.LintResult{
|
||||
Status: lint.Error,
|
||||
Details: "Certificate had too few embedded SCTs; browser policy requires 2.",
|
||||
}
|
||||
}
|
||||
|
||||
logIDs := make(map[ct.SHA256Hash]struct{})
|
||||
for _, sct := range c.SignedCertificateTimestampList {
|
||||
logIDs[sct.LogID] = struct{}{}
|
||||
}
|
||||
|
||||
if len(logIDs) < 2 {
|
||||
return &lint.LintResult{
|
||||
Status: lint.Error,
|
||||
Details: "Certificate SCTs from too few distinct logs; browser policy requires 2.",
|
||||
}
|
||||
}
|
||||
|
||||
operatorNames := make(map[string]struct{})
|
||||
for logID := range logIDs {
|
||||
operator, err := l.logList.OperatorForLogID(logID.Base64String())
|
||||
if err != nil {
|
||||
// This certificate *may* have more than 2 SCTs, so missing one now isn't
|
||||
// a problem.
|
||||
continue
|
||||
}
|
||||
operatorNames[operator] = struct{}{}
|
||||
}
|
||||
|
||||
if len(operatorNames) < 2 {
|
||||
return &lint.LintResult{
|
||||
Status: lint.Error,
|
||||
Details: "Certificate SCTs from too few distinct log operators; browser policy requires 2.",
|
||||
}
|
||||
}
|
||||
|
||||
return &lint.LintResult{
|
||||
Status: lint.Pass,
|
||||
}
|
||||
}
|
|
@ -347,7 +347,7 @@ func initAuthorities(t *testing.T) (*DummyValidationAuthority, sapb.StorageAutho
|
|||
Status: string(core.StatusValid),
|
||||
})
|
||||
|
||||
ctp := ctpolicy.New(&mocks.PublisherClient{}, nil, nil, log, metrics.NoopRegisterer)
|
||||
ctp := ctpolicy.New(&mocks.PublisherClient{}, nil, nil, nil, nil, nil, 0, log, metrics.NoopRegisterer)
|
||||
|
||||
ra := NewRegistrationAuthorityImpl(fc,
|
||||
log,
|
||||
|
@ -3159,8 +3159,7 @@ func TestCTPolicyMeasurements(t *testing.T) {
|
|||
_, ssa, ra, _, cleanup := initAuthorities(t)
|
||||
defer cleanup()
|
||||
|
||||
ctp := ctpolicy.New(&timeoutPub{}, []ctconfig.CTGroup{{}}, nil, log, metrics.NoopRegisterer)
|
||||
ra.ctpolicy = ctp
|
||||
ra.ctpolicy = ctpolicy.New(&timeoutPub{}, []ctconfig.CTGroup{{}}, nil, nil, nil, nil, 0, log, metrics.NoopRegisterer)
|
||||
|
||||
// Create valid authorizations for not-example.com and www.not-example.com
|
||||
exp := ra.clk.Now().Add(365 * 24 * time.Hour)
|
||||
|
|
|
@ -151,6 +151,17 @@ func AssertNotContains(t *testing.T, haystack string, needle string) {
|
|||
}
|
||||
}
|
||||
|
||||
// AssertSliceContains determines if needle can be found in haystack
|
||||
func AssertSliceContains[T comparable](t *testing.T, haystack []T, needle T) {
|
||||
t.Helper()
|
||||
for _, item := range haystack {
|
||||
if item == needle {
|
||||
return
|
||||
}
|
||||
}
|
||||
t.Fatalf("Slice %v does not contain %v", haystack, needle)
|
||||
}
|
||||
|
||||
// AssertMetricWithLabelsEquals determines whether the value held by a prometheus Collector
|
||||
// (e.g. Gauge, Counter, CounterVec, etc) is equal to the expected float64.
|
||||
// In order to make useful assertions about just a subset of labels (e.g. for a
|
||||
|
|
|
@ -92,10 +92,11 @@
|
|||
"orphanQueueDir": "/tmp/orphaned-certificates-a",
|
||||
"ocspLogMaxLength": 4000,
|
||||
"ocspLogPeriod": "500ms",
|
||||
"ecdsaAllowListFilename": "test/config-next/ecdsaAllowList.yml",
|
||||
"ctLogListFile": "test/ct-test-srv/log_list.json",
|
||||
"features": {
|
||||
"NonCFSSLSigner": true
|
||||
},
|
||||
"ecdsaAllowListFilename": "test/config-next/ecdsaAllowList.yml"
|
||||
}
|
||||
},
|
||||
|
||||
"pa": {
|
||||
|
|
|
@ -92,10 +92,11 @@
|
|||
"orphanQueueDir": "/tmp/orphaned-certificates-b",
|
||||
"ocspLogMaxLength": 4000,
|
||||
"ocspLogPeriod": "500ms",
|
||||
"ecdsaAllowListFilename": "test/config-next/ecdsaAllowList.yml",
|
||||
"ctLogListFile": "test/ct-test-srv/log_list.json",
|
||||
"features": {
|
||||
"NonCFSSLSigner": true
|
||||
},
|
||||
"ecdsaAllowListFilename": "test/config-next/ecdsaAllowList.yml"
|
||||
}
|
||||
},
|
||||
|
||||
"pa": {
|
||||
|
|
|
@ -15,7 +15,8 @@
|
|||
"acceptableValidityDurations": ["7776000s"],
|
||||
"ignoredLints": [
|
||||
"n_subject_common_name_included"
|
||||
]
|
||||
],
|
||||
"ctLogListFile": "test/ct-test-srv/log_list.json"
|
||||
},
|
||||
|
||||
"pa": {
|
||||
|
|
|
@ -61,66 +61,30 @@
|
|||
"AllowReRevocation": true,
|
||||
"MozRevocationReasons": true
|
||||
},
|
||||
"CTLogGroups2": [
|
||||
{
|
||||
"name": "a",
|
||||
"stagger": "500ms",
|
||||
"logs": [
|
||||
{
|
||||
"uri": "http://boulder:4500",
|
||||
"key": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEYggOxPnPkzKBIhTacSYoIfnSL2jPugcbUKx83vFMvk5gKAz/AGe87w20riuPwEGn229hKVbEKHFB61NIqNHC3Q==",
|
||||
"submitFinalCert": false
|
||||
},
|
||||
{
|
||||
"uri": "http://boulder:4501",
|
||||
"key": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEKtnFevaXV/kB8dmhCNZHmxKVLcHX1plaAsY9LrKilhYxdmQZiu36LvAvosTsqMVqRK9a96nC8VaxAdaHUbM8EA==",
|
||||
"submitFinalCert": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "b",
|
||||
"stagger": "500ms",
|
||||
"logs": [
|
||||
{
|
||||
"uri": "http://boulder:4510",
|
||||
"key": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEyw1HymhJkuxSIgt3gqW3sVXqMqB3EFsXcMfPFo0vYwjNiRmCJDXKsR0Flp7MAK+wc3X/7Hpc8liUbMhPet7tEA==",
|
||||
"submitFinalCert": true
|
||||
},
|
||||
{
|
||||
"name": "temporal test set",
|
||||
"shards": [
|
||||
{
|
||||
"uri": "http://boulder:4511",
|
||||
"key": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFRu37ZRLg8lT4rVQwMwh4oAOpXb4Sx+9hgQ+JFCjmAv3oDV+sDOMsC7hULkGTn+LB5L1SRo/XIY4Kw5V+nFXgg==",
|
||||
"windowStart": "2006-01-02T15:04:05Z",
|
||||
"windowEnd": "2017-01-02T15:04:05Z"
|
||||
},
|
||||
{
|
||||
"uri": "http://boulder:4511",
|
||||
"key": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFRu37ZRLg8lT4rVQwMwh4oAOpXb4Sx+9hgQ+JFCjmAv3oDV+sDOMsC7hULkGTn+LB5L1SRo/XIY4Kw5V+nFXgg==",
|
||||
"windowStart": "2017-01-02T15:04:05Z",
|
||||
"windowEnd": "2022-01-02T15:04:05Z"
|
||||
},
|
||||
{
|
||||
"uri": "http://boulder:4511",
|
||||
"key": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFRu37ZRLg8lT4rVQwMwh4oAOpXb4Sx+9hgQ+JFCjmAv3oDV+sDOMsC7hULkGTn+LB5L1SRo/XIY4Kw5V+nFXgg==",
|
||||
"windowStart": "2022-01-02T15:04:05Z",
|
||||
"windowEnd": "2050-01-02T15:04:05Z"
|
||||
}
|
||||
],
|
||||
"submitFinalCert": true
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"InformationalCTLogs": [
|
||||
{
|
||||
"uri": "http://boulder:4512",
|
||||
"key": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFRu37ZRLg8lT4rVQwMwh4oAOpXb4Sx+9hgQ+JFCjmAv3oDV+sDOMsC7hULkGTn+LB5L1SRo/XIY4Kw5V+nFXgg==",
|
||||
"submitFinalCert": true
|
||||
}
|
||||
]
|
||||
"ctLogs": {
|
||||
"stagger": "500ms",
|
||||
"logListFile": "test/ct-test-srv/log_list.json",
|
||||
"sctLogs": [
|
||||
"A1 Current",
|
||||
"A1 Future",
|
||||
"A2 Past",
|
||||
"A2 Current",
|
||||
"B1",
|
||||
"B2",
|
||||
"C1",
|
||||
"D1",
|
||||
"E1"
|
||||
],
|
||||
"infoLogs": [
|
||||
"F1"
|
||||
],
|
||||
"finalLogs": [
|
||||
"A1 Current",
|
||||
"A1 Future",
|
||||
"C1",
|
||||
"F1"
|
||||
]
|
||||
}
|
||||
},
|
||||
|
||||
"pa": {
|
||||
|
|
|
@ -4,67 +4,91 @@
|
|||
"UserAgent": "boulder/1.0",
|
||||
"Addr": ":4500",
|
||||
"PrivKey": "MHcCAQEEIOCtGlGt/WT7471dOHdfBg43uJWJoZDkZAQjWfTitcVNoAoGCCqGSM49AwEHoUQDQgAEYggOxPnPkzKBIhTacSYoIfnSL2jPugcbUKx83vFMvk5gKAz/AGe87w20riuPwEGn229hKVbEKHFB61NIqNHC3Q==",
|
||||
"LatencySchedule": [
|
||||
0.1,
|
||||
0.1,
|
||||
1,
|
||||
5,
|
||||
60,
|
||||
5,
|
||||
0.3
|
||||
]
|
||||
"FlakinessRate": 30
|
||||
},
|
||||
{
|
||||
"UserAgent": "boulder/1.0",
|
||||
"Addr": ":4501",
|
||||
"PrivKey": "MHcCAQEEIJSCFDYXt2xCIxv+G8BCzGdUsFIQDWEjxfJDfnn9JB5loAoGCCqGSM49AwEHoUQDQgAEKtnFevaXV/kB8dmhCNZHmxKVLcHX1plaAsY9LrKilhYxdmQZiu36LvAvosTsqMVqRK9a96nC8VaxAdaHUbM8EA==",
|
||||
"LatencySchedule": [
|
||||
0.7,
|
||||
0.3,
|
||||
2,
|
||||
0.0,
|
||||
0.0,
|
||||
2,
|
||||
0.2,
|
||||
0.3,
|
||||
0.2,
|
||||
5,
|
||||
0.1
|
||||
]
|
||||
"FlakinessRate": 2
|
||||
},
|
||||
{
|
||||
"UserAgent": "boulder/1.0",
|
||||
"Addr": ":4510",
|
||||
"PrivKey": "MHcCAQEEIBtqLTgjiM9nMaUQkbsE1vQWYXpJP0uLqVLV73U2UzlioAoGCCqGSM49AwEHoUQDQgAEyw1HymhJkuxSIgt3gqW3sVXqMqB3EFsXcMfPFo0vYwjNiRmCJDXKsR0Flp7MAK+wc3X/7Hpc8liUbMhPet7tEA==",
|
||||
"LatencySchedule": [
|
||||
0.7,
|
||||
0.3,
|
||||
2,
|
||||
0.0,
|
||||
0.0,
|
||||
2,
|
||||
0.2,
|
||||
0.3,
|
||||
0.2,
|
||||
7,
|
||||
0.1
|
||||
]
|
||||
"FlakinessRate": 2
|
||||
},
|
||||
{
|
||||
"UserAgent": "boulder/1.0",
|
||||
"Addr": ":4511",
|
||||
"PrivKey": "MHcCAQEEINwaal89BqkwvQ6r3uOj7R5VEjJi5iSDbAhlYyDhZv/joAoGCCqGSM49AwEHoUQDQgAEFRu37ZRLg8lT4rVQwMwh4oAOpXb4Sx+9hgQ+JFCjmAv3oDV+sDOMsC7hULkGTn+LB5L1SRo/XIY4Kw5V+nFXgg==",
|
||||
"LatencySchedule": [
|
||||
100
|
||||
]
|
||||
"FlakinessRate": 100
|
||||
},
|
||||
{
|
||||
"UserAgent": "boulder/1.0",
|
||||
"Addr": ":4512",
|
||||
"PrivKey": "MHcCAQEEINwaal89BqkwvQ6r3uOj7R5VEjJi5iSDbAhlYyDhZv/joAoGCCqGSM49AwEHoUQDQgAEFRu37ZRLg8lT4rVQwMwh4oAOpXb4Sx+9hgQ+JFCjmAv3oDV+sDOMsC7hULkGTn+LB5L1SRo/XIY4Kw5V+nFXgg==",
|
||||
"LatencySchedule": [
|
||||
100
|
||||
]
|
||||
"FlakinessRate": 100
|
||||
},
|
||||
{
|
||||
"UserAgent": "boulder/1.0",
|
||||
"Addr": ":4600",
|
||||
"PrivKey": "MHcCAQEEIArwh8VhAPXaUocPILwSJrQF1E2OXtY7O2aJyjGIR7UPoAoGCCqGSM49AwEHoUQDQgAExhriVaEwBOtdNzg5EOtJBHl/u+ua1FtCR/CBXQ1kvpFelcP3gozLNXyxV/UexuifpmzTN31CdfdHv1kK3KDIxQ==",
|
||||
"FlakinessRate": 1
|
||||
},
|
||||
{
|
||||
"UserAgent": "boulder/1.0",
|
||||
"Addr": ":4601",
|
||||
"PrivKey": "MHcCAQEEINk7TLYXyJznFl32p62xfZZTarZJTWZe+8u1HF3xmn2doAoGCCqGSM49AwEHoUQDQgAE7uzW0zXQpWIk7MZUBdTu1muNzekMCIv/kn16+ifndQ584DElobOJ0ZlcACz9WdFyGTjOCfAqBmFybX2OJKfFVg==",
|
||||
"FlakinessRate": 1
|
||||
},
|
||||
{
|
||||
"UserAgent": "boulder/1.0",
|
||||
"Addr": ":4602",
|
||||
"PrivKey": "MHcCAQEEIFJD5JlN30x8i3EkSHF8UuB4fG2WEqXrDD4NiswocRseoAoGCCqGSM49AwEHoUQDQgAE/s5W5OHfowdLA7KerJ+mOizfHJE6Snfib8ueoBYl8Y12lpOoJTtCmmrx4m9KAb9AptInWpGrIaLY+5Y29l2eGw==",
|
||||
"FlakinessRate": 1
|
||||
},
|
||||
{
|
||||
"UserAgent": "boulder/1.0",
|
||||
"Addr": ":4603",
|
||||
"PrivKey": "MHcCAQEEIDrGahcizJgStF+Zf9h29wLZhNKyasQ2TMieIdHNn3ZBoAoGCCqGSM49AwEHoUQDQgAE2EFdA2UBfbJ2Sw1413hBN9YESyABmTGbdgcMh0l/GyV3eFrFjcVS0laNphkfRZ+qkcMbeF+IIHqVzxHAM/2mQQ==",
|
||||
"FlakinessRate": 1
|
||||
},
|
||||
{
|
||||
"UserAgent": "boulder/1.0",
|
||||
"Addr": ":4604",
|
||||
"PrivKey": "MHcCAQEEIH6JmZXVRq2KDWJinKsDxv7gDzw0WEepfXu5s1VQvAHfoAoGCCqGSM49AwEHoUQDQgAEAMSHwrzvr/KvNmUT55+uQo7CXQLPx1X+qEdKGekUg1q/InN/E37bCY/x45wC00qgiE0D3xoxnUJbKaCQcAX39w==",
|
||||
"FlakinessRate": 2
|
||||
},
|
||||
{
|
||||
"UserAgent": "boulder/1.0",
|
||||
"Addr": ":4605",
|
||||
"PrivKey": "MHcCAQEEIOkBiM7jy65TfsJTMxDwIcv3TD/FVTe/aXG4QUUXiQ98oAoGCCqGSM49AwEHoUQDQgAEzmpksKS/mHgJZ821po3ldwonsz3K19jwsZgNSGYvEuzAVtWbGfY+6aUXua7f8WK8l2amHETISOY4JTRwk5QFyw==",
|
||||
"FlakinessRate": 98
|
||||
},
|
||||
{
|
||||
"UserAgent": "boulder/1.0",
|
||||
"Addr": ":4606",
|
||||
"PrivKey": "MHcCAQEEIHIAfD/dxvjxSLAW22Pz8xZR7eCJp2VcVgMID+VmhHtNoAoGCCqGSM49AwEHoUQDQgAE31BxBVCdehxOC35jJzvAPNrU4ZjNXbmxS+zSN5DSkpJWQUp5wUHPGnXiSCtx7jXnTYLVzslIyXWpNN8m8BiKjQ==",
|
||||
"FlakinessRate": 2
|
||||
},
|
||||
{
|
||||
"UserAgent": "boulder/1.0",
|
||||
"Addr": ":4607",
|
||||
"PrivKey": "MHcCAQEEIMly7UpXClsaVP1Con6jTgiL6ZTuarj0kWxdo3NqNJWVoAoGCCqGSM49AwEHoUQDQgAEAjRx6Mhc/U4Ye7NzsZ7bbKMGhKVpGZHpZJMzLzNIveBAPh5OBDHpSdn9RY58t4diH8YLjqCi9o+k1T5RwiFbfQ==",
|
||||
"FlakinessRate": 2
|
||||
},
|
||||
{
|
||||
"UserAgent": "boulder/1.0",
|
||||
"Addr": ":4608",
|
||||
"PrivKey": "MHcCAQEEIJF8W76HJanaUjvSX/mnjwwtBZ0yq1YD/PPvbWJuLhESoAoGCCqGSM49AwEHoUQDQgAEsHFSkgrlrwIY0PG79tOZhPvBzrnrpbrWa3pG2FfkLeEJQ2Uvgw1oTZZ+oXcrm4Yb3khWDbpkzDbupI+e8xloeA==",
|
||||
"FlakinessRate": 20
|
||||
},
|
||||
{
|
||||
"UserAgent": "boulder/1.0",
|
||||
"Addr": ":4609",
|
||||
"PrivKey": "MHcCAQEEIIazaamUIxkn+ie+qfDAnO9Fmnrm11rGeE+3fFTHjYNdoAoGCCqGSM49AwEHoUQDQgAEMVjHUOxzh2flagPhuEYy/AhAlpD9qqACg4fGcCxOhLU35r21CQXzKDdCHMu69QDFd6EAe8iGFsybg+Yn4/njtA==",
|
||||
"FlakinessRate": 100
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
@ -0,0 +1,266 @@
|
|||
{
|
||||
"version": "0.1",
|
||||
"log_list_timestamp": "1970-01-01T00:00:01Z",
|
||||
"operators": [
|
||||
{
|
||||
"name": "Operator A",
|
||||
"email": ["fake@example.org"],
|
||||
"logs": [
|
||||
{
|
||||
"description": "A1 Current",
|
||||
"log_id": "OJiMlNA1mMOTLd/pI7q68npCDrlsQeFaqAwasPwEvQM=",
|
||||
"key": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAExhriVaEwBOtdNzg5EOtJBHl/u+ua1FtCR/CBXQ1kvpFelcP3gozLNXyxV/UexuifpmzTN31CdfdHv1kK3KDIxQ==",
|
||||
"url": "http://boulder:4600",
|
||||
"temporal_interval": {
|
||||
"start_inclusive": "1970-01-01T00:00:00Z",
|
||||
"end_exclusive": "2070-01-01T00:00:00Z"
|
||||
},
|
||||
"state": {
|
||||
"usable": {
|
||||
"timestamp": "2000-00-00T00:00:00Z"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"description": "A1 Future",
|
||||
"log_id": "2OHE0zamM5iS1NRFWJf9N6CWxdJ93je+leBX371vC+k=",
|
||||
"key": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE7uzW0zXQpWIk7MZUBdTu1muNzekMCIv/kn16+ifndQ584DElobOJ0ZlcACz9WdFyGTjOCfAqBmFybX2OJKfFVg==",
|
||||
"url": "http://boulder:4601",
|
||||
"temporal_interval": {
|
||||
"start_inclusive": "2070-01-01T00:00:00Z",
|
||||
"end_exclusive": "3070-01-01T00:00:00Z"
|
||||
},
|
||||
"state": {
|
||||
"usable": {
|
||||
"timestamp": "2000-00-00T00:00:00Z"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"description": "A2 Past",
|
||||
"log_id": "z7banNzwEtmRiittSviBYKjWmVltXNBhLfudmDXIcoU=",
|
||||
"key": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE/s5W5OHfowdLA7KerJ+mOizfHJE6Snfib8ueoBYl8Y12lpOoJTtCmmrx4m9KAb9AptInWpGrIaLY+5Y29l2eGw==",
|
||||
"url": "http://boulder:4602",
|
||||
"temporal_interval": {
|
||||
"start_inclusive": "1870-01-01T00:00:00Z",
|
||||
"end_exclusive": "1970-01-01T00:00:00Z"
|
||||
},
|
||||
"state": {
|
||||
"usable": {
|
||||
"timestamp": "2000-00-00T00:00:00Z"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"description": "A2 Current",
|
||||
"log_id": "HRrTQca8iy14Qbrw6/itgVzVWTcaENF3tWnJP743pq8=",
|
||||
"key": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE2EFdA2UBfbJ2Sw1413hBN9YESyABmTGbdgcMh0l/GyV3eFrFjcVS0laNphkfRZ+qkcMbeF+IIHqVzxHAM/2mQQ==",
|
||||
"url": "http://boulder:4603",
|
||||
"temporal_interval": {
|
||||
"start_inclusive": "1970-01-01T00:00:00Z",
|
||||
"end_exclusive": "2070-01-01T00:00:00Z"
|
||||
},
|
||||
"state": {
|
||||
"usable": {
|
||||
"timestamp": "2000-00-00T00:00:00Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Operator B",
|
||||
"email": ["fake@example.org"],
|
||||
"logs": [
|
||||
{
|
||||
"description": "B1",
|
||||
"log_id": "UtToynGEyMkkXDMQei8Ll54oMwWHI0IieDEKs12/Td4=",
|
||||
"key": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEAMSHwrzvr/KvNmUT55+uQo7CXQLPx1X+qEdKGekUg1q/InN/E37bCY/x45wC00qgiE0D3xoxnUJbKaCQcAX39w==",
|
||||
"url": "http://boulder:4604",
|
||||
"temporal_interval": {
|
||||
"start_inclusive": "1970-01-01T00:00:00Z",
|
||||
"end_exclusive": "2070-01-01T00:00:00Z"
|
||||
},
|
||||
"state": {
|
||||
"usable": {
|
||||
"timestamp": "2000-00-00T00:00:00Z"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"description": "B2",
|
||||
"log_id": "EOPWVkKfDlS3lQe5brFUMsEYAJ8I7uZr7z55geKzv7c=",
|
||||
"key": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEzmpksKS/mHgJZ821po3ldwonsz3K19jwsZgNSGYvEuzAVtWbGfY+6aUXua7f8WK8l2amHETISOY4JTRwk5QFyw==",
|
||||
"url": "http://boulder:4605",
|
||||
"temporal_interval": {
|
||||
"start_inclusive": "1970-01-01T00:00:00Z",
|
||||
"end_exclusive": "2070-01-01T00:00:00Z"
|
||||
},
|
||||
"state": {
|
||||
"usable": {
|
||||
"timestamp": "2000-00-00T00:00:00Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Operator C",
|
||||
"email": ["fake@example.org"],
|
||||
"logs": [
|
||||
{
|
||||
"description": "C1",
|
||||
"log_id": "Oqk/Tv0cUSnEJ4bZa0eprm3IQQ4XgNcv20/bXixlxnQ=",
|
||||
"key": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE31BxBVCdehxOC35jJzvAPNrU4ZjNXbmxS+zSN5DSkpJWQUp5wUHPGnXiSCtx7jXnTYLVzslIyXWpNN8m8BiKjQ==",
|
||||
"url": "http://boulder:4606",
|
||||
"state": {
|
||||
"usable": {
|
||||
"timestamp": "2000-00-00T00:00:00Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Operator D",
|
||||
"email": ["fake@example.org"],
|
||||
"logs": [
|
||||
{
|
||||
"description": "D1",
|
||||
"log_id": "e90gTyc4KkZpHv2pgeSOS224Md6/21UmWIxRF9mXveI=",
|
||||
"key": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEAjRx6Mhc/U4Ye7NzsZ7bbKMGhKVpGZHpZJMzLzNIveBAPh5OBDHpSdn9RY58t4diH8YLjqCi9o+k1T5RwiFbfQ==",
|
||||
"url": "http://boulder:4607",
|
||||
"temporal_interval": {
|
||||
"start_inclusive": "1970-01-01T00:00:00Z",
|
||||
"end_exclusive": "2070-01-01T00:00:00Z"
|
||||
},
|
||||
"state": {
|
||||
"usable": {
|
||||
"timestamp": "2000-00-00T00:00:00Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Operator E",
|
||||
"email": ["fake@example.org"],
|
||||
"logs": [
|
||||
{
|
||||
"description": "E1",
|
||||
"log_id": "ck+wYNY31I+5XBC7htsdNdYVjOSm4YgnDxlzO9PouwQ=",
|
||||
"key": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEsHFSkgrlrwIY0PG79tOZhPvBzrnrpbrWa3pG2FfkLeEJQ2Uvgw1oTZZ+oXcrm4Yb3khWDbpkzDbupI+e8xloeA==",
|
||||
"url": "http://boulder:4608",
|
||||
"state": {
|
||||
"retired": {
|
||||
"timestamp": "2000-01-01T00:00:00Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Operator F",
|
||||
"email": ["fake@example.org"],
|
||||
"logs": [
|
||||
{
|
||||
"description": "F1",
|
||||
"log_id": "FWPcPPStmIK3l/jogz7yLYUtafS44cpLs6hQ3HrjdUQ=",
|
||||
"key": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEMVjHUOxzh2flagPhuEYy/AhAlpD9qqACg4fGcCxOhLU35r21CQXzKDdCHMu69QDFd6EAe8iGFsybg+Yn4/njtA==",
|
||||
"url": "http://boulder:4609",
|
||||
"temporal_interval": {
|
||||
"start_inclusive": "1970-01-01T00:00:00Z",
|
||||
"end_exclusive": "2070-01-01T00:00:00Z"
|
||||
},
|
||||
"state": {
|
||||
"pending": {
|
||||
"timestamp": "2000-01-01T00:00:00Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Fake Google TODO(#5938): Remove this group",
|
||||
"email": ["fake@example.org"],
|
||||
"logs": [
|
||||
{
|
||||
"description": "G4500",
|
||||
"log_id": "KHYaGJAn++880NYaAY12sFBXKcenQRvMvfYE9F1CYVM=",
|
||||
"key": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEYggOxPnPkzKBIhTacSYoIfnSL2jPugcbUKx83vFMvk5gKAz/AGe87w20riuPwEGn229hKVbEKHFB61NIqNHC3Q==",
|
||||
"url": "http://boulder:4500",
|
||||
"state": {
|
||||
"usable": {
|
||||
"timestamp": "2000-00-00T00:00:00Z"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"description": "G4501",
|
||||
"log_id": "3Zk0/KXnJIDJVmh9gTSZCEmySfe1adjHvKs/XMHzbmQ=",
|
||||
"key": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEKtnFevaXV/kB8dmhCNZHmxKVLcHX1plaAsY9LrKilhYxdmQZiu36LvAvosTsqMVqRK9a96nC8VaxAdaHUbM8EA==",
|
||||
"url": "http://boulder:4501",
|
||||
"state": {
|
||||
"usable": {
|
||||
"timestamp": "2000-00-00T00:00:00Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Fake Everyone Else TODO(#5938): Remove this group",
|
||||
"email": ["fake@example.org"],
|
||||
"logs": [
|
||||
{
|
||||
"description": "O4510",
|
||||
"log_id": "FuhpwdGV6tfD+Jca4/B2AfeM4badMahSGLaDfzGoFQg=",
|
||||
"key": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEyw1HymhJkuxSIgt3gqW3sVXqMqB3EFsXcMfPFo0vYwjNiRmCJDXKsR0Flp7MAK+wc3X/7Hpc8liUbMhPet7tEA==",
|
||||
"url": "http://boulder:4510",
|
||||
"state": {
|
||||
"usable": {
|
||||
"timestamp": "2000-00-00T00:00:00Z"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"description": "O4511",
|
||||
"log_id": "NvR3OcSRDDWwwb0Hg+t9aKCpL3+tDuk99WrHkTwabYo=",
|
||||
"key": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFRu37ZRLg8lT4rVQwMwh4oAOpXb4Sx+9hgQ+JFCjmAv3oDV+sDOMsC7hULkGTn+LB5L1SRo/XIY4Kw5V+nFXgg==",
|
||||
"url": "http://boulder:4511",
|
||||
"state": {
|
||||
"usable": {
|
||||
"timestamp": "2000-00-00T00:00:00Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Unused",
|
||||
"email": ["fake@example.org"],
|
||||
"logs": [
|
||||
{
|
||||
"description": "This Log Has Every Field To Ensure We Can Parse It",
|
||||
"log_id": "BaseSixtyFourEncodingOfSHA256HashOfPublicKey=",
|
||||
"key": "BaseSixtyFourEncodingOfDEREncodingOfPublicKey=",
|
||||
"url": "https://example.com/ct/",
|
||||
"mmd": 86400,
|
||||
"state": {
|
||||
"readonly": {
|
||||
"timestamp": "2020-01-01T00:00:01Z",
|
||||
"final_tree_head": {
|
||||
"sha256_root_hash": "D1H4wAJmq0MRCeLfeOtrsZ9Am015anO5MkeasNhnQWI=",
|
||||
"tree_size": 123456789
|
||||
}
|
||||
}
|
||||
},
|
||||
"temporal_interval": {
|
||||
"start_inclusive": "1970-01-01T00:00:01Z",
|
||||
"end_exclusive": "2070-01-01T00:00:01Z"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -12,6 +12,7 @@ import (
|
|||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
|
@ -32,11 +33,10 @@ type integrationSrv struct {
|
|||
// path where all CT servers fail.
|
||||
rejectHosts map[string]bool
|
||||
// A list of entries that we rejected based on rejectHosts.
|
||||
rejected []string
|
||||
key *ecdsa.PrivateKey
|
||||
latencySchedule []float64
|
||||
latencyItem int
|
||||
userAgent string
|
||||
rejected []string
|
||||
key *ecdsa.PrivateKey
|
||||
flakinessRate int
|
||||
userAgent string
|
||||
}
|
||||
|
||||
func readJSON(w http.ResponseWriter, r *http.Request, output interface{}) error {
|
||||
|
@ -159,13 +159,10 @@ func (is *integrationSrv) addChainOrPre(w http.ResponseWriter, r *http.Request,
|
|||
is.submissions[hostnames]++
|
||||
is.Unlock()
|
||||
|
||||
if is.latencySchedule != nil {
|
||||
is.Lock()
|
||||
sleepTime := time.Duration(is.latencySchedule[is.latencyItem%len(is.latencySchedule)]) * time.Second
|
||||
is.latencyItem++
|
||||
is.Unlock()
|
||||
time.Sleep(sleepTime)
|
||||
if is.flakinessRate != 0 && rand.Intn(100) < is.flakinessRate {
|
||||
time.Sleep(10 * time.Second)
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write(publisher.CreateTestingSignedSCT(addChainReq.Chain, is.key, precert, time.Now()))
|
||||
}
|
||||
|
@ -198,9 +195,9 @@ type Personality struct {
|
|||
// Generate your own with:
|
||||
// openssl ecparam -name prime256v1 -genkey -outform der -noout | base64 -w 0
|
||||
PrivKey string
|
||||
// If present, sleep for the given number of seconds before replying. Each
|
||||
// request uses the next number in the list, eventually cycling through.
|
||||
LatencySchedule []float64
|
||||
// FlakinessRate is an integer between 0-100 that controls how often the log
|
||||
// "flakes", i.e. fails to respond in a reasonable time frame.
|
||||
FlakinessRate int
|
||||
}
|
||||
|
||||
func runPersonality(p Personality) {
|
||||
|
@ -217,11 +214,11 @@ func runPersonality(p Personality) {
|
|||
log.Fatal(err)
|
||||
}
|
||||
is := integrationSrv{
|
||||
key: key,
|
||||
latencySchedule: p.LatencySchedule,
|
||||
submissions: make(map[string]int64),
|
||||
rejectHosts: make(map[string]bool),
|
||||
userAgent: p.UserAgent,
|
||||
key: key,
|
||||
flakinessRate: p.FlakinessRate,
|
||||
submissions: make(map[string]int64),
|
||||
rejectHosts: make(map[string]bool),
|
||||
userAgent: p.UserAgent,
|
||||
}
|
||||
m := http.NewServeMux()
|
||||
m.HandleFunc("/submissions", is.getSubmissions)
|
||||
|
|
|
@ -13,7 +13,8 @@ import (
|
|||
berrors "github.com/letsencrypt/boulder/errors"
|
||||
)
|
||||
|
||||
var ctSrvPorts = []int{4500, 4501, 4510, 4511}
|
||||
// TODO(#5938): Remove 45XX ports.
|
||||
var ctSrvPorts = []int{4500, 4501, 4510, 4511, 4600, 4601, 4602, 4603, 4604, 4605, 4606, 4607, 4608, 4609}
|
||||
|
||||
// ctAddRejectHost adds a domain to all of the CT test server's reject-host
|
||||
// lists. If this fails the test is aborted with a fatal error.
|
||||
|
|
|
@ -1299,7 +1299,61 @@ def test_ocsp():
|
|||
# checking OCSP until we either see a good response or we timeout (5s).
|
||||
verify_ocsp(cert_file.name, "/hierarchy/intermediate-cert-rsa-a.pem", "http://localhost:4002", "good")
|
||||
|
||||
def test_ct_submission():
|
||||
# TODO(#5938): Remove _operator suffix from this test and remove CONFIG_NEXT check.
|
||||
def test_ct_submission_operator():
|
||||
if not CONFIG_NEXT:
|
||||
return
|
||||
|
||||
hostname = random_domain()
|
||||
|
||||
chisel2.auth_and_issue([hostname])
|
||||
|
||||
# These should correspond to the configured logs in ra.json.
|
||||
log_groups = [
|
||||
["http://boulder:4600/submissions", "http://boulder:4601/submissions", "http://boulder:4602/submissions", "http://boulder:4603/submissions"],
|
||||
["http://boulder:4604/submissions", "http://boulder:4605/submissions"],
|
||||
["http://boulder:4606/submissions"],
|
||||
["http://boulder:4607/submissions"],
|
||||
["http://boulder:4608/submissions"],
|
||||
["http://boulder:4609/submissions"],
|
||||
]
|
||||
|
||||
# These should correspond to the logs with `submitFinal` in ra.json.
|
||||
final_logs = [
|
||||
"http://boulder:4600/submissions",
|
||||
"http://boulder:4601/submissions",
|
||||
"http://boulder:4606/submissions",
|
||||
"http://boulder:4609/submissions",
|
||||
]
|
||||
|
||||
# We'd like to enforce strict limits here (exactly 1 submission per group,
|
||||
# exactly two submissions overall) but the async nature of the race system
|
||||
# means we can't -- a slowish submission to one log in a group could trigger
|
||||
# a very fast submission to a different log in the same group, and then both
|
||||
# submissions could succeed at the same time. Although the Go code will only
|
||||
# use one of the SCTs, both logs will still have been submitted to, and it
|
||||
# will show up here.
|
||||
total_count = 0
|
||||
for i in range(len(log_groups)):
|
||||
group_count = 0
|
||||
for j in range(len(log_groups[i])):
|
||||
log = log_groups[i][j]
|
||||
count = int(requests.get(log + "?hostnames=%s" % hostname).text)
|
||||
threshold = 1
|
||||
if log in final_logs:
|
||||
threshold += 1
|
||||
if count > threshold:
|
||||
raise(Exception("Got %d submissions for log %s, expected at most %d" % (count, log, threshold)))
|
||||
group_count += count
|
||||
total_count += group_count
|
||||
if total_count < 2:
|
||||
raise(Exception("Got %d total submissions, expected at least 2" % total_count))
|
||||
|
||||
# TODO(#5938): Remove this test.
|
||||
def test_ct_submission_google():
|
||||
if CONFIG_NEXT:
|
||||
return
|
||||
|
||||
hostname = random_domain()
|
||||
|
||||
# These should correspond to the configured logs in ra.json.
|
||||
|
|
Loading…
Reference in New Issue