CRL: Create crl-updater service (#6212)
Create a new service named crl-updater. It is responsible for maintaining the full set of CRLs we issue: one "full and complete" CRL for each currently-active Issuer, split into a number of "shards" which are essentially CRLs with arbitrary scopes.

The crl-updater is modeled after the ocsp-updater: it is a long-running standalone service that wakes up periodically, does a large amount of work in parallel, and then sleeps. The period at which it wakes to do work is configurable. Unlike the ocsp-updater, it does all of its work every time it wakes, so we expect to set the update frequency at 6-24 hours.

Maintaining CRL scopes is done statelessly. Every certificate belongs to a specific "bucket", determined by its notAfter date. This mapping is unchanging over the life of the certificate, so revoked certificate entries will not move between shards from one update to the next. The only exception is if we change the number of shards, in which case all of the bucket boundaries will be recomputed. For more details, see the comment on `getShardBoundaries`.

It uses the new SA.GetRevokedCerts method to collect all of the revoked certificates whose notAfter timestamps fall within the boundaries of each shard's time-bucket, and the new CA.GenerateCRL method to sign the CRLs. In the future, it will send signed CRLs to the crl-storer to be persisted outside our infrastructure.

Fixes #6163
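As a rough illustration of the stateless bucketing described above: a certificate's shard can be computed as a pure function of its notAfter timestamp and the configured shard count. The following is a minimal sketch, assuming a hypothetical shardFor helper and constants (the real boundary math is getShardBoundaries in crl/updater/updater.go, in the diff below); it shows why the mapping stays stable until the number of shards changes:

package main

import (
	"fmt"
	"time"
)

// shardFor maps a notAfter timestamp to a shard index. It depends only on
// the fixed window width and shard count, so a certificate's shard never
// changes between runs unless numShards (and thus the chunk width) changes.
func shardFor(notAfter time.Time, window time.Duration, numShards int) int {
	chunkWidth := window.Nanoseconds() / int64(numShards)
	// Chunks are numbered 0..numShards-1 and repeat along the timeline.
	return int((notAfter.UnixNano() / chunkWidth) % int64(numShards))
}

func main() {
	window := 107 * 24 * time.Hour // lookback + lookforward, mirroring the test below
	notAfter := time.Date(2022, 9, 1, 0, 0, 0, 0, time.UTC)
	fmt.Println(shardFor(notAfter, window, 107)) // same output on every run
}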
This commit is contained in:
parent
62f7caf14c
commit
436061fb35
@@ -105,7 +105,7 @@ func (ci *crlImpl) GenerateCRL(stream capb.CRLGenerator_GenerateCRLServer) error
 	logID := blog.LogLineChecksum(fmt.Sprintf("%d", issuer.Cert.NameID()) + template.Number.String() + fmt.Sprintf("%d", shard))
 	ci.log.AuditInfof(
 		"Signing CRL: logID=[%s] issuer=[%s] number=[%s] shard=[%d] thisUpdate=[%s] nextUpdate=[%s] numEntries=[%d]",
-		logID, issuer.Cert.Subject.CommonName, template.Number.String(), template.ThisUpdate, template.NextUpdate, len(rcs),
+		logID, issuer.Cert.Subject.CommonName, template.Number.String(), shard, template.ThisUpdate, template.NextUpdate, len(rcs),
 	)

 	builder := strings.Builder{}
@@ -121,7 +121,7 @@ func (ci *crlImpl) GenerateCRL(stream capb.CRLGenerator_GenerateCRLServer) error
 		fmt.Fprintf(&builder, "%x:%d,", rcs[i].SerialNumber.Bytes(), reason)

 		if builder.Len() != ci.maxLogLen {
-			ci.log.AuditInfof("%s", builder)
+			ci.log.AuditInfo(builder.String())
 			builder = strings.Builder{}
 		}
 	}
@@ -139,8 +139,8 @@ func (ci *crlImpl) GenerateCRL(stream capb.CRLGenerator_GenerateCRLServer) error

 	hash := sha256.Sum256(crlBytes)
 	ci.log.AuditInfof(
-		"Signing CRL success: logID=[%s] size=[%d] hash=[%d]",
-		logID, len(crlBytes), hash[:],
+		"Signing CRL success: logID=[%s] size=[%d] hash=[%x]",
+		logID, len(crlBytes), hash,
 	)

 	for i := 0; i < len(crlBytes); i += 1000 {
@@ -19,6 +19,7 @@ import (
 	_ "github.com/letsencrypt/boulder/cmd/ceremony"
 	_ "github.com/letsencrypt/boulder/cmd/cert-checker"
 	_ "github.com/letsencrypt/boulder/cmd/contact-auditor"
+	_ "github.com/letsencrypt/boulder/cmd/crl-updater"
 	_ "github.com/letsencrypt/boulder/cmd/expiration-mailer"
 	_ "github.com/letsencrypt/boulder/cmd/id-exporter"
 	_ "github.com/letsencrypt/boulder/cmd/log-validator"
@@ -0,0 +1,135 @@
+package notmain
+
+import (
+	"context"
+	"flag"
+	"os"
+
+	"github.com/honeycombio/beeline-go"
+
+	capb "github.com/letsencrypt/boulder/ca/proto"
+	"github.com/letsencrypt/boulder/cmd"
+	"github.com/letsencrypt/boulder/crl/updater"
+	"github.com/letsencrypt/boulder/features"
+	bgrpc "github.com/letsencrypt/boulder/grpc"
+	"github.com/letsencrypt/boulder/issuance"
+	sapb "github.com/letsencrypt/boulder/sa/proto"
+)
+
+type Config struct {
+	CRLUpdater struct {
+		cmd.ServiceConfig
+
+		CRLGeneratorService *cmd.GRPCClientConfig
+		SAService           *cmd.GRPCClientConfig
+		// TODO(#6162): Add CRLStorerService stanza
+
+		// IssuerCerts is a list of paths to issuer certificates on disk. This
+		// controls the set of CRLs which will be published by this updater: it will
+		// publish one set of NumShards CRL shards for each issuer in this list.
+		IssuerCerts []string
+
+		// NumShards is the number of shards into which each issuer's "full and
+		// complete" CRL will be split.
+		// WARNING: When this number is changed, the "JSON Array of CRL URLs" field
+		// in CCADB MUST be updated.
+		NumShards int
+
+		// CertificateLifetime is the validity period (usually expressed in hours,
+		// like "2160h") of the longest-lived currently-unexpired certificate. For
+		// Let's Encrypt, this is usually ninety days. If the validity period of
+		// the issued certificates ever changes upwards, this value must be updated
+		// immediately; if the validity period of the issued certificates ever
+		// changes downwards, the value must not change until after all certificates
+		// with the old validity period have expired.
+		CertificateLifetime cmd.ConfigDuration
+
+		// UpdatePeriod controls how frequently the crl-updater runs and publishes
+		// new versions of every CRL shard. The Baseline Requirements, Section 4.9.7
+		// state that this MUST NOT be more than 7 days. We believe that future
+		// updates may require that this not be more than 24 hours, and currently
+		// recommend an UpdatePeriod of 6 hours.
+		UpdatePeriod cmd.ConfigDuration
+
+		// MaxParallelism controls how many workers may be running in parallel.
+		// A higher value reduces the total time necessary to update all CRL shards
+		// that this updater is responsible for, but also increases the memory used
+		// by this updater.
+		MaxParallelism int
+
+		Features map[string]bool
+	}
+
+	Syslog  cmd.SyslogConfig
+	Beeline cmd.BeelineConfig
+}
+
+func main() {
+	configFile := flag.String("config", "", "File path to the configuration file for this service")
+	flag.Parse()
+	if *configFile == "" {
+		flag.Usage()
+		os.Exit(1)
+	}
+
+	var c Config
+	err := cmd.ReadConfigFile(*configFile, &c)
+	cmd.FailOnError(err, "Reading JSON config file into config structure")
+
+	err = features.Set(c.CRLUpdater.Features)
+	cmd.FailOnError(err, "Failed to set feature flags")
+
+	tlsConfig, err := c.CRLUpdater.TLS.Load()
+	cmd.FailOnError(err, "TLS config")
+
+	scope, logger := cmd.StatsAndLogging(c.Syslog, c.CRLUpdater.DebugAddr)
+	defer logger.AuditPanic()
+	logger.Info(cmd.VersionString())
+	clk := cmd.Clock()
+
+	bc, err := c.Beeline.Load()
+	cmd.FailOnError(err, "Failed to load Beeline config")
+	beeline.Init(bc)
+	defer beeline.Close()
+
+	issuers := make([]*issuance.Certificate, 0, len(c.CRLUpdater.IssuerCerts))
+	for _, filepath := range c.CRLUpdater.IssuerCerts {
+		cert, err := issuance.LoadCertificate(filepath)
+		cmd.FailOnError(err, "Failed to load issuer cert")
+		issuers = append(issuers, cert)
+	}
+
+	clientMetrics := bgrpc.NewClientMetrics(scope)
+
+	caConn, err := bgrpc.ClientSetup(c.CRLUpdater.CRLGeneratorService, tlsConfig, clientMetrics, clk)
+	cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to CRLGenerator")
+	cac := capb.NewCRLGeneratorClient(caConn)
+
+	saConn, err := bgrpc.ClientSetup(c.CRLUpdater.SAService, tlsConfig, clientMetrics, clk)
+	cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to SA")
+	sac := sapb.NewStorageAuthorityClient(saConn)
+
+	// TODO(#6162): Set up crl-storer client connection.
+
+	u, err := updater.NewUpdater(
+		issuers,
+		c.CRLUpdater.NumShards,
+		c.CRLUpdater.CertificateLifetime.Duration,
+		c.CRLUpdater.UpdatePeriod.Duration,
+		c.CRLUpdater.MaxParallelism,
+		sac,
+		cac,
+		scope,
+		logger,
+		clk,
+	)
+	cmd.FailOnError(err, "Failed to create crl-updater")

+	ctx, cancel := context.WithCancel(context.Background())
+	go cmd.CatchSignals(logger, cancel)
+	u.Run(ctx)
+}
+
+func init() {
+	cmd.RegisterCommand("crl-updater", main)
+}

@@ -0,0 +1,385 @@
+package updater
+
+import (
+	"context"
+	"crypto/sha256"
+	"fmt"
+	"io"
+	"time"
+
+	"github.com/jmhodges/clock"
+	"github.com/prometheus/client_golang/prometheus"
+
+	capb "github.com/letsencrypt/boulder/ca/proto"
+	"github.com/letsencrypt/boulder/issuance"
+	blog "github.com/letsencrypt/boulder/log"
+	sapb "github.com/letsencrypt/boulder/sa/proto"
+)
+
+type crlUpdater struct {
+	issuers           map[issuance.IssuerNameID]*issuance.Certificate
+	numShards         int
+	lookbackPeriod    time.Duration
+	lookforwardPeriod time.Duration
+	updatePeriod      time.Duration
+	maxParallelism    int
+
+	sa sapb.StorageAuthorityClient
+	ca capb.CRLGeneratorClient
+	// TODO(#6162): Add a crl-storer gRPC client.
+
+	tickHistogram    *prometheus.HistogramVec
+	generatedCounter *prometheus.CounterVec
+
+	log blog.Logger
+	clk clock.Clock
+}
+
+func NewUpdater(
+	issuers []*issuance.Certificate,
+	numShards int,
+	certLifetime time.Duration,
+	updatePeriod time.Duration,
+	maxParallelism int,
+	sa sapb.StorageAuthorityClient,
+	ca capb.CRLGeneratorClient,
+	stats prometheus.Registerer,
+	log blog.Logger,
+	clk clock.Clock,
+) (*crlUpdater, error) {
+	issuersByNameID := make(map[issuance.IssuerNameID]*issuance.Certificate, len(issuers))
+	for _, issuer := range issuers {
+		issuersByNameID[issuer.NameID()] = issuer
+	}
+
+	if numShards < 1 {
+		return nil, fmt.Errorf("must have positive number of shards, got: %d", numShards)
+	}
+
+	if updatePeriod >= 7*24*time.Hour {
+		return nil, fmt.Errorf("must update CRLs at least every 7 days, got: %s", updatePeriod)
+	}
+
+	// Set the lookback period to be significantly greater than the update period.
+	// This guarantees that a certificate which was revoked very shortly before it
+	// expired will still appear on at least one CRL, as required by RFC 5280
+	// Section 3.3.
+	lookbackPeriod := 4 * updatePeriod
+
+	// Set the lookforward period to be greater than the lifetime of the longest
+	// currently-valid certificate. Ensure it overshoots by more than the width
+	// of one shard. See comment on getShardBoundaries for details.
+	tentativeShardWidth := (lookbackPeriod + certLifetime).Nanoseconds() / int64(numShards)
+	lookforwardPeriod := certLifetime + time.Duration(4*tentativeShardWidth)
+
+	// Ensure that the total window (lookback + lookforward) is evenly divisible
+	// by the number of shards, to make shard boundary calculations easy.
+	window := lookbackPeriod + lookforwardPeriod
+	offset := window.Nanoseconds() % int64(numShards)
+	if offset != 0 {
+		lookforwardPeriod += time.Duration(int64(numShards) - offset)
+	}
+
+	if maxParallelism <= 0 {
+		maxParallelism = 1
+	}
+
+	tickHistogram := prometheus.NewHistogramVec(prometheus.HistogramOpts{
+		Name:    "crl_updater_ticks",
+		Help:    "A histogram of crl-updater tick latencies labeled by issuer and result",
+		Buckets: []float64{0.01, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000},
+	}, []string{"issuer", "result"})
+	stats.MustRegister(tickHistogram)
+
+	generatedCounter := prometheus.NewCounterVec(prometheus.CounterOpts{
+		Name: "crl_updater_generated",
+		Help: "A counter of CRL generation calls labeled by result",
+	}, []string{"result"})
+	stats.MustRegister(generatedCounter)
+
+	// TODO(#6162): Add a storedCounter when sending to the crl-storer.
+
+	return &crlUpdater{
+		issuersByNameID,
+		numShards,
+		lookbackPeriod,
+		lookforwardPeriod,
+		updatePeriod,
+		maxParallelism,
+		sa,
+		ca,
+		tickHistogram,
+		generatedCounter,
+		log,
+		clk,
+	}, nil
+}
+
+// Run causes the crl-updater to run immediately, and then re-run continuously
+// on the frequency specified by crlUpdater.updatePeriod. The provided context
+// can be used to gracefully stop (cancel) the process.
+func (cu *crlUpdater) Run(ctx context.Context) {
+	// TODO(#6163): Should there also be a configurable per-run timeout, to
+	// prevent overruns, used in a context.WithTimeout here?
+	cu.tick(ctx)
+	ticker := time.NewTicker(cu.updatePeriod)
+	for {
+		select {
+		case <-ticker.C:
+			cu.tick(ctx)
+		case <-ctx.Done():
+			ticker.Stop()
+			return
+		}
+	}
+}
+
+func (cu *crlUpdater) tick(ctx context.Context) {
+	atTime := cu.clk.Now()
+	result := "success"
+	defer func() {
+		cu.tickHistogram.WithLabelValues("all", result).Observe(cu.clk.Since(atTime).Seconds())
+	}()
+	cu.log.Debugf("Ticking at time %s", atTime)
+
+	for id, iss := range cu.issuers {
+		// For now, process each issuer serially. This keeps the worker pool system
+		// simple, and processing all of the issuers in parallel likely wouldn't
+		// meaningfully speed up the overall process.
+		err := cu.tickIssuer(ctx, atTime, id)
+		if err != nil {
+			cu.log.AuditErrf(
+				"tick for issuer %s at time %s failed: %s",
+				iss.Subject.CommonName,
+				atTime.Format(time.RFC3339Nano),
+				err)
+			result = "failed"
+		}
+	}
+}
+
+// tickIssuer performs the full CRL issuance cycle for a single issuer cert.
+func (cu *crlUpdater) tickIssuer(ctx context.Context, atTime time.Time, issuerID issuance.IssuerNameID) error {
+	start := cu.clk.Now()
+	result := "success"
+	defer func() {
+		cu.tickHistogram.WithLabelValues(cu.issuers[issuerID].Subject.CommonName+" (Overall)", result).Observe(cu.clk.Since(start).Seconds())
+	}()
+	cu.log.Debugf("Ticking issuer %d at time %s", issuerID, atTime)
+
+	type shardResult struct {
+		shardID int
+		err     error
+	}
+
+	shardWorker := func(in <-chan int, out chan<- shardResult) {
+		for id := range in {
+			select {
+			case <-ctx.Done():
+				return
+			default:
+				out <- shardResult{
+					shardID: id,
+					err:     cu.tickShard(ctx, atTime, issuerID, id),
+				}
+			}
+		}
+	}
+
+	shardIDs := make(chan int, cu.numShards)
+	shardResults := make(chan shardResult, cu.numShards)
+	for i := 0; i < cu.maxParallelism; i++ {
+		go shardWorker(shardIDs, shardResults)
+	}
+
+	for shardID := 0; shardID < cu.numShards; shardID++ {
+		shardIDs <- shardID
+	}
+	close(shardIDs)
+
+	for i := 0; i < cu.numShards; i++ {
+		res := <-shardResults
+		if res.err != nil {
+			result = "failed"
+			return fmt.Errorf("updating shard %d: %w", res.shardID, res.err)
+		}
+	}
+
+	// TODO(#6162): Send an RPC to the crl-storer to atomically update this CRL's
+	// urls to all point to the newly-uploaded shards.
+	return nil
+}
+
+func (cu *crlUpdater) tickShard(ctx context.Context, atTime time.Time, issuerID issuance.IssuerNameID, shardID int) error {
+	start := cu.clk.Now()
+	result := "success"
+	defer func() {
+		cu.tickHistogram.WithLabelValues(cu.issuers[issuerID].Subject.CommonName, result).Observe(cu.clk.Since(start).Seconds())
+		cu.generatedCounter.WithLabelValues(result).Inc()
+	}()
+	cu.log.Debugf("Ticking shard %d of issuer %d at time %s", shardID, issuerID, atTime)
+
+	expiresAfter, expiresBefore := cu.getShardBoundaries(atTime, shardID)
+
+	saStream, err := cu.sa.GetRevokedCerts(ctx, &sapb.GetRevokedCertsRequest{
+		IssuerNameID:  int64(issuerID),
+		ExpiresAfter:  expiresAfter.UnixNano(),
+		ExpiresBefore: expiresBefore.UnixNano(),
+		RevokedBefore: atTime.UnixNano(),
+	})
+	if err != nil {
+		result = "failed"
+		return fmt.Errorf("connecting to SA for shard %d: %w", shardID, err)
+	}
+
+	caStream, err := cu.ca.GenerateCRL(ctx)
+	if err != nil {
+		result = "failed"
+		return fmt.Errorf("connecting to CA for shard %d: %w", shardID, err)
+	}
+
+	err = caStream.Send(&capb.GenerateCRLRequest{
+		Payload: &capb.GenerateCRLRequest_Metadata{
+			Metadata: &capb.CRLMetadata{
+				IssuerNameID: int64(issuerID),
+				ThisUpdate:   atTime.UnixNano(),
+				Shard:        int64(shardID),
+			},
+		},
+	})
+	if err != nil {
+		result = "failed"
+		return fmt.Errorf("sending CA metadata for shard %d: %w", shardID, err)
+	}
+
+	for {
+		entry, err := saStream.Recv()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			result = "failed"
+			return fmt.Errorf("retrieving entry from SA for shard %d: %w", shardID, err)
+		}
+
+		err = caStream.Send(&capb.GenerateCRLRequest{
+			Payload: &capb.GenerateCRLRequest_Entry{
+				Entry: entry,
+			},
+		})
+		if err != nil {
+			result = "failed"
+			return fmt.Errorf("sending entry to CA for shard %d: %w", shardID, err)
+		}
+	}
+
+	// It's okay to close the CA send stream before we start reading from the
+	// receive stream, because we know that the CA has to hold the entire tbsCRL
+	// in memory before it can sign it and start returning the real CRL.
+	err = caStream.CloseSend()
+	if err != nil {
+		result = "failed"
+		return fmt.Errorf("closing CA request stream for shard %d: %w", shardID, err)
+	}
+
+	// TODO(#6162): Connect to the crl-storer, and stream the bytes there.
+	crlBytes := make([]byte, 0)
+	crlHasher := sha256.New()
+	for {
+		out, err := caStream.Recv()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			result = "failed"
+			return fmt.Errorf("receiving CRL bytes for shard %d: %w", shardID, err)
+		}
+
+		crlBytes = append(crlBytes, out.Chunk...)
+		crlHasher.Write(out.Chunk)
+	}
+
+	crlHash := crlHasher.Sum(nil)
+	cu.log.AuditInfof(
+		"Received CRL: issuerID=[%d] number=[%d] shard=[%d] size=[%d] hash=[%x]",
+		issuerID, atTime.UnixNano(), shardID, len(crlBytes), crlHash)
+
+	return nil
+}
+
+// getShardBoundaries computes the start (inclusive) and end (exclusive) times
+// for a given integer-indexed CRL shard. The idea here is that shards should be
+// stable. Picture a timeline, divided into chunks. Number those chunks from 0
+// to cu.numShards, then repeat the cycle when you run out of numbers:
+//
+//	chunk:    5     0     1     2     3     4     5     0     1     2     3
+//	...-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----...
+//	                      ^  ^-atTime                                  ^
+//	atTime-lookbackPeriod-┘                    atTime+lookforwardPeriod-┘
+//
+// The width of each chunk is determined by dividing the total time window we
+// care about (lookbackPeriod+lookforwardPeriod) by the number of shards we
+// want (numShards).
+//
+// Even as "now" (atTime) moves forward, and the total window of expiration
+// times that we care about moves forward, the boundaries of each chunk remain
+// stable:
+//
+//	chunk:    5     0     1     2     3     4     5     0     1     2     3
+//	...-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----...
+//	                          ^  ^-atTime                                  ^
+//	    atTime-lookbackPeriod-┘                    atTime+lookforwardPeriod-┘
+//
+// However, note that at essentially all times the window includes parts of two
+// different instances of the chunk which appears at its ends. For example,
+// in the second diagram above, the window includes almost all of the middle
+// chunk labeled "3", but also includes just a little bit of the rightmost chunk
+// also labeled "3".
+//
+// In order to handle this case, this function always treats the *leftmost*
+// (i.e. earliest) chunk with the given ID that has *any* overlap with the
+// current window as the current shard. It returns the boundaries of this chunk
+// as the boundaries of the desired shard. In the diagram below, even though
+// there is another chunk with ID "1" near the right-hand edge of the window,
+// that chunk is ignored.
+//
+//	shard:                | 1   | 2   | 3   | 4   | 5   | 0   |
+//	...-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----...
+//	                      ^  ^-atTime                                  ^
+//	atTime-lookbackPeriod-┘                    atTime+lookforwardPeriod-┘
+//
+// This means that the lookforwardPeriod MUST be configured large enough that
+// there is a buffer of at least one whole chunk width between the actual
+// furthest-future expiration (generally atTime+90d) and the right-hand edge of
+// the window (atTime+lookforwardPeriod).
+func (cu *crlUpdater) getShardBoundaries(atTime time.Time, shardID int) (time.Time, time.Time) {
+	// Ensure that the given shardID falls within the space of acceptable IDs.
+	shardID = shardID % cu.numShards
+
+	// Compute the width of the full window.
+	windowWidth := cu.lookbackPeriod + cu.lookforwardPeriod
+	// Compute the amount of time between the left-hand edge of the most recent
+	// "0" chunk and the current time.
+	atTimeOffset := time.Duration(atTime.Sub(time.Time{}).Nanoseconds() % windowWidth.Nanoseconds())
+	// Compute the left-hand edge of the most recent "0" chunk.
+	zeroStart := atTime.Add(-atTimeOffset)
+
+	// Compute the width of a single shard.
+	shardWidth := time.Duration(windowWidth.Nanoseconds() / int64(cu.numShards))
+	// Compute the amount of time between the left-hand edge of the most recent
+	// "0" chunk and the left-hand edge of the desired chunk.
+	shardOffset := time.Duration(int64(shardID) * shardWidth.Nanoseconds())
+	// Compute the left-hand edge of the most recent chunk with the given ID.
+	shardStart := zeroStart.Add(shardOffset)
+	// Compute the right-hand edge of the most recent chunk with the given ID.
+	shardEnd := shardStart.Add(shardWidth)
+
+	// But the shard boundaries we just computed might be for a chunk that is
+	// completely behind the left-hand edge of our current window. If they are,
+	// bump them forward by one window width to bring them inside our window.
+	if shardEnd.Before(atTime.Add(-cu.lookbackPeriod)) {
+		shardStart = shardStart.Add(windowWidth)
+		shardEnd = shardEnd.Add(windowWidth)
+	}
+	return shardStart, shardEnd
+}

@@ -0,0 +1,46 @@
+package updater
+
+import (
+	"testing"
+	"time"
+
+	"github.com/letsencrypt/boulder/test"
+)
+
+func TestGetWindowForShard(t *testing.T) {
+	// Our test updater divides a 107-day window into 107 shards, resulting in a
+	// shard width of 24 hours.
+	tcu := crlUpdater{
+		numShards:         107,
+		lookbackPeriod:    7 * 24 * time.Hour,
+		lookforwardPeriod: 100 * 24 * time.Hour,
+	}
+	zeroTime := time.Time{}
+
+	// At just a moment past the 0 time, the zeroth shard should start at time 0,
+	// and end exactly one day later.
+	start, end := tcu.getShardBoundaries(zeroTime.Add(time.Minute), 0)
+	test.Assert(t, start.IsZero(), "start time should be zero")
+	test.AssertEquals(t, end, zeroTime.Add(24*time.Hour))
+
+	// At the same moment, the 93rd shard should start 93 days later.
+	start, end = tcu.getShardBoundaries(zeroTime.Add(time.Minute), 93)
+	test.AssertEquals(t, start, zeroTime.Add(93*24*time.Hour))
+	test.AssertEquals(t, end, zeroTime.Add(94*24*time.Hour))
+
+	// If we jump 100 days into the future, now the 0th shard should start 107
+	// days after the zero time.
+	start, end = tcu.getShardBoundaries(zeroTime.Add(100*24*time.Hour+time.Minute), 0)
+	test.AssertEquals(t, start, zeroTime.Add(107*24*time.Hour))
+	test.AssertEquals(t, end, zeroTime.Add(108*24*time.Hour))
+
+	// During day 100, the 93rd shard should still start at the same time (just
+	// over 7 days ago), because we haven't fully left it behind yet. The 92nd
+	// shard, however, should have jumped into the future.
+	start, end = tcu.getShardBoundaries(zeroTime.Add(100*24*time.Hour+time.Minute), 93)
+	test.AssertEquals(t, start, zeroTime.Add(93*24*time.Hour))
+	test.AssertEquals(t, end, zeroTime.Add(94*24*time.Hour))
+	start, end = tcu.getShardBoundaries(zeroTime.Add(100*24*time.Hour+time.Minute), 92)
+	test.AssertEquals(t, start, zeroTime.Add(199*24*time.Hour))
+	test.AssertEquals(t, end, zeroTime.Add(200*24*time.Hour))
+}

@@ -0,0 +1,35 @@
+{
+	"crlUpdater": {
+		"debugAddr": ":8021",
+		"tls": {
+			"caCertFile": "test/grpc-creds/minica.pem",
+			"certFile": "test/grpc-creds/crl-updater.boulder/cert.pem",
+			"keyFile": "test/grpc-creds/crl-updater.boulder/key.pem"
+		},
+		"crlGeneratorService": {
+			"serverAddress": "ca.boulder:9106",
+			"timeout": "15s"
+		},
+		"saService": {
+			"serverAddress": "sa.boulder:9095",
+			"timeout": "15s"
+		},
+		"issuerCerts": [
+			"/hierarchy/intermediate-cert-rsa-a.pem",
+			"/hierarchy/intermediate-cert-rsa-b.pem",
+			"/hierarchy/intermediate-cert-ecdsa-a.pem"
+		],
+		"numShards": 10,
+		"certificateLifetime": "2160h",
+		"updatePeriod": "6h",
+		"maxParallelism": 10
+	},
+
+	"syslog": {
+		"stdoutlevel": 6,
+		"sysloglevel": 6
+	},
+	"beeline": {
+		"mute": true
+	}
+}

@@ -21,6 +21,7 @@
 	"clientNames": [
 		"admin-revoker.boulder",
 		"ca.boulder",
+		"crl-updater.boulder",
 		"expiration-mailer.boulder",
 		"health-checker.boulder",
 		"orphan-finder.boulder",
@@ -0,0 +1,35 @@
+{
+	"crlUpdater": {
+		"debugAddr": ":8021",
+		"tls": {
+			"caCertFile": "test/grpc-creds/minica.pem",
+			"certFile": "test/grpc-creds/crl-updater.boulder/cert.pem",
+			"keyFile": "test/grpc-creds/crl-updater.boulder/key.pem"
+		},
+		"crlGeneratorService": {
+			"serverAddress": "ca.boulder:9106",
+			"timeout": "15s"
+		},
+		"saService": {
+			"serverAddress": "sa.boulder:9095",
+			"timeout": "15s"
+		},
+		"issuerCerts": [
+			"/hierarchy/intermediate-cert-rsa-a.pem",
+			"/hierarchy/intermediate-cert-rsa-b.pem",
+			"/hierarchy/intermediate-cert-ecdsa-a.pem"
+		],
+		"numShards": 10,
+		"certificateLifetime": "2160h",
+		"updatePeriod": "6h",
+		"maxParallelism": 10
+	},
+
+	"syslog": {
+		"stdoutlevel": 6,
+		"sysloglevel": 6
+	},
+	"beeline": {
+		"mute": true
+	}
+}

@@ -21,6 +21,7 @@
 	"clientNames": [
 		"admin-revoker.boulder",
 		"ca.boulder",
+		"crl-updater.boulder",
 		"expiration-mailer.boulder",
 		"health-checker.boulder",
 		"orphan-finder.boulder",
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDHTCCAgWgAwIBAgIIHyji7lDVSqUwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE
+AxMVbWluaWNhIHJvb3QgY2EgM2I4YjJjMB4XDTIyMDYwMTE4NDkzOVoXDTI0MDcw
+MTE4NDkzOVowHTEbMBkGA1UEAxMSY3JsLXN0b3Jlci5ib3VsZGVyMIIBIjANBgkq
+hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArRAD3NxXfSW6GLOnf+M9F7rlwNRjzH+5
+IFInKeFHDdop/S8uYU1+7X7cQRz/5sZC5KdB8pAZZPCvmrUCJO+0VMfGH16NSax9
+D6OZNg0gPdnGV0nfbFv0nGhnN7hTEAFTzslo5Yrbk21ZUsHBIoCimApZYba9EMVV
+PHL0S/GPTSyt4UWaxBwMZ+rZkUiB3fdhj2TIjw3YW7P8TWgI13B3MOYV5cxEWgYb
+/l3TZEQIBMfMUbB0n1V6n7tqczdKNfHtgathnRrsY98VOUFJH0kcWNCwyHWddE7k
+gkiI+QyTPKIGbnfEjmSDHmW6XdLFnl37zOPY1Dscuxpbs8Mjp0YMEwIDAQABo14w
+XDAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMC
+MAwGA1UdEwEB/wQCMAAwHQYDVR0RBBYwFIISY3JsLXN0b3Jlci5ib3VsZGVyMA0G
+CSqGSIb3DQEBCwUAA4IBAQCetSnCIrGp63JhCeLpw3Xaj7+kX49h3sChckcA7FyF
+/BnYJP1P4hkMMZptqkhZQyOUKfiBKKz8jNf5h+qT0o5T5LDTY2z7971850C79EwL
+4qeDM4crExqQhvKVgg5qYFo2Rt7hT1sAVWKC2qJY0HtQO7WGDn0p1M5dehwzhgyU
+jJZAWPMMnrL+njRrHWoN5AHkz+Ykbva/4K4uyTaiLx4wvk2WHgwNRZ7FVcdj4wdh
+Im/eoTTbBQPaZhT0W1/suJOxSoFZYWUWFg4dNglaPIVMEm1eTARohbYjimCXvB8P
+JhqJFC+tEgmBVdldALR7fJS50emOMoO9Qc2nAGV0+yIV
+-----END CERTIFICATE-----

@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEArRAD3NxXfSW6GLOnf+M9F7rlwNRjzH+5IFInKeFHDdop/S8u
+YU1+7X7cQRz/5sZC5KdB8pAZZPCvmrUCJO+0VMfGH16NSax9D6OZNg0gPdnGV0nf
+bFv0nGhnN7hTEAFTzslo5Yrbk21ZUsHBIoCimApZYba9EMVVPHL0S/GPTSyt4UWa
+xBwMZ+rZkUiB3fdhj2TIjw3YW7P8TWgI13B3MOYV5cxEWgYb/l3TZEQIBMfMUbB0
+n1V6n7tqczdKNfHtgathnRrsY98VOUFJH0kcWNCwyHWddE7kgkiI+QyTPKIGbnfE
+jmSDHmW6XdLFnl37zOPY1Dscuxpbs8Mjp0YMEwIDAQABAoIBAGSBJH0jSXvYaRbn
+XqMaqOQbGkq+Dac5lZ17mFruBYG57SCoQdsoUkpCnk8CNGslQLvhlp7zqIvCa3kB
+Hdmti6Cg1CJrmsnqbkaiz9tgDBVfX5xZvnMDhC9BQpBNb4+f9bflqBGGbrZTjshM
+1YjgA40glX/1zB7tOu78I3vnMT2ze+l/vy8ShXv/Jg3k5KwnvVO19bBlNcT76faJ
+Q5+XAd5jDSLi3Ldyi0YXKDdialaQZBw8+R5NjW6rYwoDr5hqxGMn7ZmUDy1BBSLY
+ol5/QfXjrys2qswvs64WsKh5rqdzei9xSWlHMcnaPp1/bbIJYX3arpuiTFl6bPo7
+5f7jzoECgYEAxhLlqEEsrDZWm7jsbV1qyJzq9djDMQbWnZJ16JiCWqFpxoTY+zVR
+JiqntFwDx58I2qvS7POPSjtZgD8HbutwkIorQqdRn3HhOdyCm2Y7GUL+WgQveCQm
+2Hitsrz7KPFdr7czYkp9rYRqnr8iR9wsB5BvZOiXq6hYEhXUft/UY3ECgYEA36yc
+iFNo2PrL5N0aHnMsubGxlmcCpD/k/b+h6HB4opB5kajJURd5glHuy1dHo4kNuvkT
+K0+274zC3ls1JWLrSEOV2uFvYR139kuDiJK0Tt5cDtSXyd8BK5yfxNUx5TAbB1qO
+cn+gYHNYYkz+ED7useu2UBgv1N1xEnUckZqincMCgYBwYQJZzviIkJ/c5DOH7UuL
+nLQWXjCInWegqRsqp3S19QStdGwjQuHStLrHsrkWRh3g60HV3G484KVsBNaz0PLh
+nvLic0n1G8h8aqbTglIfiAoFioBgOqqrgPSUB3K8jQzpiBioOLlLfGh9vtW1Ta7R
+b9ozt19uLiohwK76eQXokQKBgQClGQ25eoerkQMq/SCoVS8wKb8BWyuR/S+TNP3X
+Sh1hMO7q2wEt//vKe5udL+FLDoC5/9tsXryS+P1lj0EdRXSRtIFKPJgvwMapX3fJ
+OTAp/utIbxdTBG4iuMsdSJg5qVZqCSeX0CiayMj2Vgo6B5m8blX1rMSxhHpPu23I
+87EQbQKBgQCIL7YmlsVUEYXJiau0zH/MULAQOhBmbRM+e/vPZ4UfpK8fP+RMrgbc
+N0JWw75vn32zC5GIoyn8BQ4h1Pd8nz59CAPmyB3Tt644gEQsESzsOXuyndHoRhAP
+vVLhHpSVme/oUbpabdI+eoWAOdmK6lmmMR/fX2PcgUr2rG7xOOQ9eg==
+-----END RSA PRIVATE KEY-----

@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDHzCCAgegAwIBAgIITDAjij+YvqwwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE
+AxMVbWluaWNhIHJvb3QgY2EgM2I4YjJjMB4XDTIyMDQyOTAxNDUzM1oXDTI0MDUy
+OTAxNDUzM1owHjEcMBoGA1UEAxMTY3JsLXVwZGF0ZXIuYm91bGRlcjCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBAJZnehi4ybH5r7wgxD9q0XEBlhV6EOXw
+DNzwniCRshWl494m+eseIfNv6ZhrGzbCLj9HWpdei9RsUFLVo6QW4UcyS3nHP9Nh
+s9m2EYWv5hKuyVMC5nTGpnSd0DgVw/Rlr+hw4+uBw/5PdEBqKrajjm09Z07NWrIs
+ktZ8hCaStVzuz2cZJwtLgHedZ4sljp3OoxEuTWFWRqGU/zxih7pXewKSlwKBdkWY
+3xWo2uH4mj/OhbjfS+2RYZoWV60kj9/1sv9zsbjg/6nTXICp9Dka+6xUFxJUCiaI
+NJjAbbsAsrAyTXs/ghacDvRUFWXd6SOw5tf+DDD3ay9KmjhB30gBDgUCAwEAAaNf
+MF0wDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcD
+AjAMBgNVHRMBAf8EAjAAMB4GA1UdEQQXMBWCE2NybC11cGRhdGVyLmJvdWxkZXIw
+DQYJKoZIhvcNAQELBQADggEBAIrGUyCBpfRAdl5IXD7Cp2OsXisDRbyl3xTuPNCu
+97cXXL0n8cO8TzfgvXMMm+UqhFjrN2RhVbIGtGkYUIxxYlc7WMG3TpufzbU9fgtg
+Rk406xQo6QSzPvLDyjI8DYGgT7wpzC7lV4U3zOcDTAY8IXBMzE1p9Aq6hvyaeKjI
+CaL+MFEYmrSB/rYjyyyaCUDkQzsAhdXknlNKr4sWjJ8a8RilzBxDvKzi1EJYjjky
+kpjuFBZp/seFJMIu2iuRc7Dqq/LjDUH/V33uTmYs32TF8VNRwtujbKHmdF2bKfZ8
+5RkwYF65ChAppu59EcB+ovodH6hip9DFLnA277xFzQAWw6M=
+-----END CERTIFICATE-----

@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAlmd6GLjJsfmvvCDEP2rRcQGWFXoQ5fAM3PCeIJGyFaXj3ib5
+6x4h82/pmGsbNsIuP0dal16L1GxQUtWjpBbhRzJLecc/02Gz2bYRha/mEq7JUwLm
+dMamdJ3QOBXD9GWv6HDj64HD/k90QGoqtqOObT1nTs1asiyS1nyEJpK1XO7PZxkn
+C0uAd51niyWOnc6jES5NYVZGoZT/PGKHuld7ApKXAoF2RZjfFaja4fiaP86FuN9L
+7ZFhmhZXrSSP3/Wy/3OxuOD/qdNcgKn0ORr7rFQXElQKJog0mMBtuwCysDJNez+C
+FpwO9FQVZd3pI7Dm1/4MMPdrL0qaOEHfSAEOBQIDAQABAoIBAGpgJWr2xzCpeDGG
+u0ZfNmLGzG/ENc9rSK27QRun8Fx/R/sHdhtk5H5Y2M6Ecmr4S91Vii0lIaDMOsNb
+drHHvO2uFNO8qNfIcnu42gvDrOXp2Vps4rIVDgXM2EfSnRVuFkxAC/O1g/mKGJ62
+eB9U7pc0pqyFcXAzNmedCZp+iOeAqn6MrvtbFrDM3QDR0OTJaBddvavwUoksNK43
+RtJG7+/E5Ywo2OcStPYkVHX9S50v4/ejmzxvundsi4hr30FCfLd3IWkzUZv72ab4
+lpUeEOHIEjiddUvCgLu9ivpXuACqJLGxG1hhEWWPaiM764CsiiMKs1uDeOBqBS0W
+ghRZysECgYEAxNzoYyNCNupbeANDA1UxKiWTsUOErUuiOA++tKvthFARExQKpSad
+yFx9pl7ETNSPJJQFXY5Jo/R/0Js+mo5KtM8pauOsrP2ApfBTwjDJC6ZE/bP1iYQd
+kZIsxDOZUaSP7u0tnB4J9OyF/s/MbMOV8rv+qF+hhDIfvTvDsnAy47MCgYEAw5XQ
+uUIG490fOC52uf5UaOvs2hjK+tc8k5lEAalVieFQWpHdNONbblW0SuabmEvm6t3w
+XE0EGgoceeAx6WfACRdZbUVQ+4v+pQ0v8ifXbWrsyl1PfdgZzNVHArx/Zut060I4
+AECFf+GgOi7/pINRxRmsYLDHV3WB9mWV3LjmS2cCgYA6MYcsjyaqrxp83pH3sT5I
+VDVViDz3MU1xvw3DwLMEktqJRjpjWYf4y4aFIKxsfcUWrf7sCfywz7zcwJd//qIM
+0tROeaD4vjNCWkmYNO2pWOw6BvGFRh2rg4diNFKVNM0rdsctyB9vXF+71Fd8q1Bv
+Ia2EvkgzmNGTEdCmtc4FSwKBgDg3Vn/5J2vVQcTvLfb10CM8NJ7XY3hH50RxWCe2
+GT0BQwxVQqSR+NS005Z3xB85Kc02L4EILfaiE19Afngc9whl6rFKrCUxLucX7EEJ
+DJWJlzfvLAr60yeZ4cg6j8/wO3HnXZtM8FI6cshme9o4WIdzgqVkClfsEYfV2QZR
+KvhfAoGAO9sKfzjv7cgwWBm/1EHunztR99kGsM2MXiHUBETJnRHtstCs6njH5XFh
+fsDDdrL5keR0cX7QAcqZgJypFmak+3xjGaP/1BOdR8K70QYz3aAY1sj5iYzzoTWq
+P8yv4KyAR2LlRmR6ugc3c78B926+GuaYBRA0iJmsjRkTCWszvKc=
+-----END RSA PRIVATE KEY-----

@@ -10,7 +10,7 @@ command -v minica >/dev/null 2>&1 || {
 }

 for SERVICE in admin-revoker expiration-mailer ocsp-updater orphan-finder wfe \
-  akamai-purger nonce bad-key-revoker health-checker; do
+  akamai-purger nonce bad-key-revoker crl-updater health-checker; do
   minica -domains "${SERVICE}.boulder"
 done

@@ -64,11 +64,11 @@ SERVICES = (
             ('sd-test-srv', 'boulder-remoteva-a', 'boulder-remoteva-b')),
     Service('boulder-ca-a',
             8001, 'ca1.boulder:9093',
-            ('./bin/boulder-ca', '--config', os.path.join(config_dir, 'ca-a.json'), '--ca-addr', 'ca1.boulder:9093', '--ocsp-addr', 'ca1.boulder:9096', '--crl-addr', 'ca1.boulder:9196', '--debug-addr', ':8001'),
+            ('./bin/boulder-ca', '--config', os.path.join(config_dir, 'ca-a.json'), '--ca-addr', 'ca1.boulder:9093', '--ocsp-addr', 'ca1.boulder:9096', '--crl-addr', 'ca1.boulder:9106', '--debug-addr', ':8001'),
             ('sd-test-srv', 'boulder-sa-1', 'boulder-sa-2')),
     Service('boulder-ca-b',
             8101, 'ca2.boulder:9093',
-            ('./bin/boulder-ca', '--config', os.path.join(config_dir, 'ca-b.json'), '--ca-addr', 'ca2.boulder:9093', '--ocsp-addr', 'ca2.boulder:9096', '--crl-addr', 'ca2.boulder:9196', '--debug-addr', ':8101'),
+            ('./bin/boulder-ca', '--config', os.path.join(config_dir, 'ca-b.json'), '--ca-addr', 'ca2.boulder:9093', '--ocsp-addr', 'ca2.boulder:9096', '--crl-addr', 'ca2.boulder:9106', '--debug-addr', ':8101'),
             ('sd-test-srv', 'boulder-sa-1', 'boulder-sa-2')),
     Service('akamai-test-srv',
             6789, None,
@@ -82,6 +82,10 @@ SERVICES = (
             8006, None,
             ('./bin/ocsp-updater', '--config', os.path.join(config_dir, 'ocsp-updater.json')),
             ('boulder-ca-a', 'boulder-ca-b')),
+    Service('crl-updater',
+            8021, None,
+            ('./bin/crl-updater', '--config', os.path.join(config_dir, 'crl-updater.json')),
+            ('boulder-ca-a', 'boulder-ca-b', 'boulder-sa-1', 'boulder-sa-2')),
     Service('boulder-ra-1',
             8002, 'ra1.boulder:9094',
             ('./bin/boulder-ra', '--config', os.path.join(config_dir, 'ra.json'), '--addr', 'ra1.boulder:9094', '--debug-addr', ':8002'),