Compare commits

..

No commits in common. "main" and "release-2024-11-04" have entirely different histories.

1829 changed files with 67634 additions and 102797 deletions

View File

@ -27,7 +27,7 @@ jobs:
# tags and 5 tests there would be 10 jobs run.
b:
# The type of runner that the job will run on
runs-on: ubuntu-24.04
runs-on: ubuntu-20.04
strategy:
# When set to true, GitHub cancels all in-progress jobs if any matrix job fails. Default: true
@ -36,7 +36,7 @@ jobs:
matrix:
# Add additional docker image tags here and all tests will be run with the additional image.
BOULDER_TOOLS_TAG:
- go1.24.4_2025-06-06
- go1.23.1_2024-09-05
# Tests command definitions. Use the entire "docker compose" command you want to run.
tests:
# Run ./test.sh --help for a description of each of the flags.
@ -71,7 +71,7 @@ jobs:
- name: Docker Login
# You may pin to the exact commit or the version.
# uses: docker/login-action@f3364599c6aa293cdc2b8391b1b56d0c30e45c8a
uses: docker/login-action@v3.4.0
uses: docker/login-action@v3.3.0
with:
# Username used to log against the Docker registry
username: ${{ secrets.DOCKER_USERNAME }}
@ -95,7 +95,7 @@ jobs:
run: ${{ matrix.tests }}
govulncheck:
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
strategy:
fail-fast: false
@ -117,12 +117,12 @@ jobs:
run: go run golang.org/x/vuln/cmd/govulncheck@latest ./...
vendorcheck:
runs-on: ubuntu-24.04
runs-on: ubuntu-20.04
strategy:
# When set to true, GitHub cancels all in-progress jobs if any matrix job fails. Default: true
fail-fast: false
matrix:
go-version: [ '1.24.1' ]
go-version: [ '1.22.5' ]
steps:
# Checks out your repository under $GITHUB_WORKSPACE, so your job can access it
@ -153,7 +153,7 @@ jobs:
permissions:
contents: none
if: ${{ always() }}
runs-on: ubuntu-24.04
runs-on: ubuntu-latest
name: Boulder CI Test Matrix
needs:
- b

View File

@ -1,53 +0,0 @@
# Nightly job that refreshes the vendored copies of the IANA special-purpose
# address registries and opens a PR when they have changed.
name: Check for IANA special-purpose address registry updates

on:
  schedule:
    - cron: "20 16 * * *"
  workflow_dispatch:

jobs:
  check-iana-registries:
    runs-on: ubuntu-latest
    permissions:
      contents: write
      pull-requests: write
    steps:
      # Sparse checkout: only the iana/data directory is needed.
      - name: Checkout iana/data from main branch
        uses: actions/checkout@v4
        with:
          sparse-checkout: iana/data

      # If the branch already exists, this will fail, which will remind us about
      # the outstanding PR.
      - name: Create an iana-registries-gha branch
        run: |
          git checkout --track origin/main -b iana-registries-gha

      - name: Retrieve the IANA special-purpose address registries
        run: |
          IANA_IPV4="https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry-1.csv"
          IANA_IPV6="https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry-1.csv"
          REPO_IPV4="iana/data/iana-ipv4-special-registry-1.csv"
          REPO_IPV6="iana/data/iana-ipv6-special-registry-1.csv"
          curl --fail --location --show-error --silent --output "${REPO_IPV4}" "${IANA_IPV4}"
          curl --fail --location --show-error --silent --output "${REPO_IPV6}" "${IANA_IPV6}"

      - name: Create a commit and pull request
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        shell: bash
        # `git diff --exit-code` returns an error code if there are any changes.
        # NOTE(review): only user.name is configured here; git commit normally
        # also requires user.email — confirm the runner provides one.
        run: |
          if ! git diff --exit-code; then
            git add iana/data/
            git config user.name "Irwin the IANA Bot"
            git commit \
              --message "Update IANA special-purpose address registries"
            git push origin HEAD
            gh pr create --fill
          fi

View File

@ -1,17 +0,0 @@
# This GitHub Action runs only on pushes to main or a hotfix branch. It can
# be used by tag protection rules to ensure that tags may only be pushed if
# their corresponding commit was first pushed to one of those branches.
name: Merged to main (or hotfix)

on:
  push:
    branches:
      - main
      # Quoted so the glob is never mistaken for YAML alias syntax.
      - "release-branch-*"

jobs:
  merged-to-main:
    name: Merged to main (or hotfix)
    runs-on: ubuntu-24.04
    steps:
      # persist-credentials: false avoids leaving the checkout token on disk;
      # this job only needs to exist (succeed), not push anything.
      - uses: actions/checkout@v4
        with:
          persist-credentials: false

View File

@ -15,25 +15,20 @@ jobs:
fail-fast: false
matrix:
GO_VERSION:
- "1.24.4"
runs-on: ubuntu-24.04
- "1.23.1"
runs-on: ubuntu-20.04
permissions:
contents: write
packages: write
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
fetch-depth: '0' # Needed for verify-release-ancestry.sh to see origin/main
- name: Verify release ancestry
run: ./tools/verify-release-ancestry.sh "$GITHUB_SHA"
- name: Build .deb
id: build
env:
GO_VERSION: ${{ matrix.GO_VERSION }}
run: docker run -v $PWD:/boulder -e GO_VERSION=$GO_VERSION -e COMMIT_ID="$(git rev-parse --short=8 HEAD)" ubuntu:24.04 bash -c 'apt update && apt -y install gnupg2 curl sudo git gcc && cd /boulder/ && ./tools/make-assets.sh'
run: ./tools/make-assets.sh
- name: Compute checksums
id: checksums
@ -53,14 +48,3 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# https://cli.github.com/manual/gh_release_upload
run: gh release upload "${GITHUB_REF_NAME}" boulder*.deb boulder*.tar.gz boulder*.checksums.txt
- name: Build ct-test-srv Container
run: docker buildx build . --build-arg "GO_VERSION=${{ matrix.GO_VERSION }}" -f test/ct-test-srv/Dockerfile -t "ghcr.io/letsencrypt/ct-test-srv:${{ github.ref_name }}-go${{ matrix.GO_VERSION }}"
- name: Login to ghcr.io
run: printenv GITHUB_TOKEN | docker login ghcr.io -u "${{ github.actor }}" --password-stdin
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Push ct-test-srv Container
run: docker push "ghcr.io/letsencrypt/ct-test-srv:${{ github.ref_name }}-go${{ matrix.GO_VERSION }}"

View File

@ -16,8 +16,8 @@ jobs:
fail-fast: false
matrix:
GO_VERSION:
- "1.24.4"
runs-on: ubuntu-24.04
- "1.23.1"
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
with:
@ -27,7 +27,7 @@ jobs:
id: build
env:
GO_VERSION: ${{ matrix.GO_VERSION }}
run: docker run -v $PWD:/boulder -e GO_VERSION=$GO_VERSION -e COMMIT_ID="$(git rev-parse --short=8 HEAD)" ubuntu:24.04 bash -c 'apt update && apt -y install gnupg2 curl sudo git gcc && cd /boulder/ && ./tools/make-assets.sh'
run: ./tools/make-assets.sh
- name: Compute checksums
id: checksums
@ -42,6 +42,3 @@ jobs:
- name: Show checksums
id: check
run: cat boulder*.checksums.txt
- name: Build ct-test-srv Container
run: docker buildx build . --build-arg "GO_VERSION=${{ matrix.GO_VERSION }}" -f test/ct-test-srv/Dockerfile -t "ghcr.io/letsencrypt/ct-test-srv:${{ github.sha }}-go${{ matrix.GO_VERSION }}"

View File

@ -1,89 +1,60 @@
version: "2"
linters:
default: none
disable-all: true
enable:
- asciicheck
- bidichk
- errcheck
- gofmt
- gosec
- gosimple
- govet
- ineffassign
- misspell
- nolintlint
- spancheck
- sqlclosecheck
- staticcheck
- typecheck
- unconvert
- unparam
- unused
- wastedassign
settings:
errcheck:
exclude-functions:
- (net/http.ResponseWriter).Write
- (net.Conn).Write
- encoding/binary.Write
- io.Write
- net/http.Write
- os.Remove
- github.com/miekg/dns.WriteMsg
govet:
disable:
- fieldalignment
- shadow
enable-all: true
settings:
printf:
funcs:
- (github.com/letsencrypt/boulder/log.Logger).Errf
- (github.com/letsencrypt/boulder/log.Logger).Warningf
- (github.com/letsencrypt/boulder/log.Logger).Infof
- (github.com/letsencrypt/boulder/log.Logger).Debugf
- (github.com/letsencrypt/boulder/log.Logger).AuditInfof
- (github.com/letsencrypt/boulder/log.Logger).AuditErrf
- (github.com/letsencrypt/boulder/ocsp/responder).SampledError
- (github.com/letsencrypt/boulder/web.RequestEvent).AddError
gosec:
excludes:
# TODO: Identify, fix, and remove violations of most of these rules
- G101 # Potential hardcoded credentials
- G102 # Binds to all network interfaces
- G104 # Errors unhandled
- G107 # Potential HTTP request made with variable url
- G201 # SQL string formatting
- G202 # SQL string concatenation
- G204 # Subprocess launched with variable
- G302 # Expect file permissions to be 0600 or less
- G306 # Expect WriteFile permissions to be 0600 or less
- G304 # Potential file inclusion via variable
- G401 # Use of weak cryptographic primitive
- G402 # TLS InsecureSkipVerify set true.
- G403 # RSA keys should be at least 2048 bits
- G404 # Use of weak random number generator
nolintlint:
require-explanation: true
require-specific: true
allow-unused: false
staticcheck:
checks:
- all
# TODO: Identify, fix, and remove violations of most of these rules
- -S1029 # Range over the string directly
- -SA1019 # Using a deprecated function, variable, constant or field
- -SA6003 # Converting a string to a slice of runes before ranging over it
- -ST1000 # Incorrect or missing package comment
- -ST1003 # Poorly chosen identifier
- -ST1005 # Incorrectly formatted error string
- -QF1001 # Could apply De Morgan's law
- -QF1003 # Could use tagged switch
- -QF1004 # Could use strings.Split instead
- -QF1007 # Could merge conditional assignment into variable declaration
- -QF1008 # Could remove embedded field from selector
- -QF1009 # Probably want to use time.Time.Equal
- -QF1012 # Use fmt.Fprintf(...) instead of Write(fmt.Sprintf(...))
exclusions:
presets:
- std-error-handling
formatters:
enable:
- gofmt
# TODO(#6202): Re-enable 'wastedassign' linter
linters-settings:
errcheck:
exclude-functions:
- (net/http.ResponseWriter).Write
- (net.Conn).Write
- encoding/binary.Write
- io.Write
- net/http.Write
- os.Remove
- github.com/miekg/dns.WriteMsg
gosimple:
# S1029: Range over the string directly
checks: ["all", "-S1029"]
govet:
enable-all: true
disable:
- fieldalignment
- shadow
settings:
printf:
funcs:
- (github.com/letsencrypt/boulder/log.Logger).Errf
- (github.com/letsencrypt/boulder/log.Logger).Warningf
- (github.com/letsencrypt/boulder/log.Logger).Infof
- (github.com/letsencrypt/boulder/log.Logger).Debugf
- (github.com/letsencrypt/boulder/log.Logger).AuditInfof
- (github.com/letsencrypt/boulder/log.Logger).AuditErrf
- (github.com/letsencrypt/boulder/ocsp/responder).SampledError
- (github.com/letsencrypt/boulder/web.RequestEvent).AddError
gosec:
excludes:
# TODO: Identify, fix, and remove violations of most of these rules
- G101 # Potential hardcoded credentials
- G102 # Binds to all network interfaces
- G107 # Potential HTTP request made with variable url
- G201 # SQL string formatting
- G202 # SQL string concatenation
- G306 # Expect WriteFile permissions to be 0600 or less
- G401 # Use of weak cryptographic primitive
- G402 # TLS InsecureSkipVerify set true.
- G403 # RSA keys should be at least 2048 bits
- G404 # Use of weak random number generator (math/rand instead of crypto/rand)
- G501 # Blacklisted import `crypto/md5`: weak cryptographic primitive
- G505 # Blacklisted import `crypto/sha1`: weak cryptographic primitive
- G601 # Implicit memory aliasing in for loop (this is fixed by go1.22)

View File

@ -33,6 +33,5 @@ extend-ignore-re = [
"otConf" = "otConf"
"serInt" = "serInt"
"StratName" = "StratName"
"typ" = "typ"
"UPDATEs" = "UPDATEs"
"vai" = "vai"

View File

@ -6,8 +6,9 @@ VERSION ?= 1.0.0
EPOCH ?= 1
MAINTAINER ?= "Community"
CMDS = admin boulder ceremony ct-test-srv pardot-test-srv chall-test-srv
CMD_BINS = $(addprefix bin/, $(CMDS) )
CMDS = $(shell find ./cmd -maxdepth 1 -mindepth 1 -type d | grep -v testdata)
CMD_BASENAMES = $(shell echo $(CMDS) | xargs -n1 basename)
CMD_BINS = $(addprefix bin/, $(CMD_BASENAMES) )
OBJECTS = $(CMD_BINS)
# Build environment variables (referencing core/util.go)
@ -24,7 +25,7 @@ BUILD_TIME_VAR = github.com/letsencrypt/boulder/core.BuildTime
GO_BUILD_FLAGS = -ldflags "-X \"$(BUILD_ID_VAR)=$(BUILD_ID)\" -X \"$(BUILD_TIME_VAR)=$(BUILD_TIME)\" -X \"$(BUILD_HOST_VAR)=$(BUILD_HOST)\""
.PHONY: all build build_cmds deb tar
.PHONY: all build build_cmds rpm deb tar
all: build
build: $(OBJECTS)
@ -37,13 +38,24 @@ $(CMD_BINS): build_cmds
build_cmds: | $(OBJDIR)
echo $(OBJECTS)
GOBIN=$(OBJDIR) GO111MODULE=on go install -mod=vendor $(GO_BUILD_FLAGS) ./...
./link.sh
# Building a .deb requires `fpm` from https://github.com/jordansissel/fpm
# Building an RPM requires `fpm` from https://github.com/jordansissel/fpm
# which you can install with `gem install fpm`.
# It is recommended that maintainers use environment overrides to specify
# Version and Epoch, such as:
#
# VERSION=0.1.9 EPOCH=52 MAINTAINER="$(whoami)" ARCHIVEDIR=/tmp make build deb
# VERSION=0.1.9 EPOCH=52 MAINTAINER="$(whoami)" ARCHIVEDIR=/tmp make build rpm
rpm: build
fpm -f -s dir -t rpm --rpm-digest sha256 --name "boulder" \
--license "Mozilla Public License v2.0" --vendor "ISRG" \
--url "https://github.com/letsencrypt/boulder" --prefix=/opt/boulder \
--version "$(VERSION)" --iteration "$(COMMIT_ID)" --epoch "$(EPOCH)" \
--package "$(ARCHIVEDIR)/boulder-$(VERSION)-$(COMMIT_ID).x86_64.rpm" \
--description "Boulder is an ACME-compatible X.509 Certificate Authority" \
--maintainer "$(MAINTAINER)" \
test/config/ sa/db data/ $(OBJECTS)
deb: build
fpm -f -s dir -t deb --name "boulder" \
--license "Mozilla Public License v2.0" --vendor "ISRG" \
@ -52,10 +64,10 @@ deb: build
--package "$(ARCHIVEDIR)/boulder-$(VERSION)-$(COMMIT_ID).x86_64.deb" \
--description "Boulder is an ACME-compatible X.509 Certificate Authority" \
--maintainer "$(MAINTAINER)" \
test/config/ sa/db data/ $(OBJECTS)
test/config/ sa/db data/ $(OBJECTS) bin/ct-test-srv
tar: build
fpm -f -s dir -t tar --name "boulder" --prefix=/opt/boulder \
--package "$(ARCHIVEDIR)/boulder-$(VERSION)-$(COMMIT_ID).amd64.tar" \
test/config/ sa/db data/ $(OBJECTS)
test/config/ sa/db data/ $(OBJECTS) bin/ct-test-srv
gzip -f "$(ARCHIVEDIR)/boulder-$(VERSION)-$(COMMIT_ID).amd64.tar"

View File

@ -3,10 +3,10 @@
[![Build Status](https://github.com/letsencrypt/boulder/actions/workflows/boulder-ci.yml/badge.svg?branch=main)](https://github.com/letsencrypt/boulder/actions/workflows/boulder-ci.yml?query=branch%3Amain)
This is an implementation of an ACME-based CA. The [ACME
protocol](https://github.com/ietf-wg-acme/acme/) allows the CA to automatically
verify that an applicant for a certificate actually controls an identifier, and
allows subscribers to issue and revoke certificates for the identifiers they
control. Boulder is the software that runs [Let's
protocol](https://github.com/ietf-wg-acme/acme/) allows the CA to
automatically verify that an applicant for a certificate actually controls an
identifier, and allows domain holders to issue and revoke certificates for
their domains. Boulder is the software that runs [Let's
Encrypt](https://letsencrypt.org).
## Contents

View File

@ -3,7 +3,7 @@ package akamai
import (
"bytes"
"crypto/hmac"
"crypto/md5" //nolint: gosec // MD5 is required by the Akamai API.
"crypto/md5"
"crypto/sha256"
"crypto/x509"
"encoding/base64"

View File

@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.5
// protoc-gen-go v1.34.1
// protoc v3.20.1
// source: akamai.proto
@ -12,7 +12,6 @@ import (
emptypb "google.golang.org/protobuf/types/known/emptypb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
@ -23,17 +22,20 @@ const (
)
type PurgeRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Urls []string `protobuf:"bytes,1,rep,name=urls,proto3" json:"urls,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Urls []string `protobuf:"bytes,1,rep,name=urls,proto3" json:"urls,omitempty"`
}
func (x *PurgeRequest) Reset() {
*x = PurgeRequest{}
mi := &file_akamai_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
if protoimpl.UnsafeEnabled {
mi := &file_akamai_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *PurgeRequest) String() string {
@ -44,7 +46,7 @@ func (*PurgeRequest) ProtoMessage() {}
func (x *PurgeRequest) ProtoReflect() protoreflect.Message {
mi := &file_akamai_proto_msgTypes[0]
if x != nil {
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -68,7 +70,7 @@ func (x *PurgeRequest) GetUrls() []string {
var File_akamai_proto protoreflect.FileDescriptor
var file_akamai_proto_rawDesc = string([]byte{
var file_akamai_proto_rawDesc = []byte{
0x0a, 0x0c, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06,
0x61, 0x6b, 0x61, 0x6d, 0x61, 0x69, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72,
@ -83,22 +85,22 @@ var file_akamai_proto_rawDesc = string([]byte{
0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64,
0x65, 0x72, 0x2f, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x69, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
})
}
var (
file_akamai_proto_rawDescOnce sync.Once
file_akamai_proto_rawDescData []byte
file_akamai_proto_rawDescData = file_akamai_proto_rawDesc
)
func file_akamai_proto_rawDescGZIP() []byte {
file_akamai_proto_rawDescOnce.Do(func() {
file_akamai_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_akamai_proto_rawDesc), len(file_akamai_proto_rawDesc)))
file_akamai_proto_rawDescData = protoimpl.X.CompressGZIP(file_akamai_proto_rawDescData)
})
return file_akamai_proto_rawDescData
}
var file_akamai_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_akamai_proto_goTypes = []any{
var file_akamai_proto_goTypes = []interface{}{
(*PurgeRequest)(nil), // 0: akamai.PurgeRequest
(*emptypb.Empty)(nil), // 1: google.protobuf.Empty
}
@ -117,11 +119,25 @@ func file_akamai_proto_init() {
if File_akamai_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_akamai_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PurgeRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_akamai_proto_rawDesc), len(file_akamai_proto_rawDesc)),
RawDescriptor: file_akamai_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
@ -132,6 +148,7 @@ func file_akamai_proto_init() {
MessageInfos: file_akamai_proto_msgTypes,
}.Build()
File_akamai_proto = out.File
file_akamai_proto_rawDesc = nil
file_akamai_proto_goTypes = nil
file_akamai_proto_depIdxs = nil
}

View File

@ -1,6 +1,6 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc-gen-go-grpc v1.3.0
// - protoc v3.20.1
// source: akamai.proto
@ -50,24 +50,20 @@ func (c *akamaiPurgerClient) Purge(ctx context.Context, in *PurgeRequest, opts .
// AkamaiPurgerServer is the server API for AkamaiPurger service.
// All implementations must embed UnimplementedAkamaiPurgerServer
// for forward compatibility.
// for forward compatibility
type AkamaiPurgerServer interface {
Purge(context.Context, *PurgeRequest) (*emptypb.Empty, error)
mustEmbedUnimplementedAkamaiPurgerServer()
}
// UnimplementedAkamaiPurgerServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedAkamaiPurgerServer struct{}
// UnimplementedAkamaiPurgerServer must be embedded to have forward compatible implementations.
type UnimplementedAkamaiPurgerServer struct {
}
func (UnimplementedAkamaiPurgerServer) Purge(context.Context, *PurgeRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method Purge not implemented")
}
func (UnimplementedAkamaiPurgerServer) mustEmbedUnimplementedAkamaiPurgerServer() {}
func (UnimplementedAkamaiPurgerServer) testEmbeddedByValue() {}
// UnsafeAkamaiPurgerServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to AkamaiPurgerServer will
@ -77,13 +73,6 @@ type UnsafeAkamaiPurgerServer interface {
}
func RegisterAkamaiPurgerServer(s grpc.ServiceRegistrar, srv AkamaiPurgerServer) {
// If the following call panics, it indicates UnimplementedAkamaiPurgerServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&AkamaiPurger_ServiceDesc, srv)
}

View File

@ -1,43 +0,0 @@
package allowlist
import (
"github.com/letsencrypt/boulder/strictyaml"
)
// List holds a unique collection of items of type T. Membership can be checked
// by calling the Contains method.
type List[T comparable] struct {
	// members stores each item as a map key; the empty struct value
	// carries no data, so the map acts as a set.
	members map[T]struct{}
}
// NewList returns a *List[T] populated with the provided members of type T.
// Duplicate entries collapse into a single membership, ensuring uniqueness.
func NewList[T comparable](members []T) *List[T] {
	set := make(map[T]struct{}, len(members))
	for _, member := range members {
		set[member] = struct{}{}
	}
	return &List[T]{members: set}
}
// NewFromYAML reads a YAML sequence of values of type T and returns a
// *List[T] containing those values. If data is empty, an empty (deny all)
// list is returned. If data cannot be parsed, an error is returned.
func NewFromYAML[T comparable](data []byte) (*List[T], error) {
	// Empty input is a valid configuration meaning "deny everything".
	if len(data) == 0 {
		return NewList[T](nil), nil
	}

	var entries []T
	if err := strictyaml.Unmarshal(data, &entries); err != nil {
		return nil, err
	}
	return NewList(entries), nil
}
// Contains reports whether the provided entry is a member of the list.
func (l *List[T]) Contains(entry T) bool {
	_, present := l.members[entry]
	return present
}

View File

@ -1,109 +0,0 @@
package allowlist
import (
"testing"
)
// TestNewFromYAML exercises YAML parsing into a List: valid input produces
// the expected memberships, empty input produces a deny-all list, and
// malformed YAML returns an error.
func TestNewFromYAML(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name          string
		yamlData      string
		check         []string
		expectAnswers []bool
		expectErr     bool
	}{
		{
			name:          "valid YAML",
			yamlData:      "- oak\n- maple\n- cherry",
			check:         []string{"oak", "walnut", "maple", "cherry"},
			expectAnswers: []bool{true, false, true, true},
			expectErr:     false,
		},
		{
			name:          "empty YAML",
			yamlData:      "",
			check:         []string{"oak", "walnut", "maple", "cherry"},
			expectAnswers: []bool{false, false, false, false},
			expectErr:     false,
		},
		{
			name:          "invalid YAML",
			yamlData:      "{ invalid_yaml",
			check:         []string{},
			expectAnswers: []bool{},
			expectErr:     true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			list, err := NewFromYAML[string]([]byte(tt.yamlData))
			// Error presence must match the table's expectation exactly.
			if (err != nil) != tt.expectErr {
				t.Fatalf("NewFromYAML() error = %v, expectErr = %v", err, tt.expectErr)
			}
			// Membership checks only make sense when parsing succeeded.
			if err == nil {
				for i, item := range tt.check {
					got := list.Contains(item)
					if got != tt.expectAnswers[i] {
						t.Errorf("Contains(%q) got %v, want %v", item, got, tt.expectAnswers[i])
					}
				}
			}
		})
	}
}
// TestNewList verifies List construction directly from slices, covering
// unique members, duplicate members (deduplicated), and nil/empty slices
// (both yield a list containing nothing).
func TestNewList(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name          string
		members       []string
		check         []string
		expectAnswers []bool
	}{
		{
			name:          "unique members",
			members:       []string{"oak", "maple", "cherry"},
			check:         []string{"oak", "walnut", "maple", "cherry"},
			expectAnswers: []bool{true, false, true, true},
		},
		{
			name:          "duplicate members",
			members:       []string{"oak", "maple", "cherry", "oak"},
			check:         []string{"oak", "walnut", "maple", "cherry"},
			expectAnswers: []bool{true, false, true, true},
		},
		{
			name:          "nil list",
			members:       nil,
			check:         []string{"oak", "walnut", "maple", "cherry"},
			expectAnswers: []bool{false, false, false, false},
		},
		{
			name:          "empty list",
			members:       []string{},
			check:         []string{"oak", "walnut", "maple", "cherry"},
			expectAnswers: []bool{false, false, false, false},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			list := NewList[string](tt.members)
			for i, item := range tt.check {
				got := list.Contains(item)
				if got != tt.expectAnswers[i] {
					t.Errorf("Contains(%q) got %v, want %v", item, got, tt.expectAnswers[i])
				}
			}
		})
	}
}

View File

@ -9,7 +9,6 @@ import (
"io"
"net"
"net/http"
"net/netip"
"net/url"
"slices"
"strconv"
@ -21,11 +20,137 @@ import (
"github.com/miekg/dns"
"github.com/prometheus/client_golang/prometheus"
"github.com/letsencrypt/boulder/iana"
"github.com/letsencrypt/boulder/features"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
)
func parseCidr(network string, comment string) net.IPNet {
_, net, err := net.ParseCIDR(network)
if err != nil {
panic(fmt.Sprintf("error parsing %s (%s): %s", network, comment, err))
}
return *net
}
var (
// Private CIDRs to ignore
privateNetworks = []net.IPNet{
// RFC1918
// 10.0.0.0/8
{
IP: []byte{10, 0, 0, 0},
Mask: []byte{255, 0, 0, 0},
},
// 172.16.0.0/12
{
IP: []byte{172, 16, 0, 0},
Mask: []byte{255, 240, 0, 0},
},
// 192.168.0.0/16
{
IP: []byte{192, 168, 0, 0},
Mask: []byte{255, 255, 0, 0},
},
// RFC5735
// 127.0.0.0/8
{
IP: []byte{127, 0, 0, 0},
Mask: []byte{255, 0, 0, 0},
},
// RFC1122 Section 3.2.1.3
// 0.0.0.0/8
{
IP: []byte{0, 0, 0, 0},
Mask: []byte{255, 0, 0, 0},
},
// RFC3927
// 169.254.0.0/16
{
IP: []byte{169, 254, 0, 0},
Mask: []byte{255, 255, 0, 0},
},
// RFC 5736
// 192.0.0.0/24
{
IP: []byte{192, 0, 0, 0},
Mask: []byte{255, 255, 255, 0},
},
// RFC 5737
// 192.0.2.0/24
{
IP: []byte{192, 0, 2, 0},
Mask: []byte{255, 255, 255, 0},
},
// 198.51.100.0/24
{
IP: []byte{198, 51, 100, 0},
Mask: []byte{255, 255, 255, 0},
},
// 203.0.113.0/24
{
IP: []byte{203, 0, 113, 0},
Mask: []byte{255, 255, 255, 0},
},
// RFC 3068
// 192.88.99.0/24
{
IP: []byte{192, 88, 99, 0},
Mask: []byte{255, 255, 255, 0},
},
// RFC 2544, Errata 423
// 198.18.0.0/15
{
IP: []byte{198, 18, 0, 0},
Mask: []byte{255, 254, 0, 0},
},
// RFC 3171
// 224.0.0.0/4
{
IP: []byte{224, 0, 0, 0},
Mask: []byte{240, 0, 0, 0},
},
// RFC 1112
// 240.0.0.0/4
{
IP: []byte{240, 0, 0, 0},
Mask: []byte{240, 0, 0, 0},
},
// RFC 919 Section 7
// 255.255.255.255/32
{
IP: []byte{255, 255, 255, 255},
Mask: []byte{255, 255, 255, 255},
},
// RFC 6598
// 100.64.0.0/10
{
IP: []byte{100, 64, 0, 0},
Mask: []byte{255, 192, 0, 0},
},
}
// Sourced from https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml
// where Global, Source, or Destination is False
privateV6Networks = []net.IPNet{
parseCidr("::/128", "RFC 4291: Unspecified Address"),
parseCidr("::1/128", "RFC 4291: Loopback Address"),
parseCidr("::ffff:0:0/96", "RFC 4291: IPv4-mapped Address"),
parseCidr("100::/64", "RFC 6666: Discard Address Block"),
parseCidr("2001::/23", "RFC 2928: IETF Protocol Assignments"),
parseCidr("2001:2::/48", "RFC 5180: Benchmarking"),
parseCidr("2001:db8::/32", "RFC 3849: Documentation"),
parseCidr("2001::/32", "RFC 4380: TEREDO"),
parseCidr("fc00::/7", "RFC 4193: Unique-Local"),
parseCidr("fe80::/10", "RFC 4291: Section 2.5.6 Link-Scoped Unicast"),
parseCidr("ff00::/8", "RFC 4291: Section 2.7"),
// We disable validations to IPs under the 6to4 anycast prefix because
// there's too much risk of a malicious actor advertising the prefix and
// answering validations for a 6to4 host they do not control.
// https://community.letsencrypt.org/t/problems-validating-ipv6-against-host-running-6to4/18312/9
parseCidr("2002::/16", "RFC 7526: 6to4 anycast prefix deprecated"),
}
)
// ResolverAddrs contains DNS resolver(s) that were chosen to perform a
// validation request or CAA recheck. A ResolverAddr will be in the form of
// host:port, A:host:port, or AAAA:host:port depending on which type of lookup
@ -35,7 +160,7 @@ type ResolverAddrs []string
// Client queries for DNS records
type Client interface {
LookupTXT(context.Context, string) (txts []string, resolver ResolverAddrs, err error)
LookupHost(context.Context, string) ([]netip.Addr, ResolverAddrs, error)
LookupHost(context.Context, string) ([]net.IP, ResolverAddrs, error)
LookupCAA(context.Context, string) ([]*dns.CAA, string, ResolverAddrs, error)
}
@ -71,28 +196,33 @@ func New(
stats prometheus.Registerer,
clk clock.Clock,
maxTries int,
userAgent string,
log blog.Logger,
tlsConfig *tls.Config,
) Client {
var client exchanger
// Clone the default transport because it comes with various settings
// that we like, which are different from the zero value of an
// `http.Transport`.
transport := http.DefaultTransport.(*http.Transport).Clone()
transport.TLSClientConfig = tlsConfig
// The default transport already sets this field, but it isn't
// documented that it will always be set. Set it again to be sure,
// because Unbound will reject non-HTTP/2 DoH requests.
transport.ForceAttemptHTTP2 = true
client = &dohExchanger{
clk: clk,
hc: http.Client{
Timeout: readTimeout,
Transport: transport,
},
userAgent: userAgent,
if features.Get().DOH {
// Clone the default transport because it comes with various settings
// that we like, which are different from the zero value of an
// `http.Transport`.
transport := http.DefaultTransport.(*http.Transport).Clone()
transport.TLSClientConfig = tlsConfig
// The default transport already sets this field, but it isn't
// documented that it will always be set. Set it again to be sure,
// because Unbound will reject non-HTTP/2 DoH requests.
transport.ForceAttemptHTTP2 = true
client = &dohExchanger{
clk: clk,
hc: http.Client{
Timeout: readTimeout,
Transport: transport,
},
}
} else {
client = &dns.Client{
// Set timeout for underlying net.Conn
ReadTimeout: readTimeout,
Net: "udp",
}
}
queryTime := prometheus.NewHistogramVec(
@ -149,11 +279,10 @@ func NewTest(
stats prometheus.Registerer,
clk clock.Clock,
maxTries int,
userAgent string,
log blog.Logger,
tlsConfig *tls.Config,
) Client {
resolver := New(readTimeout, servers, stats, clk, maxTries, userAgent, log, tlsConfig)
resolver := New(readTimeout, servers, stats, clk, maxTries, log, tlsConfig)
resolver.(*impl).allowRestrictedAddresses = true
return resolver
}
@ -273,10 +402,17 @@ func (dnsClient *impl) exchangeOne(ctx context.Context, hostname string, qtype u
case r := <-ch:
if r.err != nil {
var isRetryable bool
// According to the http package documentation, retryable
// errors emitted by the http package are of type *url.Error.
var urlErr *url.Error
isRetryable = errors.As(r.err, &urlErr) && urlErr.Temporary()
if features.Get().DOH {
// According to the http package documentation, retryable
// errors emitted by the http package are of type *url.Error.
var urlErr *url.Error
isRetryable = errors.As(r.err, &urlErr) && urlErr.Temporary()
} else {
// According to the net package documentation, retryable
// errors emitted by the net package are of type *net.OpError.
var opErr *net.OpError
isRetryable = errors.As(r.err, &opErr) && opErr.Temporary()
}
hasRetriesLeft := tries < dnsClient.maxTries
if isRetryable && hasRetriesLeft {
tries++
@ -301,6 +437,7 @@ func (dnsClient *impl) exchangeOne(ctx context.Context, hostname string, qtype u
return
}
}
}
// isTLD returns a simplified view of whether something is a TLD: does it have
@ -342,6 +479,24 @@ func (dnsClient *impl) LookupTXT(ctx context.Context, hostname string) ([]string
return txt, ResolverAddrs{resolver}, err
}
// isPrivateV4 reports whether ip falls within any of the reserved or
// private IPv4 ranges listed in the package-level privateNetworks slice.
func isPrivateV4(ip net.IP) bool {
	// network (not "net") avoids shadowing the net package.
	for _, network := range privateNetworks {
		if network.Contains(ip) {
			return true
		}
	}
	return false
}
// isPrivateV6 reports whether ip falls within any of the non-global IPv6
// ranges listed in the package-level privateV6Networks slice.
func isPrivateV6(ip net.IP) bool {
	// network (not "net") avoids shadowing the net package.
	for _, network := range privateV6Networks {
		if network.Contains(ip) {
			return true
		}
	}
	return false
}
func (dnsClient *impl) lookupIP(ctx context.Context, hostname string, ipType uint16) ([]dns.RR, string, error) {
resp, resolver, err := dnsClient.exchangeOne(ctx, hostname, ipType)
switch ipType {
@ -366,7 +521,7 @@ func (dnsClient *impl) lookupIP(ctx context.Context, hostname string, ipType uin
// chase CNAME/DNAME aliases and return relevant records. It will retry
// requests in the case of temporary network errors. It returns an error if
// both the A and AAAA lookups fail or are empty, but succeeds otherwise.
func (dnsClient *impl) LookupHost(ctx context.Context, hostname string) ([]netip.Addr, ResolverAddrs, error) {
func (dnsClient *impl) LookupHost(ctx context.Context, hostname string) ([]net.IP, ResolverAddrs, error) {
var recordsA, recordsAAAA []dns.RR
var errA, errAAAA error
var resolverA, resolverAAAA string
@ -389,16 +544,13 @@ func (dnsClient *impl) LookupHost(ctx context.Context, hostname string) ([]netip
return a == ""
})
var addrsA []netip.Addr
var addrsA []net.IP
if errA == nil {
for _, answer := range recordsA {
if answer.Header().Rrtype == dns.TypeA {
a, ok := answer.(*dns.A)
if ok && a.A.To4() != nil {
netIP, ok := netip.AddrFromSlice(a.A)
if ok && (iana.IsReservedAddr(netIP) == nil || dnsClient.allowRestrictedAddresses) {
addrsA = append(addrsA, netIP)
}
if ok && a.A.To4() != nil && (!isPrivateV4(a.A) || dnsClient.allowRestrictedAddresses) {
addrsA = append(addrsA, a.A)
}
}
}
@ -407,16 +559,13 @@ func (dnsClient *impl) LookupHost(ctx context.Context, hostname string) ([]netip
}
}
var addrsAAAA []netip.Addr
var addrsAAAA []net.IP
if errAAAA == nil {
for _, answer := range recordsAAAA {
if answer.Header().Rrtype == dns.TypeAAAA {
aaaa, ok := answer.(*dns.AAAA)
if ok && aaaa.AAAA.To16() != nil {
netIP, ok := netip.AddrFromSlice(aaaa.AAAA)
if ok && (iana.IsReservedAddr(netIP) == nil || dnsClient.allowRestrictedAddresses) {
addrsAAAA = append(addrsAAAA, netIP)
}
if ok && aaaa.AAAA.To16() != nil && (!isPrivateV6(aaaa.AAAA) || dnsClient.allowRestrictedAddresses) {
addrsAAAA = append(addrsAAAA, aaaa.AAAA)
}
}
}
@ -536,9 +685,8 @@ func logDNSError(
}
type dohExchanger struct {
clk clock.Clock
hc http.Client
userAgent string
clk clock.Clock
hc http.Client
}
// Exchange sends a DoH query to the provided DoH server and returns the response.
@ -556,9 +704,6 @@ func (d *dohExchanger) Exchange(query *dns.Msg, server string) (*dns.Msg, time.D
}
req.Header.Set("Content-Type", "application/dns-message")
req.Header.Set("Accept", "application/dns-message")
if len(d.userAgent) > 0 {
req.Header.Set("User-Agent", d.userAgent)
}
start := d.clk.Now()
resp, err := d.hc.Do(req)

View File

@ -2,15 +2,10 @@ package bdns
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io"
"log"
"net"
"net/http"
"net/netip"
"net/url"
"os"
"regexp"
@ -24,6 +19,7 @@ import (
"github.com/miekg/dns"
"github.com/prometheus/client_golang/prometheus"
"github.com/letsencrypt/boulder/features"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/test"
@ -31,30 +27,7 @@ import (
const dnsLoopbackAddr = "127.0.0.1:4053"
func mockDNSQuery(w http.ResponseWriter, httpReq *http.Request) {
if httpReq.Header.Get("Content-Type") != "application/dns-message" {
w.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(w, "client didn't send Content-Type: application/dns-message")
}
if httpReq.Header.Get("Accept") != "application/dns-message" {
w.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(w, "client didn't accept Content-Type: application/dns-message")
}
requestBody, err := io.ReadAll(httpReq.Body)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(w, "reading body: %s", err)
}
httpReq.Body.Close()
r := new(dns.Msg)
err = r.Unpack(requestBody)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(w, "unpacking request: %s", err)
}
func mockDNSQuery(w dns.ResponseWriter, r *dns.Msg) {
m := new(dns.Msg)
m.SetReply(r)
m.Compress = false
@ -84,19 +57,19 @@ func mockDNSQuery(w http.ResponseWriter, httpReq *http.Request) {
if q.Name == "v6.letsencrypt.org." {
record := new(dns.AAAA)
record.Hdr = dns.RR_Header{Name: "v6.letsencrypt.org.", Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: 0}
record.AAAA = net.ParseIP("2602:80a:6000:abad:cafe::1")
record.AAAA = net.ParseIP("::1")
appendAnswer(record)
}
if q.Name == "dualstack.letsencrypt.org." {
record := new(dns.AAAA)
record.Hdr = dns.RR_Header{Name: "dualstack.letsencrypt.org.", Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: 0}
record.AAAA = net.ParseIP("2602:80a:6000:abad:cafe::1")
record.AAAA = net.ParseIP("::1")
appendAnswer(record)
}
if q.Name == "v4error.letsencrypt.org." {
record := new(dns.AAAA)
record.Hdr = dns.RR_Header{Name: "v4error.letsencrypt.org.", Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: 0}
record.AAAA = net.ParseIP("2602:80a:6000:abad:cafe::1")
record.AAAA = net.ParseIP("::1")
appendAnswer(record)
}
if q.Name == "v6error.letsencrypt.org." {
@ -112,19 +85,19 @@ func mockDNSQuery(w http.ResponseWriter, httpReq *http.Request) {
if q.Name == "cps.letsencrypt.org." {
record := new(dns.A)
record.Hdr = dns.RR_Header{Name: "cps.letsencrypt.org.", Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 0}
record.A = net.ParseIP("64.112.117.1")
record.A = net.ParseIP("127.0.0.1")
appendAnswer(record)
}
if q.Name == "dualstack.letsencrypt.org." {
record := new(dns.A)
record.Hdr = dns.RR_Header{Name: "dualstack.letsencrypt.org.", Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 0}
record.A = net.ParseIP("64.112.117.1")
record.A = net.ParseIP("127.0.0.1")
appendAnswer(record)
}
if q.Name == "v6error.letsencrypt.org." {
record := new(dns.A)
record.Hdr = dns.RR_Header{Name: "dualstack.letsencrypt.org.", Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 0}
record.A = net.ParseIP("64.112.117.1")
record.A = net.ParseIP("127.0.0.1")
appendAnswer(record)
}
if q.Name == "v4error.letsencrypt.org." {
@ -200,37 +173,45 @@ func mockDNSQuery(w http.ResponseWriter, httpReq *http.Request) {
}
}
body, err := m.Pack()
if err != nil {
fmt.Fprintf(os.Stderr, "packing reply: %s\n", err)
}
w.Header().Set("Content-Type", "application/dns-message")
_, err = w.Write(body)
err := w.WriteMsg(m)
if err != nil {
panic(err) // running tests, so panic is OK
}
}
func serveLoopResolver(stopChan chan bool) {
m := http.NewServeMux()
m.HandleFunc("/dns-query", mockDNSQuery)
httpServer := &http.Server{
dns.HandleFunc(".", mockDNSQuery)
tcpServer := &dns.Server{
Addr: dnsLoopbackAddr,
Handler: m,
Net: "tcp",
ReadTimeout: time.Second,
WriteTimeout: time.Second,
}
udpServer := &dns.Server{
Addr: dnsLoopbackAddr,
Net: "udp",
ReadTimeout: time.Second,
WriteTimeout: time.Second,
}
go func() {
cert := "../test/certs/ipki/localhost/cert.pem"
key := "../test/certs/ipki/localhost/key.pem"
err := httpServer.ListenAndServeTLS(cert, key)
err := tcpServer.ListenAndServe()
if err != nil {
fmt.Println(err)
}
}()
go func() {
err := udpServer.ListenAndServe()
if err != nil {
fmt.Println(err)
}
}()
go func() {
<-stopChan
err := httpServer.Shutdown(context.Background())
err := tcpServer.Shutdown()
if err != nil {
log.Fatal(err)
}
err = udpServer.Shutdown()
if err != nil {
log.Fatal(err)
}
@ -258,21 +239,7 @@ func pollServer() {
}
}
// tlsConfig is used for the TLS config of client instances that talk to the
// DoH server set up in TestMain.
var tlsConfig *tls.Config
func TestMain(m *testing.M) {
root, err := os.ReadFile("../test/certs/ipki/minica.pem")
if err != nil {
log.Fatal(err)
}
pool := x509.NewCertPool()
pool.AppendCertsFromPEM(root)
tlsConfig = &tls.Config{
RootCAs: pool,
}
stop := make(chan bool, 1)
serveLoopResolver(stop)
pollServer()
@ -285,7 +252,7 @@ func TestDNSNoServers(t *testing.T) {
staticProvider, err := NewStaticProvider([]string{})
test.AssertNotError(t, err, "Got error creating StaticProvider")
obj := New(time.Hour, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig)
obj := NewTest(time.Hour, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, blog.UseMock(), nil)
_, resolvers, err := obj.LookupHost(context.Background(), "letsencrypt.org")
test.AssertEquals(t, len(resolvers), 0)
@ -302,7 +269,7 @@ func TestDNSOneServer(t *testing.T) {
staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr})
test.AssertNotError(t, err, "Got error creating StaticProvider")
obj := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig)
obj := NewTest(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, blog.UseMock(), nil)
_, resolvers, err := obj.LookupHost(context.Background(), "cps.letsencrypt.org")
test.AssertEquals(t, len(resolvers), 2)
@ -315,7 +282,7 @@ func TestDNSDuplicateServers(t *testing.T) {
staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr, dnsLoopbackAddr})
test.AssertNotError(t, err, "Got error creating StaticProvider")
obj := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig)
obj := NewTest(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, blog.UseMock(), nil)
_, resolvers, err := obj.LookupHost(context.Background(), "cps.letsencrypt.org")
test.AssertEquals(t, len(resolvers), 2)
@ -328,7 +295,7 @@ func TestDNSServFail(t *testing.T) {
staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr})
test.AssertNotError(t, err, "Got error creating StaticProvider")
obj := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig)
obj := NewTest(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, blog.UseMock(), nil)
bad := "servfail.com"
_, _, err = obj.LookupTXT(context.Background(), bad)
@ -346,7 +313,7 @@ func TestDNSLookupTXT(t *testing.T) {
staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr})
test.AssertNotError(t, err, "Got error creating StaticProvider")
obj := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig)
obj := NewTest(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, blog.UseMock(), nil)
a, _, err := obj.LookupTXT(context.Background(), "letsencrypt.org")
t.Logf("A: %v", a)
@ -359,12 +326,11 @@ func TestDNSLookupTXT(t *testing.T) {
test.AssertEquals(t, a[0], "abc")
}
// TODO(#8213): Convert this to a table test.
func TestDNSLookupHost(t *testing.T) {
staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr})
test.AssertNotError(t, err, "Got error creating StaticProvider")
obj := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig)
obj := NewTest(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, blog.UseMock(), nil)
ip, resolvers, err := obj.LookupHost(context.Background(), "servfail.com")
t.Logf("servfail.com - IP: %s, Err: %s", ip, err)
@ -407,10 +373,10 @@ func TestDNSLookupHost(t *testing.T) {
t.Logf("dualstack.letsencrypt.org - IP: %s, Err: %s", ip, err)
test.AssertNotError(t, err, "Not an error to exist")
test.Assert(t, len(ip) == 2, "Should have 2 IPs")
expected := netip.MustParseAddr("64.112.117.1")
test.Assert(t, ip[0] == expected, "wrong ipv4 address")
expected = netip.MustParseAddr("2602:80a:6000:abad:cafe::1")
test.Assert(t, ip[1] == expected, "wrong ipv6 address")
expected := net.ParseIP("127.0.0.1")
test.Assert(t, ip[0].To4().Equal(expected), "wrong ipv4 address")
expected = net.ParseIP("::1")
test.Assert(t, ip[1].To16().Equal(expected), "wrong ipv6 address")
slices.Sort(resolvers)
test.AssertDeepEquals(t, resolvers, ResolverAddrs{"A:127.0.0.1:4053", "AAAA:127.0.0.1:4053"})
@ -419,8 +385,8 @@ func TestDNSLookupHost(t *testing.T) {
t.Logf("v6error.letsencrypt.org - IP: %s, Err: %s", ip, err)
test.AssertNotError(t, err, "Not an error to exist")
test.Assert(t, len(ip) == 1, "Should have 1 IP")
expected = netip.MustParseAddr("64.112.117.1")
test.Assert(t, ip[0] == expected, "wrong ipv4 address")
expected = net.ParseIP("127.0.0.1")
test.Assert(t, ip[0].To4().Equal(expected), "wrong ipv4 address")
slices.Sort(resolvers)
test.AssertDeepEquals(t, resolvers, ResolverAddrs{"A:127.0.0.1:4053", "AAAA:127.0.0.1:4053"})
@ -429,8 +395,8 @@ func TestDNSLookupHost(t *testing.T) {
t.Logf("v4error.letsencrypt.org - IP: %s, Err: %s", ip, err)
test.AssertNotError(t, err, "Not an error to exist")
test.Assert(t, len(ip) == 1, "Should have 1 IP")
expected = netip.MustParseAddr("2602:80a:6000:abad:cafe::1")
test.Assert(t, ip[0] == expected, "wrong ipv6 address")
expected = net.ParseIP("::1")
test.Assert(t, ip[0].To16().Equal(expected), "wrong ipv6 address")
slices.Sort(resolvers)
test.AssertDeepEquals(t, resolvers, ResolverAddrs{"A:127.0.0.1:4053", "AAAA:127.0.0.1:4053"})
@ -450,7 +416,7 @@ func TestDNSNXDOMAIN(t *testing.T) {
staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr})
test.AssertNotError(t, err, "Got error creating StaticProvider")
obj := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig)
obj := NewTest(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, blog.UseMock(), nil)
hostname := "nxdomain.letsencrypt.org"
_, _, err = obj.LookupHost(context.Background(), hostname)
@ -466,7 +432,7 @@ func TestDNSLookupCAA(t *testing.T) {
staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr})
test.AssertNotError(t, err, "Got error creating StaticProvider")
obj := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig)
obj := NewTest(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, blog.UseMock(), nil)
removeIDExp := regexp.MustCompile(" id: [[:digit:]]+")
caas, resp, resolvers, err := obj.LookupCAA(context.Background(), "bracewel.net")
@ -521,6 +487,37 @@ caa.example.com. 0 IN CAA 1 issue "letsencrypt.org"
test.AssertEquals(t, resolvers[0], "127.0.0.1:4053")
}
// TestIsPrivateIP exercises isPrivateV4 and isPrivateV6 with addresses on
// both sides of each reserved-range boundary.
//
// Fix: two assertion messages were inverted relative to their conditions —
// the negated checks for 128.0.0.1 and 0100::0001:... said "should be
// private" while asserting the address is NOT private. The messages now
// match the assertions, so a failure report reads correctly.
func TestIsPrivateIP(t *testing.T) {
	test.Assert(t, isPrivateV4(net.ParseIP("127.0.0.1")), "should be private")
	test.Assert(t, isPrivateV4(net.ParseIP("192.168.254.254")), "should be private")
	test.Assert(t, isPrivateV4(net.ParseIP("10.255.0.3")), "should be private")
	test.Assert(t, isPrivateV4(net.ParseIP("172.16.255.255")), "should be private")
	test.Assert(t, isPrivateV4(net.ParseIP("172.31.255.255")), "should be private")
	test.Assert(t, !isPrivateV4(net.ParseIP("128.0.0.1")), "should not be private")
	test.Assert(t, !isPrivateV4(net.ParseIP("192.169.255.255")), "should not be private")
	test.Assert(t, !isPrivateV4(net.ParseIP("9.255.0.255")), "should not be private")
	test.Assert(t, !isPrivateV4(net.ParseIP("172.32.255.255")), "should not be private")

	test.Assert(t, isPrivateV6(net.ParseIP("::0")), "should be private")
	test.Assert(t, isPrivateV6(net.ParseIP("::1")), "should be private")
	test.Assert(t, !isPrivateV6(net.ParseIP("::2")), "should not be private")
	test.Assert(t, isPrivateV6(net.ParseIP("fe80::1")), "should be private")
	test.Assert(t, isPrivateV6(net.ParseIP("febf::1")), "should be private")
	test.Assert(t, !isPrivateV6(net.ParseIP("fec0::1")), "should not be private")
	test.Assert(t, !isPrivateV6(net.ParseIP("feff::1")), "should not be private")
	test.Assert(t, isPrivateV6(net.ParseIP("ff00::1")), "should be private")
	test.Assert(t, isPrivateV6(net.ParseIP("ff10::1")), "should be private")
	test.Assert(t, isPrivateV6(net.ParseIP("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")), "should be private")
	test.Assert(t, isPrivateV6(net.ParseIP("2002::")), "should be private")
	test.Assert(t, isPrivateV6(net.ParseIP("2002:ffff:ffff:ffff:ffff:ffff:ffff:ffff")), "should be private")
	test.Assert(t, isPrivateV6(net.ParseIP("0100::")), "should be private")
	test.Assert(t, isPrivateV6(net.ParseIP("0100::0000:ffff:ffff:ffff:ffff")), "should be private")
	test.Assert(t, !isPrivateV6(net.ParseIP("0100::0001:0000:0000:0000:0000")), "should not be private")
}
type testExchanger struct {
sync.Mutex
count int
@ -545,9 +542,10 @@ func (te *testExchanger) Exchange(m *dns.Msg, a string) (*dns.Msg, time.Duration
}
func TestRetry(t *testing.T) {
isTempErr := &url.Error{Op: "read", Err: tempError(true)}
nonTempErr := &url.Error{Op: "read", Err: tempError(false)}
isTempErr := &net.OpError{Op: "read", Err: tempError(true)}
nonTempErr := &net.OpError{Op: "read", Err: tempError(false)}
servFailError := errors.New("DNS problem: server failure at resolver looking up TXT for example.com")
netError := errors.New("DNS problem: networking error looking up TXT for example.com")
type testCase struct {
name string
maxTries int
@ -598,7 +596,7 @@ func TestRetry(t *testing.T) {
isTempErr,
},
},
expected: servFailError,
expected: netError,
expectedCount: 3,
metricsAllRetries: 1,
},
@ -651,7 +649,7 @@ func TestRetry(t *testing.T) {
isTempErr,
},
},
expected: servFailError,
expected: netError,
expectedCount: 3,
metricsAllRetries: 1,
},
@ -665,7 +663,7 @@ func TestRetry(t *testing.T) {
nonTempErr,
},
},
expected: servFailError,
expected: netError,
expectedCount: 2,
},
}
@ -675,7 +673,7 @@ func TestRetry(t *testing.T) {
staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr})
test.AssertNotError(t, err, "Got error creating StaticProvider")
testClient := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), tc.maxTries, "", blog.UseMock(), tlsConfig)
testClient := NewTest(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), tc.maxTries, blog.UseMock(), nil)
dr := testClient.(*impl)
dr.dnsClient = tc.te
_, _, err = dr.LookupTXT(context.Background(), "example.com")
@ -706,7 +704,7 @@ func TestRetry(t *testing.T) {
staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr})
test.AssertNotError(t, err, "Got error creating StaticProvider")
testClient := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 3, "", blog.UseMock(), tlsConfig)
testClient := NewTest(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 3, blog.UseMock(), nil)
dr := testClient.(*impl)
dr.dnsClient = &testExchanger{errs: []error{isTempErr, isTempErr, nil}}
ctx, cancel := context.WithCancel(context.Background())
@ -785,7 +783,7 @@ func (e *rotateFailureExchanger) Exchange(m *dns.Msg, a string) (*dns.Msg, time.
// If its a broken server, return a retryable error
if e.brokenAddresses[a] {
isTempErr := &url.Error{Op: "read", Err: tempError(true)}
isTempErr := &net.OpError{Op: "read", Err: tempError(true)}
return nil, 2 * time.Millisecond, isTempErr
}
@ -807,9 +805,10 @@ func TestRotateServerOnErr(t *testing.T) {
// working server
staticProvider, err := NewStaticProvider(dnsServers)
test.AssertNotError(t, err, "Got error creating StaticProvider")
fmt.Println(staticProvider.servers)
maxTries := 5
client := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), maxTries, "", blog.UseMock(), tlsConfig)
client := NewTest(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), maxTries, blog.UseMock(), nil)
// Configure a mock exchanger that will always return a retryable error for
// servers A and B. This will force server "[2606:4700:4700::1111]:53" to do
@ -873,10 +872,13 @@ func (dohE *dohAlwaysRetryExchanger) Exchange(m *dns.Msg, a string) (*dns.Msg, t
}
func TestDOHMetric(t *testing.T) {
features.Set(features.Config{DOH: true})
defer features.Reset()
staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr})
test.AssertNotError(t, err, "Got error creating StaticProvider")
testClient := New(time.Second*11, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 0, "", blog.UseMock(), tlsConfig)
testClient := NewTest(time.Second*11, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 0, blog.UseMock(), nil)
resolver := testClient.(*impl)
resolver.dnsClient = &dohAlwaysRetryExchanger{err: &url.Error{Op: "read", Err: tempError(true)}}

View File

@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"net"
"net/netip"
"os"
"github.com/miekg/dns"
@ -68,13 +67,13 @@ func (t timeoutError) Timeout() bool {
}
// LookupHost is a mock
func (mock *MockClient) LookupHost(_ context.Context, hostname string) ([]netip.Addr, ResolverAddrs, error) {
func (mock *MockClient) LookupHost(_ context.Context, hostname string) ([]net.IP, ResolverAddrs, error) {
if hostname == "always.invalid" ||
hostname == "invalid.invalid" {
return []netip.Addr{}, ResolverAddrs{"MockClient"}, nil
return []net.IP{}, ResolverAddrs{"MockClient"}, nil
}
if hostname == "always.timeout" {
return []netip.Addr{}, ResolverAddrs{"MockClient"}, &Error{dns.TypeA, "always.timeout", makeTimeoutError(), -1, nil}
return []net.IP{}, ResolverAddrs{"MockClient"}, &Error{dns.TypeA, "always.timeout", makeTimeoutError(), -1, nil}
}
if hostname == "always.error" {
err := &net.OpError{
@ -87,7 +86,7 @@ func (mock *MockClient) LookupHost(_ context.Context, hostname string) ([]netip.
m.AuthenticatedData = true
m.SetEdns0(4096, false)
logDNSError(mock.Log, "mock.server", hostname, m, nil, err)
return []netip.Addr{}, ResolverAddrs{"MockClient"}, &Error{dns.TypeA, hostname, err, -1, nil}
return []net.IP{}, ResolverAddrs{"MockClient"}, &Error{dns.TypeA, hostname, err, -1, nil}
}
if hostname == "id.mismatch" {
err := dns.ErrId
@ -101,21 +100,22 @@ func (mock *MockClient) LookupHost(_ context.Context, hostname string) ([]netip.
record.A = net.ParseIP("127.0.0.1")
r.Answer = append(r.Answer, record)
logDNSError(mock.Log, "mock.server", hostname, m, r, err)
return []netip.Addr{}, ResolverAddrs{"MockClient"}, &Error{dns.TypeA, hostname, err, -1, nil}
return []net.IP{}, ResolverAddrs{"MockClient"}, &Error{dns.TypeA, hostname, err, -1, nil}
}
// dual-homed host with an IPv6 and an IPv4 address
if hostname == "ipv4.and.ipv6.localhost" {
return []netip.Addr{
netip.MustParseAddr("::1"),
netip.MustParseAddr("127.0.0.1"),
return []net.IP{
net.ParseIP("::1"),
net.ParseIP("127.0.0.1"),
}, ResolverAddrs{"MockClient"}, nil
}
if hostname == "ipv6.localhost" {
return []netip.Addr{
netip.MustParseAddr("::1"),
return []net.IP{
net.ParseIP("::1"),
}, ResolverAddrs{"MockClient"}, nil
}
return []netip.Addr{netip.MustParseAddr("127.0.0.1")}, ResolverAddrs{"MockClient"}, nil
ip := net.ParseIP("127.0.0.1")
return []net.IP{ip}, ResolverAddrs{"MockClient"}, nil
}
// LookupCAA returns mock records for use in tests.

View File

@ -6,7 +6,6 @@ import (
"fmt"
"math/rand/v2"
"net"
"net/netip"
"strconv"
"sync"
"time"
@ -62,9 +61,10 @@ func validateServerAddress(address string) error {
}
// Ensure the `host` portion of `address` is a valid FQDN or IP address.
_, err = netip.ParseAddr(host)
IPv6 := net.ParseIP(host).To16()
IPv4 := net.ParseIP(host).To4()
FQDN := dns.IsFqdn(dns.Fqdn(host))
if err != nil && !FQDN {
if IPv6 == nil && IPv4 == nil && !FQDN {
return errors.New("host is not an FQDN or IP address")
}
return nil

317
ca/ca.go
View File

@ -9,11 +9,13 @@ import (
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/gob"
"encoding/hex"
"errors"
"fmt"
"math/big"
mrand "math/rand/v2"
"strings"
"time"
ct "github.com/google/certificate-transparency-go"
@ -32,14 +34,13 @@ import (
capb "github.com/letsencrypt/boulder/ca/proto"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
csrlib "github.com/letsencrypt/boulder/csr"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/goodkey"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/issuance"
"github.com/letsencrypt/boulder/linter"
blog "github.com/letsencrypt/boulder/log"
rapb "github.com/letsencrypt/boulder/ra/proto"
sapb "github.com/letsencrypt/boulder/sa/proto"
)
@ -50,24 +51,6 @@ const (
certType = certificateType("certificate")
)
// issuanceEvent is logged before and after issuance of precertificates and certificates.
// The `omitempty` fields are not always present.
// CSR, Precertificate, and Certificate are hex-encoded DER bytes to make it easier to
// ad-hoc search for sequences or OIDs in logs. Other data, like public key within CSR,
// is logged as base64 because it doesn't have interesting DER structure.
type issuanceEvent struct {
CSR string `json:",omitempty"`
IssuanceRequest *issuance.IssuanceRequest
Issuer string
OrderID int64
Profile string
Requester int64
Result struct {
Precertificate string `json:",omitempty"`
Certificate string `json:",omitempty"`
}
}
// Two maps of keys to Issuers. Lookup by PublicKeyAlgorithm is useful for
// determining the set of issuers which can sign a given (pre)cert, based on its
// PublicKeyAlgorithm. Lookup by NameID is useful for looking up a specific
@ -79,17 +62,31 @@ type issuerMaps struct {
type certProfileWithID struct {
// name is a human readable name used to refer to the certificate profile.
name string
name string
// hash is SHA256 sum over every exported field of an issuance.ProfileConfig
// used to generate the embedded *issuance.Profile.
hash [32]byte
profile *issuance.Profile
}
// certProfilesMaps allows looking up the human-readable name of a certificate
// profile to retrieve the actual profile. The default profile to be used is
// stored alongside the maps.
type certProfilesMaps struct {
// The name of the profile that will be selected if no explicit profile name
// is provided via gRPC.
defaultName string
profileByHash map[[32]byte]*certProfileWithID
profileByName map[string]*certProfileWithID
}
// caMetrics holds various metrics which are shared between caImpl, ocspImpl,
// and crlImpl.
type caMetrics struct {
signatureCount *prometheus.CounterVec
signErrorCount *prometheus.CounterVec
lintErrorCount prometheus.Counter
certificates *prometheus.CounterVec
}
func NewCAMetrics(stats prometheus.Registerer) *caMetrics {
@ -114,15 +111,7 @@ func NewCAMetrics(stats prometheus.Registerer) *caMetrics {
})
stats.MustRegister(lintErrorCount)
certificates := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "certificates",
Help: "Number of certificates issued",
},
[]string{"profile"})
stats.MustRegister(certificates)
return &caMetrics{signatureCount, signErrorCount, lintErrorCount, certificates}
return &caMetrics{signatureCount, signErrorCount, lintErrorCount}
}
func (m *caMetrics) noteSignError(err error) {
@ -137,10 +126,9 @@ func (m *caMetrics) noteSignError(err error) {
type certificateAuthorityImpl struct {
capb.UnsafeCertificateAuthorityServer
sa sapb.StorageAuthorityCertificateClient
sctClient rapb.SCTProviderClient
pa core.PolicyAuthority
issuers issuerMaps
certProfiles map[string]*certProfileWithID
certProfiles certProfilesMaps
// The prefix is prepended to the serial number.
prefix byte
@ -180,27 +168,66 @@ func makeIssuerMaps(issuers []*issuance.Issuer) (issuerMaps, error) {
}
// makeCertificateProfilesMap processes a set of named certificate issuance
// profile configs into a map from name to profile.
func makeCertificateProfilesMap(profiles map[string]*issuance.ProfileConfig) (map[string]*certProfileWithID, error) {
// profile configs into a two pre-computed maps: 1) a human-readable name to the
// profile and 2) a unique hash over contents of the profile to the profile
// itself. It returns the maps or an error if a duplicate name or hash is found.
//
// The unique hash is used in the case of
// - RA instructs CA1 to issue a precertificate
// - CA1 returns the precertificate DER bytes and profile hash to the RA
// - RA instructs CA2 to issue a final certificate, but CA2 does not contain a
// profile corresponding to that hash and an issuance is prevented.
func makeCertificateProfilesMap(defaultName string, profiles map[string]*issuance.ProfileConfig) (certProfilesMaps, error) {
if len(profiles) <= 0 {
return nil, fmt.Errorf("must pass at least one certificate profile")
return certProfilesMaps{}, fmt.Errorf("must pass at least one certificate profile")
}
// Check that a profile exists with the configured default profile name.
_, ok := profiles[defaultName]
if !ok {
return certProfilesMaps{}, fmt.Errorf("defaultCertificateProfileName:\"%s\" was configured, but a profile object was not found for that name", defaultName)
}
profilesByName := make(map[string]*certProfileWithID, len(profiles))
profilesByHash := make(map[[32]byte]*certProfileWithID, len(profiles))
for name, profileConfig := range profiles {
profile, err := issuance.NewProfile(profileConfig)
if err != nil {
return nil, err
return certProfilesMaps{}, err
}
profilesByName[name] = &certProfileWithID{
// gob can only encode exported fields, of which an issuance.Profile has
// none. However, since we're already in a loop iteration having access
// to the issuance.ProfileConfig used to generate the issuance.Profile,
// we'll generate the hash from that.
var encodedProfile bytes.Buffer
enc := gob.NewEncoder(&encodedProfile)
err = enc.Encode(profileConfig)
if err != nil {
return certProfilesMaps{}, err
}
if len(encodedProfile.Bytes()) <= 0 {
return certProfilesMaps{}, fmt.Errorf("certificate profile encoding returned 0 bytes")
}
hash := sha256.Sum256(encodedProfile.Bytes())
withID := certProfileWithID{
name: name,
hash: hash,
profile: profile,
}
profilesByName[name] = &withID
_, found := profilesByHash[hash]
if found {
return certProfilesMaps{}, fmt.Errorf("duplicate certificate profile hash %d", hash)
}
profilesByHash[hash] = &withID
}
return profilesByName, nil
return certProfilesMaps{defaultName, profilesByHash, profilesByName}, nil
}
// NewCertificateAuthorityImpl creates a CA instance that can sign certificates
@ -208,9 +235,9 @@ func makeCertificateProfilesMap(profiles map[string]*issuance.ProfileConfig) (ma
// OCSP (via delegation to an ocspImpl and its issuers).
func NewCertificateAuthorityImpl(
sa sapb.StorageAuthorityCertificateClient,
sctService rapb.SCTProviderClient,
pa core.PolicyAuthority,
boulderIssuers []*issuance.Issuer,
defaultCertProfileName string,
certificateProfiles map[string]*issuance.ProfileConfig,
serialPrefix byte,
maxNames int,
@ -231,7 +258,7 @@ func NewCertificateAuthorityImpl(
return nil, errors.New("must have at least one issuer")
}
certProfiles, err := makeCertificateProfilesMap(certificateProfiles)
certProfiles, err := makeCertificateProfilesMap(defaultCertProfileName, certificateProfiles)
if err != nil {
return nil, err
}
@ -243,7 +270,6 @@ func NewCertificateAuthorityImpl(
ca = &certificateAuthorityImpl{
sa: sa,
sctClient: sctService,
pa: pa,
issuers: issuers,
certProfiles: certProfiles,
@ -265,18 +291,35 @@ var ocspStatusToCode = map[string]int{
"unknown": ocsp.Unknown,
}
// issuePrecertificate is the first step in the [issuance cycle]. It allocates and stores a serial number,
// IssuePrecertificate is the first step in the [issuance cycle]. It allocates and stores a serial number,
// selects a certificate profile, generates and stores a linting certificate, sets the serial's status to
// "wait", signs and stores a precertificate, updates the serial's status to "good", then returns the
// precertificate.
//
// Subsequent final issuance based on this precertificate must happen at most once, and must use the same
// certificate profile.
//
// Returns precertificate DER.
// certificate profile. The certificate profile is identified by a hash to ensure an exact match even if
// the configuration for a specific profile _name_ changes.
//
// [issuance cycle]: https://github.com/letsencrypt/boulder/blob/main/docs/ISSUANCE-CYCLE.md
func (ca *certificateAuthorityImpl) issuePrecertificate(ctx context.Context, certProfile *certProfileWithID, issueReq *capb.IssueCertificateRequest) ([]byte, error) {
func (ca *certificateAuthorityImpl) IssuePrecertificate(ctx context.Context, issueReq *capb.IssueCertificateRequest) (*capb.IssuePrecertificateResponse, error) {
// issueReq.orderID may be zero, for ACMEv1 requests.
if core.IsAnyNilOrZero(issueReq, issueReq.Csr, issueReq.RegistrationID) {
return nil, berrors.InternalServerError("Incomplete issue certificate request")
}
// The CA must check if it is capable of issuing for the given certificate
// profile name. The name is checked here instead of the hash because the RA
// is unaware of what certificate profiles exist. Pre-existing orders stored
// in the database may not have an associated certificate profile name and
// will take the default name stored alongside the map.
if issueReq.CertProfileName == "" {
issueReq.CertProfileName = ca.certProfiles.defaultName
}
certProfile, ok := ca.certProfiles.profileByName[issueReq.CertProfileName]
if !ok {
return nil, fmt.Errorf("the CA is incapable of using a profile named %s", issueReq.CertProfileName)
}
serialBigInt, err := ca.generateSerialNumber()
if err != nil {
return nil, err
@ -296,7 +339,7 @@ func (ca *certificateAuthorityImpl) issuePrecertificate(ctx context.Context, cer
return nil, err
}
precertDER, _, err := ca.issuePrecertificateInner(ctx, issueReq, certProfile, serialBigInt, notBefore, notAfter)
precertDER, cpwid, err := ca.issuePrecertificateInner(ctx, issueReq, certProfile, serialBigInt, notBefore, notAfter)
if err != nil {
return nil, err
}
@ -306,39 +349,14 @@ func (ca *certificateAuthorityImpl) issuePrecertificate(ctx context.Context, cer
return nil, err
}
return precertDER, nil
return &capb.IssuePrecertificateResponse{
DER: precertDER,
CertProfileName: cpwid.name,
CertProfileHash: cpwid.hash[:],
}, nil
}
func (ca *certificateAuthorityImpl) IssueCertificate(ctx context.Context, issueReq *capb.IssueCertificateRequest) (*capb.IssueCertificateResponse, error) {
if core.IsAnyNilOrZero(issueReq, issueReq.Csr, issueReq.RegistrationID, issueReq.OrderID) {
return nil, berrors.InternalServerError("Incomplete issue certificate request")
}
if ca.sctClient == nil {
return nil, errors.New("IssueCertificate called with a nil SCT service")
}
// All issuance requests must come with a profile name, and the RA handles selecting the default.
certProfile, ok := ca.certProfiles[issueReq.CertProfileName]
if !ok {
return nil, fmt.Errorf("the CA is incapable of using a profile named %s", issueReq.CertProfileName)
}
precertDER, err := ca.issuePrecertificate(ctx, certProfile, issueReq)
if err != nil {
return nil, err
}
scts, err := ca.sctClient.GetSCTs(ctx, &rapb.SCTRequest{PrecertDER: precertDER})
if err != nil {
return nil, err
}
certDER, err := ca.issueCertificateForPrecertificate(ctx, certProfile, precertDER, scts.SctDER, issueReq.RegistrationID, issueReq.OrderID)
if err != nil {
return nil, err
}
return &capb.IssueCertificateResponse{DER: certDER}, nil
}
// issueCertificateForPrecertificate is final step in the [issuance cycle].
// IssueCertificateForPrecertificate final step in the [issuance cycle].
//
// Given a precertificate and a set of SCTs for that precertificate, it generates
// a linting final certificate, then signs a final certificate using a real issuer.
@ -348,11 +366,12 @@ func (ca *certificateAuthorityImpl) IssueCertificate(ctx context.Context, issueR
//
// It's critical not to sign two different final certificates for the same
// precertificate. This can happen, for instance, if the caller provides a
// different set of SCTs on subsequent calls to issueCertificateForPrecertificate.
// We rely on the RA not to call issueCertificateForPrecertificate twice for the
// different set of SCTs on subsequent calls to IssueCertificateForPrecertificate.
// We rely on the RA not to call IssueCertificateForPrecertificate twice for the
// same serial. This is accomplished by the fact that
// issueCertificateForPrecertificate is only ever called once per call to `IssueCertificate`.
// If there is any error, the whole certificate issuance attempt fails and any subsequent
// IssueCertificateForPrecertificate is only ever called in a straight-through
// RPC path without retries. If there is any error, including a networking
// error, the whole certificate issuance attempt fails and any subsequent
// issuance will use a different serial number.
//
// We also check that the provided serial number does not already exist as a
@ -360,17 +379,23 @@ func (ca *certificateAuthorityImpl) IssueCertificate(ctx context.Context, issueR
// there could be race conditions where two goroutines are issuing for the same
// serial number at the same time.
//
// Returns the final certificate's bytes as DER.
//
// [issuance cycle]: https://github.com/letsencrypt/boulder/blob/main/docs/ISSUANCE-CYCLE.md
func (ca *certificateAuthorityImpl) issueCertificateForPrecertificate(ctx context.Context,
certProfile *certProfileWithID,
precertDER []byte,
sctBytes [][]byte,
regID int64,
orderID int64,
) ([]byte, error) {
precert, err := x509.ParseCertificate(precertDER)
func (ca *certificateAuthorityImpl) IssueCertificateForPrecertificate(ctx context.Context, req *capb.IssueCertificateForPrecertificateRequest) (*corepb.Certificate, error) {
// issueReq.orderID may be zero, for ACMEv1 requests.
if core.IsAnyNilOrZero(req, req.DER, req.SCTs, req.RegistrationID, req.CertProfileHash) {
return nil, berrors.InternalServerError("Incomplete cert for precertificate request")
}
// The certificate profile hash is checked here instead of the name because
// the hash is over the entire contents of a *ProfileConfig giving assurance
// that the certificate profile has remained unchanged during the roundtrip
// from a CA, to the RA, then back to a (potentially different) CA node.
certProfile, ok := ca.certProfiles.profileByHash[[32]byte(req.CertProfileHash)]
if !ok {
return nil, fmt.Errorf("the CA is incapable of using a profile with hash %d", req.CertProfileHash)
}
precert, err := x509.ParseCertificate(req.DER)
if err != nil {
return nil, err
}
@ -384,9 +409,9 @@ func (ca *certificateAuthorityImpl) issueCertificateForPrecertificate(ctx contex
return nil, fmt.Errorf("error checking for duplicate issuance of %s: %s", serialHex, err)
}
var scts []ct.SignedCertificateTimestamp
for _, singleSCTBytes := range sctBytes {
for _, sctBytes := range req.SCTs {
var sct ct.SignedCertificateTimestamp
_, err = cttls.Unmarshal(singleSCTBytes, &sct)
_, err = cttls.Unmarshal(sctBytes, &sct)
if err != nil {
return nil, err
}
@ -403,37 +428,28 @@ func (ca *certificateAuthorityImpl) issueCertificateForPrecertificate(ctx contex
return nil, err
}
names := strings.Join(issuanceReq.DNSNames, ", ")
ca.log.AuditInfof("Signing cert: issuer=[%s] serial=[%s] regID=[%d] names=[%s] certProfileName=[%s] certProfileHash=[%x] precert=[%s]",
issuer.Name(), serialHex, req.RegistrationID, names, certProfile.name, certProfile.hash, hex.EncodeToString(precert.Raw))
lintCertBytes, issuanceToken, err := issuer.Prepare(certProfile.profile, issuanceReq)
if err != nil {
ca.log.AuditErrf("Preparing cert failed: serial=[%s] err=[%v]", serialHex, err)
ca.log.AuditErrf("Preparing cert failed: issuer=[%s] serial=[%s] regID=[%d] names=[%s] certProfileName=[%s] certProfileHash=[%x] err=[%v]",
issuer.Name(), serialHex, req.RegistrationID, names, certProfile.name, certProfile.hash, err)
return nil, berrors.InternalServerError("failed to prepare certificate signing: %s", err)
}
logEvent := issuanceEvent{
IssuanceRequest: issuanceReq,
Issuer: issuer.Name(),
OrderID: orderID,
Profile: certProfile.name,
Requester: regID,
}
ca.log.AuditObject("Signing cert", logEvent)
var ipStrings []string
for _, ip := range issuanceReq.IPAddresses {
ipStrings = append(ipStrings, ip.String())
}
_, span := ca.tracer.Start(ctx, "signing cert", trace.WithAttributes(
attribute.String("serial", serialHex),
attribute.String("issuer", issuer.Name()),
attribute.String("certProfileName", certProfile.name),
attribute.StringSlice("names", issuanceReq.DNSNames),
attribute.StringSlice("ipAddresses", ipStrings),
))
certDER, err := issuer.Issue(issuanceToken)
if err != nil {
ca.metrics.noteSignError(err)
ca.log.AuditErrf("Signing cert failed: serial=[%s] err=[%v]", serialHex, err)
ca.log.AuditErrf("Signing cert failed: issuer=[%s] serial=[%s] regID=[%d] names=[%s] certProfileName=[%s] certProfileHash=[%x] err=[%v]",
issuer.Name(), serialHex, req.RegistrationID, names, certProfile.name, certProfile.hash, err)
span.SetStatus(codes.Error, err.Error())
span.End()
return nil, berrors.InternalServerError("failed to sign certificate: %s", err)
@ -446,21 +462,28 @@ func (ca *certificateAuthorityImpl) issueCertificateForPrecertificate(ctx contex
}
ca.metrics.signatureCount.With(prometheus.Labels{"purpose": string(certType), "issuer": issuer.Name()}).Inc()
ca.metrics.certificates.With(prometheus.Labels{"profile": certProfile.name}).Inc()
logEvent.Result.Certificate = hex.EncodeToString(certDER)
ca.log.AuditObject("Signing cert success", logEvent)
ca.log.AuditInfof("Signing cert success: issuer=[%s] serial=[%s] regID=[%d] names=[%s] certificate=[%s] certProfileName=[%s] certProfileHash=[%x]",
issuer.Name(), serialHex, req.RegistrationID, names, hex.EncodeToString(certDER), certProfile.name, certProfile.hash)
_, err = ca.sa.AddCertificate(ctx, &sapb.AddCertificateRequest{
Der: certDER,
RegID: regID,
RegID: req.RegistrationID,
Issued: timestamppb.New(ca.clk.Now()),
})
if err != nil {
ca.log.AuditErrf("Failed RPC to store at SA: serial=[%s] err=[%v]", serialHex, hex.EncodeToString(certDER))
ca.log.AuditErrf("Failed RPC to store at SA: issuer=[%s] serial=[%s] cert=[%s] regID=[%d] orderID=[%d] certProfileName=[%s] certProfileHash=[%x] err=[%v]",
issuer.Name(), serialHex, hex.EncodeToString(certDER), req.RegistrationID, req.OrderID, certProfile.name, certProfile.hash, err)
return nil, err
}
return certDER, nil
return &corepb.Certificate{
RegistrationID: req.RegistrationID,
Serial: core.SerialToString(precert.SerialNumber),
Der: certDER,
Digest: core.Fingerprint256(certDER),
Issued: timestamppb.New(precert.NotBefore),
Expires: timestamppb.New(precert.NotAfter),
}, nil
}
// generateSerialNumber produces a big.Int which has more than 64 bits of
@ -543,26 +566,26 @@ func (ca *certificateAuthorityImpl) issuePrecertificateInner(ctx context.Context
serialHex := core.SerialToString(serialBigInt)
dnsNames, ipAddresses, err := identifier.FromCSR(csr).ToValues()
if err != nil {
return nil, nil, err
names := csrlib.NamesFromCSR(csr)
req := &issuance.IssuanceRequest{
PublicKey: csr.PublicKey,
SubjectKeyId: subjectKeyId,
Serial: serialBigInt.Bytes(),
DNSNames: names.SANs,
CommonName: names.CN,
IncludeCTPoison: true,
IncludeMustStaple: issuance.ContainsMustStaple(csr.Extensions),
NotBefore: notBefore,
NotAfter: notAfter,
}
req := &issuance.IssuanceRequest{
PublicKey: issuance.MarshalablePublicKey{PublicKey: csr.PublicKey},
SubjectKeyId: subjectKeyId,
Serial: serialBigInt.Bytes(),
DNSNames: dnsNames,
IPAddresses: ipAddresses,
CommonName: csrlib.CNFromCSR(csr),
IncludeCTPoison: true,
NotBefore: notBefore,
NotAfter: notAfter,
}
ca.log.AuditInfof("Signing precert: serial=[%s] regID=[%d] names=[%s] csr=[%s]",
serialHex, issueReq.RegistrationID, strings.Join(req.DNSNames, ", "), hex.EncodeToString(csr.Raw))
lintCertBytes, issuanceToken, err := issuer.Prepare(certProfile.profile, req)
if err != nil {
ca.log.AuditErrf("Preparing precert failed: serial=[%s] err=[%v]", serialHex, err)
ca.log.AuditErrf("Preparing precert failed: issuer=[%s] serial=[%s] regID=[%d] names=[%s] certProfileName=[%s] certProfileHash=[%x] err=[%v]",
issuer.Name(), serialHex, issueReq.RegistrationID, strings.Join(req.DNSNames, ", "), certProfile.name, certProfile.hash, err)
if errors.Is(err, linter.ErrLinting) {
ca.metrics.lintErrorCount.Inc()
}
@ -585,32 +608,17 @@ func (ca *certificateAuthorityImpl) issuePrecertificateInner(ctx context.Context
return nil, nil, err
}
logEvent := issuanceEvent{
CSR: hex.EncodeToString(csr.Raw),
IssuanceRequest: req,
Issuer: issuer.Name(),
Profile: certProfile.name,
Requester: issueReq.RegistrationID,
OrderID: issueReq.OrderID,
}
ca.log.AuditObject("Signing precert", logEvent)
var ipStrings []string
for _, ip := range csr.IPAddresses {
ipStrings = append(ipStrings, ip.String())
}
_, span := ca.tracer.Start(ctx, "signing precert", trace.WithAttributes(
attribute.String("serial", serialHex),
attribute.String("issuer", issuer.Name()),
attribute.String("certProfileName", certProfile.name),
attribute.StringSlice("names", csr.DNSNames),
attribute.StringSlice("ipAddresses", ipStrings),
))
certDER, err := issuer.Issue(issuanceToken)
if err != nil {
ca.metrics.noteSignError(err)
ca.log.AuditErrf("Signing precert failed: serial=[%s] err=[%v]", serialHex, err)
ca.log.AuditErrf("Signing precert failed: issuer=[%s] serial=[%s] regID=[%d] names=[%s] certProfileName=[%s] certProfileHash=[%x] err=[%v]",
issuer.Name(), serialHex, issueReq.RegistrationID, strings.Join(req.DNSNames, ", "), certProfile.name, certProfile.hash, err)
span.SetStatus(codes.Error, err.Error())
span.End()
return nil, nil, berrors.InternalServerError("failed to sign precertificate: %s", err)
@ -623,13 +631,10 @@ func (ca *certificateAuthorityImpl) issuePrecertificateInner(ctx context.Context
}
ca.metrics.signatureCount.With(prometheus.Labels{"purpose": string(precertType), "issuer": issuer.Name()}).Inc()
ca.log.AuditInfof("Signing precert success: issuer=[%s] serial=[%s] regID=[%d] names=[%s] precert=[%s] certProfileName=[%s] certProfileHash=[%x]",
issuer.Name(), serialHex, issueReq.RegistrationID, strings.Join(req.DNSNames, ", "), hex.EncodeToString(certDER), certProfile.name, certProfile.hash)
logEvent.Result.Precertificate = hex.EncodeToString(certDER)
// The CSR is big and not that informative, so don't log it a second time.
logEvent.CSR = ""
ca.log.AuditObject("Signing precert success", logEvent)
return certDER, &certProfileWithID{certProfile.name, nil}, nil
return certDER, &certProfileWithID{certProfile.name, certProfile.hash, nil}, nil
}
// verifyTBSCertIsDeterministic verifies that x509.CreateCertificate signing

View File

@ -11,7 +11,6 @@ import (
"errors"
"fmt"
"math/big"
mrand "math/rand"
"os"
"strings"
"testing"
@ -33,13 +32,11 @@ import (
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/features"
"github.com/letsencrypt/boulder/goodkey"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/issuance"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/must"
"github.com/letsencrypt/boulder/policy"
rapb "github.com/letsencrypt/boulder/ra/proto"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/test"
)
@ -94,22 +91,25 @@ var (
OIDExtensionSCTList = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2}
)
const arbitraryRegID int64 = 1001
func mustRead(path string) []byte {
return must.Do(os.ReadFile(path))
}
type testCtx struct {
pa core.PolicyAuthority
ocsp *ocspImpl
crl *crlImpl
certProfiles map[string]*issuance.ProfileConfig
serialPrefix byte
maxNames int
boulderIssuers []*issuance.Issuer
keyPolicy goodkey.KeyPolicy
fc clock.FakeClock
metrics *caMetrics
logger *blog.Mock
pa core.PolicyAuthority
ocsp *ocspImpl
crl *crlImpl
defaultCertProfileName string
certProfiles map[string]*issuance.ProfileConfig
serialPrefix byte
maxNames int
boulderIssuers []*issuance.Issuer
keyPolicy goodkey.KeyPolicy
fc clock.FakeClock
metrics *caMetrics
logger *blog.Mock
}
type mockSA struct {
@ -148,27 +148,27 @@ func setup(t *testing.T) *testCtx {
fc := clock.NewFake()
fc.Add(1 * time.Hour)
pa, err := policy.New(map[identifier.IdentifierType]bool{"dns": true}, nil, blog.NewMock())
pa, err := policy.New(nil, blog.NewMock())
test.AssertNotError(t, err, "Couldn't create PA")
err = pa.LoadHostnamePolicyFile("../test/hostname-policy.yaml")
test.AssertNotError(t, err, "Couldn't set hostname policy")
certProfiles := make(map[string]*issuance.ProfileConfig, 0)
certProfiles["legacy"] = &issuance.ProfileConfig{
IncludeCRLDistributionPoints: true,
MaxValidityPeriod: config.Duration{Duration: time.Hour * 24 * 90},
MaxValidityBackdate: config.Duration{Duration: time.Hour},
IgnoredLints: []string{"w_subject_common_name_included"},
AllowMustStaple: true,
MaxValidityPeriod: config.Duration{Duration: time.Hour * 24 * 90},
MaxValidityBackdate: config.Duration{Duration: time.Hour},
IgnoredLints: []string{"w_subject_common_name_included"},
}
certProfiles["modern"] = &issuance.ProfileConfig{
OmitCommonName: true,
OmitKeyEncipherment: true,
OmitClientAuth: true,
OmitSKID: true,
IncludeCRLDistributionPoints: true,
MaxValidityPeriod: config.Duration{Duration: time.Hour * 24 * 6},
MaxValidityBackdate: config.Duration{Duration: time.Hour},
IgnoredLints: []string{"w_ext_subject_key_identifier_missing_sub_cert"},
AllowMustStaple: true,
OmitCommonName: true,
OmitKeyEncipherment: true,
OmitClientAuth: true,
OmitSKID: true,
MaxValidityPeriod: config.Duration{Duration: time.Hour * 24 * 6},
MaxValidityBackdate: config.Duration{Duration: time.Hour},
IgnoredLints: []string{"w_ext_subject_key_identifier_missing_sub_cert"},
}
test.AssertEquals(t, len(certProfiles), 2)
@ -179,7 +179,6 @@ func setup(t *testing.T) *testCtx {
IssuerURL: fmt.Sprintf("http://not-example.com/i/%s", name),
OCSPURL: "http://not-example.com/o",
CRLURLBase: fmt.Sprintf("http://not-example.com/c/%s/", name),
CRLShards: 10,
Location: issuance.IssuerLoc{
File: fmt.Sprintf("../test/hierarchy/%s.key.pem", name),
CertFile: fmt.Sprintf("../test/hierarchy/%s.cert.pem", name),
@ -206,12 +205,7 @@ func setup(t *testing.T) *testCtx {
Name: "lint_errors",
Help: "Number of issuances that were halted by linting errors",
})
certificatesCount := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "certificates",
Help: "Number of certificates issued",
}, []string{"profile"})
cametrics := &caMetrics{signatureCount, signErrorCount, lintErrorCount, certificatesCount}
cametrics := &caMetrics{signatureCount, signErrorCount, lintErrorCount}
ocsp, err := NewOCSPImpl(
boulderIssuers,
@ -238,17 +232,18 @@ func setup(t *testing.T) *testCtx {
test.AssertNotError(t, err, "Failed to create crl impl")
return &testCtx{
pa: pa,
ocsp: ocsp,
crl: crl,
certProfiles: certProfiles,
serialPrefix: 0x11,
maxNames: 2,
boulderIssuers: boulderIssuers,
keyPolicy: keyPolicy,
fc: fc,
metrics: cametrics,
logger: blog.NewMock(),
pa: pa,
ocsp: ocsp,
crl: crl,
defaultCertProfileName: "legacy",
certProfiles: certProfiles,
serialPrefix: 0x11,
maxNames: 2,
boulderIssuers: boulderIssuers,
keyPolicy: keyPolicy,
fc: fc,
metrics: cametrics,
logger: blog.NewMock(),
}
}
@ -260,7 +255,7 @@ func TestSerialPrefix(t *testing.T) {
nil,
nil,
nil,
nil,
"",
nil,
0x00,
testCtx.maxNames,
@ -274,7 +269,7 @@ func TestSerialPrefix(t *testing.T) {
nil,
nil,
nil,
nil,
"",
nil,
0x80,
testCtx.maxNames,
@ -316,6 +311,7 @@ func TestIssuePrecertificate(t *testing.T) {
{"IssuePrecertificate", CNandSANCSR, issueCertificateSubTestIssuePrecertificate},
{"ProfileSelectionRSA", CNandSANCSR, issueCertificateSubTestProfileSelectionRSA},
{"ProfileSelectionECDSA", ECDSACSR, issueCertificateSubTestProfileSelectionECDSA},
{"MustStaple", MustStapleCSR, issueCertificateSubTestMustStaple},
{"UnknownExtension", UnsupportedExtensionCSR, issueCertificateSubTestUnknownExtension},
{"CTPoisonExtension", CTPoisonExtensionCSR, issueCertificateSubTestCTPoisonExtension},
{"CTPoisonExtensionEmpty", CTPoisonExtensionEmptyCSR, issueCertificateSubTestCTPoisonExtension},
@ -332,11 +328,13 @@ func TestIssuePrecertificate(t *testing.T) {
t.Parallel()
req, err := x509.ParseCertificateRequest(testCase.csr)
test.AssertNotError(t, err, "Certificate request failed to parse")
issueReq := &capb.IssueCertificateRequest{Csr: testCase.csr, RegistrationID: mrand.Int63(), OrderID: mrand.Int63()}
issueReq := &capb.IssueCertificateRequest{Csr: testCase.csr, RegistrationID: arbitraryRegID}
var certDER []byte
response, err := ca.IssuePrecertificate(ctx, issueReq)
profile := ca.certProfiles["legacy"]
certDER, err := ca.issuePrecertificate(ctx, profile, issueReq)
test.AssertNotError(t, err, "Failed to issue precertificate")
certDER = response.DER
cert, err := x509.ParseCertificate(certDER)
test.AssertNotError(t, err, "Certificate failed to parse")
@ -361,20 +359,14 @@ func TestIssuePrecertificate(t *testing.T) {
}
}
type mockSCTService struct{}
func (m mockSCTService) GetSCTs(ctx context.Context, sctRequest *rapb.SCTRequest, _ ...grpc.CallOption) (*rapb.SCTResponse, error) {
return &rapb.SCTResponse{}, nil
}
func issueCertificateSubTestSetup(t *testing.T) (*certificateAuthorityImpl, *mockSA) {
testCtx := setup(t)
sa := &mockSA{}
ca, err := NewCertificateAuthorityImpl(
sa,
mockSCTService{},
testCtx.pa,
testCtx.boulderIssuers,
testCtx.defaultCertProfileName,
testCtx.certProfiles,
testCtx.serialPrefix,
testCtx.maxNames,
@ -411,9 +403,9 @@ func TestNoIssuers(t *testing.T) {
sa := &mockSA{}
_, err := NewCertificateAuthorityImpl(
sa,
mockSCTService{},
testCtx.pa,
nil, // No issuers
testCtx.defaultCertProfileName,
testCtx.certProfiles,
testCtx.serialPrefix,
testCtx.maxNames,
@ -432,9 +424,9 @@ func TestMultipleIssuers(t *testing.T) {
sa := &mockSA{}
ca, err := NewCertificateAuthorityImpl(
sa,
mockSCTService{},
testCtx.pa,
testCtx.boulderIssuers,
testCtx.defaultCertProfileName,
testCtx.certProfiles,
testCtx.serialPrefix,
testCtx.maxNames,
@ -444,11 +436,14 @@ func TestMultipleIssuers(t *testing.T) {
testCtx.fc)
test.AssertNotError(t, err, "Failed to remake CA")
selectedProfile := ca.certProfiles.defaultName
_, ok := ca.certProfiles.profileByName[selectedProfile]
test.Assert(t, ok, "Certificate profile was expected to exist")
// Test that an RSA CSR gets issuance from an RSA issuer.
profile := ca.certProfiles["legacy"]
issuedCertDER, err := ca.issuePrecertificate(ctx, profile, &capb.IssueCertificateRequest{Csr: CNandSANCSR, RegistrationID: mrand.Int63(), OrderID: mrand.Int63()})
issuedCert, err := ca.IssuePrecertificate(ctx, &capb.IssueCertificateRequest{Csr: CNandSANCSR, RegistrationID: arbitraryRegID, CertProfileName: selectedProfile})
test.AssertNotError(t, err, "Failed to issue certificate")
cert, err := x509.ParseCertificate(issuedCertDER)
cert, err := x509.ParseCertificate(issuedCert.DER)
test.AssertNotError(t, err, "Certificate failed to parse")
validated := false
for _, issuer := range ca.issuers.byAlg[x509.RSA] {
@ -462,9 +457,9 @@ func TestMultipleIssuers(t *testing.T) {
test.AssertMetricWithLabelsEquals(t, ca.metrics.signatureCount, prometheus.Labels{"purpose": "precertificate", "status": "success"}, 1)
// Test that an ECDSA CSR gets issuance from an ECDSA issuer.
issuedCertDER, err = ca.issuePrecertificate(ctx, profile, &capb.IssueCertificateRequest{Csr: ECDSACSR, RegistrationID: mrand.Int63(), OrderID: mrand.Int63(), CertProfileName: "legacy"})
issuedCert, err = ca.IssuePrecertificate(ctx, &capb.IssueCertificateRequest{Csr: ECDSACSR, RegistrationID: arbitraryRegID, CertProfileName: selectedProfile})
test.AssertNotError(t, err, "Failed to issue certificate")
cert, err = x509.ParseCertificate(issuedCertDER)
cert, err = x509.ParseCertificate(issuedCert.DER)
test.AssertNotError(t, err, "Certificate failed to parse")
validated = false
for _, issuer := range ca.issuers.byAlg[x509.ECDSA] {
@ -493,7 +488,6 @@ func TestUnpredictableIssuance(t *testing.T) {
IssuerURL: fmt.Sprintf("http://not-example.com/i/%s", name),
OCSPURL: "http://not-example.com/o",
CRLURLBase: fmt.Sprintf("http://not-example.com/c/%s/", name),
CRLShards: 10,
Location: issuance.IssuerLoc{
File: fmt.Sprintf("../test/hierarchy/%s.key.pem", name),
CertFile: fmt.Sprintf("../test/hierarchy/%s.cert.pem", name),
@ -504,9 +498,9 @@ func TestUnpredictableIssuance(t *testing.T) {
ca, err := NewCertificateAuthorityImpl(
sa,
mockSCTService{},
testCtx.pa,
boulderIssuers,
testCtx.defaultCertProfileName,
testCtx.certProfiles,
testCtx.serialPrefix,
testCtx.maxNames,
@ -528,14 +522,13 @@ func TestUnpredictableIssuance(t *testing.T) {
// trials, the probability that all 20 issuances come from the same issuer is
// 0.5 ^ 20 = 9.5e-7 ~= 1e-6 = 1 in a million, so we do not consider this test
// to be flaky.
req := &capb.IssueCertificateRequest{Csr: ECDSACSR, RegistrationID: mrand.Int63(), OrderID: mrand.Int63()}
req := &capb.IssueCertificateRequest{Csr: ECDSACSR, RegistrationID: arbitraryRegID}
seenE2 := false
seenR3 := false
profile := ca.certProfiles["legacy"]
for i := 0; i < 20; i++ {
precertDER, err := ca.issuePrecertificate(ctx, profile, req)
result, err := ca.IssuePrecertificate(ctx, req)
test.AssertNotError(t, err, "Failed to issue test certificate")
cert, err := x509.ParseCertificate(precertDER)
cert, err := x509.ParseCertificate(result.DER)
test.AssertNotError(t, err, "Failed to parse test certificate")
if strings.Contains(cert.Issuer.CommonName, "E1") {
t.Fatal("Issued certificate from inactive issuer")
@ -554,11 +547,23 @@ func TestMakeCertificateProfilesMap(t *testing.T) {
testCtx := setup(t)
test.AssertEquals(t, len(testCtx.certProfiles), 2)
testProfile := issuance.ProfileConfig{
AllowMustStaple: false,
MaxValidityPeriod: config.Duration{Duration: time.Hour * 24 * 90},
MaxValidityBackdate: config.Duration{Duration: time.Hour},
}
type nameToHash struct {
name string
hash [32]byte
}
testCases := []struct {
name string
defaultName string
profileConfigs map[string]*issuance.ProfileConfig
expectedErrSubstr string
expectedProfiles []string
expectedProfiles []nameToHash
}{
{
name: "nil profile map",
@ -571,30 +576,56 @@ func TestMakeCertificateProfilesMap(t *testing.T) {
expectedErrSubstr: "at least one certificate profile",
},
{
name: "empty profile config",
name: "no profile matching default name",
defaultName: "default",
profileConfigs: map[string]*issuance.ProfileConfig{
"notDefault": &testProfile,
},
expectedErrSubstr: "profile object was not found for that name",
},
{
name: "duplicate hash",
defaultName: "default",
profileConfigs: map[string]*issuance.ProfileConfig{
"default": &testProfile,
"default2": &testProfile,
},
expectedErrSubstr: "duplicate certificate profile hash",
},
{
name: "empty profile config",
defaultName: "empty",
profileConfigs: map[string]*issuance.ProfileConfig{
"empty": {},
},
expectedErrSubstr: "at least one revocation mechanism must be included",
},
{
name: "minimal profile config",
profileConfigs: map[string]*issuance.ProfileConfig{
"empty": {IncludeCRLDistributionPoints: true},
expectedProfiles: []nameToHash{
{
name: "empty",
hash: [32]byte{0x25, 0x27, 0x72, 0xa1, 0xaf, 0x95, 0xfe, 0xc7, 0x32, 0x78, 0x38, 0x97, 0xd0, 0xf1, 0x83, 0x92, 0xc3, 0xac, 0x60, 0x91, 0x68, 0x4f, 0x22, 0xb6, 0x57, 0x2f, 0x89, 0x1a, 0x54, 0xe5, 0xd8, 0xa3},
},
},
expectedProfiles: []string{"empty"},
},
{
name: "default profiles from setup func",
profileConfigs: testCtx.certProfiles,
expectedProfiles: []string{"legacy", "modern"},
name: "default profiles from setup func",
defaultName: testCtx.defaultCertProfileName,
profileConfigs: testCtx.certProfiles,
expectedProfiles: []nameToHash{
{
name: "legacy",
hash: [32]byte{0x44, 0xc5, 0xbc, 0x73, 0x8, 0x95, 0xba, 0x4c, 0x13, 0x12, 0xc4, 0xc, 0x5d, 0x77, 0x2f, 0x54, 0xf8, 0x54, 0x1, 0xb8, 0x84, 0xaf, 0x6c, 0x58, 0x74, 0x6, 0xac, 0xda, 0x3e, 0x37, 0xfc, 0x88},
},
{
name: "modern",
hash: [32]byte{0x58, 0x7, 0xea, 0x3a, 0x85, 0xcd, 0xf9, 0xd1, 0x7a, 0x9a, 0x59, 0x76, 0xfc, 0x92, 0xea, 0x1b, 0x69, 0x54, 0xe4, 0xbe, 0xcf, 0xe3, 0x91, 0xfa, 0x85, 0x4, 0xbf, 0x1f, 0x55, 0x97, 0x2c, 0x8b},
},
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
profiles, err := makeCertificateProfilesMap(tc.profileConfigs)
profiles, err := makeCertificateProfilesMap(tc.defaultName, tc.profileConfigs)
if tc.expectedErrSubstr != "" {
test.AssertError(t, err, "profile construction should have failed")
@ -604,14 +635,17 @@ func TestMakeCertificateProfilesMap(t *testing.T) {
}
if tc.expectedProfiles != nil {
test.AssertEquals(t, len(profiles), len(tc.expectedProfiles))
test.AssertEquals(t, len(profiles.profileByName), len(tc.expectedProfiles))
}
for _, expected := range tc.expectedProfiles {
cpwid, ok := profiles[expected]
test.Assert(t, ok, fmt.Sprintf("expected profile %q not found", expected))
cpwid, ok := profiles.profileByName[expected.name]
test.Assert(t, ok, fmt.Sprintf("expected profile %q not found", expected.name))
test.AssertEquals(t, cpwid.hash, expected.hash)
test.AssertEquals(t, cpwid.name, expected)
cpwid, ok = profiles.profileByHash[expected.hash]
test.Assert(t, ok, fmt.Sprintf("expected profile %q not found", expected.hash))
test.AssertEquals(t, cpwid.name, expected.name)
}
})
}
@ -669,9 +703,9 @@ func TestInvalidCSRs(t *testing.T) {
sa := &mockSA{}
ca, err := NewCertificateAuthorityImpl(
sa,
mockSCTService{},
testCtx.pa,
testCtx.boulderIssuers,
testCtx.defaultCertProfileName,
testCtx.certProfiles,
testCtx.serialPrefix,
testCtx.maxNames,
@ -684,9 +718,8 @@ func TestInvalidCSRs(t *testing.T) {
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
serializedCSR := mustRead(testCase.csrPath)
profile := ca.certProfiles["legacy"]
issueReq := &capb.IssueCertificateRequest{Csr: serializedCSR, RegistrationID: mrand.Int63(), OrderID: mrand.Int63(), CertProfileName: "legacy"}
_, err = ca.issuePrecertificate(ctx, profile, issueReq)
issueReq := &capb.IssueCertificateRequest{Csr: serializedCSR, RegistrationID: arbitraryRegID}
_, err = ca.IssuePrecertificate(ctx, issueReq)
test.AssertErrorIs(t, err, testCase.errorType)
test.AssertMetricWithLabelsEquals(t, ca.metrics.signatureCount, prometheus.Labels{"purpose": "cert"}, 0)
@ -709,9 +742,9 @@ func TestRejectValidityTooLong(t *testing.T) {
ca, err := NewCertificateAuthorityImpl(
&mockSA{},
mockSCTService{},
testCtx.pa,
testCtx.boulderIssuers,
testCtx.defaultCertProfileName,
testCtx.certProfiles,
testCtx.serialPrefix,
testCtx.maxNames,
@ -722,8 +755,7 @@ func TestRejectValidityTooLong(t *testing.T) {
test.AssertNotError(t, err, "Failed to create CA")
// Test that the CA rejects CSRs that would expire after the intermediate cert
profile := ca.certProfiles["legacy"]
_, err = ca.issuePrecertificate(ctx, profile, &capb.IssueCertificateRequest{Csr: CNandSANCSR, RegistrationID: mrand.Int63(), OrderID: mrand.Int63(), CertProfileName: "legacy"})
_, err = ca.IssuePrecertificate(ctx, &capb.IssueCertificateRequest{Csr: CNandSANCSR, RegistrationID: arbitraryRegID})
test.AssertError(t, err, "Cannot issue a certificate that expires after the intermediate certificate")
test.AssertErrorIs(t, err, berrors.InternalServer)
}
@ -742,12 +774,30 @@ func issueCertificateSubTestProfileSelectionECDSA(t *testing.T, i *TestCertifica
test.AssertEquals(t, i.cert.KeyUsage, expectedKeyUsage)
}
func countMustStaple(t *testing.T, cert *x509.Certificate) (count int) {
oidTLSFeature := asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 24}
mustStapleFeatureValue := []byte{0x30, 0x03, 0x02, 0x01, 0x05}
for _, ext := range cert.Extensions {
if ext.Id.Equal(oidTLSFeature) {
test.Assert(t, !ext.Critical, "Extension was marked critical")
test.AssertByteEquals(t, ext.Value, mustStapleFeatureValue)
count++
}
}
return count
}
func issueCertificateSubTestMustStaple(t *testing.T, i *TestCertificateIssuance) {
test.AssertMetricWithLabelsEquals(t, i.ca.metrics.signatureCount, prometheus.Labels{"purpose": "precertificate"}, 1)
test.AssertEquals(t, countMustStaple(t, i.cert), 1)
}
func issueCertificateSubTestUnknownExtension(t *testing.T, i *TestCertificateIssuance) {
test.AssertMetricWithLabelsEquals(t, i.ca.metrics.signatureCount, prometheus.Labels{"purpose": "precertificate"}, 1)
// NOTE: The hard-coded value here will have to change over time as Boulder
// adds or removes (unrequested/default) extensions in certificates.
expectedExtensionCount := 10
expectedExtensionCount := 9
test.AssertEquals(t, len(i.cert.Extensions), expectedExtensionCount)
}
@ -785,9 +835,9 @@ func TestIssueCertificateForPrecertificate(t *testing.T) {
sa := &mockSA{}
ca, err := NewCertificateAuthorityImpl(
sa,
mockSCTService{},
testCtx.pa,
testCtx.boulderIssuers,
testCtx.defaultCertProfileName,
testCtx.certProfiles,
testCtx.serialPrefix,
testCtx.maxNames,
@ -797,11 +847,13 @@ func TestIssueCertificateForPrecertificate(t *testing.T) {
testCtx.fc)
test.AssertNotError(t, err, "Failed to create CA")
profile := ca.certProfiles["legacy"]
issueReq := capb.IssueCertificateRequest{Csr: CNandSANCSR, RegistrationID: mrand.Int63(), OrderID: mrand.Int63(), CertProfileName: "legacy"}
precertDER, err := ca.issuePrecertificate(ctx, profile, &issueReq)
_, ok := ca.certProfiles.profileByName[ca.certProfiles.defaultName]
test.Assert(t, ok, "Certificate profile was expected to exist")
issueReq := capb.IssueCertificateRequest{Csr: CNandSANCSR, RegistrationID: arbitraryRegID, OrderID: 0}
precert, err := ca.IssuePrecertificate(ctx, &issueReq)
test.AssertNotError(t, err, "Failed to issue precert")
parsedPrecert, err := x509.ParseCertificate(precertDER)
parsedPrecert, err := x509.ParseCertificate(precert.DER)
test.AssertNotError(t, err, "Failed to parse precert")
test.AssertMetricWithLabelsEquals(t, ca.metrics.signatureCount, prometheus.Labels{"purpose": "precertificate", "status": "success"}, 1)
test.AssertMetricWithLabelsEquals(t, ca.metrics.signatureCount, prometheus.Labels{"purpose": "certificate", "status": "success"}, 0)
@ -818,14 +870,15 @@ func TestIssueCertificateForPrecertificate(t *testing.T) {
}
test.AssertNotError(t, err, "Failed to marshal SCT")
certDER, err := ca.issueCertificateForPrecertificate(ctx,
profile,
precertDER,
sctBytes,
mrand.Int63(),
mrand.Int63())
cert, err := ca.IssueCertificateForPrecertificate(ctx, &capb.IssueCertificateForPrecertificateRequest{
DER: precert.DER,
SCTs: sctBytes,
RegistrationID: arbitraryRegID,
OrderID: 0,
CertProfileHash: precert.CertProfileHash,
})
test.AssertNotError(t, err, "Failed to issue cert from precert")
parsedCert, err := x509.ParseCertificate(certDER)
parsedCert, err := x509.ParseCertificate(cert.Der)
test.AssertNotError(t, err, "Failed to parse cert")
test.AssertMetricWithLabelsEquals(t, ca.metrics.signatureCount, prometheus.Labels{"purpose": "certificate", "status": "success"}, 1)
@ -847,9 +900,9 @@ func TestIssueCertificateForPrecertificateWithSpecificCertificateProfile(t *test
sa := &mockSA{}
ca, err := NewCertificateAuthorityImpl(
sa,
mockSCTService{},
testCtx.pa,
testCtx.boulderIssuers,
testCtx.defaultCertProfileName,
testCtx.certProfiles,
testCtx.serialPrefix,
testCtx.maxNames,
@ -859,19 +912,19 @@ func TestIssueCertificateForPrecertificateWithSpecificCertificateProfile(t *test
testCtx.fc)
test.AssertNotError(t, err, "Failed to create CA")
selectedProfile := "modern"
certProfile, ok := ca.certProfiles[selectedProfile]
selectedProfile := "legacy"
certProfile, ok := ca.certProfiles.profileByName[selectedProfile]
test.Assert(t, ok, "Certificate profile was expected to exist")
issueReq := capb.IssueCertificateRequest{
Csr: CNandSANCSR,
RegistrationID: mrand.Int63(),
OrderID: mrand.Int63(),
RegistrationID: arbitraryRegID,
OrderID: 0,
CertProfileName: selectedProfile,
}
precertDER, err := ca.issuePrecertificate(ctx, certProfile, &issueReq)
precert, err := ca.IssuePrecertificate(ctx, &issueReq)
test.AssertNotError(t, err, "Failed to issue precert")
parsedPrecert, err := x509.ParseCertificate(precertDER)
parsedPrecert, err := x509.ParseCertificate(precert.DER)
test.AssertNotError(t, err, "Failed to parse precert")
test.AssertMetricWithLabelsEquals(t, ca.metrics.signatureCount, prometheus.Labels{"purpose": "precertificate", "status": "success"}, 1)
test.AssertMetricWithLabelsEquals(t, ca.metrics.signatureCount, prometheus.Labels{"purpose": "certificate", "status": "success"}, 0)
@ -888,14 +941,15 @@ func TestIssueCertificateForPrecertificateWithSpecificCertificateProfile(t *test
}
test.AssertNotError(t, err, "Failed to marshal SCT")
certDER, err := ca.issueCertificateForPrecertificate(ctx,
certProfile,
precertDER,
sctBytes,
mrand.Int63(),
mrand.Int63())
cert, err := ca.IssueCertificateForPrecertificate(ctx, &capb.IssueCertificateForPrecertificateRequest{
DER: precert.DER,
SCTs: sctBytes,
RegistrationID: arbitraryRegID,
OrderID: 0,
CertProfileHash: certProfile.hash[:],
})
test.AssertNotError(t, err, "Failed to issue cert from precert")
parsedCert, err := x509.ParseCertificate(certDER)
parsedCert, err := x509.ParseCertificate(cert.Der)
test.AssertNotError(t, err, "Failed to parse cert")
test.AssertMetricWithLabelsEquals(t, ca.metrics.signatureCount, prometheus.Labels{"purpose": "certificate", "status": "success"}, 1)
@ -962,9 +1016,9 @@ func TestIssueCertificateForPrecertificateDuplicateSerial(t *testing.T) {
sa := &dupeSA{}
ca, err := NewCertificateAuthorityImpl(
sa,
mockSCTService{},
testCtx.pa,
testCtx.boulderIssuers,
testCtx.defaultCertProfileName,
testCtx.certProfiles,
testCtx.serialPrefix,
testCtx.maxNames,
@ -979,17 +1033,21 @@ func TestIssueCertificateForPrecertificateDuplicateSerial(t *testing.T) {
t.Fatal(err)
}
profile := ca.certProfiles["legacy"]
issueReq := capb.IssueCertificateRequest{Csr: CNandSANCSR, RegistrationID: mrand.Int63(), OrderID: mrand.Int63(), CertProfileName: "legacy"}
precertDER, err := ca.issuePrecertificate(ctx, profile, &issueReq)
selectedProfile := ca.certProfiles.defaultName
certProfile, ok := ca.certProfiles.profileByName[selectedProfile]
test.Assert(t, ok, "Certificate profile was expected to exist")
issueReq := capb.IssueCertificateRequest{Csr: CNandSANCSR, RegistrationID: arbitraryRegID, OrderID: 0}
precert, err := ca.IssuePrecertificate(ctx, &issueReq)
test.AssertNotError(t, err, "Failed to issue precert")
test.AssertMetricWithLabelsEquals(t, ca.metrics.signatureCount, prometheus.Labels{"purpose": "precertificate", "status": "success"}, 1)
_, err = ca.issueCertificateForPrecertificate(ctx,
profile,
precertDER,
sctBytes,
mrand.Int63(),
mrand.Int63())
_, err = ca.IssueCertificateForPrecertificate(ctx, &capb.IssueCertificateForPrecertificateRequest{
DER: precert.DER,
SCTs: sctBytes,
RegistrationID: arbitraryRegID,
OrderID: 0,
CertProfileHash: certProfile.hash[:],
})
if err == nil {
t.Error("Expected error issuing duplicate serial but got none.")
}
@ -1005,9 +1063,9 @@ func TestIssueCertificateForPrecertificateDuplicateSerial(t *testing.T) {
errorsa := &getCertErrorSA{}
errorca, err := NewCertificateAuthorityImpl(
errorsa,
mockSCTService{},
testCtx.pa,
testCtx.boulderIssuers,
testCtx.defaultCertProfileName,
testCtx.certProfiles,
testCtx.serialPrefix,
testCtx.maxNames,
@ -1017,12 +1075,13 @@ func TestIssueCertificateForPrecertificateDuplicateSerial(t *testing.T) {
testCtx.fc)
test.AssertNotError(t, err, "Failed to create CA")
_, err = errorca.issueCertificateForPrecertificate(ctx,
profile,
precertDER,
sctBytes,
mrand.Int63(),
mrand.Int63())
_, err = errorca.IssueCertificateForPrecertificate(ctx, &capb.IssueCertificateForPrecertificateRequest{
DER: precert.DER,
SCTs: sctBytes,
RegistrationID: arbitraryRegID,
OrderID: 0,
CertProfileHash: certProfile.hash[:],
})
if err == nil {
t.Fatal("Expected error issuing duplicate serial but got none.")
}

View File

@ -4,7 +4,6 @@ import (
"context"
"crypto/x509"
"encoding/hex"
mrand "math/rand"
"testing"
"time"
@ -32,9 +31,9 @@ func TestOCSP(t *testing.T) {
testCtx := setup(t)
ca, err := NewCertificateAuthorityImpl(
&mockSA{},
mockSCTService{},
testCtx.pa,
testCtx.boulderIssuers,
testCtx.defaultCertProfileName,
testCtx.certProfiles,
testCtx.serialPrefix,
testCtx.maxNames,
@ -45,12 +44,11 @@ func TestOCSP(t *testing.T) {
test.AssertNotError(t, err, "Failed to create CA")
ocspi := testCtx.ocsp
profile := ca.certProfiles["legacy"]
// Issue a certificate from an RSA issuer, request OCSP from the same issuer,
// and make sure it works.
rsaCertDER, err := ca.issuePrecertificate(ctx, profile, &capb.IssueCertificateRequest{Csr: CNandSANCSR, RegistrationID: mrand.Int63(), OrderID: mrand.Int63(), CertProfileName: "legacy"})
rsaCertPB, err := ca.IssuePrecertificate(ctx, &capb.IssueCertificateRequest{Csr: CNandSANCSR, RegistrationID: arbitraryRegID})
test.AssertNotError(t, err, "Failed to issue certificate")
rsaCert, err := x509.ParseCertificate(rsaCertDER)
rsaCert, err := x509.ParseCertificate(rsaCertPB.DER)
test.AssertNotError(t, err, "Failed to parse rsaCert")
rsaIssuerID := issuance.IssuerNameID(rsaCert)
rsaOCSPPB, err := ocspi.GenerateOCSP(ctx, &capb.GenerateOCSPRequest{
@ -71,9 +69,9 @@ func TestOCSP(t *testing.T) {
// Issue a certificate from an ECDSA issuer, request OCSP from the same issuer,
// and make sure it works.
ecdsaCertDER, err := ca.issuePrecertificate(ctx, profile, &capb.IssueCertificateRequest{Csr: ECDSACSR, RegistrationID: mrand.Int63(), OrderID: mrand.Int63(), CertProfileName: "legacy"})
ecdsaCertPB, err := ca.IssuePrecertificate(ctx, &capb.IssueCertificateRequest{Csr: ECDSACSR, RegistrationID: arbitraryRegID})
test.AssertNotError(t, err, "Failed to issue certificate")
ecdsaCert, err := x509.ParseCertificate(ecdsaCertDER)
ecdsaCert, err := x509.ParseCertificate(ecdsaCertPB.DER)
test.AssertNotError(t, err, "Failed to parse ecdsaCert")
ecdsaIssuerID := issuance.IssuerNameID(ecdsaCert)
ecdsaOCSPPB, err := ocspi.GenerateOCSP(ctx, &capb.GenerateOCSPRequest{

View File

@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.5
// protoc-gen-go v1.34.1
// protoc v3.20.1
// source: ca.proto
@ -13,7 +13,6 @@ import (
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
@ -24,7 +23,10 @@ const (
)
type IssueCertificateRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Next unused field number: 6
Csr []byte `protobuf:"bytes,1,opt,name=csr,proto3" json:"csr,omitempty"`
RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
@ -34,15 +36,15 @@ type IssueCertificateRequest struct {
// assigned inside the CA during *Profile construction if no name is provided.
// The value of this field should not be relied upon inside the RA.
CertProfileName string `protobuf:"bytes,5,opt,name=certProfileName,proto3" json:"certProfileName,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IssueCertificateRequest) Reset() {
*x = IssueCertificateRequest{}
mi := &file_ca_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
if protoimpl.UnsafeEnabled {
mi := &file_ca_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *IssueCertificateRequest) String() string {
@ -53,7 +55,7 @@ func (*IssueCertificateRequest) ProtoMessage() {}
func (x *IssueCertificateRequest) ProtoReflect() protoreflect.Message {
mi := &file_ca_proto_msgTypes[0]
if x != nil {
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -96,29 +98,41 @@ func (x *IssueCertificateRequest) GetCertProfileName() string {
return ""
}
type IssueCertificateResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
DER []byte `protobuf:"bytes,1,opt,name=DER,proto3" json:"DER,omitempty"`
unknownFields protoimpl.UnknownFields
type IssuePrecertificateResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Next unused field number: 4
DER []byte `protobuf:"bytes,1,opt,name=DER,proto3" json:"DER,omitempty"`
// certProfileHash is a hash over the exported fields of a certificate profile
// to ensure that the profile remains unchanged after multiple roundtrips
// through the RA and CA.
CertProfileHash []byte `protobuf:"bytes,2,opt,name=certProfileHash,proto3" json:"certProfileHash,omitempty"`
// certProfileName is a human readable name returned back to the RA for later
// use. If IssueCertificateRequest.certProfileName was an empty string, the
// CAs default profile name will be assigned.
CertProfileName string `protobuf:"bytes,3,opt,name=certProfileName,proto3" json:"certProfileName,omitempty"`
}
func (x *IssueCertificateResponse) Reset() {
*x = IssueCertificateResponse{}
mi := &file_ca_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
func (x *IssuePrecertificateResponse) Reset() {
*x = IssuePrecertificateResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_ca_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *IssueCertificateResponse) String() string {
func (x *IssuePrecertificateResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IssueCertificateResponse) ProtoMessage() {}
func (*IssuePrecertificateResponse) ProtoMessage() {}
func (x *IssueCertificateResponse) ProtoReflect() protoreflect.Message {
func (x *IssuePrecertificateResponse) ProtoReflect() protoreflect.Message {
mi := &file_ca_proto_msgTypes[1]
if x != nil {
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -128,36 +142,136 @@ func (x *IssueCertificateResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
// Deprecated: Use IssueCertificateResponse.ProtoReflect.Descriptor instead.
func (*IssueCertificateResponse) Descriptor() ([]byte, []int) {
// Deprecated: Use IssuePrecertificateResponse.ProtoReflect.Descriptor instead.
func (*IssuePrecertificateResponse) Descriptor() ([]byte, []int) {
return file_ca_proto_rawDescGZIP(), []int{1}
}
func (x *IssueCertificateResponse) GetDER() []byte {
func (x *IssuePrecertificateResponse) GetDER() []byte {
if x != nil {
return x.DER
}
return nil
}
func (x *IssuePrecertificateResponse) GetCertProfileHash() []byte {
if x != nil {
return x.CertProfileHash
}
return nil
}
func (x *IssuePrecertificateResponse) GetCertProfileName() string {
if x != nil {
return x.CertProfileName
}
return ""
}
type IssueCertificateForPrecertificateRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Next unused field number: 6
DER []byte `protobuf:"bytes,1,opt,name=DER,proto3" json:"DER,omitempty"`
SCTs [][]byte `protobuf:"bytes,2,rep,name=SCTs,proto3" json:"SCTs,omitempty"`
RegistrationID int64 `protobuf:"varint,3,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
OrderID int64 `protobuf:"varint,4,opt,name=orderID,proto3" json:"orderID,omitempty"`
// certProfileHash is a hash over the exported fields of a certificate profile
// to ensure that the profile remains unchanged after multiple roundtrips
// through the RA and CA.
CertProfileHash []byte `protobuf:"bytes,5,opt,name=certProfileHash,proto3" json:"certProfileHash,omitempty"`
}
func (x *IssueCertificateForPrecertificateRequest) Reset() {
*x = IssueCertificateForPrecertificateRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_ca_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *IssueCertificateForPrecertificateRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IssueCertificateForPrecertificateRequest) ProtoMessage() {}
func (x *IssueCertificateForPrecertificateRequest) ProtoReflect() protoreflect.Message {
mi := &file_ca_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IssueCertificateForPrecertificateRequest.ProtoReflect.Descriptor instead.
func (*IssueCertificateForPrecertificateRequest) Descriptor() ([]byte, []int) {
return file_ca_proto_rawDescGZIP(), []int{2}
}
func (x *IssueCertificateForPrecertificateRequest) GetDER() []byte {
if x != nil {
return x.DER
}
return nil
}
func (x *IssueCertificateForPrecertificateRequest) GetSCTs() [][]byte {
if x != nil {
return x.SCTs
}
return nil
}
func (x *IssueCertificateForPrecertificateRequest) GetRegistrationID() int64 {
if x != nil {
return x.RegistrationID
}
return 0
}
func (x *IssueCertificateForPrecertificateRequest) GetOrderID() int64 {
if x != nil {
return x.OrderID
}
return 0
}
func (x *IssueCertificateForPrecertificateRequest) GetCertProfileHash() []byte {
if x != nil {
return x.CertProfileHash
}
return nil
}
// Exactly one of certDER or [serial and issuerID] must be set.
type GenerateOCSPRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Next unused field number: 8
Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
Reason int32 `protobuf:"varint,3,opt,name=reason,proto3" json:"reason,omitempty"`
RevokedAt *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=revokedAt,proto3" json:"revokedAt,omitempty"`
Serial string `protobuf:"bytes,5,opt,name=serial,proto3" json:"serial,omitempty"`
IssuerID int64 `protobuf:"varint,6,opt,name=issuerID,proto3" json:"issuerID,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Next unused field number: 8
Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
Reason int32 `protobuf:"varint,3,opt,name=reason,proto3" json:"reason,omitempty"`
RevokedAt *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=revokedAt,proto3" json:"revokedAt,omitempty"`
Serial string `protobuf:"bytes,5,opt,name=serial,proto3" json:"serial,omitempty"`
IssuerID int64 `protobuf:"varint,6,opt,name=issuerID,proto3" json:"issuerID,omitempty"`
}
func (x *GenerateOCSPRequest) Reset() {
*x = GenerateOCSPRequest{}
mi := &file_ca_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
if protoimpl.UnsafeEnabled {
mi := &file_ca_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *GenerateOCSPRequest) String() string {
@ -167,8 +281,8 @@ func (x *GenerateOCSPRequest) String() string {
func (*GenerateOCSPRequest) ProtoMessage() {}
func (x *GenerateOCSPRequest) ProtoReflect() protoreflect.Message {
mi := &file_ca_proto_msgTypes[2]
if x != nil {
mi := &file_ca_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -180,7 +294,7 @@ func (x *GenerateOCSPRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GenerateOCSPRequest.ProtoReflect.Descriptor instead.
func (*GenerateOCSPRequest) Descriptor() ([]byte, []int) {
return file_ca_proto_rawDescGZIP(), []int{2}
return file_ca_proto_rawDescGZIP(), []int{3}
}
func (x *GenerateOCSPRequest) GetStatus() string {
@ -219,17 +333,20 @@ func (x *GenerateOCSPRequest) GetIssuerID() int64 {
}
type OCSPResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Response []byte `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Response []byte `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"`
}
func (x *OCSPResponse) Reset() {
*x = OCSPResponse{}
mi := &file_ca_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
if protoimpl.UnsafeEnabled {
mi := &file_ca_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *OCSPResponse) String() string {
@ -239,8 +356,8 @@ func (x *OCSPResponse) String() string {
func (*OCSPResponse) ProtoMessage() {}
func (x *OCSPResponse) ProtoReflect() protoreflect.Message {
mi := &file_ca_proto_msgTypes[3]
if x != nil {
mi := &file_ca_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -252,7 +369,7 @@ func (x *OCSPResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use OCSPResponse.ProtoReflect.Descriptor instead.
func (*OCSPResponse) Descriptor() ([]byte, []int) {
return file_ca_proto_rawDescGZIP(), []int{3}
return file_ca_proto_rawDescGZIP(), []int{4}
}
func (x *OCSPResponse) GetResponse() []byte {
@ -263,21 +380,24 @@ func (x *OCSPResponse) GetResponse() []byte {
}
type GenerateCRLRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Payload:
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Types that are assignable to Payload:
//
// *GenerateCRLRequest_Metadata
// *GenerateCRLRequest_Entry
Payload isGenerateCRLRequest_Payload `protobuf_oneof:"payload"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
Payload isGenerateCRLRequest_Payload `protobuf_oneof:"payload"`
}
func (x *GenerateCRLRequest) Reset() {
*x = GenerateCRLRequest{}
mi := &file_ca_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
if protoimpl.UnsafeEnabled {
mi := &file_ca_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *GenerateCRLRequest) String() string {
@ -287,8 +407,8 @@ func (x *GenerateCRLRequest) String() string {
func (*GenerateCRLRequest) ProtoMessage() {}
func (x *GenerateCRLRequest) ProtoReflect() protoreflect.Message {
mi := &file_ca_proto_msgTypes[4]
if x != nil {
mi := &file_ca_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -300,30 +420,26 @@ func (x *GenerateCRLRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GenerateCRLRequest.ProtoReflect.Descriptor instead.
func (*GenerateCRLRequest) Descriptor() ([]byte, []int) {
return file_ca_proto_rawDescGZIP(), []int{4}
return file_ca_proto_rawDescGZIP(), []int{5}
}
func (x *GenerateCRLRequest) GetPayload() isGenerateCRLRequest_Payload {
if x != nil {
return x.Payload
func (m *GenerateCRLRequest) GetPayload() isGenerateCRLRequest_Payload {
if m != nil {
return m.Payload
}
return nil
}
func (x *GenerateCRLRequest) GetMetadata() *CRLMetadata {
if x != nil {
if x, ok := x.Payload.(*GenerateCRLRequest_Metadata); ok {
return x.Metadata
}
if x, ok := x.GetPayload().(*GenerateCRLRequest_Metadata); ok {
return x.Metadata
}
return nil
}
func (x *GenerateCRLRequest) GetEntry() *proto.CRLEntry {
if x != nil {
if x, ok := x.Payload.(*GenerateCRLRequest_Entry); ok {
return x.Entry
}
if x, ok := x.GetPayload().(*GenerateCRLRequest_Entry); ok {
return x.Entry
}
return nil
}
@ -345,20 +461,23 @@ func (*GenerateCRLRequest_Metadata) isGenerateCRLRequest_Payload() {}
func (*GenerateCRLRequest_Entry) isGenerateCRLRequest_Payload() {}
type CRLMetadata struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Next unused field number: 5
IssuerNameID int64 `protobuf:"varint,1,opt,name=issuerNameID,proto3" json:"issuerNameID,omitempty"`
ThisUpdate *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=thisUpdate,proto3" json:"thisUpdate,omitempty"`
ShardIdx int64 `protobuf:"varint,3,opt,name=shardIdx,proto3" json:"shardIdx,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Next unused field number: 5
IssuerNameID int64 `protobuf:"varint,1,opt,name=issuerNameID,proto3" json:"issuerNameID,omitempty"`
ThisUpdate *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=thisUpdate,proto3" json:"thisUpdate,omitempty"`
ShardIdx int64 `protobuf:"varint,3,opt,name=shardIdx,proto3" json:"shardIdx,omitempty"`
}
func (x *CRLMetadata) Reset() {
*x = CRLMetadata{}
mi := &file_ca_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
if protoimpl.UnsafeEnabled {
mi := &file_ca_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *CRLMetadata) String() string {
@ -368,8 +487,8 @@ func (x *CRLMetadata) String() string {
func (*CRLMetadata) ProtoMessage() {}
func (x *CRLMetadata) ProtoReflect() protoreflect.Message {
mi := &file_ca_proto_msgTypes[5]
if x != nil {
mi := &file_ca_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -381,7 +500,7 @@ func (x *CRLMetadata) ProtoReflect() protoreflect.Message {
// Deprecated: Use CRLMetadata.ProtoReflect.Descriptor instead.
func (*CRLMetadata) Descriptor() ([]byte, []int) {
return file_ca_proto_rawDescGZIP(), []int{5}
return file_ca_proto_rawDescGZIP(), []int{6}
}
func (x *CRLMetadata) GetIssuerNameID() int64 {
@ -406,17 +525,20 @@ func (x *CRLMetadata) GetShardIdx() int64 {
}
type GenerateCRLResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Chunk []byte `protobuf:"bytes,1,opt,name=chunk,proto3" json:"chunk,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Chunk []byte `protobuf:"bytes,1,opt,name=chunk,proto3" json:"chunk,omitempty"`
}
func (x *GenerateCRLResponse) Reset() {
*x = GenerateCRLResponse{}
mi := &file_ca_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
if protoimpl.UnsafeEnabled {
mi := &file_ca_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *GenerateCRLResponse) String() string {
@ -426,8 +548,8 @@ func (x *GenerateCRLResponse) String() string {
func (*GenerateCRLResponse) ProtoMessage() {}
func (x *GenerateCRLResponse) ProtoReflect() protoreflect.Message {
mi := &file_ca_proto_msgTypes[6]
if x != nil {
mi := &file_ca_proto_msgTypes[7]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -439,7 +561,7 @@ func (x *GenerateCRLResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GenerateCRLResponse.ProtoReflect.Descriptor instead.
func (*GenerateCRLResponse) Descriptor() ([]byte, []int) {
return file_ca_proto_rawDescGZIP(), []int{6}
return file_ca_proto_rawDescGZIP(), []int{7}
}
func (x *GenerateCRLResponse) GetChunk() []byte {
@ -451,7 +573,7 @@ func (x *GenerateCRLResponse) GetChunk() []byte {
var File_ca_proto protoreflect.FileDescriptor
var file_ca_proto_rawDesc = string([]byte{
var file_ca_proto_rawDesc = []byte{
0x0a, 0x08, 0x63, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x63, 0x61, 0x1a, 0x15,
0x63, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
@ -466,106 +588,134 @@ var file_ca_proto_rawDesc = string([]byte{
0x72, 0x64, 0x65, 0x72, 0x49, 0x44, 0x12, 0x28, 0x0a, 0x0f, 0x63, 0x65, 0x72, 0x74, 0x50, 0x72,
0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52,
0x0f, 0x63, 0x65, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65,
0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x2c, 0x0a, 0x18, 0x49, 0x73, 0x73, 0x75, 0x65, 0x43,
0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x44, 0x45, 0x52, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
0x03, 0x44, 0x45, 0x52, 0x22, 0xb9, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
0x65, 0x4f, 0x43, 0x53, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06,
0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74,
0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03,
0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x09,
0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x41, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x72, 0x65, 0x76,
0x6f, 0x6b, 0x65, 0x64, 0x41, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c,
0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x1a,
0x0a, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03,
0x52, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05,
0x22, 0x2a, 0x0a, 0x0c, 0x4f, 0x43, 0x53, 0x50, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x76, 0x0a, 0x12,
0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x2d, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x61, 0x2e, 0x43, 0x52, 0x4c, 0x4d, 0x65, 0x74,
0x61, 0x64, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
0x61, 0x12, 0x26, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x0e, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x52, 0x4c, 0x45, 0x6e, 0x74, 0x72, 0x79,
0x48, 0x00, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79,
0x6c, 0x6f, 0x61, 0x64, 0x22, 0x8f, 0x01, 0x0a, 0x0b, 0x43, 0x52, 0x4c, 0x4d, 0x65, 0x74, 0x61,
0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61,
0x6d, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x69, 0x73, 0x73, 0x75,
0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x68, 0x69, 0x73,
0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x74, 0x68, 0x69, 0x73, 0x55, 0x70,
0x64, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x78,
0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x78,
0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x2b, 0x0a, 0x13, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61,
0x74, 0x65, 0x43, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a,
0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x63, 0x68,
0x75, 0x6e, 0x6b, 0x32, 0x67, 0x0a, 0x14, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x4f, 0x0a, 0x10, 0x49,
0x73, 0x73, 0x75, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12,
0x1b, 0x2e, 0x63, 0x61, 0x2e, 0x49, 0x73, 0x73, 0x75, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66,
0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63,
0x61, 0x2e, 0x49, 0x73, 0x73, 0x75, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x32, 0x4c, 0x0a, 0x0d,
0x4f, 0x43, 0x53, 0x50, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x3b, 0x0a,
0x0c, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4f, 0x43, 0x53, 0x50, 0x12, 0x17, 0x2e,
0x63, 0x61, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4f, 0x43, 0x53, 0x50, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x63, 0x61, 0x2e, 0x4f, 0x43, 0x53, 0x50,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x32, 0x54, 0x0a, 0x0c, 0x43, 0x52,
0x4c, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x44, 0x0a, 0x0b, 0x47, 0x65,
0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x52, 0x4c, 0x12, 0x16, 0x2e, 0x63, 0x61, 0x2e, 0x47,
0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x17, 0x2e, 0x63, 0x61, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43,
0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01,
0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c,
0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64,
0x65, 0x72, 0x2f, 0x63, 0x61, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x33,
})
0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x83, 0x01, 0x0a, 0x1b, 0x49, 0x73, 0x73, 0x75, 0x65,
0x50, 0x72, 0x65, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x44, 0x45, 0x52, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0c, 0x52, 0x03, 0x44, 0x45, 0x52, 0x12, 0x28, 0x0a, 0x0f, 0x63, 0x65, 0x72, 0x74,
0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x48, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0c, 0x52, 0x0f, 0x63, 0x65, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x48, 0x61,
0x73, 0x68, 0x12, 0x28, 0x0a, 0x0f, 0x63, 0x65, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c,
0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x65, 0x72,
0x74, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xbc, 0x01, 0x0a,
0x28, 0x49, 0x73, 0x73, 0x75, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
0x65, 0x46, 0x6f, 0x72, 0x50, 0x72, 0x65, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x44, 0x45, 0x52,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x44, 0x45, 0x52, 0x12, 0x12, 0x0a, 0x04, 0x53,
0x43, 0x54, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x04, 0x53, 0x43, 0x54, 0x73, 0x12,
0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49,
0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72,
0x49, 0x44, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x49,
0x44, 0x12, 0x28, 0x0a, 0x0f, 0x63, 0x65, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65,
0x48, 0x61, 0x73, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x63, 0x65, 0x72, 0x74,
0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x48, 0x61, 0x73, 0x68, 0x22, 0xb9, 0x01, 0x0a, 0x13,
0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4f, 0x43, 0x53, 0x50, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20,
0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x72,
0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x72, 0x65, 0x61,
0x73, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x41, 0x74,
0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
0x6d, 0x70, 0x52, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x41, 0x74, 0x12, 0x16, 0x0a,
0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73,
0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49,
0x44, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49,
0x44, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x2a, 0x0a, 0x0c, 0x4f, 0x43, 0x53, 0x50, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x22, 0x76, 0x0a, 0x12, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43,
0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x08, 0x6d, 0x65, 0x74,
0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x61,
0x2e, 0x43, 0x52, 0x4c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x08,
0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x26, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72,
0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43,
0x52, 0x4c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x48, 0x00, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79,
0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x8f, 0x01, 0x0a, 0x0b,
0x43, 0x52, 0x4c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x0a, 0x0c, 0x69,
0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28,
0x03, 0x52, 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44, 0x12,
0x3a, 0x0a, 0x0a, 0x74, 0x68, 0x69, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
0x0a, 0x74, 0x68, 0x69, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73,
0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x73,
0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x78, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x2b, 0x0a,
0x13, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0c, 0x52, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x32, 0xd5, 0x01, 0x0a, 0x14, 0x43,
0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72,
0x69, 0x74, 0x79, 0x12, 0x55, 0x0a, 0x13, 0x49, 0x73, 0x73, 0x75, 0x65, 0x50, 0x72, 0x65, 0x63,
0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x2e, 0x63, 0x61, 0x2e,
0x49, 0x73, 0x73, 0x75, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x63, 0x61, 0x2e, 0x49, 0x73, 0x73,
0x75, 0x65, 0x50, 0x72, 0x65, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x21, 0x49, 0x73,
0x73, 0x75, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x46, 0x6f,
0x72, 0x50, 0x72, 0x65, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12,
0x2c, 0x2e, 0x63, 0x61, 0x2e, 0x49, 0x73, 0x73, 0x75, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66,
0x69, 0x63, 0x61, 0x74, 0x65, 0x46, 0x6f, 0x72, 0x50, 0x72, 0x65, 0x63, 0x65, 0x72, 0x74, 0x69,
0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e,
0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65,
0x22, 0x00, 0x32, 0x4c, 0x0a, 0x0d, 0x4f, 0x43, 0x53, 0x50, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61,
0x74, 0x6f, 0x72, 0x12, 0x3b, 0x0a, 0x0c, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4f,
0x43, 0x53, 0x50, 0x12, 0x17, 0x2e, 0x63, 0x61, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
0x65, 0x4f, 0x43, 0x53, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x63,
0x61, 0x2e, 0x4f, 0x43, 0x53, 0x50, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
0x32, 0x54, 0x0a, 0x0c, 0x43, 0x52, 0x4c, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72,
0x12, 0x44, 0x0a, 0x0b, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x52, 0x4c, 0x12,
0x16, 0x2e, 0x63, 0x61, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x52, 0x4c,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x61, 0x2e, 0x47, 0x65, 0x6e,
0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x63, 0x61, 0x2f, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_ca_proto_rawDescOnce sync.Once
file_ca_proto_rawDescData []byte
file_ca_proto_rawDescData = file_ca_proto_rawDesc
)
func file_ca_proto_rawDescGZIP() []byte {
file_ca_proto_rawDescOnce.Do(func() {
file_ca_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ca_proto_rawDesc), len(file_ca_proto_rawDesc)))
file_ca_proto_rawDescData = protoimpl.X.CompressGZIP(file_ca_proto_rawDescData)
})
return file_ca_proto_rawDescData
}
var file_ca_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
var file_ca_proto_goTypes = []any{
(*IssueCertificateRequest)(nil), // 0: ca.IssueCertificateRequest
(*IssueCertificateResponse)(nil), // 1: ca.IssueCertificateResponse
(*GenerateOCSPRequest)(nil), // 2: ca.GenerateOCSPRequest
(*OCSPResponse)(nil), // 3: ca.OCSPResponse
(*GenerateCRLRequest)(nil), // 4: ca.GenerateCRLRequest
(*CRLMetadata)(nil), // 5: ca.CRLMetadata
(*GenerateCRLResponse)(nil), // 6: ca.GenerateCRLResponse
(*timestamppb.Timestamp)(nil), // 7: google.protobuf.Timestamp
(*proto.CRLEntry)(nil), // 8: core.CRLEntry
var file_ca_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
var file_ca_proto_goTypes = []interface{}{
(*IssueCertificateRequest)(nil), // 0: ca.IssueCertificateRequest
(*IssuePrecertificateResponse)(nil), // 1: ca.IssuePrecertificateResponse
(*IssueCertificateForPrecertificateRequest)(nil), // 2: ca.IssueCertificateForPrecertificateRequest
(*GenerateOCSPRequest)(nil), // 3: ca.GenerateOCSPRequest
(*OCSPResponse)(nil), // 4: ca.OCSPResponse
(*GenerateCRLRequest)(nil), // 5: ca.GenerateCRLRequest
(*CRLMetadata)(nil), // 6: ca.CRLMetadata
(*GenerateCRLResponse)(nil), // 7: ca.GenerateCRLResponse
(*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp
(*proto.CRLEntry)(nil), // 9: core.CRLEntry
(*proto.Certificate)(nil), // 10: core.Certificate
}
var file_ca_proto_depIdxs = []int32{
7, // 0: ca.GenerateOCSPRequest.revokedAt:type_name -> google.protobuf.Timestamp
5, // 1: ca.GenerateCRLRequest.metadata:type_name -> ca.CRLMetadata
8, // 2: ca.GenerateCRLRequest.entry:type_name -> core.CRLEntry
7, // 3: ca.CRLMetadata.thisUpdate:type_name -> google.protobuf.Timestamp
0, // 4: ca.CertificateAuthority.IssueCertificate:input_type -> ca.IssueCertificateRequest
2, // 5: ca.OCSPGenerator.GenerateOCSP:input_type -> ca.GenerateOCSPRequest
4, // 6: ca.CRLGenerator.GenerateCRL:input_type -> ca.GenerateCRLRequest
1, // 7: ca.CertificateAuthority.IssueCertificate:output_type -> ca.IssueCertificateResponse
3, // 8: ca.OCSPGenerator.GenerateOCSP:output_type -> ca.OCSPResponse
6, // 9: ca.CRLGenerator.GenerateCRL:output_type -> ca.GenerateCRLResponse
7, // [7:10] is the sub-list for method output_type
4, // [4:7] is the sub-list for method input_type
4, // [4:4] is the sub-list for extension type_name
4, // [4:4] is the sub-list for extension extendee
0, // [0:4] is the sub-list for field type_name
8, // 0: ca.GenerateOCSPRequest.revokedAt:type_name -> google.protobuf.Timestamp
6, // 1: ca.GenerateCRLRequest.metadata:type_name -> ca.CRLMetadata
9, // 2: ca.GenerateCRLRequest.entry:type_name -> core.CRLEntry
8, // 3: ca.CRLMetadata.thisUpdate:type_name -> google.protobuf.Timestamp
0, // 4: ca.CertificateAuthority.IssuePrecertificate:input_type -> ca.IssueCertificateRequest
2, // 5: ca.CertificateAuthority.IssueCertificateForPrecertificate:input_type -> ca.IssueCertificateForPrecertificateRequest
3, // 6: ca.OCSPGenerator.GenerateOCSP:input_type -> ca.GenerateOCSPRequest
5, // 7: ca.CRLGenerator.GenerateCRL:input_type -> ca.GenerateCRLRequest
1, // 8: ca.CertificateAuthority.IssuePrecertificate:output_type -> ca.IssuePrecertificateResponse
10, // 9: ca.CertificateAuthority.IssueCertificateForPrecertificate:output_type -> core.Certificate
4, // 10: ca.OCSPGenerator.GenerateOCSP:output_type -> ca.OCSPResponse
7, // 11: ca.CRLGenerator.GenerateCRL:output_type -> ca.GenerateCRLResponse
8, // [8:12] is the sub-list for method output_type
4, // [4:8] is the sub-list for method input_type
4, // [4:4] is the sub-list for extension type_name
4, // [4:4] is the sub-list for extension extendee
0, // [0:4] is the sub-list for field type_name
}
func init() { file_ca_proto_init() }
@ -573,7 +723,105 @@ func file_ca_proto_init() {
if File_ca_proto != nil {
return
}
file_ca_proto_msgTypes[4].OneofWrappers = []any{
if !protoimpl.UnsafeEnabled {
file_ca_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*IssueCertificateRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_ca_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*IssuePrecertificateResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_ca_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*IssueCertificateForPrecertificateRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_ca_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GenerateOCSPRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_ca_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*OCSPResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_ca_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GenerateCRLRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_ca_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*CRLMetadata); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_ca_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GenerateCRLResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
file_ca_proto_msgTypes[5].OneofWrappers = []interface{}{
(*GenerateCRLRequest_Metadata)(nil),
(*GenerateCRLRequest_Entry)(nil),
}
@ -581,9 +829,9 @@ func file_ca_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ca_proto_rawDesc), len(file_ca_proto_rawDesc)),
RawDescriptor: file_ca_proto_rawDesc,
NumEnums: 0,
NumMessages: 7,
NumMessages: 8,
NumExtensions: 0,
NumServices: 3,
},
@ -592,6 +840,7 @@ func file_ca_proto_init() {
MessageInfos: file_ca_proto_msgTypes,
}.Build()
File_ca_proto = out.File
file_ca_proto_rawDesc = nil
file_ca_proto_goTypes = nil
file_ca_proto_depIdxs = nil
}

View File

@ -8,8 +8,8 @@ import "google/protobuf/timestamp.proto";
// CertificateAuthority issues certificates.
service CertificateAuthority {
// IssueCertificate issues a precertificate, gets SCTs, issues a certificate, and returns that.
rpc IssueCertificate(IssueCertificateRequest) returns (IssueCertificateResponse) {}
rpc IssuePrecertificate(IssueCertificateRequest) returns (IssuePrecertificateResponse) {}
rpc IssueCertificateForPrecertificate(IssueCertificateForPrecertificateRequest) returns (core.Certificate) {}
}
message IssueCertificateRequest {
@ -26,8 +26,32 @@ message IssueCertificateRequest {
string certProfileName = 5;
}
message IssueCertificateResponse {
message IssuePrecertificateResponse {
// Next unused field number: 4
bytes DER = 1;
// certProfileHash is a hash over the exported fields of a certificate profile
// to ensure that the profile remains unchanged after multiple roundtrips
// through the RA and CA.
bytes certProfileHash = 2;
// certProfileName is a human readable name returned back to the RA for later
// use. If IssueCertificateRequest.certProfileName was an empty string, the
// CAs default profile name will be assigned.
string certProfileName = 3;
}
message IssueCertificateForPrecertificateRequest {
// Next unused field number: 6
bytes DER = 1;
repeated bytes SCTs = 2;
int64 registrationID = 3;
int64 orderID = 4;
// certProfileHash is a hash over the exported fields of a certificate profile
// to ensure that the profile remains unchanged after multiple roundtrips
// through the RA and CA.
bytes certProfileHash = 5;
}
// OCSPGenerator generates OCSP. We separate this out from

View File

@ -1,6 +1,6 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc-gen-go-grpc v1.3.0
// - protoc v3.20.1
// source: ca.proto
@ -8,6 +8,7 @@ package proto
import (
context "context"
proto "github.com/letsencrypt/boulder/core/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
@ -19,17 +20,16 @@ import (
const _ = grpc.SupportPackageIsVersion9
const (
CertificateAuthority_IssueCertificate_FullMethodName = "/ca.CertificateAuthority/IssueCertificate"
CertificateAuthority_IssuePrecertificate_FullMethodName = "/ca.CertificateAuthority/IssuePrecertificate"
CertificateAuthority_IssueCertificateForPrecertificate_FullMethodName = "/ca.CertificateAuthority/IssueCertificateForPrecertificate"
)
// CertificateAuthorityClient is the client API for CertificateAuthority service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
//
// CertificateAuthority issues certificates.
type CertificateAuthorityClient interface {
// IssueCertificate issues a precertificate, gets SCTs, issues a certificate, and returns that.
IssueCertificate(ctx context.Context, in *IssueCertificateRequest, opts ...grpc.CallOption) (*IssueCertificateResponse, error)
IssuePrecertificate(ctx context.Context, in *IssueCertificateRequest, opts ...grpc.CallOption) (*IssuePrecertificateResponse, error)
IssueCertificateForPrecertificate(ctx context.Context, in *IssueCertificateForPrecertificateRequest, opts ...grpc.CallOption) (*proto.Certificate, error)
}
type certificateAuthorityClient struct {
@ -40,10 +40,20 @@ func NewCertificateAuthorityClient(cc grpc.ClientConnInterface) CertificateAutho
return &certificateAuthorityClient{cc}
}
func (c *certificateAuthorityClient) IssueCertificate(ctx context.Context, in *IssueCertificateRequest, opts ...grpc.CallOption) (*IssueCertificateResponse, error) {
func (c *certificateAuthorityClient) IssuePrecertificate(ctx context.Context, in *IssueCertificateRequest, opts ...grpc.CallOption) (*IssuePrecertificateResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(IssueCertificateResponse)
err := c.cc.Invoke(ctx, CertificateAuthority_IssueCertificate_FullMethodName, in, out, cOpts...)
out := new(IssuePrecertificateResponse)
err := c.cc.Invoke(ctx, CertificateAuthority_IssuePrecertificate_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *certificateAuthorityClient) IssueCertificateForPrecertificate(ctx context.Context, in *IssueCertificateForPrecertificateRequest, opts ...grpc.CallOption) (*proto.Certificate, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(proto.Certificate)
err := c.cc.Invoke(ctx, CertificateAuthority_IssueCertificateForPrecertificate_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -52,27 +62,24 @@ func (c *certificateAuthorityClient) IssueCertificate(ctx context.Context, in *I
// CertificateAuthorityServer is the server API for CertificateAuthority service.
// All implementations must embed UnimplementedCertificateAuthorityServer
// for forward compatibility.
//
// CertificateAuthority issues certificates.
// for forward compatibility
type CertificateAuthorityServer interface {
// IssueCertificate issues a precertificate, gets SCTs, issues a certificate, and returns that.
IssueCertificate(context.Context, *IssueCertificateRequest) (*IssueCertificateResponse, error)
IssuePrecertificate(context.Context, *IssueCertificateRequest) (*IssuePrecertificateResponse, error)
IssueCertificateForPrecertificate(context.Context, *IssueCertificateForPrecertificateRequest) (*proto.Certificate, error)
mustEmbedUnimplementedCertificateAuthorityServer()
}
// UnimplementedCertificateAuthorityServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedCertificateAuthorityServer struct{}
// UnimplementedCertificateAuthorityServer must be embedded to have forward compatible implementations.
type UnimplementedCertificateAuthorityServer struct {
}
func (UnimplementedCertificateAuthorityServer) IssueCertificate(context.Context, *IssueCertificateRequest) (*IssueCertificateResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method IssueCertificate not implemented")
func (UnimplementedCertificateAuthorityServer) IssuePrecertificate(context.Context, *IssueCertificateRequest) (*IssuePrecertificateResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method IssuePrecertificate not implemented")
}
func (UnimplementedCertificateAuthorityServer) IssueCertificateForPrecertificate(context.Context, *IssueCertificateForPrecertificateRequest) (*proto.Certificate, error) {
return nil, status.Errorf(codes.Unimplemented, "method IssueCertificateForPrecertificate not implemented")
}
func (UnimplementedCertificateAuthorityServer) mustEmbedUnimplementedCertificateAuthorityServer() {}
func (UnimplementedCertificateAuthorityServer) testEmbeddedByValue() {}
// UnsafeCertificateAuthorityServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to CertificateAuthorityServer will
@ -82,30 +89,41 @@ type UnsafeCertificateAuthorityServer interface {
}
func RegisterCertificateAuthorityServer(s grpc.ServiceRegistrar, srv CertificateAuthorityServer) {
// If the following call pancis, it indicates UnimplementedCertificateAuthorityServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&CertificateAuthority_ServiceDesc, srv)
}
func _CertificateAuthority_IssueCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
func _CertificateAuthority_IssuePrecertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(IssueCertificateRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CertificateAuthorityServer).IssueCertificate(ctx, in)
return srv.(CertificateAuthorityServer).IssuePrecertificate(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: CertificateAuthority_IssueCertificate_FullMethodName,
FullMethod: CertificateAuthority_IssuePrecertificate_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CertificateAuthorityServer).IssueCertificate(ctx, req.(*IssueCertificateRequest))
return srv.(CertificateAuthorityServer).IssuePrecertificate(ctx, req.(*IssueCertificateRequest))
}
return interceptor(ctx, in, info, handler)
}
func _CertificateAuthority_IssueCertificateForPrecertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(IssueCertificateForPrecertificateRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CertificateAuthorityServer).IssueCertificateForPrecertificate(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: CertificateAuthority_IssueCertificateForPrecertificate_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CertificateAuthorityServer).IssueCertificateForPrecertificate(ctx, req.(*IssueCertificateForPrecertificateRequest))
}
return interceptor(ctx, in, info, handler)
}
@ -118,8 +136,12 @@ var CertificateAuthority_ServiceDesc = grpc.ServiceDesc{
HandlerType: (*CertificateAuthorityServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "IssueCertificate",
Handler: _CertificateAuthority_IssueCertificate_Handler,
MethodName: "IssuePrecertificate",
Handler: _CertificateAuthority_IssuePrecertificate_Handler,
},
{
MethodName: "IssueCertificateForPrecertificate",
Handler: _CertificateAuthority_IssueCertificateForPrecertificate_Handler,
},
},
Streams: []grpc.StreamDesc{},
@ -133,11 +155,6 @@ const (
// OCSPGeneratorClient is the client API for OCSPGenerator service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
//
// OCSPGenerator generates OCSP. We separate this out from
// CertificateAuthority so that we can restrict access to a different subset of
// hosts, so the hosts that need to request OCSP generation don't need to be
// able to request certificate issuance.
type OCSPGeneratorClient interface {
GenerateOCSP(ctx context.Context, in *GenerateOCSPRequest, opts ...grpc.CallOption) (*OCSPResponse, error)
}
@ -162,29 +179,20 @@ func (c *oCSPGeneratorClient) GenerateOCSP(ctx context.Context, in *GenerateOCSP
// OCSPGeneratorServer is the server API for OCSPGenerator service.
// All implementations must embed UnimplementedOCSPGeneratorServer
// for forward compatibility.
//
// OCSPGenerator generates OCSP. We separate this out from
// CertificateAuthority so that we can restrict access to a different subset of
// hosts, so the hosts that need to request OCSP generation don't need to be
// able to request certificate issuance.
// for forward compatibility
type OCSPGeneratorServer interface {
GenerateOCSP(context.Context, *GenerateOCSPRequest) (*OCSPResponse, error)
mustEmbedUnimplementedOCSPGeneratorServer()
}
// UnimplementedOCSPGeneratorServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedOCSPGeneratorServer struct{}
// UnimplementedOCSPGeneratorServer must be embedded to have forward compatible implementations.
type UnimplementedOCSPGeneratorServer struct {
}
func (UnimplementedOCSPGeneratorServer) GenerateOCSP(context.Context, *GenerateOCSPRequest) (*OCSPResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GenerateOCSP not implemented")
}
func (UnimplementedOCSPGeneratorServer) mustEmbedUnimplementedOCSPGeneratorServer() {}
func (UnimplementedOCSPGeneratorServer) testEmbeddedByValue() {}
// UnsafeOCSPGeneratorServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to OCSPGeneratorServer will
@ -194,13 +202,6 @@ type UnsafeOCSPGeneratorServer interface {
}
func RegisterOCSPGeneratorServer(s grpc.ServiceRegistrar, srv OCSPGeneratorServer) {
// If the following call panics, it indicates UnimplementedOCSPGeneratorServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&OCSPGenerator_ServiceDesc, srv)
}
@ -245,8 +246,6 @@ const (
// CRLGeneratorClient is the client API for CRLGenerator service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
//
// CRLGenerator signs CRLs. It is separated for the same reason as OCSPGenerator.
type CRLGeneratorClient interface {
GenerateCRL(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[GenerateCRLRequest, GenerateCRLResponse], error)
}
@ -274,26 +273,20 @@ type CRLGenerator_GenerateCRLClient = grpc.BidiStreamingClient[GenerateCRLReques
// CRLGeneratorServer is the server API for CRLGenerator service.
// All implementations must embed UnimplementedCRLGeneratorServer
// for forward compatibility.
//
// CRLGenerator signs CRLs. It is separated for the same reason as OCSPGenerator.
// for forward compatibility
type CRLGeneratorServer interface {
GenerateCRL(grpc.BidiStreamingServer[GenerateCRLRequest, GenerateCRLResponse]) error
mustEmbedUnimplementedCRLGeneratorServer()
}
// UnimplementedCRLGeneratorServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedCRLGeneratorServer struct{}
// UnimplementedCRLGeneratorServer must be embedded to have forward compatible implementations.
type UnimplementedCRLGeneratorServer struct {
}
func (UnimplementedCRLGeneratorServer) GenerateCRL(grpc.BidiStreamingServer[GenerateCRLRequest, GenerateCRLResponse]) error {
return status.Errorf(codes.Unimplemented, "method GenerateCRL not implemented")
}
func (UnimplementedCRLGeneratorServer) mustEmbedUnimplementedCRLGeneratorServer() {}
func (UnimplementedCRLGeneratorServer) testEmbeddedByValue() {}
// UnsafeCRLGeneratorServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to CRLGeneratorServer will
@ -303,13 +296,6 @@ type UnsafeCRLGeneratorServer interface {
}
func RegisterCRLGeneratorServer(s grpc.ServiceRegistrar, srv CRLGeneratorServer) {
// If the following call panics, it indicates UnimplementedCRLGeneratorServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&CRLGenerator_ServiceDesc, srv)
}

16
canceled/canceled.go Normal file
View File

@ -0,0 +1,16 @@
package canceled
import (
"context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// Is returns true if err is non-nil and is either context.Canceled, or has a
// grpc code of Canceled. This is useful because cancellations propagate through
// gRPC boundaries, and if we choose to treat in-process cancellations a certain
// way, we usually want to treat cross-process cancellations the same way.
func Is(err error) bool {
return err == context.Canceled || status.Code(err) == codes.Canceled
}

22
canceled/canceled_test.go Normal file
View File

@ -0,0 +1,22 @@
package canceled
import (
"context"
"errors"
"testing"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func TestCanceled(t *testing.T) {
if !Is(context.Canceled) {
t.Errorf("Expected context.Canceled to be canceled, but wasn't.")
}
if !Is(status.Errorf(codes.Canceled, "hi")) {
t.Errorf("Expected gRPC cancellation to be cancelled, but wasn't.")
}
if Is(errors.New("hi")) {
t.Errorf("Expected random error to not be cancelled, but was.")
}
}

70
cmd/admin-revoker/main.go Normal file
View File

@ -0,0 +1,70 @@
package notmain
import (
"fmt"
"os"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/features"
)
// Config preserves the configuration schema of the retired admin-revoker
// tool; it is registered with a ConfigValidator in init so that existing
// config files can still be validated even though the tool now only prints
// migration guidance.
type Config struct {
	Revoker struct {
		// DB holds the database connection configuration.
		DB cmd.DBConfig
		// Similarly, the Revoker needs a TLSConfig to set up its GRPC client
		// certs, but doesn't get the TLS field from ServiceConfig, so declares
		// its own.
		TLS cmd.TLSConfig

		// RAService and SAService configure the gRPC clients for the RA and SA.
		RAService *cmd.GRPCClientConfig
		SAService *cmd.GRPCClientConfig

		// Features holds feature flag settings.
		Features features.Config
	}

	Syslog cmd.SyslogConfig
}
// main prints migration guidance for users of the removed admin-revoker
// tool, mapping each old subcommand to its equivalent invocation of the new
// `admin` tool. When invoked with no subcommand it prints generic guidance
// and exits nonzero.
func main() {
	if len(os.Args) == 1 {
		fmt.Println("use `admin -h` to learn how to use the new admin tool")
		os.Exit(1)
	}

	// Idiomatic value switch on the subcommand, rather than the original
	// tautological `switch { case command == ... }` form.
	switch os.Args[1] {
	case "serial-revoke":
		fmt.Println("use `admin -config path/to/cfg.json revoke-cert -serial deadbeef -reason X` instead")
	case "batched-serial-revoke":
		fmt.Println("use `admin -config path/to/cfg.json revoke-cert -serials-file path -reason X` instead")
	case "reg-revoke":
		fmt.Println("use `admin -config path/to/cfg.json revoke-cert -reg-id Y -reason X` instead")
	case "malformed-revoke":
		fmt.Println("use `admin -config path/to/cfg.json revoke-cert -serial deadbeef -reason X -malformed` instead")
	case "list-reasons":
		fmt.Println("use `admin -config path/to/cfg.json revoke-cert -h` instead")
	case "private-key-revoke":
		fmt.Println("use `admin -config path/to/cfg.json revoke-cert -private-key path -reason X` instead")
	case "private-key-block":
		fmt.Println("use `admin -config path/to/cfg.json block-key -private-key path -comment foo` instead")
	case "incident-table-revoke":
		fmt.Println("use `admin -config path/to/cfg.json revoke-cert -incident-table tablename -reason X` instead")
	case "clear-email":
		fmt.Println("use `admin -config path/to/cfg.json update-email -address foo@bar.org -clear` instead")
	default:
		fmt.Println("use `admin -h` to see a list of flags and subcommands for the new admin tool")
	}
}
// init registers "admin-revoker" as a boulder command so that invoking it
// still works (printing migration guidance via main) and so that old config
// files are still validated against the Config struct above.
func init() {
	cmd.RegisterCommand("admin-revoker", main, &cmd.ConfigValidator{Config: &Config{}})
}

View File

@ -2,7 +2,6 @@ package main
import (
"context"
"errors"
"fmt"
"github.com/jmhodges/clock"
@ -48,7 +47,7 @@ func newAdmin(configFile string, dryRun bool) (*admin, error) {
return nil, fmt.Errorf("parsing config file: %w", err)
}
scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, "")
scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.Admin.DebugAddr)
defer oTelShutdown(context.Background())
logger.Info(cmd.VersionString())
@ -95,22 +94,3 @@ func newAdmin(configFile string, dryRun bool) (*admin, error) {
log: logger,
}, nil
}
// findActiveInputMethodFlag returns a single key from setInputs with a value of `true`,
// if exactly one exists. Otherwise it returns an error.
func findActiveInputMethodFlag(setInputs map[string]bool) (string, error) {
var activeFlags []string
for flag, isSet := range setInputs {
if isSet {
activeFlags = append(activeFlags, flag)
}
}
if len(activeFlags) == 0 {
return "", errors.New("at least one input method flag must be specified")
} else if len(activeFlags) > 1 {
return "", fmt.Errorf("more than one input method flag specified: %v", activeFlags)
}
return activeFlags[0], nil
}

View File

@ -1,59 +0,0 @@
package main
import (
"testing"
"github.com/letsencrypt/boulder/test"
)
func Test_findActiveInputMethodFlag(t *testing.T) {
tests := []struct {
name string
setInputs map[string]bool
expected string
wantErr bool
}{
{
name: "No active flags",
setInputs: map[string]bool{
"-private-key": false,
"-spki-file": false,
"-cert-file": false,
},
expected: "",
wantErr: true,
},
{
name: "Multiple active flags",
setInputs: map[string]bool{
"-private-key": true,
"-spki-file": true,
"-cert-file": false,
},
expected: "",
wantErr: true,
},
{
name: "Single active flag",
setInputs: map[string]bool{
"-private-key": true,
"-spki-file": false,
"-cert-file": false,
},
expected: "-private-key",
wantErr: false,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
result, err := findActiveInputMethodFlag(tc.setInputs)
if tc.wantErr {
test.AssertError(t, err, "findActiveInputMethodFlag() should have errored")
} else {
test.AssertNotError(t, err, "findActiveInputMethodFlag() should not have errored")
test.AssertEquals(t, result, tc.expected)
}
})
}
}

View File

@ -15,6 +15,7 @@ import (
"unicode"
"golang.org/x/crypto/ocsp"
"golang.org/x/exp/maps"
core "github.com/letsencrypt/boulder/core"
berrors "github.com/letsencrypt/boulder/errors"
@ -42,9 +43,8 @@ type subcommandRevokeCert struct {
incidentTable string
serialsFile string
privKey string
regID int64
regID uint
certFile string
crlShard int64
}
var _ subcommand = (*subcommandRevokeCert)(nil)
@ -59,14 +59,13 @@ func (s *subcommandRevokeCert) Flags(flag *flag.FlagSet) {
flag.StringVar(&s.reasonStr, "reason", "unspecified", "Revocation reason (unspecified, keyCompromise, superseded, cessationOfOperation, or privilegeWithdrawn)")
flag.BoolVar(&s.skipBlock, "skip-block-key", false, "Skip blocking the key, if revoked for keyCompromise - use with extreme caution")
flag.BoolVar(&s.malformed, "malformed", false, "Indicates that the cert cannot be parsed - use with caution")
flag.Int64Var(&s.crlShard, "crl-shard", 0, "For malformed certs, the CRL shard the certificate belongs to")
// Flags specifying the input method for the certificates to be revoked.
flag.StringVar(&s.serial, "serial", "", "Revoke the certificate with this hex serial")
flag.StringVar(&s.incidentTable, "incident-table", "", "Revoke all certificates whose serials are in this table")
flag.StringVar(&s.serialsFile, "serials-file", "", "Revoke all certificates whose hex serials are in this file")
flag.StringVar(&s.privKey, "private-key", "", "Revoke all certificates whose pubkey matches this private key")
flag.Int64Var(&s.regID, "reg-id", 0, "Revoke all certificates issued to this account")
flag.UintVar(&s.regID, "reg-id", 0, "Revoke all certificates issued to this account")
flag.StringVar(&s.certFile, "cert-file", "", "Revoke the single PEM-formatted certificate in this file")
}
@ -110,13 +109,16 @@ func (s *subcommandRevokeCert) Run(ctx context.Context, a *admin) error {
"-reg-id": s.regID != 0,
"-cert-file": s.certFile != "",
}
activeFlag, err := findActiveInputMethodFlag(setInputs)
if err != nil {
return err
maps.DeleteFunc(setInputs, func(_ string, v bool) bool { return !v })
if len(setInputs) == 0 {
return errors.New("at least one input method flag must be specified")
} else if len(setInputs) > 1 {
return fmt.Errorf("more than one input method flag specified: %v", maps.Keys(setInputs))
}
var serials []string
switch activeFlag {
var err error
switch maps.Keys(setInputs)[0] {
case "-serial":
serials, err = []string{s.serial}, nil
case "-incident-table":
@ -126,7 +128,7 @@ func (s *subcommandRevokeCert) Run(ctx context.Context, a *admin) error {
case "-private-key":
serials, err = a.serialsFromPrivateKey(ctx, s.privKey)
case "-reg-id":
serials, err = a.serialsFromRegID(ctx, s.regID)
serials, err = a.serialsFromRegID(ctx, int64(s.regID))
case "-cert-file":
serials, err = a.serialsFromCertPEM(ctx, s.certFile)
default:
@ -136,22 +138,12 @@ func (s *subcommandRevokeCert) Run(ctx context.Context, a *admin) error {
return fmt.Errorf("collecting serials to revoke: %w", err)
}
serials, err = cleanSerials(serials)
if err != nil {
return err
}
if len(serials) == 0 {
return errors.New("no serials to revoke found")
}
a.log.Infof("Found %d certificates to revoke", len(serials))
if s.malformed {
return s.revokeMalformed(ctx, a, serials, reasonCode)
}
err = a.revokeSerials(ctx, serials, reasonCode, s.skipBlock, s.parallelism)
err = a.revokeSerials(ctx, serials, reasonCode, s.malformed, s.skipBlock, s.parallelism)
if err != nil {
return fmt.Errorf("revoking serials: %w", err)
}
@ -159,31 +151,6 @@ func (s *subcommandRevokeCert) Run(ctx context.Context, a *admin) error {
return nil
}
func (s *subcommandRevokeCert) revokeMalformed(ctx context.Context, a *admin, serials []string, reasonCode revocation.Reason) error {
u, err := user.Current()
if err != nil {
return fmt.Errorf("getting admin username: %w", err)
}
if s.crlShard == 0 {
return errors.New("when revoking malformed certificates, a nonzero CRL shard must be specified")
}
if len(serials) > 1 {
return errors.New("when revoking malformed certificates, only one cert at a time is allowed")
}
_, err = a.rac.AdministrativelyRevokeCertificate(
ctx,
&rapb.AdministrativelyRevokeCertificateRequest{
Serial: serials[0],
Code: int64(reasonCode),
AdminName: u.Username,
SkipBlockKey: s.skipBlock,
Malformed: true,
CrlShard: s.crlShard,
},
)
return err
}
func (a *admin) serialsFromIncidentTable(ctx context.Context, tableName string) ([]string, error) {
stream, err := a.saroc.SerialsForIncident(ctx, &sapb.SerialsForIncidentRequest{IncidentTable: tableName})
if err != nil {
@ -285,9 +252,7 @@ func (a *admin) serialsFromCertPEM(_ context.Context, filename string) ([]string
return []string{core.SerialToString(cert.SerialNumber)}, nil
}
// cleanSerials removes non-alphanumeric characters from the serials and checks
// that all resulting serials are valid (hex encoded, and the correct length).
func cleanSerials(serials []string) ([]string, error) {
func cleanSerial(serial string) (string, error) {
serialStrip := func(r rune) rune {
switch {
case unicode.IsLetter(r):
@ -297,19 +262,14 @@ func cleanSerials(serials []string) ([]string, error) {
}
return rune(-1)
}
var ret []string
for _, s := range serials {
cleaned := strings.Map(serialStrip, s)
if !core.ValidSerial(cleaned) {
return nil, fmt.Errorf("cleaned serial %q is not valid", cleaned)
}
ret = append(ret, cleaned)
strippedSerial := strings.Map(serialStrip, serial)
if !core.ValidSerial(strippedSerial) {
return "", fmt.Errorf("cleaned serial %q is not valid", strippedSerial)
}
return ret, nil
return strippedSerial, nil
}
func (a *admin) revokeSerials(ctx context.Context, serials []string, reason revocation.Reason, skipBlockKey bool, parallelism uint) error {
func (a *admin) revokeSerials(ctx context.Context, serials []string, reason revocation.Reason, malformed bool, skipBlockKey bool, parallelism uint) error {
u, err := user.Current()
if err != nil {
return fmt.Errorf("getting admin username: %w", err)
@ -323,17 +283,19 @@ func (a *admin) revokeSerials(ctx context.Context, serials []string, reason revo
go func() {
defer wg.Done()
for serial := range work {
_, err := a.rac.AdministrativelyRevokeCertificate(
cleanedSerial, err := cleanSerial(serial)
if err != nil {
a.log.Errf("skipping serial %q: %s", serial, err)
continue
}
_, err = a.rac.AdministrativelyRevokeCertificate(
ctx,
&rapb.AdministrativelyRevokeCertificateRequest{
Serial: serial,
Serial: cleanedSerial,
Code: int64(reason),
AdminName: u.Username,
SkipBlockKey: skipBlockKey,
// This is a well-formed certificate so send CrlShard 0
// to let the RA figure out the right shard from the cert.
Malformed: false,
CrlShard: 0,
Malformed: malformed,
},
)
if err != nil {

View File

@ -10,7 +10,6 @@ import (
"errors"
"os"
"path"
"reflect"
"slices"
"strings"
"sync"
@ -199,20 +198,20 @@ func (mra *mockRARecordingRevocations) reset() {
func TestRevokeSerials(t *testing.T) {
t.Parallel()
serials := []string{
"2a18592b7f4bf596fb1a1df135567acd825a",
"038c3f6388afb7695dd4d6bbe3d264f1e4e2",
"048c3f6388afb7695dd4d6bbe3d264f1e5e5",
"2a:18:59:2b:7f:4b:f5:96:fb:1a:1d:f1:35:56:7a:cd:82:5a",
"03:8c:3f:63:88:af:b7:69:5d:d4:d6:bb:e3:d2:64:f1:e4:e2",
"048c3f6388afb7695dd4d6bbe3d264f1e5e5!",
}
mra := mockRARecordingRevocations{}
log := blog.NewMock()
a := admin{rac: &mra, log: log}
assertRequestsContain := func(reqs []*rapb.AdministrativelyRevokeCertificateRequest, code revocation.Reason, skipBlockKey bool) {
t.Helper()
assertRequestsContain := func(reqs []*rapb.AdministrativelyRevokeCertificateRequest, code revocation.Reason, skipBlockKey bool, malformed bool) {
for _, req := range reqs {
test.AssertEquals(t, len(req.Cert), 0)
test.AssertEquals(t, req.Code, int64(code))
test.AssertEquals(t, req.SkipBlockKey, skipBlockKey)
test.AssertEquals(t, req.Malformed, malformed)
}
}
@ -220,113 +219,49 @@ func TestRevokeSerials(t *testing.T) {
mra.reset()
log.Clear()
a.dryRun = false
err := a.revokeSerials(context.Background(), serials, 0, false, 1)
err := a.revokeSerials(context.Background(), serials, 0, false, false, 1)
test.AssertEquals(t, len(log.GetAllMatching("invalid serial format")), 0)
test.AssertNotError(t, err, "")
test.AssertEquals(t, len(log.GetAll()), 0)
test.AssertEquals(t, len(mra.revocationRequests), 3)
assertRequestsContain(mra.revocationRequests, 0, false)
assertRequestsContain(mra.revocationRequests, 0, false, false)
// Revoking an already-revoked serial should result in one log line.
mra.reset()
log.Clear()
mra.alreadyRevoked = []string{"048c3f6388afb7695dd4d6bbe3d264f1e5e5"}
err = a.revokeSerials(context.Background(), serials, 0, false, 1)
t.Logf("error: %s", err)
t.Logf("logs: %s", strings.Join(log.GetAll(), ""))
err = a.revokeSerials(context.Background(), serials, 0, false, false, 1)
test.AssertError(t, err, "already-revoked should result in error")
test.AssertEquals(t, len(log.GetAllMatching("not revoking")), 1)
test.AssertEquals(t, len(mra.revocationRequests), 3)
assertRequestsContain(mra.revocationRequests, 0, false)
assertRequestsContain(mra.revocationRequests, 0, false, false)
// Revoking a doomed-to-fail serial should also result in one log line.
mra.reset()
log.Clear()
mra.doomedToFail = []string{"048c3f6388afb7695dd4d6bbe3d264f1e5e5"}
err = a.revokeSerials(context.Background(), serials, 0, false, 1)
err = a.revokeSerials(context.Background(), serials, 0, false, false, 1)
test.AssertError(t, err, "gRPC error should result in error")
test.AssertEquals(t, len(log.GetAllMatching("failed to revoke")), 1)
test.AssertEquals(t, len(mra.revocationRequests), 3)
assertRequestsContain(mra.revocationRequests, 0, false)
assertRequestsContain(mra.revocationRequests, 0, false, false)
// Revoking with other parameters should get carried through.
mra.reset()
log.Clear()
err = a.revokeSerials(context.Background(), serials, 1, true, 3)
err = a.revokeSerials(context.Background(), serials, 1, true, true, 3)
test.AssertNotError(t, err, "")
test.AssertEquals(t, len(mra.revocationRequests), 3)
assertRequestsContain(mra.revocationRequests, 1, true)
assertRequestsContain(mra.revocationRequests, 1, true, true)
// Revoking in dry-run mode should result in no gRPC requests and three logs.
mra.reset()
log.Clear()
a.dryRun = true
a.rac = dryRunRAC{log: log}
err = a.revokeSerials(context.Background(), serials, 0, false, 1)
err = a.revokeSerials(context.Background(), serials, 0, false, false, 1)
test.AssertNotError(t, err, "")
test.AssertEquals(t, len(log.GetAllMatching("dry-run:")), 3)
test.AssertEquals(t, len(mra.revocationRequests), 0)
assertRequestsContain(mra.revocationRequests, 0, false)
}
func TestRevokeMalformed(t *testing.T) {
t.Parallel()
mra := mockRARecordingRevocations{}
log := blog.NewMock()
a := &admin{
rac: &mra,
log: log,
dryRun: false,
}
s := subcommandRevokeCert{
crlShard: 623,
}
serial := "0379c3dfdd518be45948f2dbfa6ea3e9b209"
err := s.revokeMalformed(context.Background(), a, []string{serial}, 1)
if err != nil {
t.Errorf("revokedMalformed with crlShard 623: want success, got %s", err)
}
if len(mra.revocationRequests) != 1 {
t.Errorf("revokeMalformed: want 1 revocation request to SA, got %v", mra.revocationRequests)
}
if mra.revocationRequests[0].Serial != serial {
t.Errorf("revokeMalformed: want %s to be revoked, got %s", serial, mra.revocationRequests[0])
}
s = subcommandRevokeCert{
crlShard: 0,
}
err = s.revokeMalformed(context.Background(), a, []string{"038c3f6388afb7695dd4d6bbe3d264f1e4e2"}, 1)
if err == nil {
t.Errorf("revokedMalformed with crlShard 0: want error, got none")
}
s = subcommandRevokeCert{
crlShard: 623,
}
err = s.revokeMalformed(context.Background(), a, []string{"038c3f6388afb7695dd4d6bbe3d264f1e4e2", "28a94f966eae14e525777188512ddf5a0a3b"}, 1)
if err == nil {
t.Errorf("revokedMalformed with multiple serials: want error, got none")
}
}
func TestCleanSerials(t *testing.T) {
input := []string{
"2a:18:59:2b:7f:4b:f5:96:fb:1a:1d:f1:35:56:7a:cd:82:5a",
"03:8c:3f:63:88:af:b7:69:5d:d4:d6:bb:e3:d2:64:f1:e4:e2",
"038c3f6388afb7695dd4d6bbe3d264f1e4e2",
}
expected := []string{
"2a18592b7f4bf596fb1a1df135567acd825a",
"038c3f6388afb7695dd4d6bbe3d264f1e4e2",
"038c3f6388afb7695dd4d6bbe3d264f1e4e2",
}
output, err := cleanSerials(input)
if err != nil {
t.Errorf("cleanSerials(%s): %s, want %s", input, err, expected)
}
if !reflect.DeepEqual(output, expected) {
t.Errorf("cleanSerials(%s)=%s, want %s", input, output, expected)
}
assertRequestsContain(mra.revocationRequests, 0, false, false)
}

View File

@ -32,6 +32,10 @@ type dryRunSAC struct {
}
func (d dryRunSAC) AddBlockedKey(_ context.Context, req *sapb.AddBlockedKeyRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
d.log.Infof("dry-run: Block SPKI hash %x by %s %s", req.KeyHash, req.Comment, req.Source)
b, err := prototext.Marshal(req)
if err != nil {
return nil, err
}
d.log.Infof("dry-run: %#v", string(b))
return &emptypb.Empty{}, nil
}

84
cmd/admin/email.go Normal file
View File

@ -0,0 +1,84 @@
package main
import (
"context"
"errors"
"flag"
"fmt"
"github.com/letsencrypt/boulder/sa"
)
// subcommandUpdateEmail encapsulates the "admin update-email" command.
//
// Note that this command may be very slow, as the initial query to find the set
// of accounts which have a matching contact email address does not use a
// database index. Therefore, when updating the found accounts, it does not exit
// on failure, preferring to continue and make as much progress as possible.
type subcommandUpdateEmail struct {
	// address is the email address to search for and act upon.
	address string
	// clear indicates the address should be removed from all matching accounts.
	clear bool
}
var _ subcommand = (*subcommandUpdateEmail)(nil)
// Desc returns a one-line description of this subcommand for the admin
// tool's help output.
func (s *subcommandUpdateEmail) Desc() string {
	return "Change or remove an email address across all accounts"
}
// Flags registers the command-line flags accepted by update-email on the
// provided FlagSet, binding them to this subcommand's fields.
func (s *subcommandUpdateEmail) Flags(flag *flag.FlagSet) {
	flag.StringVar(&s.address, "address", "", "Email address to update")
	flag.BoolVar(&s.clear, "clear", false, "If set, remove the address")
}
// Run validates the flags and dispatches to the requested email operation.
// Clearing is currently the only supported action; a missing -address or a
// missing action is reported as an error.
func (s *subcommandUpdateEmail) Run(ctx context.Context, a *admin) error {
	switch {
	case s.address == "":
		return errors.New("the -address flag is required")
	case s.clear:
		return a.clearEmail(ctx, s.address)
	default:
		return errors.New("no action to perform on the given email was specified")
	}
}
// clearEmail removes the given email address from every account listing it as
// a contact. It first scans the registrations table for candidate account IDs
// (slow: the query cannot use an index), then clears the address from each
// match via sa.ClearEmail. Per-account failures are logged and counted rather
// than aborting the loop; an error is returned only if at least one account
// could not be updated. In dry-run mode it only logs what would be removed.
func (a *admin) clearEmail(ctx context.Context, address string) error {
	a.log.AuditInfof("Scanning database for accounts with email addresses matching %q in order to clear the email addresses.", address)

	// We use SQL `CONCAT` rather than interpolating with `+` or `%s` because we want to
	// use a `?` placeholder for the email, which prevents SQL injection.
	// Since this uses a substring match, it is important
	// to subsequently parse the JSON list of addresses and look for exact matches.
	// Because this does not use an index, it is very slow.
	var regIDs []int64
	_, err := a.dbMap.Select(ctx, &regIDs, "SELECT id FROM registrations WHERE contact LIKE CONCAT('%\"mailto:', ?, '\"%')", address)
	if err != nil {
		return fmt.Errorf("identifying matching accounts: %w", err)
	}

	a.log.Infof("Found %d registration IDs matching email %q.", len(regIDs), address)

	failures := 0
	for _, regID := range regIDs {
		if a.dryRun {
			// Report what would happen without touching the database.
			a.log.Infof("dry-run: remove %q from account %d", address, regID)
			continue
		}

		err := sa.ClearEmail(ctx, a.dbMap, regID, address)
		if err != nil {
			// Log, but don't fail, because it took a long time to find the relevant registration IDs
			// and we don't want to have to redo that work.
			a.log.AuditErrf("failed to clear email %q for registration ID %d: %s", address, regID, err)
			failures++
		} else {
			a.log.AuditInfof("cleared email %q for registration ID %d", address, regID)
		}
	}

	if failures > 0 {
		return fmt.Errorf("failed to clear email for %d out of %d registration IDs", failures, len(regIDs))
	}

	return nil
}

View File

@ -15,6 +15,7 @@ import (
"sync"
"sync/atomic"
"golang.org/x/exp/maps"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/letsencrypt/boulder/core"
@ -68,13 +69,16 @@ func (s *subcommandBlockKey) Run(ctx context.Context, a *admin) error {
"-cert-file": s.certFile != "",
"-csr-file": s.csrFile != "",
}
activeFlag, err := findActiveInputMethodFlag(setInputs)
if err != nil {
return err
maps.DeleteFunc(setInputs, func(_ string, v bool) bool { return !v })
if len(setInputs) == 0 {
return errors.New("at least one input method flag must be specified")
} else if len(setInputs) > 1 {
return fmt.Errorf("more than one input method flag specified: %v", maps.Keys(setInputs))
}
var spkiHashes [][]byte
switch activeFlag {
var err error
switch maps.Keys(setInputs)[0] {
case "-private-key":
var spkiHash []byte
spkiHash, err = a.spkiHashFromPrivateKey(s.privKey)

View File

@ -178,6 +178,6 @@ func TestBlockSPKIHash(t *testing.T) {
err = a.blockSPKIHash(context.Background(), keyHash[:], u, "")
test.AssertNotError(t, err, "")
test.AssertEquals(t, len(log.GetAllMatching("Found 0 unexpired certificates")), 1)
test.AssertEquals(t, len(log.GetAllMatching("dry-run: Block SPKI hash "+hex.EncodeToString(keyHash[:]))), 1)
test.AssertEquals(t, len(log.GetAllMatching("dry-run:")), 1)
test.AssertEquals(t, len(msa.blockRequests), 0)
}

View File

@ -31,6 +31,8 @@ type Config struct {
RAService *cmd.GRPCClientConfig
SAService *cmd.GRPCClientConfig
DebugAddr string
Features features.Config
}
@ -70,6 +72,7 @@ func main() {
subcommands := map[string]subcommand{
"revoke-cert": &subcommandRevokeCert{},
"block-key": &subcommandBlockKey{},
"update-email": &subcommandUpdateEmail{},
"pause-identifier": &subcommandPauseIdentifier{},
"unpause-account": &subcommandUnpauseAccount{},
}

View File

@ -39,12 +39,12 @@ func (p *subcommandPauseIdentifier) Run(ctx context.Context, a *admin) error {
return errors.New("the -batch-file flag is required")
}
idents, err := a.readPausedAccountFile(p.batchFile)
identifiers, err := a.readPausedAccountFile(p.batchFile)
if err != nil {
return err
}
_, err = a.pauseIdentifiers(ctx, idents, p.parallelism)
_, err = a.pauseIdentifiers(ctx, identifiers, p.parallelism)
if err != nil {
return err
}
@ -60,19 +60,19 @@ func (a *admin) pauseIdentifiers(ctx context.Context, entries []pauseCSVData, pa
return nil, errors.New("cannot pause identifiers because no pauseData was sent")
}
accountToIdents := make(map[int64][]*corepb.Identifier)
accountToIdentifiers := make(map[int64][]*corepb.Identifier)
for _, entry := range entries {
accountToIdents[entry.accountID] = append(accountToIdents[entry.accountID], &corepb.Identifier{
accountToIdentifiers[entry.accountID] = append(accountToIdentifiers[entry.accountID], &corepb.Identifier{
Type: string(entry.identifierType),
Value: entry.identifierValue,
})
}
var errCount atomic.Uint64
respChan := make(chan *sapb.PauseIdentifiersResponse, len(accountToIdents))
respChan := make(chan *sapb.PauseIdentifiersResponse, len(accountToIdentifiers))
work := make(chan struct {
accountID int64
idents []*corepb.Identifier
accountID int64
identifiers []*corepb.Identifier
}, parallelism)
var wg sync.WaitGroup
@ -83,11 +83,11 @@ func (a *admin) pauseIdentifiers(ctx context.Context, entries []pauseCSVData, pa
for data := range work {
response, err := a.sac.PauseIdentifiers(ctx, &sapb.PauseRequest{
RegistrationID: data.accountID,
Identifiers: data.idents,
Identifiers: data.identifiers,
})
if err != nil {
errCount.Add(1)
a.log.Errf("error pausing identifier(s) %q for account %d: %v", data.idents, data.accountID, err)
a.log.Errf("error pausing identifier(s) %q for account %d: %v", data.identifiers, data.accountID, err)
} else {
respChan <- response
}
@ -95,11 +95,11 @@ func (a *admin) pauseIdentifiers(ctx context.Context, entries []pauseCSVData, pa
}()
}
for accountID, idents := range accountToIdents {
for accountID, identifiers := range accountToIdentifiers {
work <- struct {
accountID int64
idents []*corepb.Identifier
}{accountID, idents}
accountID int64
identifiers []*corepb.Identifier
}{accountID, identifiers}
}
close(work)
wg.Wait()

View File

@ -14,6 +14,7 @@ import (
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/unpause"
"golang.org/x/exp/maps"
)
// subcommandUnpauseAccount encapsulates the "admin unpause-account" command.
@ -43,13 +44,16 @@ func (u *subcommandUnpauseAccount) Run(ctx context.Context, a *admin) error {
"-account": u.accountID != 0,
"-batch-file": u.batchFile != "",
}
activeFlag, err := findActiveInputMethodFlag(setInputs)
if err != nil {
return err
maps.DeleteFunc(setInputs, func(_ string, v bool) bool { return !v })
if len(setInputs) == 0 {
return errors.New("at least one input method flag must be specified")
} else if len(setInputs) > 1 {
return fmt.Errorf("more than one input method flag specified: %v", maps.Keys(setInputs))
}
var regIDs []int64
switch activeFlag {
var err error
switch maps.Keys(setInputs)[0] {
case "-account":
regIDs = []int64{u.accountID}
case "-batch-file":

View File

@ -1,10 +1,15 @@
package notmain
import (
"bytes"
"context"
"crypto/x509"
"flag"
"fmt"
"html/template"
netmail "net/mail"
"os"
"strings"
"time"
"github.com/jmhodges/clock"
@ -19,6 +24,7 @@ import (
"github.com/letsencrypt/boulder/db"
bgrpc "github.com/letsencrypt/boulder/grpc"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/mail"
rapb "github.com/letsencrypt/boulder/ra/proto"
"github.com/letsencrypt/boulder/sa"
)
@ -37,6 +43,10 @@ var certsRevoked = prometheus.NewCounter(prometheus.CounterOpts{
Name: "bad_keys_certs_revoked",
Help: "A counter of certificates associated with rows in blockedKeys that have been revoked",
})
var mailErrors = prometheus.NewCounter(prometheus.CounterOpts{
Name: "bad_keys_mail_errors",
Help: "A counter of email send errors",
})
// revoker is an interface used to reduce the scope of a RA gRPC client
// to only the single method we need to use, this makes testing significantly
@ -50,6 +60,9 @@ type badKeyRevoker struct {
maxRevocations int
serialBatchSize int
raClient revoker
mailer mail.Mailer
emailSubject string
emailTemplate *template.Template
logger blog.Logger
clk clock.Clock
backoffIntervalBase time.Duration
@ -177,27 +190,109 @@ func (bkr *badKeyRevoker) markRowChecked(ctx context.Context, unchecked unchecke
return err
}
// revokeCerts revokes all the provided certificates. It uses reason
// keyCompromise and includes note indicating that they were revoked by
// bad-key-revoker.
func (bkr *badKeyRevoker) revokeCerts(certs []unrevokedCertificate) error {
for _, cert := range certs {
_, err := bkr.raClient.AdministrativelyRevokeCertificate(context.Background(), &rapb.AdministrativelyRevokeCertificateRequest{
Cert: cert.DER,
Serial: cert.Serial,
Code: int64(ocsp.KeyCompromise),
AdminName: "bad-key-revoker",
})
if err != nil {
return err
// resolveContacts builds a map of id -> email addresses
func (bkr *badKeyRevoker) resolveContacts(ctx context.Context, ids []int64) (map[int64][]string, error) {
idToEmail := map[int64][]string{}
for _, id := range ids {
var emails struct {
Contact []string
}
certsRevoked.Inc()
err := bkr.dbMap.SelectOne(ctx, &emails, "SELECT contact FROM registrations WHERE id = ?", id)
if err != nil {
// ErrNoRows is not acceptable here since there should always be a
// row for the registration, even if there are no contacts
return nil, err
}
if len(emails.Contact) != 0 {
for _, email := range emails.Contact {
idToEmail[id] = append(idToEmail[id], strings.TrimPrefix(email, "mailto:"))
}
} else {
// if the account has no contacts add a placeholder empty contact
// so that we don't skip any certificates
idToEmail[id] = append(idToEmail[id], "")
continue
}
}
return idToEmail, nil
}
var maxSerials = 100
// sendMessage sends a single email to the provided address with the revoked
// serials
func (bkr *badKeyRevoker) sendMessage(addr string, serials []string) error {
conn, err := bkr.mailer.Connect()
if err != nil {
return err
}
defer func() {
_ = conn.Close()
}()
mutSerials := make([]string, len(serials))
copy(mutSerials, serials)
if len(mutSerials) > maxSerials {
more := len(mutSerials) - maxSerials
mutSerials = mutSerials[:maxSerials]
mutSerials = append(mutSerials, fmt.Sprintf("and %d more certificates.", more))
}
message := bytes.NewBuffer(nil)
err = bkr.emailTemplate.Execute(message, mutSerials)
if err != nil {
return err
}
err = conn.SendMail([]string{addr}, bkr.emailSubject, message.String())
if err != nil {
return err
}
return nil
}
// invoke exits early and returns true if there is no work to be done.
// Otherwise, it processes a single key in the blockedKeys table and returns false.
// revokeCerts revokes all the certificates associated with a particular key hash and sends
// emails to the users that issued the certificates. Emails are not sent to the user which
// requested revocation of the original certificate which marked the key as compromised.
func (bkr *badKeyRevoker) revokeCerts(revokerEmails []string, emailToCerts map[string][]unrevokedCertificate) error {
revokerEmailsMap := map[string]bool{}
for _, email := range revokerEmails {
revokerEmailsMap[email] = true
}
alreadyRevoked := map[int]bool{}
for email, certs := range emailToCerts {
var revokedSerials []string
for _, cert := range certs {
revokedSerials = append(revokedSerials, cert.Serial)
if alreadyRevoked[cert.ID] {
continue
}
_, err := bkr.raClient.AdministrativelyRevokeCertificate(context.Background(), &rapb.AdministrativelyRevokeCertificateRequest{
Cert: cert.DER,
Serial: cert.Serial,
Code: int64(ocsp.KeyCompromise),
AdminName: "bad-key-revoker",
})
if err != nil {
return err
}
certsRevoked.Inc()
alreadyRevoked[cert.ID] = true
}
// don't send emails to the person who revoked the certificate
if revokerEmailsMap[email] || email == "" {
continue
}
err := bkr.sendMessage(email, revokedSerials)
if err != nil {
mailErrors.Inc()
bkr.logger.Errf("failed to send message to %q: %s", email, err)
continue
}
}
return nil
}
// invoke processes a single key in the blockedKeys table and returns whether
// there were any rows to process or not.
func (bkr *badKeyRevoker) invoke(ctx context.Context) (bool, error) {
// Gather a count of rows to be processed.
uncheckedCount, err := bkr.countUncheckedKeys(ctx)
@ -242,14 +337,45 @@ func (bkr *badKeyRevoker) invoke(ctx context.Context) (bool, error) {
return false, nil
}
var serials []string
// build a map of registration ID -> certificates, and collect a
// list of unique registration IDs
ownedBy := map[int64][]unrevokedCertificate{}
var ids []int64
for _, cert := range unrevokedCerts {
serials = append(serials, cert.Serial)
if ownedBy[cert.RegistrationID] == nil {
ids = append(ids, cert.RegistrationID)
}
ownedBy[cert.RegistrationID] = append(ownedBy[cert.RegistrationID], cert)
}
// if the account that revoked the original certificate isn't an owner of any
// extant certificates, still add them to ids so that we can resolve their
// email and avoid sending emails later. If RevokedBy == 0 it was a row
// inserted by admin-revoker with a dummy ID, since there won't be a registration
// to look up, don't bother adding it to ids.
if _, present := ownedBy[unchecked.RevokedBy]; !present && unchecked.RevokedBy != 0 {
ids = append(ids, unchecked.RevokedBy)
}
// get contact addresses for the list of IDs
idToEmails, err := bkr.resolveContacts(ctx, ids)
if err != nil {
return false, err
}
bkr.logger.AuditInfo(fmt.Sprintf("revoking serials %v for key with hash %x", serials, unchecked.KeyHash))
// revoke each certificate
err = bkr.revokeCerts(unrevokedCerts)
// build a map of email -> certificates, this de-duplicates accounts with
// the same email addresses
emailsToCerts := map[string][]unrevokedCertificate{}
for id, emails := range idToEmails {
for _, email := range emails {
emailsToCerts[email] = append(emailsToCerts[email], ownedBy[id]...)
}
}
revokerEmails := idToEmails[unchecked.RevokedBy]
bkr.logger.AuditInfo(fmt.Sprintf("revoking certs. revoked emails=%v, emailsToCerts=%s",
revokerEmails, emailsToCerts))
// revoke each certificate and send emails to their owners
err = bkr.revokeCerts(idToEmails[unchecked.RevokedBy], emailsToCerts)
if err != nil {
return false, err
}
@ -289,14 +415,15 @@ type Config struct {
// or no work to do.
BackoffIntervalMax config.Duration `validate:"-"`
// Deprecated: the bad-key-revoker no longer sends emails; we use ARI.
// TODO(#8199): Remove this config stanza entirely.
Mailer struct {
cmd.SMTPConfig `validate:"-"`
cmd.SMTPConfig
// Path to a file containing a list of trusted root certificates for use
// during the SMTP connection (as opposed to the gRPC connections).
SMTPTrustedRootFile string
From string
EmailSubject string
EmailTemplate string
From string `validate:"required"`
EmailSubject string `validate:"required"`
EmailTemplate string `validate:"required"`
}
}
@ -328,6 +455,7 @@ func main() {
scope.MustRegister(keysProcessed)
scope.MustRegister(certsRevoked)
scope.MustRegister(mailErrors)
dbMap, err := sa.InitWrappedDb(config.BadKeyRevoker.DB, scope, logger)
cmd.FailOnError(err, "While initializing dbMap")
@ -339,11 +467,50 @@ func main() {
cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to RA")
rac := rapb.NewRegistrationAuthorityClient(conn)
var smtpRoots *x509.CertPool
if config.BadKeyRevoker.Mailer.SMTPTrustedRootFile != "" {
pem, err := os.ReadFile(config.BadKeyRevoker.Mailer.SMTPTrustedRootFile)
cmd.FailOnError(err, "Loading trusted roots file")
smtpRoots = x509.NewCertPool()
if !smtpRoots.AppendCertsFromPEM(pem) {
cmd.FailOnError(nil, "Failed to parse root certs PEM")
}
}
fromAddress, err := netmail.ParseAddress(config.BadKeyRevoker.Mailer.From)
cmd.FailOnError(err, fmt.Sprintf("Could not parse from address: %s", config.BadKeyRevoker.Mailer.From))
smtpPassword, err := config.BadKeyRevoker.Mailer.PasswordConfig.Pass()
cmd.FailOnError(err, "Failed to load SMTP password")
mailClient := mail.New(
config.BadKeyRevoker.Mailer.Server,
config.BadKeyRevoker.Mailer.Port,
config.BadKeyRevoker.Mailer.Username,
smtpPassword,
smtpRoots,
*fromAddress,
logger,
scope,
1*time.Second, // reconnection base backoff
5*60*time.Second, // reconnection maximum backoff
)
if config.BadKeyRevoker.Mailer.EmailSubject == "" {
cmd.Fail("BadKeyRevoker.Mailer.EmailSubject must be populated")
}
templateBytes, err := os.ReadFile(config.BadKeyRevoker.Mailer.EmailTemplate)
cmd.FailOnError(err, fmt.Sprintf("failed to read email template %q: %s", config.BadKeyRevoker.Mailer.EmailTemplate, err))
emailTemplate, err := template.New("email").Parse(string(templateBytes))
cmd.FailOnError(err, fmt.Sprintf("failed to parse email template %q: %s", config.BadKeyRevoker.Mailer.EmailTemplate, err))
bkr := &badKeyRevoker{
dbMap: dbMap,
maxRevocations: config.BadKeyRevoker.MaximumRevocations,
serialBatchSize: config.BadKeyRevoker.FindCertificatesBatchSize,
raClient: rac,
mailer: mailClient,
emailSubject: config.BadKeyRevoker.Mailer.EmailSubject,
emailTemplate: emailTemplate,
logger: logger,
clk: clk,
backoffIntervalMax: config.BadKeyRevoker.BackoffIntervalMax.Duration,

View File

@ -4,22 +4,24 @@ import (
"context"
"crypto/rand"
"fmt"
"html/template"
"strings"
"sync"
"testing"
"time"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/emptypb"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/db"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/mocks"
rapb "github.com/letsencrypt/boulder/ra/proto"
"github.com/letsencrypt/boulder/sa"
"github.com/letsencrypt/boulder/test"
"github.com/letsencrypt/boulder/test/vars"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/emptypb"
)
func randHash(t *testing.T) []byte {
@ -79,17 +81,27 @@ func TestSelectUncheckedRows(t *testing.T) {
test.AssertEquals(t, row.RevokedBy, int64(1))
}
func insertRegistration(t *testing.T, dbMap *db.WrappedMap, fc clock.Clock) int64 {
func insertRegistration(t *testing.T, dbMap *db.WrappedMap, fc clock.Clock, addrs ...string) int64 {
t.Helper()
jwkHash := make([]byte, 32)
_, err := rand.Read(jwkHash)
test.AssertNotError(t, err, "failed to read rand")
contactStr := "[]"
if len(addrs) > 0 {
contacts := []string{}
for _, addr := range addrs {
contacts = append(contacts, fmt.Sprintf(`"mailto:%s"`, addr))
}
contactStr = fmt.Sprintf("[%s]", strings.Join(contacts, ","))
}
res, err := dbMap.ExecContext(
context.Background(),
"INSERT INTO registrations (jwk, jwk_sha256, agreement, createdAt, status, LockCol) VALUES (?, ?, ?, ?, ?, ?)",
"INSERT INTO registrations (jwk, jwk_sha256, contact, agreement, initialIP, createdAt, status, LockCol) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
[]byte{},
fmt.Sprintf("%x", jwkHash),
contactStr,
"yes",
[]byte{},
fc.Now(),
string(core.StatusValid),
0,
@ -233,6 +245,47 @@ func TestFindUnrevoked(t *testing.T) {
test.AssertEquals(t, err.Error(), fmt.Sprintf("too many certificates to revoke associated with %x: got 1, max 0", hashA))
}
func TestResolveContacts(t *testing.T) {
dbMap, err := sa.DBMapForTest(vars.DBConnSAFullPerms)
test.AssertNotError(t, err, "failed setting up db client")
defer test.ResetBoulderTestDatabase(t)()
fc := clock.NewFake()
bkr := &badKeyRevoker{dbMap: dbMap, clk: fc}
regIDA := insertRegistration(t, dbMap, fc)
regIDB := insertRegistration(t, dbMap, fc, "example.com", "example-2.com")
regIDC := insertRegistration(t, dbMap, fc, "example.com")
regIDD := insertRegistration(t, dbMap, fc, "example-2.com")
idToEmail, err := bkr.resolveContacts(context.Background(), []int64{regIDA, regIDB, regIDC, regIDD})
test.AssertNotError(t, err, "resolveContacts failed")
test.AssertDeepEquals(t, idToEmail, map[int64][]string{
regIDA: {""},
regIDB: {"example.com", "example-2.com"},
regIDC: {"example.com"},
regIDD: {"example-2.com"},
})
}
var testTemplate = template.Must(template.New("testing").Parse("{{range .}}{{.}}\n{{end}}"))
func TestSendMessage(t *testing.T) {
mm := &mocks.Mailer{}
fc := clock.NewFake()
bkr := &badKeyRevoker{mailer: mm, emailSubject: "testing", emailTemplate: testTemplate, clk: fc}
maxSerials = 2
err := bkr.sendMessage("example.com", []string{"a", "b", "c"})
test.AssertNotError(t, err, "sendMessages failed")
test.AssertEquals(t, len(mm.Messages), 1)
test.AssertEquals(t, mm.Messages[0].To, "example.com")
test.AssertEquals(t, mm.Messages[0].Subject, bkr.emailSubject)
test.AssertEquals(t, mm.Messages[0].Body, "a\nb\nand 1 more certificates.\n")
}
type mockRevoker struct {
revoked int
mu sync.Mutex
@ -251,15 +304,20 @@ func TestRevokeCerts(t *testing.T) {
defer test.ResetBoulderTestDatabase(t)()
fc := clock.NewFake()
mm := &mocks.Mailer{}
mr := &mockRevoker{}
bkr := &badKeyRevoker{dbMap: dbMap, raClient: mr, clk: fc}
bkr := &badKeyRevoker{dbMap: dbMap, raClient: mr, mailer: mm, emailSubject: "testing", emailTemplate: testTemplate, clk: fc}
err = bkr.revokeCerts([]unrevokedCertificate{
{ID: 0, Serial: "ff"},
{ID: 1, Serial: "ee"},
err = bkr.revokeCerts([]string{"revoker@example.com", "revoker-b@example.com"}, map[string][]unrevokedCertificate{
"revoker@example.com": {{ID: 0, Serial: "ff"}},
"revoker-b@example.com": {{ID: 0, Serial: "ff"}},
"other@example.com": {{ID: 1, Serial: "ee"}},
})
test.AssertNotError(t, err, "revokeCerts failed")
test.AssertEquals(t, mr.revoked, 2)
test.AssertEquals(t, len(mm.Messages), 1)
test.AssertEquals(t, mm.Messages[0].To, "other@example.com")
test.AssertEquals(t, mm.Messages[0].Subject, bkr.emailSubject)
test.AssertEquals(t, mm.Messages[0].Body, "ee\n")
}
func TestCertificateAbsent(t *testing.T) {
@ -272,7 +330,7 @@ func TestCertificateAbsent(t *testing.T) {
fc := clock.NewFake()
// populate DB with all the test data
regIDA := insertRegistration(t, dbMap, fc)
regIDA := insertRegistration(t, dbMap, fc, "example.com")
hashA := randHash(t)
insertBlockedRow(t, dbMap, fc, hashA, regIDA, false)
@ -292,6 +350,9 @@ func TestCertificateAbsent(t *testing.T) {
maxRevocations: 1,
serialBatchSize: 1,
raClient: &mockRevoker{},
mailer: &mocks.Mailer{},
emailSubject: "testing",
emailTemplate: testTemplate,
logger: blog.NewMock(),
clk: fc,
}
@ -308,20 +369,24 @@ func TestInvoke(t *testing.T) {
fc := clock.NewFake()
mm := &mocks.Mailer{}
mr := &mockRevoker{}
bkr := &badKeyRevoker{
dbMap: dbMap,
maxRevocations: 10,
serialBatchSize: 1,
raClient: mr,
mailer: mm,
emailSubject: "testing",
emailTemplate: testTemplate,
logger: blog.NewMock(),
clk: fc,
}
// populate DB with all the test data
regIDA := insertRegistration(t, dbMap, fc)
regIDB := insertRegistration(t, dbMap, fc)
regIDC := insertRegistration(t, dbMap, fc)
regIDA := insertRegistration(t, dbMap, fc, "example.com")
regIDB := insertRegistration(t, dbMap, fc, "example.com")
regIDC := insertRegistration(t, dbMap, fc, "other.example.com", "uno.example.com")
regIDD := insertRegistration(t, dbMap, fc)
hashA := randHash(t)
insertBlockedRow(t, dbMap, fc, hashA, regIDC, false)
@ -334,6 +399,8 @@ func TestInvoke(t *testing.T) {
test.AssertNotError(t, err, "invoke failed")
test.AssertEquals(t, noWork, false)
test.AssertEquals(t, mr.revoked, 4)
test.AssertEquals(t, len(mm.Messages), 1)
test.AssertEquals(t, mm.Messages[0].To, "example.com")
test.AssertMetricWithLabelsEquals(t, keysToProcess, prometheus.Labels{}, 1)
var checked struct {
@ -374,19 +441,23 @@ func TestInvokeRevokerHasNoExtantCerts(t *testing.T) {
fc := clock.NewFake()
mm := &mocks.Mailer{}
mr := &mockRevoker{}
bkr := &badKeyRevoker{dbMap: dbMap,
maxRevocations: 10,
serialBatchSize: 1,
raClient: mr,
mailer: mm,
emailSubject: "testing",
emailTemplate: testTemplate,
logger: blog.NewMock(),
clk: fc,
}
// populate DB with all the test data
regIDA := insertRegistration(t, dbMap, fc)
regIDB := insertRegistration(t, dbMap, fc)
regIDC := insertRegistration(t, dbMap, fc)
regIDA := insertRegistration(t, dbMap, fc, "a@example.com")
regIDB := insertRegistration(t, dbMap, fc, "a@example.com")
regIDC := insertRegistration(t, dbMap, fc, "b@example.com")
hashA := randHash(t)
@ -401,6 +472,8 @@ func TestInvokeRevokerHasNoExtantCerts(t *testing.T) {
test.AssertNotError(t, err, "invoke failed")
test.AssertEquals(t, noWork, false)
test.AssertEquals(t, mr.revoked, 4)
test.AssertEquals(t, len(mm.Messages), 1)
test.AssertEquals(t, mm.Messages[0].To, "b@example.com")
}
func TestBackoffPolicy(t *testing.T) {

View File

@ -3,8 +3,8 @@ package notmain
import (
"context"
"flag"
"fmt"
"os"
"reflect"
"strconv"
"time"
@ -19,7 +19,6 @@ import (
bgrpc "github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/issuance"
"github.com/letsencrypt/boulder/policy"
rapb "github.com/letsencrypt/boulder/ra/proto"
sapb "github.com/letsencrypt/boulder/sa/proto"
)
@ -33,26 +32,43 @@ type Config struct {
SAService *cmd.GRPCClientConfig
SCTService *cmd.GRPCClientConfig
// Issuance contains all information necessary to load and initialize issuers.
Issuance struct {
// The name of the certificate profile to use if one wasn't provided
// by the RA during NewOrder and Finalize requests. Must match a
// configured certificate profile or boulder-ca will fail to start.
//
// Deprecated: set the defaultProfileName in the RA config instead.
DefaultCertificateProfileName string `validate:"omitempty,alphanum,min=1,max=32"`
// One of the profile names must match the value of ra.defaultProfileName
// or large amounts of issuance will fail.
// TODO(#7414) Remove this deprecated field.
// Deprecated: Use CertProfiles instead. Profile implicitly takes
// the internal Boulder default value of ca.DefaultCertProfileName.
Profile issuance.ProfileConfig `validate:"required_without=CertProfiles,structonly"`
// One of the profile names must match the value of
// DefaultCertificateProfileName or boulder-ca will fail to start.
CertProfiles map[string]*issuance.ProfileConfig `validate:"dive,keys,alphanum,min=1,max=32,endkeys,required_without=Profile,structonly"`
// TODO(#7159): Make this required once all live configs are using it.
CRLProfile issuance.CRLProfileConfig `validate:"-"`
Issuers []issuance.IssuerConfig `validate:"min=1,dive"`
// LintConfig is a path to a zlint config file.
// Deprecated: Use CertProfiles.LintConfig instead.
LintConfig string
// IgnoredLints is a list of lint names for which any errors should be
// ignored.
// Deprecated: Use CertProfiles.IgnoredLints instead.
IgnoredLints []string
}
// How long issued certificates are valid for.
// Deprecated: Use Issuance.CertProfiles.MaxValidityPeriod instead.
Expiry config.Duration
// How far back certificates should be backdated.
// Deprecated: Use Issuance.CertProfiles.MaxValidityBackdate instead.
Backdate config.Duration
// What digits we should prepend to serials after randomly generating them.
// Deprecated: Use SerialPrefixHex instead.
SerialPrefix int `validate:"required_without=SerialPrefixHex,omitempty,min=1,max=127"`
@ -78,6 +94,12 @@ type Config struct {
// Section 4.9.10, it MUST NOT be more than 10 days. Default 96h.
LifespanOCSP config.Duration
// LifespanCRL is how long CRLs are valid for. It should be longer than the
// `period` field of the CRL Updater. Per the BRs, Section 4.9.7, it MUST
// NOT be more than 10 days.
// Deprecated: Use Config.CA.Issuance.CRLProfile.ValidityInterval instead.
LifespanCRL config.Duration `validate:"-"`
// GoodKey is an embedded config stanza for the goodkey library.
GoodKey goodkey.Config
@ -157,6 +179,15 @@ func main() {
c.CA.LifespanOCSP.Duration = 96 * time.Hour
}
// TODO(#7159): Remove these fallbacks once all live configs are setting the
// CRL validity interval inside the Issuance.CRLProfile Config.
if c.CA.Issuance.CRLProfile.ValidityInterval.Duration == 0 && c.CA.LifespanCRL.Duration != 0 {
c.CA.Issuance.CRLProfile.ValidityInterval = c.CA.LifespanCRL
}
if c.CA.Issuance.CRLProfile.MaxBackdate.Duration == 0 && c.CA.Backdate.Duration != 0 {
c.CA.Issuance.CRLProfile.MaxBackdate = c.CA.Backdate
}
scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.CA.DebugAddr)
defer oTelShutdown(context.Background())
logger.Info(cmd.VersionString())
@ -164,9 +195,8 @@ func main() {
metrics := ca.NewCAMetrics(scope)
cmd.FailOnError(c.PA.CheckChallenges(), "Invalid PA configuration")
cmd.FailOnError(c.PA.CheckIdentifiers(), "Invalid PA configuration")
pa, err := policy.New(c.PA.Identifiers, c.PA.Challenges, logger)
pa, err := policy.New(c.PA.Challenges, logger)
cmd.FailOnError(err, "Couldn't create PA")
if c.CA.HostnamePolicyFile == "" {
@ -183,40 +213,48 @@ func main() {
}
clk := cmd.Clock()
var crlShards int
issuers := make([]*issuance.Issuer, 0, len(c.CA.Issuance.Issuers))
for i, issuerConfig := range c.CA.Issuance.Issuers {
for _, issuerConfig := range c.CA.Issuance.Issuers {
issuer, err := issuance.LoadIssuer(issuerConfig, clk)
cmd.FailOnError(err, "Loading issuer")
// All issuers should have the same number of CRL shards, because
// crl-updater assumes they all have the same number.
if issuerConfig.CRLShards != 0 && crlShards == 0 {
crlShards = issuerConfig.CRLShards
}
if issuerConfig.CRLShards != crlShards {
cmd.Fail(fmt.Sprintf("issuer %d has %d shards, want %d", i, issuerConfig.CRLShards, crlShards))
}
issuers = append(issuers, issuer)
logger.Infof("Loaded issuer: name=[%s] keytype=[%s] nameID=[%v] isActive=[%t]", issuer.Name(), issuer.KeyType(), issuer.NameID(), issuer.IsActive())
}
if c.CA.Issuance.DefaultCertificateProfileName == "" {
c.CA.Issuance.DefaultCertificateProfileName = "defaultBoulderCertificateProfile"
}
logger.Infof("Configured default certificate profile name set to: %s", c.CA.Issuance.DefaultCertificateProfileName)
// TODO(#7414) Remove this check.
if !reflect.ValueOf(c.CA.Issuance.Profile).IsZero() && len(c.CA.Issuance.CertProfiles) > 0 {
cmd.Fail("Only one of Issuance.Profile or Issuance.CertProfiles can be configured")
}
// If no individual cert profiles are configured, pretend that the deprecated
// top-level profile as the only individual profile instead.
// TODO(#7414) Remove this fallback.
if len(c.CA.Issuance.CertProfiles) == 0 {
cmd.Fail("At least one profile must be configured")
c.CA.Issuance.CertProfiles = make(map[string]*issuance.ProfileConfig, 0)
c.CA.Issuance.CertProfiles[c.CA.Issuance.DefaultCertificateProfileName] = &c.CA.Issuance.Profile
}
// If any individual cert profile doesn't have its own lint configuration,
// instead copy in the deprecated top-level lint configuration.
// TODO(#7414): Remove this fallback.
for _, prof := range c.CA.Issuance.CertProfiles {
if prof.LintConfig == "" && len(prof.IgnoredLints) == 0 {
prof.LintConfig = c.CA.Issuance.LintConfig
prof.IgnoredLints = c.CA.Issuance.IgnoredLints
}
}
tlsConfig, err := c.CA.TLS.Load(scope)
cmd.FailOnError(err, "TLS config")
saConn, err := bgrpc.ClientSetup(c.CA.SAService, tlsConfig, scope, clk)
conn, err := bgrpc.ClientSetup(c.CA.SAService, tlsConfig, scope, clk)
cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to SA")
sa := sapb.NewStorageAuthorityClient(saConn)
var sctService rapb.SCTProviderClient
if c.CA.SCTService != nil {
sctConn, err := bgrpc.ClientSetup(c.CA.SCTService, tlsConfig, scope, clk)
cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to RA for SCTs")
sctService = rapb.NewSCTProviderClient(sctConn)
}
sa := sapb.NewStorageAuthorityClient(conn)
kp, err := sagoodkey.NewPolicy(&c.CA.GoodKey, sa.KeyBlocked)
cmd.FailOnError(err, "Unable to create key policy")
@ -257,9 +295,9 @@ func main() {
if !c.CA.DisableCertService {
cai, err := ca.NewCertificateAuthorityImpl(
sa,
sctService,
pa,
issuers,
c.CA.Issuance.DefaultCertificateProfileName,
c.CA.Issuance.CertProfiles,
serialPrefix,
c.CA.MaxNames,

View File

@ -4,6 +4,7 @@ import (
"context"
"flag"
"os"
"time"
akamaipb "github.com/letsencrypt/boulder/akamai/proto"
capb "github.com/letsencrypt/boulder/ca/proto"
@ -24,7 +25,6 @@ import (
"github.com/letsencrypt/boulder/ratelimits"
bredis "github.com/letsencrypt/boulder/redis"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/va"
vapb "github.com/letsencrypt/boulder/va/proto"
)
@ -33,8 +33,7 @@ type Config struct {
cmd.ServiceConfig
cmd.HostnamePolicyConfig
// RateLimitPoliciesFilename is deprecated.
RateLimitPoliciesFilename string
RateLimitPoliciesFilename string `validate:"required"`
MaxContactsPerRegistration int
@ -77,35 +76,26 @@ type Config struct {
// limits are per section 7.1 of our combined CP/CPS, under "DV-SSL
// Subscriber Certificate". The value must match the CA and WFE
// configurations.
//
// Deprecated: Set ValidationProfiles[*].MaxNames instead.
MaxNames int `validate:"omitempty,min=1,max=100"`
MaxNames int `validate:"required,min=1,max=100"`
// ValidationProfiles is a map of validation profiles to their
// respective issuance allow lists. If a profile is not included in this
// mapping, it cannot be used by any account. If this field is left
// empty, all profiles are open to all accounts.
ValidationProfiles map[string]*ra.ValidationProfileConfig `validate:"required"`
// AuthorizationLifetimeDays defines how long authorizations will be
// considered valid for. Given a value of 300 days when used with a 90-day
// cert lifetime, this allows creation of certs that will cover a whole
// year, plus a grace period of a month.
AuthorizationLifetimeDays int `validate:"required,min=1,max=397"`
// DefaultProfileName sets the profile to use if one wasn't provided by the
// client in the new-order request. Must match a configured validation
// profile or the RA will fail to start. Must match a certificate profile
// configured in the CA or finalization will fail for orders using this
// default.
DefaultProfileName string `validate:"required"`
// MustStapleAllowList specified the path to a YAML file containing a
// list of account IDs permitted to request certificates with the OCSP
// Must-Staple extension.
//
// Deprecated: This field no longer has any effect, all Must-Staple requests
// are rejected.
// TODO(#8177): Remove this field.
MustStapleAllowList string `validate:"omitempty"`
// PendingAuthorizationLifetimeDays defines how long authorizations may be in
// the pending state. If you can't respond to a challenge this quickly, then
// you need to request a new challenge.
PendingAuthorizationLifetimeDays int `validate:"required,min=1,max=29"`
// GoodKey is an embedded config stanza for the goodkey library.
GoodKey goodkey.Config
// OrderLifetime is how far in the future an Order's expiration date should
// be set when it is first created.
OrderLifetime config.Duration
// FinalizeTimeout is how long the RA is willing to wait for the Order
// finalization process to take. This config parameter only has an effect
// if the AsyncFinalization feature flag is enabled. Any systems which
@ -123,6 +113,11 @@ type Config struct {
// a `Stagger` value controlling how long we wait for one operator group
// to respond before trying a different one.
CTLogs ctconfig.CTConfig
// InformationalCTLogs are a set of CT logs we will always submit to
// but won't ever use the SCTs from. This may be because we want to
// test them or because they are not yet approved by a browser/root
// program but we still want our certs to end up there.
InformationalCTLogs []ctconfig.LogDescription
// IssuerCerts are paths to all intermediate certificates which may have
// been used to issue certificates in the last 90 days. These are used to
@ -167,9 +162,8 @@ func main() {
// Validate PA config and set defaults if needed
cmd.FailOnError(c.PA.CheckChallenges(), "Invalid PA configuration")
cmd.FailOnError(c.PA.CheckIdentifiers(), "Invalid PA configuration")
pa, err := policy.New(c.PA.Identifiers, c.PA.Challenges, logger)
pa, err := policy.New(c.PA.Challenges, logger)
cmd.FailOnError(err, "Couldn't create PA")
if c.RA.HostnamePolicyFile == "" {
@ -238,22 +232,23 @@ func main() {
ctp = ctpolicy.New(pubc, sctLogs, infoLogs, finalLogs, c.RA.CTLogs.Stagger.Duration, logger, scope)
if len(c.RA.ValidationProfiles) == 0 {
cmd.Fail("At least one profile must be configured")
// Baseline Requirements v1.8.1 section 4.2.1: "any reused data, document,
// or completed validation MUST be obtained no more than 398 days prior
// to issuing the Certificate". If unconfigured or the configured value is
// greater than 397 days, bail out.
if c.RA.AuthorizationLifetimeDays <= 0 || c.RA.AuthorizationLifetimeDays > 397 {
cmd.Fail("authorizationLifetimeDays value must be greater than 0 and less than 398")
}
authorizationLifetime := time.Duration(c.RA.AuthorizationLifetimeDays) * 24 * time.Hour
// TODO(#7993): Remove this fallback and make ValidationProfile.MaxNames a
// required config field. We don't do any validation on the value of this
// top-level MaxNames because that happens inside the call to
// NewValidationProfiles below.
for _, pc := range c.RA.ValidationProfiles {
if pc.MaxNames == 0 {
pc.MaxNames = c.RA.MaxNames
}
// The Baseline Requirements v1.8.1 state that validation tokens "MUST
// NOT be used for more than 30 days from its creation". If unconfigured
// or the configured value pendingAuthorizationLifetimeDays is greater
// than 29 days, bail out.
if c.RA.PendingAuthorizationLifetimeDays <= 0 || c.RA.PendingAuthorizationLifetimeDays > 29 {
cmd.Fail("pendingAuthorizationLifetimeDays value must be greater than 0 and less than 30")
}
validationProfiles, err := ra.NewValidationProfiles(c.RA.DefaultProfileName, c.RA.ValidationProfiles)
cmd.FailOnError(err, "Failed to load validation profiles")
pendingAuthorizationLifetime := time.Duration(c.RA.PendingAuthorizationLifetimeDays) * 24 * time.Hour
if features.Get().AsyncFinalize && c.RA.FinalizeTimeout.Duration == 0 {
cmd.Fail("finalizeTimeout must be supplied when AsyncFinalize feature is enabled")
@ -262,6 +257,10 @@ func main() {
kp, err := sagoodkey.NewPolicy(&c.RA.GoodKey, sac.KeyBlocked)
cmd.FailOnError(err, "Unable to create key policy")
if c.RA.MaxNames == 0 {
cmd.Fail("Error in RA config: MaxNames must not be 0")
}
var limiter *ratelimits.Limiter
var txnBuilder *ratelimits.TransactionBuilder
var limiterRedis *bredis.Ring
@ -273,7 +272,7 @@ func main() {
source := ratelimits.NewRedisSource(limiterRedis.Ring, clk, scope)
limiter, err = ratelimits.NewLimiter(clk, source, scope)
cmd.FailOnError(err, "Failed to create rate limiter")
txnBuilder, err = ratelimits.NewTransactionBuilderFromFiles(c.RA.Limiter.Defaults, c.RA.Limiter.Overrides)
txnBuilder, err = ratelimits.NewTransactionBuilder(c.RA.Limiter.Defaults, c.RA.Limiter.Overrides)
cmd.FailOnError(err, "Failed to create rate limits transaction builder")
}
@ -286,29 +285,29 @@ func main() {
limiter,
txnBuilder,
c.RA.MaxNames,
validationProfiles,
authorizationLifetime,
pendingAuthorizationLifetime,
pubc,
caaClient,
c.RA.OrderLifetime.Duration,
c.RA.FinalizeTimeout.Duration,
ctp,
apc,
issuerCerts,
)
defer rai.Drain()
defer rai.DrainFinalize()
policyErr := rai.LoadRateLimitPoliciesFile(c.RA.RateLimitPoliciesFilename)
cmd.FailOnError(policyErr, "Couldn't load rate limit policies file")
rai.PA = pa
rai.VA = va.RemoteClients{
VAClient: vac,
CAAClient: caaClient,
}
rai.VA = vac
rai.CA = cac
rai.OCSP = ocspc
rai.SA = sac
start, err := bgrpc.NewServer(c.RA.GRPC, logger).Add(
&rapb.RegistrationAuthority_ServiceDesc, rai).Add(
&rapb.SCTProvider_ServiceDesc, rai).
Build(tlsConfig, scope, clk)
&rapb.RegistrationAuthority_ServiceDesc, rai).Build(tlsConfig, scope, clk)
cmd.FailOnError(err, "Unable to setup RA gRPC server")
cmd.FailOnError(start(), "RA gRPC service failed")

View File

@ -10,48 +10,16 @@ import (
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/features"
bgrpc "github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/iana"
"github.com/letsencrypt/boulder/va"
vaConfig "github.com/letsencrypt/boulder/va/config"
vapb "github.com/letsencrypt/boulder/va/proto"
)
// RemoteVAGRPCClientConfig contains the information necessary to setup a gRPC
// client connection. The following GRPC client configuration field combinations
// are allowed:
//
// ServerIPAddresses, [Timeout]
// ServerAddress, DNSAuthority, [Timeout], [HostOverride]
// SRVLookup, DNSAuthority, [Timeout], [HostOverride], [SRVResolver]
// SRVLookups, DNSAuthority, [Timeout], [HostOverride], [SRVResolver]
type RemoteVAGRPCClientConfig struct {
cmd.GRPCClientConfig
// Perspective uniquely identifies the Network Perspective used to
// perform the validation, as specified in BRs Section 5.4.1,
// Requirement 2.7 ("Multi-Perspective Issuance Corroboration attempts
// from each Network Perspective"). It should uniquely identify a group
// of RVAs deployed in the same datacenter.
Perspective string `validate:"required"`
// RIR indicates the Regional Internet Registry where this RVA is
// located. This field is used to identify the RIR region from which a
// given validation was performed, as specified in the "Phased
// Implementation Timeline" in BRs Section 3.2.2.9. It must be one of
// the following values:
// - ARIN
// - RIPE
// - APNIC
// - LACNIC
// - AFRINIC
RIR string `validate:"required,oneof=ARIN RIPE APNIC LACNIC AFRINIC"`
}
type Config struct {
VA struct {
vaConfig.Common
RemoteVAs []RemoteVAGRPCClientConfig `validate:"omitempty,dive"`
// Deprecated and ignored
MaxRemoteValidationFailures int `validate:"omitempty,min=0,required_with=RemoteVAs"`
RemoteVAs []cmd.GRPCClientConfig `validate:"omitempty,dive"`
MaxRemoteValidationFailures int `validate:"omitempty,min=0,required_with=RemoteVAs"`
Features features.Config
}
@ -82,12 +50,16 @@ func main() {
clk := cmd.Clock()
var servers bdns.ServerProvider
proto := "udp"
if features.Get().DOH {
proto = "tcp"
}
if len(c.VA.DNSStaticResolvers) != 0 {
servers, err = bdns.NewStaticProvider(c.VA.DNSStaticResolvers)
cmd.FailOnError(err, "Couldn't start static DNS server resolver")
} else {
servers, err = bdns.StartDynamicProvider(c.VA.DNSProvider, 60*time.Second, "tcp")
servers, err = bdns.StartDynamicProvider(c.VA.DNSProvider, 60*time.Second, proto)
cmd.FailOnError(err, "Couldn't start dynamic DNS server resolver")
}
defer servers.Stop()
@ -103,7 +75,6 @@ func main() {
scope,
clk,
c.VA.DNSTries,
c.VA.UserAgent,
logger,
tlsConfig)
} else {
@ -113,7 +84,6 @@ func main() {
scope,
clk,
c.VA.DNSTries,
c.VA.UserAgent,
logger,
tlsConfig)
}
@ -121,7 +91,7 @@ func main() {
if len(c.VA.RemoteVAs) > 0 {
for _, rva := range c.VA.RemoteVAs {
rva := rva
vaConn, err := bgrpc.ClientSetup(&rva.GRPCClientConfig, tlsConfig, scope, clk)
vaConn, err := bgrpc.ClientSetup(&rva, tlsConfig, scope, clk)
cmd.FailOnError(err, "Unable to create remote VA client")
remotes = append(
remotes,
@ -130,9 +100,7 @@ func main() {
VAClient: vapb.NewVAClient(vaConn),
CAAClient: vapb.NewCAAClient(vaConn),
},
Address: rva.ServerAddress,
Perspective: rva.Perspective,
RIR: rva.RIR,
Address: rva.ServerAddress,
},
)
}
@ -141,6 +109,7 @@ func main() {
vai, err := va.NewValidationAuthorityImpl(
resolver,
remotes,
c.VA.MaxRemoteValidationFailures,
c.VA.UserAgent,
c.VA.IssuerDomain,
scope,
@ -148,8 +117,7 @@ func main() {
logger,
c.VA.AccountURIPrefixes,
va.PrimaryPerspective,
"",
iana.IsReservedAddr)
"")
cmd.FailOnError(err, "Unable to create VA server")
start, err := bgrpc.NewServer(c.VA.GRPC, logger).Add(

View File

@ -12,7 +12,6 @@ import (
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/config"
emailpb "github.com/letsencrypt/boulder/email/proto"
"github.com/letsencrypt/boulder/features"
"github.com/letsencrypt/boulder/goodkey"
"github.com/letsencrypt/boulder/goodkey/sagoodkey"
@ -43,26 +42,22 @@ type Config struct {
TLSListenAddress string `validate:"omitempty,hostname_port"`
// Timeout is the per-request overall timeout. This should be slightly
// lower than the upstream's timeout when making requests to this service.
// lower than the upstream's timeout when making requests to the WFE.
Timeout config.Duration `validate:"-"`
// ShutdownStopTimeout determines the maximum amount of time to wait
// for extant request handlers to complete before exiting. It should be
// greater than Timeout.
ShutdownStopTimeout config.Duration
ServerCertificatePath string `validate:"required_with=TLSListenAddress"`
ServerKeyPath string `validate:"required_with=TLSListenAddress"`
AllowOrigins []string
ShutdownStopTimeout config.Duration
SubscriberAgreementURL string
TLS cmd.TLSConfig
RAService *cmd.GRPCClientConfig
SAService *cmd.GRPCClientConfig
EmailExporter *cmd.GRPCClientConfig
RAService *cmd.GRPCClientConfig
SAService *cmd.GRPCClientConfig
// GetNonceService is a gRPC config which contains a single SRV name
// used to lookup nonce-service instances used exclusively for nonce
@ -76,13 +71,14 @@ type Config struct {
// local and remote nonce-service instances.
RedeemNonceService *cmd.GRPCClientConfig `validate:"required"`
// NonceHMACKey is a path to a file containing an HMAC key which is a
// secret used for deriving the prefix of each nonce instance. It should
// contain 256 bits (32 bytes) of random data to be suitable as an
// HMAC-SHA256 key (e.g. the output of `openssl rand -hex 32`). In a
// NoncePrefixKey is a secret used for deriving the prefix of each nonce
// instance. It should contain 256 bits of random data to be suitable as
// an HMAC-SHA256 key (e.g. the output of `openssl rand -hex 32`). In a
// multi-DC deployment this value should be the same across all
// boulder-wfe and nonce-service instances.
NonceHMACKey cmd.HMACKeyConfig `validate:"-"`
//
// TODO(#7632) Update this to use the new HMACKeyConfig.
NoncePrefixKey cmd.PasswordConfig `validate:"-"`
// Chains is a list of lists of certificate filenames. Each inner list is
// a chain (starting with the issuing intermediate, followed by one or
@ -119,18 +115,17 @@ type Config struct {
// StaleTimeout determines how old should data be to be accessed via Boulder-specific GET-able APIs
StaleTimeout config.Duration `validate:"-"`
// AuthorizationLifetimeDays duplicates the RA's config of the same name.
// Deprecated: This field no longer has any effect.
AuthorizationLifetimeDays int `validate:"-"`
// AuthorizationLifetimeDays defines how long authorizations will be
// considered valid for. The WFE uses this to find the creation date of
// authorizations by subtracing this value from the expiry. It should match
// the value configured in the RA.
AuthorizationLifetimeDays int `validate:"required,min=1,max=397"`
// PendingAuthorizationLifetimeDays duplicates the RA's config of the same name.
// Deprecated: This field no longer has any effect.
PendingAuthorizationLifetimeDays int `validate:"-"`
// MaxContactsPerRegistration limits the number of contact addresses which
// can be provided in a single NewAccount request. Requests containing more
// contacts than this are rejected. Default: 10.
MaxContactsPerRegistration int `validate:"omitempty,min=1"`
// PendingAuthorizationLifetimeDays defines how long authorizations may be in
// the pending state before expiry. The WFE uses this to find the creation
// date of pending authorizations by subtracting this value from the expiry.
// It should match the value configured in the RA.
PendingAuthorizationLifetimeDays int `validate:"required,min=1,max=29"`
AccountCache *CacheConfig
@ -156,6 +151,13 @@ type Config struct {
Overrides string
}
// MaxNames is the maximum number of subjectAltNames in a single cert.
// The value supplied SHOULD be greater than 0 and no more than 100,
// defaults to 100. These limits are per section 7.1 of our combined
// CP/CPS, under "DV-SSL Subscriber Certificate". The value must match
// the CA and RA configurations.
MaxNames int `validate:"min=0,max=100"`
// CertProfiles is a map of acceptable certificate profile names to
// descriptions (perhaps including URLs) of those profiles. NewOrder
// Requests with a profile name not present in this map will be rejected.
@ -241,6 +243,11 @@ func main() {
if *debugAddr != "" {
c.WFE.DebugAddr = *debugAddr
}
maxNames := c.WFE.MaxNames
if maxNames == 0 {
// Default to 100 names per cert.
maxNames = 100
}
certChains := map[issuance.NameID][][]byte{}
issuerCerts := map[issuance.NameID]*issuance.Certificate{}
@ -280,13 +287,6 @@ func main() {
cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to SA")
sac := sapb.NewStorageAuthorityReadOnlyClient(saConn)
var eec emailpb.ExporterClient
if c.WFE.EmailExporter != nil {
emailExporterConn, err := bgrpc.ClientSetup(c.WFE.EmailExporter, tlsConfig, stats, clk)
cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to email-exporter")
eec = emailpb.NewExporterClient(emailExporterConn)
}
if c.WFE.RedeemNonceService == nil {
cmd.Fail("'redeemNonceService' must be configured.")
}
@ -294,8 +294,11 @@ func main() {
cmd.Fail("'getNonceService' must be configured")
}
noncePrefixKey, err := c.WFE.NonceHMACKey.Load()
cmd.FailOnError(err, "Failed to load nonceHMACKey file")
var noncePrefixKey string
if c.WFE.NoncePrefixKey.PasswordFile != "" {
noncePrefixKey, err = c.WFE.NoncePrefixKey.Pass()
cmd.FailOnError(err, "Failed to load noncePrefixKey")
}
getNonceConn, err := bgrpc.ClientSetup(c.WFE.GetNonceService, tlsConfig, stats, clk)
cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to get nonce service")
@ -317,9 +320,23 @@ func main() {
c.WFE.StaleTimeout.Duration = time.Minute * 10
}
if c.WFE.MaxContactsPerRegistration == 0 {
c.WFE.MaxContactsPerRegistration = 10
// Baseline Requirements v1.8.1 section 4.2.1: "any reused data, document,
// or completed validation MUST be obtained no more than 398 days prior
// to issuing the Certificate". If unconfigured or the configured value is
// greater than 397 days, bail out.
if c.WFE.AuthorizationLifetimeDays <= 0 || c.WFE.AuthorizationLifetimeDays > 397 {
cmd.Fail("authorizationLifetimeDays value must be greater than 0 and less than 398")
}
authorizationLifetime := time.Duration(c.WFE.AuthorizationLifetimeDays) * 24 * time.Hour
// The Baseline Requirements v1.8.1 state that validation tokens "MUST
// NOT be used for more than 30 days from its creation". If unconfigured
// or the configured value pendingAuthorizationLifetimeDays is greater
// than 29 days, bail out.
if c.WFE.PendingAuthorizationLifetimeDays <= 0 || c.WFE.PendingAuthorizationLifetimeDays > 29 {
cmd.Fail("pendingAuthorizationLifetimeDays value must be greater than 0 and less than 30")
}
pendingAuthorizationLifetime := time.Duration(c.WFE.PendingAuthorizationLifetimeDays) * 24 * time.Hour
var limiter *ratelimits.Limiter
var txnBuilder *ratelimits.TransactionBuilder
@ -332,7 +349,7 @@ func main() {
source := ratelimits.NewRedisSource(limiterRedis.Ring, clk, stats)
limiter, err = ratelimits.NewLimiter(clk, source, stats)
cmd.FailOnError(err, "Failed to create rate limiter")
txnBuilder, err = ratelimits.NewTransactionBuilderFromFiles(c.WFE.Limiter.Defaults, c.WFE.Limiter.Overrides)
txnBuilder, err = ratelimits.NewTransactionBuilder(c.WFE.Limiter.Defaults, c.WFE.Limiter.Overrides)
cmd.FailOnError(err, "Failed to create rate limits transaction builder")
}
@ -355,16 +372,17 @@ func main() {
logger,
c.WFE.Timeout.Duration,
c.WFE.StaleTimeout.Duration,
c.WFE.MaxContactsPerRegistration,
authorizationLifetime,
pendingAuthorizationLifetime,
rac,
sac,
eec,
gnc,
rnc,
noncePrefixKey,
accountGetter,
limiter,
txnBuilder,
maxNames,
c.WFE.CertProfiles,
unpauseSigner,
c.WFE.Unpause.JWTLifetime.Duration,

View File

@ -5,6 +5,7 @@ import (
"os"
"strings"
_ "github.com/letsencrypt/boulder/cmd/admin-revoker"
_ "github.com/letsencrypt/boulder/cmd/akamai-purger"
_ "github.com/letsencrypt/boulder/cmd/bad-key-revoker"
_ "github.com/letsencrypt/boulder/cmd/boulder-ca"
@ -15,12 +16,15 @@ import (
_ "github.com/letsencrypt/boulder/cmd/boulder-va"
_ "github.com/letsencrypt/boulder/cmd/boulder-wfe2"
_ "github.com/letsencrypt/boulder/cmd/cert-checker"
_ "github.com/letsencrypt/boulder/cmd/contact-auditor"
_ "github.com/letsencrypt/boulder/cmd/crl-checker"
_ "github.com/letsencrypt/boulder/cmd/crl-storer"
_ "github.com/letsencrypt/boulder/cmd/crl-updater"
_ "github.com/letsencrypt/boulder/cmd/email-exporter"
_ "github.com/letsencrypt/boulder/cmd/expiration-mailer"
_ "github.com/letsencrypt/boulder/cmd/id-exporter"
_ "github.com/letsencrypt/boulder/cmd/log-validator"
_ "github.com/letsencrypt/boulder/cmd/nonce-service"
_ "github.com/letsencrypt/boulder/cmd/notify-mailer"
_ "github.com/letsencrypt/boulder/cmd/ocsp-responder"
_ "github.com/letsencrypt/boulder/cmd/remoteva"
_ "github.com/letsencrypt/boulder/cmd/reversed-hostname-checker"
@ -81,30 +85,36 @@ var boulderUsage = fmt.Sprintf(`Usage: %s <subcommand> [flags]
func main() {
defer cmd.AuditPanic()
if len(os.Args) <= 1 {
// No arguments passed.
fmt.Fprint(os.Stderr, boulderUsage)
return
}
if os.Args[1] == "--help" || os.Args[1] == "-help" {
// Help flag passed.
fmt.Fprint(os.Stderr, boulderUsage)
return
}
if os.Args[1] == "--list" || os.Args[1] == "-list" {
// List flag passed.
for _, c := range cmd.AvailableCommands() {
fmt.Println(c)
var command string
if core.Command() == "boulder" {
// Operator passed the boulder component as a subcommand.
if len(os.Args) <= 1 {
// No arguments passed.
fmt.Fprint(os.Stderr, boulderUsage)
return
}
return
}
// Remove the subcommand from the arguments.
command := os.Args[1]
os.Args = os.Args[1:]
if os.Args[1] == "--help" || os.Args[1] == "-help" {
// Help flag passed.
fmt.Fprint(os.Stderr, boulderUsage)
return
}
if os.Args[1] == "--list" || os.Args[1] == "-list" {
// List flag passed.
for _, c := range cmd.AvailableCommands() {
fmt.Println(c)
}
return
}
command = os.Args[1]
// Remove the subcommand from the arguments.
os.Args = os.Args[1:]
} else {
// Operator ran a boulder component using a symlink.
command = core.Command()
}
config := getConfigPath()
if config != "" {

View File

@ -40,7 +40,11 @@ func TestConfigValidation(t *testing.T) {
case "boulder-sa":
fileNames = []string{"sa.json"}
case "boulder-va":
fileNames = []string{"va.json"}
fileNames = []string{
"va.json",
"va-remote-a.json",
"va-remote-b.json",
}
case "remoteva":
fileNames = []string{
"remoteva-a.json",

View File

@ -305,11 +305,12 @@ func makeTemplate(randReader io.Reader, profile *certProfile, pubKey []byte, tbc
case crlCert:
cert.IsCA = false
case requestCert, intermediateCert:
// id-kp-serverAuth is included in intermediate certificates, as required by
// Section 7.1.2.10.6 of the CA/BF Baseline Requirements.
// id-kp-clientAuth is excluded, as required by section 3.2.1 of the Chrome
// Root Program Requirements.
cert.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}
// id-kp-serverAuth and id-kp-clientAuth are included in intermediate
// certificates in order to technically constrain them. id-kp-serverAuth
// is required by 7.1.2.2.g of the CABF Baseline Requirements, but
// id-kp-clientAuth isn't. We include id-kp-clientAuth as we also include
// it in our end-entity certificates.
cert.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}
cert.MaxPathLenZero = true
case crossCert:
cert.ExtKeyUsage = tbcs.ExtKeyUsage
@ -317,11 +318,11 @@ func makeTemplate(randReader io.Reader, profile *certProfile, pubKey []byte, tbc
}
for _, policyConfig := range profile.Policies {
x509OID, err := x509.ParseOID(policyConfig.OID)
oid, err := parseOID(policyConfig.OID)
if err != nil {
return nil, fmt.Errorf("failed to parse %s as OID: %w", policyConfig.OID, err)
return nil, err
}
cert.Policies = append(cert.Policies, x509OID)
cert.PolicyIdentifiers = append(cert.PolicyIdentifiers, oid)
}
return cert, nil

View File

@ -2,9 +2,8 @@ package main
import (
"bytes"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
@ -127,14 +126,15 @@ func TestMakeTemplateRoot(t *testing.T) {
test.AssertEquals(t, len(cert.IssuingCertificateURL), 1)
test.AssertEquals(t, cert.IssuingCertificateURL[0], profile.IssuerURL)
test.AssertEquals(t, cert.KeyUsage, x509.KeyUsageDigitalSignature|x509.KeyUsageCRLSign)
test.AssertEquals(t, len(cert.Policies), 2)
test.AssertEquals(t, len(cert.PolicyIdentifiers), 2)
test.AssertEquals(t, len(cert.ExtKeyUsage), 0)
cert, err = makeTemplate(randReader, profile, pubKey, nil, intermediateCert)
test.AssertNotError(t, err, "makeTemplate failed when everything worked as expected")
test.Assert(t, cert.MaxPathLenZero, "MaxPathLenZero not set in intermediate template")
test.AssertEquals(t, len(cert.ExtKeyUsage), 1)
test.AssertEquals(t, cert.ExtKeyUsage[0], x509.ExtKeyUsageServerAuth)
test.AssertEquals(t, len(cert.ExtKeyUsage), 2)
test.AssertEquals(t, cert.ExtKeyUsage[0], x509.ExtKeyUsageClientAuth)
test.AssertEquals(t, cert.ExtKeyUsage[1], x509.ExtKeyUsageServerAuth)
}
func TestMakeTemplateRestrictedCrossCertificate(t *testing.T) {
@ -551,7 +551,7 @@ func TestGenerateCSR(t *testing.T) {
Country: "country",
}
signer, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
signer, err := rsa.GenerateKey(rand.Reader, 1024)
test.AssertNotError(t, err, "failed to generate test key")
csrBytes, err := generateCSR(profile, &wrappedSigner{signer})

View File

@ -96,7 +96,7 @@ func postIssuanceLinting(fc *x509.Certificate, skipLints []string) error {
type keyGenConfig struct {
Type string `yaml:"type"`
RSAModLength int `yaml:"rsa-mod-length"`
RSAModLength uint `yaml:"rsa-mod-length"`
ECDSACurve string `yaml:"ecdsa-curve"`
}

View File

@ -6,9 +6,8 @@ import (
"log"
"math/big"
"github.com/miekg/pkcs11"
"github.com/letsencrypt/boulder/pkcs11helpers"
"github.com/miekg/pkcs11"
)
const (
@ -19,10 +18,10 @@ const (
// device and specifies which mechanism should be used. modulusLen specifies the
// length of the modulus to be generated on the device in bits and exponent
// specifies the public exponent that should be used.
func rsaArgs(label string, modulusLen int, keyID []byte) generateArgs {
func rsaArgs(label string, modulusLen, exponent uint, keyID []byte) generateArgs {
// Encode as unpadded big endian encoded byte slice
expSlice := big.NewInt(rsaExp).Bytes()
log.Printf("\tEncoded public exponent (%d) as: %0X\n", rsaExp, expSlice)
expSlice := big.NewInt(int64(exponent)).Bytes()
log.Printf("\tEncoded public exponent (%d) as: %0X\n", exponent, expSlice)
return generateArgs{
mechanism: []*pkcs11.Mechanism{
pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_KEY_PAIR_GEN, nil),
@ -56,15 +55,15 @@ func rsaArgs(label string, modulusLen int, keyID []byte) generateArgs {
// handle, and constructs a rsa.PublicKey. It also checks that the key has the
// correct length modulus and that the public exponent is what was requested in
// the public key template.
func rsaPub(session *pkcs11helpers.Session, object pkcs11.ObjectHandle, modulusLen int) (*rsa.PublicKey, error) {
func rsaPub(session *pkcs11helpers.Session, object pkcs11.ObjectHandle, modulusLen, exponent uint) (*rsa.PublicKey, error) {
pubKey, err := session.GetRSAPublicKey(object)
if err != nil {
return nil, err
}
if pubKey.E != rsaExp {
if pubKey.E != int(exponent) {
return nil, errors.New("returned CKA_PUBLIC_EXPONENT doesn't match expected exponent")
}
if pubKey.N.BitLen() != modulusLen {
if pubKey.N.BitLen() != int(modulusLen) {
return nil, errors.New("returned CKA_MODULUS isn't of the expected bit length")
}
log.Printf("\tPublic exponent: %d\n", pubKey.E)
@ -76,21 +75,21 @@ func rsaPub(session *pkcs11helpers.Session, object pkcs11.ObjectHandle, modulusL
// specified by modulusLen and with the exponent 65537.
// It returns the public part of the generated key pair as a rsa.PublicKey
// and the random key ID that the HSM uses to identify the key pair.
func rsaGenerate(session *pkcs11helpers.Session, label string, modulusLen int) (*rsa.PublicKey, []byte, error) {
func rsaGenerate(session *pkcs11helpers.Session, label string, modulusLen uint) (*rsa.PublicKey, []byte, error) {
keyID := make([]byte, 4)
_, err := newRandReader(session).Read(keyID)
if err != nil {
return nil, nil, err
}
log.Printf("Generating RSA key with %d bit modulus and public exponent %d and ID %x\n", modulusLen, rsaExp, keyID)
args := rsaArgs(label, modulusLen, keyID)
args := rsaArgs(label, modulusLen, rsaExp, keyID)
pub, _, err := session.GenerateKeyPair(args.mechanism, args.publicAttrs, args.privateAttrs)
if err != nil {
return nil, nil, err
}
log.Println("Key generated")
log.Println("Extracting public key")
pk, err := rsaPub(session, pub, modulusLen)
pk, err := rsaPub(session, pub, modulusLen, rsaExp)
if err != nil {
return nil, nil, err
}

View File

@ -8,15 +8,24 @@ import (
"math/big"
"testing"
"github.com/miekg/pkcs11"
"github.com/letsencrypt/boulder/pkcs11helpers"
"github.com/letsencrypt/boulder/test"
"github.com/miekg/pkcs11"
)
func TestRSAPub(t *testing.T) {
s, ctx := pkcs11helpers.NewSessionWithMock()
// test we fail to construct key with non-matching exp
ctx.GetAttributeValueFunc = func(pkcs11.SessionHandle, pkcs11.ObjectHandle, []*pkcs11.Attribute) ([]*pkcs11.Attribute, error) {
return []*pkcs11.Attribute{
pkcs11.NewAttribute(pkcs11.CKA_PUBLIC_EXPONENT, []byte{1, 0, 1}),
pkcs11.NewAttribute(pkcs11.CKA_MODULUS, []byte{255}),
}, nil
}
_, err := rsaPub(s, 0, 0, 255)
test.AssertError(t, err, "rsaPub didn't fail with non-matching exp")
// test we fail to construct key with non-matching modulus
ctx.GetAttributeValueFunc = func(pkcs11.SessionHandle, pkcs11.ObjectHandle, []*pkcs11.Attribute) ([]*pkcs11.Attribute, error) {
return []*pkcs11.Attribute{
@ -24,7 +33,7 @@ func TestRSAPub(t *testing.T) {
pkcs11.NewAttribute(pkcs11.CKA_MODULUS, []byte{255}),
}, nil
}
_, err := rsaPub(s, 0, 16)
_, err = rsaPub(s, 0, 16, 65537)
test.AssertError(t, err, "rsaPub didn't fail with non-matching modulus size")
// test we don't fail with the correct attributes
@ -34,7 +43,7 @@ func TestRSAPub(t *testing.T) {
pkcs11.NewAttribute(pkcs11.CKA_MODULUS, []byte{255}),
}, nil
}
_, err = rsaPub(s, 0, 8)
_, err = rsaPub(s, 0, 8, 65537)
test.AssertNotError(t, err, "rsaPub failed with valid attributes")
}

View File

@ -8,7 +8,6 @@ import (
"encoding/json"
"flag"
"fmt"
"net/netip"
"os"
"regexp"
"slices"
@ -30,8 +29,7 @@ import (
"github.com/letsencrypt/boulder/features"
"github.com/letsencrypt/boulder/goodkey"
"github.com/letsencrypt/boulder/goodkey/sagoodkey"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/linter"
_ "github.com/letsencrypt/boulder/linter"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/policy"
"github.com/letsencrypt/boulder/precert"
@ -79,7 +77,7 @@ func (r *report) dump() error {
type reportEntry struct {
Valid bool `json:"valid"`
SANs []string `json:"sans"`
DNSNames []string `json:"dnsNames"`
Problems []string `json:"problems,omitempty"`
}
@ -101,13 +99,12 @@ type certChecker struct {
kp goodkey.KeyPolicy
dbMap certDB
getPrecert precertGetter
certs chan *corepb.Certificate
certs chan core.Certificate
clock clock.Clock
rMu *sync.Mutex
issuedReport report
checkPeriod time.Duration
acceptableValidityDurations map[time.Duration]bool
lints lint.Registry
logger blog.Logger
}
@ -117,7 +114,6 @@ func newChecker(saDbMap certDB,
kp goodkey.KeyPolicy,
period time.Duration,
avd map[time.Duration]bool,
lints lint.Registry,
logger blog.Logger,
) certChecker {
precertGetter := func(ctx context.Context, serial string) ([]byte, error) {
@ -125,20 +121,19 @@ func newChecker(saDbMap certDB,
if err != nil {
return nil, err
}
return precertPb.Der, nil
return precertPb.DER, nil
}
return certChecker{
pa: pa,
kp: kp,
dbMap: saDbMap,
getPrecert: precertGetter,
certs: make(chan *corepb.Certificate, batchSize),
certs: make(chan core.Certificate, batchSize),
rMu: new(sync.Mutex),
clock: clk,
issuedReport: report{Entries: make(map[string]reportEntry)},
checkPeriod: period,
acceptableValidityDurations: avd,
lints: lints,
logger: logger,
}
}
@ -215,7 +210,7 @@ func (c *certChecker) getCerts(ctx context.Context) error {
batchStartID := initialID
var retries int
for {
certs, highestID, err := sa.SelectCertificates(
certs, err := sa.SelectCertificates(
ctx,
c.dbMap,
`WHERE id > :id AND
@ -240,16 +235,16 @@ func (c *certChecker) getCerts(ctx context.Context) error {
}
retries = 0
for _, cert := range certs {
c.certs <- cert
c.certs <- cert.Certificate
}
if len(certs) == 0 {
break
}
lastCert := certs[len(certs)-1]
if lastCert.Issued.AsTime().After(c.issuedReport.end) {
batchStartID = lastCert.ID
if lastCert.Issued.After(c.issuedReport.end) {
break
}
batchStartID = highestID
}
// Close channel so range operations won't block once the channel empties out
@ -257,15 +252,15 @@ func (c *certChecker) getCerts(ctx context.Context) error {
return nil
}
func (c *certChecker) processCerts(ctx context.Context, wg *sync.WaitGroup, badResultsOnly bool) {
func (c *certChecker) processCerts(ctx context.Context, wg *sync.WaitGroup, badResultsOnly bool, ignoredLints map[string]bool) {
for cert := range c.certs {
sans, problems := c.checkCert(ctx, cert)
dnsNames, problems := c.checkCert(ctx, cert, ignoredLints)
valid := len(problems) == 0
c.rMu.Lock()
if !badResultsOnly || (badResultsOnly && !valid) {
c.issuedReport.Entries[cert.Serial] = reportEntry{
Valid: valid,
SANs: sans,
DNSNames: dnsNames,
Problems: problems,
}
}
@ -303,8 +298,8 @@ var expectedExtensionContent = map[string][]byte{
// likely valid at the time the certificate was issued. Authorizations with
// status = "deactivated" are counted for this, so long as their validatedAt
// is before the issuance and expiration is after.
func (c *certChecker) checkValidations(ctx context.Context, cert *corepb.Certificate, idents identifier.ACMEIdentifiers) error {
authzs, err := sa.SelectAuthzsMatchingIssuance(ctx, c.dbMap, cert.RegistrationID, cert.Issued.AsTime(), idents)
func (c *certChecker) checkValidations(ctx context.Context, cert core.Certificate, dnsNames []string) error {
authzs, err := sa.SelectAuthzsMatchingIssuance(ctx, c.dbMap, cert.RegistrationID, cert.Issued, dnsNames)
if err != nil {
return fmt.Errorf("error checking authzs for certificate %s: %w", cert.Serial, err)
}
@ -313,18 +308,18 @@ func (c *certChecker) checkValidations(ctx context.Context, cert *corepb.Certifi
return fmt.Errorf("no relevant authzs found valid at %s", cert.Issued)
}
// We may get multiple authorizations for the same identifier, but that's
// okay. Any authorization for a given identifier is sufficient.
identToAuthz := make(map[identifier.ACMEIdentifier]*corepb.Authorization)
// We may get multiple authorizations for the same name, but that's okay.
// Any authorization for a given name is sufficient.
nameToAuthz := make(map[string]*corepb.Authorization)
for _, m := range authzs {
identToAuthz[identifier.FromProto(m.Identifier)] = m
nameToAuthz[m.DnsName] = m
}
var errors []error
for _, ident := range idents {
_, ok := identToAuthz[ident]
for _, name := range dnsNames {
_, ok := nameToAuthz[name]
if !ok {
errors = append(errors, fmt.Errorf("missing authz for %q", ident.Value))
errors = append(errors, fmt.Errorf("missing authz for %q", name))
continue
}
}
@ -334,196 +329,155 @@ func (c *certChecker) checkValidations(ctx context.Context, cert *corepb.Certifi
return nil
}
// checkCert returns a list of Subject Alternative Names in the certificate and a list of problems with the certificate.
func (c *certChecker) checkCert(ctx context.Context, cert *corepb.Certificate) ([]string, []string) {
// checkCert returns a list of DNS names in the certificate and a list of problems with the certificate.
func (c *certChecker) checkCert(ctx context.Context, cert core.Certificate, ignoredLints map[string]bool) ([]string, []string) {
var dnsNames []string
var problems []string
// Check that the digests match.
if cert.Digest != core.Fingerprint256(cert.Der) {
if cert.Digest != core.Fingerprint256(cert.DER) {
problems = append(problems, "Stored digest doesn't match certificate digest")
}
// Parse the certificate.
parsedCert, err := zX509.ParseCertificate(cert.Der)
if err != nil {
problems = append(problems, fmt.Sprintf("Couldn't parse stored certificate: %s", err))
// This is a fatal error, we can't do any further processing.
return nil, problems
}
// Now that it's parsed, we can extract the SANs.
sans := slices.Clone(parsedCert.DNSNames)
for _, ip := range parsedCert.IPAddresses {
sans = append(sans, ip.String())
}
// Run zlint checks.
results := zlint.LintCertificateEx(parsedCert, c.lints)
for name, res := range results.Results {
if res.Status <= lint.Pass {
continue
}
prob := fmt.Sprintf("zlint %s: %s", res.Status, name)
if res.Details != "" {
prob = fmt.Sprintf("%s %s", prob, res.Details)
}
problems = append(problems, prob)
}
// Check if stored serial is correct.
storedSerial, err := core.StringToSerial(cert.Serial)
if err != nil {
problems = append(problems, "Stored serial is invalid")
} else if parsedCert.SerialNumber.Cmp(storedSerial) != 0 {
problems = append(problems, "Stored serial doesn't match certificate serial")
}
// Check that we have the correct expiration time.
if !parsedCert.NotAfter.Equal(cert.Expires.AsTime()) {
problems = append(problems, "Stored expiration doesn't match certificate NotAfter")
}
// Check if basic constraints are set.
if !parsedCert.BasicConstraintsValid {
problems = append(problems, "Certificate doesn't have basic constraints set")
}
// Check that the cert isn't able to sign other certificates.
if parsedCert.IsCA {
problems = append(problems, "Certificate can sign other certificates")
}
// Check that the cert has a valid validity period. The validity
// period is computed inclusive of the whole final second indicated by
// notAfter.
validityDuration := parsedCert.NotAfter.Add(time.Second).Sub(parsedCert.NotBefore)
_, ok := c.acceptableValidityDurations[validityDuration]
if !ok {
problems = append(problems, "Certificate has unacceptable validity period")
}
// Check that the stored issuance time isn't too far back/forward dated.
if parsedCert.NotBefore.Before(cert.Issued.AsTime().Add(-6*time.Hour)) || parsedCert.NotBefore.After(cert.Issued.AsTime().Add(6*time.Hour)) {
problems = append(problems, "Stored issuance date is outside of 6 hour window of certificate NotBefore")
}
// Check that the cert doesn't contain any SANs of unexpected types.
if len(parsedCert.EmailAddresses) != 0 || len(parsedCert.URIs) != 0 {
problems = append(problems, "Certificate contains SAN of unacceptable type (email or URI)")
}
if parsedCert.Subject.CommonName != "" {
// Check if the CommonName is <= 64 characters.
if len(parsedCert.Subject.CommonName) > 64 {
problems = append(
problems,
fmt.Sprintf("Certificate has common name >64 characters long (%d)", len(parsedCert.Subject.CommonName)),
)
}
// Check that the CommonName is included in the SANs.
if !slices.Contains(sans, parsedCert.Subject.CommonName) {
problems = append(problems, fmt.Sprintf("Certificate Common Name does not appear in Subject Alternative Names: %q !< %v",
parsedCert.Subject.CommonName, parsedCert.DNSNames))
}
}
// Check that the PA is still willing to issue for each DNS name and IP
// address in the SANs. We do not check the CommonName here, as (if it exists)
// we already checked that it is identical to one of the DNSNames in the SAN.
for _, name := range parsedCert.DNSNames {
err = c.pa.WillingToIssue(identifier.ACMEIdentifiers{identifier.NewDNS(name)})
if err != nil {
problems = append(problems, fmt.Sprintf("Policy Authority isn't willing to issue for '%s': %s", name, err))
continue
}
// For defense-in-depth, even if the PA was willing to issue for a name
// we double check it against a list of forbidden domains. This way even
// if the hostnamePolicyFile malfunctions we will flag the forbidden
// domain matches
if forbidden, pattern := isForbiddenDomain(name); forbidden {
problems = append(problems, fmt.Sprintf(
"Policy Authority was willing to issue but domain '%s' matches "+
"forbiddenDomains entry %q", name, pattern))
}
}
for _, name := range parsedCert.IPAddresses {
ip, ok := netip.AddrFromSlice(name)
if !ok {
problems = append(problems, fmt.Sprintf("SANs contain malformed IP %q", name))
continue
}
err = c.pa.WillingToIssue(identifier.ACMEIdentifiers{identifier.NewIP(ip)})
if err != nil {
problems = append(problems, fmt.Sprintf("Policy Authority isn't willing to issue for '%s': %s", name, err))
continue
}
}
// Check the cert has the correct key usage extensions
serverAndClient := slices.Equal(parsedCert.ExtKeyUsage, []zX509.ExtKeyUsage{zX509.ExtKeyUsageServerAuth, zX509.ExtKeyUsageClientAuth})
serverOnly := slices.Equal(parsedCert.ExtKeyUsage, []zX509.ExtKeyUsage{zX509.ExtKeyUsageServerAuth})
if !(serverAndClient || serverOnly) {
problems = append(problems, "Certificate has incorrect key usage extensions")
}
for _, ext := range parsedCert.Extensions {
_, ok := allowedExtensions[ext.Id.String()]
if !ok {
problems = append(problems, fmt.Sprintf("Certificate contains an unexpected extension: %s", ext.Id))
}
expectedContent, ok := expectedExtensionContent[ext.Id.String()]
if ok {
if !bytes.Equal(ext.Value, expectedContent) {
problems = append(problems, fmt.Sprintf("Certificate extension %s contains unexpected content: has %x, expected %x", ext.Id, ext.Value, expectedContent))
}
}
}
// Check that the cert has a good key. Note that this does not perform
// checks which rely on external resources such as weak or blocked key
// lists, or the list of blocked keys in the database. This only performs
// static checks, such as against the RSA key size and the ECDSA curve.
p, err := x509.ParseCertificate(cert.Der)
parsedCert, err := zX509.ParseCertificate(cert.DER)
if err != nil {
problems = append(problems, fmt.Sprintf("Couldn't parse stored certificate: %s", err))
} else {
dnsNames = parsedCert.DNSNames
// Run zlint checks.
results := zlint.LintCertificate(parsedCert)
for name, res := range results.Results {
if ignoredLints[name] || res.Status <= lint.Pass {
continue
}
prob := fmt.Sprintf("zlint %s: %s", res.Status, name)
if res.Details != "" {
prob = fmt.Sprintf("%s %s", prob, res.Details)
}
problems = append(problems, prob)
}
// Check if stored serial is correct.
storedSerial, err := core.StringToSerial(cert.Serial)
if err != nil {
problems = append(problems, "Stored serial is invalid")
} else if parsedCert.SerialNumber.Cmp(storedSerial) != 0 {
problems = append(problems, "Stored serial doesn't match certificate serial")
}
// Check that we have the correct expiration time.
if !parsedCert.NotAfter.Equal(cert.Expires) {
problems = append(problems, "Stored expiration doesn't match certificate NotAfter")
}
// Check if basic constraints are set.
if !parsedCert.BasicConstraintsValid {
problems = append(problems, "Certificate doesn't have basic constraints set")
}
// Check that the cert isn't able to sign other certificates.
if parsedCert.IsCA {
problems = append(problems, "Certificate can sign other certificates")
}
// Check that the cert has a valid validity period. The validity
// period is computed inclusive of the whole final second indicated by
// notAfter.
validityDuration := parsedCert.NotAfter.Add(time.Second).Sub(parsedCert.NotBefore)
_, ok := c.acceptableValidityDurations[validityDuration]
if !ok {
problems = append(problems, "Certificate has unacceptable validity period")
}
// Check that the stored issuance time isn't too far back/forward dated.
if parsedCert.NotBefore.Before(cert.Issued.Add(-6*time.Hour)) || parsedCert.NotBefore.After(cert.Issued.Add(6*time.Hour)) {
problems = append(problems, "Stored issuance date is outside of 6 hour window of certificate NotBefore")
}
if parsedCert.Subject.CommonName != "" {
// Check if the CommonName is <= 64 characters.
if len(parsedCert.Subject.CommonName) > 64 {
problems = append(
problems,
fmt.Sprintf("Certificate has common name >64 characters long (%d)", len(parsedCert.Subject.CommonName)),
)
}
// Check that the CommonName is included in the SANs.
if !slices.Contains(parsedCert.DNSNames, parsedCert.Subject.CommonName) {
problems = append(problems, fmt.Sprintf("Certificate Common Name does not appear in Subject Alternative Names: %q !< %v",
parsedCert.Subject.CommonName, parsedCert.DNSNames))
}
}
// Check that the PA is still willing to issue for each name in DNSNames.
// We do not check the CommonName here, as (if it exists) we already checked
// that it is identical to one of the DNSNames in the SAN.
for _, name := range parsedCert.DNSNames {
err = c.pa.WillingToIssue([]string{name})
if err != nil {
problems = append(problems, fmt.Sprintf("Policy Authority isn't willing to issue for '%s': %s", name, err))
} else {
// For defense-in-depth, even if the PA was willing to issue for a name
// we double check it against a list of forbidden domains. This way even
// if the hostnamePolicyFile malfunctions we will flag the forbidden
// domain matches
if forbidden, pattern := isForbiddenDomain(name); forbidden {
problems = append(problems, fmt.Sprintf(
"Policy Authority was willing to issue but domain '%s' matches "+
"forbiddenDomains entry %q", name, pattern))
}
}
}
// Check the cert has the correct key usage extensions
if !slices.Equal(parsedCert.ExtKeyUsage, []zX509.ExtKeyUsage{zX509.ExtKeyUsageServerAuth, zX509.ExtKeyUsageClientAuth}) {
problems = append(problems, "Certificate has incorrect key usage extensions")
}
for _, ext := range parsedCert.Extensions {
_, ok := allowedExtensions[ext.Id.String()]
if !ok {
problems = append(problems, fmt.Sprintf("Certificate contains an unexpected extension: %s", ext.Id))
}
expectedContent, ok := expectedExtensionContent[ext.Id.String()]
if ok {
if !bytes.Equal(ext.Value, expectedContent) {
problems = append(problems, fmt.Sprintf("Certificate extension %s contains unexpected content: has %x, expected %x", ext.Id, ext.Value, expectedContent))
}
}
}
// Check that the cert has a good key. Note that this does not perform
// checks which rely on external resources such as weak or blocked key
// lists, or the list of blocked keys in the database. This only performs
// static checks, such as against the RSA key size and the ECDSA curve.
p, err := x509.ParseCertificate(cert.DER)
if err != nil {
problems = append(problems, fmt.Sprintf("Couldn't parse stored certificate: %s", err))
}
err = c.kp.GoodKey(ctx, p.PublicKey)
if err != nil {
problems = append(problems, fmt.Sprintf("Key Policy isn't willing to issue for public key: %s", err))
}
}
precertDER, err := c.getPrecert(ctx, cert.Serial)
if err != nil {
// Log and continue, since we want the problems slice to only contains
// problems with the cert itself.
c.logger.Errf("fetching linting precertificate for %s: %s", cert.Serial, err)
atomic.AddInt64(&c.issuedReport.DbErrs, 1)
} else {
err = precert.Correspond(precertDER, cert.Der)
precertDER, err := c.getPrecert(ctx, cert.Serial)
if err != nil {
problems = append(problems, fmt.Sprintf("Certificate does not correspond to precert for %s: %s", cert.Serial, err))
// Log and continue, since we want the problems slice to only contains
// problems with the cert itself.
c.logger.Errf("fetching linting precertificate for %s: %s", cert.Serial, err)
atomic.AddInt64(&c.issuedReport.DbErrs, 1)
} else {
err = precert.Correspond(precertDER, cert.DER)
if err != nil {
problems = append(problems,
fmt.Sprintf("Certificate does not correspond to precert for %s: %s", cert.Serial, err))
}
}
}
if features.Get().CertCheckerChecksValidations {
idents := identifier.FromCert(p)
err = c.checkValidations(ctx, cert, idents)
if err != nil {
if features.Get().CertCheckerRequiresValidations {
problems = append(problems, err.Error())
} else {
var identValues []string
for _, ident := range idents {
identValues = append(identValues, ident.Value)
if features.Get().CertCheckerChecksValidations {
err = c.checkValidations(ctx, cert, parsedCert.DNSNames)
if err != nil {
if features.Get().CertCheckerRequiresValidations {
problems = append(problems, err.Error())
} else {
c.logger.Errf("Certificate %s %s: %s", cert.Serial, parsedCert.DNSNames, err)
}
c.logger.Errf("Certificate %s %s: %s", cert.Serial, identValues, err)
}
}
}
return sans, problems
return dnsNames, problems
}
type Config struct {
@ -546,9 +500,6 @@ type Config struct {
// public keys in the certs it checks.
GoodKey goodkey.Config
// LintConfig is a path to a zlint config file, which can be used to control
// the behavior of zlint's "customizable lints".
LintConfig string
// IgnoredLints is a list of zlint names. Any lint results from a lint in
// the IgnoredLists list are ignored regardless of LintStatus level.
IgnoredLints []string
@ -595,8 +546,13 @@ func main() {
// Validate PA config and set defaults if needed.
cmd.FailOnError(config.PA.CheckChallenges(), "Invalid PA configuration")
cmd.FailOnError(config.PA.CheckIdentifiers(), "Invalid PA configuration")
if config.CertChecker.GoodKey.WeakKeyFile != "" {
cmd.Fail("cert-checker does not support checking against weak key files")
}
if config.CertChecker.GoodKey.BlockedKeyFile != "" {
cmd.Fail("cert-checker does not support checking against blocked key files")
}
kp, err := sagoodkey.NewPolicy(&config.CertChecker.GoodKey, nil)
cmd.FailOnError(err, "Unable to create key policy")
@ -609,7 +565,7 @@ func main() {
})
prometheus.DefaultRegisterer.MustRegister(checkerLatency)
pa, err := policy.New(config.PA.Identifiers, config.PA.Challenges, logger)
pa, err := policy.New(config.PA.Challenges, logger)
cmd.FailOnError(err, "Failed to create PA")
err = pa.LoadHostnamePolicyFile(config.CertChecker.HostnamePolicyFile)
@ -620,14 +576,6 @@ func main() {
cmd.FailOnError(err, "Failed to load CT Log List")
}
lints, err := linter.NewRegistry(config.CertChecker.IgnoredLints)
cmd.FailOnError(err, "Failed to create zlint registry")
if config.CertChecker.LintConfig != "" {
lintconfig, err := lint.NewConfigFromFile(config.CertChecker.LintConfig)
cmd.FailOnError(err, "Failed to load zlint config file")
lints.SetConfiguration(lintconfig)
}
checker := newChecker(
saDbMap,
cmd.Clock(),
@ -635,11 +583,15 @@ func main() {
kp,
config.CertChecker.CheckPeriod.Duration,
acceptableValidityDurations,
lints,
logger,
)
fmt.Fprintf(os.Stderr, "# Getting certificates issued in the last %s\n", config.CertChecker.CheckPeriod)
ignoredLintsMap := make(map[string]bool)
for _, name := range config.CertChecker.IgnoredLints {
ignoredLintsMap[name] = true
}
// Since we grab certificates in batches we don't want this to block, when it
// is finished it will close the certificate channel which allows the range
// loops in checker.processCerts to break
@ -654,7 +606,7 @@ func main() {
wg.Add(1)
go func() {
s := checker.clock.Now()
checker.processCerts(context.TODO(), wg, config.CertChecker.BadResultsOnly)
checker.processCerts(context.TODO(), wg, config.CertChecker.BadResultsOnly, ignoredLintsMap)
checkerLatency.Observe(checker.clock.Since(s).Seconds())
}()
}

View File

@ -18,6 +18,7 @@ import (
mrand "math/rand/v2"
"os"
"slices"
"sort"
"strings"
"sync"
"testing"
@ -27,12 +28,9 @@ import (
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
"github.com/letsencrypt/boulder/ctpolicy/loglist"
"github.com/letsencrypt/boulder/goodkey"
"github.com/letsencrypt/boulder/goodkey/sagoodkey"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/linter"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/policy"
@ -53,10 +51,7 @@ var (
func init() {
var err error
pa, err = policy.New(
map[identifier.IdentifierType]bool{identifier.TypeDNS: true, identifier.TypeIP: true},
map[core.AcmeChallenge]bool{},
blog.NewMock())
pa, err = policy.New(map[core.AcmeChallenge]bool{}, blog.NewMock())
if err != nil {
log.Fatal(err)
}
@ -71,8 +66,8 @@ func init() {
}
func BenchmarkCheckCert(b *testing.B) {
checker := newChecker(nil, clock.New(), pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock())
testKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
checker := newChecker(nil, clock.New(), pa, kp, time.Hour, testValidityDurations, blog.NewMock())
testKey, _ := rsa.GenerateKey(rand.Reader, 1024)
expiry := time.Now().AddDate(0, 0, 1)
serial := big.NewInt(1337)
rawCert := x509.Certificate{
@ -84,16 +79,16 @@ func BenchmarkCheckCert(b *testing.B) {
SerialNumber: serial,
}
certDer, _ := x509.CreateCertificate(rand.Reader, &rawCert, &rawCert, &testKey.PublicKey, testKey)
cert := &corepb.Certificate{
cert := core.Certificate{
Serial: core.SerialToString(serial),
Digest: core.Fingerprint256(certDer),
Der: certDer,
Issued: timestamppb.New(time.Now()),
Expires: timestamppb.New(expiry),
DER: certDer,
Issued: time.Now(),
Expires: expiry,
}
b.ResetTimer()
for range b.N {
checker.checkCert(context.Background(), cert)
checker.checkCert(context.Background(), cert, nil)
}
}
@ -107,7 +102,7 @@ func TestCheckWildcardCert(t *testing.T) {
testKey, _ := rsa.GenerateKey(rand.Reader, 2048)
fc := clock.NewFake()
checker := newChecker(saDbMap, fc, pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock())
checker := newChecker(saDbMap, fc, pa, kp, time.Hour, testValidityDurations, blog.NewMock())
issued := checker.clock.Now().Add(-time.Minute)
goodExpiry := issued.Add(testValidityDuration - time.Second)
serial := big.NewInt(1337)
@ -130,27 +125,27 @@ func TestCheckWildcardCert(t *testing.T) {
test.AssertNotError(t, err, "Couldn't create certificate")
parsed, err := x509.ParseCertificate(wildcardCertDer)
test.AssertNotError(t, err, "Couldn't parse created certificate")
cert := &corepb.Certificate{
cert := core.Certificate{
Serial: core.SerialToString(serial),
Digest: core.Fingerprint256(wildcardCertDer),
Expires: timestamppb.New(parsed.NotAfter),
Issued: timestamppb.New(parsed.NotBefore),
Der: wildcardCertDer,
Expires: parsed.NotAfter,
Issued: parsed.NotBefore,
DER: wildcardCertDer,
}
_, problems := checker.checkCert(context.Background(), cert)
_, problems := checker.checkCert(context.Background(), cert, nil)
for _, p := range problems {
t.Error(p)
}
}
func TestCheckCertReturnsSANs(t *testing.T) {
func TestCheckCertReturnsDNSNames(t *testing.T) {
saDbMap, err := sa.DBMapForTest(vars.DBConnSA)
test.AssertNotError(t, err, "Couldn't connect to database")
saCleanup := test.ResetBoulderTestDatabase(t)
defer func() {
saCleanup()
}()
checker := newChecker(saDbMap, clock.NewFake(), pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock())
checker := newChecker(saDbMap, clock.NewFake(), pa, kp, time.Hour, testValidityDurations, blog.NewMock())
certPEM, err := os.ReadFile("testdata/quite_invalid.pem")
if err != nil {
@ -162,16 +157,16 @@ func TestCheckCertReturnsSANs(t *testing.T) {
t.Fatal("failed to parse cert PEM")
}
cert := &corepb.Certificate{
cert := core.Certificate{
Serial: "00000000000",
Digest: core.Fingerprint256(block.Bytes),
Expires: timestamppb.New(time.Now().Add(time.Hour)),
Issued: timestamppb.New(time.Now()),
Der: block.Bytes,
Expires: time.Now().Add(time.Hour),
Issued: time.Now(),
DER: block.Bytes,
}
names, problems := checker.checkCert(context.Background(), cert)
if !slices.Equal(names, []string{"quite_invalid.com", "al--so--wr--ong.com", "127.0.0.1"}) {
names, problems := checker.checkCert(context.Background(), cert, nil)
if !slices.Equal(names, []string{"quite_invalid.com", "al--so--wr--ong.com"}) {
t.Errorf("didn't get expected DNS names. other problems: %s", strings.Join(problems, "\n"))
}
}
@ -217,7 +212,7 @@ func TestCheckCert(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
testKey, _ := tc.key.genKey()
checker := newChecker(saDbMap, clock.NewFake(), pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock())
checker := newChecker(saDbMap, clock.NewFake(), pa, kp, time.Hour, testValidityDurations, blog.NewMock())
// Create a RFC 7633 OCSP Must Staple Extension.
// OID 1.3.6.1.5.5.7.1.24
@ -267,14 +262,14 @@ func TestCheckCert(t *testing.T) {
// Serial doesn't match
// Expiry doesn't match
// Issued doesn't match
cert := &corepb.Certificate{
cert := core.Certificate{
Serial: "8485f2687eba29ad455ae4e31c8679206fec",
Der: brokenCertDer,
Issued: timestamppb.New(issued.Add(12 * time.Hour)),
Expires: timestamppb.New(goodExpiry.AddDate(0, 0, 2)), // Expiration doesn't match
DER: brokenCertDer,
Issued: issued.Add(12 * time.Hour),
Expires: goodExpiry.AddDate(0, 0, 2), // Expiration doesn't match
}
_, problems := checker.checkCert(context.Background(), cert)
_, problems := checker.checkCert(context.Background(), cert, nil)
problemsMap := map[string]int{
"Stored digest doesn't match certificate digest": 1,
@ -296,12 +291,12 @@ func TestCheckCert(t *testing.T) {
delete(problemsMap, p)
}
for k := range problemsMap {
t.Errorf("Expected problem but didn't find '%s' in problems: %q.", k, problems)
t.Errorf("Expected problem but didn't find it: '%s'.", k)
}
// Same settings as above, but the stored serial number in the DB is invalid.
cert.Serial = "not valid"
_, problems = checker.checkCert(context.Background(), cert)
_, problems = checker.checkCert(context.Background(), cert, nil)
foundInvalidSerialProblem := false
for _, p := range problems {
if p == "Stored serial is invalid" {
@ -323,10 +318,10 @@ func TestCheckCert(t *testing.T) {
test.AssertNotError(t, err, "Couldn't parse created certificate")
cert.Serial = core.SerialToString(serial)
cert.Digest = core.Fingerprint256(goodCertDer)
cert.Der = goodCertDer
cert.Expires = timestamppb.New(parsed.NotAfter)
cert.Issued = timestamppb.New(parsed.NotBefore)
_, problems = checker.checkCert(context.Background(), cert)
cert.DER = goodCertDer
cert.Expires = parsed.NotAfter
cert.Issued = parsed.NotBefore
_, problems = checker.checkCert(context.Background(), cert, nil)
test.AssertEquals(t, len(problems), 0)
})
}
@ -338,7 +333,7 @@ func TestGetAndProcessCerts(t *testing.T) {
fc := clock.NewFake()
fc.Set(fc.Now().Add(time.Hour))
checker := newChecker(saDbMap, fc, pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock())
checker := newChecker(saDbMap, fc, pa, kp, time.Hour, testValidityDurations, blog.NewMock())
sa, err := sa.NewSQLStorageAuthority(saDbMap, saDbMap, nil, 1, 0, fc, blog.NewMock(), metrics.NoopRegisterer)
test.AssertNotError(t, err, "Couldn't create SA to insert certificates")
saCleanUp := test.ResetBoulderTestDatabase(t)
@ -346,7 +341,7 @@ func TestGetAndProcessCerts(t *testing.T) {
saCleanUp()
}()
testKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
testKey, _ := rsa.GenerateKey(rand.Reader, 1024)
// Problems
// Expiry period is too long
rawCert := x509.Certificate{
@ -377,7 +372,7 @@ func TestGetAndProcessCerts(t *testing.T) {
test.AssertEquals(t, len(checker.certs), 5)
wg := new(sync.WaitGroup)
wg.Add(1)
checker.processCerts(context.Background(), wg, false)
checker.processCerts(context.Background(), wg, false, nil)
test.AssertEquals(t, checker.issuedReport.BadCerts, int64(5))
test.AssertEquals(t, len(checker.issuedReport.Entries), 5)
}
@ -401,6 +396,9 @@ func (db mismatchedCountDB) SelectNullInt(_ context.Context, _ string, _ ...inte
// `getCerts` then calls `Select` to retrieve the Certificate rows. We pull
// a dastardly switch-a-roo here and return an empty set
func (db mismatchedCountDB) Select(_ context.Context, output interface{}, _ string, _ ...interface{}) ([]interface{}, error) {
// But actually return nothing
outputPtr, _ := output.(*[]sa.CertWithID)
*outputPtr = []sa.CertWithID{}
return nil, nil
}
@ -429,7 +427,7 @@ func (db mismatchedCountDB) SelectOne(_ context.Context, _ interface{}, _ string
func TestGetCertsEmptyResults(t *testing.T) {
saDbMap, err := sa.DBMapForTest(vars.DBConnSA)
test.AssertNotError(t, err, "Couldn't connect to database")
checker := newChecker(saDbMap, clock.NewFake(), pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock())
checker := newChecker(saDbMap, clock.NewFake(), pa, kp, time.Hour, testValidityDurations, blog.NewMock())
checker.dbMap = mismatchedCountDB{}
batchSize = 3
@ -455,7 +453,7 @@ func (db emptyDB) SelectNullInt(_ context.Context, _ string, _ ...interface{}) (
// expected if the DB finds no certificates to match the SELECT query and
// should return an error.
func TestGetCertsNullResults(t *testing.T) {
checker := newChecker(emptyDB{}, clock.NewFake(), pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock())
checker := newChecker(emptyDB{}, clock.NewFake(), pa, kp, time.Hour, testValidityDurations, blog.NewMock())
err := checker.getCerts(context.Background())
test.AssertError(t, err, "Should have gotten error from empty DB")
@ -499,7 +497,7 @@ func TestGetCertsLate(t *testing.T) {
clk := clock.NewFake()
db := &lateDB{issuedTime: clk.Now().Add(-time.Hour)}
checkPeriod := 24 * time.Hour
checker := newChecker(db, clk, pa, kp, checkPeriod, testValidityDurations, nil, blog.NewMock())
checker := newChecker(db, clk, pa, kp, checkPeriod, testValidityDurations, blog.NewMock())
err := checker.getCerts(context.Background())
test.AssertNotError(t, err, "getting certs")
@ -584,22 +582,21 @@ func TestIgnoredLint(t *testing.T) {
err = loglist.InitLintList("../../test/ct-test-srv/log_list.json")
test.AssertNotError(t, err, "failed to load ct log list")
testKey, _ := rsa.GenerateKey(rand.Reader, 2048)
checker := newChecker(saDbMap, clock.NewFake(), pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock())
checker := newChecker(saDbMap, clock.NewFake(), pa, kp, time.Hour, testValidityDurations, blog.NewMock())
serial := big.NewInt(1337)
x509OID, err := x509.OIDFromInts([]uint64{1, 2, 3})
test.AssertNotError(t, err, "failed to create x509.OID")
template := &x509.Certificate{
Subject: pkix.Name{
CommonName: "CPU's Cool CA",
},
SerialNumber: serial,
NotBefore: time.Now(),
NotAfter: time.Now().Add(testValidityDuration - time.Second),
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
Policies: []x509.OID{x509OID},
SerialNumber: serial,
NotBefore: time.Now(),
NotAfter: time.Now().Add(testValidityDuration - time.Second),
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
PolicyIdentifiers: []asn1.ObjectIdentifier{
{1, 2, 3},
},
BasicConstraintsValid: true,
IsCA: true,
IssuingCertificateURL: []string{"http://aia.example.org"},
@ -626,46 +623,43 @@ func TestIgnoredLint(t *testing.T) {
subjectCert, err := x509.ParseCertificate(subjectCertDer)
test.AssertNotError(t, err, "failed to parse EE cert")
cert := &corepb.Certificate{
cert := core.Certificate{
Serial: core.SerialToString(serial),
Der: subjectCertDer,
DER: subjectCertDer,
Digest: core.Fingerprint256(subjectCertDer),
Issued: timestamppb.New(subjectCert.NotBefore),
Expires: timestamppb.New(subjectCert.NotAfter),
Issued: subjectCert.NotBefore,
Expires: subjectCert.NotAfter,
}
// Without any ignored lints we expect several errors and warnings about SCTs,
// the common name, and the subject key identifier extension.
// Without any ignored lints we expect one error level result due to the
// missing OCSP url in the template.
expectedProblems := []string{
"zlint error: e_sub_cert_aia_does_not_contain_ocsp_url",
"zlint warn: w_subject_common_name_included",
"zlint warn: w_ext_subject_key_identifier_not_recommended_subscriber",
"zlint info: w_ct_sct_policy_count_unsatisfied Certificate had 0 embedded SCTs. Browser policy may require 2 for this certificate.",
"zlint error: e_scts_from_same_operator Certificate had too few embedded SCTs; browser policy requires 2.",
}
slices.Sort(expectedProblems)
sort.Strings(expectedProblems)
// Check the certificate with a nil ignore map. This should return the
// expected zlint problems.
_, problems := checker.checkCert(context.Background(), cert)
slices.Sort(problems)
_, problems := checker.checkCert(context.Background(), cert, nil)
sort.Strings(problems)
test.AssertDeepEquals(t, problems, expectedProblems)
// Check the certificate again with an ignore map that excludes the affected
// lints. This should return no problems.
lints, err := linter.NewRegistry([]string{
"w_subject_common_name_included",
"w_ext_subject_key_identifier_not_recommended_subscriber",
"w_ct_sct_policy_count_unsatisfied",
"e_scts_from_same_operator",
_, problems = checker.checkCert(context.Background(), cert, map[string]bool{
"e_sub_cert_aia_does_not_contain_ocsp_url": true,
"w_subject_common_name_included": true,
"w_ct_sct_policy_count_unsatisfied": true,
"e_scts_from_same_operator": true,
})
test.AssertNotError(t, err, "creating test lint registry")
checker.lints = lints
_, problems = checker.checkCert(context.Background(), cert)
test.AssertEquals(t, len(problems), 0)
}
func TestPrecertCorrespond(t *testing.T) {
checker := newChecker(nil, clock.New(), pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock())
checker := newChecker(nil, clock.New(), pa, kp, time.Hour, testValidityDurations, blog.NewMock())
checker.getPrecert = func(_ context.Context, _ string) ([]byte, error) {
return []byte("hello"), nil
}
@ -681,14 +675,14 @@ func TestPrecertCorrespond(t *testing.T) {
SerialNumber: serial,
}
certDer, _ := x509.CreateCertificate(rand.Reader, &rawCert, &rawCert, &testKey.PublicKey, testKey)
cert := &corepb.Certificate{
cert := core.Certificate{
Serial: core.SerialToString(serial),
Digest: core.Fingerprint256(certDer),
Der: certDer,
Issued: timestamppb.New(time.Now()),
Expires: timestamppb.New(expiry),
DER: certDer,
Issued: time.Now(),
Expires: expiry,
}
_, problems := checker.checkCert(context.Background(), cert)
_, problems := checker.checkCert(context.Background(), cert, nil)
if len(problems) == 0 {
t.Errorf("expected precert correspondence problem")
}

View File

@ -1,5 +1,5 @@
-----BEGIN CERTIFICATE-----
MIIDWTCCAkGgAwIBAgIILgLqdMwyzT4wDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE
MIIDUzCCAjugAwIBAgIILgLqdMwyzT4wDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE
AxMVbWluaWNhIHJvb3QgY2EgOTMzZTM5MB4XDTIxMTExMTIwMjMzMloXDTIzMTIx
MTIwMjMzMlowHDEaMBgGA1UEAwwRcXVpdGVfaW52YWxpZC5jb20wggEiMA0GCSqG
SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDi4jBbqMyvhMonDngNsvie9SHPB16mdpiy
@ -7,14 +7,14 @@ Y/agreU84xUz/roKK07TpVmeqvwWvDkvHTFov7ytKdnCY+z/NXKJ3hNqflWCwU7h
Uk9TmpBp0vg+5NvalYul/+bq/B4qDhEvTBzAX3k/UYzd0GQdMyAbwXtG41f5cSK6
cWTQYfJL3gGR5/KLoTz3/VemLgEgAP/CvgcUJPbQceQViiZ4opi9hFIfUqxX2NsD
49klw8cDFu/BG2LEC+XtbdT8XevD0aGIOuYVr+Pa2mxb2QCDXu4tXOsDXH9Y/Cmk
8103QbdB8Y+usOiHG/IXxK2q4J7QNPal4ER4/PGA06V0gwrjNH8BAgMBAAGjgZow
gZcwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcD
8103QbdB8Y+usOiHG/IXxK2q4J7QNPal4ER4/PGA06V0gwrjNH8BAgMBAAGjgZQw
gZEwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcD
AjAMBgNVHRMBAf8EAjAAMB8GA1UdIwQYMBaAFNIcaCjv32YRafE065dZO57ONWuk
MDcGA1UdEQQwMC6CEXF1aXRlX2ludmFsaWQuY29tghNhbC0tc28tLXdyLS1vbmcu
Y29thwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQAjSv0o5G4VuLnnwHON4P53bLvG
nYqaqYjuTEafi3hSgHAfBuhOQUVgwujoYpPp1w1fm5spfcbSwNNRte79HgV97kAu
Z4R4RHk15Xux1ITLalaHR/ilu002N0eJ7dFYawBgV2xMudULzohwmW2RjPJ5811i
WwtiVf1bA3V5SZJWSJll1BhANBs7R0pBbyTSNHR470N8TGG0jfXqgTKd0xZaH91H
rwEMo+96llbfp90Y5OfHIfym/N1sH2hVgd+ZAkhiVEiNBWZlbSyOgbZ1cCBvBXg6
TuwpQMZK9RWjlpni8yuzLGduPl8qHG1dqsUvbVqcG+WhHLbaZMNhiMfiWInL
MDEGA1UdEQQqMCiCEXF1aXRlX2ludmFsaWQuY29tghNhbC0tc28tLXdyLS1vbmcu
Y29tMA0GCSqGSIb3DQEBCwUAA4IBAQAjSv0o5G4VuLnnwHON4P53bLvGnYqaqYju
TEafi3hSgHAfBuhOQUVgwujoYpPp1w1fm5spfcbSwNNRte79HgV97kAuZ4R4RHk1
5Xux1ITLalaHR/ilu002N0eJ7dFYawBgV2xMudULzohwmW2RjPJ5811iWwtiVf1b
A3V5SZJWSJll1BhANBs7R0pBbyTSNHR470N8TGG0jfXqgTKd0xZaH91HrwEMo+96
llbfp90Y5OfHIfym/N1sH2hVgd+ZAkhiVEiNBWZlbSyOgbZ1cCBvBXg6TuwpQMZK
9RWjlpni8yuzLGduPl8qHG1dqsUvbVqcG+WhHLbaZMNhiMfiWInL
-----END CERTIFICATE-----

View File

@ -3,7 +3,6 @@ package cmd
import (
"crypto/tls"
"crypto/x509"
"encoding/hex"
"errors"
"fmt"
"net"
@ -16,7 +15,6 @@ import (
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/identifier"
)
// PasswordConfig contains a path to a file containing a password.
@ -89,8 +87,6 @@ func (d *DBConfig) URL() (string, error) {
return strings.TrimSpace(string(url)), err
}
// SMTPConfig is deprecated.
// TODO(#8199): Delete this when it is removed from bad-key-revoker's config.
type SMTPConfig struct {
PasswordConfig
Server string `validate:"required"`
@ -102,9 +98,8 @@ type SMTPConfig struct {
// database, what policies it should enforce, and what challenges
// it should offer.
type PAConfig struct {
DBConfig `validate:"-"`
Challenges map[core.AcmeChallenge]bool `validate:"omitempty,dive,keys,oneof=http-01 dns-01 tls-alpn-01,endkeys"`
Identifiers map[identifier.IdentifierType]bool `validate:"omitempty,dive,keys,oneof=dns ip,endkeys"`
DBConfig `validate:"-"`
Challenges map[core.AcmeChallenge]bool `validate:"omitempty,dive,keys,oneof=http-01 dns-01 tls-alpn-01,endkeys"`
}
// CheckChallenges checks whether the list of challenges in the PA config
@ -121,17 +116,6 @@ func (pc PAConfig) CheckChallenges() error {
return nil
}
// CheckIdentifiers checks whether the list of identifiers in the PA config
// actually contains valid identifier type names
func (pc PAConfig) CheckIdentifiers() error {
for i := range pc.Identifiers {
if !i.IsValid() {
return fmt.Errorf("invalid identifier type in PA config: %s", i)
}
}
return nil
}
// HostnamePolicyConfig specifies a file from which to load a policy regarding
// what hostnames to issue for.
type HostnamePolicyConfig struct {
@ -299,7 +283,7 @@ type GRPCClientConfig struct {
// If you've added the above to your Consul configuration file (and reloaded
// Consul) then you should be able to resolve the following dig query:
//
// $ dig @10.77.77.10 -t SRV _foo._tcp.service.consul +short
// $ dig @10.55.55.10 -t SRV _foo._tcp.service.consul +short
// 1 1 8080 0a585858.addr.dc1.consul.
// 1 1 8080 0a4d4d4d.addr.dc1.consul.
SRVLookup *ServiceDomain `validate:"required_without_all=SRVLookups ServerAddress ServerIPAddresses"`
@ -339,7 +323,7 @@ type GRPCClientConfig struct {
// If you've added the above to your Consul configuration file (and reloaded
// Consul) then you should be able to resolve the following dig query:
//
// $ dig A @10.77.77.10 foo.service.consul +short
// $ dig A @10.55.55.10 foo.service.consul +short
// 10.77.77.77
// 10.88.88.88
ServerAddress string `validate:"required_without_all=ServerIPAddresses SRVLookup SRVLookups,omitempty,hostname_port"`
@ -465,7 +449,7 @@ type GRPCServerConfig struct {
// These service names must match the service names advertised by gRPC itself,
// which are identical to the names set in our gRPC .proto files prefixed by
// the package names set in those files (e.g. "ca.CertificateAuthority").
Services map[string]*GRPCServiceConfig `json:"services" validate:"required,dive,required"`
Services map[string]GRPCServiceConfig `json:"services" validate:"required,dive,required"`
// MaxConnectionAge specifies how long a connection may live before the server sends a GoAway to the
// client. Because gRPC connections re-resolve DNS after a connection close,
// this controls how long it takes before a client learns about changes to its
@ -476,10 +460,10 @@ type GRPCServerConfig struct {
// GRPCServiceConfig contains the information needed to configure a gRPC service.
type GRPCServiceConfig struct {
// ClientNames is the list of accepted gRPC client certificate SANs.
// Connections from clients not in this list will be rejected by the
// upstream listener, and RPCs from unlisted clients will be denied by the
// server interceptor.
// PerServiceClientNames is a map of gRPC service names to client certificate
// SANs. The upstream listening server will reject connections from clients
// which do not appear in this list, and the server interceptor will reject
// RPC calls for this service from clients which are not listed here.
ClientNames []string `json:"clientNames" validate:"min=1,dive,hostname,required"`
}
@ -564,38 +548,33 @@ type DNSProvider struct {
// If you've added the above to your Consul configuration file (and reloaded
// Consul) then you should be able to resolve the following dig query:
//
// $ dig @10.77.77.10 -t SRV _unbound._udp.service.consul +short
// $ dig @10.55.55.10 -t SRV _unbound._udp.service.consul +short
// 1 1 8053 0a4d4d4d.addr.dc1.consul.
// 1 1 8153 0a4d4d4d.addr.dc1.consul.
SRVLookup ServiceDomain `validate:"required"`
}
// HMACKeyConfig specifies a path to a file containing a hexadecimal-encoded
// HMAC key. The key must represent exactly 256 bits (32 bytes) of random data
// to be suitable for use as a 256-bit hashing key (e.g., the output of `openssl
// rand -hex 32`).
// HMACKeyConfig specifies a path to a file containing an HMAC key. The key must
// consist of 256 bits of random data to be suitable for use as a 256-bit
// hashing key (e.g., the output of `openssl rand -hex 32`).
type HMACKeyConfig struct {
KeyFile string `validate:"required"`
}
// Load reads the HMAC key from the file, decodes it from hexadecimal, ensures
// it represents exactly 256 bits (32 bytes), and returns it as a byte slice.
// Load loads the HMAC key from the file, ensures it is exactly 32 characters
// in length, and returns it as a byte slice.
func (hc *HMACKeyConfig) Load() ([]byte, error) {
contents, err := os.ReadFile(hc.KeyFile)
if err != nil {
return nil, err
}
trimmed := strings.TrimRight(string(contents), "\n")
decoded, err := hex.DecodeString(strings.TrimSpace(string(contents)))
if err != nil {
return nil, fmt.Errorf("invalid hexadecimal encoding: %w", err)
}
if len(decoded) != 32 {
if len(trimmed) != 32 {
return nil, fmt.Errorf(
"validating HMAC key, must be exactly 256 bits (32 bytes) after decoding, got %d",
len(decoded),
"validating unpauseHMACKey, length must be 32 alphanumeric characters, got %d",
len(trimmed),
)
}
return decoded, nil
return []byte(trimmed), nil
}

View File

@ -136,58 +136,3 @@ func TestTLSConfigLoad(t *testing.T) {
})
}
}
// TestHMACKeyConfigLoad verifies that Load accepts only files whose contents
// hex-decode to exactly 32 bytes, and rejects empty, too-short, and too-long
// inputs.
func TestHMACKeyConfigLoad(t *testing.T) {
	t.Parallel()

	cases := []struct {
		name    string
		content string
		wantErr bool
	}{
		{"Valid key", "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", false},
		{"Empty file", "", true},
		{"Just under 256-bit", "0123456789abcdef0123456789abcdef0123456789abcdef0123456789ab", true},
		{"Just over 256-bit", "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01", true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			keyFile, err := os.CreateTemp("", "*")
			if err != nil {
				t.Fatalf("failed to create temp file: %v", err)
			}
			defer os.Remove(keyFile.Name())

			if _, err := keyFile.WriteString(tc.content); err != nil {
				t.Fatalf("failed to write to temp file: %v", err)
			}
			keyFile.Close()

			_, err = (&HMACKeyConfig{KeyFile: keyFile.Name()}).Load()
			if gotErr := err != nil; gotErr != tc.wantErr {
				t.Errorf("expected error: %v, got: %v", tc.wantErr, err)
			}
		})
	}
}

View File

@ -0,0 +1,84 @@
# Contact-Auditor
Audits subscriber registrations for e-mail addresses that
`notify-mailer` is currently configured to skip.
# Usage:
```shell
-config string
File containing a JSON config.
-to-file
Write the audit results to a file.
-to-stdout
Print the audit results to stdout.
```
## Results format:
```
<id> <createdAt> <problem type> "<contact contents or entry>" "<error msg>"
```
## Example output:
### Successful run with no violations encountered and `--to-file`:
```
I004823 contact-auditor nfWK_gM Running contact-auditor
I004823 contact-auditor qJ_zsQ4 Beginning database query
I004823 contact-auditor je7V9QM Query completed successfully
I004823 contact-auditor 7LzGvQI Audit finished successfully
I004823 contact-auditor 5Pbk_QM Audit results were written to: contact-audit-2006-01-02T15:04.tsv
```
### Contact contains entries that violate policy and `--to-stdout`:
```
I004823 contact-auditor nfWK_gM Running contact-auditor
I004823 contact-auditor qJ_zsQ4 Beginning database query
I004823 contact-auditor je7V9QM Query completed successfully
1 2006-01-02 15:04:05 validation "<contact entry>" "<error msg>"
...
I004823 contact-auditor 2fv7-QY Audit finished successfully
```
### Contact is not valid JSON and `--to-stdout`:
```
I004823 contact-auditor nfWK_gM Running contact-auditor
I004823 contact-auditor qJ_zsQ4 Beginning database query
I004823 contact-auditor je7V9QM Query completed successfully
3 2006-01-02 15:04:05 unmarshal "<contact contents>" "<error msg>"
...
I004823 contact-auditor 2fv7-QY Audit finished successfully
```
### Audit incomplete, query ended prematurely:
```
I004823 contact-auditor nfWK_gM Running contact-auditor
I004823 contact-auditor qJ_zsQ4 Beginning database query
...
E004823 contact-auditor 8LmTgww [AUDIT] Audit was interrupted, results may be incomplete: <error msg>
exit status 1
```
# Configuration file:
The path to a database config file like the one below must be provided
following the `-config` flag.
```json
{
"contactAuditor": {
"db": {
"dbConnectFile": <string>,
"maxOpenConns": <int>,
"maxIdleConns": <int>,
"connMaxLifetime": <int>,
"connMaxIdleTime": <int>
}
}
}
```

212
cmd/contact-auditor/main.go Normal file
View File

@ -0,0 +1,212 @@
package notmain
import (
"context"
"database/sql"
"encoding/json"
"errors"
"flag"
"fmt"
"os"
"strings"
"time"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/db"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/policy"
"github.com/letsencrypt/boulder/sa"
)
// contactAuditor holds the dependencies and output settings for a single
// audit pass over the registrations table.
type contactAuditor struct {
	// db is the database handle used to stream registration rows.
	db *db.WrappedMap
	// resultsFile, when non-nil, receives a copy of each audit result.
	resultsFile *os.File
	// writeToStdout, when true, prints each audit result to stdout.
	writeToStdout bool
	// logger records progress and write failures.
	logger blog.Logger
}
// result carries one audited registration row; it is only delivered on the
// optional channel passed to run(), which tests use to observe the audit.
type result struct {
	// id is the registrations.id column.
	id int64
	// contacts is the parsed JSON array from the contact column.
	contacts []string
	// createdAt is the registrations.createdAt column, as scanned.
	createdAt string
}
// unmarshalContact parses the raw contact column value (a JSON array of
// strings) into a slice of contact URLs, returning an error if the bytes are
// not valid JSON of that shape.
func unmarshalContact(contact []byte) ([]string, error) {
	var parsed []string
	if err := json.Unmarshal(contact, &parsed); err != nil {
		return nil, err
	}
	return parsed, nil
}
// validateContacts checks each contact URL against policy: it must carry a
// "mailto:" prefix and the address must pass policy.ValidEmail. Every problem
// found is collected into a tab-separated report; if any problems were found
// the whole report is returned as a single error, otherwise nil.
func validateContacts(id int64, createdAt string, contacts []string) error {
	// Accumulates one TSV line per policy violation.
	var report strings.Builder

	// addProblem records a single violation for this registration.
	addProblem := func(contact, problem string) {
		fmt.Fprintf(&report, "%d\t%s\tvalidation\t%q\t%q\t%q\n", id, createdAt, contact, problem, contacts)
	}

	for _, contact := range contacts {
		if !strings.HasPrefix(contact, "mailto:") {
			addProblem(contact, "missing 'mailto:' prefix")
			continue
		}
		if err := policy.ValidEmail(strings.TrimPrefix(contact, "mailto:")); err != nil {
			addProblem(contact, err.Error())
		}
	}

	if report.Len() > 0 {
		return errors.New(report.String())
	}
	return nil
}
// beginAuditQuery executes the audit query and returns a cursor used to
// stream the results. Rows whose contact column is an empty JSON array or
// JSON null are excluded up front.
func (c contactAuditor) beginAuditQuery(ctx context.Context) (*sql.Rows, error) {
	return c.db.QueryContext(ctx, `
		SELECT DISTINCT id, contact, createdAt
		FROM registrations
		WHERE contact NOT IN ('[]', 'null');`)
}
// writeResults emits a single audit result to each configured destination:
// stdout when writeToStdout is set, and the results file when it is non-nil.
// Write failures are logged but never abort the audit.
func (c contactAuditor) writeResults(result string) {
	if c.writeToStdout {
		if _, err := fmt.Print(result); err != nil {
			c.logger.Errf("Error while writing result to stdout: %s", err)
		}
	}

	if c.resultsFile != nil {
		if _, err := c.resultsFile.WriteString(result); err != nil {
			c.logger.Errf("Error while writing result to file: %s", err)
		}
	}
}
// run retrieves a cursor from `beginAuditQuery` and then audits the
// `contact` column of all returned rows for abnormalities or policy
// violations. Each problem is written via writeResults; the audit keeps
// going after per-row problems and only returns an error when the query
// itself fails or is interrupted.
func (c contactAuditor) run(ctx context.Context, resChan chan *result) error {
	c.logger.Infof("Beginning database query")
	rows, err := c.beginAuditQuery(ctx)
	if err != nil {
		return err
	}
	for rows.Next() {
		var id int64
		var contact []byte
		var createdAt string
		err := rows.Scan(&id, &contact, &createdAt)
		if err != nil {
			return err
		}
		contacts, err := unmarshalContact(contact)
		if err != nil {
			c.writeResults(fmt.Sprintf("%d\t%s\tunmarshal\t%q\t%q\n", id, createdAt, contact, err))
		}
		// Note: when unmarshaling failed, contacts is nil and this
		// validation is a no-op; the unmarshal result above is the record.
		err = validateContacts(id, createdAt, contacts)
		if err != nil {
			c.writeResults(err.Error())
		}
		// Only used for testing.
		if resChan != nil {
			resChan <- &result{id, contacts, createdAt}
		}
	}
	// Ensure the query wasn't interrupted before it could complete.
	err = rows.Close()
	if err != nil {
		return err
	} else {
		c.logger.Info("Query completed successfully")
	}
	// Only used for testing.
	if resChan != nil {
		close(resChan)
	}
	return nil
}
// Config is the JSON configuration for the contact-auditor command.
type Config struct {
	ContactAuditor struct {
		// DB holds the database connection settings.
		DB cmd.DBConfig
	}
}
// main is the entrypoint for the contact-auditor command. It loads the JSON
// config named by -config, connects to the database, audits every
// registration's contact column, and writes results to stdout and/or a
// timestamped .tsv file depending on the -to-stdout / -to-file flags.
func main() {
	configFile := flag.String("config", "", "File containing a JSON config.")
	writeToStdout := flag.Bool("to-stdout", false, "Print the audit results to stdout.")
	writeToFile := flag.Bool("to-file", false, "Write the audit results to a file.")
	flag.Parse()
	logger := cmd.NewLogger(cmd.SyslogConfig{StdoutLevel: 7})
	logger.Info(cmd.VersionString())
	if *configFile == "" {
		flag.Usage()
		os.Exit(1)
	}
	// Load config from JSON.
	configData, err := os.ReadFile(*configFile)
	cmd.FailOnError(err, fmt.Sprintf("Error reading config file: %q", *configFile))
	var cfg Config
	err = json.Unmarshal(configData, &cfg)
	cmd.FailOnError(err, "Couldn't unmarshal config")
	db, err := sa.InitWrappedDb(cfg.ContactAuditor.DB, nil, logger)
	cmd.FailOnError(err, "Couldn't setup database client")
	// The results file is only created when -to-file was requested.
	var resultsFile *os.File
	if *writeToFile {
		resultsFile, err = os.Create(
			fmt.Sprintf("contact-audit-%s.tsv", time.Now().Format("2006-01-02T15:04")),
		)
		cmd.FailOnError(err, "Failed to create results file")
	}
	// Setup and run contact-auditor.
	auditor := contactAuditor{
		db:            db,
		resultsFile:   resultsFile,
		writeToStdout: *writeToStdout,
		logger:        logger,
	}
	logger.Info("Running contact-auditor")
	err = auditor.run(context.TODO(), nil)
	cmd.FailOnError(err, "Audit was interrupted, results may be incomplete")
	logger.Info("Audit finished successfully")
	if *writeToFile {
		logger.Infof("Audit results were written to: %s", resultsFile.Name())
		resultsFile.Close()
	}
}
// init registers this command with boulder's command dispatcher so it can be
// invoked as "contact-auditor", with config validation against Config.
func init() {
	cmd.RegisterCommand("contact-auditor", main, &cmd.ConfigValidator{Config: &Config{}})
}

View File

@ -0,0 +1,219 @@
package notmain
import (
"context"
"fmt"
"net"
"os"
"strings"
"testing"
"time"
"github.com/jmhodges/clock"
corepb "github.com/letsencrypt/boulder/core/proto"
"github.com/letsencrypt/boulder/db"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/sa"
"github.com/letsencrypt/boulder/test"
"github.com/letsencrypt/boulder/test/vars"
)
var (
	// Registrations inserted by addRegistrations; reassigned to the stored
	// copies so tests can match streamed results by DB-assigned ID.
	regA *corepb.Registration
	regB *corepb.Registration
	regC *corepb.Registration
	regD *corepb.Registration
)

const (
	// Raw contact values used to build the test registrations; telNum is
	// deliberately not an email address.
	emailARaw = "test@example.com"
	emailBRaw = "example@notexample.com"
	emailCRaw = "test-example@notexample.com"
	telNum    = "666-666-7777"
)
// TestContactAuditor runs the auditor end-to-end against a seeded test
// database (four registrations with mailto: and tel: contacts) and checks
// both the streamed results channel and the tab-separated results file.
func TestContactAuditor(t *testing.T) {
	testCtx := setup(t)
	defer testCtx.cleanUp()
	// Add some test registrations.
	testCtx.addRegistrations(t)
	resChan := make(chan *result, 10)
	err := testCtx.c.run(context.Background(), resChan)
	test.AssertNotError(t, err, "received error")
	// We should get back A, B, C, and D
	test.AssertEquals(t, len(resChan), 4)
	for entry := range resChan {
		err := validateContacts(entry.id, entry.createdAt, entry.contacts)
		switch entry.id {
		case regA.Id:
			// Contact validation policy sad path.
			test.AssertDeepEquals(t, entry.contacts, []string{"mailto:test@example.com"})
			test.AssertError(t, err, "failed to error on a contact that violates our e-mail policy")
		case regB.Id:
			// Ensure grace period was respected.
			test.AssertDeepEquals(t, entry.contacts, []string{"mailto:example@notexample.com"})
			test.AssertNotError(t, err, "received error for a valid contact entry")
		case regC.Id:
			// Contact validation happy path.
			test.AssertDeepEquals(t, entry.contacts, []string{"mailto:test-example@notexample.com"})
			test.AssertNotError(t, err, "received error for a valid contact entry")
			// Unmarshal Contact sad path.
			_, err := unmarshalContact([]byte("[ mailto:test@example.com ]"))
			test.AssertError(t, err, "failed to error while unmarshaling invalid Contact JSON")
			// Fix our JSON and ensure that the contact field returns
			// errors for our 2 additional contacts
			contacts, err := unmarshalContact([]byte(`[ "mailto:test@example.com", "tel:666-666-7777" ]`))
			test.AssertNotError(t, err, "received error while unmarshaling valid Contact JSON")
			// Ensure Contact validation now fails.
			err = validateContacts(entry.id, entry.createdAt, contacts)
			test.AssertError(t, err, "failed to error on 2 invalid Contact entries")
		case regD.Id:
			// tel: contacts violate the mailto-only policy.
			test.AssertDeepEquals(t, entry.contacts, []string{"tel:666-666-7777"})
			test.AssertError(t, err, "failed to error on an invalid contact entry")
		default:
			t.Errorf("ID: %d was not expected", entry.id)
		}
	}
	// Load results file.
	data, err := os.ReadFile(testCtx.c.resultsFile.Name())
	if err != nil {
		t.Error(err)
	}
	// Results file should contain 2 newlines, 1 for each result.
	contentLines := strings.Split(strings.TrimRight(string(data), "\n"), "\n")
	test.AssertEquals(t, len(contentLines), 2)
	// Each result entry should contain six tab separated columns.
	for _, line := range contentLines {
		test.AssertEquals(t, len(strings.Split(line, "\t")), 6)
	}
}
// testCtx bundles the auditor under test, database handles, and a cleanup
// function that resets the test database and removes the temp results file.
type testCtx struct {
	// c is the contactAuditor wired to a temp results file.
	c contactAuditor
	// dbMap is a full-permission handle used for seeding test data.
	dbMap *db.WrappedMap
	// ssa is used to insert test registrations.
	ssa *sa.SQLStorageAuthority
	// cleanUp resets the database and removes the results file.
	cleanUp func()
}
// addRegistrations inserts four registrations via the storage authority:
// regA (mailto contact that fails email policy per TestContactAuditor),
// regB and regC (valid mailto contacts), and regD (a tel: contact). The
// package-level regA..regD are reassigned to the stored copies so tests can
// match on the DB-assigned IDs.
func (tc testCtx) addRegistrations(t *testing.T) {
	emailA := "mailto:" + emailARaw
	emailB := "mailto:" + emailBRaw
	emailC := "mailto:" + emailCRaw
	tel := "tel:" + telNum
	// Every registration needs a unique JOSE key
	jsonKeyA := []byte(`{
  "kty":"RSA",
  "n":"0vx7agoebGcQSuuPiLJXZptN9nndrQmbXEps2aiAFbWhM78LhWx4cbbfAAtVT86zwu1RK7aPFFxuhDR1L6tSoc_BJECPebWKRXjBZCiFV4n3oknjhMstn64tZ_2W-5JsGY4Hc5n9yBXArwl93lqt7_RN5w6Cf0h4QyQ5v-65YGjQR0_FDW2QvzqY368QQMicAtaSqzs8KJZgnYb9c7d0zgdAZHzu6qMQvRL5hajrn1n91CbOpbISD08qNLyrdkt-bFTWhAI4vMQFh6WeZu0fM4lFd2NcRwr3XPksINHaQ-G_xBniIqbw0Ls1jF44-csFCur-kEgU8awapJzKnqDKgw",
  "e":"AQAB"
}`)
	jsonKeyB := []byte(`{
  "kty":"RSA",
  "n":"z8bp-jPtHt4lKBqepeKF28g_QAEOuEsCIou6sZ9ndsQsEjxEOQxQ0xNOQezsKa63eogw8YS3vzjUcPP5BJuVzfPfGd5NVUdT-vSSwxk3wvk_jtNqhrpcoG0elRPQfMVsQWmxCAXCVRz3xbcFI8GTe-syynG3l-g1IzYIIZVNI6jdljCZML1HOMTTW4f7uJJ8mM-08oQCeHbr5ejK7O2yMSSYxW03zY-Tj1iVEebROeMv6IEEJNFSS4yM-hLpNAqVuQxFGetwtwjDMC1Drs1dTWrPuUAAjKGrP151z1_dE74M5evpAhZUmpKv1hY-x85DC6N0hFPgowsanmTNNiV75w",
  "e":"AAEAAQ"
}`)
	jsonKeyC := []byte(`{
  "kty":"RSA",
  "n":"rFH5kUBZrlPj73epjJjyCxzVzZuV--JjKgapoqm9pOuOt20BUTdHqVfC2oDclqM7HFhkkX9OSJMTHgZ7WaVqZv9u1X2yjdx9oVmMLuspX7EytW_ZKDZSzL-sCOFCuQAuYKkLbsdcA3eHBK_lwc4zwdeHFMKIulNvLqckkqYB9s8GpgNXBDIQ8GjR5HuJke_WUNjYHSd8jY1LU9swKWsLQe2YoQUz_ekQvBvBCoaFEtrtRaSJKNLIVDObXFr2TLIiFiM0Em90kK01-eQ7ZiruZTKomll64bRFPoNo4_uwubddg3xTqur2vdF3NyhTrYdvAgTem4uC0PFjEQ1bK_djBQ",
  "e":"AQAB"
}`)
	jsonKeyD := []byte(`{
  "kty":"RSA",
  "n":"rFH5kUBZrlPj73epjJjyCxzVzZuV--JjKgapoqm9pOuOt20BUTdHqVfC2oDclqM7HFhkkX9OSJMTHgZ7WaVqZv9u1X2yjdx9oVmMLuspX7EytW_ZKDZSzL-FCOFCuQAuYKkLbsdcA3eHBK_lwc4zwdeHFMKIulNvLqckkqYB9s8GpgNXBDIQ8GjR5HuJke_WUNjYHSd8jY1LU9swKWsLQe2YoQUz_ekQvBvBCoaFEtrtRaSJKNLIVDObXFr2TLIiFiM0Em90kK01-eQ7ZiruZTKomll64bRFPoNo4_uwubddg3xTqur2vdF3NyhTrYdvAgTem4uC0PFjEQ1bK_djBQ",
  "e":"AQAB"
}`)
	initialIP, err := net.ParseIP("127.0.0.1").MarshalText()
	test.AssertNotError(t, err, "Couldn't create initialIP")
	regA = &corepb.Registration{
		Id:        1,
		Contact:   []string{emailA},
		Key:       jsonKeyA,
		InitialIP: initialIP,
	}
	regB = &corepb.Registration{
		Id:        2,
		Contact:   []string{emailB},
		Key:       jsonKeyB,
		InitialIP: initialIP,
	}
	regC = &corepb.Registration{
		Id:        3,
		Contact:   []string{emailC},
		Key:       jsonKeyC,
		InitialIP: initialIP,
	}
	// Reg D has a `tel:` contact ACME URL
	regD = &corepb.Registration{
		Id:        4,
		Contact:   []string{tel},
		Key:       jsonKeyD,
		InitialIP: initialIP,
	}
	// Add the four test registrations
	ctx := context.Background()
	regA, err = tc.ssa.NewRegistration(ctx, regA)
	test.AssertNotError(t, err, "Couldn't store regA")
	regB, err = tc.ssa.NewRegistration(ctx, regB)
	test.AssertNotError(t, err, "Couldn't store regB")
	regC, err = tc.ssa.NewRegistration(ctx, regC)
	test.AssertNotError(t, err, "Couldn't store regC")
	regD, err = tc.ssa.NewRegistration(ctx, regD)
	test.AssertNotError(t, err, "Couldn't store regD")
}
// setup builds a testCtx: a full-permission DB map plus storage authority
// for seeding registrations, a mailer-permission DB handle for the auditor
// itself, and a temp results file (closed and removed by cleanUp).
func setup(t *testing.T) testCtx {
	log := blog.UseMock()
	// Using DBConnSAFullPerms to be able to insert registrations and
	// certificates
	dbMap, err := sa.DBMapForTest(vars.DBConnSAFullPerms)
	if err != nil {
		t.Fatalf("Couldn't connect to the database: %s", err)
	}
	// Make temp results file
	file, err := os.CreateTemp("", fmt.Sprintf("audit-%s", time.Now().Format("2006-01-02T15:04")))
	if err != nil {
		t.Fatal(err)
	}
	cleanUp := func() {
		test.ResetBoulderTestDatabase(t)
		file.Close()
		os.Remove(file.Name())
	}
	// The auditor itself runs with the narrower mailer DB permissions.
	db, err := sa.DBMapForTest(vars.DBConnSAMailer)
	if err != nil {
		t.Fatalf("Couldn't connect to the database: %s", err)
	}
	ssa, err := sa.NewSQLStorageAuthority(dbMap, dbMap, nil, 1, 0, clock.New(), log, metrics.NoopRegisterer)
	if err != nil {
		t.Fatalf("unable to create SQLStorageAuthority: %s", err)
	}
	return testCtx{
		c: contactAuditor{
			db:          db,
			resultsFile: file,
			logger:      blog.NewMock(),
		},
		dbMap:   dbMap,
		ssa:     ssa,
		cleanUp: cleanUp,
	}
}

View File

@ -56,12 +56,33 @@ type Config struct {
// recovering from an outage to ensure continuity of coverage.
LookbackPeriod config.Duration `validate:"-"`
// CertificateLifetime is the validity period (usually expressed in hours,
// like "2160h") of the longest-lived currently-unexpired certificate. For
// Let's Encrypt, this is usually ninety days. If the validity period of
// the issued certificates ever changes upwards, this value must be updated
// immediately; if the validity period of the issued certificates ever
// changes downwards, the value must not change until after all certificates with
// the old validity period have expired.
// Deprecated: This config value is no longer used.
// TODO(#6438): Remove this value.
CertificateLifetime config.Duration `validate:"-"`
	// UpdatePeriod controls how frequently the crl-updater runs and publishes
	// new versions of every CRL shard. The Baseline Requirements, Section
	// 4.9.7, state that this MUST NOT be more than 7 days. We believe that
	// future updates may require that this not be more than 24 hours, and
	// currently recommend an UpdatePeriod of 6 hours.
UpdatePeriod config.Duration
// UpdateOffset controls the times at which crl-updater runs, to avoid
// scheduling the batch job at exactly midnight. The updater runs every
// UpdatePeriod, starting from the Unix Epoch plus UpdateOffset, and
// continuing forward into the future forever. This value must be strictly
// less than the UpdatePeriod.
// Deprecated: This config value is not relevant with continuous updating.
// TODO(#7023): Remove this value.
UpdateOffset config.Duration `validate:"-"`
// UpdateTimeout controls how long a single CRL shard is allowed to attempt
// to update before being timed out. The total CRL updating process may take
// significantly longer, since a full update cycle may consist of updating
@ -70,19 +91,6 @@ type Config struct {
// of magnitude greater than our p99 update latency.
UpdateTimeout config.Duration `validate:"-"`
// TemporallyShardedSerialPrefixes is a list of prefixes that were used to
// issue certificates with no CRLDistributionPoints extension, and which are
// therefore temporally sharded. If it's non-empty, the CRL Updater will
// require matching serials when querying by temporal shard. When querying
// by explicit shard, any prefix is allowed.
//
// This should be set to the current set of serial prefixes in production.
// When deploying explicit sharding (i.e. the CRLDistributionPoints extension),
// the CAs should be configured with a new set of serial prefixes that haven't
// been used before (and the OCSP Responder config should be updated to
// recognize the new prefixes as well as the old ones).
TemporallyShardedSerialPrefixes []string
// MaxParallelism controls how many workers may be running in parallel.
// A higher value reduces the total time necessary to update all CRL shards
// that this updater is responsible for, but also increases the memory used
@ -95,37 +103,6 @@ type Config struct {
// load of said run. The default is 1.
MaxAttempts int `validate:"omitempty,min=1"`
// ExpiresMargin adds a small increment to the CRL's HTTP Expires time.
//
// When uploading a CRL, its Expires field in S3 is set to the expected time
// the next CRL will be uploaded (by this instance). That allows our CDN
// instances to cache for that long. However, since the next update might be
// slow or delayed, we add a margin of error.
//
// Tradeoffs: A large ExpiresMargin reduces the chance that a CRL becomes
// uncacheable and floods S3 with traffic (which might result in 503s while
// S3 scales out).
//
// A small ExpiresMargin means revocations become visible sooner, including
// admin-invoked revocations that may have a time requirement.
ExpiresMargin config.Duration
// CacheControl is a string passed verbatim to the crl-storer to store on
// the S3 object.
//
// Note: if this header contains max-age, it will override
// Expires. https://www.rfc-editor.org/rfc/rfc9111.html#name-calculating-freshness-lifet
// Cache-Control: max-age has the disadvantage that it caches for a fixed
// amount of time, regardless of how close the CRL is to replacement. So
// if max-age is used, the worst-case time for a revocation to become visible
// is UpdatePeriod + the value of max age.
//
// The stale-if-error and stale-while-revalidate headers may be useful here:
// https://aws.amazon.com/about-aws/whats-new/2023/05/amazon-cloudfront-stale-while-revalidate-stale-if-error-cache-control-directives/
//
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
CacheControl string
Features features.Config
}
@ -199,9 +176,6 @@ func main() {
c.CRLUpdater.UpdateTimeout.Duration,
c.CRLUpdater.MaxParallelism,
c.CRLUpdater.MaxAttempts,
c.CRLUpdater.CacheControl,
c.CRLUpdater.ExpiresMargin.Duration,
c.CRLUpdater.TemporallyShardedSerialPrefixes,
sac,
cac,
csc,

View File

@ -1,130 +0,0 @@
package notmain
import (
"context"
"flag"
"os"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/email"
emailpb "github.com/letsencrypt/boulder/email/proto"
bgrpc "github.com/letsencrypt/boulder/grpc"
)
// Config holds the configuration for the email-exporter service.
type Config struct {
	EmailExporter struct {
		cmd.ServiceConfig
		// PerDayLimit enforces the daily request limit imposed by the Pardot
		// API. The total daily limit, which varies based on the Salesforce
		// Pardot subscription tier, must be distributed among all
		// email-exporter instances. For more information, see:
		// https://developer.salesforce.com/docs/marketing/pardot/guide/overview.html?q=rate+limits#daily-requests-limits
		PerDayLimit float64 `validate:"required,min=1"`
		// MaxConcurrentRequests enforces the concurrent request limit imposed
		// by the Pardot API. This limit must be distributed among all
		// email-exporter instances and be proportional to each instance's
		// PerDayLimit. For example, if the total daily limit is 50,000 and one
		// instance is assigned 40% (20,000 requests), it should also receive
		// 40% of the max concurrent requests (2 out of 5). For more
		// information, see:
		// https://developer.salesforce.com/docs/marketing/pardot/guide/overview.html?q=rate+limits#concurrent-requests
		MaxConcurrentRequests int `validate:"required,min=1,max=5"`
		// PardotBusinessUnit is the Pardot business unit to use.
		PardotBusinessUnit string `validate:"required"`
		// ClientId is the OAuth API client ID provided by Salesforce.
		ClientId cmd.PasswordConfig
		// ClientSecret is the OAuth API client secret provided by Salesforce.
		ClientSecret cmd.PasswordConfig
		// SalesforceBaseURL is the base URL for the Salesforce API. (e.g.,
		// "https://login.salesforce.com")
		SalesforceBaseURL string `validate:"required"`
		// PardotBaseURL is the base URL for the Pardot API. (e.g.,
		// "https://pi.pardot.com")
		PardotBaseURL string `validate:"required"`
		// EmailCacheSize controls how many hashed email addresses are retained
		// in memory to prevent duplicates from being sent to the Pardot API.
		// Each entry consumes ~120 bytes, so 100,000 entries uses around 12MB
		// of memory. If left unset, no caching is performed.
		EmailCacheSize int `validate:"omitempty,min=1"`
	}
	// Syslog configures where and at what level logs are emitted.
	Syslog cmd.SyslogConfig
	// OpenTelemetry configures tracing/telemetry export.
	OpenTelemetry cmd.OpenTelemetryConfig
}
// main is the entrypoint for the email-exporter gRPC service. It reads the
// JSON config (with optional -addr/-debug-addr overrides), builds the Pardot
// API client and exporter, starts the exporter's background worker, and
// serves the Exporter gRPC API until shutdown, draining queued work on exit.
func main() {
	configFile := flag.String("config", "", "Path to configuration file")
	grpcAddr := flag.String("addr", "", "gRPC listen address override")
	debugAddr := flag.String("debug-addr", "", "Debug server address override")
	flag.Parse()
	if *configFile == "" {
		flag.Usage()
		os.Exit(1)
	}
	var c Config
	err := cmd.ReadConfigFile(*configFile, &c)
	cmd.FailOnError(err, "Reading JSON config file into config structure")
	// Command-line overrides take precedence over the config file.
	if *grpcAddr != "" {
		c.EmailExporter.ServiceConfig.GRPC.Address = *grpcAddr
	}
	if *debugAddr != "" {
		c.EmailExporter.ServiceConfig.DebugAddr = *debugAddr
	}
	scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.EmailExporter.ServiceConfig.DebugAddr)
	defer oTelShutdown(context.Background())
	logger.Info(cmd.VersionString())
	clk := cmd.Clock()
	clientId, err := c.EmailExporter.ClientId.Pass()
	cmd.FailOnError(err, "Loading clientId")
	clientSecret, err := c.EmailExporter.ClientSecret.Pass()
	cmd.FailOnError(err, "Loading clientSecret")
	// The deduplication cache is optional; a zero size disables it.
	var cache *email.EmailCache
	if c.EmailExporter.EmailCacheSize > 0 {
		cache = email.NewHashedEmailCache(c.EmailExporter.EmailCacheSize, scope)
	}
	pardotClient, err := email.NewPardotClientImpl(
		clk,
		c.EmailExporter.PardotBusinessUnit,
		clientId,
		clientSecret,
		c.EmailExporter.SalesforceBaseURL,
		c.EmailExporter.PardotBaseURL,
	)
	cmd.FailOnError(err, "Creating Pardot API client")
	exporterServer := email.NewExporterImpl(pardotClient, cache, c.EmailExporter.PerDayLimit, c.EmailExporter.MaxConcurrentRequests, scope, logger)
	tlsConfig, err := c.EmailExporter.TLS.Load(scope)
	cmd.FailOnError(err, "Loading email-exporter TLS config")
	daemonCtx, shutdownExporterServer := context.WithCancel(context.Background())
	go exporterServer.Start(daemonCtx)
	start, err := bgrpc.NewServer(c.EmailExporter.GRPC, logger).Add(
		&emailpb.Exporter_ServiceDesc, exporterServer).Build(tlsConfig, scope, clk)
	cmd.FailOnError(err, "Configuring email-exporter gRPC server")
	// start() blocks until the server exits; then stop the background worker
	// and drain queued work before reporting the exit cause.
	err = start()
	shutdownExporterServer()
	exporterServer.Drain()
	cmd.FailOnError(err, "email-exporter gRPC service failed to start")
}
// init registers the email-exporter command with boulder's command
// dispatcher, including config validation against Config.
func init() {
	cmd.RegisterCommand("email-exporter", main, &cmd.ConfigValidator{Config: &Config{}})
}

View File

@ -0,0 +1,968 @@
package notmain
import (
"bytes"
"context"
"crypto/x509"
"encoding/json"
"errors"
"flag"
"fmt"
"math"
netmail "net/mail"
"net/url"
"os"
"sort"
"strings"
"sync"
"text/template"
"time"
"github.com/jmhodges/clock"
"google.golang.org/grpc"
"github.com/prometheus/client_golang/prometheus"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
"github.com/letsencrypt/boulder/db"
"github.com/letsencrypt/boulder/features"
bgrpc "github.com/letsencrypt/boulder/grpc"
blog "github.com/letsencrypt/boulder/log"
bmail "github.com/letsencrypt/boulder/mail"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/policy"
"github.com/letsencrypt/boulder/sa"
sapb "github.com/letsencrypt/boulder/sa/proto"
)
const (
	// defaultExpirationSubject is the template for the expiration email's
	// subject line; {{.ExpirationSubject}} is filled in by sendNags.
	defaultExpirationSubject = "Let's Encrypt certificate expiration notice for domain {{.ExpirationSubject}}"
)

var (
	// errNoValidEmail is returned by sendNags when none of an account's
	// contacts is a usable mailto: address.
	errNoValidEmail = errors.New("no usable contact address")
)

// regStore abstracts looking up a registration (account) by ID.
// NOTE(review): presumably satisfied by the SA gRPC client — confirm at the
// construction site (not visible in this chunk).
type regStore interface {
	GetRegistration(ctx context.Context, req *sapb.RegistrationID, _ ...grpc.CallOption) (*corepb.Registration, error)
}
// limiter tracks how many mails we've sent to a given address in a given day.
// Note that this does not track mails across restarts of the process.
// Modifications to `counts` and `currentDay` are protected by a mutex.
type limiter struct {
	sync.RWMutex
	// currentDay is a day in UTC, truncated to 24 hours. When the current
	// time is more than 24 hours past this date, all counts reset and this
	// date is updated.
	currentDay time.Time

	// counts is a map from address to number of mails we have attempted to
	// send during `currentDay`.
	counts map[string]int

	// limit is the number of sends after which we'll return an error from
	// check()
	limit int

	// clk is the clock used to determine the current day (injected so tests
	// can control time).
	clk clock.Clock
}

// oneDay is the bucketing interval for per-address send counts.
const oneDay = 24 * time.Hour
// maybeBumpDay updates lim.currentDay if its current value is more than 24
// hours ago, and resets the counts map. Expects limiter is locked.
//
// Note: Truncate(oneDay) rounds down to a multiple of 24h since the zero
// time (UTC), so "days" here are fixed UTC buckets, not rolling windows.
func (lim *limiter) maybeBumpDay() {
	today := lim.clk.Now().Truncate(oneDay)
	if (today.Sub(lim.currentDay) >= oneDay && len(lim.counts) > 0) ||
		lim.counts == nil {
		// Throw away counts so far and switch to a new day.
		// This also does the initialization of counts and currentDay the first
		// time inc() is called.
		lim.counts = make(map[string]int)
		lim.currentDay = today
	}
}
// inc records one more send attempt for address on the current day, rolling
// the day (and clearing stale counts) first if needed.
func (lim *limiter) inc(address string) {
	lim.Lock()
	defer lim.Unlock()

	lim.maybeBumpDay()
	lim.counts[address]++
}
// check returns an error if the given address has already reached the daily
// send limit, and nil otherwise.
//
// It takes the full write lock rather than the read lock because
// maybeBumpDay mutates lim.counts and lim.currentDay when the day rolls
// over; doing that under RLock is a data race with concurrent inc/check
// callers.
func (lim *limiter) check(address string) error {
	lim.Lock()
	defer lim.Unlock()

	lim.maybeBumpDay()
	if lim.counts[address] >= lim.limit {
		return fmt.Errorf("daily mail limit exceeded for %q", address)
	}
	return nil
}
// mailer holds everything needed for one expiration-mailer pass: database
// and registration access, the outbound mail client, message templates, and
// send pacing/limiting knobs.
type mailer struct {
	log   blog.Logger
	dbMap *db.WrappedMap
	// rs looks up registrations (accounts), e.g. to fetch contact addresses.
	rs regStore
	// mailer creates outbound mail connections.
	// NOTE(review): not used in this chunk; confirm usage at the call sites.
	mailer bmail.Mailer
	// emailTemplate renders the message body; subjectTemplate the subject.
	emailTemplate   *template.Template
	subjectTemplate *template.Template
	// nagTimes are pre-expiry offsets at which nags are sent.
	// NOTE(review): semantics not visible in this chunk — confirm.
	nagTimes            []time.Duration
	parallelSends       uint
	certificatesPerTick int
	// addressLimiter limits how many mails we'll send to a single address in
	// a single day.
	addressLimiter *limiter
	// Maximum number of rows to update in a single SQL UPDATE statement.
	updateChunkSize int
	clk             clock.Clock
	stats           mailerStats
}
// certDERWithRegID pairs a certificate's DER bytes with the registration
// (account) ID it belongs to.
type certDERWithRegID struct {
	DER   core.CertDER
	RegID int64
}
// mailerStats collects the Prometheus metrics emitted during a mailer run.
// NOTE(review): only errorCount and sendLatency are exercised in this chunk;
// confirm the remaining metrics' semantics at their registration site.
type mailerStats struct {
	sendDelay          *prometheus.GaugeVec
	sendDelayHistogram *prometheus.HistogramVec
	nagsAtCapacity     *prometheus.GaugeVec
	// errorCount counts failures by "type" label (e.g. TemplateFailure,
	// UpdateCertificateStatus).
	errorCount *prometheus.CounterVec
	// sendLatency observes the wall-clock duration of each SendMail call.
	sendLatency                       prometheus.Histogram
	processingLatency                 prometheus.Histogram
	certificatesExamined              prometheus.Counter
	certificatesAlreadyRenewed        prometheus.Counter
	certificatesPerAccountNeedingMail prometheus.Histogram
}
// sendNags sends one expiration-nag email covering every cert in certs to
// all usable addresses in contacts. A contact is usable when it is a
// mailto: URL, passes policy.ValidEmail, and has not hit the per-address
// daily send limit. Returns errNoValidEmail when no usable address remains,
// a template/marshal/send error otherwise, or nil on success.
func (m *mailer) sendNags(conn bmail.Conn, contacts []string, certs []*x509.Certificate) error {
	if len(certs) == 0 {
		return errors.New("no certs given to send nags for")
	}
	// Filter the contact list down to sendable email addresses.
	emails := []string{}
	for _, contact := range contacts {
		parsed, err := url.Parse(contact)
		if err != nil {
			m.log.Errf("parsing contact email %s: %s", contact, err)
			continue
		}
		if parsed.Scheme != "mailto" {
			continue
		}
		address := parsed.Opaque
		err = policy.ValidEmail(address)
		if err != nil {
			m.log.Debugf("skipping invalid email %q: %s", address, err)
			continue
		}
		err = m.addressLimiter.check(address)
		if err != nil {
			m.log.Infof("not sending mail: %s", err)
			continue
		}
		// Count the attempt now, before we know whether the send succeeds.
		m.addressLimiter.inc(address)
		emails = append(emails, parsed.Opaque)
	}
	if len(emails) == 0 {
		return errNoValidEmail
	}
	expiresIn := time.Duration(math.MaxInt64)
	expDate := m.clk.Now()
	domains := []string{}
	serials := []string{}
	// Pick out the expiration date that is closest to being hit.
	for _, cert := range certs {
		domains = append(domains, cert.DNSNames...)
		serials = append(serials, core.SerialToString(cert.SerialNumber))
		possible := cert.NotAfter.Sub(m.clk.Now())
		if possible < expiresIn {
			expiresIn = possible
			expDate = cert.NotAfter
		}
	}
	domains = core.UniqueLowerNames(domains)
	sort.Strings(domains)
	// Cap the serial and domain lists rendered into the log/body.
	const maxSerials = 100
	truncatedSerials := serials
	if len(truncatedSerials) > maxSerials {
		truncatedSerials = serials[0:maxSerials]
	}
	const maxDomains = 100
	truncatedDomains := domains
	if len(truncatedDomains) > maxDomains {
		truncatedDomains = domains[0:maxDomains]
	}
	// Construct the information about the expiring certificates for use in the
	// subject template
	expiringSubject := fmt.Sprintf("%q", domains[0])
	if len(domains) > 1 {
		expiringSubject += fmt.Sprintf(" (and %d more)", len(domains)-1)
	}
	// Execute the subjectTemplate by filling in the ExpirationSubject
	subjBuf := new(bytes.Buffer)
	err := m.subjectTemplate.Execute(subjBuf, struct {
		ExpirationSubject string
	}{
		ExpirationSubject: expiringSubject,
	})
	if err != nil {
		m.stats.errorCount.With(prometheus.Labels{"type": "SubjectTemplateFailure"}).Inc()
		return err
	}
	// Render the message body from the email template.
	email := struct {
		ExpirationDate     string
		DaysToExpiration   int
		DNSNames           string
		TruncatedDNSNames  string
		NumDNSNamesOmitted int
	}{
		ExpirationDate:     expDate.UTC().Format(time.DateOnly),
		DaysToExpiration:   int(expiresIn.Hours() / 24),
		DNSNames:           strings.Join(domains, "\n"),
		TruncatedDNSNames:  strings.Join(truncatedDomains, "\n"),
		NumDNSNamesOmitted: len(domains) - len(truncatedDomains),
	}
	msgBuf := new(bytes.Buffer)
	err = m.emailTemplate.Execute(msgBuf, email)
	if err != nil {
		m.stats.errorCount.With(prometheus.Labels{"type": "TemplateFailure"}).Inc()
		return err
	}
	// Log recipients and covered certs as JSON, for the audit trail.
	logItem := struct {
		Rcpt              []string
		DaysToExpiration  int
		TruncatedDNSNames []string
		TruncatedSerials  []string
	}{
		Rcpt:              emails,
		DaysToExpiration:  email.DaysToExpiration,
		TruncatedDNSNames: truncatedDomains,
		TruncatedSerials:  truncatedSerials,
	}
	logStr, err := json.Marshal(logItem)
	if err != nil {
		m.log.Errf("logItem could not be serialized to JSON. Raw: %+v", logItem)
		return err
	}
	m.log.Infof("attempting send JSON=%s", string(logStr))
	startSending := m.clk.Now()
	err = conn.SendMail(emails, subjBuf.String(), msgBuf.String())
	if err != nil {
		m.log.Errf("failed send JSON=%s err=%s", string(logStr), err)
		return err
	}
	finishSending := m.clk.Now()
	elapsed := finishSending.Sub(startSending)
	m.stats.sendLatency.Observe(elapsed.Seconds())
	return nil
}
// updateLastNagTimestamps updates the lastExpirationNagSent column for every cert in
// the given list. Even though it can encounter errors, it only logs them and
// does not return them, because we always prefer to simply continue.
func (m *mailer) updateLastNagTimestamps(ctx context.Context, certs []*x509.Certificate) {
	// Walk the list in chunks of at most m.updateChunkSize (0 means
	// unbounded), issuing one UPDATE per chunk.
	remaining := certs
	for len(remaining) > 0 {
		chunkLen := len(remaining)
		if m.updateChunkSize > 0 && chunkLen > m.updateChunkSize {
			chunkLen = m.updateChunkSize
		}
		m.updateLastNagTimestampsChunk(ctx, remaining[:chunkLen])
		remaining = remaining[chunkLen:]
	}
}
// updateLastNagTimestampsChunk processes a single chunk (up to 65k) of certificates,
// setting lastExpirationNagSent to the current time for each serial. Errors
// are logged and counted but not returned.
func (m *mailer) updateLastNagTimestampsChunk(ctx context.Context, certs []*x509.Certificate) {
	// One placeholder for the timestamp, then one per serial; the timestamp
	// binds to the first "?" in the query below.
	args := make([]interface{}, 0, len(certs)+1)
	args = append(args, m.clk.Now())
	for _, cert := range certs {
		args = append(args, core.SerialToString(cert.SerialNumber))
	}
	query := fmt.Sprintf(
		"UPDATE certificateStatus SET lastExpirationNagSent = ? WHERE serial IN (%s)",
		db.QuestionMarks(len(certs)),
	)
	_, err := m.dbMap.ExecContext(ctx, query, args...)
	if err != nil {
		m.log.AuditErrf("Error updating certificate status for %d certs: %s", len(certs), err)
		m.stats.errorCount.With(prometheus.Labels{"type": "UpdateCertificateStatus"}).Inc()
	}
}
// certIsRenewed reports whether a certificate covering exactly this set of
// names has been issued more recently than `issued`, i.e. whether the
// subscriber has already renewed it. It checks the fqdnSets table by the
// hash of the (normalized) name set.
func (m *mailer) certIsRenewed(ctx context.Context, names []string, issued time.Time) (bool, error) {
	var renewed bool
	err := m.dbMap.SelectOne(
		ctx,
		&renewed,
		`SELECT EXISTS (SELECT id FROM fqdnSets WHERE setHash = ? AND issued > ? LIMIT 1)`,
		core.HashNames(names),
		issued,
	)
	return renewed, err
}
// work is one unit of mail-sending labor: all of a single registration's
// expiring certificate DERs, to be handled by one sender goroutine.
type work struct {
	regID    int64
	certDERs []core.CertDER
}
// processCerts groups the given certificates by registration ID and sends
// one nag email per registration, using up to m.parallelSends concurrent
// SMTP connections (default 1). Per-registration send failures are audit
// logged but do not abort the run; a failure to open a new SMTP connection
// does.
func (m *mailer) processCerts(
	ctx context.Context,
	allCerts []certDERWithRegID,
	expiresIn time.Duration,
) error {
	// Bucket the certificate DERs by account so each account gets one email.
	regIDToCertDERs := make(map[int64][]core.CertDER)
	for _, cert := range allCerts {
		cs := regIDToCertDERs[cert.RegID]
		cs = append(cs, cert.DER)
		regIDToCertDERs[cert.RegID] = cs
	}

	parallelSends := m.parallelSends
	if parallelSends == 0 {
		parallelSends = 1
	}

	var wg sync.WaitGroup
	// Buffered large enough to hold every work item, so the producer
	// goroutine below can never block, even if we return early.
	workChan := make(chan work, len(regIDToCertDERs))

	// Populate the work chan on a goroutine so work is available as soon
	// as one of the sender routines starts.
	go func(ch chan<- work) {
		for regID, certs := range regIDToCertDERs {
			ch <- work{regID, certs}
		}
		close(workChan)
	}(workChan)

	for senderNum := uint(0); senderNum < parallelSends; senderNum++ {
		// For politeness' sake, don't open more than 1 new connection per
		// second.
		if senderNum > 0 {
			time.Sleep(time.Second)
		}

		if ctx.Err() != nil {
			return ctx.Err()
		}

		conn, err := m.mailer.Connect()
		if err != nil {
			// NOTE(review): returning here does not wait for the senders
			// already started above; they drain workChan and close their
			// connections in the background.
			m.log.AuditErrf("connecting parallel sender %d: %s", senderNum, err)
			return err
		}
		wg.Add(1)
		go func(conn bmail.Conn, ch <-chan work) {
			defer wg.Done()
			for w := range ch {
				err := m.sendToOneRegID(ctx, conn, w.regID, w.certDERs, expiresIn)
				if err != nil {
					m.log.AuditErr(err.Error())
				}
			}
			conn.Close()
		}(conn, workChan)
	}
	wg.Wait()
	return nil
}
// sendToOneRegID fetches the registration's contact addresses, parses and
// filters the given certificate DERs (dropping unparseable certs and certs
// whose name set has already been renewed), sends a single nag email
// covering the remainder over conn, and records the nag timestamps.
//
// Unusable or permanently undeliverable addresses are treated as success
// (timestamps are still updated) so the same certs are not retried forever.
func (m *mailer) sendToOneRegID(ctx context.Context, conn bmail.Conn, regID int64, certDERs []core.CertDER, expiresIn time.Duration) error {
	if ctx.Err() != nil {
		return ctx.Err()
	}
	if len(certDERs) == 0 {
		return errors.New("shouldn't happen: empty certificate list in sendToOneRegID")
	}
	reg, err := m.rs.GetRegistration(ctx, &sapb.RegistrationID{Id: regID})
	if err != nil {
		m.stats.errorCount.With(prometheus.Labels{"type": "GetRegistration"}).Inc()
		return fmt.Errorf("Error fetching registration %d: %s", regID, err)
	}

	parsedCerts := []*x509.Certificate{}
	for i, certDER := range certDERs {
		if ctx.Err() != nil {
			return ctx.Err()
		}
		parsedCert, err := x509.ParseCertificate(certDER)
		if err != nil {
			// TODO(#1420): tell registration about this error
			m.log.AuditErrf("Error parsing certificate: %s. Body: %x", err, certDER)
			m.stats.errorCount.With(prometheus.Labels{"type": "ParseCertificate"}).Inc()
			continue
		}

		// The histogram version of send delay reports the worst case send delay for
		// a single regID in this cycle.
		if i == 0 {
			sendDelay := expiresIn - parsedCert.NotAfter.Sub(m.clk.Now())
			m.stats.sendDelayHistogram.With(prometheus.Labels{"nag_group": expiresIn.String()}).Observe(
				sendDelay.Truncate(time.Second).Seconds())
		}

		renewed, err := m.certIsRenewed(ctx, parsedCert.DNSNames, parsedCert.NotBefore)
		if err != nil {
			m.log.AuditErrf("expiration-mailer: error fetching renewal state: %v", err)
			// assume not renewed
		} else if renewed {
			m.log.Debugf("Cert %s is already renewed", core.SerialToString(parsedCert.SerialNumber))
			m.stats.certificatesAlreadyRenewed.Add(1)
			// Mark the renewed cert as nagged so it isn't examined again.
			m.updateLastNagTimestamps(ctx, []*x509.Certificate{parsedCert})
			continue
		}

		parsedCerts = append(parsedCerts, parsedCert)
	}

	m.stats.certificatesPerAccountNeedingMail.Observe(float64(len(parsedCerts)))

	if len(parsedCerts) == 0 {
		// all certificates are renewed
		return nil
	}

	err = m.sendNags(conn, reg.Contact, parsedCerts)
	if err != nil {
		// If the error was due to the address(es) being unusable or the mail being
		// undeliverable, we don't want to try again later.
		var badAddrErr *bmail.BadAddressSMTPError
		if errors.Is(err, errNoValidEmail) || errors.As(err, &badAddrErr) {
			m.updateLastNagTimestamps(ctx, parsedCerts)
			// Some accounts have no email; some accounts have an invalid email.
			// Treat those as non-error cases.
			return nil
		}
		m.stats.errorCount.With(prometheus.Labels{"type": "SendNags"}).Inc()
		return fmt.Errorf("sending nag emails: %s", err)
	}
	m.updateLastNagTimestamps(ctx, parsedCerts)
	return nil
}
// findExpiringCertificates finds certificates that might need an expiration mail, filters them,
// groups by account, sends mail, and updates their status in the DB so we don't examine them again.
//
// Invariant: findExpiringCertificates should examine each certificate at most N times, where
// N is the number of reminders. For every certificate examined (barring errors), this function
// should update the lastExpirationNagSent field of certificateStatus, so it does not need to
// examine the same certificate again on the next go-round. This ensures we make forward progress
// and don't clog up the window of certificates to be examined.
func (m *mailer) findExpiringCertificates(ctx context.Context) error {
	now := m.clk.Now()
	// E.g. m.nagTimes = [2, 4, 8, 15] days from expiration
	for i, expiresIn := range m.nagTimes {
		// Each nag group's window is (now + previous nag, now + this nag],
		// so windows tile without overlapping.
		left := now
		if i > 0 {
			left = left.Add(m.nagTimes[i-1])
		}
		right := now.Add(expiresIn)

		m.log.Infof("expiration-mailer: Searching for certificates that expire between %s and %s and had last nag >%s before expiry",
			left.UTC(), right.UTC(), expiresIn)

		var certs []certDERWithRegID
		var err error
		if features.Get().ExpirationMailerUsesJoin {
			certs, err = m.getCertsWithJoin(ctx, left, right, expiresIn)
		} else {
			certs, err = m.getCerts(ctx, left, right, expiresIn)
		}
		if err != nil {
			return err
		}

		m.stats.certificatesExamined.Add(float64(len(certs)))

		// If the number of rows was exactly `m.certificatesPerTick` rows we need to increment
		// a stat indicating that this nag group is at capacity. If this condition
		// continually occurs across mailer runs then we will not catch up,
		// resulting in under-sending expiration mails. The effects of this
		// were initially described in issue #2002[0].
		//
		// 0: https://github.com/letsencrypt/boulder/issues/2002
		atCapacity := float64(0)
		if len(certs) == m.certificatesPerTick {
			m.log.Infof("nag group %s expiring certificates at configured capacity (select limit %d)",
				expiresIn.String(), m.certificatesPerTick)
			atCapacity = float64(1)
		}
		m.stats.nagsAtCapacity.With(prometheus.Labels{"nag_group": expiresIn.String()}).Set(atCapacity)

		m.log.Infof("Found %d certificates expiring between %s and %s", len(certs),
			left.Format(time.DateTime), right.Format(time.DateTime))

		if len(certs) == 0 {
			continue // nothing to do
		}

		processingStarted := m.clk.Now()
		// Send errors are logged but don't abort the remaining nag groups.
		err = m.processCerts(ctx, certs, expiresIn)
		if err != nil {
			m.log.AuditErr(err.Error())
		}
		processingEnded := m.clk.Now()
		elapsed := processingEnded.Sub(processingStarted)
		m.stats.processingLatency.Observe(elapsed.Seconds())
	}

	return nil
}
// getCertsWithJoin returns the DER and registration ID of certificates
// expiring in the window (left, right] that are not revoked and have not
// been nagged within expiresIn of their expiry, capped at
// m.certificatesPerTick rows. Unlike getCerts, this variant fetches
// everything in a single query by JOINing certificateStatus with
// certificates, rather than fetching each certificate sequentially.
func (m *mailer) getCertsWithJoin(ctx context.Context, left, right time.Time, expiresIn time.Duration) ([]certDERWithRegID, error) {
	var certs []certDERWithRegID
	// COALESCE(..., 1) treats a NULL lastExpirationNagSent (never nagged)
	// as eligible.
	_, err := m.dbMap.Select(
		ctx,
		&certs,
		`SELECT
			cert.der as der, cert.registrationID as regID
			FROM certificateStatus AS cs
			JOIN certificates as cert
			ON cs.serial = cert.serial
			AND cs.notAfter > :cutoffA
			AND cs.notAfter <= :cutoffB
			AND cs.status != "revoked"
			AND COALESCE(TIMESTAMPDIFF(SECOND, cs.lastExpirationNagSent, cs.notAfter) > :nagCutoff, 1)
			ORDER BY cs.notAfter ASC
			LIMIT :certificatesPerTick`,
		map[string]interface{}{
			"cutoffA":             left,
			"cutoffB":             right,
			"nagCutoff":           expiresIn.Seconds(),
			"certificatesPerTick": m.certificatesPerTick,
		},
	)
	if err != nil {
		m.log.AuditErrf("expiration-mailer: Error loading certificate serials: %s", err)
		return nil, err
	}
	m.log.Debugf("found %d certificates", len(certs))
	return certs, nil
}
// getCerts returns the DER and registration ID of certificates expiring in
// the window (left, right] that are not revoked and have not been nagged
// within expiresIn of their expiry, capped at m.certificatesPerTick rows.
func (m *mailer) getCerts(ctx context.Context, left, right time.Time, expiresIn time.Duration) ([]certDERWithRegID, error) {
	// First we do a query on the certificateStatus table to find certificates
	// nearing expiry meeting our criteria for email notification. We later
	// sequentially fetch the certificate details. This avoids an expensive
	// JOIN.
	var serials []string
	// COALESCE(..., 1) treats a NULL lastExpirationNagSent (never nagged)
	// as eligible.
	_, err := m.dbMap.Select(
		ctx,
		&serials,
		`SELECT
			cs.serial
			FROM certificateStatus AS cs
			WHERE cs.notAfter > :cutoffA
			AND cs.notAfter <= :cutoffB
			AND cs.status != "revoked"
			AND COALESCE(TIMESTAMPDIFF(SECOND, cs.lastExpirationNagSent, cs.notAfter) > :nagCutoff, 1)
			ORDER BY cs.notAfter ASC
			LIMIT :certificatesPerTick`,
		map[string]interface{}{
			"cutoffA":             left,
			"cutoffB":             right,
			"nagCutoff":           expiresIn.Seconds(),
			"certificatesPerTick": m.certificatesPerTick,
		},
	)
	if err != nil {
		m.log.AuditErrf("expiration-mailer: Error loading certificate serials: %s", err)
		return nil, err
	}
	m.log.Debugf("found %d certificates", len(serials))

	// Now we can sequentially retrieve the certificate details for each of the
	// certificate status rows
	var certs []certDERWithRegID
	for i, serial := range serials {
		if ctx.Err() != nil {
			return nil, ctx.Err()
		}
		var cert core.Certificate
		cert, err := sa.SelectCertificate(ctx, m.dbMap, serial)
		if err != nil {
			// We can get a NoRowsErr when processing a serial number corresponding
			// to a precertificate with no final certificate. Since this certificate
			// is not being used by a subscriber, we don't send expiration email about
			// it.
			if db.IsNoRows(err) {
				m.log.Infof("no rows for serial %q", serial)
				continue
			}
			m.log.AuditErrf("expiration-mailer: Error loading cert %q: %s", cert.Serial, err)
			continue
		}
		certs = append(certs, certDERWithRegID{
			DER:   cert.DER,
			RegID: cert.RegistrationID,
		})
		if i == 0 {
			// Report the send delay metric. Note: this is the worst-case send delay
			// of any certificate in this batch because it's based on the first (oldest).
			sendDelay := expiresIn - cert.Expires.Sub(m.clk.Now())
			m.stats.sendDelay.With(prometheus.Labels{"nag_group": expiresIn.String()}).Set(
				sendDelay.Truncate(time.Second).Seconds())
		}
	}
	return certs, nil
}
type durationSlice []time.Duration
func (ds durationSlice) Len() int {
return len(ds)
}
func (ds durationSlice) Less(a, b int) bool {
return ds[a] < ds[b]
}
func (ds durationSlice) Swap(a, b int) {
ds[a], ds[b] = ds[b], ds[a]
}
// Config holds the expiration-mailer's JSON configuration.
type Config struct {
	Mailer struct {
		// DebugAddr is the listen address for the debug/metrics server.
		DebugAddr string `validate:"omitempty,hostname_port"`
		// DB configures the database connection.
		DB cmd.DBConfig
		cmd.SMTPConfig

		// From is an RFC 5322 formatted "From" address for reminder messages,
		// e.g. "Example <example@test.org>"
		From string `validate:"required"`

		// Subject is the Subject line of reminder messages. This is a Go
		// template with a single variable: ExpirationSubject, which contains
		// a list of affected hostnames, possibly truncated.
		Subject string

		// CertLimit is the maximum number of certificates to investigate in a
		// single batch. Defaults to 100.
		CertLimit int `validate:"min=0"`

		// MailsPerAddressPerDay is the maximum number of emails we'll send to
		// a single address in a single day. Defaults to 0 (unlimited).
		// Note that this does not track sends across restarts of the process,
		// so we may send more than this when we restart expiration-mailer.
		// This is a best-effort limitation. Defaults to math.MaxInt.
		MailsPerAddressPerDay int `validate:"min=0"`

		// UpdateChunkSize is the maximum number of rows to update in a single
		// SQL UPDATE statement.
		UpdateChunkSize int `validate:"min=0,max=65535"`

		NagTimes []string `validate:"min=1,dive,required"`

		// Path to a text/template email template with a .gotmpl or .txt file
		// extension.
		EmailTemplate string `validate:"required"`

		// How often to process a batch of certificates
		Frequency config.Duration

		// ParallelSends is the number of parallel goroutines used to process
		// each batch of emails. Defaults to 1.
		ParallelSends uint

		TLS       cmd.TLSConfig
		SAService *cmd.GRPCClientConfig

		// Path to a file containing a list of trusted root certificates for use
		// during the SMTP connection (as opposed to the gRPC connections).
		SMTPTrustedRootFile string

		Features features.Config
	}

	Syslog        cmd.SyslogConfig
	OpenTelemetry cmd.OpenTelemetryConfig
}
// initStats constructs and registers every Prometheus metric the mailer
// records, and returns them bundled in a mailerStats.
func initStats(stats prometheus.Registerer) mailerStats {
	sendDelay := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "send_delay",
			Help: "For the last batch of certificates, difference between the idealized send time and actual send time. Will always be nonzero, bigger numbers are worse",
		},
		[]string{"nag_group"})
	stats.MustRegister(sendDelay)

	sendDelayHistogram := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "send_delay_histogram",
			Help:    "For each mail sent, difference between the idealized send time and actual send time. Will always be nonzero, bigger numbers are worse",
			Buckets: prometheus.LinearBuckets(86400, 86400, 10),
		},
		[]string{"nag_group"})
	stats.MustRegister(sendDelayHistogram)

	nagsAtCapacity := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "nags_at_capacity",
			Help: "Count of nag groups at capacity",
		},
		[]string{"nag_group"})
	stats.MustRegister(nagsAtCapacity)

	errorCount := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "errors",
			Help: "Number of errors",
		},
		[]string{"type"})
	stats.MustRegister(errorCount)

	sendLatency := prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Name:    "send_latency",
			Help:    "Time the mailer takes sending messages in seconds",
			Buckets: metrics.InternetFacingBuckets,
		})
	stats.MustRegister(sendLatency)

	processingLatency := prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Name:    "processing_latency",
			Help:    "Time the mailer takes processing certificates in seconds",
			Buckets: []float64{30, 60, 75, 90, 120, 600, 3600},
		})
	stats.MustRegister(processingLatency)

	certificatesExamined := prometheus.NewCounter(
		prometheus.CounterOpts{
			Name: "certificates_examined",
			Help: "Number of certificates looked at that are potentially due for an expiration mail",
		})
	stats.MustRegister(certificatesExamined)

	certificatesAlreadyRenewed := prometheus.NewCounter(
		prometheus.CounterOpts{
			Name: "certificates_already_renewed",
			Help: "Number of certificates from certificates_examined that were ignored because they were already renewed",
		})
	stats.MustRegister(certificatesAlreadyRenewed)

	accountsNeedingMail := prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Name:    "certificates_per_account_needing_mail",
			Help:    "After ignoring certificates_already_renewed and grouping the remaining certificates by account, how many accounts needed to get an email; grouped by how many certificates each account needed",
			Buckets: []float64{0, 1, 2, 100, 1000, 10000, 100000},
		})
	stats.MustRegister(accountsNeedingMail)

	return mailerStats{
		sendDelay:                         sendDelay,
		sendDelayHistogram:                sendDelayHistogram,
		nagsAtCapacity:                    nagsAtCapacity,
		errorCount:                        errorCount,
		sendLatency:                       sendLatency,
		processingLatency:                 processingLatency,
		certificatesExamined:              certificatesExamined,
		certificatesAlreadyRenewed:        certificatesAlreadyRenewed,
		certificatesPerAccountNeedingMail: accountsNeedingMail,
	}
}
// main is the entry point for expiration-mailer. It loads configuration,
// builds the SMTP client and the gRPC SA client, then either performs a
// single pass over expiring certificates or, with -daemon, loops on the
// configured frequency until the context is canceled.
func main() {
	debugAddr := flag.String("debug-addr", "", "Debug server address override")
	configFile := flag.String("config", "", "File path to the configuration file for this service")
	certLimit := flag.Int("cert_limit", 0, "Count of certificates to process per expiration period")
	reconnBase := flag.Duration("reconnectBase", 1*time.Second, "Base sleep duration between reconnect attempts")
	reconnMax := flag.Duration("reconnectMax", 5*60*time.Second, "Max sleep duration between reconnect attempts after exponential backoff")
	daemon := flag.Bool("daemon", false, "Run in daemon mode")
	flag.Parse()

	if *configFile == "" {
		flag.Usage()
		os.Exit(1)
	}

	var c Config
	err := cmd.ReadConfigFile(*configFile, &c)
	cmd.FailOnError(err, "Reading JSON config file into config structure")

	features.Set(c.Mailer.Features)

	// The command-line flag overrides the config file.
	if *debugAddr != "" {
		c.Mailer.DebugAddr = *debugAddr
	}

	scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.Mailer.DebugAddr)
	defer oTelShutdown(context.Background())
	logger.Info(cmd.VersionString())

	// Daemon mode needs a nonzero frequency to drive its ticker.
	if *daemon && c.Mailer.Frequency.Duration == 0 {
		fmt.Fprintln(os.Stderr, "mailer.frequency is not set in the JSON config")
		os.Exit(1)
	}

	if *certLimit > 0 {
		c.Mailer.CertLimit = *certLimit
	}
	// Default to 100 if no certLimit is set
	if c.Mailer.CertLimit == 0 {
		c.Mailer.CertLimit = 100
	}

	// A configured value of 0 means "unlimited sends per address per day".
	if c.Mailer.MailsPerAddressPerDay == 0 {
		c.Mailer.MailsPerAddressPerDay = math.MaxInt
	}

	dbMap, err := sa.InitWrappedDb(c.Mailer.DB, scope, logger)
	cmd.FailOnError(err, "While initializing dbMap")

	tlsConfig, err := c.Mailer.TLS.Load(scope)
	cmd.FailOnError(err, "TLS config")

	clk := cmd.Clock()

	conn, err := bgrpc.ClientSetup(c.Mailer.SAService, tlsConfig, scope, clk)
	cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to SA")
	sac := sapb.NewStorageAuthorityClient(conn)

	var smtpRoots *x509.CertPool
	if c.Mailer.SMTPTrustedRootFile != "" {
		pem, err := os.ReadFile(c.Mailer.SMTPTrustedRootFile)
		cmd.FailOnError(err, "Loading trusted roots file")
		smtpRoots = x509.NewCertPool()
		if !smtpRoots.AppendCertsFromPEM(pem) {
			// Fix: this previously called cmd.FailOnError(nil, ...), which is
			// a no-op when the error is nil, so an unparseable PEM file was
			// silently accepted. Fail explicitly instead.
			cmd.Fail("Failed to parse root certs PEM")
		}
	}

	// Load email template
	emailTmpl, err := os.ReadFile(c.Mailer.EmailTemplate)
	cmd.FailOnError(err, fmt.Sprintf("Could not read email template file [%s]", c.Mailer.EmailTemplate))
	tmpl, err := template.New("expiry-email").Parse(string(emailTmpl))
	cmd.FailOnError(err, "Could not parse email template")

	// If there is no configured subject template, use a default
	if c.Mailer.Subject == "" {
		c.Mailer.Subject = defaultExpirationSubject
	}
	// Load subject template
	subjTmpl, err := template.New("expiry-email-subject").Parse(c.Mailer.Subject)
	cmd.FailOnError(err, "Could not parse email subject template")

	fromAddress, err := netmail.ParseAddress(c.Mailer.From)
	cmd.FailOnError(err, fmt.Sprintf("Could not parse from address: %s", c.Mailer.From))

	smtpPassword, err := c.Mailer.PasswordConfig.Pass()
	cmd.FailOnError(err, "Failed to load SMTP password")
	mailClient := bmail.New(
		c.Mailer.Server,
		c.Mailer.Port,
		c.Mailer.Username,
		smtpPassword,
		smtpRoots,
		*fromAddress,
		logger,
		scope,
		*reconnBase,
		*reconnMax)

	var nags durationSlice
	for _, nagDuration := range c.Mailer.NagTimes {
		dur, err := time.ParseDuration(nagDuration)
		if err != nil {
			logger.AuditErrf("Failed to parse nag duration string [%s]: %s", nagDuration, err)
			return
		}
		// Add some padding to the nag times so we send _before_ the configured
		// time rather than after. See https://github.com/letsencrypt/boulder/pull/1029
		adjustedInterval := dur + c.Mailer.Frequency.Duration
		nags = append(nags, adjustedInterval)
	}
	// Make sure durations are sorted in increasing order
	sort.Sort(nags)

	if c.Mailer.UpdateChunkSize > 65535 {
		// MariaDB limits the number of placeholders parameters to max_uint16:
		// https://github.com/MariaDB/server/blob/10.5/sql/sql_prepare.cc#L2629-L2635
		cmd.Fail(fmt.Sprintf("UpdateChunkSize of %d is too big", c.Mailer.UpdateChunkSize))
	}

	m := mailer{
		log:                 logger,
		dbMap:               dbMap,
		rs:                  sac,
		mailer:              mailClient,
		subjectTemplate:     subjTmpl,
		emailTemplate:       tmpl,
		nagTimes:            nags,
		certificatesPerTick: c.Mailer.CertLimit,
		addressLimiter:      &limiter{clk: cmd.Clock(), limit: c.Mailer.MailsPerAddressPerDay},
		updateChunkSize:     c.Mailer.UpdateChunkSize,
		parallelSends:       c.Mailer.ParallelSends,
		clk:                 clk,
		stats:               initStats(scope),
	}

	// Prefill this labelled stat with the possible label values, so each value is
	// set to 0 on startup, rather than being missing from stats collection until
	// the first mail run.
	for _, expiresIn := range nags {
		m.stats.nagsAtCapacity.With(prometheus.Labels{"nag_group": expiresIn.String()}).Set(0)
	}

	ctx, cancel := context.WithCancel(context.Background())
	go cmd.CatchSignals(cancel)

	if *daemon {
		// NOTE(review): the first run happens a full Frequency after startup
		// (ticker semantics), not immediately — confirm this is intended.
		t := time.NewTicker(c.Mailer.Frequency.Duration)
		for {
			select {
			case <-t.C:
				err = m.findExpiringCertificates(ctx)
				if err != nil && !errors.Is(err, context.Canceled) {
					cmd.FailOnError(err, "expiration-mailer has failed")
				}
			case <-ctx.Done():
				return
			}
		}
	} else {
		err = m.findExpiringCertificates(ctx)
		if err != nil && !errors.Is(err, context.Canceled) {
			cmd.FailOnError(err, "expiration-mailer has failed")
		}
	}
}
// init registers this command so the boulder dispatcher can invoke it as
// "expiration-mailer", validating its config against the Config struct.
func init() {
	cmd.RegisterCommand("expiration-mailer", main, &cmd.ConfigValidator{Config: &Config{}})
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,71 @@
package notmain
import (
"crypto/x509"
"crypto/x509/pkix"
"fmt"
"math/big"
"testing"
"time"
"github.com/letsencrypt/boulder/mocks"
"github.com/letsencrypt/boulder/test"
)
// Two mailto: contacts on a shared domain, used below to verify that each
// contact address receives its own copy of the nag email.
var (
	email1 = "mailto:one@shared-example.com"
	email2 = "mailto:two@shared-example.com"
)
// TestSendEarliestCertInfo verifies that when one account has two expiring
// certificates with overlapping names, sendNags produces one email per
// contact whose subject and body are based on the earliest-expiring cert
// (rawCertB) and the deduplicated, sorted union of all DNS names.
func TestSendEarliestCertInfo(t *testing.T) {
	expiresIn := 24 * time.Hour
	ctx := setup(t, []time.Duration{expiresIn})
	defer ctx.cleanUp()

	// Mixed-case names: the expected output below is lowercased, so this also
	// exercises name normalization.
	rawCertA := newX509Cert("happy A",
		ctx.fc.Now().AddDate(0, 0, 5),
		[]string{"example-A.com", "SHARED-example.com"},
		serial1,
	)
	rawCertB := newX509Cert("happy B",
		ctx.fc.Now().AddDate(0, 0, 2),
		[]string{"shared-example.com", "example-b.com"},
		serial2,
	)

	conn, err := ctx.m.mailer.Connect()
	test.AssertNotError(t, err, "connecting SMTP")
	err = ctx.m.sendNags(conn, []string{email1, email2}, []*x509.Certificate{rawCertA, rawCertB})
	if err != nil {
		t.Fatal(err)
	}
	if len(ctx.mc.Messages) != 2 {
		t.Errorf("num of messages, want %d, got %d", 2, len(ctx.mc.Messages))
	}
	if len(ctx.mc.Messages) == 0 {
		t.Fatalf("no message sent")
	}
	domains := "example-a.com\nexample-b.com\nshared-example.com"
	expected := mocks.MailerMessage{
		Subject: "Testing: Let's Encrypt certificate expiration notice for domain \"example-a.com\" (and 2 more)",
		Body: fmt.Sprintf(`hi, cert for DNS names %s is going to expire in 2 days (%s)`,
			domains,
			rawCertB.NotAfter.Format(time.DateOnly)),
	}
	expected.To = "one@shared-example.com"
	test.AssertEquals(t, expected, ctx.mc.Messages[0])
	expected.To = "two@shared-example.com"
	test.AssertEquals(t, expected, ctx.mc.Messages[1])
}
func newX509Cert(commonName string, notAfter time.Time, dnsNames []string, serial *big.Int) *x509.Certificate {
return &x509.Certificate{
Subject: pkix.Name{
CommonName: commonName,
},
NotAfter: notAfter,
DNSNames: dnsNames,
SerialNumber: serial,
}
}

304
cmd/id-exporter/main.go Normal file
View File

@ -0,0 +1,304 @@
package notmain
import (
"bufio"
"context"
"encoding/json"
"errors"
"flag"
"fmt"
"os"
"strings"
"time"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/db"
"github.com/letsencrypt/boulder/features"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/sa"
)
// idExporter bundles the dependencies of the id-exporter queries: a logger,
// a database handle, a clock, and the grace period by which already-expired
// certificates are still considered "unexpired" for export purposes.
type idExporter struct {
	log   blog.Logger
	dbMap *db.WrappedMap
	clk   clock.Clock
	grace time.Duration
}
// resultEntry is a JSON marshalable exporter result entry.
type resultEntry struct {
	// ID is exported to support marshaling to JSON.
	ID int64 `json:"id"`

	// Hostname is exported to support marshaling to JSON. Not all queries
	// will fill this field, so its JSON field tag marks it as
	// omittable.
	Hostname string `json:"hostname,omitempty"`
}
// reverseHostname converts (reversed) names sourced from the
// registrations table to standard hostnames, overwriting Hostname in place
// via sa.ReverseName.
func (r *resultEntry) reverseHostname() {
	r.Hostname = sa.ReverseName(r.Hostname)
}
// idExporterResults is passed as a selectable 'holder' for the results
// of id-exporter database queries
type idExporterResults []*resultEntry
// marshalToJSON returns JSON as bytes for all elements of the inner `id`
// slice, terminated by a single trailing newline.
func (i *idExporterResults) marshalToJSON() ([]byte, error) {
	encoded, err := json.Marshal(i)
	if err != nil {
		return nil, err
	}
	return append(encoded, '\n'), nil
}
// writeToFile writes the contents of the inner `ids` slice, as JSON, to
// a file at `outfile`, creating it world-readable (0644).
func (i *idExporterResults) writeToFile(outfile string) error {
	payload, err := i.marshalToJSON()
	if err != nil {
		return err
	}
	return os.WriteFile(outfile, payload, 0644)
}
// findIDs gathers all registration IDs with unexpired certificates.
// "Unexpired" extends back by the configured grace period, and
// registrations with an empty or null contact list are excluded.
func (c idExporter) findIDs(ctx context.Context) (idExporterResults, error) {
	var holder idExporterResults
	_, err := c.dbMap.Select(
		ctx,
		&holder,
		`SELECT DISTINCT r.id
		FROM registrations AS r
			INNER JOIN certificates AS c on c.registrationID = r.id
		WHERE r.contact NOT IN ('[]', 'null')
			AND c.expires >= :expireCutoff;`,
		map[string]interface{}{
			"expireCutoff": c.clk.Now().Add(-c.grace),
		})
	if err != nil {
		c.log.AuditErrf("Error finding IDs: %s", err)
		return nil, err
	}
	return holder, nil
}
// findIDsWithExampleHostnames gathers all registration IDs with
// unexpired certificates and a corresponding example hostname.
// The issuedNames rows store reversed names, so each result is flipped
// back to a standard hostname before returning.
func (c idExporter) findIDsWithExampleHostnames(ctx context.Context) (idExporterResults, error) {
	var holder idExporterResults
	_, err := c.dbMap.Select(
		ctx,
		&holder,
		`SELECT SQL_BIG_RESULT
			cert.registrationID AS id,
			name.reversedName AS hostname
		FROM certificates AS cert
			INNER JOIN issuedNames AS name ON name.serial = cert.serial
		WHERE cert.expires >= :expireCutoff
		GROUP BY cert.registrationID;`,
		map[string]interface{}{
			"expireCutoff": c.clk.Now().Add(-c.grace),
		})
	if err != nil {
		c.log.AuditErrf("Error finding IDs and example hostnames: %s", err)
		return nil, err
	}

	for _, result := range holder {
		result.reverseHostname()
	}
	return holder, nil
}
// findIDsForHostnames gathers all registration IDs with unexpired
// certificates for each `hostnames` entry. Hostnames with no matching rows
// are silently skipped; results accumulate across all hostnames.
func (c idExporter) findIDsForHostnames(ctx context.Context, hostnames []string) (idExporterResults, error) {
	var holder idExporterResults
	for _, hostname := range hostnames {
		// Pass the same list in each time, borp will happily just append to the slice
		// instead of overwriting it each time
		// https://github.com/letsencrypt/borp/blob/c87bd6443d59746a33aca77db34a60cfc344adb2/select.go#L349-L353
		_, err := c.dbMap.Select(
			ctx,
			&holder,
			`SELECT DISTINCT c.registrationID AS id
			FROM certificates AS c
				INNER JOIN issuedNames AS n ON c.serial = n.serial
			WHERE c.expires >= :expireCutoff
				AND n.reversedName = :reversedName;`,
			map[string]interface{}{
				"expireCutoff": c.clk.Now().Add(-c.grace),
				"reversedName": sa.ReverseName(hostname),
			},
		)
		if err != nil {
			if db.IsNoRows(err) {
				continue
			}
			return nil, err
		}
	}

	return holder, nil
}
// usageIntro is printed ahead of the flag defaults by the custom flag.Usage
// defined in main.
const usageIntro = `
Introduction:
The ID exporter exists to retrieve the IDs of all registered
users with currently unexpired certificates. This list of registration IDs can
then be given as input to the notification mailer to send bulk notifications.
The -grace parameter can be used to allow registrations with certificates that
have already expired to be included in the export. The argument is a Go duration
obeying the usual suffix rules (e.g. 24h).
Registration IDs are favoured over email addresses as the intermediate format in
order to ensure the most up to date contact information is used at the time of
notification. The notification mailer will resolve the ID to email(s) when the
mailing is underway, ensuring we use the correct address if a user has updated
their contact information between the time of export and the time of
notification.
By default, the ID exporter's output will be JSON of the form:
[
{ "id": 1 },
...
{ "id": n }
]
Operations that return a hostname will be JSON of the form:
[
{ "id": 1, "hostname": "example-1.com" },
...
{ "id": n, "hostname": "example-n.com" }
]
Examples:
Export all registration IDs with unexpired certificates to "regs.json":
id-exporter -config test/config/id-exporter.json -outfile regs.json
Export all registration IDs with certificates that are unexpired or expired
within the last two days to "regs.json":
id-exporter -config test/config/id-exporter.json -grace 48h -outfile
"regs.json"
Required arguments:
- config
- outfile`
// unmarshalHostnames unmarshals a hostnames file and ensures that the file
// contained at least one entry.
func unmarshalHostnames(filePath string) ([]string, error) {
file, err := os.Open(filePath)
if err != nil {
return nil, err
}
defer file.Close()
scanner := bufio.NewScanner(file)
scanner.Split(bufio.ScanLines)
var hostnames []string
for scanner.Scan() {
line := scanner.Text()
if strings.Contains(line, " ") {
return nil, fmt.Errorf(
"line: %q contains more than one entry, entries must be separated by newlines", line)
}
hostnames = append(hostnames, line)
}
if len(hostnames) == 0 {
return nil, errors.New("provided file contains 0 hostnames")
}
return hostnames, nil
}
// Config holds the id-exporter's JSON configuration. The top-level key is
// "ContactExporter" for historical reasons.
type Config struct {
	ContactExporter struct {
		// DB configures the database connection.
		DB cmd.DBConfig
		cmd.PasswordConfig
		Features features.Config
	}
}
// main is the entry point for id-exporter. It parses flags, loads the JSON
// config, runs one of three queries (IDs for specific hostnames, IDs with an
// example hostname, or all IDs) and writes the JSON result to -outfile.
func main() {
	outFile := flag.String("outfile", "", "File to output results JSON to.")
	grace := flag.Duration("grace", 2*24*time.Hour, "Include results with certificates that expired in < grace ago.")
	hostnamesFile := flag.String(
		"hostnames", "", "Only include results with unexpired certificates that contain hostnames\nlisted (newline separated) in this file.")
	withExampleHostnames := flag.Bool(
		"with-example-hostnames", false, "Include an example hostname for each registration ID with an unexpired certificate.")
	configFile := flag.String("config", "", "File containing a JSON config.")

	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "%s\n\n", usageIntro)
		fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
		flag.PrintDefaults()
	}

	// Parse flags and check required.
	flag.Parse()
	if *outFile == "" || *configFile == "" {
		flag.Usage()
		os.Exit(1)
	}

	log := cmd.NewLogger(cmd.SyslogConfig{StdoutLevel: 7})
	log.Info(cmd.VersionString())

	// Load configuration file.
	configData, err := os.ReadFile(*configFile)
	cmd.FailOnError(err, fmt.Sprintf("Reading %q", *configFile))

	// Unmarshal JSON config file.
	var cfg Config
	err = json.Unmarshal(configData, &cfg)
	cmd.FailOnError(err, "Unmarshaling config")

	features.Set(cfg.ContactExporter.Features)

	dbMap, err := sa.InitWrappedDb(cfg.ContactExporter.DB, nil, log)
	cmd.FailOnError(err, "While initializing dbMap")

	exporter := idExporter{
		log:   log,
		dbMap: dbMap,
		clk:   cmd.Clock(),
		grace: *grace,
	}

	// Choose the query: hostnames file takes precedence, then example
	// hostnames, then the plain ID export.
	var results idExporterResults
	if *hostnamesFile != "" {
		hostnames, err := unmarshalHostnames(*hostnamesFile)
		cmd.FailOnError(err, "Problem unmarshalling hostnames")

		results, err = exporter.findIDsForHostnames(context.TODO(), hostnames)
		cmd.FailOnError(err, "Could not find IDs for hostnames")

	} else if *withExampleHostnames {
		results, err = exporter.findIDsWithExampleHostnames(context.TODO())
		cmd.FailOnError(err, "Could not find IDs with hostnames")

	} else {
		results, err = exporter.findIDs(context.TODO())
		cmd.FailOnError(err, "Could not find IDs")
	}

	err = results.writeToFile(*outFile)
	cmd.FailOnError(err, fmt.Sprintf("Could not write result to outfile %q", *outFile))
}
// init registers this command so the boulder dispatcher can invoke it as
// "id-exporter", validating its config against the Config struct.
func init() {
	cmd.RegisterCommand("id-exporter", main, &cmd.ConfigValidator{Config: &Config{}})
}

View File

@ -0,0 +1,486 @@
package notmain
import (
"context"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/base64"
"fmt"
"math/big"
"net"
"os"
"testing"
"time"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/sa"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/test"
isa "github.com/letsencrypt/boulder/test/inmem/sa"
"github.com/letsencrypt/boulder/test/vars"
)
var (
regA *corepb.Registration
regB *corepb.Registration
regC *corepb.Registration
regD *corepb.Registration
)
const (
emailARaw = "test@example.com"
emailBRaw = "example@example.com"
emailCRaw = "test-example@example.com"
telNum = "666-666-7777"
)
// TestFindIDs checks that findIDs returns exactly the registration IDs that
// hold unexpired certificates, and that setting a grace period widens the
// window to include registrations whose certificates expired recently.
func TestFindIDs(t *testing.T) {
	ctx := context.Background()
	testCtx := setup(t)
	defer testCtx.cleanUp()

	// Add some test registrations
	testCtx.addRegistrations(t)

	// Run findIDs - since no certificates have been added corresponding to
	// the above registrations, no IDs should be found.
	results, err := testCtx.c.findIDs(ctx)
	test.AssertNotError(t, err, "findIDs() produced error")
	test.AssertEquals(t, len(results), 0)

	// Now add some certificates
	testCtx.addCertificates(t)

	// Run findIDs - since there are three registrations with unexpired certs
	// we should get exactly three IDs back: RegA, RegC and RegD. RegB should
	// *not* be present since their certificate has already expired. Unlike
	// previous versions of this test RegD is not filtered out for having a `tel:`
	// contact field anymore - this is the duty of the notify-mailer.
	results, err = testCtx.c.findIDs(ctx)
	test.AssertNotError(t, err, "findIDs() produced error")
	test.AssertEquals(t, len(results), 3)
	for _, entry := range results {
		switch entry.ID {
		case regA.Id:
		case regC.Id:
		case regD.Id:
		default:
			t.Errorf("ID: %d not expected", entry.ID)
		}
	}

	// Allow a 1 year grace period
	testCtx.c.grace = 360 * 24 * time.Hour
	results, err = testCtx.c.findIDs(ctx)
	test.AssertNotError(t, err, "findIDs() produced error")
	// Now all four registrations should be returned, including RegB since its
	// certificate expired within the grace period. Assert the length so an
	// empty result set can't silently pass the switch loop below (previously
	// this assertion was missing).
	test.AssertEquals(t, len(results), 4)
	for _, entry := range results {
		switch entry.ID {
		case regA.Id:
		case regB.Id:
		case regC.Id:
		case regD.Id:
		default:
			t.Errorf("ID: %d not expected", entry.ID)
		}
	}
}
// TestFindIDsWithExampleHostnames checks that findIDsWithExampleHostnames
// pairs each returned registration ID with one of its certificate hostnames,
// both with and without a grace period for recently-expired certificates.
func TestFindIDsWithExampleHostnames(t *testing.T) {
	ctx := context.Background()
	testCtx := setup(t)
	defer testCtx.cleanUp()

	// Insert the test registrations.
	testCtx.addRegistrations(t)

	// With no certificates on file, no IDs should be found.
	results, err := testCtx.c.findIDsWithExampleHostnames(ctx)
	test.AssertNotError(t, err, "findIDs() produced error")
	test.AssertEquals(t, len(results), 0)

	// Insert the test certificates.
	testCtx.addCertificates(t)

	// Three registrations (RegA, RegC, RegD) hold unexpired certs, so exactly
	// three IDs should come back, each paired with its hostname. RegB's cert
	// has already expired and must be absent.
	expectedHostnames := map[int64]string{
		regA.Id: "example-a.com",
		regC.Id: "example-c.com",
		regD.Id: "example-d.com",
	}
	results, err = testCtx.c.findIDsWithExampleHostnames(ctx)
	test.AssertNotError(t, err, "findIDs() produced error")
	test.AssertEquals(t, len(results), 3)
	for _, entry := range results {
		hostname, expected := expectedHostnames[entry.ID]
		if !expected {
			t.Errorf("ID: %d not expected", entry.ID)
			continue
		}
		test.AssertEquals(t, entry.Hostname, hostname)
	}

	// With a 1 year grace period, RegB's recently-expired certificate counts
	// too, so all four registrations should be returned.
	testCtx.c.grace = 360 * 24 * time.Hour
	expectedHostnames[regB.Id] = "example-b.com"
	results, err = testCtx.c.findIDsWithExampleHostnames(ctx)
	test.AssertNotError(t, err, "findIDs() produced error")
	test.AssertEquals(t, len(results), 4)
	for _, entry := range results {
		hostname, expected := expectedHostnames[entry.ID]
		if !expected {
			t.Errorf("ID: %d not expected", entry.ID)
			continue
		}
		test.AssertEquals(t, entry.Hostname, hostname)
	}
}
// TestFindIDsForHostnames checks that findIDsForHostnames returns only the
// registration IDs holding unexpired certificates for the queried hostnames.
func TestFindIDsForHostnames(t *testing.T) {
	ctx := context.Background()
	testCtx := setup(t)
	defer testCtx.cleanUp()

	// Insert the test registrations.
	testCtx.addRegistrations(t)

	queried := []string{"example-a.com", "example-b.com", "example-c.com", "example-d.com"}

	// With no certificates on file, no IDs should be found.
	results, err := testCtx.c.findIDsForHostnames(ctx, queried)
	test.AssertNotError(t, err, "findIDs() produced error")
	test.AssertEquals(t, len(results), 0)

	// Insert the test certificates.
	testCtx.addCertificates(t)

	// Only RegA, RegC and RegD hold unexpired certificates for the queried
	// hostnames; RegB's certificate has already expired.
	wanted := map[int64]bool{regA.Id: true, regC.Id: true, regD.Id: true}
	results, err = testCtx.c.findIDsForHostnames(ctx, queried)
	test.AssertNotError(t, err, "findIDsForHostnames() failed")
	test.AssertEquals(t, len(results), 3)
	for _, entry := range results {
		if !wanted[entry.ID] {
			t.Errorf("ID: %d not expected", entry.ID)
		}
	}
}
// TestWriteToFile checks that idExporterResults.writeToFile serializes the
// results as one line of JSON followed by a trailing newline.
func TestWriteToFile(t *testing.T) {
	expected := `[{"id":1},{"id":2},{"id":3}]`
	mockResults := idExporterResults{{ID: 1}, {ID: 2}, {ID: 3}}
	f, err := os.CreateTemp(os.TempDir(), "ids_test")
	test.AssertNotError(t, err, "os.CreateTemp produced an error")
	// Close the handle (writeToFile reopens by name) and remove the temp file
	// when the test ends; previously both the handle and the file leaked.
	f.Close()
	defer os.Remove(f.Name())

	// Writing the result to an outFile should produce the correct results
	err = mockResults.writeToFile(f.Name())
	test.AssertNotError(t, err, fmt.Sprintf("writeIDs produced an error writing to %s", f.Name()))

	contents, err := os.ReadFile(f.Name())
	test.AssertNotError(t, err, fmt.Sprintf("os.ReadFile produced an error reading from %s", f.Name()))
	test.AssertEquals(t, string(contents), expected+"\n")
}
// Test_unmarshalHostnames exercises unmarshalHostnames against a missing
// file, an empty file, well-formed one/two-hostname files, and a malformed
// file with space-separated hostnames on one line.
func Test_unmarshalHostnames(t *testing.T) {
	testFile, err := os.CreateTemp(os.TempDir(), "ids_test")
	test.AssertNotError(t, err, "os.CreateTemp produced an error")
	// Close the handle and remove the temp file when the test ends;
	// previously both the handle and the file leaked.
	testFile.Close()
	defer os.Remove(testFile.Name())

	// Non-existent hostnamesFile
	_, err = unmarshalHostnames("file_does_not_exist")
	test.AssertError(t, err, "expected error for non-existent file")

	// Empty hostnamesFile
	err = os.WriteFile(testFile.Name(), []byte(""), 0644)
	test.AssertNotError(t, err, "os.WriteFile produced an error")
	_, err = unmarshalHostnames(testFile.Name())
	test.AssertError(t, err, "expected error for file containing 0 entries")

	// One hostname present in the hostnamesFile
	err = os.WriteFile(testFile.Name(), []byte("example-a.com"), 0644)
	test.AssertNotError(t, err, "os.WriteFile produced an error")
	results, err := unmarshalHostnames(testFile.Name())
	test.AssertNotError(t, err, "error when unmarshalling hostnamesFile with a single hostname")
	test.AssertEquals(t, len(results), 1)

	// Two hostnames present in the hostnamesFile
	err = os.WriteFile(testFile.Name(), []byte("example-a.com\nexample-b.com"), 0644)
	test.AssertNotError(t, err, "os.WriteFile produced an error")
	results, err = unmarshalHostnames(testFile.Name())
	test.AssertNotError(t, err, "error when unmarshalling hostnamesFile with a two hostnames")
	test.AssertEquals(t, len(results), 2)

	// Three hostnames present in the hostnamesFile but two are separated only by a space
	err = os.WriteFile(testFile.Name(), []byte("example-a.com\nexample-b.com example-c.com"), 0644)
	test.AssertNotError(t, err, "os.WriteFile produced an error")
	_, err = unmarshalHostnames(testFile.Name())
	test.AssertError(t, err, "error when unmarshalling hostnamesFile with three space separated domains")
}
// testCtx bundles the fixtures shared by the id-exporter tests: the exporter
// under test, a storage-authority client used to insert test registrations,
// and a function that resets the test database.
type testCtx struct {
	c       idExporter                   // exporter under test, wired to the test DB
	ssa     sapb.StorageAuthorityClient  // used by addRegistrations to insert rows
	cleanUp func()                       // resets the Boulder test database
}
// addRegistrations inserts four test registrations through the storage
// authority and stores the stored rows in the package-level regA..regD
// variables for later assertions. Regs A-C use `mailto:` contact URLs; RegD
// uses a `tel:` contact URL.
func (tc testCtx) addRegistrations(t *testing.T) {
	emailA := "mailto:" + emailARaw
	emailB := "mailto:" + emailBRaw
	emailC := "mailto:" + emailCRaw
	tel := "tel:" + telNum

	// Every registration needs a unique JOSE key
	jsonKeyA := []byte(`{
  "kty":"RSA",
  "n":"0vx7agoebGcQSuuPiLJXZptN9nndrQmbXEps2aiAFbWhM78LhWx4cbbfAAtVT86zwu1RK7aPFFxuhDR1L6tSoc_BJECPebWKRXjBZCiFV4n3oknjhMstn64tZ_2W-5JsGY4Hc5n9yBXArwl93lqt7_RN5w6Cf0h4QyQ5v-65YGjQR0_FDW2QvzqY368QQMicAtaSqzs8KJZgnYb9c7d0zgdAZHzu6qMQvRL5hajrn1n91CbOpbISD08qNLyrdkt-bFTWhAI4vMQFh6WeZu0fM4lFd2NcRwr3XPksINHaQ-G_xBniIqbw0Ls1jF44-csFCur-kEgU8awapJzKnqDKgw",
  "e":"AQAB"
}`)
	jsonKeyB := []byte(`{
  "kty":"RSA",
  "n":"z8bp-jPtHt4lKBqepeKF28g_QAEOuEsCIou6sZ9ndsQsEjxEOQxQ0xNOQezsKa63eogw8YS3vzjUcPP5BJuVzfPfGd5NVUdT-vSSwxk3wvk_jtNqhrpcoG0elRPQfMVsQWmxCAXCVRz3xbcFI8GTe-syynG3l-g1IzYIIZVNI6jdljCZML1HOMTTW4f7uJJ8mM-08oQCeHbr5ejK7O2yMSSYxW03zY-Tj1iVEebROeMv6IEEJNFSS4yM-hLpNAqVuQxFGetwtwjDMC1Drs1dTWrPuUAAjKGrP151z1_dE74M5evpAhZUmpKv1hY-x85DC6N0hFPgowsanmTNNiV75w",
  "e":"AAEAAQ"
}`)
	jsonKeyC := []byte(`{
  "kty":"RSA",
  "n":"rFH5kUBZrlPj73epjJjyCxzVzZuV--JjKgapoqm9pOuOt20BUTdHqVfC2oDclqM7HFhkkX9OSJMTHgZ7WaVqZv9u1X2yjdx9oVmMLuspX7EytW_ZKDZSzL-sCOFCuQAuYKkLbsdcA3eHBK_lwc4zwdeHFMKIulNvLqckkqYB9s8GpgNXBDIQ8GjR5HuJke_WUNjYHSd8jY1LU9swKWsLQe2YoQUz_ekQvBvBCoaFEtrtRaSJKNLIVDObXFr2TLIiFiM0Em90kK01-eQ7ZiruZTKomll64bRFPoNo4_uwubddg3xTqur2vdF3NyhTrYdvAgTem4uC0PFjEQ1bK_djBQ",
  "e":"AQAB"
}`)
	jsonKeyD := []byte(`{
  "kty":"RSA",
  "n":"rFH5kUBZrlPj73epjJjyCxzVzZuV--JjKgapoqm9pOuOt20BUTdHqVfC2oDclqM7HFhkkX9OSJMTHgZ7WaVqZv9u1X2yjdx9oVmMLuspX7EytW_ZKDZSzL-FCOFCuQAuYKkLbsdcA3eHBK_lwc4zwdeHFMKIulNvLqckkqYB9s8GpgNXBDIQ8GjR5HuJke_WUNjYHSd8jY1LU9swKWsLQe2YoQUz_ekQvBvBCoaFEtrtRaSJKNLIVDObXFr2TLIiFiM0Em90kK01-eQ7ZiruZTKomll64bRFPoNo4_uwubddg3xTqur2vdF3NyhTrYdvAgTem4uC0PFjEQ1bK_djBQ",
  "e":"AQAB"
}`)

	initialIP, err := net.ParseIP("127.0.0.1").MarshalText()
	test.AssertNotError(t, err, "Couldn't create initialIP")

	// Regs A through C have `mailto:` contact ACME URL's
	regA = &corepb.Registration{
		Id:        1,
		Contact:   []string{emailA},
		Key:       jsonKeyA,
		InitialIP: initialIP,
	}
	regB = &corepb.Registration{
		Id:        2,
		Contact:   []string{emailB},
		Key:       jsonKeyB,
		InitialIP: initialIP,
	}
	regC = &corepb.Registration{
		Id:        3,
		Contact:   []string{emailC},
		Key:       jsonKeyC,
		InitialIP: initialIP,
	}
	// Reg D has a `tel:` contact ACME URL
	regD = &corepb.Registration{
		Id:        4,
		Contact:   []string{tel},
		Key:       jsonKeyD,
		InitialIP: initialIP,
	}

	// Add the four test registrations. NewRegistration returns the stored
	// row, so the globals are re-assigned to the canonical stored values.
	ctx := context.Background()
	regA, err = tc.ssa.NewRegistration(ctx, regA)
	test.AssertNotError(t, err, "Couldn't store regA")
	regB, err = tc.ssa.NewRegistration(ctx, regB)
	test.AssertNotError(t, err, "Couldn't store regB")
	regC, err = tc.ssa.NewRegistration(ctx, regC)
	test.AssertNotError(t, err, "Couldn't store regC")
	regD, err = tc.ssa.NewRegistration(ctx, regD)
	test.AssertNotError(t, err, "Couldn't store regD")
}
// addCertificates inserts one self-signed certificate (plus a matching
// issuedNames row) for each test registration: RegA, RegC and RegD get certs
// expiring 30 days after the fake clock's epoch, while RegB's cert expired
// 30 days before it. The four previously copy-pasted stanzas are deduplicated
// into a single helper closure, which also checks the certificate-creation
// error that was previously ignored.
func (tc testCtx) addCertificates(t *testing.T) {
	ctx := context.Background()

	// Deterministic RSA key used to sign every test certificate.
	n := bigIntFromB64("n4EPtAOCc9AlkeQHPzHStgAbgs7bTZLwUBZdR8_KuKPEHLd4rHVTeT-O-XV2jRojdNhxJWTDvNd7nqQ0VEiZQHz_AJmSCpMaJMRBSFKrKb2wqVwGU_NsYOYL-QtiWN2lbzcEe6XC0dApr5ydQLrHqkHHig3RBordaZ6Aj-oBHqFEHYpPe7Tpe-OfVfHd1E6cS6M1FZcD1NNLYD5lFHpPI9bTwJlsde3uhGqC0ZCuEHg8lhzwOHrtIQbS0FVbb9k3-tVTU4fg_3L_vniUFAKwuCLqKnS2BYwdq_mzSnbLY7h_qixoR7jig3__kRhuaxwUkRz5iaiQkqgc5gHdrNP5zw==")
	e := intFromB64("AQAB")
	d := bigIntFromB64("bWUC9B-EFRIo8kpGfh0ZuyGPvMNKvYWNtB_ikiH9k20eT-O1q_I78eiZkpXxXQ0UTEs2LsNRS-8uJbvQ-A1irkwMSMkK1J3XTGgdrhCku9gRldY7sNA_AKZGh-Q661_42rINLRCe8W-nZ34ui_qOfkLnK9QWDDqpaIsA-bMwWWSDFu2MUBYwkHTMEzLYGqOe04noqeq1hExBTHBOBdkMXiuFhUq1BU6l-DqEiWxqg82sXt2h-LMnT3046AOYJoRioz75tSUQfGCshWTBnP5uDjd18kKhyv07lhfSJdrPdM5Plyl21hsFf4L_mHCuoFau7gdsPfHPxxjVOcOpBrQzwQ==")
	p := bigIntFromB64("uKE2dh-cTf6ERF4k4e_jy78GfPYUIaUyoSSJuBzp3Cubk3OCqs6grT8bR_cu0Dm1MZwWmtdqDyI95HrUeq3MP15vMMON8lHTeZu2lmKvwqW7anV5UzhM1iZ7z4yMkuUwFWoBvyY898EXvRD-hdqRxHlSqAZ192zB3pVFJ0s7pFc=")
	q := bigIntFromB64("uKE2dh-cTf6ERF4k4e_jy78GfPYUIaUyoSSJuBzp3Cubk3OCqs6grT8bR_cu0Dm1MZwWmtdqDyI95HrUeq3MP15vMMON8lHTeZu2lmKvwqW7anV5UzhM1iZ7z4yMkuUwFWoBvyY898EXvRD-hdqRxHlSqAZ192zB3pVFJ0s7pFc=")
	testKey := rsa.PrivateKey{
		PublicKey: rsa.PublicKey{N: n, E: e},
		D:         d,
		Primes:    []*big.Int{p, q},
	}

	fc := clock.NewFake()

	// addCert creates a self-signed certificate for the given registration and
	// inserts it, plus its issuedNames row, into the test database.
	addCert := func(regID int64, serial *big.Int, commonName, dnsName, reversedName string, notAfter time.Time) {
		t.Helper()
		rawCert := x509.Certificate{
			Subject: pkix.Name{
				CommonName: commonName,
			},
			NotAfter:     notAfter,
			DNSNames:     []string{dnsName},
			SerialNumber: serial,
		}
		certDer, err := x509.CreateCertificate(rand.Reader, &rawCert, &rawCert, &testKey.PublicKey, &testKey)
		test.AssertNotError(t, err, fmt.Sprintf("Couldn't create certificate for %s", dnsName))

		serialString := core.SerialToString(serial)
		err = tc.c.dbMap.Insert(ctx, &core.Certificate{
			RegistrationID: regID,
			Serial:         serialString,
			Expires:        rawCert.NotAfter,
			DER:            certDer,
		})
		test.AssertNotError(t, err, fmt.Sprintf("Couldn't add cert for %s", dnsName))

		_, err = tc.c.dbMap.ExecContext(
			ctx,
			"INSERT INTO issuedNames (reversedName, serial, notBefore) VALUES (?,?,0)",
			reversedName,
			serialString,
		)
		test.AssertNotError(t, err, fmt.Sprintf("Couldn't add issued name for %s", dnsName))
	}

	// One cert for RegA that expires in 30 days.
	addCert(regA.Id, big.NewInt(1336), "happy A", "example-a.com", "com.example-a", fc.Now().Add(30*24*time.Hour))
	// One cert for RegB that already expired 30 days ago.
	addCert(regB.Id, big.NewInt(1337), "happy B", "example-b.com", "com.example-b", fc.Now().Add(-30*24*time.Hour))
	// One cert for RegC that expires in 30 days.
	addCert(regC.Id, big.NewInt(1338), "happy C", "example-c.com", "com.example-c", fc.Now().Add(30*24*time.Hour))
	// One cert for RegD that expires in 30 days.
	addCert(regD.Id, big.NewInt(1339), "happy D", "example-d.com", "com.example-d", fc.Now().Add(30*24*time.Hour))
}
// setup connects to the Boulder test database, resets it, and returns a
// testCtx holding an idExporter wired to that database, an in-memory storage
// authority client for inserting fixtures, and the database cleanup function.
func setup(t *testing.T) testCtx {
	log := blog.UseMock()
	fc := clock.NewFake()

	// Using DBConnSAFullPerms to be able to insert registrations and certificates
	dbMap, err := sa.DBMapForTest(vars.DBConnSAFullPerms)
	if err != nil {
		t.Fatalf("Couldn't connect the database: %s", err)
	}
	cleanUp := test.ResetBoulderTestDatabase(t)

	ssa, err := sa.NewSQLStorageAuthority(dbMap, dbMap, nil, 1, 0, fc, log, metrics.NoopRegisterer)
	if err != nil {
		t.Fatalf("unable to create SQLStorageAuthority: %s", err)
	}

	return testCtx{
		c: idExporter{
			dbMap: dbMap,
			log:   log,
			clk:   fc,
		},
		ssa:     isa.SA{Impl: ssa},
		cleanUp: cleanUp,
	}
}
// bigIntFromB64 decodes a padded base64url string and interprets the decoded
// bytes as a big-endian unsigned integer. Decode errors are ignored (matching
// the original test-helper behavior); malformed input yields a value built
// from whatever bytes were decoded, typically zero.
func bigIntFromB64(b64 string) *big.Int {
	decoded, _ := base64.URLEncoding.DecodeString(b64)
	return new(big.Int).SetBytes(decoded)
}
// intFromB64 decodes a padded base64url big-endian integer and narrows it to
// a native int via int64 truncation.
func intFromB64(b64 string) int {
	value := bigIntFromB64(b64)
	return int(value.Int64())
}

View File

@ -5,7 +5,6 @@ import (
"flag"
"fmt"
"net"
"net/netip"
"os"
"github.com/letsencrypt/boulder/cmd"
@ -20,20 +19,30 @@ type Config struct {
MaxUsed int
// NonceHMACKey is a path to a file containing an HMAC key which is a
// secret used for deriving the prefix of each nonce instance. It should
// contain 256 bits (32 bytes) of random data to be suitable as an
// HMAC-SHA256 key (e.g. the output of `openssl rand -hex 32`). In a
// multi-DC deployment this value should be the same across all
// boulder-wfe and nonce-service instances.
NonceHMACKey cmd.HMACKeyConfig `validate:"required"`
// UseDerivablePrefix indicates whether to use a nonce prefix derived
// from the gRPC listening address. If this is false, the nonce prefix
// will be the value of the NoncePrefix field. If this is true, the
// NoncePrefixKey field is required.
// TODO(#6610): Remove this.
//
// Deprecated: this value is ignored, and treated as though it is always true.
UseDerivablePrefix bool `validate:"-"`
// NoncePrefixKey is a secret used for deriving the prefix of each nonce
// instance. It should contain 256 bits (32 bytes) of random data to be
// suitable as an HMAC-SHA256 key (e.g. the output of `openssl rand -hex
// 32`). In a multi-DC deployment this value should be the same across
// all boulder-wfe and nonce-service instances.
//
// TODO(#7632) Update this to use the new HMACKeyConfig.
NoncePrefixKey cmd.PasswordConfig `validate:"required"`
Syslog cmd.SyslogConfig
OpenTelemetry cmd.OpenTelemetryConfig
}
}
func derivePrefix(key []byte, grpcAddr string) (string, error) {
func derivePrefix(key string, grpcAddr string) (string, error) {
host, port, err := net.SplitHostPort(grpcAddr)
if err != nil {
return "", fmt.Errorf("parsing gRPC listen address: %w", err)
@ -42,8 +51,8 @@ func derivePrefix(key []byte, grpcAddr string) (string, error) {
return "", fmt.Errorf("nonce service gRPC address must include an IP address: got %q", grpcAddr)
}
if host != "" && port != "" {
hostIP, err := netip.ParseAddr(host)
if err != nil {
hostIP := net.ParseIP(host)
if hostIP == nil {
return "", fmt.Errorf("gRPC address host part was not an IP address")
}
if hostIP.IsUnspecified() {
@ -75,9 +84,12 @@ func main() {
c.NonceService.DebugAddr = *debugAddr
}
key, err := c.NonceService.NonceHMACKey.Load()
cmd.FailOnError(err, "Failed to load nonceHMACKey file.")
if c.NonceService.NoncePrefixKey.PasswordFile == "" {
cmd.Fail("NoncePrefixKey PasswordFile must be set")
}
key, err := c.NonceService.NoncePrefixKey.Pass()
cmd.FailOnError(err, "Failed to load 'noncePrefixKey' file.")
noncePrefix, err := derivePrefix(key, c.NonceService.GRPC.Address)
cmd.FailOnError(err, "Failed to derive nonce prefix")

619
cmd/notify-mailer/main.go Normal file
View File

@ -0,0 +1,619 @@
package notmain
import (
"context"
"encoding/csv"
"encoding/json"
"errors"
"flag"
"fmt"
"io"
"net/mail"
"os"
"sort"
"strconv"
"strings"
"sync"
"text/template"
"time"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/db"
blog "github.com/letsencrypt/boulder/log"
bmail "github.com/letsencrypt/boulder/mail"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/policy"
"github.com/letsencrypt/boulder/sa"
)
// mailer coordinates one bulk notification run: it resolves the recipient
// registration IDs to contact addresses, renders the message template per
// address, and sends the result over one or more SMTP connections.
type mailer struct {
	clk           clock.Clock
	log           blog.Logger
	dbMap         dbSelector         // read access to the registrations table
	mailer        bmail.Mailer       // transport used to open SMTP connections
	subject       string             // Subject header for every outgoing message
	emailTemplate *template.Template // message body template, executed per address
	recipients    []recipient        // parsed rows from the recipient list file
	targetRange   interval           // alphabetical window of addresses to send to
	sleepInterval time.Duration      // pause between individual sends
	parallelSends uint               // number of concurrent SMTP sender goroutines
}
// interval defines a range of email addresses to send to in alphabetical order.
// The `start` field is inclusive and the `end` field is exclusive. To include
// everything, set `end` to \xFF.
type interval struct {
	start string // inclusive lower bound
	end   string // exclusive upper bound
}

// contactQueryResult is a receiver for queries to the `registrations` table.
type contactQueryResult struct {
	// ID is exported to receive the value of `id`.
	ID int64

	// Contact is exported to receive the value of `contact` (a raw JSON
	// array of contact URLs).
	Contact []byte
}
// ok reports whether the interval is well-formed, i.e. its start does not
// sort after its end.
func (i *interval) ok() error {
	if i.start <= i.end {
		return nil
	}
	return fmt.Errorf("interval start value (%s) is greater than end value (%s)",
		i.start, i.end)
}
// includes reports whether s falls inside the half-open range [start, end).
func (i *interval) includes(s string) bool {
	if s < i.start {
		return false
	}
	return s < i.end
}
// ok ensures that both the `targetRange` and `sleepInterval` are valid,
// returning the first validation error encountered.
func (m *mailer) ok() error {
	if err := m.targetRange.ok(); err != nil {
		return err
	}
	if m.sleepInterval < 0 {
		return fmt.Errorf(
			"sleep interval (%d) is < 0", m.sleepInterval)
	}
	return nil
}
// logStatus emits a progress line for the message currently being sent:
// its position in the run, the destination address, percent complete, and
// time elapsed since the run started. If the counters are inconsistent it
// audit-logs the anomaly but still emits the progress line (and computes a
// possibly nonsensical percentage).
func (m *mailer) logStatus(to string, current, total int, start time.Time) {
	// Should never happen.
	if total <= 0 || current < 1 || current > total {
		m.log.AuditErrf("Invalid current (%d) or total (%d)", current, total)
	}
	completion := (float32(current) / float32(total)) * 100
	now := m.clk.Now()
	elapsed := now.Sub(start)
	m.log.Infof("Sending message (%d) of (%d) to address (%s) [%.2f%%] time elapsed (%s)",
		current, total, to, completion, elapsed)
}
// sortAddresses returns the keys of the given address map in ascending
// alphabetical order.
func sortAddresses(input addressToRecipientMap) []string {
	var sorted []string
	for addr := range input {
		sorted = append(sorted, addr)
	}
	sort.Strings(sorted)
	return sorted
}
// makeMessageBody is a helper for mailer.run() that's split out for the
// purposes of testing. It executes the configured template against the given
// recipients and rejects an empty rendering.
func (m *mailer) makeMessageBody(recipients []recipient) (string, error) {
	var rendered strings.Builder
	if err := m.emailTemplate.Execute(&rendered, recipients); err != nil {
		return "", err
	}
	body := rendered.String()
	if body == "" {
		return "", errors.New("templating resulted in an empty message body")
	}
	return body, nil
}
// run performs the full mailing run: it validates the mailer configuration,
// resolves recipient IDs to a deduplicated set of email addresses, then fans
// the sorted address list out to one or more parallel SMTP sender goroutines,
// honoring the configured target range and inter-message sleep. Individual
// send failures are logged but do not abort the run.
func (m *mailer) run(ctx context.Context) error {
	err := m.ok()
	if err != nil {
		return err
	}

	totalRecipients := len(m.recipients)
	m.log.Infof("Resolving addresses for (%d) recipients", totalRecipients)

	addressToRecipient, err := m.resolveAddresses(ctx)
	if err != nil {
		return err
	}

	totalAddresses := len(addressToRecipient)
	if totalAddresses == 0 {
		return errors.New("0 recipients remained after resolving addresses")
	}

	m.log.Infof("%d recipients were resolved to %d addresses", totalRecipients, totalAddresses)

	// Informational only: report the address shared by the most recipients.
	var mostRecipients string
	var mostRecipientsLen int
	for k, v := range addressToRecipient {
		if len(v) > mostRecipientsLen {
			mostRecipientsLen = len(v)
			mostRecipients = k
		}
	}

	m.log.Infof("Address %q was associated with the most recipients (%d)",
		mostRecipients, mostRecipientsLen)

	// work is one unit handed to a sender goroutine: an address plus its
	// position in the sorted list (the index is used only for logging).
	type work struct {
		index   int
		address string
	}

	var wg sync.WaitGroup
	workChan := make(chan work, totalAddresses)

	startTime := m.clk.Now()
	sortedAddresses := sortAddresses(addressToRecipient)

	// Fail fast if the configured target range cannot match any address.
	if (m.targetRange.start != "" && m.targetRange.start > sortedAddresses[totalAddresses-1]) ||
		(m.targetRange.end != "" && m.targetRange.end < sortedAddresses[0]) {
		return errors.New("Zero found addresses fall inside target range")
	}

	// Producer: enqueue every address, then close the channel so the sender
	// goroutines' range loops terminate.
	go func(ch chan<- work) {
		for i, address := range sortedAddresses {
			ch <- work{i, address}
		}
		close(workChan)
	}(workChan)

	if m.parallelSends < 1 {
		m.parallelSends = 1
	}

	for senderNum := uint(0); senderNum < m.parallelSends; senderNum++ {
		// For politeness' sake, don't open more than 1 new connection per
		// second.
		if senderNum > 0 {
			m.clk.Sleep(time.Second)
		}

		conn, err := m.mailer.Connect()
		if err != nil {
			return fmt.Errorf("connecting parallel sender %d: %w", senderNum, err)
		}

		wg.Add(1)
		go func(conn bmail.Conn, ch <-chan work) {
			defer wg.Done()
			for w := range ch {
				// Skip addresses outside the configured alphabetical window.
				if !m.targetRange.includes(w.address) {
					m.log.Debugf("Address %q is outside of target range, skipping", w.address)
					continue
				}

				err := policy.ValidEmail(w.address)
				if err != nil {
					m.log.Infof("Skipping %q due to policy violation: %s", w.address, err)
					continue
				}

				recipients := addressToRecipient[w.address]
				m.logStatus(w.address, w.index+1, totalAddresses, startTime)

				messageBody, err := m.makeMessageBody(recipients)
				if err != nil {
					m.log.Errf("Skipping %q due to templating error: %s", w.address, err)
					continue
				}

				err = conn.SendMail([]string{w.address}, m.subject, messageBody)
				if err != nil {
					var badAddrErr bmail.BadAddressSMTPError
					if errors.As(err, &badAddrErr) {
						m.log.Errf("address %q was rejected by server: %s", w.address, err)
						continue
					}
					// Any other send error is audit-logged, but the run
					// continues with the next address.
					m.log.AuditErrf("while sending mail (%d) of (%d) to address %q: %s",
						w.index, len(sortedAddresses), w.address, err)
				}

				m.clk.Sleep(m.sleepInterval)
			}
			conn.Close()
		}(conn, workChan)
	}
	wg.Wait()

	return nil
}
// resolveAddresses creates a mapping of email addresses to (a list of)
// `recipient`s that resolve to that email address. Unparsable addresses are
// logged and skipped; database errors abort the resolution.
func (m *mailer) resolveAddresses(ctx context.Context) (addressToRecipientMap, error) {
	resolved := make(addressToRecipientMap, len(m.recipients))
	for _, r := range m.recipients {
		emails, err := getAddressForID(ctx, r.id, m.dbMap)
		if err != nil {
			return nil, err
		}
		for _, email := range emails {
			parsed, err := mail.ParseAddress(email)
			if err != nil {
				m.log.Errf("Unparsable address %q, skipping ID (%d)", email, r.id)
				continue
			}
			resolved[parsed.Address] = append(resolved[parsed.Address], r)
		}
	}
	return resolved, nil
}
// dbSelector abstracts over a subset of methods from `borp.DbMap` objects to
// facilitate mocking in unit tests. Only single-row selects are needed by
// this tool.
type dbSelector interface {
	SelectOne(ctx context.Context, holder interface{}, query string, args ...interface{}) error
}
// getAddressForID queries the database for the email address associated with
// the provided registration ID. Registrations without contacts resolve to an
// empty (non-nil) slice; only `mailto:` contact URLs are returned, stripped
// of their prefix.
func getAddressForID(ctx context.Context, id int64, dbMap dbSelector) ([]string, error) {
	var row contactQueryResult
	err := dbMap.SelectOne(ctx, &row,
		`SELECT id,
			contact
		FROM registrations
		WHERE contact NOT IN ('[]', 'null')
			AND id = :id;`,
		map[string]interface{}{"id": id})
	if err != nil {
		if db.IsNoRows(err) {
			// No contacts on file is not an error, just an empty result.
			return []string{}, nil
		}
		return nil, err
	}

	var contactURLs []string
	err = json.Unmarshal(row.Contact, &contactURLs)
	if err != nil {
		return nil, err
	}

	// Only `mailto:` contact URLs carry an email address.
	var addresses []string
	for _, contactURL := range contactURLs {
		if strings.HasPrefix(contactURL, "mailto:") {
			addresses = append(addresses, strings.TrimPrefix(contactURL, "mailto:"))
		}
	}
	return addresses, nil
}
// recipient represents a single record from the recipient list file. The 'id'
// column is parsed to the 'id' field, all additional data will be parsed to a
// mapping of column name to value in the 'Data' field. Please inform SRE if you
// make any changes to the exported fields of this struct. These fields are
// referenced in operationally critical e-mail templates used to notify
// subscribers during incident response.
type recipient struct {
	// id is the subscriber's ID.
	id int64

	// Data is a mapping of column name to value parsed from a single record in
	// the provided recipient list file. It's exported so the contents can be
	// accessed by the template package. Please inform SRE if you make any
	// changes to this field.
	Data map[string]string
}

// addressToRecipientMap maps email addresses to a list of `recipient`s that
// resolve to that email address.
type addressToRecipientMap map[string][]recipient
// readRecipientsList parses the contents of a recipient list file into a list
// of `recipient` objects. It returns the parsed recipients, a human-readable
// summary of problem records (records with empty columns, records skipped as
// duplicate IDs), and any fatal parse error.
func readRecipientsList(filename string, delimiter rune) ([]recipient, string, error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, "", err
	}
	// Close the file on all return paths; the handle was previously leaked.
	defer f.Close()

	reader := csv.NewReader(f)
	reader.Comma = delimiter

	// Parse header.
	record, err := reader.Read()
	if err != nil {
		return nil, "", fmt.Errorf("failed to parse header: %w", err)
	}
	if record[0] != "id" {
		return nil, "", errors.New("header must begin with \"id\"")
	}

	// Collect the names of each header column after `id`.
	var dataColumns []string
	for _, v := range record[1:] {
		// Check the trimmed name so whitespace-only headers are rejected too
		// (previously the raw value was checked after appending).
		trimmed := strings.TrimSpace(v)
		if len(trimmed) == 0 {
			return nil, "", errors.New("header contains an empty column")
		}
		dataColumns = append(dataColumns, trimmed)
	}

	var recordsWithEmptyColumns []int64
	var recordsWithDuplicateIDs []int64
	var probsBuff strings.Builder

	// stringProbs renders the accumulated problem-record lists as a single
	// human-readable sentence.
	stringProbs := func() string {
		if len(recordsWithEmptyColumns) != 0 {
			fmt.Fprintf(&probsBuff, "ID(s) %v contained empty columns and ",
				recordsWithEmptyColumns)
		}

		if len(recordsWithDuplicateIDs) != 0 {
			fmt.Fprintf(&probsBuff, "ID(s) %v were skipped as duplicates",
				recordsWithDuplicateIDs)
		}

		if probsBuff.Len() == 0 {
			return ""
		}
		return strings.TrimSuffix(probsBuff.String(), " and ")
	}

	// Parse records.
	recipientIDs := make(map[int64]bool)
	var recipients []recipient
	for {
		record, err := reader.Read()
		if errors.Is(err, io.EOF) {
			// Finished parsing the file.
			if len(recipients) == 0 {
				return nil, stringProbs(), errors.New("no records after header")
			}
			return recipients, stringProbs(), nil
		} else if err != nil {
			return nil, "", err
		}

		// Ensure the first column of each record can be parsed as a valid
		// registration ID.
		recordID := record[0]
		id, err := strconv.ParseInt(recordID, 10, 64)
		if err != nil {
			return nil, "", fmt.Errorf(
				"%q couldn't be parsed as a registration ID due to: %s", recordID, err)
		}

		// Skip records that have the same ID as those read previously.
		if recipientIDs[id] {
			recordsWithDuplicateIDs = append(recordsWithDuplicateIDs, id)
			continue
		}
		recipientIDs[id] = true

		// Collect the columns of data after `id` into a map.
		var emptyColumn bool
		data := make(map[string]string)
		for i, v := range record[1:] {
			if len(v) == 0 {
				emptyColumn = true
			}
			data[dataColumns[i]] = v
		}

		// Only used for logging.
		if emptyColumn {
			recordsWithEmptyColumns = append(recordsWithEmptyColumns, id)
		}

		recipients = append(recipients, recipient{id, data})
	}
}
const usageIntro = `
Introduction:
The notification mailer exists to send a message to the contact associated
with a list of registration IDs. The attributes of the message (from address,
subject, and message content) are provided by the command line arguments. The
message content is provided as a path to a template file via the -body argument.
Provide a list of recipient user ids in a CSV file passed with the -recipientList
flag. The CSV file must have "id" as the first column and may have additional
fields to be interpolated into the email template:
id, lastIssuance
1234, "from example.com 2018-12-01"
5678, "from example.net 2018-12-13"
The additional fields will be interpolated with Golang templating, e.g.:
Your last issuance on each account was:
{{ range . }} {{ .Data.lastIssuance }}
{{ end }}
To help the operator gain confidence in the mailing run before committing fully
three safety features are supported: dry runs, intervals and a sleep between emails.
The -dryRun=true flag will use a mock mailer that prints message content to
stdout instead of performing an SMTP transaction with a real mailserver. This
can be used when the initial parameters are being tweaked to ensure no real
emails are sent. Using -dryRun=false will send real email.
Intervals supported via the -start and -end arguments. Only email addresses that
are alphabetically between the -start and -end strings will be sent. This can be used
to break up sending into batches, or more likely to resume sending if a batch is killed,
without resending messages that have already been sent. The -start flag is inclusive and
the -end flag is exclusive.
Notify-mailer de-duplicates email addresses and groups together the resulting recipient
structs, so a person who has multiple accounts using the same address will only receive
one email.
During mailing the -sleep argument is used to space out individual messages.
This can be used to ensure that the mailing happens at a steady pace with ample
opportunity for the operator to terminate early in the event of error. The
-sleep flag honours durations with a unit suffix (e.g. 1m for 1 minute, 10s for
10 seconds, etc). Using -sleep=0 will disable the sleep and send at full speed.
Examples:
Send an email with subject "Hello!" from the email "hello@goodbye.com" with
the contents read from "test_msg_body.txt" to every email associated with the
registration IDs listed in "test_reg_recipients.json", sleeping 10 seconds
between each message:
notify-mailer -config test/config/notify-mailer.json -body
cmd/notify-mailer/testdata/test_msg_body.txt -from hello@goodbye.com
-recipientList cmd/notify-mailer/testdata/test_msg_recipients.csv -subject "Hello!"
-sleep 10s -dryRun=false
Do the same, but only to example@example.com:
notify-mailer -config test/config/notify-mailer.json
-body cmd/notify-mailer/testdata/test_msg_body.txt -from hello@goodbye.com
-recipientList cmd/notify-mailer/testdata/test_msg_recipients.csv -subject "Hello!"
-start example@example.com -end example@example.comX
Send the message starting with example@example.com and emailing every address that's
alphabetically higher:
notify-mailer -config test/config/notify-mailer.json
-body cmd/notify-mailer/testdata/test_msg_body.txt -from hello@goodbye.com
-recipientList cmd/notify-mailer/testdata/test_msg_recipients.csv -subject "Hello!"
-start example@example.com
Required arguments:
- body
- config
- from
- subject
- recipientList`
// Config holds the JSON configuration for the notify-mailer command.
type Config struct {
	// NotifyMailer configures database access and the outbound SMTP
	// connection used to deliver notification mail.
	NotifyMailer struct {
		// DB is used to resolve registration IDs to contact addresses.
		DB cmd.DBConfig
		// Embedded SMTP settings (server, port, username, password file).
		cmd.SMTPConfig
	}
	// Syslog configures logging output.
	Syslog cmd.SyslogConfig
}
// main is the entry point for the notify-mailer command. It parses flags and
// the JSON config, loads the recipient list and the message template,
// constructs either a dry-run or a real SMTP mailer, and runs the mailing.
func main() {
	from := flag.String("from", "", "From header for emails. Must be a bare email address.")
	subject := flag.String("subject", "", "Subject of emails")
	recipientListFile := flag.String("recipientList", "", "File containing a CSV list of registration IDs and extra info.")
	parseAsTSV := flag.Bool("tsv", false, "Parse the recipient list file as a TSV.")
	bodyFile := flag.String("body", "", "File containing the email body in Golang template format.")
	dryRun := flag.Bool("dryRun", true, "Whether to do a dry run.")
	sleep := flag.Duration("sleep", 500*time.Millisecond, "How long to sleep between emails.")
	parallelSends := flag.Uint("parallelSends", 1, "How many parallel goroutines should process emails")
	start := flag.String("start", "", "Alphabetically lowest email address to include.")
	// "\xFF" sorts above any printable address, so the default upper bound is
	// effectively "no limit".
	end := flag.String("end", "\xFF", "Alphabetically highest email address (exclusive).")
	reconnBase := flag.Duration("reconnectBase", 1*time.Second, "Base sleep duration between reconnect attempts")
	reconnMax := flag.Duration("reconnectMax", 5*60*time.Second, "Max sleep duration between reconnect attempts after exponential backoff")
	configFile := flag.String("config", "", "File containing a JSON config.")
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "%s\n\n", usageIntro)
		fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
		flag.PrintDefaults()
	}
	// Validate required args.
	flag.Parse()
	if *from == "" || *subject == "" || *bodyFile == "" || *configFile == "" || *recipientListFile == "" {
		flag.Usage()
		os.Exit(1)
	}
	configData, err := os.ReadFile(*configFile)
	cmd.FailOnError(err, "Couldn't load JSON config file")
	// Parse JSON config.
	var cfg Config
	err = json.Unmarshal(configData, &cfg)
	cmd.FailOnError(err, "Couldn't unmarshal JSON config file")
	log := cmd.NewLogger(cfg.Syslog)
	log.Info(cmd.VersionString())
	dbMap, err := sa.InitWrappedDb(cfg.NotifyMailer.DB, nil, log)
	cmd.FailOnError(err, "While initializing dbMap")
	// Load and parse message body.
	template, err := template.ParseFiles(*bodyFile)
	cmd.FailOnError(err, "Couldn't parse message template")
	// Ensure that in the event of a missing key, an informative error is
	// returned.
	template.Option("missingkey=error")
	address, err := mail.ParseAddress(*from)
	cmd.FailOnError(err, fmt.Sprintf("Couldn't parse %q to address", *from))
	recipientListDelimiter := ','
	if *parseAsTSV {
		recipientListDelimiter = '\t'
	}
	recipients, probs, err := readRecipientsList(*recipientListFile, recipientListDelimiter)
	cmd.FailOnError(err, "Couldn't populate recipients")
	// Non-fatal problems (empty columns, duplicates) are logged, not fatal.
	if probs != "" {
		log.Infof("While reading the recipient list file %s", probs)
	}
	var mailClient bmail.Mailer
	if *dryRun {
		// In dry-run mode messages are logged rather than sent.
		log.Infof("Starting %s in dry-run mode", cmd.VersionString())
		mailClient = bmail.NewDryRun(*address, log)
	} else {
		log.Infof("Starting %s", cmd.VersionString())
		smtpPassword, err := cfg.NotifyMailer.PasswordConfig.Pass()
		cmd.FailOnError(err, "Couldn't load SMTP password from file")
		mailClient = bmail.New(
			cfg.NotifyMailer.Server,
			cfg.NotifyMailer.Port,
			cfg.NotifyMailer.Username,
			smtpPassword,
			nil,
			*address,
			log,
			metrics.NoopRegisterer,
			*reconnBase,
			*reconnMax)
	}
	m := mailer{
		clk:           cmd.Clock(),
		log:           log,
		dbMap:         dbMap,
		mailer:        mailClient,
		subject:       *subject,
		recipients:    recipients,
		emailTemplate: template,
		targetRange: interval{
			start: *start,
			end:   *end,
		},
		sleepInterval: *sleep,
		parallelSends: *parallelSends,
	}
	err = m.run(context.TODO())
	cmd.FailOnError(err, "Couldn't complete")
	log.Info("Completed successfully")
}
// init registers notify-mailer as a subcommand, along with a validator for
// its Config.
func init() {
	cmd.RegisterCommand("notify-mailer", main, &cmd.ConfigValidator{Config: &Config{}})
}

View File

@ -0,0 +1,782 @@
package notmain
import (
"context"
"database/sql"
"errors"
"fmt"
"io"
"os"
"testing"
"text/template"
"time"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/db"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/mocks"
"github.com/letsencrypt/boulder/test"
)
// TestIntervalOK exercises interval.ok(): valid intervals (including the
// zero value) must produce no error; an interval whose start sorts after its
// end must be rejected.
func TestIntervalOK(t *testing.T) {
	// Test a number of intervals known to be OK, ensure that no error is
	// produced when calling `ok()`.
	okCases := []struct {
		testInterval interval
	}{
		{interval{}},
		{interval{start: "aa", end: "\xFF"}},
		{interval{end: "aa"}},
		{interval{start: "aa", end: "bb"}},
	}
	for _, testcase := range okCases {
		err := testcase.testInterval.ok()
		test.AssertNotError(t, err, "valid interval produced ok() error")
	}
	// start > end is invalid.
	badInterval := interval{start: "bb", end: "aa"}
	err := badInterval.ok()
	test.AssertError(t, err, "bad interval was considered ok")
}
// setupMakeRecipientList writes contents to a fresh temporary file and
// returns its path. The file lives under t.TempDir(), so it is removed
// automatically when the test finishes; callers that additionally
// os.Remove it are harmless.
func setupMakeRecipientList(t *testing.T, contents string) string {
	// Mark this function as a helper so assertion failures are reported at
	// the caller's line, and anchor the file in a per-test temp dir so it
	// cannot leak if a test fails before its deferred os.Remove runs.
	t.Helper()
	entryFile, err := os.CreateTemp(t.TempDir(), "")
	test.AssertNotError(t, err, "couldn't create temp file")
	_, err = entryFile.WriteString(contents)
	test.AssertNotError(t, err, "couldn't write contents to temp file")
	err = entryFile.Close()
	test.AssertNotError(t, err, "couldn't close temp file")
	return entryFile.Name()
}
// A well-formed recipient list must parse to the same records whether it is
// delimited as CSV or as TSV.
func TestReadRecipientList(t *testing.T) {
	csvContents := `id, domainName, date
10,example.com,2018-11-21
23,example.net,2018-11-22`
	csvFile := setupMakeRecipientList(t, csvContents)
	defer os.Remove(csvFile)

	got, _, err := readRecipientsList(csvFile, ',')
	test.AssertNotError(t, err, "received an error for a valid CSV file")

	want := []recipient{
		{id: 10, Data: map[string]string{"date": "2018-11-21", "domainName": "example.com"}},
		{id: 23, Data: map[string]string{"date": "2018-11-22", "domainName": "example.net"}},
	}
	test.AssertDeepEquals(t, got, want)

	tsvContents := `id	domainName	date
10	example.com	2018-11-21
23	example.net	2018-11-22`
	tsvFile := setupMakeRecipientList(t, tsvContents)
	defer os.Remove(tsvFile)

	got, _, err = readRecipientsList(tsvFile, '\t')
	test.AssertNotError(t, err, "received an error for a valid TSV file")
	test.AssertDeepEquals(t, got, want)
}
// A recipient list whose only column is `id` must parse without error.
func TestReadRecipientListNoExtraColumns(t *testing.T) {
	idOnlyCSV := `id
10
23`
	listPath := setupMakeRecipientList(t, idOnlyCSV)
	defer os.Remove(listPath)

	_, _, err := readRecipientsList(listPath, ',')
	test.AssertNotError(t, err, "received an error for a valid CSV file")
}
// Reading a recipient list from a nonexistent path must return an error.
func TestReadRecipientsListFileNoExist(t *testing.T) {
	_, _, readErr := readRecipientsList("doesNotExist", ',')
	test.AssertError(t, readErr, "expected error for a file that doesn't exist")
}
// A header row containing an empty column (a doubled delimiter) must be
// rejected with a specific error.
func TestReadRecipientListWithEmptyColumnInHeader(t *testing.T) {
	badHeaderCSV := `id, domainName,,date
10,example.com,2018-11-21
23,example.net`
	listPath := setupMakeRecipientList(t, badHeaderCSV)
	defer os.Remove(listPath)

	_, _, err := readRecipientsList(listPath, ',')
	test.AssertError(t, err, "failed to error on CSV file with trailing delimiter in header")
	test.AssertDeepEquals(t, err, errors.New("header contains an empty column"))
}
// Rows with empty columns and duplicate IDs are reported in the problems
// string but do not fail the parse; duplicates are dropped from the result.
// When only one category of problem occurs, the trailing " and " joiner must
// not appear.
func TestReadRecipientListWithProblems(t *testing.T) {
	mixedProblemsCSV := `id, domainName, date
10,example.com,2018-11-21
23,example.net,
10,example.com,2018-11-22
42,example.net,
24,example.com,2018-11-21
24,example.com,2018-11-21
`
	listPath := setupMakeRecipientList(t, mixedProblemsCSV)
	defer os.Remove(listPath)

	recipients, probs, err := readRecipientsList(listPath, ',')
	test.AssertNotError(t, err, "received an error for a valid CSV file")
	test.AssertEquals(t, probs, "ID(s) [23 42] contained empty columns and ID(s) [10 24] were skipped as duplicates")
	test.AssertEquals(t, len(recipients), 4)

	// Ensure trailing " and " is trimmed from single problem.
	emptyColumnsOnlyCSV := `id, domainName, date
23,example.net,
10,example.com,2018-11-21
42,example.net,
`
	listPath = setupMakeRecipientList(t, emptyColumnsOnlyCSV)
	defer os.Remove(listPath)

	_, probs, err = readRecipientsList(listPath, ',')
	test.AssertNotError(t, err, "received an error for a valid CSV file")
	test.AssertEquals(t, probs, "ID(s) [23 42] contained empty columns")
}
// TestReadRecipientListWithEmptyLine is intended to show that a recipient
// list containing an empty line still parses cleanly.
// NOTE(review): the `contents` literal below does not actually contain an
// empty line between records — it is identical in shape to the plain
// valid-CSV case above. The blank line may have been lost in an edit;
// confirm and restore it so this test exercises what its name promises.
func TestReadRecipientListWithEmptyLine(t *testing.T) {
	contents := `id, domainName, date
10,example.com,2018-11-21
23,example.net,2018-11-22`
	entryFile := setupMakeRecipientList(t, contents)
	defer os.Remove(entryFile)
	_, _, err := readRecipientsList(entryFile, ',')
	test.AssertNotError(t, err, "received an error for a valid CSV file")
}
// A data row with fewer columns than the header must produce a parse error.
func TestReadRecipientListWithMismatchedColumns(t *testing.T) {
	shortRowCSV := `id, domainName, date
10,example.com,2018-11-21
23,example.net`
	listPath := setupMakeRecipientList(t, shortRowCSV)
	defer os.Remove(listPath)

	_, _, err := readRecipientsList(listPath, ',')
	test.AssertError(t, err, "failed to error on CSV file with mismatched columns")
}
// Duplicate registration IDs are tolerated (de-duplicated), not an error.
func TestReadRecipientListWithDuplicateIDs(t *testing.T) {
	duplicateIDCSV := `id, domainName, date
10,example.com,2018-11-21
10,example.net,2018-11-22`
	listPath := setupMakeRecipientList(t, duplicateIDCSV)
	defer os.Remove(listPath)

	_, _, err := readRecipientsList(listPath, ',')
	test.AssertNotError(t, err, "received an error for a valid CSV file")
}
// A non-numeric registration ID must cause a parse error.
func TestReadRecipientListWithUnparsableID(t *testing.T) {
	badIDCSV := `id, domainName, date
10,example.com,2018-11-21
twenty,example.net,2018-11-22`
	listPath := setupMakeRecipientList(t, badIDCSV)
	defer os.Remove(listPath)

	_, _, err := readRecipientsList(listPath, ',')
	test.AssertError(t, err, "expected error for CSV file that contains an unparsable registration ID")
}
// A header lacking the mandatory `id` column must cause a parse error.
func TestReadRecipientListWithoutIDHeader(t *testing.T) {
	noIDHeaderCSV := `notId, domainName, date
10,example.com,2018-11-21
twenty,example.net,2018-11-22`
	listPath := setupMakeRecipientList(t, noIDHeaderCSV)
	defer os.Remove(listPath)

	_, _, err := readRecipientsList(listPath, ',')
	test.AssertError(t, err, "expected error for CSV file missing header field `id`")
}
// A file holding only a header row and no data rows must cause an error.
func TestReadRecipientListWithNoRecords(t *testing.T) {
	headerOnlyCSV := `id, domainName, date
`
	listPath := setupMakeRecipientList(t, headerOnlyCSV)
	defer os.Remove(listPath)

	_, _, err := readRecipientsList(listPath, ',')
	test.AssertError(t, err, "expected error for CSV file containing only a header")
}
// A completely empty file must error, and the error must wrap io.EOF.
func TestReadRecipientListWithNoHeaderOrRecords(t *testing.T) {
	listPath := setupMakeRecipientList(t, ``)
	defer os.Remove(listPath)

	_, _, err := readRecipientsList(listPath, ',')
	test.AssertError(t, err, "expected error for CSV file containing only a header")
	test.AssertErrorIs(t, err, io.EOF)
}
// TestMakeMessageBody checks template execution for the per-address message
// body: a basic template over two recipients, the empty-recipients error
// case, and the "missingkey=error" behavior when a template key is absent
// from a recipient's Data map.
func TestMakeMessageBody(t *testing.T) {
	emailTemplate := `{{range . }}
{{ .Data.date }}
{{ .Data.domainName }}
{{end}}`
	m := &mailer{
		log:           blog.UseMock(),
		mailer:        &mocks.Mailer{},
		emailTemplate: template.Must(template.New("email").Parse(emailTemplate)).Option("missingkey=error"),
		sleepInterval: 0,
		targetRange:   interval{end: "\xFF"},
		clk:           clock.NewFake(),
		recipients:    nil,
		dbMap:         mockEmailResolver{},
	}
	recipients := []recipient{
		{id: 10, Data: map[string]string{"date": "2018-11-21", "domainName": "example.com"}},
		{id: 23, Data: map[string]string{"date": "2018-11-22", "domainName": "example.net"}},
	}
	expectedMessageBody := `
2018-11-21
example.com

2018-11-22
example.net
`
	// Ensure that a very basic template with 2 recipients can be successfully
	// executed.
	messageBody, err := m.makeMessageBody(recipients)
	test.AssertNotError(t, err, "failed to execute a valid template")
	test.AssertEquals(t, messageBody, expectedMessageBody)
	// With no recipients we should get an empty body error.
	recipients = []recipient{}
	_, err = m.makeMessageBody(recipients)
	test.AssertError(t, err, "should have errored on empty body")
	// With a missing key we should get an informative templating error.
	recipients = []recipient{{id: 10, Data: map[string]string{"domainName": "example.com"}}}
	_, err = m.makeMessageBody(recipients)
	test.AssertEquals(t, err.Error(), "template: email:2:8: executing \"email\" at <.Data.date>: map has no entry for key \"date\"")
}
// TestSleepInterval verifies, against a fake clock, that run() sleeps
// sleepInterval once per destination address when a positive interval is
// configured, and advances the clock not at all when the interval is zero.
func TestSleepInterval(t *testing.T) {
	const sleepLen = 10
	mc := &mocks.Mailer{}
	dbMap := mockEmailResolver{}
	tmpl := template.Must(template.New("letter").Parse("an email body"))
	recipients := []recipient{{id: 1}, {id: 2}, {id: 3}}
	// Set up a mock mailer that sleeps for `sleepLen` seconds and only has one
	// goroutine to process results
	m := &mailer{
		log:           blog.UseMock(),
		mailer:        mc,
		emailTemplate: tmpl,
		sleepInterval: sleepLen * time.Second,
		parallelSends: 1,
		targetRange:   interval{start: "", end: "\xFF"},
		clk:           clock.NewFake(),
		recipients:    recipients,
		dbMap:         dbMap,
	}
	// Call run() - this should sleep `sleepLen` per destination address
	// After it returns, we expect (sleepLen * number of destinations) seconds has
	// elapsed
	err := m.run(context.Background())
	test.AssertNotError(t, err, "error calling mailer run()")
	expectedEnd := clock.NewFake()
	expectedEnd.Add(time.Second * time.Duration(sleepLen*len(recipients)))
	test.AssertEquals(t, m.clk.Now(), expectedEnd.Now())
	// Set up a mock mailer that doesn't sleep at all
	m = &mailer{
		log:           blog.UseMock(),
		mailer:        mc,
		emailTemplate: tmpl,
		sleepInterval: 0,
		targetRange:   interval{end: "\xFF"},
		clk:           clock.NewFake(),
		recipients:    recipients,
		dbMap:         dbMap,
	}
	// Call run() - this should blast through all destinations without sleep
	// After it returns, we expect no clock time to have elapsed on the fake clock
	err = m.run(context.Background())
	test.AssertNotError(t, err, "error calling mailer run()")
	expectedEnd = clock.NewFake()
	test.AssertEquals(t, m.clk.Now(), expectedEnd.Now())
}
// TestMailIntervals verifies targetRange handling in run(): an interval whose
// start cannot match any address is an error; a negative sleepInterval is an
// error; a start bound includes that address and everything alphabetically
// above it; an end bound is exclusive.
func TestMailIntervals(t *testing.T) {
	const testSubject = "Test Subject"
	dbMap := mockEmailResolver{}
	tmpl := template.Must(template.New("letter").Parse("an email body"))
	recipients := []recipient{{id: 1}, {id: 2}, {id: 3}}
	mc := &mocks.Mailer{}
	// Create a mailer with a checkpoint interval larger than any of the
	// destination email addresses.
	m := &mailer{
		log:           blog.UseMock(),
		mailer:        mc,
		dbMap:         dbMap,
		subject:       testSubject,
		recipients:    recipients,
		emailTemplate: tmpl,
		targetRange:   interval{start: "\xFF", end: "\xFF\xFF"},
		sleepInterval: 0,
		clk:           clock.NewFake(),
	}
	// Run the mailer. It should produce an error about the interval start
	mc.Clear()
	err := m.run(context.Background())
	test.AssertError(t, err, "expected error")
	test.AssertEquals(t, len(mc.Messages), 0)
	// Create a mailer with a negative sleep interval
	m = &mailer{
		log:           blog.UseMock(),
		mailer:        mc,
		dbMap:         dbMap,
		subject:       testSubject,
		recipients:    recipients,
		emailTemplate: tmpl,
		targetRange:   interval{},
		sleepInterval: -10,
		clk:           clock.NewFake(),
	}
	// Run the mailer. It should produce an error about the sleep interval
	mc.Clear()
	err = m.run(context.Background())
	test.AssertEquals(t, len(mc.Messages), 0)
	test.AssertEquals(t, err.Error(), "sleep interval (-10) is < 0")
	// Create a mailer with an interval starting with a specific email address.
	// It should send email to that address and others alphabetically higher.
	m = &mailer{
		log:           blog.UseMock(),
		mailer:        mc,
		dbMap:         dbMap,
		subject:       testSubject,
		recipients:    []recipient{{id: 1}, {id: 2}, {id: 3}, {id: 4}},
		emailTemplate: tmpl,
		targetRange:   interval{start: "test-example-updated@letsencrypt.org", end: "\xFF"},
		sleepInterval: 0,
		clk:           clock.NewFake(),
	}
	// Run the mailer. Two messages should have been produced, one to
	// test-example-updated@letsencrypt.org (beginning of the range),
	// and one to test-test-test@letsencrypt.org.
	mc.Clear()
	err = m.run(context.Background())
	test.AssertNotError(t, err, "run() produced an error")
	test.AssertEquals(t, len(mc.Messages), 2)
	test.AssertEquals(t, mocks.MailerMessage{
		To:      "test-example-updated@letsencrypt.org",
		Subject: testSubject,
		Body:    "an email body",
	}, mc.Messages[0])
	test.AssertEquals(t, mocks.MailerMessage{
		To:      "test-test-test@letsencrypt.org",
		Subject: testSubject,
		Body:    "an email body",
	}, mc.Messages[1])
	// Create a mailer with a checkpoint interval ending before
	// "test-example-updated@letsencrypt.org"
	m = &mailer{
		log:           blog.UseMock(),
		mailer:        mc,
		dbMap:         dbMap,
		subject:       testSubject,
		recipients:    []recipient{{id: 1}, {id: 2}, {id: 3}, {id: 4}},
		emailTemplate: tmpl,
		targetRange:   interval{end: "test-example-updated@letsencrypt.org"},
		sleepInterval: 0,
		clk:           clock.NewFake(),
	}
	// Run the mailer. Two messages should have been produced, one to
	// example@letsencrypt.org (ID 1), one to example-example-example@example.com (ID 2)
	mc.Clear()
	err = m.run(context.Background())
	test.AssertNotError(t, err, "run() produced an error")
	test.AssertEquals(t, len(mc.Messages), 2)
	test.AssertEquals(t, mocks.MailerMessage{
		To:      "example-example-example@letsencrypt.org",
		Subject: testSubject,
		Body:    "an email body",
	}, mc.Messages[0])
	test.AssertEquals(t, mocks.MailerMessage{
		To:      "example@letsencrypt.org",
		Subject: testSubject,
		Body:    "an email body",
	}, mc.Messages[1])
}
// TestParallelism verifies that with parallelSends=10 every address still
// receives exactly one message, and that the fake clock advances by the
// staggered startup sleep of the parallel workers.
func TestParallelism(t *testing.T) {
	const testSubject = "Test Subject"
	dbMap := mockEmailResolver{}
	tmpl := template.Must(template.New("letter").Parse("an email body"))
	recipients := []recipient{{id: 1}, {id: 2}, {id: 3}, {id: 4}}
	mc := &mocks.Mailer{}
	// Create a mailer with 10 parallel workers.
	m := &mailer{
		log:           blog.UseMock(),
		mailer:        mc,
		dbMap:         dbMap,
		subject:       testSubject,
		recipients:    recipients,
		emailTemplate: tmpl,
		targetRange:   interval{end: "\xFF"},
		sleepInterval: 0,
		parallelSends: 10,
		clk:           clock.NewFake(),
	}
	mc.Clear()
	err := m.run(context.Background())
	test.AssertNotError(t, err, "run() produced an error")
	// The fake clock should have advanced 9 seconds, one for each parallel
	// goroutine after the first doing its polite 1-second sleep at startup.
	expectedEnd := clock.NewFake()
	expectedEnd.Add(9 * time.Second)
	test.AssertEquals(t, m.clk.Now(), expectedEnd.Now())
	// A message should have been sent to all four addresses.
	test.AssertEquals(t, len(mc.Messages), 4)
	expectedAddresses := []string{
		"example@letsencrypt.org",
		"test-example-updated@letsencrypt.org",
		"test-test-test@letsencrypt.org",
		"example-example-example@letsencrypt.org",
	}
	// Parallel send order is nondeterministic, so only membership is checked.
	for _, msg := range mc.Messages {
		test.AssertSliceContains(t, expectedAddresses, msg.To)
	}
}
func TestMessageContentStatic(t *testing.T) {
// Create a mailer with fixed content
const (
testSubject = "Test Subject"
)
dbMap := mockEmailResolver{}
mc := &mocks.Mailer{}
m := &mailer{
log: blog.UseMock(),
mailer: mc,
dbMap: dbMap,
subject: testSubject,
recipients: []recipient{{id: 1}},
emailTemplate: template.Must(template.New("letter").Parse("an email body")),
targetRange: interval{end: "\xFF"},
sleepInterval: 0,
clk: clock.NewFake(),
}
// Run the mailer, one message should have been created with the content
// expected
err := m.run(context.Background())
test.AssertNotError(t, err, "error calling mailer run()")
test.AssertEquals(t, len(mc.Messages), 1)
test.AssertEquals(t, mocks.MailerMessage{
To: "example@letsencrypt.org",
Subject: testSubject,
Body: "an email body",
}, mc.Messages[0])
}
// Send mail with a variable interpolated.
func TestMessageContentInterpolated(t *testing.T) {
	singleRecipient := []recipient{
		{
			id: 1,
			Data: map[string]string{
				"validationMethod": "eyeballing it",
			},
		},
	}
	mockMailer := &mocks.Mailer{}
	m := &mailer{
		log:        blog.UseMock(),
		mailer:     mockMailer,
		dbMap:      mockEmailResolver{},
		subject:    "Test Subject",
		recipients: singleRecipient,
		emailTemplate: template.Must(template.New("letter").Parse(
			`issued by {{range .}}{{ .Data.validationMethod }}{{end}}`)),
		targetRange:   interval{end: "\xFF"},
		sleepInterval: 0,
		clk:           clock.NewFake(),
	}

	// Run the mailer; exactly one message with the interpolated body should
	// have been produced.
	err := m.run(context.Background())
	test.AssertNotError(t, err, "error calling mailer run()")
	test.AssertEquals(t, len(mockMailer.Messages), 1)
	test.AssertEquals(t, mocks.MailerMessage{
		To:      "example@letsencrypt.org",
		Subject: "Test Subject",
		Body:    "issued by eyeballing it",
	}, mockMailer.Messages[0])
}
// Send mail with a variable interpolated multiple times for accounts that share
// an email address. The four registration IDs below all resolve (via
// mockEmailResolver) to the same contact address, so a single message is
// sent whose body lists each account's domain.
func TestMessageContentInterpolatedMultiple(t *testing.T) {
	recipients := []recipient{
		{
			id: 200,
			Data: map[string]string{
				"domain": "blog.example.com",
			},
		},
		{
			id: 201,
			Data: map[string]string{
				"domain": "nas.example.net",
			},
		},
		{
			id: 202,
			Data: map[string]string{
				"domain": "mail.example.org",
			},
		},
		{
			id: 203,
			Data: map[string]string{
				"domain": "panel.example.net",
			},
		},
	}
	dbMap := mockEmailResolver{}
	mc := &mocks.Mailer{}
	m := &mailer{
		log:        blog.UseMock(),
		mailer:     mc,
		dbMap:      dbMap,
		subject:    "Test Subject",
		recipients: recipients,
		emailTemplate: template.Must(template.New("letter").Parse(
			`issued for:
{{range .}}{{ .Data.domain }}
{{end}}Thanks`)),
		targetRange:   interval{end: "\xFF"},
		sleepInterval: 0,
		clk:           clock.NewFake(),
	}
	// Run the mailer, one message should have been created with the content
	// expected
	err := m.run(context.Background())
	test.AssertNotError(t, err, "error calling mailer run()")
	test.AssertEquals(t, len(mc.Messages), 1)
	test.AssertEquals(t, mocks.MailerMessage{
		To:      "gotta.lotta.accounts@letsencrypt.org",
		Subject: "Test Subject",
		Body: `issued for:
blog.example.com
nas.example.net
mail.example.org
panel.example.net
Thanks`,
	}, mc.Messages[0])
}
// mockEmailResolver implements the `dbSelector` interface from
// `notify-mailer/main.go` to allow unit testing without using a backing
// database.
type mockEmailResolver struct{}
// SelectOne treats the requested registration ID (the `id` entry of the
// query-arguments map) as a key into a fixed in-memory list of contact
// records, writing the matching record into `output`. When no record
// matches it returns a db.ErrDatabaseOp wrapping sql.ErrNoRows, mirroring
// the real dbSelector's behavior.
func (bs mockEmailResolver) SelectOne(ctx context.Context, output interface{}, _ string, args ...interface{}) error {
	// The "dbList" is just a list of contact records in memory
	dbList := []contactQueryResult{
		{
			ID:      1,
			Contact: []byte(`["mailto:example@letsencrypt.org"]`),
		},
		{
			ID:      2,
			Contact: []byte(`["mailto:test-example-updated@letsencrypt.org"]`),
		},
		{
			ID:      3,
			Contact: []byte(`["mailto:test-test-test@letsencrypt.org"]`),
		},
		{
			ID:      4,
			Contact: []byte(`["mailto:example-example-example@letsencrypt.org"]`),
		},
		{
			ID:      5,
			Contact: []byte(`["mailto:youve.got.mail@letsencrypt.org"]`),
		},
		{
			ID:      6,
			Contact: []byte(`["mailto:mail@letsencrypt.org"]`),
		},
		{
			ID:      7,
			Contact: []byte(`["mailto:***********"]`),
		},
		{
			ID:      200,
			Contact: []byte(`["mailto:gotta.lotta.accounts@letsencrypt.org"]`),
		},
		{
			ID:      201,
			Contact: []byte(`["mailto:gotta.lotta.accounts@letsencrypt.org"]`),
		},
		{
			ID:      202,
			Contact: []byte(`["mailto:gotta.lotta.accounts@letsencrypt.org"]`),
		},
		{
			ID:      203,
			Contact: []byte(`["mailto:gotta.lotta.accounts@letsencrypt.org"]`),
		},
		{
			ID:      204,
			Contact: []byte(`["mailto:gotta.lotta.accounts@letsencrypt.org"]`),
		},
	}
	// Play the type cast game so that we can dig into the arguments map and get
	// out an int64 `id` parameter.
	argsRaw := args[0]
	argsMap, ok := argsRaw.(map[string]interface{})
	if !ok {
		// Report the actual argument's type (argsRaw), not the whole
		// variadic slice, so the message names the offending value.
		return fmt.Errorf("incorrect args type %T", argsRaw)
	}
	idRaw := argsMap["id"]
	id, ok := idRaw.(int64)
	if !ok {
		// Report the type of the raw value; after a failed assertion `id`
		// is always a zero int64, so %T on it would always say "int64".
		return fmt.Errorf("incorrect args ID type %T", idRaw)
	}
	// Play the type cast game to get a `*contactQueryResult` so we can write
	// the result from the db list.
	outputPtr, ok := output.(*contactQueryResult)
	if !ok {
		return fmt.Errorf("incorrect output type %T", output)
	}
	for _, v := range dbList {
		if v.ID == id {
			*outputPtr = v
			break
		}
	}
	// No match found: outputPtr was never written, so its ID is still zero.
	if outputPtr.ID == 0 {
		return db.ErrDatabaseOp{
			Op:    "select one",
			Table: "registrations",
			Err:   sql.ErrNoRows,
		}
	}
	return nil
}
// TestResolveEmails verifies address resolution: each registration ID is
// resolved through the mock DB, missing IDs and invalid contact values are
// skipped gracefully, and accounts sharing one contact address collapse to a
// single map entry.
func TestResolveEmails(t *testing.T) {
	// Start with three reg. IDs. Note: the IDs have been matched with fake
	// results in the `db` slice in `mockEmailResolver`'s `SelectOne`. If you add
	// more test cases here you must also add the corresponding DB result in the
	// mock.
	recipients := []recipient{
		{
			id: 1,
		},
		{
			id: 2,
		},
		{
			id: 3,
		},
		// This registration ID deliberately doesn't exist in the mock data to make
		// sure this case is handled gracefully
		{
			id: 999,
		},
		// This registration ID deliberately returns an invalid email to make sure any
		// invalid contact info that slipped into the DB once upon a time will be ignored
		{
			id: 7,
		},
		{
			id: 200,
		},
		{
			id: 201,
		},
		{
			id: 202,
		},
		{
			id: 203,
		},
		{
			id: 204,
		},
	}
	tmpl := template.Must(template.New("letter").Parse("an email body"))
	dbMap := mockEmailResolver{}
	mc := &mocks.Mailer{}
	m := &mailer{
		log:           blog.UseMock(),
		mailer:        mc,
		dbMap:         dbMap,
		subject:       "Test",
		recipients:    recipients,
		emailTemplate: tmpl,
		targetRange:   interval{end: "\xFF"},
		sleepInterval: 0,
		clk:           clock.NewFake(),
	}
	addressesToRecipients, err := m.resolveAddresses(context.Background())
	test.AssertNotError(t, err, "failed to resolveEmailAddresses")
	// IDs 200-204 all share one address, so only four distinct keys remain.
	expected := []string{
		"example@letsencrypt.org",
		"test-example-updated@letsencrypt.org",
		"test-test-test@letsencrypt.org",
		"gotta.lotta.accounts@letsencrypt.org",
	}
	test.AssertEquals(t, len(addressesToRecipients), len(expected))
	for _, address := range expected {
		if _, ok := addressesToRecipients[address]; !ok {
			t.Errorf("missing entry in addressesToRecipients: %q", address)
		}
	}
}

View File

@ -0,0 +1,3 @@
This is a test message body regarding these domains:
{{ range . }} {{ .Extra.domainName }}
{{ end }}

View File

@ -0,0 +1,4 @@
id,domainName
1,one.example.com
2,two.example.net
3,three.example.org
1 id domainName
2 1 one.example.com
3 2 two.example.net
4 3 three.example.org

View File

@ -51,15 +51,10 @@ type Config struct {
// OCSP requests. This has a default value of ":80".
ListenAddress string `validate:"omitempty,hostname_port"`
// Timeout is the per-request overall timeout. This should be slightly
// lower than the upstream's timeout when making requests to this service.
// When to timeout a request. This should be slightly lower than the
// upstream's timeout when making request to ocsp-responder.
Timeout config.Duration `validate:"-"`
// ShutdownStopTimeout determines the maximum amount of time to wait
// for extant request handlers to complete before exiting. It should be
// greater than Timeout.
ShutdownStopTimeout config.Duration
// How often a response should be signed when using Redis/live-signing
// path. This has a default value of 60h.
LiveSigningPeriod config.Duration `validate:"-"`
@ -85,6 +80,8 @@ type Config struct {
// 40 * 5 / 0.02 = 10,000 requests before the oldest request times out.
MaxSigningWaiters int `validate:"min=0"`
ShutdownStopTimeout config.Duration
RequiredSerialPrefixes []string `validate:"omitempty,dive,hexadecimal"`
Features features.Config

View File

@ -11,7 +11,6 @@ import (
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/features"
bgrpc "github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/iana"
"github.com/letsencrypt/boulder/va"
vaConfig "github.com/letsencrypt/boulder/va/config"
vapb "github.com/letsencrypt/boulder/va/proto"
@ -26,7 +25,9 @@ type Config struct {
// Requirement 2.7 ("Multi-Perspective Issuance Corroboration attempts
// from each Network Perspective"). It should uniquely identify a group
// of RVAs deployed in the same datacenter.
Perspective string `omitempty:"required"`
//
// TODO(#7615): Make mandatory.
Perspective string `validate:"omitempty"`
// RIR indicates the Regional Internet Registry where this RVA is
// located. This field is used to identify the RIR region from which a
@ -37,8 +38,10 @@ type Config struct {
// - RIPE
// - APNIC
// - LACNIC
// - AFRINIC
RIR string `validate:"required,oneof=ARIN RIPE APNIC LACNIC AFRINIC"`
// - AfriNIC
//
// TODO(#7615): Make mandatory.
RIR string `validate:"omitempty,oneof=ARIN RIPE APNIC LACNIC AfriNIC"`
// SkipGRPCClientCertVerification, when disabled as it should typically
// be, will cause the remoteva server (which receives gRPCs from a
@ -87,12 +90,16 @@ func main() {
clk := cmd.Clock()
var servers bdns.ServerProvider
proto := "udp"
if features.Get().DOH {
proto = "tcp"
}
if len(c.RVA.DNSStaticResolvers) != 0 {
servers, err = bdns.NewStaticProvider(c.RVA.DNSStaticResolvers)
cmd.FailOnError(err, "Couldn't start static DNS server resolver")
} else {
servers, err = bdns.StartDynamicProvider(c.RVA.DNSProvider, 60*time.Second, "tcp")
servers, err = bdns.StartDynamicProvider(c.RVA.DNSProvider, 60*time.Second, proto)
cmd.FailOnError(err, "Couldn't start dynamic DNS server resolver")
}
defer servers.Stop()
@ -112,7 +119,6 @@ func main() {
scope,
clk,
c.RVA.DNSTries,
c.RVA.UserAgent,
logger,
tlsConfig)
} else {
@ -122,7 +128,6 @@ func main() {
scope,
clk,
c.RVA.DNSTries,
c.RVA.UserAgent,
logger,
tlsConfig)
}
@ -130,6 +135,7 @@ func main() {
vai, err := va.NewValidationAuthorityImpl(
resolver,
nil, // Our RVAs will never have RVAs of their own.
0, // Only the VA is concerned with max validation failures
c.RVA.UserAgent,
c.RVA.IssuerDomain,
scope,
@ -137,8 +143,7 @@ func main() {
logger,
c.RVA.AccountURIPrefixes,
c.RVA.Perspective,
c.RVA.RIR,
iana.IsReservedAddr)
c.RVA.RIR)
cmd.FailOnError(err, "Unable to create Remote-VA server")
start, err := bgrpc.NewServer(c.RVA.GRPC, logger).Add(

View File

@ -1,5 +1,5 @@
// Read a list of reversed FQDNs and/or normal IP addresses, separated by
// newlines. Print only those that are rejected by the current policy.
// Read a list of reversed hostnames, separated by newlines. Print only those
// that are rejected by the current policy.
package notmain
@ -9,11 +9,9 @@ import (
"fmt"
"io"
"log"
"net/netip"
"os"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/policy"
"github.com/letsencrypt/boulder/sa"
)
@ -41,7 +39,7 @@ func main() {
scanner := bufio.NewScanner(input)
logger := cmd.NewLogger(cmd.SyslogConfig{StdoutLevel: 7})
logger.Info(cmd.VersionString())
pa, err := policy.New(nil, nil, logger)
pa, err := policy.New(nil, logger)
if err != nil {
log.Fatal(err)
}
@ -51,15 +49,8 @@ func main() {
}
var errors bool
for scanner.Scan() {
n := sa.EncodeIssuedName(scanner.Text())
var ident identifier.ACMEIdentifier
ip, err := netip.ParseAddr(n)
if err == nil {
ident = identifier.NewIP(ip)
} else {
ident = identifier.NewDNS(n)
}
err = pa.WillingToIssue(identifier.ACMEIdentifiers{ident})
n := sa.ReverseName(scanner.Text())
err := pa.WillingToIssue([]string{n})
if err != nil {
errors = true
fmt.Printf("%s: %s\n", n, err)

View File

@ -34,7 +34,7 @@ type client struct {
// for a single certificateStatus ID. If `err` is non-nil, it indicates the
// attempt failed.
type processResult struct {
id int64
id uint64
err error
}
@ -181,7 +181,7 @@ func (cl *client) scanFromDBOneBatch(ctx context.Context, prevID int64, frequenc
return fmt.Errorf("scanning row %d (previous ID %d): %w", scanned, previousID, err)
}
scanned++
inflightIDs.add(status.ID)
inflightIDs.add(uint64(status.ID))
// Emit a log line every 100000 rows. For our current ~215M rows, that
// will emit about 2150 log lines. This probably strikes a good balance
// between too spammy and having a reasonably frequent checkpoint.
@ -213,25 +213,25 @@ func (cl *client) signAndStoreResponses(ctx context.Context, input <-chan *sa.Ce
Serial: status.Serial,
IssuerID: status.IssuerID,
Status: string(status.Status),
Reason: int32(status.RevokedReason), //nolint: gosec // Revocation reasons are guaranteed to be small, no risk of overflow.
Reason: int32(status.RevokedReason),
RevokedAt: timestamppb.New(status.RevokedDate),
}
result, err := cl.ocspGenerator.GenerateOCSP(ctx, ocspReq)
if err != nil {
output <- processResult{id: status.ID, err: err}
output <- processResult{id: uint64(status.ID), err: err}
continue
}
resp, err := ocsp.ParseResponse(result.Response, nil)
if err != nil {
output <- processResult{id: status.ID, err: err}
output <- processResult{id: uint64(status.ID), err: err}
continue
}
err = cl.redis.StoreResponse(ctx, resp)
if err != nil {
output <- processResult{id: status.ID, err: err}
output <- processResult{id: uint64(status.ID), err: err}
} else {
output <- processResult{id: status.ID, err: nil}
output <- processResult{id: uint64(status.ID), err: nil}
}
}
}

View File

@ -15,7 +15,6 @@ import (
capb "github.com/letsencrypt/boulder/ca/proto"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/db"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/rocsp"
@ -40,8 +39,8 @@ func makeClient() (*rocsp.RWClient, clock.Clock) {
rdb := redis.NewRing(&redis.RingOptions{
Addrs: map[string]string{
"shard1": "10.77.77.2:4218",
"shard2": "10.77.77.3:4218",
"shard1": "10.33.33.2:4218",
"shard2": "10.33.33.3:4218",
},
Username: "unittest-rw",
Password: "824968fa490f4ecec1e52d5e34916bdb60d45f8d",
@ -51,34 +50,29 @@ func makeClient() (*rocsp.RWClient, clock.Clock) {
return rocsp.NewWritingClient(rdb, 500*time.Millisecond, clk, metrics.NoopRegisterer), clk
}
func insertCertificateStatus(t *testing.T, dbMap db.Executor, serial string, notAfter, ocspLastUpdated time.Time) int64 {
result, err := dbMap.ExecContext(context.Background(),
`INSERT INTO certificateStatus
(serial, notAfter, status, ocspLastUpdated, revokedDate, revokedReason, lastExpirationNagSent, issuerID)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)`,
serial,
notAfter,
core.OCSPStatusGood,
ocspLastUpdated,
time.Time{},
0,
time.Time{},
99)
test.AssertNotError(t, err, "inserting certificate status")
id, err := result.LastInsertId()
test.AssertNotError(t, err, "getting last insert ID")
return id
}
func TestGetStartingID(t *testing.T) {
ctx := context.Background()
clk := clock.NewFake()
dbMap, err := sa.DBMapForTest(vars.DBConnSAFullPerms)
test.AssertNotError(t, err, "failed setting up db client")
defer test.ResetBoulderTestDatabase(t)()
firstID := insertCertificateStatus(t, dbMap, "1337", clk.Now().Add(12*time.Hour), time.Time{})
secondID := insertCertificateStatus(t, dbMap, "1338", clk.Now().Add(36*time.Hour), time.Time{})
cs := core.CertificateStatus{
Serial: "1337",
NotAfter: clk.Now().Add(12 * time.Hour),
}
err = dbMap.Insert(ctx, &cs)
test.AssertNotError(t, err, "inserting certificate status")
firstID := cs.ID
cs = core.CertificateStatus{
Serial: "1338",
NotAfter: clk.Now().Add(36 * time.Hour),
}
err = dbMap.Insert(ctx, &cs)
test.AssertNotError(t, err, "inserting certificate status")
secondID := cs.ID
t.Logf("first ID %d, second ID %d", firstID, secondID)
clk.Sleep(48 * time.Hour)
@ -137,7 +131,11 @@ func TestLoadFromDB(t *testing.T) {
defer test.ResetBoulderTestDatabase(t)
for i := range 100 {
insertCertificateStatus(t, dbMap, fmt.Sprintf("%036x", i), clk.Now().Add(200*time.Hour), clk.Now())
err = dbMap.Insert(context.Background(), &core.CertificateStatus{
Serial: fmt.Sprintf("%036x", i),
NotAfter: clk.Now().Add(200 * time.Hour),
OCSPLastUpdated: clk.Now(),
})
if err != nil {
t.Fatalf("Failed to insert certificateStatus: %s", err)
}

View File

@ -4,22 +4,22 @@ import "sync"
type inflight struct {
sync.RWMutex
items map[int64]struct{}
items map[uint64]struct{}
}
func newInflight() *inflight {
return &inflight{
items: make(map[int64]struct{}),
items: make(map[uint64]struct{}),
}
}
func (i *inflight) add(n int64) {
func (i *inflight) add(n uint64) {
i.Lock()
defer i.Unlock()
i.items[n] = struct{}{}
}
func (i *inflight) remove(n int64) {
func (i *inflight) remove(n uint64) {
i.Lock()
defer i.Unlock()
delete(i.items, n)
@ -34,13 +34,13 @@ func (i *inflight) len() int {
// min returns the numerically smallest key inflight. If nothing is inflight,
// it returns 0. Note: this takes O(n) time in the number of keys and should
// be called rarely.
func (i *inflight) min() int64 {
func (i *inflight) min() uint64 {
i.RLock()
defer i.RUnlock()
if len(i.items) == 0 {
return 0
}
var min int64
var min uint64
for k := range i.items {
if min == 0 {
min = k

View File

@ -9,25 +9,25 @@ import (
func TestInflight(t *testing.T) {
ifl := newInflight()
test.AssertEquals(t, ifl.len(), 0)
test.AssertEquals(t, ifl.min(), int64(0))
test.AssertEquals(t, ifl.min(), uint64(0))
ifl.add(1337)
test.AssertEquals(t, ifl.len(), 1)
test.AssertEquals(t, ifl.min(), int64(1337))
test.AssertEquals(t, ifl.min(), uint64(1337))
ifl.remove(1337)
test.AssertEquals(t, ifl.len(), 0)
test.AssertEquals(t, ifl.min(), int64(0))
test.AssertEquals(t, ifl.min(), uint64(0))
ifl.add(7341)
ifl.add(3317)
ifl.add(1337)
test.AssertEquals(t, ifl.len(), 3)
test.AssertEquals(t, ifl.min(), int64(1337))
test.AssertEquals(t, ifl.min(), uint64(1337))
ifl.remove(3317)
ifl.remove(1337)
ifl.remove(7341)
test.AssertEquals(t, ifl.len(), 0)
test.AssertEquals(t, ifl.min(), int64(0))
test.AssertEquals(t, ifl.min(), uint64(0))
}

View File

@ -25,12 +25,11 @@ type Config struct {
ListenAddress string `validate:"omitempty,hostname_port"`
// Timeout is the per-request overall timeout. This should be slightly
// lower than the upstream's timeout when making requests to this service.
// lower than the upstream's timeout when making requests to the SFE.
Timeout config.Duration `validate:"-"`
// ShutdownStopTimeout determines the maximum amount of time to wait
// for extant request handlers to complete before exiting. It should be
// greater than Timeout.
// ShutdownStopTimeout is the duration that the SFE will wait before
// shutting down any listening servers.
ShutdownStopTimeout config.Duration
TLS cmd.TLSConfig

View File

@ -31,7 +31,7 @@ import (
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/resource"
"go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.30.0"
semconv "go.opentelemetry.io/otel/semconv/v1.25.0"
"google.golang.org/grpc/grpclog"
"github.com/letsencrypt/boulder/config"
@ -261,12 +261,6 @@ func newVersionCollector() prometheus.Collector {
func newStatsRegistry(addr string, logger blog.Logger) prometheus.Registerer {
registry := prometheus.NewRegistry()
if addr == "" {
logger.Info("No debug listen address specified")
return registry
}
registry.MustRegister(collectors.NewGoCollector())
registry.MustRegister(collectors.NewProcessCollector(
collectors.ProcessCollectorOpts{}))
@ -293,6 +287,10 @@ func newStatsRegistry(addr string, logger blog.Logger) prometheus.Registerer {
ErrorLog: promLogger{logger},
}))
if addr == "" {
logger.Err("Debug listen address is not configured")
os.Exit(1)
}
logger.Infof("Debug server listening on %s", addr)
server := http.Server{

View File

@ -23,24 +23,22 @@ var (
validPAConfig = []byte(`{
"dbConnect": "dummyDBConnect",
"enforcePolicyWhitelist": false,
"challenges": { "http-01": true },
"identifiers": { "dns": true, "ip": true }
"challenges": { "http-01": true }
}`)
invalidPAConfig = []byte(`{
"dbConnect": "dummyDBConnect",
"enforcePolicyWhitelist": false,
"challenges": { "nonsense": true },
"identifiers": { "openpgp": true }
"challenges": { "nonsense": true }
}`)
noChallengesIdentsPAConfig = []byte(`{
noChallengesPAConfig = []byte(`{
"dbConnect": "dummyDBConnect",
"enforcePolicyWhitelist": false
}`)
emptyChallengesIdentsPAConfig = []byte(`{
emptyChallengesPAConfig = []byte(`{
"dbConnect": "dummyDBConnect",
"enforcePolicyWhitelist": false,
"challenges": {},
"identifiers": {}
"challenges": {}
}`)
)
@ -49,25 +47,21 @@ func TestPAConfigUnmarshal(t *testing.T) {
err := json.Unmarshal(validPAConfig, &pc1)
test.AssertNotError(t, err, "Failed to unmarshal PAConfig")
test.AssertNotError(t, pc1.CheckChallenges(), "Flagged valid challenges as bad")
test.AssertNotError(t, pc1.CheckIdentifiers(), "Flagged valid identifiers as bad")
var pc2 PAConfig
err = json.Unmarshal(invalidPAConfig, &pc2)
test.AssertNotError(t, err, "Failed to unmarshal PAConfig")
test.AssertError(t, pc2.CheckChallenges(), "Considered invalid challenges as good")
test.AssertError(t, pc2.CheckIdentifiers(), "Considered invalid identifiers as good")
var pc3 PAConfig
err = json.Unmarshal(noChallengesIdentsPAConfig, &pc3)
err = json.Unmarshal(noChallengesPAConfig, &pc3)
test.AssertNotError(t, err, "Failed to unmarshal PAConfig")
test.AssertError(t, pc3.CheckChallenges(), "Disallow empty challenges map")
test.AssertNotError(t, pc3.CheckIdentifiers(), "Disallowed empty identifiers map")
var pc4 PAConfig
err = json.Unmarshal(emptyChallengesIdentsPAConfig, &pc4)
err = json.Unmarshal(emptyChallengesPAConfig, &pc4)
test.AssertNotError(t, err, "Failed to unmarshal PAConfig")
test.AssertError(t, pc4.CheckChallenges(), "Disallow empty challenges map")
test.AssertNotError(t, pc4.CheckIdentifiers(), "Disallowed empty identifiers map")
}
func TestMysqlLogger(t *testing.T) {
@ -133,13 +127,16 @@ func TestReadConfigFile(t *testing.T) {
test.AssertError(t, err, "ReadConfigFile('') did not error")
type config struct {
GRPC *GRPCClientConfig
TLS *TLSConfig
NotifyMailer struct {
DB DBConfig
SMTPConfig
}
Syslog SyslogConfig
}
var c config
err = ReadConfigFile("../test/config/health-checker.json", &c)
test.AssertNotError(t, err, "ReadConfigFile(../test/config/health-checker.json) errored")
test.AssertEquals(t, c.GRPC.Timeout.Duration, 1*time.Second)
err = ReadConfigFile("../test/config/notify-mailer.json", &c)
test.AssertNotError(t, err, "ReadConfigFile(../test/config/notify-mailer.json) errored")
test.AssertEquals(t, c.NotifyMailer.SMTPConfig.Server, "localhost")
}
func TestLogWriter(t *testing.T) {
@ -276,6 +273,9 @@ func TestFailExit(t *testing.T) {
return
}
// gosec points out that os.Args[0] is tainted, but we only run this as a test
// so we are not worried about it containing an untrusted value.
//nolint:gosec
cmd := exec.Command(os.Args[0], "-test.run=TestFailExit")
cmd.Env = append(os.Environ(), "TIME_TO_DIE=1")
output, err := cmd.CombinedOutput()
@ -288,7 +288,7 @@ func TestFailExit(t *testing.T) {
func testPanicStackTraceHelper() {
var x *int
*x = 1 //nolint: govet // Purposeful nil pointer dereference to trigger a panic
*x = 1 //nolint:govet
}
func TestPanicStackTrace(t *testing.T) {
@ -302,6 +302,9 @@ func TestPanicStackTrace(t *testing.T) {
return
}
// gosec points out that os.Args[0] is tainted, but we only run this as a test
// so we are not worried about it containing an untrusted value.
//nolint:gosec
cmd := exec.Command(os.Args[0], "-test.run=TestPanicStackTrace")
cmd.Env = append(os.Environ(), "AT_THE_DISCO=1")
output, err := cmd.CombinedOutput()

View File

@ -7,7 +7,7 @@ import (
// PolicyAuthority defines the public interface for the Boulder PA
// TODO(#5891): Move this interface to a more appropriate location.
type PolicyAuthority interface {
WillingToIssue(identifier.ACMEIdentifiers) error
WillingToIssue([]string) error
ChallengeTypesFor(identifier.ACMEIdentifier) ([]AcmeChallenge, error)
ChallengeTypeEnabled(AcmeChallenge) bool
CheckAuthzChallenges(*Authorization) error

View File

@ -6,7 +6,7 @@ import (
"encoding/json"
"fmt"
"hash/fnv"
"net/netip"
"net"
"strings"
"time"
@ -68,7 +68,7 @@ func (c AcmeChallenge) IsValid() bool {
}
}
// OCSPStatus defines the state of OCSP for a certificate
// OCSPStatus defines the state of OCSP for a domain
type OCSPStatus string
// These status are the states of OCSP
@ -98,7 +98,7 @@ type RawCertificateRequest struct {
// to account keys.
type Registration struct {
// Unique identifier
ID int64 `json:"id,omitempty"`
ID int64 `json:"id,omitempty" db:"id"`
// Account key to which the details are attached
Key *jose.JSONWebKey `json:"key"`
@ -109,6 +109,9 @@ type Registration struct {
// Agreement with terms of service
Agreement string `json:"agreement,omitempty"`
// InitialIP is the IP address from which the registration was created
InitialIP net.IP `json:"initialIp"`
// CreatedAt is the time the registration was created.
CreatedAt *time.Time `json:"createdAt,omitempty"`
@ -122,12 +125,10 @@ type ValidationRecord struct {
URL string `json:"url,omitempty"`
// Shared
//
// Hostname can hold either a DNS name or an IP address.
Hostname string `json:"hostname,omitempty"`
Port string `json:"port,omitempty"`
AddressesResolved []netip.Addr `json:"addressesResolved,omitempty"`
AddressUsed netip.Addr `json:"addressUsed,omitempty"`
DnsName string `json:"hostname,omitempty"`
Port string `json:"port,omitempty"`
AddressesResolved []net.IP `json:"addressesResolved,omitempty"`
AddressUsed net.IP `json:"addressUsed,omitempty"`
// AddressesTried contains a list of addresses tried before the `AddressUsed`.
// Presently this will only ever be one IP from `AddressesResolved` since the
@ -143,12 +144,30 @@ type ValidationRecord struct {
// AddressesTried: [ ::1 ],
// ...
// }
AddressesTried []netip.Addr `json:"addressesTried,omitempty"`
AddressesTried []net.IP `json:"addressesTried,omitempty"`
// ResolverAddrs is the host:port of the DNS resolver(s) that fulfilled the
// lookup for AddressUsed. During recursive A and AAAA lookups, a record may
// instead look like A:host:port or AAAA:host:port
ResolverAddrs []string `json:"resolverAddrs,omitempty"`
// Perspective uniquely identifies the Network Perspective used to perform
// the validation, as specified in BRs Section 5.4.1, Requirement 2.7
// ("Multi-Perspective Issuance Corroboration attempts from each Network
// Perspective"). It should uniquely identify either the Primary Perspective
// (VA) or a group of RVAs deployed in the same datacenter.
Perspective string `json:"perspective,omitempty"`
// RIR indicates the Regional Internet Registry where this RVA is located.
// This field is used to identify the RIR region from which a given
// validation was performed, as specified in the "Phased Implementation
// Timeline" in BRs Section 3.2.2.9. It must be one of the following values:
// - ARIN
// - RIPE
// - APNIC
// - LACNIC
// - AfriNIC
RIR string `json:"rir,omitempty"`
}
// Challenge is an aggregate of all data needed for any challenges.
@ -210,7 +229,7 @@ func (ch Challenge) RecordsSane() bool {
for _, rec := range ch.ValidationRecord {
// TODO(#7140): Add a check for ResolverAddress == "" only after the
// core.proto change has been deployed.
if rec.URL == "" || rec.Hostname == "" || rec.Port == "" || (rec.AddressUsed == netip.Addr{}) ||
if rec.URL == "" || rec.DnsName == "" || rec.Port == "" || rec.AddressUsed == nil ||
len(rec.AddressesResolved) == 0 {
return false
}
@ -224,8 +243,8 @@ func (ch Challenge) RecordsSane() bool {
}
// TODO(#7140): Add a check for ResolverAddress == "" only after the
// core.proto change has been deployed.
if ch.ValidationRecord[0].Hostname == "" || ch.ValidationRecord[0].Port == "" ||
(ch.ValidationRecord[0].AddressUsed == netip.Addr{}) || len(ch.ValidationRecord[0].AddressesResolved) == 0 {
if ch.ValidationRecord[0].DnsName == "" || ch.ValidationRecord[0].Port == "" ||
ch.ValidationRecord[0].AddressUsed == nil || len(ch.ValidationRecord[0].AddressesResolved) == 0 {
return false
}
case ChallengeTypeDNS01:
@ -234,7 +253,7 @@ func (ch Challenge) RecordsSane() bool {
}
// TODO(#7140): Add a check for ResolverAddress == "" only after the
// core.proto change has been deployed.
if ch.ValidationRecord[0].Hostname == "" {
if ch.ValidationRecord[0].DnsName == "" {
return false
}
return true
@ -271,30 +290,30 @@ func (ch Challenge) StringID() string {
return base64.RawURLEncoding.EncodeToString(h.Sum(nil)[0:4])
}
// Authorization represents the authorization of an account key holder to act on
// behalf of an identifier. This struct is intended to be used both internally
// and for JSON marshaling on the wire. Any fields that should be suppressed on
// the wire (e.g., ID, regID) must be made empty before marshaling.
// Authorization represents the authorization of an account key holder
// to act on behalf of a domain. This struct is intended to be used both
// internally and for JSON marshaling on the wire. Any fields that should be
// suppressed on the wire (e.g., ID, regID) must be made empty before marshaling.
type Authorization struct {
// An identifier for this authorization, unique across
// authorizations and certificates within this instance.
ID string `json:"-"`
ID string `json:"-" db:"id"`
// The identifier for which authorization is being given
Identifier identifier.ACMEIdentifier `json:"identifier,omitempty"`
Identifier identifier.ACMEIdentifier `json:"identifier,omitempty" db:"identifier"`
// The registration ID associated with the authorization
RegistrationID int64 `json:"-"`
RegistrationID int64 `json:"-" db:"registrationID"`
// The status of the validation of this authorization
Status AcmeStatus `json:"status,omitempty"`
Status AcmeStatus `json:"status,omitempty" db:"status"`
// The date after which this authorization will be no
// longer be considered valid. Note: a certificate may be issued even on the
// last day of an authorization's lifetime. The last day for which someone can
// hold a valid certificate based on an authorization is authorization
// lifetime + certificate lifetime.
Expires *time.Time `json:"expires,omitempty"`
Expires *time.Time `json:"expires,omitempty" db:"expires"`
// An array of challenges objects used to validate the
// applicant's control of the identifier. For authorizations
@ -304,7 +323,7 @@ type Authorization struct {
//
// There should only ever be one challenge of each type in this
// slice and the order of these challenges may not be predictable.
Challenges []Challenge `json:"challenges,omitempty"`
Challenges []Challenge `json:"challenges,omitempty" db:"-"`
// https://datatracker.ietf.org/doc/html/rfc8555#page-29
//
@ -318,12 +337,7 @@ type Authorization struct {
// the identifier stored in the database. Unlike the identifier returned
// as part of the authorization, the identifier we store in the database
// can contain an asterisk.
Wildcard bool `json:"wildcard,omitempty"`
// CertificateProfileName is the name of the profile associated with the
// order that first resulted in the creation of this authorization. Omitted
// from API responses.
CertificateProfileName string `json:"-"`
Wildcard bool `json:"wildcard,omitempty" db:"-"`
}
// FindChallengeByStringID will look for a challenge matching the given ID inside
@ -467,21 +481,16 @@ type RenewalInfo struct {
// RenewalInfoSimple constructs a `RenewalInfo` object and suggested window
// using a very simple renewal calculation: calculate a point 2/3rds of the way
// through the validity period (or halfway through, for short-lived certs), then
// give a 2%-of-validity wide window around that. Both the `issued` and
// `expires` timestamps are expected to be UTC.
// through the validity period, then give a 2-day window around that. Both the
// `issued` and `expires` timestamps are expected to be UTC.
func RenewalInfoSimple(issued time.Time, expires time.Time) RenewalInfo {
validity := expires.Add(time.Second).Sub(issued)
renewalOffset := validity / time.Duration(3)
if validity < 10*24*time.Hour {
renewalOffset = validity / time.Duration(2)
}
idealRenewal := expires.Add(-renewalOffset)
margin := validity / time.Duration(100)
return RenewalInfo{
SuggestedWindow: SuggestedWindow{
Start: idealRenewal.Add(-1 * margin).Truncate(time.Second),
End: idealRenewal.Add(margin).Truncate(time.Second),
Start: idealRenewal.Add(-24 * time.Hour),
End: idealRenewal.Add(24 * time.Hour),
},
}
}
@ -496,8 +505,8 @@ func RenewalInfoImmediate(now time.Time, explanationURL string) RenewalInfo {
oneHourAgo := now.Add(-1 * time.Hour)
return RenewalInfo{
SuggestedWindow: SuggestedWindow{
Start: oneHourAgo.Truncate(time.Second),
End: oneHourAgo.Add(time.Minute * 30).Truncate(time.Second),
Start: oneHourAgo,
End: oneHourAgo.Add(time.Minute * 30),
},
ExplanationURL: explanationURL,
}

View File

@ -4,7 +4,7 @@ import (
"crypto/rsa"
"encoding/json"
"math/big"
"net/netip"
"net"
"testing"
"time"
@ -37,10 +37,10 @@ func TestRecordSanityCheckOnUnsupportedChallengeType(t *testing.T) {
rec := []ValidationRecord{
{
URL: "http://localhost/test",
Hostname: "localhost",
DnsName: "localhost",
Port: "80",
AddressesResolved: []netip.Addr{netip.MustParseAddr("127.0.0.1")},
AddressUsed: netip.MustParseAddr("127.0.0.1"),
AddressesResolved: []net.IP{{127, 0, 0, 1}},
AddressUsed: net.IP{127, 0, 0, 1},
ResolverAddrs: []string{"eastUnboundAndDown"},
},
}

View File

@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.5
// protoc-gen-go v1.34.1
// protoc v3.20.1
// source: core.proto
@ -12,7 +12,6 @@ import (
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
@ -23,18 +22,21 @@ const (
)
type Identifier struct {
state protoimpl.MessageState `protogen:"open.v1"`
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
func (x *Identifier) Reset() {
*x = Identifier{}
mi := &file_core_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
if protoimpl.UnsafeEnabled {
mi := &file_core_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Identifier) String() string {
@ -45,7 +47,7 @@ func (*Identifier) ProtoMessage() {}
func (x *Identifier) ProtoReflect() protoreflect.Message {
mi := &file_core_proto_msgTypes[0]
if x != nil {
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -75,8 +77,11 @@ func (x *Identifier) GetValue() string {
}
type Challenge struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
// Fields specified by RFC 8555, Section 8.
Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
Url string `protobuf:"bytes,9,opt,name=url,proto3" json:"url,omitempty"`
@ -87,15 +92,15 @@ type Challenge struct {
Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"`
// Additional fields for our own record keeping.
Validationrecords []*ValidationRecord `protobuf:"bytes,10,rep,name=validationrecords,proto3" json:"validationrecords,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Challenge) Reset() {
*x = Challenge{}
mi := &file_core_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
if protoimpl.UnsafeEnabled {
mi := &file_core_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Challenge) String() string {
@ -106,7 +111,7 @@ func (*Challenge) ProtoMessage() {}
func (x *Challenge) ProtoReflect() protoreflect.Message {
mi := &file_core_proto_msgTypes[1]
if x != nil {
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -178,28 +183,31 @@ func (x *Challenge) GetValidationrecords() []*ValidationRecord {
}
type ValidationRecord struct {
state protoimpl.MessageState `protogen:"open.v1"`
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Next unused field number: 9
Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"`
Port string `protobuf:"bytes,2,opt,name=port,proto3" json:"port,omitempty"`
AddressesResolved [][]byte `protobuf:"bytes,3,rep,name=addressesResolved,proto3" json:"addressesResolved,omitempty"` // netip.Addr.MarshalText()
AddressUsed []byte `protobuf:"bytes,4,opt,name=addressUsed,proto3" json:"addressUsed,omitempty"` // netip.Addr.MarshalText()
AddressesResolved [][]byte `protobuf:"bytes,3,rep,name=addressesResolved,proto3" json:"addressesResolved,omitempty"` // net.IP.MarshalText()
AddressUsed []byte `protobuf:"bytes,4,opt,name=addressUsed,proto3" json:"addressUsed,omitempty"` // net.IP.MarshalText()
Authorities []string `protobuf:"bytes,5,rep,name=authorities,proto3" json:"authorities,omitempty"`
Url string `protobuf:"bytes,6,opt,name=url,proto3" json:"url,omitempty"`
// A list of addresses tried before the address used (see
// core/objects.go and the comment on the ValidationRecord structure
// definition for more information.
AddressesTried [][]byte `protobuf:"bytes,7,rep,name=addressesTried,proto3" json:"addressesTried,omitempty"` // netip.Addr.MarshalText()
AddressesTried [][]byte `protobuf:"bytes,7,rep,name=addressesTried,proto3" json:"addressesTried,omitempty"` // net.IP.MarshalText()
ResolverAddrs []string `protobuf:"bytes,8,rep,name=resolverAddrs,proto3" json:"resolverAddrs,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ValidationRecord) Reset() {
*x = ValidationRecord{}
mi := &file_core_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
if protoimpl.UnsafeEnabled {
mi := &file_core_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ValidationRecord) String() string {
@ -210,7 +218,7 @@ func (*ValidationRecord) ProtoMessage() {}
func (x *ValidationRecord) ProtoReflect() protoreflect.Message {
mi := &file_core_proto_msgTypes[2]
if x != nil {
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -282,19 +290,22 @@ func (x *ValidationRecord) GetResolverAddrs() []string {
}
type ProblemDetails struct {
state protoimpl.MessageState `protogen:"open.v1"`
ProblemType string `protobuf:"bytes,1,opt,name=problemType,proto3" json:"problemType,omitempty"`
Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"`
HttpStatus int32 `protobuf:"varint,3,opt,name=httpStatus,proto3" json:"httpStatus,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
ProblemType string `protobuf:"bytes,1,opt,name=problemType,proto3" json:"problemType,omitempty"`
Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"`
HttpStatus int32 `protobuf:"varint,3,opt,name=httpStatus,proto3" json:"httpStatus,omitempty"`
}
func (x *ProblemDetails) Reset() {
*x = ProblemDetails{}
mi := &file_core_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
if protoimpl.UnsafeEnabled {
mi := &file_core_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ProblemDetails) String() string {
@ -305,7 +316,7 @@ func (*ProblemDetails) ProtoMessage() {}
func (x *ProblemDetails) ProtoReflect() protoreflect.Message {
mi := &file_core_proto_msgTypes[3]
if x != nil {
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -342,7 +353,10 @@ func (x *ProblemDetails) GetHttpStatus() int32 {
}
type Certificate struct {
state protoimpl.MessageState `protogen:"open.v1"`
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Next unused field number: 9
RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial,omitempty"`
@ -350,15 +364,15 @@ type Certificate struct {
Der []byte `protobuf:"bytes,4,opt,name=der,proto3" json:"der,omitempty"`
Issued *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=issued,proto3" json:"issued,omitempty"`
Expires *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=expires,proto3" json:"expires,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Certificate) Reset() {
*x = Certificate{}
mi := &file_core_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
if protoimpl.UnsafeEnabled {
mi := &file_core_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Certificate) String() string {
@ -369,7 +383,7 @@ func (*Certificate) ProtoMessage() {}
func (x *Certificate) ProtoReflect() protoreflect.Message {
mi := &file_core_proto_msgTypes[4]
if x != nil {
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -427,7 +441,10 @@ func (x *Certificate) GetExpires() *timestamppb.Timestamp {
}
type CertificateStatus struct {
state protoimpl.MessageState `protogen:"open.v1"`
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Next unused field number: 16
Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"`
Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
@ -438,15 +455,15 @@ type CertificateStatus struct {
NotAfter *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=notAfter,proto3" json:"notAfter,omitempty"`
IsExpired bool `protobuf:"varint,10,opt,name=isExpired,proto3" json:"isExpired,omitempty"`
IssuerID int64 `protobuf:"varint,11,opt,name=issuerID,proto3" json:"issuerID,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CertificateStatus) Reset() {
*x = CertificateStatus{}
mi := &file_core_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
if protoimpl.UnsafeEnabled {
mi := &file_core_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *CertificateStatus) String() string {
@ -457,7 +474,7 @@ func (*CertificateStatus) ProtoMessage() {}
func (x *CertificateStatus) ProtoReflect() protoreflect.Message {
mi := &file_core_proto_msgTypes[5]
if x != nil {
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -536,23 +553,28 @@ func (x *CertificateStatus) GetIssuerID() int64 {
}
type Registration struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Next unused field number: 10
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
Contact []string `protobuf:"bytes,3,rep,name=contact,proto3" json:"contact,omitempty"`
Agreement string `protobuf:"bytes,5,opt,name=agreement,proto3" json:"agreement,omitempty"`
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=createdAt,proto3" json:"createdAt,omitempty"`
Status string `protobuf:"bytes,8,opt,name=status,proto3" json:"status,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Next unused field number: 10
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
Contact []string `protobuf:"bytes,3,rep,name=contact,proto3" json:"contact,omitempty"`
ContactsPresent bool `protobuf:"varint,4,opt,name=contactsPresent,proto3" json:"contactsPresent,omitempty"`
Agreement string `protobuf:"bytes,5,opt,name=agreement,proto3" json:"agreement,omitempty"`
InitialIP []byte `protobuf:"bytes,6,opt,name=initialIP,proto3" json:"initialIP,omitempty"`
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=createdAt,proto3" json:"createdAt,omitempty"`
Status string `protobuf:"bytes,8,opt,name=status,proto3" json:"status,omitempty"`
}
func (x *Registration) Reset() {
*x = Registration{}
mi := &file_core_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
if protoimpl.UnsafeEnabled {
mi := &file_core_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Registration) String() string {
@ -563,7 +585,7 @@ func (*Registration) ProtoMessage() {}
func (x *Registration) ProtoReflect() protoreflect.Message {
mi := &file_core_proto_msgTypes[6]
if x != nil {
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -599,6 +621,13 @@ func (x *Registration) GetContact() []string {
return nil
}
func (x *Registration) GetContactsPresent() bool {
if x != nil {
return x.ContactsPresent
}
return false
}
func (x *Registration) GetAgreement() string {
if x != nil {
return x.Agreement
@ -606,6 +635,13 @@ func (x *Registration) GetAgreement() string {
return ""
}
func (x *Registration) GetInitialIP() []byte {
if x != nil {
return x.InitialIP
}
return nil
}
func (x *Registration) GetCreatedAt() *timestamppb.Timestamp {
if x != nil {
return x.CreatedAt
@ -621,23 +657,26 @@ func (x *Registration) GetStatus() string {
}
type Authorization struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
RegistrationID int64 `protobuf:"varint,3,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
Identifier *Identifier `protobuf:"bytes,11,opt,name=identifier,proto3" json:"identifier,omitempty"`
Status string `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"`
Expires *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=expires,proto3" json:"expires,omitempty"`
Challenges []*Challenge `protobuf:"bytes,6,rep,name=challenges,proto3" json:"challenges,omitempty"`
CertificateProfileName string `protobuf:"bytes,10,opt,name=certificateProfileName,proto3" json:"certificateProfileName,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
RegistrationID int64 `protobuf:"varint,3,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
// Fields specified by RFC 8555, Section 7.1.4
DnsName string `protobuf:"bytes,2,opt,name=dnsName,proto3" json:"dnsName,omitempty"`
Status string `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"`
Expires *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=expires,proto3" json:"expires,omitempty"`
Challenges []*Challenge `protobuf:"bytes,6,rep,name=challenges,proto3" json:"challenges,omitempty"`
}
func (x *Authorization) Reset() {
*x = Authorization{}
mi := &file_core_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
if protoimpl.UnsafeEnabled {
mi := &file_core_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Authorization) String() string {
@ -648,7 +687,7 @@ func (*Authorization) ProtoMessage() {}
func (x *Authorization) ProtoReflect() protoreflect.Message {
mi := &file_core_proto_msgTypes[7]
if x != nil {
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -677,11 +716,11 @@ func (x *Authorization) GetRegistrationID() int64 {
return 0
}
func (x *Authorization) GetIdentifier() *Identifier {
func (x *Authorization) GetDnsName() string {
if x != nil {
return x.Identifier
return x.DnsName
}
return nil
return ""
}
func (x *Authorization) GetStatus() string {
@ -705,40 +744,35 @@ func (x *Authorization) GetChallenges() []*Challenge {
return nil
}
func (x *Authorization) GetCertificateProfileName() string {
if x != nil {
return x.CertificateProfileName
}
return ""
}
type Order struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
// Fields specified by RFC 8555, Section 7.1.3
// Note that we do not respect notBefore and notAfter, and we infer the
// finalize and certificate URLs from the id and certificateSerial fields.
Status string `protobuf:"bytes,7,opt,name=status,proto3" json:"status,omitempty"`
Expires *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=expires,proto3" json:"expires,omitempty"`
Identifiers []*Identifier `protobuf:"bytes,16,rep,name=identifiers,proto3" json:"identifiers,omitempty"`
DnsNames []string `protobuf:"bytes,8,rep,name=dnsNames,proto3" json:"dnsNames,omitempty"`
Error *ProblemDetails `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"`
V2Authorizations []int64 `protobuf:"varint,11,rep,packed,name=v2Authorizations,proto3" json:"v2Authorizations,omitempty"`
CertificateSerial string `protobuf:"bytes,5,opt,name=certificateSerial,proto3" json:"certificateSerial,omitempty"`
// Additional fields for our own record-keeping.
Created *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=created,proto3" json:"created,omitempty"`
CertificateProfileName string `protobuf:"bytes,14,opt,name=certificateProfileName,proto3" json:"certificateProfileName,omitempty"`
Replaces string `protobuf:"bytes,15,opt,name=replaces,proto3" json:"replaces,omitempty"`
BeganProcessing bool `protobuf:"varint,9,opt,name=beganProcessing,proto3" json:"beganProcessing,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Order) Reset() {
*x = Order{}
mi := &file_core_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
if protoimpl.UnsafeEnabled {
mi := &file_core_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Order) String() string {
@ -749,7 +783,7 @@ func (*Order) ProtoMessage() {}
func (x *Order) ProtoReflect() protoreflect.Message {
mi := &file_core_proto_msgTypes[8]
if x != nil {
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -792,9 +826,9 @@ func (x *Order) GetExpires() *timestamppb.Timestamp {
return nil
}
func (x *Order) GetIdentifiers() []*Identifier {
func (x *Order) GetDnsNames() []string {
if x != nil {
return x.Identifiers
return x.DnsNames
}
return nil
}
@ -834,13 +868,6 @@ func (x *Order) GetCertificateProfileName() string {
return ""
}
func (x *Order) GetReplaces() string {
if x != nil {
return x.Replaces
}
return ""
}
func (x *Order) GetBeganProcessing() bool {
if x != nil {
return x.BeganProcessing
@ -849,20 +876,23 @@ func (x *Order) GetBeganProcessing() bool {
}
type CRLEntry struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Next unused field number: 5
Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"`
Reason int32 `protobuf:"varint,2,opt,name=reason,proto3" json:"reason,omitempty"`
RevokedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=revokedAt,proto3" json:"revokedAt,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Next unused field number: 5
Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"`
Reason int32 `protobuf:"varint,2,opt,name=reason,proto3" json:"reason,omitempty"`
RevokedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=revokedAt,proto3" json:"revokedAt,omitempty"`
}
func (x *CRLEntry) Reset() {
*x = CRLEntry{}
mi := &file_core_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
if protoimpl.UnsafeEnabled {
mi := &file_core_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *CRLEntry) String() string {
@ -873,7 +903,7 @@ func (*CRLEntry) ProtoMessage() {}
func (x *CRLEntry) ProtoReflect() protoreflect.Message {
mi := &file_core_proto_msgTypes[9]
if x != nil {
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -911,7 +941,7 @@ func (x *CRLEntry) GetRevokedAt() *timestamppb.Timestamp {
var File_core_proto protoreflect.FileDescriptor
var file_core_proto_rawDesc = string([]byte{
var file_core_proto_rawDesc = []byte{
0x0a, 0x0a, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x63, 0x6f,
0x72, 0x65, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72,
@ -1006,101 +1036,96 @@ var file_core_proto_rawDesc = string([]byte{
0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08,
0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04,
0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08,
0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xcc, 0x01, 0x0a,
0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x88, 0x02, 0x0a,
0x0c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a,
0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a,
0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09,
0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x67, 0x72,
0x65, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x67,
0x72, 0x65, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74,
0x65, 0x64, 0x41, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41,
0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28,
0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a,
0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x22, 0xc8, 0x02, 0x0a, 0x0d,
0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a,
0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x26, 0x0a,
0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18,
0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x30, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66,
0x69, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x6f, 0x72, 0x65,
0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0a, 0x69, 0x64, 0x65,
0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12,
0x34, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x78,
0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x0a, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e,
0x67, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x6f, 0x72, 0x65,
0x2e, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x52, 0x0a, 0x63, 0x68, 0x61, 0x6c,
0x6c, 0x65, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x36, 0x0a, 0x16, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66,
0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65,
0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63,
0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x4a, 0x04,
0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09,
0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x93, 0x04, 0x0a, 0x05, 0x4f, 0x72, 0x64, 0x65, 0x72,
0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64,
0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74,
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74,
0x75, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
0x12, 0x34, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65,
0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69,
0x66, 0x69, 0x65, 0x72, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x6f,
0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0b, 0x69,
0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72,
0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65,
0x2e, 0x50, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52,
0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x10, 0x76, 0x32, 0x41, 0x75, 0x74, 0x68,
0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x03,
0x52, 0x10, 0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x73, 0x12, 0x2c, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
0x65, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63,
0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c,
0x12, 0x34, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x63,
0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x36, 0x0a, 0x16, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66,
0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65,
0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63,
0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a,
0x0a, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09,
0x52, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0f, 0x62, 0x65,
0x67, 0x61, 0x6e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x18, 0x09, 0x20,
0x01, 0x28, 0x08, 0x52, 0x0f, 0x62, 0x65, 0x67, 0x61, 0x6e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73,
0x73, 0x69, 0x6e, 0x67, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07,
0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0x7a, 0x0a, 0x08,
0x43, 0x52, 0x4c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69,
0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c,
0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05,
0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x09, 0x72, 0x65, 0x76, 0x6f,
0x6b, 0x65, 0x64, 0x41, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64,
0x41, 0x74, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68,
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79,
0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
})
0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x63, 0x6f, 0x6e,
0x74, 0x61, 0x63, 0x74, 0x73, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01,
0x28, 0x08, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x73, 0x50, 0x72, 0x65, 0x73,
0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x67, 0x72, 0x65, 0x65, 0x6d, 0x65, 0x6e, 0x74,
0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x67, 0x72, 0x65, 0x65, 0x6d, 0x65, 0x6e,
0x74, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x49, 0x50, 0x18, 0x06,
0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x49, 0x50, 0x12,
0x38, 0x0a, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, 0x09, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09,
0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61,
0x74, 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
0x73, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x22, 0xf2, 0x01, 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x68,
0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67,
0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28,
0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49,
0x44, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6e, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x07, 0x64, 0x6e, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73,
0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61,
0x74, 0x75, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x09,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x0a, 0x63, 0x68, 0x61,
0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x52, 0x0a,
0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x73, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06,
0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0xd9, 0x03, 0x0a,
0x05, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74,
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e,
0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x16,
0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65,
0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08,
0x64, 0x6e, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08,
0x64, 0x6e, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f,
0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50,
0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x05, 0x65,
0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x10, 0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72,
0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x03, 0x52, 0x10,
0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
0x12, 0x2c, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53,
0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x65, 0x72,
0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x34,
0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x63, 0x72, 0x65,
0x61, 0x74, 0x65, 0x64, 0x12, 0x36, 0x0a, 0x16, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63,
0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x0e,
0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
0x65, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x0f,
0x62, 0x65, 0x67, 0x61, 0x6e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x18,
0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x62, 0x65, 0x67, 0x61, 0x6e, 0x50, 0x72, 0x6f, 0x63,
0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x06,
0x10, 0x07, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x22, 0x7a, 0x0a, 0x08, 0x43, 0x52, 0x4c, 0x45,
0x6e, 0x74, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06,
0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x72, 0x65,
0x61, 0x73, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x41,
0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
0x61, 0x6d, 0x70, 0x52, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x41, 0x74, 0x4a, 0x04,
0x08, 0x03, 0x10, 0x04, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62,
0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_core_proto_rawDescOnce sync.Once
file_core_proto_rawDescData []byte
file_core_proto_rawDescData = file_core_proto_rawDesc
)
func file_core_proto_rawDescGZIP() []byte {
file_core_proto_rawDescOnce.Do(func() {
file_core_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_core_proto_rawDesc), len(file_core_proto_rawDesc)))
file_core_proto_rawDescData = protoimpl.X.CompressGZIP(file_core_proto_rawDescData)
})
return file_core_proto_rawDescData
}
var file_core_proto_msgTypes = make([]protoimpl.MessageInfo, 10)
var file_core_proto_goTypes = []any{
var file_core_proto_goTypes = []interface{}{
(*Identifier)(nil), // 0: core.Identifier
(*Challenge)(nil), // 1: core.Challenge
(*ValidationRecord)(nil), // 2: core.ValidationRecord
@ -1124,19 +1149,17 @@ var file_core_proto_depIdxs = []int32{
10, // 7: core.CertificateStatus.lastExpirationNagSent:type_name -> google.protobuf.Timestamp
10, // 8: core.CertificateStatus.notAfter:type_name -> google.protobuf.Timestamp
10, // 9: core.Registration.createdAt:type_name -> google.protobuf.Timestamp
0, // 10: core.Authorization.identifier:type_name -> core.Identifier
10, // 11: core.Authorization.expires:type_name -> google.protobuf.Timestamp
1, // 12: core.Authorization.challenges:type_name -> core.Challenge
10, // 13: core.Order.expires:type_name -> google.protobuf.Timestamp
0, // 14: core.Order.identifiers:type_name -> core.Identifier
3, // 15: core.Order.error:type_name -> core.ProblemDetails
10, // 16: core.Order.created:type_name -> google.protobuf.Timestamp
10, // 17: core.CRLEntry.revokedAt:type_name -> google.protobuf.Timestamp
18, // [18:18] is the sub-list for method output_type
18, // [18:18] is the sub-list for method input_type
18, // [18:18] is the sub-list for extension type_name
18, // [18:18] is the sub-list for extension extendee
0, // [0:18] is the sub-list for field type_name
10, // 10: core.Authorization.expires:type_name -> google.protobuf.Timestamp
1, // 11: core.Authorization.challenges:type_name -> core.Challenge
10, // 12: core.Order.expires:type_name -> google.protobuf.Timestamp
3, // 13: core.Order.error:type_name -> core.ProblemDetails
10, // 14: core.Order.created:type_name -> google.protobuf.Timestamp
10, // 15: core.CRLEntry.revokedAt:type_name -> google.protobuf.Timestamp
16, // [16:16] is the sub-list for method output_type
16, // [16:16] is the sub-list for method input_type
16, // [16:16] is the sub-list for extension type_name
16, // [16:16] is the sub-list for extension extendee
0, // [0:16] is the sub-list for field type_name
}
func init() { file_core_proto_init() }
@ -1144,11 +1167,133 @@ func file_core_proto_init() {
if File_core_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_core_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Identifier); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_core_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Challenge); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_core_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ValidationRecord); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_core_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ProblemDetails); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_core_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Certificate); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_core_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*CertificateStatus); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_core_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Registration); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_core_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Authorization); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_core_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Order); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_core_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*CRLEntry); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_core_proto_rawDesc), len(file_core_proto_rawDesc)),
RawDescriptor: file_core_proto_rawDesc,
NumEnums: 0,
NumMessages: 10,
NumExtensions: 0,
@ -1159,6 +1304,7 @@ func file_core_proto_init() {
MessageInfos: file_core_proto_msgTypes,
}.Build()
File_core_proto = out.File
file_core_proto_rawDesc = nil
file_core_proto_goTypes = nil
file_core_proto_depIdxs = nil
}

View File

@ -30,15 +30,15 @@ message ValidationRecord {
// Next unused field number: 9
string hostname = 1;
string port = 2;
repeated bytes addressesResolved = 3; // netip.Addr.MarshalText()
bytes addressUsed = 4; // netip.Addr.MarshalText()
repeated bytes addressesResolved = 3; // net.IP.MarshalText()
bytes addressUsed = 4; // net.IP.MarshalText()
repeated string authorities = 5;
string url = 6;
// A list of addresses tried before the address used (see
// core/objects.go and the comment on the ValidationRecord structure
// definition for more information.
repeated bytes addressesTried = 7; // netip.Addr.MarshalText()
repeated bytes addressesTried = 7; // net.IP.MarshalText()
repeated string resolverAddrs = 8;
}
@ -84,32 +84,30 @@ message Registration {
int64 id = 1;
bytes key = 2;
repeated string contact = 3;
reserved 4; // Previously contactsPresent
bool contactsPresent = 4;
string agreement = 5;
reserved 6; // Previously initialIP
bytes initialIP = 6;
reserved 7; // Previously createdAtNS
google.protobuf.Timestamp createdAt = 9;
string status = 8;
}
message Authorization {
// Next unused field number: 12
// Next unused field number: 10
reserved 5, 7, 8;
string id = 1;
int64 registrationID = 3;
// Fields specified by RFC 8555, Section 7.1.4
reserved 2; // Previously dnsName
Identifier identifier = 11;
string dnsName = 2;
string status = 4;
google.protobuf.Timestamp expires = 9;
repeated core.Challenge challenges = 6;
string certificateProfileName = 10;
// We do not directly represent the "wildcard" field, instead inferring it
// from the identifier value.
}
message Order {
// Next unused field number: 17
// Next unused field number: 15
reserved 3, 6, 10;
int64 id = 1;
int64 registrationID = 2;
@ -118,15 +116,13 @@ message Order {
// finalize and certificate URLs from the id and certificateSerial fields.
string status = 7;
google.protobuf.Timestamp expires = 12;
reserved 8; // Previously dnsNames
repeated Identifier identifiers = 16;
repeated string dnsNames = 8;
ProblemDetails error = 4;
repeated int64 v2Authorizations = 11;
string certificateSerial = 5;
// Additional fields for our own record-keeping.
google.protobuf.Timestamp created = 13;
string certificateProfileName = 14;
string replaces = 15;
bool beganProcessing = 9;
}

View File

@ -1,7 +1,6 @@
package core
import (
"context"
"crypto"
"crypto/ecdsa"
"crypto/rand"
@ -21,18 +20,16 @@ import (
"path"
"reflect"
"regexp"
"slices"
"sort"
"strings"
"time"
"unicode"
"github.com/go-jose/go-jose/v4"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/letsencrypt/boulder/identifier"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/letsencrypt/boulder/identifier"
)
const Unspecified = "Unspecified"
@ -321,15 +318,26 @@ func UniqueLowerNames(names []string) (unique []string) {
return
}
// HashIdentifiers returns a hash of the identifiers requested. This is intended
// for use when interacting with the orderFqdnSets table and rate limiting.
func HashIdentifiers(idents identifier.ACMEIdentifiers) []byte {
var values []string
for _, ident := range identifier.Normalize(idents) {
values = append(values, ident.Value)
// NormalizeIdentifiers returns the set of all unique ACME identifiers in the
// input after all of them are lowercased. The returned identifier values will
// be in their lowercased form and sorted alphabetically by value.
func NormalizeIdentifiers(identifiers []identifier.ACMEIdentifier) []identifier.ACMEIdentifier {
for i := range identifiers {
identifiers[i].Value = strings.ToLower(identifiers[i].Value)
}
hash := sha256.Sum256([]byte(strings.Join(values, ",")))
sort.Slice(identifiers, func(i, j int) bool {
return fmt.Sprintf("%s:%s", identifiers[i].Type, identifiers[i].Value) < fmt.Sprintf("%s:%s", identifiers[j].Type, identifiers[j].Value)
})
return slices.Compact(identifiers)
}
// HashNames returns a hash of the names requested. This is intended for use
// when interacting with the orderFqdnSets table and rate limiting.
func HashNames(names []string) []byte {
names = UniqueLowerNames(names)
hash := sha256.Sum256([]byte(strings.Join(names, ",")))
return hash[:]
}
@ -387,14 +395,6 @@ func IsASCII(str string) bool {
return true
}
// IsCanceled returns true if err is non-nil and is either context.Canceled, or
// has a grpc code of Canceled. This is useful because cancellations propagate
// through gRPC boundaries, and if we choose to treat in-process cancellations a
// certain way, we usually want to treat cross-process cancellations the same way.
func IsCanceled(err error) bool {
return errors.Is(err, context.Canceled) || status.Code(err) == codes.Canceled
}
func Command() string {
return path.Base(os.Args[0])
}

View File

@ -1,23 +1,18 @@
package core
import (
"context"
"bytes"
"encoding/json"
"errors"
"fmt"
"math"
"math/big"
"net/netip"
"os"
"slices"
"sort"
"strings"
"testing"
"time"
"github.com/go-jose/go-jose/v4"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/timestamppb"
@ -256,6 +251,26 @@ func TestUniqueLowerNames(t *testing.T) {
test.AssertDeepEquals(t, []string{"a.com", "bar.com", "baz.com", "foobar.com"}, u)
}
func TestNormalizeIdentifiers(t *testing.T) {
identifiers := []identifier.ACMEIdentifier{
{Type: "DNS", Value: "foobar.com"},
{Type: "DNS", Value: "fooBAR.com"},
{Type: "DNS", Value: "baz.com"},
{Type: "DNS", Value: "foobar.com"},
{Type: "DNS", Value: "bar.com"},
{Type: "DNS", Value: "bar.com"},
{Type: "DNS", Value: "a.com"},
}
expected := []identifier.ACMEIdentifier{
{Type: "DNS", Value: "a.com"},
{Type: "DNS", Value: "bar.com"},
{Type: "DNS", Value: "baz.com"},
{Type: "DNS", Value: "foobar.com"},
}
u := NormalizeIdentifiers(identifiers)
test.AssertDeepEquals(t, expected, u)
}
func TestValidSerial(t *testing.T) {
notLength32Or36 := "A"
length32 := strings.Repeat("A", 32)
@ -321,108 +336,29 @@ func TestRetryBackoff(t *testing.T) {
}
func TestHashIdentifiers(t *testing.T) {
dns1 := identifier.NewDNS("example.com")
dns1_caps := identifier.NewDNS("eXaMpLe.COM")
dns2 := identifier.NewDNS("high-energy-cheese-lab.nrc-cnrc.gc.ca")
dns2_caps := identifier.NewDNS("HIGH-ENERGY-CHEESE-LAB.NRC-CNRC.GC.CA")
ipv4_1 := identifier.NewIP(netip.MustParseAddr("10.10.10.10"))
ipv4_2 := identifier.NewIP(netip.MustParseAddr("172.16.16.16"))
ipv6_1 := identifier.NewIP(netip.MustParseAddr("2001:0db8:0bad:0dab:c0ff:fee0:0007:1337"))
ipv6_2 := identifier.NewIP(netip.MustParseAddr("3fff::"))
func TestHashNames(t *testing.T) {
// Test that it is deterministic
h1 := HashNames([]string{"a"})
h2 := HashNames([]string{"a"})
test.AssertByteEquals(t, h1, h2)
testCases := []struct {
Name string
Idents1 identifier.ACMEIdentifiers
Idents2 identifier.ACMEIdentifiers
ExpectedEqual bool
}{
{
Name: "Deterministic for DNS",
Idents1: identifier.ACMEIdentifiers{dns1},
Idents2: identifier.ACMEIdentifiers{dns1},
ExpectedEqual: true,
},
{
Name: "Deterministic for IPv4",
Idents1: identifier.ACMEIdentifiers{ipv4_1},
Idents2: identifier.ACMEIdentifiers{ipv4_1},
ExpectedEqual: true,
},
{
Name: "Deterministic for IPv6",
Idents1: identifier.ACMEIdentifiers{ipv6_1},
Idents2: identifier.ACMEIdentifiers{ipv6_1},
ExpectedEqual: true,
},
{
Name: "Differentiates for DNS",
Idents1: identifier.ACMEIdentifiers{dns1},
Idents2: identifier.ACMEIdentifiers{dns2},
ExpectedEqual: false,
},
{
Name: "Differentiates for IPv4",
Idents1: identifier.ACMEIdentifiers{ipv4_1},
Idents2: identifier.ACMEIdentifiers{ipv4_2},
ExpectedEqual: false,
},
{
Name: "Differentiates for IPv6",
Idents1: identifier.ACMEIdentifiers{ipv6_1},
Idents2: identifier.ACMEIdentifiers{ipv6_2},
ExpectedEqual: false,
},
{
Name: "Not subject to ordering",
Idents1: identifier.ACMEIdentifiers{
dns1, dns2, ipv4_1, ipv4_2, ipv6_1, ipv6_2,
},
Idents2: identifier.ACMEIdentifiers{
ipv6_1, dns2, ipv4_2, dns1, ipv4_1, ipv6_2,
},
ExpectedEqual: true,
},
{
Name: "Not case sensitive",
Idents1: identifier.ACMEIdentifiers{
dns1, dns2,
},
Idents2: identifier.ACMEIdentifiers{
dns1_caps, dns2_caps,
},
ExpectedEqual: true,
},
{
Name: "Not subject to duplication",
Idents1: identifier.ACMEIdentifiers{
dns1, dns1,
},
Idents2: identifier.ACMEIdentifiers{dns1},
ExpectedEqual: true,
},
}
// Test that it differentiates
h1 = HashNames([]string{"a"})
h2 = HashNames([]string{"b"})
test.Assert(t, !bytes.Equal(h1, h2), "Should have been different")
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
t.Parallel()
h1 := HashIdentifiers(tc.Idents1)
h2 := HashIdentifiers(tc.Idents2)
if slices.Equal(h1, h2) != tc.ExpectedEqual {
t.Errorf("Comparing hashes of idents %#v and %#v, expected equality to be %v", tc.Idents1, tc.Idents2, tc.ExpectedEqual)
}
})
}
}
func TestIsCanceled(t *testing.T) {
if !IsCanceled(context.Canceled) {
t.Errorf("Expected context.Canceled to be canceled, but wasn't.")
}
if !IsCanceled(status.Errorf(codes.Canceled, "hi")) {
t.Errorf("Expected gRPC cancellation to be canceled, but wasn't.")
}
if IsCanceled(errors.New("hi")) {
t.Errorf("Expected random error to not be canceled, but was.")
}
// Test that it is not subject to ordering
h1 = HashNames([]string{"a", "b"})
h2 = HashNames([]string{"b", "a"})
test.AssertByteEquals(t, h1, h2)
// Test that it is not subject to case
h1 = HashNames([]string{"a", "b"})
h2 = HashNames([]string{"A", "B"})
test.AssertByteEquals(t, h1, h2)
// Test that it is not subject to duplication
h1 = HashNames([]string{"a", "a"})
h2 = HashNames([]string{"a"})
test.AssertByteEquals(t, h1, h2)
}

View File

@ -59,11 +59,11 @@ func Diff(old, new *x509.RevocationList) (*diffResult, error) {
return nil, fmt.Errorf("CRLs were not issued by same issuer")
}
if old.Number.Cmp(new.Number) >= 0 {
if !old.ThisUpdate.Before(new.ThisUpdate) {
return nil, fmt.Errorf("old CRL does not precede new CRL")
}
if new.ThisUpdate.Before(old.ThisUpdate) {
if old.Number.Cmp(new.Number) >= 0 {
return nil, fmt.Errorf("old CRL does not precede new CRL")
}

View File

@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.5
// protoc-gen-go v1.34.1
// protoc v3.20.1
// source: storer.proto
@ -10,10 +10,8 @@ import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
emptypb "google.golang.org/protobuf/types/known/emptypb"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
@ -24,21 +22,24 @@ const (
)
type UploadCRLRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Payload:
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Types that are assignable to Payload:
//
// *UploadCRLRequest_Metadata
// *UploadCRLRequest_CrlChunk
Payload isUploadCRLRequest_Payload `protobuf_oneof:"payload"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
Payload isUploadCRLRequest_Payload `protobuf_oneof:"payload"`
}
func (x *UploadCRLRequest) Reset() {
*x = UploadCRLRequest{}
mi := &file_storer_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
if protoimpl.UnsafeEnabled {
mi := &file_storer_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *UploadCRLRequest) String() string {
@ -49,7 +50,7 @@ func (*UploadCRLRequest) ProtoMessage() {}
func (x *UploadCRLRequest) ProtoReflect() protoreflect.Message {
mi := &file_storer_proto_msgTypes[0]
if x != nil {
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -64,27 +65,23 @@ func (*UploadCRLRequest) Descriptor() ([]byte, []int) {
return file_storer_proto_rawDescGZIP(), []int{0}
}
func (x *UploadCRLRequest) GetPayload() isUploadCRLRequest_Payload {
if x != nil {
return x.Payload
func (m *UploadCRLRequest) GetPayload() isUploadCRLRequest_Payload {
if m != nil {
return m.Payload
}
return nil
}
func (x *UploadCRLRequest) GetMetadata() *CRLMetadata {
if x != nil {
if x, ok := x.Payload.(*UploadCRLRequest_Metadata); ok {
return x.Metadata
}
if x, ok := x.GetPayload().(*UploadCRLRequest_Metadata); ok {
return x.Metadata
}
return nil
}
func (x *UploadCRLRequest) GetCrlChunk() []byte {
if x != nil {
if x, ok := x.Payload.(*UploadCRLRequest_CrlChunk); ok {
return x.CrlChunk
}
if x, ok := x.GetPayload().(*UploadCRLRequest_CrlChunk); ok {
return x.CrlChunk
}
return nil
}
@ -106,21 +103,22 @@ func (*UploadCRLRequest_Metadata) isUploadCRLRequest_Payload() {}
func (*UploadCRLRequest_CrlChunk) isUploadCRLRequest_Payload() {}
type CRLMetadata struct {
state protoimpl.MessageState `protogen:"open.v1"`
IssuerNameID int64 `protobuf:"varint,1,opt,name=issuerNameID,proto3" json:"issuerNameID,omitempty"`
Number int64 `protobuf:"varint,2,opt,name=number,proto3" json:"number,omitempty"`
ShardIdx int64 `protobuf:"varint,3,opt,name=shardIdx,proto3" json:"shardIdx,omitempty"`
Expires *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expires,proto3" json:"expires,omitempty"`
CacheControl string `protobuf:"bytes,5,opt,name=cacheControl,proto3" json:"cacheControl,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
IssuerNameID int64 `protobuf:"varint,1,opt,name=issuerNameID,proto3" json:"issuerNameID,omitempty"`
Number int64 `protobuf:"varint,2,opt,name=number,proto3" json:"number,omitempty"`
ShardIdx int64 `protobuf:"varint,3,opt,name=shardIdx,proto3" json:"shardIdx,omitempty"`
}
func (x *CRLMetadata) Reset() {
*x = CRLMetadata{}
mi := &file_storer_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
if protoimpl.UnsafeEnabled {
mi := &file_storer_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *CRLMetadata) String() string {
@ -131,7 +129,7 @@ func (*CRLMetadata) ProtoMessage() {}
func (x *CRLMetadata) ProtoReflect() protoreflect.Message {
mi := &file_storer_proto_msgTypes[1]
if x != nil {
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@ -167,88 +165,64 @@ func (x *CRLMetadata) GetShardIdx() int64 {
return 0
}
func (x *CRLMetadata) GetExpires() *timestamppb.Timestamp {
if x != nil {
return x.Expires
}
return nil
}
func (x *CRLMetadata) GetCacheControl() string {
if x != nil {
return x.CacheControl
}
return ""
}
var File_storer_proto protoreflect.FileDescriptor
var file_storer_proto_rawDesc = string([]byte{
var file_storer_proto_rawDesc = []byte{
0x0a, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06,
0x73, 0x74, 0x6f, 0x72, 0x65, 0x72, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6e, 0x0a, 0x10, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x52,
0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61,
0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x74, 0x6f,
0x72, 0x65, 0x72, 0x2e, 0x43, 0x52, 0x4c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x48,
0x00, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1c, 0x0a, 0x08, 0x63,
0x72, 0x6c, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52,
0x08, 0x63, 0x72, 0x6c, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79,
0x6c, 0x6f, 0x61, 0x64, 0x22, 0xbf, 0x01, 0x0a, 0x0b, 0x43, 0x52, 0x4c, 0x4d, 0x65, 0x74, 0x61,
0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61,
0x6d, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x69, 0x73, 0x73, 0x75,
0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62,
0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
0x12, 0x1a, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x78, 0x18, 0x03, 0x20, 0x01,
0x28, 0x03, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x78, 0x12, 0x34, 0x0a, 0x07,
0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72,
0x65, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72,
0x6f, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x43,
0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x32, 0x4e, 0x0a, 0x09, 0x43, 0x52, 0x4c, 0x53, 0x74, 0x6f,
0x72, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x09, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x52, 0x4c,
0x12, 0x18, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x72, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64,
0x43, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70,
0x74, 0x79, 0x22, 0x00, 0x28, 0x01, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x63, 0x72, 0x6c, 0x2f, 0x73, 0x74, 0x6f,
0x72, 0x65, 0x72, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
})
0x6f, 0x74, 0x6f, 0x22, 0x6e, 0x0a, 0x10, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x52, 0x4c,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64,
0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x74, 0x6f, 0x72,
0x65, 0x72, 0x2e, 0x43, 0x52, 0x4c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x48, 0x00,
0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1c, 0x0a, 0x08, 0x63, 0x72,
0x6c, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08,
0x63, 0x72, 0x6c, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c,
0x6f, 0x61, 0x64, 0x22, 0x65, 0x0a, 0x0b, 0x43, 0x52, 0x4c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
0x74, 0x61, 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65,
0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72,
0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1a,
0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03,
0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x78, 0x32, 0x4e, 0x0a, 0x09, 0x43, 0x52,
0x4c, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x09, 0x55, 0x70, 0x6c, 0x6f, 0x61,
0x64, 0x43, 0x52, 0x4c, 0x12, 0x18, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x72, 0x2e, 0x55, 0x70,
0x6c, 0x6f, 0x61, 0x64, 0x43, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x28, 0x01, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69,
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63,
0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x63, 0x72, 0x6c,
0x2f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x72, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_storer_proto_rawDescOnce sync.Once
file_storer_proto_rawDescData []byte
file_storer_proto_rawDescData = file_storer_proto_rawDesc
)
func file_storer_proto_rawDescGZIP() []byte {
file_storer_proto_rawDescOnce.Do(func() {
file_storer_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_storer_proto_rawDesc), len(file_storer_proto_rawDesc)))
file_storer_proto_rawDescData = protoimpl.X.CompressGZIP(file_storer_proto_rawDescData)
})
return file_storer_proto_rawDescData
}
var file_storer_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_storer_proto_goTypes = []any{
(*UploadCRLRequest)(nil), // 0: storer.UploadCRLRequest
(*CRLMetadata)(nil), // 1: storer.CRLMetadata
(*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp
(*emptypb.Empty)(nil), // 3: google.protobuf.Empty
var file_storer_proto_goTypes = []interface{}{
(*UploadCRLRequest)(nil), // 0: storer.UploadCRLRequest
(*CRLMetadata)(nil), // 1: storer.CRLMetadata
(*emptypb.Empty)(nil), // 2: google.protobuf.Empty
}
var file_storer_proto_depIdxs = []int32{
1, // 0: storer.UploadCRLRequest.metadata:type_name -> storer.CRLMetadata
2, // 1: storer.CRLMetadata.expires:type_name -> google.protobuf.Timestamp
0, // 2: storer.CRLStorer.UploadCRL:input_type -> storer.UploadCRLRequest
3, // 3: storer.CRLStorer.UploadCRL:output_type -> google.protobuf.Empty
3, // [3:4] is the sub-list for method output_type
2, // [2:3] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
0, // 1: storer.CRLStorer.UploadCRL:input_type -> storer.UploadCRLRequest
2, // 2: storer.CRLStorer.UploadCRL:output_type -> google.protobuf.Empty
2, // [2:3] is the sub-list for method output_type
1, // [1:2] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}
func init() { file_storer_proto_init() }
@ -256,7 +230,33 @@ func file_storer_proto_init() {
if File_storer_proto != nil {
return
}
file_storer_proto_msgTypes[0].OneofWrappers = []any{
if !protoimpl.UnsafeEnabled {
file_storer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*UploadCRLRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_storer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*CRLMetadata); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
file_storer_proto_msgTypes[0].OneofWrappers = []interface{}{
(*UploadCRLRequest_Metadata)(nil),
(*UploadCRLRequest_CrlChunk)(nil),
}
@ -264,7 +264,7 @@ func file_storer_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_storer_proto_rawDesc), len(file_storer_proto_rawDesc)),
RawDescriptor: file_storer_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
@ -275,6 +275,7 @@ func file_storer_proto_init() {
MessageInfos: file_storer_proto_msgTypes,
}.Build()
File_storer_proto = out.File
file_storer_proto_rawDesc = nil
file_storer_proto_goTypes = nil
file_storer_proto_depIdxs = nil
}

View File

@ -4,7 +4,6 @@ package storer;
option go_package = "github.com/letsencrypt/boulder/crl/storer/proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";
service CRLStorer {
rpc UploadCRL(stream UploadCRLRequest) returns (google.protobuf.Empty) {}
@ -21,6 +20,4 @@ message CRLMetadata {
int64 issuerNameID = 1;
int64 number = 2;
int64 shardIdx = 3;
google.protobuf.Timestamp expires = 4;
string cacheControl = 5;
}

View File

@ -1,6 +1,6 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc-gen-go-grpc v1.3.0
// - protoc v3.20.1
// source: storer.proto
@ -53,24 +53,20 @@ type CRLStorer_UploadCRLClient = grpc.ClientStreamingClient[UploadCRLRequest, em
// CRLStorerServer is the server API for CRLStorer service.
// All implementations must embed UnimplementedCRLStorerServer
// for forward compatibility.
// for forward compatibility
type CRLStorerServer interface {
UploadCRL(grpc.ClientStreamingServer[UploadCRLRequest, emptypb.Empty]) error
mustEmbedUnimplementedCRLStorerServer()
}
// UnimplementedCRLStorerServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedCRLStorerServer struct{}
// UnimplementedCRLStorerServer must be embedded to have forward compatible implementations.
type UnimplementedCRLStorerServer struct {
}
func (UnimplementedCRLStorerServer) UploadCRL(grpc.ClientStreamingServer[UploadCRLRequest, emptypb.Empty]) error {
return status.Errorf(codes.Unimplemented, "method UploadCRL not implemented")
}
func (UnimplementedCRLStorerServer) mustEmbedUnimplementedCRLStorerServer() {}
func (UnimplementedCRLStorerServer) testEmbeddedByValue() {}
// UnsafeCRLStorerServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to CRLStorerServer will
@ -80,13 +76,6 @@ type UnsafeCRLStorerServer interface {
}
func RegisterCRLStorerServer(s grpc.ServiceRegistrar, srv CRLStorerServer) {
// If the following call pancis, it indicates UnimplementedCRLStorerServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&CRLStorer_ServiceDesc, srv)
}

View File

@ -105,8 +105,6 @@ func (cs *crlStorer) UploadCRL(stream grpc.ClientStreamingServer[cspb.UploadCRLR
var shardIdx int64
var crlNumber *big.Int
crlBytes := make([]byte, 0)
var cacheControl string
var expires time.Time
// Read all of the messages from the input stream.
for {
@ -127,9 +125,6 @@ func (cs *crlStorer) UploadCRL(stream grpc.ClientStreamingServer[cspb.UploadCRLR
return errors.New("got incomplete metadata message")
}
cacheControl = payload.Metadata.CacheControl
expires = payload.Metadata.Expires.AsTime()
shardIdx = payload.Metadata.ShardIdx
crlNumber = crl.Number(time.Unix(0, payload.Metadata.Number))
@ -234,8 +229,6 @@ func (cs *crlStorer) UploadCRL(stream grpc.ClientStreamingServer[cspb.UploadCRLR
ChecksumSHA256: &checksumb64,
ContentType: &crlContentType,
Metadata: map[string]string{"crlNumber": crlNumber.String()},
Expires: &expires,
CacheControl: &cacheControl,
})
latency := cs.clk.Now().Sub(start)

View File

@ -26,12 +26,9 @@ func TestRunOnce(t *testing.T) {
[]*issuance.Certificate{e1, r3},
2, 18*time.Hour, 24*time.Hour,
6*time.Hour, time.Minute, 1, 1,
"stale-if-error=60",
5*time.Minute,
nil,
&fakeSAC{revokedCerts: revokedCertsStream{err: errors.New("db no worky")}, maxNotAfter: clk.Now().Add(90 * 24 * time.Hour)},
&fakeCA{gcc: generateCRLStream{}},
&fakeStorer{uploaderStream: &noopUploader{}},
&fakeSAC{grcc: fakeGRCC{err: errors.New("db no worky")}, maxNotAfter: clk.Now().Add(90 * 24 * time.Hour)},
&fakeCGC{gcc: fakeGCC{}},
&fakeCSC{ucc: fakeUCC{}},
metrics.NoopRegisterer, mockLog, clk,
)
test.AssertNotError(t, err, "building test crlUpdater")

View File

@ -7,12 +7,10 @@ import (
"fmt"
"io"
"math"
"slices"
"time"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/ocsp"
"google.golang.org/protobuf/types/known/emptypb"
"google.golang.org/protobuf/types/known/timestamppb"
@ -36,11 +34,6 @@ type crlUpdater struct {
maxParallelism int
maxAttempts int
cacheControl string
expiresMargin time.Duration
temporallyShardedPrefixes []string
sa sapb.StorageAuthorityClient
ca capb.CRLGeneratorClient
cs cspb.CRLStorerClient
@ -61,9 +54,6 @@ func NewUpdater(
updateTimeout time.Duration,
maxParallelism int,
maxAttempts int,
cacheControl string,
expiresMargin time.Duration,
temporallyShardedPrefixes []string,
sa sapb.StorageAuthorityClient,
ca capb.CRLGeneratorClient,
cs cspb.CRLStorerClient,
@ -80,8 +70,8 @@ func NewUpdater(
return nil, fmt.Errorf("must have positive number of shards, got: %d", numShards)
}
if updatePeriod >= 24*time.Hour {
return nil, fmt.Errorf("must update CRLs at least every 24 hours, got: %s", updatePeriod)
if updatePeriod >= 7*24*time.Hour {
return nil, fmt.Errorf("must update CRLs at least every 7 days, got: %s", updatePeriod)
}
if updateTimeout >= updatePeriod {
@ -122,9 +112,6 @@ func NewUpdater(
updateTimeout,
maxParallelism,
maxAttempts,
cacheControl,
expiresMargin,
temporallyShardedPrefixes,
sa,
ca,
cs,
@ -138,9 +125,9 @@ func NewUpdater(
// updateShardWithRetry calls updateShard repeatedly (with exponential backoff
// between attempts) until it succeeds or the max number of attempts is reached.
func (cu *crlUpdater) updateShardWithRetry(ctx context.Context, atTime time.Time, issuerNameID issuance.NameID, shardIdx int, chunks []chunk) error {
deadline := cu.clk.Now().Add(cu.updateTimeout)
ctx, cancel := context.WithDeadline(ctx, deadline)
ctx, cancel := context.WithTimeout(ctx, cu.updateTimeout)
defer cancel()
deadline, _ := ctx.Deadline()
if chunks == nil {
// Compute the shard map and relevant chunk boundaries, if not supplied.
@ -196,78 +183,11 @@ func (cu *crlUpdater) updateShardWithRetry(ctx context.Context, atTime time.Time
return nil
}
type crlStream interface {
Recv() (*proto.CRLEntry, error)
}
// reRevoked returns the later of the two entries, only if the latter represents a valid
// re-revocation of the former (reason == KeyCompromise).
func reRevoked(a *proto.CRLEntry, b *proto.CRLEntry) (*proto.CRLEntry, error) {
first, second := a, b
if b.RevokedAt.AsTime().Before(a.RevokedAt.AsTime()) {
first, second = b, a
}
if first.Reason != ocsp.KeyCompromise && second.Reason == ocsp.KeyCompromise {
return second, nil
}
// The RA has logic to prevent re-revocation for any reason other than KeyCompromise,
// so this should be impossible. The best we can do is error out.
return nil, fmt.Errorf("certificate %s was revoked with reason %d at %s and re-revoked with invalid reason %d at %s",
first.Serial, first.Reason, first.RevokedAt.AsTime(), second.Reason, second.RevokedAt.AsTime())
}
// addFromStream pulls `proto.CRLEntry` objects from a stream, adding them to the crlEntries map.
//
// Consolidates duplicates and checks for internal consistency of the results.
// If allowedSerialPrefixes is non-empty, only serials with that one-byte prefix (two hex-encoded
// bytes) will be accepted.
//
// Returns the number of entries received from the stream, regardless of whether they were accepted.
func addFromStream(crlEntries map[string]*proto.CRLEntry, stream crlStream, allowedSerialPrefixes []string) (int, error) {
var count int
for {
entry, err := stream.Recv()
if err != nil {
if err == io.EOF {
break
}
return 0, fmt.Errorf("retrieving entry from SA: %w", err)
}
count++
serialPrefix := entry.Serial[0:2]
if len(allowedSerialPrefixes) > 0 && !slices.Contains(allowedSerialPrefixes, serialPrefix) {
continue
}
previousEntry := crlEntries[entry.Serial]
if previousEntry == nil {
crlEntries[entry.Serial] = entry
continue
}
if previousEntry.Reason == entry.Reason &&
previousEntry.RevokedAt.AsTime().Equal(entry.RevokedAt.AsTime()) {
continue
}
// There's a tiny possibility a certificate was re-revoked for KeyCompromise and
// we got a different view of it from temporal sharding vs explicit sharding.
// Prefer the re-revoked CRL entry, which must be the one with KeyCompromise.
second, err := reRevoked(entry, previousEntry)
if err != nil {
return 0, err
}
crlEntries[entry.Serial] = second
}
return count, nil
}
// updateShard processes a single shard. It computes the shard's boundaries, gets
// the list of revoked certs in that shard from the SA, gets the CA to sign the
// resulting CRL, and gets the crl-storer to upload it. It returns an error if
// any of these operations fail.
func (cu *crlUpdater) updateShard(ctx context.Context, atTime time.Time, issuerNameID issuance.NameID, shardIdx int, chunks []chunk) (err error) {
if shardIdx <= 0 {
return fmt.Errorf("invalid shard %d", shardIdx)
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
@ -287,10 +207,8 @@ func (cu *crlUpdater) updateShard(ctx context.Context, atTime time.Time, issuerN
cu.log.Infof(
"Generating CRL shard: id=[%s] numChunks=[%d]", crlID, len(chunks))
// Deduplicate the CRL entries by serial number, since we can get the same certificate via
// both temporal sharding (GetRevokedCerts) and explicit sharding (GetRevokedCertsByShard).
crlEntries := make(map[string]*proto.CRLEntry)
// Get the full list of CRL Entries for this shard from the SA.
var crlEntries []*proto.CRLEntry
for _, chunk := range chunks {
saStream, err := cu.sa.GetRevokedCerts(ctx, &sapb.GetRevokedCertsRequest{
IssuerNameID: int64(issuerNameID),
@ -299,41 +217,25 @@ func (cu *crlUpdater) updateShard(ctx context.Context, atTime time.Time, issuerN
RevokedBefore: timestamppb.New(atTime),
})
if err != nil {
return fmt.Errorf("GetRevokedCerts: %w", err)
return fmt.Errorf("connecting to SA: %w", err)
}
n, err := addFromStream(crlEntries, saStream, cu.temporallyShardedPrefixes)
if err != nil {
return fmt.Errorf("streaming GetRevokedCerts: %w", err)
for {
entry, err := saStream.Recv()
if err != nil {
if err == io.EOF {
break
}
return fmt.Errorf("retrieving entry from SA: %w", err)
}
crlEntries = append(crlEntries, entry)
}
cu.log.Infof(
"Queried SA for CRL shard: id=[%s] expiresAfter=[%s] expiresBefore=[%s] numEntries=[%d]",
crlID, chunk.start, chunk.end, n)
crlID, chunk.start, chunk.end, len(crlEntries))
}
// Query for unexpired certificates, with padding to ensure that revoked certificates show
// up in at least one CRL, even if they expire between revocation and CRL generation.
expiresAfter := cu.clk.Now().Add(-cu.lookbackPeriod)
saStream, err := cu.sa.GetRevokedCertsByShard(ctx, &sapb.GetRevokedCertsByShardRequest{
IssuerNameID: int64(issuerNameID),
ShardIdx: int64(shardIdx),
ExpiresAfter: timestamppb.New(expiresAfter),
RevokedBefore: timestamppb.New(atTime),
})
if err != nil {
return fmt.Errorf("GetRevokedCertsByShard: %w", err)
}
n, err := addFromStream(crlEntries, saStream, nil)
if err != nil {
return fmt.Errorf("streaming GetRevokedCertsByShard: %w", err)
}
cu.log.Infof(
"Queried SA by CRL shard number: id=[%s] shardIdx=[%d] numEntries=[%d]", crlID, shardIdx, n)
// Send the full list of CRL Entries to the CA.
caStream, err := cu.ca.GenerateCRL(ctx)
if err != nil {
@ -399,8 +301,6 @@ func (cu *crlUpdater) updateShard(ctx context.Context, atTime time.Time, issuerN
IssuerNameID: int64(issuerNameID),
Number: atTime.UnixNano(),
ShardIdx: int64(shardIdx),
CacheControl: cu.cacheControl,
Expires: timestamppb.New(atTime.Add(cu.updatePeriod).Add(cu.expiresMargin)),
},
},
})

View File

@ -1,17 +1,12 @@
package updater
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"reflect"
"testing"
"time"
"golang.org/x/crypto/ocsp"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/emptypb"
"google.golang.org/protobuf/types/known/timestamppb"
@ -29,17 +24,17 @@ import (
"github.com/letsencrypt/boulder/test"
)
// revokedCertsStream is a fake grpc.ClientStreamingClient which can be
// fakeGRCC is a fake grpc.ClientStreamingClient which can be
// populated with some CRL entries or an error for use as the return value of
// a faked GetRevokedCerts call.
type revokedCertsStream struct {
type fakeGRCC struct {
grpc.ClientStream
entries []*corepb.CRLEntry
nextIdx int
err error
}
func (f *revokedCertsStream) Recv() (*corepb.CRLEntry, error) {
func (f *fakeGRCC) Recv() (*corepb.CRLEntry, error) {
if f.err != nil {
return nil, f.err
}
@ -56,31 +51,13 @@ func (f *revokedCertsStream) Recv() (*corepb.CRLEntry, error) {
// fake timestamp to serve as the database's maximum notAfter value.
type fakeSAC struct {
sapb.StorageAuthorityClient
revokedCerts revokedCertsStream
revokedCertsByShard revokedCertsStream
maxNotAfter time.Time
leaseError error
grcc fakeGRCC
maxNotAfter time.Time
leaseError error
}
func (f *fakeSAC) GetRevokedCerts(ctx context.Context, _ *sapb.GetRevokedCertsRequest, _ ...grpc.CallOption) (grpc.ServerStreamingClient[corepb.CRLEntry], error) {
return &f.revokedCerts, nil
}
// Return some configured contents, but only for shard 2.
func (f *fakeSAC) GetRevokedCertsByShard(ctx context.Context, req *sapb.GetRevokedCertsByShardRequest, _ ...grpc.CallOption) (grpc.ServerStreamingClient[corepb.CRLEntry], error) {
// This time is based on the setting of `clk` in TestUpdateShard,
// minus the setting of `lookbackPeriod` in that same function (24h).
want := time.Date(2020, time.January, 17, 0, 0, 0, 0, time.UTC)
got := req.ExpiresAfter.AsTime().UTC()
if !got.Equal(want) {
return nil, fmt.Errorf("fakeSAC.GetRevokedCertsByShard called with ExpiresAfter=%s, want %s",
got, want)
}
if req.ShardIdx == 2 {
return &f.revokedCertsByShard, nil
}
return &revokedCertsStream{}, nil
return &f.grcc, nil
}
func (f *fakeSAC) GetMaxExpiration(_ context.Context, req *emptypb.Empty, _ ...grpc.CallOption) (*timestamppb.Timestamp, error) {
@ -94,20 +71,10 @@ func (f *fakeSAC) LeaseCRLShard(_ context.Context, req *sapb.LeaseCRLShardReques
return &sapb.LeaseCRLShardResponse{IssuerNameID: req.IssuerNameID, ShardIdx: req.MinShardIdx}, nil
}
// generateCRLStream implements the streaming API returned from GenerateCRL.
//
// Specifically it implements grpc.BidiStreamingClient.
//
// If it has non-nil error fields, it returns those on Send() or Recv().
//
// When it receives a CRL entry (on Send()), it records that entry internally, JSON serialized,
// with a newline between JSON objects.
//
// When it is asked for bytes of a signed CRL (Recv()), it sends those JSON serialized contents.
//
// We use JSON instead of CRL format because we're not testing the signing and formatting done
// by the CA, just the plumbing of different components together done by the crl-updater.
type generateCRLStream struct {
// fakeGCC is a fake grpc.BidiStreamingClient which can be
// populated with some CRL entries or an error for use as the return value of
// a faked GenerateCRL call.
type fakeGCC struct {
grpc.ClientStream
chunks [][]byte
nextIdx int
@ -115,36 +82,15 @@ type generateCRLStream struct {
recvErr error
}
type crlEntry struct {
Serial string
Reason int32
RevokedAt time.Time
}
func (f *generateCRLStream) Send(req *capb.GenerateCRLRequest) error {
if f.sendErr != nil {
return f.sendErr
}
if t, ok := req.Payload.(*capb.GenerateCRLRequest_Entry); ok {
jsonBytes, err := json.Marshal(crlEntry{
Serial: t.Entry.Serial,
Reason: t.Entry.Reason,
RevokedAt: t.Entry.RevokedAt.AsTime(),
})
if err != nil {
return err
}
f.chunks = append(f.chunks, jsonBytes)
f.chunks = append(f.chunks, []byte("\n"))
}
func (f *fakeGCC) Send(*capb.GenerateCRLRequest) error {
return f.sendErr
}
func (f *generateCRLStream) CloseSend() error {
func (f *fakeGCC) CloseSend() error {
return nil
}
func (f *generateCRLStream) Recv() (*capb.GenerateCRLResponse, error) {
func (f *fakeGCC) Recv() (*capb.GenerateCRLResponse, error) {
if f.recvErr != nil {
return nil, f.recvErr
}
@ -156,67 +102,43 @@ func (f *generateCRLStream) Recv() (*capb.GenerateCRLResponse, error) {
return nil, io.EOF
}
// fakeCA acts as a fake CA (specifically implementing capb.CRLGeneratorClient).
//
// It always returns its field in response to `GenerateCRL`. Because this is a streaming
// RPC, that return value is responsible for most of the work.
type fakeCA struct {
gcc generateCRLStream
// fakeCGC is a fake capb.CRLGeneratorClient which can be populated with a
// fakeGCC to be used as the return value for calls to GenerateCRL.
type fakeCGC struct {
gcc fakeGCC
}
func (f *fakeCA) GenerateCRL(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[capb.GenerateCRLRequest, capb.GenerateCRLResponse], error) {
func (f *fakeCGC) GenerateCRL(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[capb.GenerateCRLRequest, capb.GenerateCRLResponse], error) {
return &f.gcc, nil
}
// recordingUploader acts as the streaming part of UploadCRL.
//
// Records all uploaded chunks in crlBody.
type recordingUploader struct {
grpc.ClientStream
crlBody []byte
}
func (r *recordingUploader) Send(req *cspb.UploadCRLRequest) error {
if t, ok := req.Payload.(*cspb.UploadCRLRequest_CrlChunk); ok {
r.crlBody = append(r.crlBody, t.CrlChunk...)
}
return nil
}
func (r *recordingUploader) CloseAndRecv() (*emptypb.Empty, error) {
return &emptypb.Empty{}, nil
}
// noopUploader is a fake grpc.ClientStreamingClient which can be populated with
// fakeUCC is a fake grpc.ClientStreamingClient which can be populated with
// an error for use as the return value of a faked UploadCRL call.
//
// It does nothing with uploaded contents.
type noopUploader struct {
type fakeUCC struct {
grpc.ClientStream
sendErr error
recvErr error
}
func (f *noopUploader) Send(*cspb.UploadCRLRequest) error {
func (f *fakeUCC) Send(*cspb.UploadCRLRequest) error {
return f.sendErr
}
func (f *noopUploader) CloseAndRecv() (*emptypb.Empty, error) {
func (f *fakeUCC) CloseAndRecv() (*emptypb.Empty, error) {
if f.recvErr != nil {
return nil, f.recvErr
}
return &emptypb.Empty{}, nil
}
// fakeStorer is a fake cspb.CRLStorerClient which can be populated with an
// uploader stream for use as the return value for calls to UploadCRL.
type fakeStorer struct {
uploaderStream grpc.ClientStreamingClient[cspb.UploadCRLRequest, emptypb.Empty]
// fakeCSC is a fake cspb.CRLStorerClient which can be populated with a
// fakeUCC for use as the return value for calls to UploadCRL.
type fakeCSC struct {
ucc fakeUCC
}
func (f *fakeStorer) UploadCRL(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[cspb.UploadCRLRequest, emptypb.Empty], error) {
return f.uploaderStream, nil
func (f *fakeCSC) UploadCRL(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[cspb.UploadCRLRequest, emptypb.Empty], error) {
return &f.ucc, nil
}
func TestUpdateShard(t *testing.T) {
@ -230,24 +152,14 @@ func TestUpdateShard(t *testing.T) {
defer cancel()
clk := clock.NewFake()
clk.Set(time.Date(2020, time.January, 18, 0, 0, 0, 0, time.UTC))
clk.Set(time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC))
cu, err := NewUpdater(
[]*issuance.Certificate{e1, r3},
2,
18*time.Hour, // shardWidth
24*time.Hour, // lookbackPeriod
6*time.Hour, // updatePeriod
time.Minute, // updateTimeout
1, 1,
"stale-if-error=60",
5*time.Minute,
nil,
&fakeSAC{
revokedCerts: revokedCertsStream{},
maxNotAfter: clk.Now().Add(90 * 24 * time.Hour),
},
&fakeCA{gcc: generateCRLStream{}},
&fakeStorer{uploaderStream: &noopUploader{}},
2, 18*time.Hour, 24*time.Hour,
6*time.Hour, time.Minute, 1, 1,
&fakeSAC{grcc: fakeGRCC{}, maxNotAfter: clk.Now().Add(90 * 24 * time.Hour)},
&fakeCGC{gcc: fakeGCC{}},
&fakeCSC{ucc: fakeUCC{}},
metrics.NoopRegisterer, blog.NewMock(), clk,
)
test.AssertNotError(t, err, "building test crlUpdater")
@ -257,91 +169,7 @@ func TestUpdateShard(t *testing.T) {
}
// Ensure that getting no results from the SA still works.
err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 1, testChunks)
test.AssertNotError(t, err, "empty CRL")
test.AssertMetricWithLabelsEquals(t, cu.updatedCounter, prometheus.Labels{
"issuer": "(TEST) Elegant Elephant E1", "result": "success",
}, 1)
// Make a CRL with actual contents. Verify that the information makes it through
// each of the steps:
// - read from SA
// - write to CA and read the response
// - upload with CRL storer
//
// The final response should show up in the bytes recorded by our fake storer.
recordingUploader := &recordingUploader{}
now := timestamppb.Now()
cu.cs = &fakeStorer{uploaderStream: recordingUploader}
cu.sa = &fakeSAC{
revokedCerts: revokedCertsStream{
entries: []*corepb.CRLEntry{
{
Serial: "0311b5d430823cfa25b0fc85d14c54ee35",
Reason: int32(ocsp.KeyCompromise),
RevokedAt: now,
},
},
},
revokedCertsByShard: revokedCertsStream{
entries: []*corepb.CRLEntry{
{
Serial: "0311b5d430823cfa25b0fc85d14c54ee35",
Reason: int32(ocsp.KeyCompromise),
RevokedAt: now,
},
{
Serial: "037d6a05a0f6a975380456ae605cee9889",
Reason: int32(ocsp.AffiliationChanged),
RevokedAt: now,
},
{
Serial: "03aa617ab8ee58896ba082bfa25199c884",
Reason: int32(ocsp.Unspecified),
RevokedAt: now,
},
},
},
maxNotAfter: clk.Now().Add(90 * 24 * time.Hour),
}
// We ask for shard 2 specifically because GetRevokedCertsByShard only returns our
// certificate for that shard.
err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 2, testChunks)
test.AssertNotError(t, err, "updateShard")
expectedEntries := map[string]int32{
"0311b5d430823cfa25b0fc85d14c54ee35": int32(ocsp.KeyCompromise),
"037d6a05a0f6a975380456ae605cee9889": int32(ocsp.AffiliationChanged),
"03aa617ab8ee58896ba082bfa25199c884": int32(ocsp.Unspecified),
}
for _, r := range bytes.Split(recordingUploader.crlBody, []byte("\n")) {
if len(r) == 0 {
continue
}
var entry crlEntry
err := json.Unmarshal(r, &entry)
if err != nil {
t.Fatalf("unmarshaling JSON: %s", err)
}
expectedReason, ok := expectedEntries[entry.Serial]
if !ok {
t.Errorf("CRL entry for %s was unexpected", entry.Serial)
}
if entry.Reason != expectedReason {
t.Errorf("CRL entry for %s had reason=%d, want %d", entry.Serial, entry.Reason, expectedReason)
}
delete(expectedEntries, entry.Serial)
}
// At this point the expectedEntries map should be empty; if it's not, emit an error
// for each remaining expectation.
for k, v := range expectedEntries {
t.Errorf("expected cert %s to be revoked for reason=%d, but it was not on the CRL", k, v)
}
cu.updatedCounter.Reset()
// Ensure that getting no results from the SA still works.
err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 1, testChunks)
err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 0, testChunks)
test.AssertNotError(t, err, "empty CRL")
test.AssertMetricWithLabelsEquals(t, cu.updatedCounter, prometheus.Labels{
"issuer": "(TEST) Elegant Elephant E1", "result": "success",
@ -349,8 +177,8 @@ func TestUpdateShard(t *testing.T) {
cu.updatedCounter.Reset()
// Errors closing the Storer upload stream should bubble up.
cu.cs = &fakeStorer{uploaderStream: &noopUploader{recvErr: sentinelErr}}
err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 1, testChunks)
cu.cs = &fakeCSC{ucc: fakeUCC{recvErr: sentinelErr}}
err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 0, testChunks)
test.AssertError(t, err, "storer error")
test.AssertContains(t, err.Error(), "closing CRLStorer upload stream")
test.AssertErrorIs(t, err, sentinelErr)
@ -360,8 +188,8 @@ func TestUpdateShard(t *testing.T) {
cu.updatedCounter.Reset()
// Errors sending to the Storer should bubble up sooner.
cu.cs = &fakeStorer{uploaderStream: &noopUploader{sendErr: sentinelErr}}
err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 1, testChunks)
cu.cs = &fakeCSC{ucc: fakeUCC{sendErr: sentinelErr}}
err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 0, testChunks)
test.AssertError(t, err, "storer error")
test.AssertContains(t, err.Error(), "sending CRLStorer metadata")
test.AssertErrorIs(t, err, sentinelErr)
@ -371,8 +199,8 @@ func TestUpdateShard(t *testing.T) {
cu.updatedCounter.Reset()
// Errors reading from the CA should bubble up sooner.
cu.ca = &fakeCA{gcc: generateCRLStream{recvErr: sentinelErr}}
err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 1, testChunks)
cu.ca = &fakeCGC{gcc: fakeGCC{recvErr: sentinelErr}}
err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 0, testChunks)
test.AssertError(t, err, "CA error")
test.AssertContains(t, err.Error(), "receiving CRL bytes")
test.AssertErrorIs(t, err, sentinelErr)
@ -382,8 +210,8 @@ func TestUpdateShard(t *testing.T) {
cu.updatedCounter.Reset()
// Errors sending to the CA should bubble up sooner.
cu.ca = &fakeCA{gcc: generateCRLStream{sendErr: sentinelErr}}
err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 1, testChunks)
cu.ca = &fakeCGC{gcc: fakeGCC{sendErr: sentinelErr}}
err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 0, testChunks)
test.AssertError(t, err, "CA error")
test.AssertContains(t, err.Error(), "sending CA metadata")
test.AssertErrorIs(t, err, sentinelErr)
@ -393,8 +221,8 @@ func TestUpdateShard(t *testing.T) {
cu.updatedCounter.Reset()
// Errors reading from the SA should bubble up soonest.
cu.sa = &fakeSAC{revokedCerts: revokedCertsStream{err: sentinelErr}, maxNotAfter: clk.Now().Add(90 * 24 * time.Hour)}
err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 1, testChunks)
cu.sa = &fakeSAC{grcc: fakeGRCC{err: sentinelErr}, maxNotAfter: clk.Now().Add(90 * 24 * time.Hour)}
err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 0, testChunks)
test.AssertError(t, err, "database error")
test.AssertContains(t, err.Error(), "retrieving entry from SA")
test.AssertErrorIs(t, err, sentinelErr)
@ -422,12 +250,9 @@ func TestUpdateShardWithRetry(t *testing.T) {
[]*issuance.Certificate{e1, r3},
2, 18*time.Hour, 24*time.Hour,
6*time.Hour, time.Minute, 1, 1,
"stale-if-error=60",
5*time.Minute,
nil,
&fakeSAC{revokedCerts: revokedCertsStream{err: sentinelErr}, maxNotAfter: clk.Now().Add(90 * 24 * time.Hour)},
&fakeCA{gcc: generateCRLStream{}},
&fakeStorer{uploaderStream: &noopUploader{}},
&fakeSAC{grcc: fakeGRCC{err: sentinelErr}, maxNotAfter: clk.Now().Add(90 * 24 * time.Hour)},
&fakeCGC{gcc: fakeGCC{}},
&fakeCSC{ucc: fakeUCC{}},
metrics.NoopRegisterer, blog.NewMock(), clk,
)
test.AssertNotError(t, err, "building test crlUpdater")
@ -439,7 +264,7 @@ func TestUpdateShardWithRetry(t *testing.T) {
// Ensure that having MaxAttempts set to 1 results in the clock not moving
// forward at all.
startTime := cu.clk.Now()
err = cu.updateShardWithRetry(ctx, cu.clk.Now(), e1.NameID(), 1, testChunks)
err = cu.updateShardWithRetry(ctx, cu.clk.Now(), e1.NameID(), 0, testChunks)
test.AssertError(t, err, "database error")
test.AssertErrorIs(t, err, sentinelErr)
test.AssertEquals(t, cu.clk.Now(), startTime)
@ -449,7 +274,7 @@ func TestUpdateShardWithRetry(t *testing.T) {
// in, so we have to be approximate.
cu.maxAttempts = 5
startTime = cu.clk.Now()
err = cu.updateShardWithRetry(ctx, cu.clk.Now(), e1.NameID(), 1, testChunks)
err = cu.updateShardWithRetry(ctx, cu.clk.Now(), e1.NameID(), 0, testChunks)
test.AssertError(t, err, "database error")
test.AssertErrorIs(t, err, sentinelErr)
t.Logf("start: %v", startTime)
@ -571,150 +396,6 @@ func TestGetChunkAtTime(t *testing.T) {
// the time twice, since the whole point of "very far in the future" is that
// it isn't representable by a time.Duration.
atTime = anchorTime().Add(200 * 365 * 24 * time.Hour).Add(200 * 365 * 24 * time.Hour)
_, err = GetChunkAtTime(shardWidth, numShards, atTime)
c, err = GetChunkAtTime(shardWidth, numShards, atTime)
test.AssertError(t, err, "getting far-future chunk")
}
func TestAddFromStream(t *testing.T) {
now := time.Now()
yesterday := now.Add(-24 * time.Hour)
simpleEntry := &corepb.CRLEntry{
Serial: "abcdefg",
Reason: ocsp.CessationOfOperation,
RevokedAt: timestamppb.New(yesterday),
}
reRevokedEntry := &corepb.CRLEntry{
Serial: "abcdefg",
Reason: ocsp.KeyCompromise,
RevokedAt: timestamppb.New(now),
}
reRevokedEntryOld := &corepb.CRLEntry{
Serial: "abcdefg",
Reason: ocsp.KeyCompromise,
RevokedAt: timestamppb.New(now.Add(-48 * time.Hour)),
}
reRevokedEntryBadReason := &corepb.CRLEntry{
Serial: "abcdefg",
Reason: ocsp.AffiliationChanged,
RevokedAt: timestamppb.New(now),
}
type testCase struct {
name string
inputs [][]*corepb.CRLEntry
expected map[string]*corepb.CRLEntry
expectErr bool
}
testCases := []testCase{
{
name: "two streams with same entry",
inputs: [][]*corepb.CRLEntry{
{simpleEntry},
{simpleEntry},
},
expected: map[string]*corepb.CRLEntry{
simpleEntry.Serial: simpleEntry,
},
},
{
name: "re-revoked",
inputs: [][]*corepb.CRLEntry{
{simpleEntry},
{simpleEntry, reRevokedEntry},
},
expected: map[string]*corepb.CRLEntry{
simpleEntry.Serial: reRevokedEntry,
},
},
{
name: "re-revoked (newer shows up first)",
inputs: [][]*corepb.CRLEntry{
{reRevokedEntry, simpleEntry},
{simpleEntry},
},
expected: map[string]*corepb.CRLEntry{
simpleEntry.Serial: reRevokedEntry,
},
},
{
name: "re-revoked (wrong date)",
inputs: [][]*corepb.CRLEntry{
{simpleEntry},
{simpleEntry, reRevokedEntryOld},
},
expectErr: true,
},
{
name: "re-revoked (wrong reason)",
inputs: [][]*corepb.CRLEntry{
{simpleEntry},
{simpleEntry, reRevokedEntryBadReason},
},
expectErr: true,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
crlEntries := make(map[string]*corepb.CRLEntry)
var err error
for _, input := range tc.inputs {
_, err = addFromStream(crlEntries, &revokedCertsStream{entries: input}, nil)
if err != nil {
break
}
}
if tc.expectErr {
if err == nil {
t.Errorf("addFromStream=%+v, want error", crlEntries)
}
} else {
if err != nil {
t.Fatalf("addFromStream=%s, want no error", err)
}
if !reflect.DeepEqual(crlEntries, tc.expected) {
t.Errorf("addFromStream=%+v, want %+v", crlEntries, tc.expected)
}
}
})
}
}
func TestAddFromStreamDisallowedSerialPrefix(t *testing.T) {
now := time.Now()
yesterday := now.Add(-24 * time.Hour)
input := []*corepb.CRLEntry{
{
Serial: "abcdefg",
Reason: ocsp.CessationOfOperation,
RevokedAt: timestamppb.New(yesterday),
},
{
Serial: "01020304",
Reason: ocsp.CessationOfOperation,
RevokedAt: timestamppb.New(yesterday),
},
}
crlEntries := make(map[string]*corepb.CRLEntry)
var err error
_, err = addFromStream(
crlEntries,
&revokedCertsStream{entries: input},
[]string{"ab"},
)
if err != nil {
t.Fatalf("addFromStream: %s", err)
}
expected := map[string]*corepb.CRLEntry{
"abcdefg": input[0],
}
if !reflect.DeepEqual(crlEntries, expected) {
t.Errorf("addFromStream=%+v, want %+v", crlEntries, expected)
}
}

View File

@ -5,13 +5,11 @@ import (
"crypto"
"crypto/x509"
"errors"
"net/netip"
"strings"
"github.com/letsencrypt/boulder/core"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/goodkey"
"github.com/letsencrypt/boulder/identifier"
)
// maxCNLength is the maximum length allowed for the common name as specified in RFC 5280
@ -35,13 +33,13 @@ var (
unsupportedSigAlg = berrors.BadCSRError("signature algorithm not supported")
invalidSig = berrors.BadCSRError("invalid signature on CSR")
invalidEmailPresent = berrors.BadCSRError("CSR contains one or more email address fields")
invalidURIPresent = berrors.BadCSRError("CSR contains one or more URI fields")
invalidNoIdent = berrors.BadCSRError("at least one identifier is required")
invalidIPPresent = berrors.BadCSRError("CSR contains one or more IP address fields")
invalidNoDNS = berrors.BadCSRError("at least one DNS name is required")
)
// VerifyCSR checks the validity of a x509.CertificateRequest. It uses
// identifier.FromCSR to normalize the DNS names before checking whether we'll
// issue for them.
// NamesFromCSR to normalize the DNS names before checking whether we'll issue
// for them.
func VerifyCSR(ctx context.Context, csr *x509.CertificateRequest, maxNames int, keyPolicy *goodkey.KeyPolicy, pa core.PolicyAuthority) error {
key, ok := csr.PublicKey.(crypto.PublicKey)
if !ok {
@ -65,54 +63,67 @@ func VerifyCSR(ctx context.Context, csr *x509.CertificateRequest, maxNames int,
if len(csr.EmailAddresses) > 0 {
return invalidEmailPresent
}
if len(csr.URIs) > 0 {
return invalidURIPresent
if len(csr.IPAddresses) > 0 {
return invalidIPPresent
}
// FromCSR also performs normalization, returning values that may not match
// the literal CSR contents.
idents := identifier.FromCSR(csr)
if len(idents) == 0 {
return invalidNoIdent
// NamesFromCSR also performs normalization, returning values that may not
// match the literal CSR contents.
names := NamesFromCSR(csr)
if len(names.SANs) == 0 && names.CN == "" {
return invalidNoDNS
}
if len(idents) > maxNames {
return berrors.BadCSRError("CSR contains more than %d identifiers", maxNames)
if len(names.CN) > maxCNLength {
return berrors.BadCSRError("CN was longer than %d bytes", maxCNLength)
}
if len(names.SANs) > maxNames {
return berrors.BadCSRError("CSR contains more than %d DNS names", maxNames)
}
err = pa.WillingToIssue(idents)
err = pa.WillingToIssue(names.SANs)
if err != nil {
return err
}
return nil
}
// CNFromCSR returns the lower-cased Subject Common Name from the CSR, if a
// short enough CN was provided. If it was too long or appears to be an IP,
// there will be no CN. If none was provided, the CN will be the first SAN that
// is short enough, which is done only for backwards compatibility with prior
// Let's Encrypt behaviour.
func CNFromCSR(csr *x509.CertificateRequest) string {
type names struct {
SANs []string
CN string
}
// NamesFromCSR deduplicates and lower-cases the Subject Common Name and Subject
// Alternative Names from the CSR. If a CN was provided, it will be used if it
// is short enough, otherwise there will be no CN. If no CN was provided, the CN
// will be the first SAN that is short enough, which is done only for backwards
// compatibility with prior Let's Encrypt behaviour. The resulting SANs will
// always include the original CN, if any.
func NamesFromCSR(csr *x509.CertificateRequest) names {
// Produce a new "sans" slice with the same memory address as csr.DNSNames
// but force a new allocation if an append happens so that we don't
// accidentally mutate the underlying csr.DNSNames array.
sans := csr.DNSNames[0:len(csr.DNSNames):len(csr.DNSNames)]
if csr.Subject.CommonName != "" {
sans = append(sans, csr.Subject.CommonName)
}
if len(csr.Subject.CommonName) > maxCNLength {
return ""
return names{SANs: core.UniqueLowerNames(sans)}
}
if csr.Subject.CommonName != "" {
_, err := netip.ParseAddr(csr.Subject.CommonName)
if err == nil { // inverted; we're looking for successful parsing here
return ""
}
return strings.ToLower(csr.Subject.CommonName)
return names{SANs: core.UniqueLowerNames(sans), CN: strings.ToLower(csr.Subject.CommonName)}
}
// If there's no CN already, but we want to set one, promote the first dnsName
// SAN which is shorter than the maximum acceptable CN length (if any). We
// will never promote an ipAddress SAN to the CN.
for _, name := range csr.DNSNames {
// If there's no CN already, but we want to set one, promote the first SAN
// which is shorter than the maximum acceptable CN length (if any).
for _, name := range sans {
if len(name) <= maxCNLength {
return strings.ToLower(name)
return names{SANs: core.UniqueLowerNames(sans), CN: strings.ToLower(name)}
}
}
return ""
return names{SANs: core.UniqueLowerNames(sans)}
}

View File

@ -9,8 +9,6 @@ import (
"encoding/asn1"
"errors"
"net"
"net/netip"
"net/url"
"strings"
"testing"
@ -24,13 +22,13 @@ import (
type mockPA struct{}
func (pa *mockPA) ChallengeTypesFor(ident identifier.ACMEIdentifier) ([]core.AcmeChallenge, error) {
func (pa *mockPA) ChallengeTypesFor(identifier identifier.ACMEIdentifier) ([]core.AcmeChallenge, error) {
return []core.AcmeChallenge{}, nil
}
func (pa *mockPA) WillingToIssue(idents identifier.ACMEIdentifiers) error {
for _, ident := range idents {
if ident.Value == "bad-name.com" || ident.Value == "other-bad-name.com" {
func (pa *mockPA) WillingToIssue(domains []string) error {
for _, domain := range domains {
if domain == "bad-name.com" || domain == "other-bad-name.com" {
return errors.New("policy forbids issuing for identifier")
}
}
@ -70,10 +68,6 @@ func TestVerifyCSR(t *testing.T) {
signedReqWithIPAddress := new(x509.CertificateRequest)
*signedReqWithIPAddress = *signedReq
signedReqWithIPAddress.IPAddresses = []net.IP{net.IPv4(1, 2, 3, 4)}
signedReqWithURI := new(x509.CertificateRequest)
*signedReqWithURI = *signedReq
testURI, _ := url.ParseRequestURI("https://example.com/")
signedReqWithURI.URIs = []*url.URL{testURI}
signedReqWithAllLongSANs := new(x509.CertificateRequest)
*signedReqWithAllLongSANs = *signedReq
signedReqWithAllLongSANs.DNSNames = []string{"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.com"}
@ -109,7 +103,7 @@ func TestVerifyCSR(t *testing.T) {
signedReq,
100,
&mockPA{},
invalidNoIdent,
invalidNoDNS,
},
{
signedReqWithLongCN,
@ -121,7 +115,7 @@ func TestVerifyCSR(t *testing.T) {
signedReqWithHosts,
1,
&mockPA{},
berrors.BadCSRError("CSR contains more than 1 identifiers"),
berrors.BadCSRError("CSR contains more than 1 DNS names"),
},
{
signedReqWithBadNames,
@ -139,13 +133,7 @@ func TestVerifyCSR(t *testing.T) {
signedReqWithIPAddress,
100,
&mockPA{},
nil,
},
{
signedReqWithURI,
100,
&mockPA{},
invalidURIPresent,
invalidIPPresent,
},
{
signedReqWithAllLongSANs,
@ -161,38 +149,44 @@ func TestVerifyCSR(t *testing.T) {
}
}
func TestCNFromCSR(t *testing.T) {
func TestNamesFromCSR(t *testing.T) {
tooLongString := strings.Repeat("a", maxCNLength+1)
cases := []struct {
name string
csr *x509.CertificateRequest
expectedCN string
name string
csr *x509.CertificateRequest
expectedCN string
expectedNames []string
}{
{
"no explicit CN",
&x509.CertificateRequest{DNSNames: []string{"a.com"}},
"a.com",
[]string{"a.com"},
},
{
"explicit uppercase CN",
&x509.CertificateRequest{Subject: pkix.Name{CommonName: "A.com"}, DNSNames: []string{"a.com"}},
"a.com",
[]string{"a.com"},
},
{
"no explicit CN, uppercase SAN",
&x509.CertificateRequest{DNSNames: []string{"A.com"}},
"a.com",
[]string{"a.com"},
},
{
"duplicate SANs",
&x509.CertificateRequest{DNSNames: []string{"b.com", "b.com", "a.com", "a.com"}},
"b.com",
[]string{"a.com", "b.com"},
},
{
"explicit CN not found in SANs",
&x509.CertificateRequest{Subject: pkix.Name{CommonName: "a.com"}, DNSNames: []string{"b.com"}},
"a.com",
[]string{"a.com", "b.com"},
},
{
"no explicit CN, all SANs too long to be the CN",
@ -201,6 +195,7 @@ func TestCNFromCSR(t *testing.T) {
tooLongString + ".b.com",
}},
"",
[]string{tooLongString + ".a.com", tooLongString + ".b.com"},
},
{
"no explicit CN, leading SANs too long to be the CN",
@ -211,6 +206,7 @@ func TestCNFromCSR(t *testing.T) {
"b.com",
}},
"a.com",
[]string{"a.com", tooLongString + ".a.com", tooLongString + ".b.com", "b.com"},
},
{
"explicit CN, leading SANs too long to be the CN",
@ -223,6 +219,7 @@ func TestCNFromCSR(t *testing.T) {
"b.com",
}},
"a.com",
[]string{"a.com", tooLongString + ".a.com", tooLongString + ".b.com", "b.com"},
},
{
"explicit CN that's too long to be the CN",
@ -230,6 +227,7 @@ func TestCNFromCSR(t *testing.T) {
Subject: pkix.Name{CommonName: tooLongString + ".a.com"},
},
"",
[]string{tooLongString + ".a.com"},
},
{
"explicit CN that's too long to be the CN, with a SAN",
@ -239,27 +237,14 @@ func TestCNFromCSR(t *testing.T) {
"b.com",
}},
"",
},
{
"explicit CN that's an IP",
&x509.CertificateRequest{
Subject: pkix.Name{CommonName: "127.0.0.1"},
},
"",
},
{
"no CN, only IP SANs",
&x509.CertificateRequest{
IPAddresses: []net.IP{
netip.MustParseAddr("127.0.0.1").AsSlice(),
},
},
"",
[]string{tooLongString + ".a.com", "b.com"},
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
test.AssertEquals(t, CNFromCSR(tc.csr), tc.expectedCN)
names := NamesFromCSR(tc.csr)
test.AssertEquals(t, names.CN, tc.expectedCN)
test.AssertDeepEquals(t, names.SANs, tc.expectedNames)
})
}
}

View File

@ -1,9 +1,93 @@
package ctconfig
import (
"errors"
"fmt"
"time"
"github.com/letsencrypt/boulder/config"
)
// LogShard describes a single shard of a temporally sharded
// CT log
type LogShard struct {
URI string
Key string
WindowStart time.Time
WindowEnd time.Time
}
// TemporalSet contains a set of temporal shards of a single log
type TemporalSet struct {
Name string
Shards []LogShard
}
// Setup initializes the TemporalSet by parsing the start and end dates
// and verifying WindowEnd > WindowStart
func (ts *TemporalSet) Setup() error {
if ts.Name == "" {
return errors.New("Name cannot be empty")
}
if len(ts.Shards) == 0 {
return errors.New("temporal set contains no shards")
}
for i := range ts.Shards {
if !ts.Shards[i].WindowEnd.After(ts.Shards[i].WindowStart) {
return errors.New("WindowStart must be before WindowEnd")
}
}
return nil
}
// pick chooses the correct shard from a TemporalSet to use for the given
// expiration time. In the case where two shards have overlapping windows
// the earlier of the two shards will be chosen.
func (ts *TemporalSet) pick(exp time.Time) (*LogShard, error) {
for _, shard := range ts.Shards {
if exp.Before(shard.WindowStart) {
continue
}
if !exp.Before(shard.WindowEnd) {
continue
}
return &shard, nil
}
return nil, fmt.Errorf("no valid shard available for temporal set %q for expiration date %q", ts.Name, exp)
}
// LogDescription contains the information needed to submit certificates
// to a CT log and verify returned receipts. If TemporalSet is non-nil then
// URI and Key should be empty.
type LogDescription struct {
URI string
Key string
SubmitFinalCert bool
*TemporalSet
}
// Info returns the URI and key of the log, either from a plain log description
// or from the earliest valid shard from a temporal log set
func (ld LogDescription) Info(exp time.Time) (string, string, error) {
if ld.TemporalSet == nil {
return ld.URI, ld.Key, nil
}
shard, err := ld.TemporalSet.pick(exp)
if err != nil {
return "", "", err
}
return shard.URI, shard.Key, nil
}
// CTGroup represents a group of CT Logs. Although capable of holding logs
// grouped by any arbitrary feature, is today primarily used to hold logs which
// are all operated by the same legal entity.
type CTGroup struct {
Name string
Logs []LogDescription
}
// CTConfig is the top-level config object expected to be embedded in an
// executable's JSON config struct.
type CTConfig struct {
@ -25,3 +109,13 @@ type CTConfig struct {
// and final certs to the same log.
FinalLogs []string
}
// LogID holds enough information to uniquely identify a CT Log: its log_id
// (the base64-encoding of the SHA-256 hash of its public key) and its human-
// readable name/description. This is used to extract other log parameters
// (such as its URL and public key) from the Chrome Log List.
type LogID struct {
Name string
ID string
SubmitFinal bool
}

View File

@ -0,0 +1,116 @@
package ctconfig
import (
"testing"
"time"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/test"
)
func TestTemporalSetup(t *testing.T) {
for _, tc := range []struct {
ts TemporalSet
err string
}{
{
ts: TemporalSet{},
err: "Name cannot be empty",
},
{
ts: TemporalSet{
Name: "temporal set",
},
err: "temporal set contains no shards",
},
{
ts: TemporalSet{
Name: "temporal set",
Shards: []LogShard{
{
WindowStart: time.Time{},
WindowEnd: time.Time{},
},
},
},
err: "WindowStart must be before WindowEnd",
},
{
ts: TemporalSet{
Name: "temporal set",
Shards: []LogShard{
{
WindowStart: time.Time{}.Add(time.Hour),
WindowEnd: time.Time{},
},
},
},
err: "WindowStart must be before WindowEnd",
},
{
ts: TemporalSet{
Name: "temporal set",
Shards: []LogShard{
{
WindowStart: time.Time{},
WindowEnd: time.Time{}.Add(time.Hour),
},
},
},
err: "",
},
} {
err := tc.ts.Setup()
if err != nil && tc.err != err.Error() {
t.Errorf("got error %q, wanted %q", err, tc.err)
} else if err == nil && tc.err != "" {
t.Errorf("unexpected error %q", err)
}
}
}
func TestLogInfo(t *testing.T) {
ld := LogDescription{
URI: "basic-uri",
Key: "basic-key",
}
uri, key, err := ld.Info(time.Time{})
test.AssertNotError(t, err, "Info failed")
test.AssertEquals(t, uri, ld.URI)
test.AssertEquals(t, key, ld.Key)
fc := clock.NewFake()
ld.TemporalSet = &TemporalSet{}
_, _, err = ld.Info(fc.Now())
test.AssertError(t, err, "Info should fail with a TemporalSet with no viable shards")
ld.TemporalSet.Shards = []LogShard{{WindowStart: fc.Now().Add(time.Hour), WindowEnd: fc.Now().Add(time.Hour * 2)}}
_, _, err = ld.Info(fc.Now())
test.AssertError(t, err, "Info should fail with a TemporalSet with no viable shards")
fc.Add(time.Hour * 4)
now := fc.Now()
ld.TemporalSet.Shards = []LogShard{
{
WindowStart: now.Add(time.Hour * -4),
WindowEnd: now.Add(time.Hour * -2),
URI: "a",
Key: "a",
},
{
WindowStart: now.Add(time.Hour * -2),
WindowEnd: now.Add(time.Hour * 2),
URI: "b",
Key: "b",
},
{
WindowStart: now.Add(time.Hour * 2),
WindowEnd: now.Add(time.Hour * 4),
URI: "c",
Key: "c",
},
}
uri, key, err = ld.Info(now)
test.AssertNotError(t, err, "Info failed")
test.AssertEquals(t, uri, "b")
test.AssertEquals(t, key, "b")
}

Some files were not shown because too many files have changed in this diff Show More