janitor: add cleanup of Orders and assoc. rows. (#4544)
The `boulder-janitor` is extended to clean up rows from the `orders` table that have expired beyond the configured grace period, along with the associated referencing rows in `requestedNames`, `orderFqdnSets`, and `orderToAuthz2`. To make the transaction work for these deletions easier and more consistent, I lifted the SA's `WithTransaction` code and associated helpers into a new shared `db` package. This also let me drop the one-off `janitorDb` interface from the existing code. There is an associated change to the `GRANT` statements for the `janitor` DB user so it can find and delete the rows related to orders. Resolves https://github.com/letsencrypt/boulder/issues/4527
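
The heart of the change is a per-order delete handler: for each expired order ID the janitor deletes the referencing child rows and then the order row itself inside a single transaction, using the `db.WithTransaction` helper lifted out of the SA. A condensed sketch of that pattern follows (illustrative only; the real handler in cmd/boulder-janitor/orders.go in the diff below is a method on an `ordersJob` and also records per-table deletion metrics):

package main

import (
	"context"
	"fmt"

	"github.com/letsencrypt/boulder/db"
)

// deleteOrder removes one order and all rows that reference it. Either every
// delete succeeds and the transaction commits, or the first error rolls the
// whole transaction back.
func deleteOrder(dbMap db.DatabaseMap, orderID int64) error {
	_, err := db.WithTransaction(context.Background(), dbMap, func(tx db.Transaction) (interface{}, error) {
		// Delete the child rows first so they are never left orphaned.
		for _, table := range []string{"requestedNames", "orderFqdnSets", "orderToAuthz2"} {
			if _, err := tx.Exec(fmt.Sprintf("DELETE FROM %s WHERE orderID = ?", table), orderID); err != nil {
				return nil, err
			}
		}
		// Finally delete the order row itself.
		if _, err := tx.Exec("DELETE FROM orders WHERE id = ?", orderID); err != nil {
			return nil, err
		}
		return nil, nil
	})
	return err
}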
This commit is contained in:
parent 88236799d5
commit df059e093b
@ -15,6 +15,7 @@ import (
|
|||
|
||||
"github.com/letsencrypt/boulder/cmd"
|
||||
"github.com/letsencrypt/boulder/core"
|
||||
"github.com/letsencrypt/boulder/db"
|
||||
berrors "github.com/letsencrypt/boulder/errors"
|
||||
"github.com/letsencrypt/boulder/features"
|
||||
bgrpc "github.com/letsencrypt/boulder/grpc"
|
||||
|
|
@ -171,12 +172,12 @@ func main() {
|
|||
|
||||
tx, err := dbMap.Begin()
|
||||
if err != nil {
|
||||
cmd.FailOnError(sa.Rollback(tx, err), "Couldn't begin transaction")
|
||||
cmd.FailOnError(db.Rollback(tx, err), "Couldn't begin transaction")
|
||||
}
|
||||
|
||||
err = revokeBySerial(ctx, serial, revocation.Reason(reasonCode), rac, logger, tx)
|
||||
if err != nil {
|
||||
cmd.FailOnError(sa.Rollback(tx, err), "Couldn't revoke certificate")
|
||||
cmd.FailOnError(db.Rollback(tx, err), "Couldn't revoke certificate")
|
||||
}
|
||||
|
||||
err = tx.Commit()
|
||||
|
|
@ -194,7 +195,7 @@ func main() {
|
|||
|
||||
tx, err := dbMap.Begin()
|
||||
if err != nil {
|
||||
cmd.FailOnError(sa.Rollback(tx, err), "Couldn't begin transaction")
|
||||
cmd.FailOnError(db.Rollback(tx, err), "Couldn't begin transaction")
|
||||
}
|
||||
|
||||
_, err = sac.GetRegistration(ctx, regID)
|
||||
|
|
@ -204,7 +205,7 @@ func main() {
|
|||
|
||||
err = revokeByReg(ctx, regID, revocation.Reason(reasonCode), rac, logger, tx)
|
||||
if err != nil {
|
||||
cmd.FailOnError(sa.Rollback(tx, err), "Couldn't revoke certificate")
|
||||
cmd.FailOnError(db.Rollback(tx, err), "Couldn't revoke certificate")
|
||||
}
|
||||
|
||||
err = tx.Commit()
|
||||
|
|
|
|||
|
|
@ -2,13 +2,14 @@ package main
|
|||
|
||||
import (
|
||||
"github.com/jmhodges/clock"
|
||||
"github.com/letsencrypt/boulder/db"
|
||||
blog "github.com/letsencrypt/boulder/log"
|
||||
)
|
||||
|
||||
// newCertificatesJob returns a batchedDBJob configured to delete expired rows
|
||||
// from the certificates table.
|
||||
func newCertificatesJob(
|
||||
db janitorDB,
|
||||
dbMap db.DatabaseMap,
|
||||
log blog.Logger,
|
||||
clk clock.Clock,
|
||||
config Config) *batchedDBJob {
|
||||
|
|
@ -20,7 +21,7 @@ func newCertificatesJob(
|
|||
LIMIT :limit`
|
||||
log.Debugf("Creating Certificates job from config: %#v\n", config.Janitor.Certificates)
|
||||
return &batchedDBJob{
|
||||
db: db,
|
||||
db: dbMap,
|
||||
log: log,
|
||||
clk: clk,
|
||||
purgeBefore: purgeBefore,
|
||||
|
|
|
|||
|
|
@ -2,13 +2,14 @@ package main
|
|||
|
||||
import (
|
||||
"github.com/jmhodges/clock"
|
||||
"github.com/letsencrypt/boulder/db"
|
||||
blog "github.com/letsencrypt/boulder/log"
|
||||
)
|
||||
|
||||
// newCertificatesPerNameJob returns a batchedDBJob configured to delete expired
|
||||
// rows from the certificatesPerName table.
|
||||
func newCertificatesPerNameJob(
|
||||
db janitorDB,
|
||||
dbMap db.DatabaseMap,
|
||||
log blog.Logger,
|
||||
clk clock.Clock,
|
||||
config Config) *batchedDBJob {
|
||||
|
|
@ -23,7 +24,7 @@ func newCertificatesPerNameJob(
|
|||
LIMIT :limit`
|
||||
log.Debugf("Creating CertificatesPerName job from config: %#v\n", config.Janitor.CertificatesPerName)
|
||||
return &batchedDBJob{
|
||||
db: db,
|
||||
db: dbMap,
|
||||
log: log,
|
||||
clk: clk,
|
||||
purgeBefore: purgeBefore,
|
||||
|
|
|
|||
|
|
@ -2,13 +2,14 @@ package main
|
|||
|
||||
import (
|
||||
"github.com/jmhodges/clock"
|
||||
"github.com/letsencrypt/boulder/db"
|
||||
blog "github.com/letsencrypt/boulder/log"
|
||||
)
|
||||
|
||||
// newCertificateStatusJob returns a batchedDBJob configured to delete expired
|
||||
// rows from the certificateStatus table.
|
||||
func newCertificateStatusJob(
|
||||
db janitorDB,
|
||||
dbMap db.DatabaseMap,
|
||||
log blog.Logger,
|
||||
clk clock.Clock,
|
||||
config Config) *batchedDBJob {
|
||||
|
|
@ -19,7 +20,7 @@ func newCertificateStatusJob(
|
|||
LIMIT :limit`
|
||||
log.Debugf("Creating CertificateStatus job from config: %#v\n", config.Janitor.CertificateStatus)
|
||||
return &batchedDBJob{
|
||||
db: db,
|
||||
db: dbMap,
|
||||
log: log,
|
||||
clk: clk,
|
||||
purgeBefore: purgeBefore,
|
||||
|
|
|
|||
|
|
@ -70,6 +70,10 @@ type Config struct {
|
|||
|
||||
// CertificatesPerName describes a cleanup job for the certificatesPerName table.
|
||||
CertificatesPerName CleanupConfig
|
||||
|
||||
// Orders describes a cleanup job for the orders table and related rows
|
||||
// (requestedNames, orderToAuthz2, orderFqdnSets).
|
||||
Orders CleanupConfig
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1,24 +1,17 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/jmhodges/clock"
|
||||
"github.com/letsencrypt/boulder/cmd"
|
||||
"github.com/letsencrypt/boulder/db"
|
||||
"github.com/letsencrypt/boulder/features"
|
||||
blog "github.com/letsencrypt/boulder/log"
|
||||
"github.com/letsencrypt/boulder/sa"
|
||||
)
|
||||
|
||||
// janitorDB is an interface describing the two functions of a sql.DB that the
|
||||
// janitor uses. It allows easy mocking of the DB for unit tests.
|
||||
type janitorDB interface {
|
||||
Exec(query string, args ...interface{}) (sql.Result, error)
|
||||
Select(i interface{}, query string, args ...interface{}) ([]interface{}, error)
|
||||
}
|
||||
|
||||
var (
|
||||
// errNoJobsConfigured is returned from New() when there are no jobs enabled
|
||||
// in the provided Config.
|
||||
|
|
@ -30,7 +23,7 @@ var (
|
|||
type janitor struct {
|
||||
log blog.Logger
|
||||
clk clock.Clock
|
||||
db janitorDB
|
||||
db db.DatabaseMap
|
||||
jobs []*batchedDBJob
|
||||
}
|
||||
|
||||
|
|
@ -82,7 +75,7 @@ func New(clk clock.Clock, config Config) (*janitor, error) {
|
|||
// newJobs constructs a list of batchedDBJobs based on the provided config. If
|
||||
// no jobs are enabled in the config then errNoJobsConfigured is returned.
|
||||
func newJobs(
|
||||
dbMap janitorDB,
|
||||
dbMap db.DatabaseMap,
|
||||
logger blog.Logger,
|
||||
clk clock.Clock,
|
||||
config Config) ([]*batchedDBJob, error) {
|
||||
|
|
@ -103,9 +96,23 @@ func newJobs(
|
|||
}
|
||||
jobs = append(jobs, newCertificatesPerNameJob(dbMap, logger, clk, config))
|
||||
}
|
||||
if config.Janitor.Orders.Enabled {
|
||||
jobs = append(jobs, newOrdersJob(dbMap, logger, clk, config.Janitor.Orders))
|
||||
}
|
||||
// There must be at least one job
|
||||
if len(jobs) == 0 {
|
||||
return nil, errNoJobsConfigured
|
||||
}
|
||||
// The jobs must all be valid
|
||||
for _, j := range jobs {
|
||||
if err := j.valid(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// if no explicit deleteHandler is specified use the default
|
||||
if j.deleteHandler == nil {
|
||||
j.deleteHandler = j.simpleResourceDelete
|
||||
}
|
||||
}
|
||||
return jobs, nil
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -17,7 +17,9 @@ func TestNewJobs(t *testing.T) {
|
|||
},
|
||||
"certificateStatus": {
|
||||
"enabled": true,
|
||||
"gracePeriod": "1h"
|
||||
"gracePeriod": "2184h",
|
||||
"batchSize": 1,
|
||||
"parallelism": 1
|
||||
},
|
||||
"certificatesPerName": {
|
||||
"enabled": false
|
||||
|
|
@ -28,15 +30,21 @@ func TestNewJobs(t *testing.T) {
|
|||
"janitor": {
|
||||
"certificates": {
|
||||
"enabled": true,
|
||||
"gracePeriod": "1h"
|
||||
"gracePeriod": "2184h",
|
||||
"batchSize": 1,
|
||||
"parallelism": 1
|
||||
},
|
||||
"certificateStatus": {
|
||||
"enabled": true,
|
||||
"gracePeriod": "1h"
|
||||
"gracePeriod": "2184h",
|
||||
"batchSize": 1,
|
||||
"parallelism": 1
|
||||
},
|
||||
"certificatesPerName": {
|
||||
"enabled": true,
|
||||
"gracePeriod": "169h"
|
||||
"gracePeriod": "2184h",
|
||||
"batchSize": 1,
|
||||
"parallelism": 1
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
|
|
|||
|
|
@ -2,16 +2,25 @@ package main
|
|||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/jmhodges/clock"
|
||||
"github.com/letsencrypt/boulder/db"
|
||||
blog "github.com/letsencrypt/boulder/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
const (
|
||||
// minPurgeBefore is the smallest purgeBefore time.Duration that can be
|
||||
// configured for a job. We set this to 90 days to match the default validity
|
||||
// window of Let's Encrypt certificates.
|
||||
minPurgeBefore = time.Hour * 24 * 90
|
||||
)
|
||||
|
||||
var (
|
||||
// errStat is a prometheus counter vector tracking the number of errors
|
||||
// experienced by the janitor during operation sliced by a table label and a
|
||||
|
|
@ -46,7 +55,7 @@ var (
|
|||
// cleanup job based on cursoring across a database table's auto incrementing
|
||||
// primary key.
|
||||
type batchedDBJob struct {
|
||||
db janitorDB
|
||||
db db.DatabaseMap
|
||||
log blog.Logger
|
||||
clk clock.Clock
|
||||
// table is the name of the table that this job cleans up.
|
||||
|
|
@ -71,7 +80,37 @@ type batchedDBJob struct {
|
|||
// * id - the primary key value for each row.
|
||||
// * expires - the expiry datetime used to calculate if the row is within the cutoff window. The column name
|
||||
// may need to be aliased to expiry if it has another name.
|
||||
workQuery string
|
||||
workQuery string
|
||||
deleteHandler func(id int64) error
|
||||
}
|
||||
|
||||
var (
|
||||
errNoTable = errors.New("table must not be empty")
|
||||
errNoPurgeBefore = fmt.Errorf("purgeBefore must be greater than %s", minPurgeBefore)
|
||||
errNoBatchSize = errors.New("batchSize must be > 0")
|
||||
errNoParallelism = errors.New("parallelism must be > 0")
|
||||
errNoWorkQuery = errors.New("workQuery must not be empty")
|
||||
)
|
||||
|
||||
// valid checks that the batchedDBJob has all required fields set correctly and
|
||||
// returns an error if not satisfied.
|
||||
func (j *batchedDBJob) valid() error {
|
||||
if j.table == "" {
|
||||
return errNoTable
|
||||
}
|
||||
if j.purgeBefore <= minPurgeBefore {
|
||||
return errNoPurgeBefore
|
||||
}
|
||||
if j.batchSize <= 0 {
|
||||
return errNoBatchSize
|
||||
}
|
||||
if j.parallelism <= 0 {
|
||||
return errNoParallelism
|
||||
}
|
||||
if j.workQuery == "" {
|
||||
return errNoWorkQuery
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type workUnit struct {
|
||||
|
|
@ -140,7 +179,7 @@ func (j batchedDBJob) cleanResource(work <-chan int64) {
|
|||
if ticker != nil {
|
||||
<-ticker.C
|
||||
}
|
||||
if err := j.deleteResource(id); err != nil {
|
||||
if err := j.deleteHandler(id); err != nil {
|
||||
j.log.Errf(
|
||||
"error deleting ID %d from table %q: %s",
|
||||
id, j.table, err)
|
||||
|
|
@ -157,9 +196,10 @@ func (j batchedDBJob) cleanResource(work <-chan int64) {
|
|||
deleted, j.table)
|
||||
}
|
||||
|
||||
// deleteResource performs a delete of the given ID from the batchedDBJob's
|
||||
// table or returns an error.
|
||||
func (j batchedDBJob) deleteResource(id int64) error {
|
||||
// simpleResourceDelete performs a delete of the given ID from the batchedDBJob's
|
||||
// table or returns an error. It does not use a transaction and assumes there
|
||||
// are no foreign key constraints or referencing rows in other tables to manage.
|
||||
func (j batchedDBJob) simpleResourceDelete(id int64) error {
|
||||
// NOTE(@cpu): We throw away the sql.Result here without checking the rows
|
||||
// affected because the query is always specific to the ID auto-increment
|
||||
// primary key. If there are multiple rows with the same primary key MariaDB
|
||||
|
|
|
|||
|
|
@ -10,6 +10,7 @@ import (
|
|||
"github.com/jmhodges/clock"
|
||||
blog "github.com/letsencrypt/boulder/log"
|
||||
"github.com/letsencrypt/boulder/test"
|
||||
"gopkg.in/go-gorp/gorp.v2"
|
||||
)
|
||||
|
||||
func setup() (*blog.Mock, clock.FakeClock) {
|
||||
|
|
@ -62,6 +63,18 @@ func (m mockDB) Select(result interface{}, query string, args ...interface{}) ([
|
|||
return nil, m.errResult
|
||||
}
|
||||
|
||||
func (m mockDB) SelectOne(interface{}, string, ...interface{}) error {
|
||||
return errors.New("not implemented")
|
||||
}
|
||||
|
||||
func (m mockDB) Insert(...interface{}) error {
|
||||
return errors.New("not implemented")
|
||||
}
|
||||
|
||||
func (m mockDB) Begin() (*gorp.Transaction, error) {
|
||||
return nil, errors.New("not implemented")
|
||||
}
|
||||
|
||||
func TestGetWork(t *testing.T) {
|
||||
log, clk := setup()
|
||||
startID := int64(10)
|
||||
|
|
@ -154,24 +167,29 @@ func TestDeleteResource(t *testing.T) {
|
|||
expectedExecArg: testID,
|
||||
}
|
||||
|
||||
// create a batchedDBJob with the simpleResourceDelete function as the
|
||||
// deleteHandler
|
||||
job := &batchedDBJob{
|
||||
db: testDB,
|
||||
log: log,
|
||||
table: table,
|
||||
}
|
||||
// Normally this would be set when deleteHandler is nil inside of the janitor
|
||||
// newJobs function.
|
||||
job.deleteHandler = job.simpleResourceDelete
|
||||
|
||||
// Mock Exec() to return a non-nil error result
|
||||
testDB.errResult = errors.New("database is on vacation")
|
||||
err := job.deleteResource(testID)
|
||||
err := job.deleteHandler(testID)
|
||||
// We expect an err result back
|
||||
test.AssertError(t, err, "no error returned from deleteResource with bad DB")
|
||||
test.AssertError(t, err, "no error returned from deleteHandler with bad DB")
|
||||
// We expect no deletes to have been tracked in the deletedStat
|
||||
test.AssertEquals(t, test.CountCounterVec("table", "certificates", deletedStat), 0)
|
||||
|
||||
// With the mock error removed we expect no error returned from deleteResource
|
||||
// With the mock error removed we expect no error returned from simpleResourceDelete
|
||||
testDB.errResult = nil
|
||||
err = job.deleteResource(testID)
|
||||
test.AssertNotError(t, err, "unexpected error from deleteResource")
|
||||
err = job.deleteHandler(testID)
|
||||
test.AssertNotError(t, err, "unexpected error from deleteHandler")
|
||||
// We expect a delete to have been tracked in the deletedStat
|
||||
test.AssertEquals(t, test.CountCounterVec("table", "certificates", deletedStat), 1)
|
||||
}
|
||||
|
|
@ -187,6 +205,18 @@ func (db slowDB) Select(result interface{}, _ string, _ ...interface{}) ([]inter
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
func (db slowDB) SelectOne(interface{}, string, ...interface{}) error {
|
||||
return errors.New("not implemented")
|
||||
}
|
||||
|
||||
func (db slowDB) Insert(...interface{}) error {
|
||||
return errors.New("not implemented")
|
||||
}
|
||||
|
||||
func (db slowDB) Begin() (*gorp.Transaction, error) {
|
||||
return nil, errors.New("not implemented")
|
||||
}
|
||||
|
||||
func TestCleanResource(t *testing.T) {
|
||||
log, _ := setup()
|
||||
|
||||
|
|
@ -200,6 +230,9 @@ func TestCleanResource(t *testing.T) {
|
|||
// Start with a parallelism of 1
|
||||
parallelism: 1,
|
||||
}
|
||||
// Normally this would be set when deleteHandler is nil inside of the janitor
|
||||
// newJobs function.
|
||||
job.deleteHandler = job.simpleResourceDelete
|
||||
|
||||
busyWork := func() <-chan int64 {
|
||||
work := make(chan int64, 2)
|
||||
|
|
@ -273,3 +306,77 @@ func TestCleanResource(t *testing.T) {
|
|||
matches = log.GetAllMatching(expectedLog)
|
||||
test.AssertEquals(t, len(matches), 1)
|
||||
}
|
||||
|
||||
func TestBatchedDBJobValid(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
j batchedDBJob
|
||||
expectedErr error
|
||||
}{
|
||||
{
|
||||
name: "no table",
|
||||
j: batchedDBJob{},
|
||||
expectedErr: errNoTable,
|
||||
},
|
||||
{
|
||||
name: "no purgeBefore",
|
||||
j: batchedDBJob{
|
||||
table: "chef's",
|
||||
},
|
||||
expectedErr: errNoPurgeBefore,
|
||||
},
|
||||
{
|
||||
name: "too small purgeBefore",
|
||||
j: batchedDBJob{
|
||||
table: "chef's",
|
||||
purgeBefore: minPurgeBefore,
|
||||
},
|
||||
expectedErr: errNoPurgeBefore,
|
||||
},
|
||||
{
|
||||
name: "no batchSize",
|
||||
j: batchedDBJob{
|
||||
table: "chef's",
|
||||
purgeBefore: minPurgeBefore + time.Hour,
|
||||
},
|
||||
expectedErr: errNoBatchSize,
|
||||
},
|
||||
{
|
||||
name: "no parallelism",
|
||||
j: batchedDBJob{
|
||||
table: "chef's",
|
||||
purgeBefore: minPurgeBefore + time.Hour,
|
||||
batchSize: 1,
|
||||
},
|
||||
expectedErr: errNoParallelism,
|
||||
},
|
||||
{
|
||||
name: "no workQuery",
|
||||
j: batchedDBJob{
|
||||
table: "chef's",
|
||||
purgeBefore: minPurgeBefore + time.Hour,
|
||||
batchSize: 1,
|
||||
parallelism: 1,
|
||||
},
|
||||
expectedErr: errNoWorkQuery,
|
||||
},
|
||||
{
|
||||
name: "valid",
|
||||
j: batchedDBJob{
|
||||
table: "chef's",
|
||||
purgeBefore: time.Hour * 24 * 91,
|
||||
batchSize: 1,
|
||||
parallelism: 1,
|
||||
workQuery: "GET food FROM kitchen",
|
||||
},
|
||||
expectedErr: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
err := tc.j.valid()
|
||||
test.AssertEquals(t, err, tc.expectedErr)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,74 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/jmhodges/clock"
|
||||
"github.com/letsencrypt/boulder/db"
|
||||
blog "github.com/letsencrypt/boulder/log"
|
||||
)
|
||||
|
||||
type ordersJob struct {
|
||||
*batchedDBJob
|
||||
}
|
||||
|
||||
func newOrdersJob(
|
||||
dbMap db.DatabaseMap,
|
||||
log blog.Logger,
|
||||
clk clock.Clock,
|
||||
config CleanupConfig) *batchedDBJob {
|
||||
purgeBefore := config.GracePeriod.Duration
|
||||
workQuery := `SELECT id, expires FROM orders
|
||||
WHERE
|
||||
id > :startID
|
||||
LIMIT :limit`
|
||||
log.Debugf("Creating Orders job from config: %#v\n", config)
|
||||
j := &ordersJob{
|
||||
batchedDBJob: &batchedDBJob{
|
||||
db: dbMap,
|
||||
log: log,
|
||||
clk: clk,
|
||||
purgeBefore: purgeBefore,
|
||||
workSleep: config.WorkSleep.Duration,
|
||||
batchSize: config.BatchSize,
|
||||
maxDPS: config.MaxDPS,
|
||||
parallelism: config.Parallelism,
|
||||
table: "orders",
|
||||
workQuery: workQuery,
|
||||
},
|
||||
}
|
||||
j.batchedDBJob.deleteHandler = j.deleteOrder
|
||||
return j.batchedDBJob
|
||||
}
|
||||
|
||||
func (j *ordersJob) deleteOrder(orderID int64) error {
|
||||
ctx := context.Background()
|
||||
// Perform a multi-table delete inside of a transaction using the order ID.
|
||||
// Either all of the rows associated with the order ID will be deleted or the
|
||||
// transaction will be rolled back.
|
||||
_, err := db.WithTransaction(ctx, j.db, func(txWithCtx db.Transaction) (interface{}, error) {
|
||||
// Delete table rows in the childTables that reference the order being deleted.
|
||||
childTables := []string{"requestedNames", "orderFqdnSets", "orderToAuthz2"}
|
||||
for _, t := range childTables {
|
||||
query := fmt.Sprintf(`DELETE FROM %s WHERE orderID = ?`, t)
|
||||
res, err := txWithCtx.Exec(query, orderID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
affected, err := res.RowsAffected()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
deletedStat.WithLabelValues(t).Add(float64(affected))
|
||||
}
|
||||
// Finally delete the order itself
|
||||
if _, err := txWithCtx.Exec(`DELETE FROM orders WHERE id = ?`, orderID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
deletedStat.WithLabelValues("orders").Inc()
|
||||
j.log.Debugf("deleted order ID %d and associated rows", orderID)
|
||||
return nil, nil
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
|
@ -0,0 +1,130 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/letsencrypt/boulder/cmd"
|
||||
"github.com/letsencrypt/boulder/core"
|
||||
corepb "github.com/letsencrypt/boulder/core/proto"
|
||||
berrors "github.com/letsencrypt/boulder/errors"
|
||||
"github.com/letsencrypt/boulder/metrics"
|
||||
"github.com/letsencrypt/boulder/sa"
|
||||
sapb "github.com/letsencrypt/boulder/sa/proto"
|
||||
"github.com/letsencrypt/boulder/sa/satest"
|
||||
"github.com/letsencrypt/boulder/test"
|
||||
"github.com/letsencrypt/boulder/test/vars"
|
||||
)
|
||||
|
||||
func TestDeleteOrder(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
log, fc := setup()
|
||||
|
||||
// Create one dbMap for the SA with the SA user.
|
||||
dbMap, err := sa.NewDbMap(vars.DBConnSA, 0)
|
||||
test.AssertNotError(t, err, "error creating db map")
|
||||
// Create a SSA backed by the SA user dbMap
|
||||
ssa, err := sa.NewSQLStorageAuthority(dbMap, fc, log, metrics.NewNoopScope(), 1)
|
||||
test.AssertNotError(t, err, "error creating SA")
|
||||
|
||||
// Don't forget to cleanup!
|
||||
defer func() {
|
||||
test.ResetSATestDatabase(t)
|
||||
}()
|
||||
|
||||
// Create a test registration
|
||||
jwk := satest.GoodJWK()
|
||||
reg, err := ssa.NewRegistration(ctx, core.Registration{
|
||||
Key: jwk,
|
||||
InitialIP: net.ParseIP("127.0.0.1"),
|
||||
})
|
||||
test.AssertNotError(t, err, "error creating test registration")
|
||||
|
||||
// Create a test authorization
|
||||
ident := "test.example.com"
|
||||
pending := string(core.StatusPending)
|
||||
expires := fc.Now().Add(time.Hour).UTC().UnixNano()
|
||||
challType := string(core.ChallengeTypeDNS01)
|
||||
tokenA := "YXNkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
|
||||
authzA := &corepb.Authorization{
|
||||
Identifier: &ident,
|
||||
RegistrationID: ®.ID,
|
||||
Status: &pending,
|
||||
Expires: &expires,
|
||||
Challenges: []*corepb.Challenge{
|
||||
{
|
||||
Status: &pending,
|
||||
Type: &challType,
|
||||
Token: &tokenA,
|
||||
},
|
||||
},
|
||||
}
|
||||
req := &sapb.AddPendingAuthorizationsRequest{Authz: []*corepb.Authorization{authzA}}
|
||||
ids, err := ssa.NewAuthorizations2(context.Background(), req)
|
||||
test.AssertNotError(t, err, "error adding test authz2")
|
||||
test.AssertEquals(t, len(ids.Ids), 1)
|
||||
|
||||
// Create a test order referencing the test registration
|
||||
testOrder, err := ssa.NewOrder(ctx, &corepb.Order{
|
||||
RegistrationID: ®.ID,
|
||||
Status: &pending,
|
||||
Expires: &expires,
|
||||
Names: []string{"test.example.com"},
|
||||
V2Authorizations: []int64{ids.Ids[0]},
|
||||
})
|
||||
test.AssertNotError(t, err, "error creating test order")
|
||||
|
||||
// Create a cleanup config for the Orders job
|
||||
config := CleanupConfig{
|
||||
WorkSleep: cmd.ConfigDuration{Duration: time.Second},
|
||||
BatchSize: 1,
|
||||
MaxDPS: 1,
|
||||
Parallelism: 1,
|
||||
}
|
||||
|
||||
// Create a dbMap for the janitor user. We don't want to use the SA dbMap
|
||||
// because it doesn't have DELETE grants.
|
||||
// Create one dbMap for the SA with the SA user.
|
||||
janitorDbMap, err := sa.NewDbMap("janitor@tcp(boulder-mysql:3306)/boulder_sa_test", 0)
|
||||
test.AssertNotError(t, err, "error creating db map")
|
||||
|
||||
// Create an Orders job and delete the mock order by its ID
|
||||
j := newOrdersJob(janitorDbMap, log, fc, config)
|
||||
err = j.deleteHandler(*testOrder.Id)
|
||||
// It should not error
|
||||
test.AssertNotError(t, err, "error calling deleteHandler")
|
||||
|
||||
// The order should be gone
|
||||
_, err = ssa.GetOrder(ctx, &sapb.OrderRequest{Id: testOrder.Id})
|
||||
test.AssertError(t, err, "found order after deleting it")
|
||||
test.AssertEquals(t, berrors.Is(err, berrors.NotFound), true)
|
||||
|
||||
// The orderToAuthz2 rows should be gone
|
||||
var authzIDs []int64
|
||||
_, err = janitorDbMap.Select(
|
||||
&authzIDs,
|
||||
"SELECT authzID FROM orderToAuthz2 WHERE orderID = ?;",
|
||||
*testOrder.Id)
|
||||
test.AssertNotError(t, err, "error finding orderToAuthz2 rows")
|
||||
test.AssertEquals(t, len(authzIDs), 0)
|
||||
|
||||
// The requested names rows should be gone
|
||||
var requestedNamesIDs []int64
|
||||
_, err = janitorDbMap.Select(
|
||||
&requestedNamesIDs,
|
||||
"SELECT id FROM requestedNames WHERE orderID = ?;",
|
||||
*testOrder.Id)
|
||||
test.AssertNotError(t, err, "error finding requestedNames rows")
|
||||
test.AssertEquals(t, len(requestedNamesIDs), 0)
|
||||
|
||||
// The orderFqdnSets rows should be gone
|
||||
var orderFqdnSetIDs []int64
|
||||
_, err = janitorDbMap.Select(
|
||||
&orderFqdnSetIDs,
|
||||
"SELECT id FROM orderFqdnSets WHERE orderID = ?;",
|
||||
*testOrder.Id)
|
||||
test.AssertNotError(t, err, "error finding orderFqdnSets rows")
|
||||
test.AssertEquals(t, len(orderFqdnSetIDs), 0)
|
||||
}
|
||||
|
|
@ -0,0 +1,61 @@
|
|||
package db
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
"gopkg.in/go-gorp/gorp.v2"
|
||||
)
|
||||
|
||||
// These interfaces exist to aid in mocking database operations for unit tests.
|
||||
//
|
||||
// By convention, any function that takes a OneSelector, Selector,
|
||||
// Inserter, Execer, or SelectExecer as an argument expects
|
||||
// that a context has already been applied to the relevant DbMap or
|
||||
// Transaction object.
|
||||
|
||||
// A `OneSelector` is anything that provides a `SelectOne` function.
|
||||
type OneSelector interface {
|
||||
SelectOne(interface{}, string, ...interface{}) error
|
||||
}
|
||||
|
||||
// A `Selector` is anything that provides a `Select` function.
|
||||
type Selector interface {
|
||||
Select(interface{}, string, ...interface{}) ([]interface{}, error)
|
||||
}
|
||||
|
||||
// A `Inserter` is anything that provides an `Insert` function
|
||||
type Inserter interface {
|
||||
Insert(list ...interface{}) error
|
||||
}
|
||||
|
||||
// A `Execer` is anything that provides an `Exec` function
|
||||
type Execer interface {
|
||||
Exec(string, ...interface{}) (sql.Result, error)
|
||||
}
|
||||
|
||||
// SelectExecer offers a subset of gorp.SqlExecutor's methods: Select and
|
||||
// Exec.
|
||||
type SelectExecer interface {
|
||||
Selector
|
||||
Execer
|
||||
}
|
||||
|
||||
// DatabaseMap offers the full combination of OneSelector, Inserter,
|
||||
// SelectExecer, and a Begin function for creating a Transaction.
|
||||
type DatabaseMap interface {
|
||||
OneSelector
|
||||
Inserter
|
||||
SelectExecer
|
||||
Begin() (*gorp.Transaction, error)
|
||||
}
|
||||
|
||||
// Transaction offers the combination of OneSelector, Inserter, SelectExecer
|
||||
// interface as well as Delete, Get, and Update.
|
||||
type Transaction interface {
|
||||
OneSelector
|
||||
Inserter
|
||||
SelectExecer
|
||||
Delete(...interface{}) (int64, error)
|
||||
Get(interface{}, ...interface{}) (interface{}, error)
|
||||
Update(...interface{}) (int64, error)
|
||||
}
|
||||
|
|
@ -1,9 +1,4 @@
|
|||
// Copyright 2016 ISRG. All rights reserved
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package sa
|
||||
package db
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
|
@ -0,0 +1,54 @@
|
|||
package db
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"testing"
|
||||
|
||||
"github.com/go-sql-driver/mysql"
|
||||
berrors "github.com/letsencrypt/boulder/errors"
|
||||
"github.com/letsencrypt/boulder/test"
|
||||
"github.com/letsencrypt/boulder/test/vars"
|
||||
"gopkg.in/go-gorp/gorp.v2"
|
||||
)
|
||||
|
||||
func TestRollback(t *testing.T) {
|
||||
// NOTE(@cpu): We avoid using sa.NewDBMapFromConfig here because it would
|
||||
// create a cyclic dependency. The `sa` package depends on `db` for
|
||||
// `WithTransaction`. The `db` package can't depend on the `sa` for creating
|
||||
// a DBMap. Since we're only doing this for a simple unit test we can make our
|
||||
// own dbMap by hand (how artisanal).
|
||||
var config *mysql.Config
|
||||
config, err := mysql.ParseDSN(vars.DBConnSA)
|
||||
test.AssertNotError(t, err, "parsing DBConnSA DSN")
|
||||
|
||||
dbConn, err := sql.Open("mysql", config.FormatDSN())
|
||||
test.AssertNotError(t, err, "opening DB connection")
|
||||
|
||||
dialect := gorp.MySQLDialect{Engine: "InnoDB", Encoding: "UTF8"}
|
||||
// NOTE(@cpu): We avoid giving a sa.BoulderTypeConverter to the DbMap field to
|
||||
// avoid the cyclic dep. We don't need to convert any types in this test.
|
||||
dbMap := &gorp.DbMap{Db: dbConn, Dialect: dialect, TypeConverter: nil}
|
||||
|
||||
tx, _ := dbMap.Begin()
|
||||
// Commit the transaction so that a subsequent Rollback will always fail.
|
||||
_ = tx.Commit()
|
||||
|
||||
innerErr := berrors.NotFoundError("Gone, gone, gone")
|
||||
result := Rollback(tx, innerErr)
|
||||
|
||||
// Since the tx.Rollback will fail we expect the result to be a wrapped error
|
||||
test.AssertNotEquals(t, result, innerErr)
|
||||
if rbErr, ok := result.(*RollbackError); !ok {
|
||||
t.Fatal("Result was not a RollbackError")
|
||||
test.AssertEquals(t, rbErr.Err, innerErr)
|
||||
test.AssertNotNil(t, rbErr.RollbackErr, "RollbackErr was nil")
|
||||
}
|
||||
|
||||
// Create a new transaction and don't commit it this time. The rollback should
|
||||
// succeed.
|
||||
tx, _ = dbMap.Begin()
|
||||
result = Rollback(tx, innerErr)
|
||||
|
||||
// We expect that the err is returned unwrapped.
|
||||
test.AssertEquals(t, result, innerErr)
|
||||
}
|
||||
|
|
@ -1,27 +1,15 @@
|
|||
package sa
|
||||
package db
|
||||
|
||||
import (
|
||||
"context"
|
||||
"gopkg.in/go-gorp/gorp.v2"
|
||||
)
|
||||
|
||||
type transaction interface {
|
||||
dbOneSelector
|
||||
dbInserter
|
||||
dbSelectExecer
|
||||
Delete(...interface{}) (int64, error)
|
||||
Get(interface{}, ...interface{}) (interface{}, error)
|
||||
Update(...interface{}) (int64, error)
|
||||
}
|
||||
import "context"
|
||||
|
||||
// txFunc represents a function that does work in the context of a transaction.
|
||||
type txFunc func(transaction) (interface{}, error)
|
||||
type txFunc func(Transaction) (interface{}, error)
|
||||
|
||||
// withTransaction runs the given function in a transaction, rolling back if it
|
||||
// WithTransaction runs the given function in a transaction, rolling back if it
|
||||
// returns an error and committing if not. The provided context is also attached
|
||||
// to the transaction. withTransaction also passes through a value returned by
|
||||
// to the transaction. WithTransaction also passes through a value returned by
|
||||
// `f`, if there is no error.
|
||||
func withTransaction(ctx context.Context, dbMap *gorp.DbMap, f txFunc) (interface{}, error) {
|
||||
func WithTransaction(ctx context.Context, dbMap DatabaseMap, f txFunc) (interface{}, error) {
|
||||
tx, err := dbMap.Begin()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -5,11 +5,12 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/letsencrypt/boulder/core"
|
||||
"github.com/letsencrypt/boulder/db"
|
||||
)
|
||||
|
||||
const getAuthorizationIDsMax = 1000
|
||||
|
||||
func getAuthorizationIDsByDomain(db dbSelector, tableName string, ident string, now time.Time) ([]string, error) {
|
||||
func getAuthorizationIDsByDomain(db db.Selector, tableName string, ident string, now time.Time) ([]string, error) {
|
||||
var allIDs []string
|
||||
_, err := db.Select(
|
||||
&allIDs,
|
||||
|
|
|
|||
sa/model.go (50 lines changed)
|
|
@ -1,7 +1,6 @@
|
|||
package sa
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
|
|
@ -15,6 +14,7 @@ import (
|
|||
|
||||
"github.com/letsencrypt/boulder/core"
|
||||
corepb "github.com/letsencrypt/boulder/core/proto"
|
||||
"github.com/letsencrypt/boulder/db"
|
||||
"github.com/letsencrypt/boulder/features"
|
||||
"github.com/letsencrypt/boulder/grpc"
|
||||
"github.com/letsencrypt/boulder/probs"
|
||||
|
|
@ -50,42 +50,10 @@ func badJSONError(msg string, jsonData []byte, err error) error {
|
|||
}
|
||||
}
|
||||
|
||||
// By convention, any function that takes a dbOneSelector, dbSelector,
|
||||
// dbInserter, dbExecer, or dbSelectExecer as as an argument expects
|
||||
// that a context has already been applied to the relevant DbMap or
|
||||
// Transaction object.
|
||||
|
||||
// A `dbOneSelector` is anything that provides a `SelectOne` function.
|
||||
type dbOneSelector interface {
|
||||
SelectOne(interface{}, string, ...interface{}) error
|
||||
}
|
||||
|
||||
// A `dbSelector` is anything that provides a `Select` function.
|
||||
type dbSelector interface {
|
||||
Select(interface{}, string, ...interface{}) ([]interface{}, error)
|
||||
}
|
||||
|
||||
// a `dbInserter` is anything that provides an `Insert` function
|
||||
type dbInserter interface {
|
||||
Insert(list ...interface{}) error
|
||||
}
|
||||
|
||||
// A `dbExecer` is anything that provides an `Exec` function
|
||||
type dbExecer interface {
|
||||
Exec(string, ...interface{}) (sql.Result, error)
|
||||
}
|
||||
|
||||
// dbSelectExecer offers a subset of gorp.SqlExecutor's methods: Select and
|
||||
// Exec.
|
||||
type dbSelectExecer interface {
|
||||
dbSelector
|
||||
dbExecer
|
||||
}
|
||||
|
||||
const regFields = "id, jwk, jwk_sha256, contact, agreement, initialIP, createdAt, LockCol, status"
|
||||
|
||||
// selectRegistration selects all fields of one registration model
|
||||
func selectRegistration(s dbOneSelector, q string, args ...interface{}) (*regModel, error) {
|
||||
func selectRegistration(s db.OneSelector, q string, args ...interface{}) (*regModel, error) {
|
||||
var model regModel
|
||||
err := s.SelectOne(
|
||||
&model,
|
||||
|
|
@ -96,7 +64,7 @@ func selectRegistration(s dbOneSelector, q string, args ...interface{}) (*regMod
|
|||
}
|
||||
|
||||
// selectPendingAuthz selects all fields of one pending authorization model
|
||||
func selectPendingAuthz(s dbOneSelector, q string, args ...interface{}) (*pendingauthzModel, error) {
|
||||
func selectPendingAuthz(s db.OneSelector, q string, args ...interface{}) (*pendingauthzModel, error) {
|
||||
var model pendingauthzModel
|
||||
err := s.SelectOne(
|
||||
&model,
|
||||
|
|
@ -109,7 +77,7 @@ func selectPendingAuthz(s dbOneSelector, q string, args ...interface{}) (*pendin
|
|||
const authzFields = "id, identifier, registrationID, status, expires"
|
||||
|
||||
// selectAuthz selects all fields of one authorization model
|
||||
func selectAuthz(s dbOneSelector, q string, args ...interface{}) (*authzModel, error) {
|
||||
func selectAuthz(s db.OneSelector, q string, args ...interface{}) (*authzModel, error) {
|
||||
var model authzModel
|
||||
err := s.SelectOne(
|
||||
&model,
|
||||
|
|
@ -122,7 +90,7 @@ func selectAuthz(s dbOneSelector, q string, args ...interface{}) (*authzModel, e
|
|||
const certFields = "registrationID, serial, digest, der, issued, expires"
|
||||
|
||||
// SelectCertificate selects all fields of one certificate object
|
||||
func SelectCertificate(s dbOneSelector, q string, args ...interface{}) (core.Certificate, error) {
|
||||
func SelectCertificate(s db.OneSelector, q string, args ...interface{}) (core.Certificate, error) {
|
||||
var model core.Certificate
|
||||
err := s.SelectOne(
|
||||
&model,
|
||||
|
|
@ -136,7 +104,7 @@ const precertFields = "registrationID, serial, der, issued, expires"
|
|||
|
||||
// SelectPrecertificate selects all fields of one precertificate object
|
||||
// identified by serial.
|
||||
func SelectPrecertificate(s dbOneSelector, serial string) (core.Certificate, error) {
|
||||
func SelectPrecertificate(s db.OneSelector, serial string) (core.Certificate, error) {
|
||||
var model precertificateModel
|
||||
err := s.SelectOne(
|
||||
&model,
|
||||
|
|
@ -157,7 +125,7 @@ type CertWithID struct {
|
|||
}
|
||||
|
||||
// SelectCertificates selects all fields of multiple certificate objects
|
||||
func SelectCertificates(s dbSelector, q string, args map[string]interface{}) ([]CertWithID, error) {
|
||||
func SelectCertificates(s db.Selector, q string, args map[string]interface{}) ([]CertWithID, error) {
|
||||
var models []CertWithID
|
||||
_, err := s.Select(
|
||||
&models,
|
||||
|
|
@ -168,7 +136,7 @@ func SelectCertificates(s dbSelector, q string, args map[string]interface{}) ([]
|
|||
const certStatusFields = "serial, status, ocspLastUpdated, revokedDate, revokedReason, lastExpirationNagSent, ocspResponse, notAfter, isExpired"
|
||||
|
||||
// SelectCertificateStatus selects all fields of one certificate status model
|
||||
func SelectCertificateStatus(s dbOneSelector, q string, args ...interface{}) (certStatusModel, error) {
|
||||
func SelectCertificateStatus(s db.OneSelector, q string, args ...interface{}) (certStatusModel, error) {
|
||||
var model certStatusModel
|
||||
err := s.SelectOne(
|
||||
&model,
|
||||
|
|
@ -179,7 +147,7 @@ func SelectCertificateStatus(s dbOneSelector, q string, args ...interface{}) (ce
|
|||
}
|
||||
|
||||
// SelectCertificateStatuses selects all fields of multiple certificate status objects
|
||||
func SelectCertificateStatuses(s dbSelector, q string, args ...interface{}) ([]core.CertificateStatus, error) {
|
||||
func SelectCertificateStatuses(s db.Selector, q string, args ...interface{}) ([]core.CertificateStatus, error) {
|
||||
var models []core.CertificateStatus
|
||||
_, err := s.Select(
|
||||
&models,
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/letsencrypt/boulder/db"
|
||||
"github.com/weppos/publicsuffix-go/publicsuffix"
|
||||
)
|
||||
|
||||
|
|
@ -32,7 +33,7 @@ func baseDomain(name string) string {
|
|||
// input timeToTheHour must be a time rounded to an hour.
|
||||
func (ssa *SQLStorageAuthority) addCertificatesPerName(
|
||||
ctx context.Context,
|
||||
db dbSelectExecer,
|
||||
db db.SelectExecer,
|
||||
names []string,
|
||||
timeToTheHour time.Time,
|
||||
) error {
|
||||
|
|
@ -63,7 +64,7 @@ func (ssa *SQLStorageAuthority) addCertificatesPerName(
|
|||
// certificates issued in the given time range for that domain's eTLD+1 (aka
|
||||
// base domain). It uses the certificatesPerName table to make this lookup fast.
|
||||
func (ssa *SQLStorageAuthority) countCertificates(
|
||||
db dbSelector,
|
||||
db db.Selector,
|
||||
domain string,
|
||||
earliest,
|
||||
latest time.Time,
|
||||
|
|
|
|||
|
|
@ -1,36 +0,0 @@
|
|||
package sa
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
berrors "github.com/letsencrypt/boulder/errors"
|
||||
"github.com/letsencrypt/boulder/test"
|
||||
)
|
||||
|
||||
func TestRollback(t *testing.T) {
|
||||
sa, _, cleanUp := initSA(t)
|
||||
defer cleanUp()
|
||||
|
||||
tx, _ := sa.dbMap.Begin()
|
||||
// Commit the transaction so that a subsequent Rollback will always fail.
|
||||
_ = tx.Commit()
|
||||
|
||||
innerErr := berrors.NotFoundError("Gone, gone, gone")
|
||||
result := Rollback(tx, innerErr)
|
||||
|
||||
// Since the tx.Rollback will fail we expect the result to be a wrapped error
|
||||
test.AssertNotEquals(t, result, innerErr)
|
||||
if rbErr, ok := result.(*RollbackError); !ok {
|
||||
t.Fatal("Result was not a RollbackError")
|
||||
test.AssertEquals(t, rbErr.Err, innerErr)
|
||||
test.AssertNotNil(t, rbErr.RollbackErr, "RollbackErr was nil")
|
||||
}
|
||||
|
||||
// Create a new transaction and don't commit it this time. The rollback should
|
||||
// succeed.
|
||||
tx, _ = sa.dbMap.Begin()
|
||||
result = Rollback(tx, innerErr)
|
||||
|
||||
// We expect that the err is returned unwrapped.
|
||||
test.AssertEquals(t, result, innerErr)
|
||||
}
|
||||
sa/sa.go (35 lines changed)
|
|
@ -19,6 +19,7 @@ import (
|
|||
|
||||
"github.com/letsencrypt/boulder/core"
|
||||
corepb "github.com/letsencrypt/boulder/core/proto"
|
||||
"github.com/letsencrypt/boulder/db"
|
||||
berrors "github.com/letsencrypt/boulder/errors"
|
||||
"github.com/letsencrypt/boulder/features"
|
||||
bgrpc "github.com/letsencrypt/boulder/grpc"
|
||||
|
|
@ -29,7 +30,7 @@ import (
|
|||
sapb "github.com/letsencrypt/boulder/sa/proto"
|
||||
)
|
||||
|
||||
type certCountFunc func(db dbSelector, domain string, earliest, latest time.Time) (int, error)
|
||||
type certCountFunc func(db db.Selector, domain string, earliest, latest time.Time) (int, error)
|
||||
|
||||
// SQLStorageAuthority defines a Storage Authority
|
||||
type SQLStorageAuthority struct {
|
||||
|
|
@ -113,15 +114,15 @@ func statusIsPending(status core.AcmeStatus) bool {
|
|||
return status == core.StatusPending || status == core.StatusProcessing || status == core.StatusUnknown
|
||||
}
|
||||
|
||||
func existingPending(db dbOneSelector, id string) bool {
|
||||
func existingPending(dbMap db.OneSelector, id string) bool {
|
||||
var count int64
|
||||
_ = db.SelectOne(&count, "SELECT count(*) FROM pendingAuthorizations WHERE id = :id", map[string]interface{}{"id": id})
|
||||
_ = dbMap.SelectOne(&count, "SELECT count(*) FROM pendingAuthorizations WHERE id = :id", map[string]interface{}{"id": id})
|
||||
return count > 0
|
||||
}
|
||||
|
||||
func existingFinal(db dbOneSelector, id string) bool {
|
||||
func existingFinal(dbMap db.OneSelector, id string) bool {
|
||||
var count int64
|
||||
_ = db.SelectOne(&count, "SELECT count(*) FROM authz WHERE id = :id", map[string]interface{}{"id": id})
|
||||
_ = dbMap.SelectOne(&count, "SELECT count(*) FROM authz WHERE id = :id", map[string]interface{}{"id": id})
|
||||
return count > 0
|
||||
}
|
||||
|
||||
|
|
@ -478,7 +479,7 @@ func (ssa *SQLStorageAuthority) AddCertificate(
|
|||
certStatus.OCSPLastUpdated = ssa.clk.Now()
|
||||
}
|
||||
|
||||
_, overallError := withTransaction(ctx, ssa.dbMap, func(txWithCtx transaction) (interface{}, error) {
|
||||
_, overallError := db.WithTransaction(ctx, ssa.dbMap, func(txWithCtx db.Transaction) (interface{}, error) {
|
||||
err = txWithCtx.Insert(cert)
|
||||
if err != nil {
|
||||
if strings.HasPrefix(err.Error(), "Error 1062: Duplicate entry") {
|
||||
|
|
@ -568,7 +569,7 @@ func hashNames(names []string) []byte {
|
|||
return hash[:]
|
||||
}
|
||||
|
||||
func addFQDNSet(db dbInserter, names []string, serial string, issued time.Time, expires time.Time) error {
|
||||
func addFQDNSet(db db.Inserter, names []string, serial string, issued time.Time, expires time.Time) error {
|
||||
return db.Insert(&core.FQDNSet{
|
||||
SetHash: hashNames(names),
|
||||
Serial: serial,
|
||||
|
|
@ -582,7 +583,7 @@ func addFQDNSet(db dbInserter, names []string, serial string, issued time.Time,
|
|||
// addition can take place within the order addition transaction. The caller is
|
||||
// required to rollback the transaction if an error is returned.
|
||||
func addOrderFQDNSet(
|
||||
db dbInserter,
|
||||
db db.Inserter,
|
||||
names []string,
|
||||
orderID int64,
|
||||
regID int64,
|
||||
|
|
@ -600,7 +601,7 @@ func addOrderFQDNSet(
|
|||
// take place within the finalization transaction. The caller is required to
|
||||
// rollback the transaction if an error is returned.
|
||||
func deleteOrderFQDNSet(
|
||||
db dbExecer,
|
||||
db db.Execer,
|
||||
orderID int64) error {
|
||||
|
||||
result, err := db.Exec(`
|
||||
|
|
@ -623,7 +624,7 @@ func deleteOrderFQDNSet(
|
|||
return nil
|
||||
}
|
||||
|
||||
func addIssuedNames(db dbExecer, cert *x509.Certificate, isRenewal bool) error {
|
||||
func addIssuedNames(db db.Execer, cert *x509.Certificate, isRenewal bool) error {
|
||||
if len(cert.DNSNames) == 0 {
|
||||
return berrors.InternalServerError("certificate has no DNSNames")
|
||||
}
|
||||
|
|
@ -664,7 +665,7 @@ type setHash []byte
|
|||
// certificate serials. These serials can be used to check whether any
|
||||
// certificates have been issued for the same set of names previously.
|
||||
func (ssa *SQLStorageAuthority) getFQDNSetsBySerials(
|
||||
db dbSelector,
|
||||
db db.Selector,
|
||||
serials []string,
|
||||
) ([]setHash, error) {
|
||||
var fqdnSets []setHash
|
||||
|
|
@ -708,7 +709,7 @@ func (ssa *SQLStorageAuthority) getFQDNSetsBySerials(
|
|||
// included) for a given slice of fqdnSets that occurred after the earliest
|
||||
// parameter.
|
||||
func (ssa *SQLStorageAuthority) getNewIssuancesByFQDNSet(
|
||||
db dbSelector,
|
||||
db db.Selector,
|
||||
fqdnSets []setHash,
|
||||
earliest time.Time,
|
||||
) (int, error) {
|
||||
|
|
@ -899,7 +900,7 @@ func (ssa *SQLStorageAuthority) NewOrder(ctx context.Context, req *corepb.Order)
|
|||
Created: ssa.clk.Now(),
|
||||
}
|
||||
|
||||
output, overallError := withTransaction(ctx, ssa.dbMap, func(txWithCtx transaction) (interface{}, error) {
|
||||
output, overallError := db.WithTransaction(ctx, ssa.dbMap, func(txWithCtx db.Transaction) (interface{}, error) {
|
||||
if err := txWithCtx.Insert(order); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -962,7 +963,7 @@ func (ssa *SQLStorageAuthority) NewOrder(ctx context.Context, req *corepb.Order)
|
|||
// in processing status by updating the `beganProcessing` field of the
|
||||
// corresponding Order table row in the DB.
|
||||
func (ssa *SQLStorageAuthority) SetOrderProcessing(ctx context.Context, req *corepb.Order) error {
|
||||
_, overallError := withTransaction(ctx, ssa.dbMap, func(txWithCtx transaction) (interface{}, error) {
|
||||
_, overallError := db.WithTransaction(ctx, ssa.dbMap, func(txWithCtx db.Transaction) (interface{}, error) {
|
||||
result, err := txWithCtx.Exec(`
|
||||
UPDATE orders
|
||||
SET beganProcessing = ?
|
||||
|
|
@ -987,7 +988,7 @@ func (ssa *SQLStorageAuthority) SetOrderProcessing(ctx context.Context, req *cor
|
|||
|
||||
// SetOrderError updates a provided Order's error field.
|
||||
func (ssa *SQLStorageAuthority) SetOrderError(ctx context.Context, order *corepb.Order) error {
|
||||
_, overallError := withTransaction(ctx, ssa.dbMap, func(txWithCtx transaction) (interface{}, error) {
|
||||
_, overallError := db.WithTransaction(ctx, ssa.dbMap, func(txWithCtx db.Transaction) (interface{}, error) {
|
||||
om, err := orderToModel(order)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -1018,7 +1019,7 @@ func (ssa *SQLStorageAuthority) SetOrderError(ctx context.Context, order *corepb
|
|||
// CertificateSerial and the order ID on the provided order are processed (e.g.
|
||||
// this is not a generic update RPC).
|
||||
func (ssa *SQLStorageAuthority) FinalizeOrder(ctx context.Context, req *corepb.Order) error {
|
||||
_, overallError := withTransaction(ctx, ssa.dbMap, func(txWithCtx transaction) (interface{}, error) {
|
||||
_, overallError := db.WithTransaction(ctx, ssa.dbMap, func(txWithCtx db.Transaction) (interface{}, error) {
|
||||
result, err := txWithCtx.Exec(`
|
||||
UPDATE orders
|
||||
SET certificateSerial = ?
|
||||
|
|
@ -1594,7 +1595,7 @@ func (ssa *SQLStorageAuthority) FinalizeAuthorization2(ctx context.Context, req
|
|||
// RevokeCertificate stores revocation information about a certificate. It will only store this
|
||||
// information if the certificate is not already marked as revoked.
|
||||
func (ssa *SQLStorageAuthority) RevokeCertificate(ctx context.Context, req *sapb.RevokeCertificateRequest) error {
|
||||
_, overallError := withTransaction(ctx, ssa.dbMap, func(txWithCtx transaction) (interface{}, error) {
|
||||
_, overallError := db.WithTransaction(ctx, ssa.dbMap, func(txWithCtx db.Transaction) (interface{}, error) {
|
||||
status, err := SelectCertificateStatus(
|
||||
txWithCtx,
|
||||
"WHERE serial = ? AND status != ?",
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ import (
|
|||
"github.com/jmhodges/clock"
|
||||
"github.com/letsencrypt/boulder/core"
|
||||
corepb "github.com/letsencrypt/boulder/core/proto"
|
||||
"github.com/letsencrypt/boulder/db"
|
||||
berrors "github.com/letsencrypt/boulder/errors"
|
||||
"github.com/letsencrypt/boulder/features"
|
||||
bgrpc "github.com/letsencrypt/boulder/grpc"
|
||||
|
|
@ -325,7 +326,7 @@ func TestCountCertificatesByNames(t *testing.T) {
|
|||
interlocker.Add(len(names))
|
||||
sa.parallelismPerRPC = len(names)
|
||||
oldCertCountFunc := sa.countCertificatesByName
|
||||
sa.countCertificatesByName = func(sel dbSelector, domain string, earliest, latest time.Time) (int, error) {
|
||||
sa.countCertificatesByName = func(sel db.Selector, domain string, earliest, latest time.Time) (int, error) {
|
||||
interlocker.Done()
|
||||
interlocker.Wait()
|
||||
return oldCertCountFunc(sel, domain, earliest, latest)
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@
|
|||
"debugAddr": ":8014",
|
||||
"certificates": {
|
||||
"enabled": true,
|
||||
"gracePeriod": "1h",
|
||||
"gracePeriod": "2184h",
|
||||
"batchSize": 100,
|
||||
"workSleep": "500ms",
|
||||
"parallelism": 2,
|
||||
|
|
@ -16,7 +16,7 @@
|
|||
},
|
||||
"certificateStatus": {
|
||||
"enabled": true,
|
||||
"gracePeriod": "1h",
|
||||
"gracePeriod": "2184h",
|
||||
"batchSize": 100,
|
||||
"workSleep": "500ms",
|
||||
"parallelism": 2,
|
||||
|
|
@ -24,7 +24,15 @@
|
|||
},
|
||||
"certificatesPerName": {
|
||||
"enabled": true,
|
||||
"gracePeriod": "169h",
|
||||
"gracePeriod": "2184h",
|
||||
"batchSize": 100,
|
||||
"workSleep": "500ms",
|
||||
"parallelism": 2,
|
||||
"maxDPS": 50
|
||||
},
|
||||
"orders": {
|
||||
"enabled": true,
|
||||
"gracePeriod": "2184h",
|
||||
"batchSize": 100,
|
||||
"workSleep": "500ms",
|
||||
"parallelism": 2,
|
||||
|
|
|
|||
|
|
@ -0,0 +1,34 @@
|
|||
{
|
||||
"janitor": {
|
||||
"syslog": {
|
||||
"stdoutLevel": 6
|
||||
},
|
||||
"dbConnectFile": "test/secrets/janitor_dburl",
|
||||
"maxDBConns": 10,
|
||||
"debugAddr": ":8014",
|
||||
"certificates": {
|
||||
"enabled": true,
|
||||
"gracePeriod": "2184h",
|
||||
"batchSize": 100,
|
||||
"workSleep": "500ms",
|
||||
"parallelism": 2,
|
||||
"maxDPS": 50
|
||||
},
|
||||
"certificateStatus": {
|
||||
"enabled": true,
|
||||
"gracePeriod": "2184h",
|
||||
"batchSize": 100,
|
||||
"workSleep": "500ms",
|
||||
"parallelism": 2,
|
||||
"maxDPS": 50
|
||||
},
|
||||
"certificatesPerName": {
|
||||
"enabled": true,
|
||||
"gracePeriod": "2184h",
|
||||
"batchSize": 100,
|
||||
"workSleep": "500ms",
|
||||
"parallelism": 2,
|
||||
"maxDPS": 50
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -100,7 +100,7 @@ def run_janitor():
|
|||
e.setdefault("FAKECLOCK", fakeclock(target_time))
|
||||
|
||||
# Note: Must use exec here so that killing this process kills the command.
|
||||
cmdline = "exec ./bin/boulder-janitor --config test/config-next/janitor.json"
|
||||
cmdline = "exec ./bin/boulder-janitor --config {0}/janitor.json".format(config_dir)
|
||||
p = subprocess.Popen(cmdline, shell=True, env=e)
|
||||
|
||||
# Wait for the janitor to come up
|
||||
|
|
@ -136,6 +136,7 @@ def run_janitor():
|
|||
certStatusWorkBatch = get_stat_line(8014, statline("workbatch", "certificateStatus"))
|
||||
certsWorkBatch = get_stat_line(8014, statline("workbatch", "certificates"))
|
||||
certsPerNameWorkBatch = get_stat_line(8014, statline("workbatch", "certificatesPerName"))
|
||||
ordersWorkBatch = get_stat_line(8014, statline("workbatch", "orders"))
|
||||
|
||||
# sleep for double the configured workSleep for each job
|
||||
time.sleep(1)
|
||||
|
|
@ -143,10 +144,12 @@ def run_janitor():
|
|||
newCertStatusWorkBatch = get_stat_line(8014, statline("workbatch", "certificateStatus"))
|
||||
newCertsWorkBatch = get_stat_line(8014, statline("workbatch", "certificates"))
|
||||
newCertsPerNameWorkBatch = get_stat_line(8014, statline("workbatch", "certificatesPerName"))
|
||||
newOrdersWorkBatch = get_stat_line(8014, statline("workbatch", "orders"))
|
||||
|
||||
if (certStatusWorkBatch == newCertStatusWorkBatch
|
||||
and certsWorkBatch == newCertsWorkBatch
|
||||
and certsPerNameWorkBatch == newCertsPerNameWorkBatch):
|
||||
and certsPerNameWorkBatch == newCertsPerNameWorkBatch
|
||||
and ordersWorkBatch == newOrdersWorkBatch):
|
||||
break
|
||||
|
||||
attempts = attempts + 1
|
||||
|
|
@ -156,13 +159,14 @@ def run_janitor():
|
|||
certStatusDeletes = get_stat_line(8014, statline("deletions", "certificateStatus"))
|
||||
certsDeletes = get_stat_line(8014, statline("deletions", "certificates"))
|
||||
certsPerNameDeletes = get_stat_line(8014, statline("deletions", "certificatesPerName"))
|
||||
ordersDeletes = get_stat_line(8014, statline("deletions", "orders"))
|
||||
|
||||
if certStatusDeletes is None or certsDeletes is None or certsPerNameDeletes is None:
|
||||
if certStatusDeletes is None or certsDeletes is None or certsPerNameDeletes is None or ordersDeletes is None:
|
||||
print("delete stats not present after check {0}. Sleeping".format(i))
|
||||
time.sleep(2)
|
||||
continue
|
||||
|
||||
for l in [certStatusDeletes, certsDeletes, certsPerNameDeletes]:
|
||||
for l in [certStatusDeletes, certsDeletes, certsPerNameDeletes, ordersDeletes]:
|
||||
if stat_value(l) == "0":
|
||||
raise Exception("Expected a non-zero number of deletes to be performed. Found {0}".format(l))
|
||||
|
||||
|
|
@ -171,6 +175,7 @@ def run_janitor():
|
|||
statline("errors", "certificateStatus"),
|
||||
statline("errors", "certificates"),
|
||||
statline("errors", "certificatesPerName"),
|
||||
statline("errors", "orders"),
|
||||
]
|
||||
for eStat in errorStats:
|
||||
actual = get_stat_line(8014, eStat)
|
||||
|
|
|
|||
|
|
@ -60,6 +60,10 @@ GRANT SELECT,DELETE ON certificates TO 'janitor'@'localhost';
|
|||
GRANT SELECT,DELETE ON certificateStatus TO 'janitor'@'localhost';
|
||||
GRANT SELECT,DELETE ON certificatesPerName TO 'janitor'@'localhost';
|
||||
GRANT SELECT,DELETE ON sctReceipts TO 'janitor'@'localhost';
|
||||
GRANT SELECT,DELETE ON orders TO 'janitor'@'localhost';
|
||||
GRANT SELECT,DELETE ON requestedNames TO 'janitor'@'localhost';
|
||||
GRANT SELECT,DELETE ON orderFqdnSets TO 'janitor'@'localhost';
|
||||
GRANT SELECT,DELETE ON orderToAuthz2 TO 'janitor'@'localhost';
|
||||
|
||||
-- Test setup and teardown
|
||||
GRANT ALL PRIVILEGES ON * to 'test_setup'@'localhost';
|
||||
|
|
|
|||