parent 67927390e7
commit 5c49231ea6

@@ -20,7 +20,7 @@ import (
 )

 type client struct {
-    redis         rocsp.Writer
+    redis         *rocsp.RWClient
     db            *db.WrappedMap // optional
     ocspGenerator capb.OCSPGeneratorClient
     clk           clock.Clock
@@ -4,7 +4,6 @@ import (
     "context"
     "fmt"
     "math/big"
-    "os"
     "testing"
     "time"

@@ -39,8 +38,8 @@ func makeClient() (*rocsp.RWClient, clock.Clock) {

     rdb := redis.NewRing(&redis.RingOptions{
         Addrs: map[string]string{
-            "shard1": "10.33.33.8:4218",
-            "shard2": "10.33.33.9:4218",
+            "shard1": "10.33.33.2:4218",
+            "shard2": "10.33.33.3:4218",
         },
         Username: "unittest-rw",
         Password: "824968fa490f4ecec1e52d5e34916bdb60d45f8d",

@@ -50,32 +49,6 @@ func makeClient() (*rocsp.RWClient, clock.Clock) {
     return rocsp.NewWritingClient(rdb, 500*time.Millisecond, clk, metrics.NoopRegisterer), clk
 }

-// TODO(#6517) remove this helper.
-func makeClusterClient() (*rocsp.CRWClient, clock.Clock) {
-    CACertFile := "../../test/redis-tls/minica.pem"
-    CertFile := "../../test/redis-tls/boulder/cert.pem"
-    KeyFile := "../../test/redis-tls/boulder/key.pem"
-    tlsConfig := cmd.TLSConfig{
-        CACertFile: &CACertFile,
-        CertFile:   &CertFile,
-        KeyFile:    &KeyFile,
-    }
-    tlsConfig2, err := tlsConfig.Load()
-    if err != nil {
-        panic(err)
-    }
-
-    rdb := redis.NewClusterClient(&redis.ClusterOptions{
-        Addrs:     []string{"10.33.33.2:4218"},
-        Username:  "unittest-rw",
-        Password:  "824968fa490f4ecec1e52d5e34916bdb60d45f8d",
-        TLSConfig: tlsConfig2,
-    })
-    clk := clock.NewFake()
-
-    return rocsp.NewClusterWritingClient(rdb, 5*time.Second, clk, metrics.NoopRegisterer), clk
-}
-
 func TestGetStartingID(t *testing.T) {
     clk := clock.NewFake()
     dbMap, err := sa.NewDbMap(vars.DBConnSAFullPerms, sa.DbSettings{})

@@ -109,14 +82,7 @@ func TestGetStartingID(t *testing.T) {
 }

 func TestStoreResponse(t *testing.T) {
-    // TODO(#6517) remove this block.
-    var redisClient rocsp.Writer
-    var clk clock.Clock
-    if os.Getenv("BOULDER_CONFIG_DIR") == "test/config" {
-        redisClient, clk = makeClusterClient()
-    } else {
-        redisClient, clk = makeClient()
-    }
+    redisClient, clk := makeClient()

     issuer, err := core.LoadCert("../../test/hierarchy/int-e1.cert.pem")
     test.AssertNotError(t, err, "loading int-e1")

@@ -153,14 +119,7 @@ func (mog mockOCSPGenerator) GenerateOCSP(ctx context.Context, in *capb.Generate
 }

 func TestLoadFromDB(t *testing.T) {
-    // TODO(#6517) remove this block.
-    var redisClient rocsp.Writer
-    var clk clock.Clock
-    if os.Getenv("BOULDER_CONFIG_DIR") == "test/config" {
-        redisClient, clk = makeClusterClient()
-    } else {
-        redisClient, clk = makeClient()
-    }
+    redisClient, clk := makeClient()

     dbMap, err := sa.NewDbMap(vars.DBConnSA, sa.DbSettings{})
     if err != nil {
@@ -5,26 +5,3 @@ services:
       FAKE_DNS: 10.77.77.77
       BOULDER_CONFIG_DIR: &boulder_config_dir test/config-next
       GOFLAGS: -mod=vendor
-  # TODO(#6517): remove bredis_clusterer
-  bredis_clusterer:
-    depends_on:
-      - bredis_7
-      - bredis_8
-  # TODO(#6517): move both nodes to docker-compose.yml
-  bredis_7:
-    image: redis:6.2.7
-    volumes:
-      - ./test/:/test/:cached
-    command: redis-server /test/redis.config
-    networks:
-      redisnet:
-        ipv4_address: 10.33.33.8
-
-  bredis_8:
-    image: redis:6.2.7
-    volumes:
-      - ./test/:/test/:cached
-    command: redis-server /test/redis.config
-    networks:
-      redisnet:
-        ipv4_address: 10.33.33.9
@@ -41,7 +41,8 @@ services:
       - 4003:4003 # OCSP
     depends_on:
       - bmysql
-      - bredis_clusterer
+      - bredis_1
+      - bredis_2
      - bconsul
     entrypoint: test/entrypoint.sh
     working_dir: &boulder_working_dir /boulder

@@ -62,13 +63,12 @@ services:
     command: mysqld --bind-address=0.0.0.0 --slow-query-log --log-output=TABLE --log-queries-not-using-indexes=ON
     logging:
       driver: none
-  # TODO(#6517): replace all bredis_ services with those from
-  # docker-compose.next.yml.
+
   bredis_1:
     image: redis:6.2.7
     volumes:
       - ./test/:/test/:cached
-    command: redis-server /test/redis-cluster.config
+    command: redis-server /test/redis.config
     networks:
       redisnet:
         ipv4_address: 10.33.33.2

@@ -77,68 +77,11 @@ services:
     image: redis:6.2.7
     volumes:
       - ./test/:/test/:cached
-    command: redis-server /test/redis-cluster.config
+    command: redis-server /test/redis.config
     networks:
       redisnet:
         ipv4_address: 10.33.33.3

-  bredis_3:
-    image: redis:6.2.7
-    volumes:
-      - ./test/:/test/:cached
-    command: redis-server /test/redis-cluster.config
-    networks:
-      redisnet:
-        ipv4_address: 10.33.33.4
-
-  bredis_4:
-    image: redis:6.2.7
-    volumes:
-      - ./test/:/test/:cached
-    command: redis-server /test/redis-cluster.config
-    networks:
-      redisnet:
-        ipv4_address: 10.33.33.5
-
-  bredis_5:
-    image: redis:6.2.7
-    volumes:
-      - ./test/:/test/:cached
-    command: redis-server /test/redis-cluster.config
-    networks:
-      redisnet:
-        ipv4_address: 10.33.33.6
-
-  bredis_6:
-    image: redis:6.2.7
-    volumes:
-      - ./test/:/test/:cached
-    command: redis-server /test/redis-cluster.config
-    networks:
-      redisnet:
-        ipv4_address: 10.33.33.7
-  # TODO(#6517): remove bredis_clusterer.
-  bredis_clusterer:
-    image: redis:6.2.7
-    environment:
-      BOULDER_CONFIG_DIR: *boulder_config_dir
-    volumes:
-      - ./test/:/test/:cached
-      - ./cluster/:/cluster/:cached
-    command: /test/wait-for-it.sh 10.33.33.2 4218 /test/redis-create.sh
-    depends_on:
-      - bredis_1
-      - bredis_2
-      - bredis_3
-      - bredis_4
-      - bredis_5
-      - bredis_6
-    networks:
-      redisnet:
-        ipv4_address: 10.33.33.10
-        aliases:
-          - boulder-redis-clusterer
-
   bconsul:
     image: hashicorp/consul:1.13.1
     volumes:
@@ -1,13 +1,8 @@
 # Redis

-TODO(#6517): Update this to reflect the use of Redis Ring.
-
-We use Redis Cluster for OCSP. The Boulder dev environment stands up a cluster
-of 6 nodes, with 3 primaries and 3 replicas. Check docker-compose.yml for
-details of those.
-
-The initial setup is done by test/redis-create.sh, which assigns all the
-individual Redis nodes to their roles as primaries or replicas.
+We use Redis for OCSP. The Boulder dev environment stands up two nodes. We use
+the Ring client in the github.com/redis/go-redis package to consistently hash
+our reads and writes across these two nodes.

 ## Debugging

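To make the Ring setup described in the paragraph above concrete, here is a minimal, self-contained sketch (an illustration, not part of this change) using the go-redis v8 API that the rest of this diff imports. The shard names, addresses, and test credentials are the dev-environment values that appear elsewhere in this change; the dev shards actually require TLS on port 4218, which is omitted here for brevity.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	// Two named shards; the Ring client hashes each key to exactly one of them.
	rdb := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{
			"shard1": "10.33.33.2:4218",
			"shard2": "10.33.33.3:4218",
		},
		Username: "unittest-rw",
		Password: "824968fa490f4ecec1e52d5e34916bdb60d45f8d",
		// A real client would also set TLSConfig here; the dev shards only
		// listen over TLS (see test/redis.config).
	})

	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()

	// Reads and writes are routed to whichever shard the key hashes to.
	if err := rdb.Set(ctx, "example-key", "example-value", time.Hour).Err(); err != nil {
		fmt.Println("set failed:", err)
		return
	}
	fmt.Println(rdb.Get(ctx, "example-key").Result())
}
```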
@@ -19,18 +14,24 @@ The first tool you might turn to is `redis-cli`. You probably don't
 have redis-cli on your host, so we'll run it in a Docker container. We
 also need to pass some specific arguments for TLS and authentication. There's a
 script that handles all that for you: `test/redis-cli.sh`. First, make sure your
-redis cluster is running:
+redis is running:

-```
-docker compose up bredis_clusterer
+```shell
+docker compose up boulder
 ```

-Then, in a different window, run:
+Then, in a different window, run the following to connect to `bredis_1`:

-```
+```shell
 ./test/redis-cli.sh -h 10.33.33.2
 ```

+Similarly, to connect to `bredis_2`:
+
+```shell
+./test/redis-cli.sh -h 10.33.33.3
+```
+
 You can pass any IP address for the -h (host) parameter. The full list of IP
 addresses for Redis nodes is in `docker-compose.yml`. You can also pass other
 redis-cli commandline parameters. They'll get passed through.
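As an illustration of that pass-through behavior (not part of this change), anything after the host flag is handed straight to redis-cli, so one-off commands can be run non-interactively; the commands below are ordinary Redis commands chosen only as examples:

```shell
# Liveness check against shard1.
./test/redis-cli.sh -h 10.33.33.2 PING

# Page through a few keys on shard2.
./test/redis-cli.sh -h 10.33.33.3 SCAN 0 COUNT 10
```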
@@ -38,7 +39,7 @@ redis-cli commandline parameters. They'll get passed through.
 You may want to go a level deeper and communicate with a Redis node using the
 Redis protocol. Here's the command to do that (run from the Boulder root):

-```
+```shell
 openssl s_client -connect 10.33.33.2:4218 \
   -CAfile test/redis-tls/minica.pem \
   -cert test/redis-tls/boulder/cert.pem \
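Once openssl s_client is connected you are speaking the Redis protocol directly, and inline commands work, so a quick smoke test might look like the following (an illustration, not part of this change; it assumes the `unittest-rw` ACL user whose credentials appear elsewhere in this diff):

```
AUTH unittest-rw 824968fa490f4ecec1e52d5e34916bdb60d45f8d
+OK
PING
+PONG
```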
@@ -49,7 +49,7 @@ type redisSource struct {
 // NewRedisSource returns a responder.Source which will look up OCSP responses in a
 // Redis table.
 func NewRedisSource(
-    client rocsp.Writer,
+    client *rocsp.RWClient,
     signer responder.Source,
     liveSigningPeriod time.Duration,
     clk clock.Clock,
@@ -25,12 +25,6 @@ type RedisConfig struct {
     TLS cmd.TLSConfig
     // Username is a Redis username.
     Username string
-    // Addrs is a list of IP address:port pairs. The go-redis `ClusterClient`
-    // will use this list to discover a cluster of Redis servers configured in
-    // Cluster mode.
-    //
-    // DEPRECATED: Use `ShardAddrs` instead. TODO(#6517) remove `Addrs`.
-    Addrs []string
     // ShardAddrs is a map of shard names to IP address:port pairs. The go-redis
     // `Ring` client will shard reads and writes across the provided Redis
     // Servers based on a consistent hashing algorithm.
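For reference, the `ShardAddrs` field that replaces `Addrs` here is populated like this in the dev configs further down in this same change (a fragment of one of the OCSP responder configs, shown only for illustration):

```json
"redis": {
  "username": "ocsp-responder",
  "passwordFile": "test/secrets/ocsp_responder_redis_password",
  "shardAddrs": {
    "shard1": "10.33.33.2:4218",
    "shard2": "10.33.33.3:4218"
  },
  "timeout": "5s"
}
```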
@@ -100,7 +94,7 @@ type RedisConfig struct {
 }

 // MakeClient produces a read-write ROCSP client from a config.
-func MakeClient(c *RedisConfig, clk clock.Clock, stats prometheus.Registerer) (rocsp.Writer, error) {
+func MakeClient(c *RedisConfig, clk clock.Clock, stats prometheus.Registerer) (*rocsp.RWClient, error) {
     password, err := c.PasswordConfig.Pass()
     if err != nil {
         return nil, fmt.Errorf("loading password: %w", err)

@@ -111,38 +105,6 @@ func MakeClient(c *RedisConfig, clk clock.Clock, stats prometheus.Registerer) (r
         return nil, fmt.Errorf("loading TLS config: %w", err)
     }

-    timeout := c.Timeout.Duration
-
-    // TODO(#6517) remove this block.
-    if len(c.Addrs) > 0 && len(c.ShardAddrs) > 0 {
-        return nil, errors.New("both 'addrs' and 'shardAddrs' were provided, only one is allowed")
-    }
-
-    // TODO(#6517) remove this block.
-    if len(c.Addrs) > 0 {
-        rdb := redis.NewClusterClient(&redis.ClusterOptions{
-            Addrs:     c.Addrs,
-            Username:  c.Username,
-            Password:  password,
-            TLSConfig: tlsConfig,
-
-            MaxRetries:      c.MaxRetries,
-            MinRetryBackoff: c.MinRetryBackoff.Duration,
-            MaxRetryBackoff: c.MaxRetryBackoff.Duration,
-            DialTimeout:     c.DialTimeout.Duration,
-            ReadTimeout:     c.ReadTimeout.Duration,
-            WriteTimeout:    c.WriteTimeout.Duration,
-
-            PoolSize:           c.PoolSize,
-            MinIdleConns:       c.MinIdleConns,
-            MaxConnAge:         c.MaxConnAge.Duration,
-            PoolTimeout:        c.PoolTimeout.Duration,
-            IdleTimeout:        c.IdleTimeout.Duration,
-            IdleCheckFrequency: c.IdleCheckFrequency.Duration,
-        })
-        return rocsp.NewClusterWritingClient(rdb, timeout, clk, stats), nil
-    }
-
     rdb := redis.NewRing(&redis.RingOptions{
         Addrs:    c.ShardAddrs,
         Username: c.Username,

@@ -163,13 +125,13 @@ func MakeClient(c *RedisConfig, clk clock.Clock, stats prometheus.Registerer) (r
         IdleTimeout:        c.IdleTimeout.Duration,
         IdleCheckFrequency: c.IdleCheckFrequency.Duration,
     })
-    return rocsp.NewWritingClient(rdb, timeout, clk, stats), nil
+    return rocsp.NewWritingClient(rdb, c.Timeout.Duration, clk, stats), nil
 }

 // MakeReadClient produces a read-only ROCSP client from a config.
-func MakeReadClient(c *RedisConfig, clk clock.Clock, stats prometheus.Registerer) (rocsp.Reader, error) {
-    if len(c.Addrs) == 0 {
-        return nil, errors.New("redis config's 'addrs' field was empty")
+func MakeReadClient(c *RedisConfig, clk clock.Clock, stats prometheus.Registerer) (*rocsp.ROClient, error) {
+    if len(c.ShardAddrs) == 0 {
+        return nil, errors.New("redis config's 'shardAddrs' field was empty")
     }

     password, err := c.PasswordConfig.Pass()

@@ -182,42 +144,6 @@ func MakeReadClient(c *RedisConfig, clk clock.Clock, stats prometheus.Registerer
         return nil, fmt.Errorf("loading TLS config: %w", err)
     }

-    timeout := c.Timeout.Duration
-
-    // TODO(#6517) remove this block.
-    if len(c.Addrs) > 0 && len(c.ShardAddrs) > 0 {
-        return nil, errors.New("both 'addrs' and 'shardAddrs' were provided, only one is allowed")
-    }
-
-    // TODO(#6517) remove this block.
-    if len(c.Addrs) > 0 {
-        rdb := redis.NewClusterClient(&redis.ClusterOptions{
-            Addrs:     c.Addrs,
-            Username:  c.Username,
-            Password:  password,
-            TLSConfig: tlsConfig,
-
-            ReadOnly:       c.ReadOnly,
-            RouteByLatency: c.RouteByLatency,
-            RouteRandomly:  c.RouteRandomly,
-
-            PoolFIFO: c.PoolFIFO,
-
-            MaxRetries:      c.MaxRetries,
-            MinRetryBackoff: c.MinRetryBackoff.Duration,
-            MaxRetryBackoff: c.MaxRetryBackoff.Duration,
-            DialTimeout:     c.DialTimeout.Duration,
-            ReadTimeout:     c.ReadTimeout.Duration,
-
-            PoolSize:           c.PoolSize,
-            MinIdleConns:       c.MinIdleConns,
-            MaxConnAge:         c.MaxConnAge.Duration,
-            PoolTimeout:        c.PoolTimeout.Duration,
-            IdleTimeout:        c.IdleTimeout.Duration,
-            IdleCheckFrequency: c.IdleCheckFrequency.Duration,
-        })
-        return rocsp.NewClusterReadingClient(rdb, timeout, clk, stats), nil
-    }
     rdb := redis.NewRing(&redis.RingOptions{
         Addrs:    c.ShardAddrs,
         Username: c.Username,

@@ -239,7 +165,7 @@ func MakeReadClient(c *RedisConfig, clk clock.Clock, stats prometheus.Registerer
         IdleTimeout:        c.IdleTimeout.Duration,
         IdleCheckFrequency: c.IdleCheckFrequency.Duration,
     })
-    return rocsp.NewReadingClient(rdb, timeout, clk, stats), nil
+    return rocsp.NewReadingClient(rdb, c.Timeout.Duration, clk, stats), nil
 }

 // A ShortIDIssuer combines an issuance.Certificate with some fields necessary
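To show how the two constructors above fit together after this change, here is a small hypothetical helper (not part of this diff; the name newROCSPClients is invented for illustration). It assumes it lives in the same package as MakeClient and MakeReadClient and uses only the signatures visible in this change:

```go
// newROCSPClients builds both flavors of ROCSP client from one RedisConfig.
func newROCSPClients(c *RedisConfig, clk clock.Clock, stats prometheus.Registerer) (*rocsp.RWClient, *rocsp.ROClient, error) {
	// Read-write Ring-backed client, for components that store OCSP responses.
	rw, err := MakeClient(c, clk, stats)
	if err != nil {
		return nil, nil, fmt.Errorf("creating read-write ROCSP client: %w", err)
	}
	// Read-only client, for components that only look responses up.
	ro, err := MakeReadClient(c, clk, stats)
	if err != nil {
		return nil, nil, fmt.Errorf("creating read-only ROCSP client: %w", err)
	}
	return rw, ro, nil
}
```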
@@ -31,8 +31,8 @@ func makeClient() (*RWClient, clock.Clock) {

     rdb := redis.NewRing(&redis.RingOptions{
         Addrs: map[string]string{
-            "shard1": "10.33.33.8:4218",
-            "shard2": "10.33.33.9:4218",
+            "shard1": "10.33.33.2:4218",
+            "shard2": "10.33.33.3:4218",
         },
         Username: "unittest-rw",
         Password: "824968fa490f4ecec1e52d5e34916bdb60d45f8d",

@@ -43,10 +43,6 @@ func makeClient() (*RWClient, clock.Clock) {
 }

 func TestSetAndGet(t *testing.T) {
-    // TODO(#6517) remove this block.
-    if os.Getenv("BOULDER_CONFIG_DIR") == "test/config" {
-        t.Skip("Skipping test in config mode")
-    }
     client, _ := makeClient()
     fmt.Println(client.Ping(context.Background()))

@@ -1,188 +0,0 @@
-package rocsp
-
-import (
-    "context"
-    "errors"
-    "fmt"
-    "strings"
-    "time"
-
-    "github.com/go-redis/redis/v8"
-    "github.com/jmhodges/clock"
-    "github.com/letsencrypt/boulder/core"
-    "github.com/prometheus/client_golang/prometheus"
-    "golang.org/x/crypto/ocsp"
-)
-
-// TODO(#6517) remove this file and replace the Reader and Writer interfaces
-// with the structs in rocsp.go.
-
-// Reader is an interface for read-only Redis clients. It's implemented by
-// CROClient and ROClient.
-type Reader interface {
-    GetResponse(context.Context, string) ([]byte, error)
-    Ping(context.Context) error
-    ScanResponses(context.Context, string) <-chan ScanResponsesResult
-}
-
-// CROClient represents a read-only Redis client.
-type CROClient struct {
-    rdb        *redis.ClusterClient
-    timeout    time.Duration
-    clk        clock.Clock
-    getLatency *prometheus.HistogramVec
-}
-
-// NewClusterReadingClient creates a read-only client for use with a Redis Cluster. The
-// timeout applies to all requests, though a shorter timeout can be applied on a
-// per-request basis using context.Context. rdb.Options().Addrs must have at
-// least one entry.
-func NewClusterReadingClient(rdb *redis.ClusterClient, timeout time.Duration, clk clock.Clock, stats prometheus.Registerer) *CROClient {
-    if len(rdb.Options().Addrs) == 0 {
-        return nil
-    }
-    labels := prometheus.Labels{
-        "addresses": strings.Join(rdb.Options().Addrs, ", "),
-        "user":      rdb.Options().Username,
-    }
-    stats.MustRegister(newMetricsCollector(rdb, labels))
-    getLatency := prometheus.NewHistogramVec(
-        prometheus.HistogramOpts{
-            Name: "rocsp_get_latency",
-            Help: "Histogram of latencies of rocsp.GetResponse calls with result",
-            // 8 buckets, ranging from 0.5ms to 2s
-            Buckets: prometheus.ExponentialBucketsRange(0.0005, 2, 8),
-        },
-        []string{"result"},
-    )
-    stats.MustRegister(getLatency)
-
-    return &CROClient{
-        rdb:        rdb,
-        timeout:    timeout,
-        clk:        clk,
-        getLatency: getLatency,
-    }
-}
-
-func (c *CROClient) Ping(ctx context.Context) error {
-    ctx, cancel := context.WithTimeout(ctx, c.timeout)
-    defer cancel()
-    return c.rdb.Ping(ctx).Err()
-}
-
-// Writer is an interface for read-only Redis clients. It's implemented by
-// CRWClient and RWClient.
-type Writer interface {
-    StoreResponse(context.Context, *ocsp.Response) error
-    GetResponse(context.Context, string) ([]byte, error)
-    Ping(context.Context) error
-    ScanResponses(context.Context, string) <-chan ScanResponsesResult
-}
-
-// WritingClient represents a Redis client that can both read and write.
-type CRWClient struct {
-    *CROClient
-    storeResponseLatency *prometheus.HistogramVec
-}
-
-// NewWritingClient creates a WritingClient.
-func NewClusterWritingClient(rdb *redis.ClusterClient, timeout time.Duration, clk clock.Clock, stats prometheus.Registerer) *CRWClient {
-    storeResponseLatency := prometheus.NewHistogramVec(
-        prometheus.HistogramOpts{
-            Name: "rocsp_store_response_latency",
-            Help: "Histogram of latencies of rocsp.StoreResponse calls with result labels",
-        },
-        []string{"result"},
-    )
-    stats.MustRegister(storeResponseLatency)
-    return &CRWClient{NewClusterReadingClient(rdb, timeout, clk, stats), storeResponseLatency}
-}
-
-// StoreResponse parses the given bytes as an OCSP response, and stores it
-// into Redis. The expiration time (ttl) of the Redis key is set to OCSP
-// response `NextUpdate`.
-func (c *CRWClient) StoreResponse(ctx context.Context, resp *ocsp.Response) error {
-    start := c.clk.Now()
-    ctx, cancel := context.WithTimeout(ctx, c.timeout)
-    defer cancel()
-
-    serial := core.SerialToString(resp.SerialNumber)
-
-    // Set the ttl duration to the response `NextUpdate - now()`
-    ttl := time.Until(resp.NextUpdate)
-
-    err := c.rdb.Set(ctx, serial, resp.Raw, ttl).Err()
-    if err != nil {
-        state := "failed"
-        if errors.Is(err, context.DeadlineExceeded) {
-            state = "deadlineExceeded"
-        } else if errors.Is(err, context.Canceled) {
-            state = "canceled"
-        }
-        c.storeResponseLatency.With(prometheus.Labels{"result": state}).Observe(time.Since(start).Seconds())
-        return fmt.Errorf("setting response: %w", err)
-    }
-
-    c.storeResponseLatency.With(prometheus.Labels{"result": "success"}).Observe(time.Since(start).Seconds())
-    return nil
-}
-
-// GetResponse fetches a response for the given serial number.
-// Returns error if the OCSP response fails to parse.
-func (c *CROClient) GetResponse(ctx context.Context, serial string) ([]byte, error) {
-    start := c.clk.Now()
-    ctx, cancel := context.WithTimeout(ctx, c.timeout)
-    defer cancel()
-
-    resp, err := c.rdb.Get(ctx, serial).Result()
-    if err != nil {
-        // go-redis `Get` returns redis.Nil error when key does not exist. In
-        // that case return a `ErrRedisNotFound` error.
-        if errors.Is(err, redis.Nil) {
-            c.getLatency.With(prometheus.Labels{"result": "notFound"}).Observe(time.Since(start).Seconds())
-            return nil, ErrRedisNotFound
-        }
-
-        state := "failed"
-        if errors.Is(err, context.DeadlineExceeded) {
-            state = "deadlineExceeded"
-        } else if errors.Is(err, context.Canceled) {
-            state = "canceled"
-        }
-        c.getLatency.With(prometheus.Labels{"result": state}).Observe(time.Since(start).Seconds())
-        return nil, fmt.Errorf("getting response: %w", err)
-    }
-
-    c.getLatency.With(prometheus.Labels{"result": "success"}).Observe(time.Since(start).Seconds())
-    return []byte(resp), nil
-}
-
-// ScanResponses scans Redis for all OCSP responses where the serial number matches the provided pattern.
-// It returns immediately and emits results and errors on `<-chan ScanResponsesResult`. It closes the
-// channel when it is done or hits an error.
-func (c *CROClient) ScanResponses(ctx context.Context, serialPattern string) <-chan ScanResponsesResult {
-    pattern := fmt.Sprintf("r{%s}", serialPattern)
-    results := make(chan ScanResponsesResult)
-    go func() {
-        defer close(results)
-        err := c.rdb.ForEachMaster(ctx, func(ctx context.Context, rdb *redis.Client) error {
-            iter := rdb.Scan(ctx, 0, pattern, 0).Iterator()
-            for iter.Next(ctx) {
-                key := iter.Val()
-                val, err := c.rdb.Get(ctx, key).Result()
-                if err != nil {
-                    results <- ScanResponsesResult{Err: fmt.Errorf("getting response: %w", err)}
-                    continue
-                }
-                results <- ScanResponsesResult{Serial: key, Body: []byte(val)}
-            }
-            return iter.Err()
-        })
-        if err != nil {
-            results <- ScanResponsesResult{Err: err}
-            return
-        }
-    }()
-    return results
-}
@@ -1,71 +0,0 @@
-package rocsp
-
-import (
-    "bytes"
-    "context"
-    "os"
-    "testing"
-    "time"
-
-    "github.com/go-redis/redis/v8"
-    "github.com/jmhodges/clock"
-    "github.com/letsencrypt/boulder/cmd"
-    "github.com/letsencrypt/boulder/metrics"
-    "golang.org/x/crypto/ocsp"
-)
-
-func makeClusterClient() (*CRWClient, clock.Clock) {
-    CACertFile := "../test/redis-tls/minica.pem"
-    CertFile := "../test/redis-tls/boulder/cert.pem"
-    KeyFile := "../test/redis-tls/boulder/key.pem"
-    tlsConfig := cmd.TLSConfig{
-        CACertFile: &CACertFile,
-        CertFile:   &CertFile,
-        KeyFile:    &KeyFile,
-    }
-    tlsConfig2, err := tlsConfig.Load()
-    if err != nil {
-        panic(err)
-    }
-
-    rdb := redis.NewClusterClient(&redis.ClusterOptions{
-        Addrs:     []string{"10.33.33.2:4218"},
-        Username:  "unittest-rw",
-        Password:  "824968fa490f4ecec1e52d5e34916bdb60d45f8d",
-        TLSConfig: tlsConfig2,
-    })
-    clk := clock.NewFake()
-
-    return NewClusterWritingClient(rdb, 5*time.Second, clk, metrics.NoopRegisterer), clk
-}
-
-func TestClusterSetAndGet(t *testing.T) {
-    // TODO(#6517) remove this block.
-    if os.Getenv("BOULDER_CONFIG_DIR") == "test/config-next" {
-        t.Skip("Skipping test in config-next mode")
-    }
-    client, _ := makeClusterClient()
-
-    respBytes, err := os.ReadFile("testdata/ocsp.response")
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    response, err := ocsp.ParseResponse(respBytes, nil)
-    if err != nil {
-        t.Fatal(err)
-    }
-    err = client.StoreResponse(context.Background(), response)
-    if err != nil {
-        t.Fatalf("storing response: %s", err)
-    }
-
-    serial := "ffaa13f9c34be80b8e2532b83afe063b59a6"
-    resp2, err := client.GetResponse(context.Background(), serial)
-    if err != nil {
-        t.Fatalf("getting response: %s", err)
-    }
-    if !bytes.Equal(resp2, respBytes) {
-        t.Errorf("response written and response retrieved were not equal")
-    }
-}
@@ -4,8 +4,8 @@
       "username": "ocsp-responder",
       "passwordFile": "test/secrets/ocsp_responder_redis_password",
       "shardAddrs": {
-        "shard1": "10.33.33.8:4218",
-        "shard2": "10.33.33.9:4218"
+        "shard1": "10.33.33.2:4218",
+        "shard2": "10.33.33.3:4218"
       },
       "timeout": "5s",
       "poolSize": 100,

@@ -50,8 +50,8 @@
       "username": "boulder-sa",
       "passwordFile": "test/secrets/sa_redis_password",
       "shardAddrs": {
-        "shard1": "10.33.33.8:4218",
-        "shard2": "10.33.33.9:4218"
+        "shard1": "10.33.33.2:4218",
+        "shard2": "10.33.33.3:4218"
       },
       "timeout": "5s",
       "tls": {
@@ -7,9 +7,10 @@
     "redis": {
       "username": "ocsp-responder",
       "passwordFile": "test/secrets/ocsp_responder_redis_password",
-      "addrs": [
-        "10.33.33.7:4218"
-      ],
+      "shardAddrs": {
+        "shard1": "10.33.33.2:4218",
+        "shard2": "10.33.33.3:4218"
+      },
       "timeout": "5s",
       "poolSize": 100,
       "routeRandomly": true,

@@ -4,9 +4,10 @@
     "redis": {
       "username": "ocsp-updater",
       "passwordFile": "test/secrets/rocsp_tool_password",
-      "addrs": [
-        "10.33.33.7:4218"
-      ],
+      "shardAddrs": {
+        "shard1": "10.33.33.2:4218",
+        "shard2": "10.33.33.3:4218"
+      },
       "timeout": "5s",
       "tls": {
         "caCertFile": "test/redis-tls/minica.pem",
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash

 set -e -u

@@ -10,9 +10,8 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 rm -f /var/run/rsyslogd.pid
 service rsyslog start

-# make sure we can reach the mysqldb and Redis cluster is done being created.
+# make sure we can reach the mysqldb.
 ./test/wait-for-it.sh boulder-mysql 3306
-./test/wait-for-it.sh 10.33.33.10 4218

 # create the database
 MYSQL_CONTAINER=1 $DIR/create_db.sh
@@ -2,12 +2,12 @@

 set -feuo pipefail

-ARGS="--tls \
--p 4218 \
+ARGS="-p 4218 \
+--tls \
 --cert /test/redis-tls/redis/cert.pem \
 --key /test/redis-tls/redis/key.pem \
 --cacert /test/redis-tls/minica.pem \
 --user replication-user \
 --pass 435e9c4225f08813ef3af7c725f0d30d263b9cd3"

-exec docker compose exec bredis_clusterer redis-cli "${ARGS}" "${@}"
+exec docker compose exec bredis_1 redis-cli $ARGS "${@}"
@@ -1,40 +0,0 @@
-port 0
-tls-port 4218
-save 60 1
-maxmemory-policy noeviction
-loglevel warning
-# List of renamed commands comes from:
-# https://www.digitalocean.com/community/tutorials/how-to-secure-your-redis-installation-on-ubuntu-18-04
-rename-command BGREWRITEAOF ""
-rename-command BGSAVE ""
-rename-command CONFIG ""
-rename-command DEBUG ""
-rename-command DEL ""
-rename-command FLUSHALL ""
-rename-command FLUSHDB ""
-rename-command KEYS ""
-rename-command PEXPIRE ""
-rename-command RENAME ""
-rename-command SAVE ""
-rename-command SHUTDOWN ""
-rename-command SPOP ""
-rename-command SREM ""
-user default off
-user ocsp-updater on +@all ~* >e4e9ce7845cb6adbbc44fb1d9deb05e6b4dc1386
-user ocsp-responder on +@all ~* >0e5a4c8b5faaf3194c8ad83c3dd9a0dd8a75982b
-user boulder-sa on +@all ~* >de75ae663596735b90e461e5924f71a4c5f622ab
-user boulder-ra on +@all ~* >b3b2fcbbf46fe39fd522c395a51f84d93a98ff2f
-user replication-user on +@all ~* >435e9c4225f08813ef3af7c725f0d30d263b9cd3
-user unittest-rw on +@all ~* >824968fa490f4ecec1e52d5e34916bdb60d45f8d
-masteruser replication-user
-masterauth 435e9c4225f08813ef3af7c725f0d30d263b9cd3
-tls-protocols "TLSv1.3"
-tls-cert-file /test/redis-tls/redis/cert.pem
-tls-key-file /test/redis-tls/redis/key.pem
-tls-ca-cert-file /test/redis-tls/minica.pem
-tls-cluster yes
-tls-replication yes
-cluster-enabled yes
-cluster-config-file cluster-config-1234.conf
-cluster-node-timeout 5000
-cluster-require-full-coverage no
@@ -1,35 +0,0 @@
-#!/bin/bash
-
-# TODO(#6517) remove this file.
-
-set -feuo pipefail
-
-ARGS="--tls \
-  --cert /test/redis-tls/redis/cert.pem \
-  --key /test/redis-tls/redis/key.pem \
-  --cacert /test/redis-tls/minica.pem \
-  --user replication-user \
-  --pass 435e9c4225f08813ef3af7c725f0d30d263b9cd3"
-
-if [[ "${BOULDER_CONFIG_DIR}" == "test/config" ]]
-then
-  if ! redis-cli \
-    --cluster check \
-    10.33.33.2:4218 \
-    $ARGS ; then
-    echo "Cluster needs creation!"
-    redis-cli \
-      --cluster-yes \
-      --cluster create \
-      10.33.33.2:4218 10.33.33.3:4218 10.33.33.4:4218 \
-      10.33.33.5:4218 10.33.33.6:4218 10.33.33.7:4218 \
-      --cluster-replicas 1 \
-      $ARGS
-  fi
-fi
-
-# Hack: run redis-server so we have something listening on a port.
-# The Boulder container will wait for this port on this container to be
-# available before starting up.
-echo "Starting a server so everything knows we're done."
-exec redis-server /test/redis.config