Remove direct usages of go-statsd-client in favor of using metrics.Scope (#2136)
Fixes #2118, fixes #2082.
parent a9d476ecf6 · commit c8f1fb3e2f
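The pattern is the same in every file below: structs and constructors that took a raw statsd.Statter now take a metrics.Scope, each binary builds one scope per component with metrics.NewStatsdScope, and call sites drop both the hand-written component prefix and the trailing 1.0 sample rate. A minimal before/after sketch, assuming the metrics.Scope behaviour implied by the call sites in this diff (Inc/Gauge/TimingDuration without a rate argument, NewScope for nested prefixes, NewNoopScope for tests):

package main

import (
    "time"

    "github.com/cactus/go-statsd-client/statsd"
    "github.com/letsencrypt/boulder/metrics"
)

func main() {
    // Old style: callers held a statsd.Statter and spelled out the full metric
    // name plus a 1.0 sample rate on every call, e.g.
    //   stats.Inc("CCU.SuccessfulPurges", 1, 1.0)

    // New style (this commit): the statsd client is wrapped once in a Scope
    // that carries the component prefix; call sites drop the prefix and rate.
    statter, _ := statsd.NewNoopClient(nil)
    scope := metrics.NewStatsdScope(statter, "CCU")
    scope.Inc("SuccessfulPurges", 1)
    scope.TimingDuration("PurgeRequestLatency", 42*time.Millisecond)

    // Sub-scopes add another prefix segment, as the OCSP updater loops do.
    scope.NewScope("NewCertificates").Inc("Ticks", 1)

    // Tests use a no-op scope instead of a no-op statsd client.
    _ = metrics.NewNoopScope()
}

Tests that assert on emitted metrics switch from mocks.Statter to a gomock-generated mock_metrics.MockScope, as in ca/ca_test.go below.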
@@ -14,11 +14,11 @@ import (
 "strings"
 "time"

-"github.com/cactus/go-statsd-client/statsd"
 "github.com/jmhodges/clock"
-"github.com/letsencrypt/boulder/core"
+
+"github.com/letsencrypt/boulder/core"
 blog "github.com/letsencrypt/boulder/log"
+"github.com/letsencrypt/boulder/metrics"
 )

 const (
@@ -52,7 +52,7 @@ type CachePurgeClient struct {
 retries int
 retryBackoff time.Duration
 log blog.Logger
-stats statsd.Statter
+stats metrics.Scope
 clk clock.Clock
 }
@@ -77,8 +77,9 @@ func NewCachePurgeClient(
 retries int,
 retryBackoff time.Duration,
 log blog.Logger,
-stats statsd.Statter,
+stats metrics.Scope,
 ) (*CachePurgeClient, error) {
+stats = stats.NewScope("CCU")
 if strings.HasSuffix(endpoint, "/") {
 endpoint = endpoint[:len(endpoint)-1]
 }
@@ -181,7 +182,7 @@ func (cpc *CachePurgeClient) purge(urls []string) error {
 rS := cpc.clk.Now()
 resp, err := cpc.client.Do(req)
-cpc.stats.TimingDuration("CCU.PurgeRequestLatency", time.Since(rS), 1.0)
+cpc.stats.TimingDuration("PurgeRequestLatency", time.Since(rS))
 if err != nil {
 return err
 }
@@ -229,10 +230,10 @@ func (cpc *CachePurgeClient) Purge(urls []string) error {
 if err != nil {
 if _, ok := err.(errFatal); ok {
 cpc.log.AuditErr(err.Error())
-cpc.stats.Inc("CCU.FatalFailures", 1, 1.0)
+cpc.stats.Inc("FatalFailures", 1)
 return err
 }
-cpc.stats.Inc("CCU.RetryableFailures", 1, 1.0)
+cpc.stats.Inc("RetryableFailures", 1)
 continue
 }
 successful = true
@@ -240,10 +241,10 @@ func (cpc *CachePurgeClient) Purge(urls []string) error {
 }

 if !successful {
-cpc.stats.Inc("CCU.FatalFailures", 1, 1.0)
+cpc.stats.Inc("FatalFailures", 1)
 return ErrAllRetriesFailed
 }

-cpc.stats.Inc("CCU.SuccessfulPurges", 1, 1.0)
+cpc.stats.Inc("SuccessfulPurges", 1)
 return nil
 }

@@ -7,14 +7,14 @@ import (
 "testing"
 "time"

-"github.com/cactus/go-statsd-client/statsd"
 "github.com/jmhodges/clock"

+"github.com/letsencrypt/boulder/metrics"
 "github.com/letsencrypt/boulder/test"
 )

 func TestConstructAuthHeader(t *testing.T) {
-stats, _ := statsd.NewNoopClient(nil)
+stats := metrics.NewNoopScope()
 cpc, err := NewCachePurgeClient(
 "https://akaa-baseurl-xxxxxxxxxxx-xxxxxxxxxxxxx.luna.akamaiapis.net",
 "akab-client-token-xxx-xxxxxxxxxxxxxxxx",
@@ -184,6 +184,7 @@ func NewDNSResolverImpl(
 clk clock.Clock,
 maxTries int,
 ) *DNSResolverImpl {
+stats = stats.NewScope("DNS")
 // TODO(jmhodges): make constructor use an Option func pattern
 dnsClient := new(dns.Client)

@@ -12,7 +12,6 @@ import (
 "golang.org/x/net/context"

-"github.com/cactus/go-statsd-client/statsd"
 "github.com/jmhodges/clock"
 "github.com/letsencrypt/boulder/metrics"
 "github.com/letsencrypt/boulder/test"
@@ -220,8 +219,7 @@ func TestMain(m *testing.M) {
 }

 func newTestStats() metrics.Scope {
-c, _ := statsd.NewNoopClient()
-return metrics.NewStatsdScope(c, "fakesvc")
+return metrics.NewNoopScope()
 }

 var testStats = newTestStats()
ca/ca.go (28 changed lines)
@@ -17,7 +17,6 @@ import (
 "strings"
 "time"

-"github.com/cactus/go-statsd-client/statsd"
 cfsslConfig "github.com/cloudflare/cfssl/config"
 cferr "github.com/cloudflare/cfssl/errors"
 "github.com/cloudflare/cfssl/ocsp"
@@ -32,6 +31,7 @@ import (
 csrlib "github.com/letsencrypt/boulder/csr"
 "github.com/letsencrypt/boulder/goodkey"
 blog "github.com/letsencrypt/boulder/log"
+"github.com/letsencrypt/boulder/metrics"
 )

 // Miscellaneous PKIX OIDs that we need to refer to
@@ -74,25 +74,25 @@ var (
 // Metrics for CA statistics
 const (
 // Increments when CA observes an HSM or signing error
-metricSigningError = "CA.SigningError"
+metricSigningError = "SigningError"
 metricHSMError = metricSigningError + ".HSMError"

 // Increments when CA handles a CSR requesting a "basic" extension:
 // authorityInfoAccess, authorityKeyIdentifier, extKeyUsage, keyUsage,
 // basicConstraints, certificatePolicies, crlDistributionPoints,
 // subjectAlternativeName, subjectKeyIdentifier,
-metricCSRExtensionBasic = "CA.CSRExtensions.Basic"
+metricCSRExtensionBasic = "CSRExtensions.Basic"

 // Increments when CA handles a CSR requesting a TLS Feature extension
-metricCSRExtensionTLSFeature = "CA.CSRExtensions.TLSFeature"
+metricCSRExtensionTLSFeature = "CSRExtensions.TLSFeature"

 // Increments when CA handles a CSR requesting a TLS Feature extension with
 // an invalid value
-metricCSRExtensionTLSFeatureInvalid = "CA.CSRExtensions.TLSFeatureInvalid"
+metricCSRExtensionTLSFeatureInvalid = "CSRExtensions.TLSFeatureInvalid"

 // Increments when CA handles a CSR requesting an extension other than those
 // listed above
-metricCSRExtensionOther = "CA.CSRExtensions.Other"
+metricCSRExtensionOther = "CSRExtensions.Other"
 )

 type certificateStorage interface {
@@ -114,7 +114,7 @@ type CertificateAuthorityImpl struct {
 keyPolicy goodkey.KeyPolicy
 clk clock.Clock
 log blog.Logger
-stats statsd.Statter
+stats metrics.Scope
 prefix int // Prepended to the serial number
 validityPeriod time.Duration
 maxNames int
@@ -179,7 +179,7 @@ func makeInternalIssuers(
 func NewCertificateAuthorityImpl(
 config cmd.CAConfig,
 clk clock.Clock,
-stats statsd.Statter,
+stats metrics.Scope,
 issuers []Issuer,
 keyPolicy goodkey.KeyPolicy,
 logger blog.Logger,
@@ -255,9 +255,9 @@ func NewCertificateAuthorityImpl(
 func (ca *CertificateAuthorityImpl) noteSignError(err error) {
 if err != nil {
 if _, ok := err.(*pkcs11.Error); ok {
-ca.stats.Inc(metricHSMError, 1, 1.0)
+ca.stats.Inc(metricHSMError, 1)
 } else if cfErr, ok := err.(*cferr.Error); ok {
-ca.stats.Inc(fmt.Sprintf("%s.%d", metricSigningError, cfErr.ErrorCode), 1, 1.0)
+ca.stats.Inc(fmt.Sprintf("%s.%d", metricSigningError, cfErr.ErrorCode), 1)
 }
 }
 return
@@ -292,14 +292,14 @@ func (ca *CertificateAuthorityImpl) extensionsFromCSR(csr *x509.CertificateReque
 switch {
 case ext.Type.Equal(oidTLSFeature):
-ca.stats.Inc(metricCSRExtensionTLSFeature, 1, 1.0)
+ca.stats.Inc(metricCSRExtensionTLSFeature, 1)
 value, ok := ext.Value.([]byte)
 if !ok {
 msg := fmt.Sprintf("Malformed extension with OID %v", ext.Type)
 return nil, core.MalformedRequestError(msg)
 } else if !bytes.Equal(value, mustStapleFeatureValue) {
 msg := fmt.Sprintf("Unsupported value for extension with OID %v", ext.Type)
-ca.stats.Inc(metricCSRExtensionTLSFeatureInvalid, 1, 1.0)
+ca.stats.Inc(metricCSRExtensionTLSFeatureInvalid, 1)
 return nil, core.MalformedRequestError(msg)
 }
@@ -324,11 +324,11 @@ func (ca *CertificateAuthorityImpl) extensionsFromCSR(csr *x509.CertificateReque
 }

 if hasBasic {
-ca.stats.Inc(metricCSRExtensionBasic, 1, 1.0)
+ca.stats.Inc(metricCSRExtensionBasic, 1)
 }

 if hasOther {
-ca.stats.Inc(metricCSRExtensionOther, 1, 1.0)
+ca.stats.Inc(metricCSRExtensionOther, 1)
 }

 return extensions, nil
@@ -13,7 +13,9 @@ import (
 cfsslConfig "github.com/cloudflare/cfssl/config"
 "github.com/cloudflare/cfssl/helpers"
+"github.com/golang/mock/gomock"
 "github.com/jmhodges/clock"
+"github.com/letsencrypt/boulder/metrics/mock_metrics"
 "golang.org/x/crypto/ocsp"
 "golang.org/x/net/context"
@@ -21,6 +23,7 @@ import (
 "github.com/letsencrypt/boulder/core"
 "github.com/letsencrypt/boulder/goodkey"
 blog "github.com/letsencrypt/boulder/log"
+"github.com/letsencrypt/boulder/metrics"
 "github.com/letsencrypt/boulder/mocks"
 "github.com/letsencrypt/boulder/policy"
 "github.com/letsencrypt/boulder/test"
@@ -133,7 +136,7 @@ type testCtx struct {
 issuers []Issuer
 keyPolicy goodkey.KeyPolicy
 fc clock.FakeClock
-stats *mocks.Statter
+stats metrics.Scope
 logger blog.Logger
 }
@@ -236,8 +239,6 @@ func setup(t *testing.T) *testCtx {
 },
 }

-stats := mocks.NewStatter()
-
 issuers := []Issuer{{caKey, caCert}}

 keyPolicy := goodkey.KeyPolicy{
@@ -254,7 +255,7 @@ func setup(t *testing.T) *testCtx {
 issuers,
 keyPolicy,
 fc,
-stats,
+metrics.NewNoopScope(),
 logger,
 }
 }
@@ -684,10 +685,15 @@ func countMustStaple(t *testing.T, cert *x509.Certificate) (count int) {
 func TestExtensions(t *testing.T) {
 testCtx := setup(t)
 testCtx.caConfig.MaxNames = 3
+
+ctrl := gomock.NewController(t)
+defer ctrl.Finish()
+stats := mock_metrics.NewMockScope(ctrl)
+
 ca, err := NewCertificateAuthorityImpl(
 testCtx.caConfig,
 testCtx.fc,
-testCtx.stats,
+stats,
 testCtx.issuers,
 testCtx.keyPolicy,
 testCtx.logger)
@@ -717,36 +723,34 @@ func TestExtensions(t *testing.T) {
 // With enableMustStaple = false, should issue successfully and not add
 // Must Staple.
+stats.EXPECT().Inc(metricCSRExtensionTLSFeature, int64(1)).Return(nil)
 noStapleCert := sign(mustStapleCSR)
 test.AssertEquals(t, countMustStaple(t, noStapleCert), 0)

 // With enableMustStaple = true, a TLS feature extension should put a must-staple
 // extension into the cert
 ca.enableMustStaple = true
+stats.EXPECT().Inc(metricCSRExtensionTLSFeature, int64(1)).Return(nil)
 singleStapleCert := sign(mustStapleCSR)
 test.AssertEquals(t, countMustStaple(t, singleStapleCert), 1)
-test.AssertEquals(t, testCtx.stats.Counters[metricCSRExtensionTLSFeature], int64(2))

 // Even if there are multiple TLS Feature extensions, only one extension should be included
+stats.EXPECT().Inc(metricCSRExtensionTLSFeature, int64(1)).Return(nil)
 duplicateMustStapleCert := sign(duplicateMustStapleCSR)
 test.AssertEquals(t, countMustStaple(t, duplicateMustStapleCert), 1)
-test.AssertEquals(t, testCtx.stats.Counters[metricCSRExtensionTLSFeature], int64(3))

 // ... but if it doesn't ask for stapling, there should be an error
+stats.EXPECT().Inc(metricCSRExtensionTLSFeature, int64(1)).Return(nil)
+stats.EXPECT().Inc(metricCSRExtensionTLSFeatureInvalid, int64(1)).Return(nil)
 _, err = ca.IssueCertificate(ctx, *tlsFeatureUnknownCSR, 1001)
 test.AssertError(t, err, "Allowed a CSR with an empty TLS feature extension")
 if _, ok := err.(core.MalformedRequestError); !ok {
 t.Errorf("Wrong error type when rejecting a CSR with empty TLS feature extension")
 }
-test.AssertEquals(t, testCtx.stats.Counters[metricCSRExtensionTLSFeature], int64(4))
-test.AssertEquals(t, testCtx.stats.Counters[metricCSRExtensionTLSFeatureInvalid], int64(1))

 // Unsupported extensions should be silently ignored, having the same
 // extensions as the TLS Feature cert above, minus the TLS Feature Extension
+stats.EXPECT().Inc(metricCSRExtensionOther, int64(1)).Return(nil)
 unsupportedExtensionCert := sign(unsupportedExtensionCSR)
 test.AssertEquals(t, len(unsupportedExtensionCert.Extensions), len(singleStapleCert.Extensions)-1)
-test.AssertEquals(t, testCtx.stats.Counters[metricCSRExtensionOther], int64(1))

 // None of the above CSRs have basic extensions
-test.AssertEquals(t, testCtx.stats.Counters[metricCSRExtensionBasic], int64(0))
 }
@@ -11,7 +11,6 @@ import (
 "golang.org/x/net/context"

-"github.com/cactus/go-statsd-client/statsd"
 gorp "gopkg.in/gorp.v1"

 "github.com/letsencrypt/boulder/cmd"
@@ -55,23 +54,24 @@ type config struct {
 Syslog cmd.SyslogConfig
 }

-func setupContext(c config) (rpc.RegistrationAuthorityClient, blog.Logger, *gorp.DbMap, rpc.StorageAuthorityClient, statsd.Statter) {
+func setupContext(c config) (rpc.RegistrationAuthorityClient, blog.Logger, *gorp.DbMap, rpc.StorageAuthorityClient, metrics.Scope) {
 stats, logger := cmd.StatsAndLogging(c.Statsd, c.Syslog)
+scope := metrics.NewStatsdScope(stats, "AdminRevoker")

 amqpConf := c.Revoker.AMQP
-rac, err := rpc.NewRegistrationAuthorityClient(clientName, amqpConf, stats)
+rac, err := rpc.NewRegistrationAuthorityClient(clientName, amqpConf, scope)
 cmd.FailOnError(err, "Unable to create CA client")

 dbURL, err := c.Revoker.DBConfig.URL()
 cmd.FailOnError(err, "Couldn't load DB URL")
 dbMap, err := sa.NewDbMap(dbURL, c.Revoker.DBConfig.MaxDBConns)
 cmd.FailOnError(err, "Couldn't setup database connection")
-go sa.ReportDbConnCount(dbMap, metrics.NewStatsdScope(stats, "AdminRevoker"))
+go sa.ReportDbConnCount(dbMap, scope)

-sac, err := rpc.NewStorageAuthorityClient(clientName, amqpConf, stats)
+sac, err := rpc.NewStorageAuthorityClient(clientName, amqpConf, scope)
 cmd.FailOnError(err, "Failed to create SA client")

-return *rac, logger, dbMap, *sac, stats
+return *rac, logger, dbMap, *sac, scope
 }

 func revokeBySerial(ctx context.Context, serial string, reasonCode revocation.Reason, rac rpc.RegistrationAuthorityClient, logger blog.Logger, tx *gorp.Transaction) (err error) {
@@ -224,8 +224,8 @@ func main() {
 authsRevoked,
 pendingAuthsRevoked,
 ))
-stats.Inc("admin-revoker.revokedAuthorizations", authsRevoked, 1.0)
-stats.Inc("admin-revoker.revokedPendingAuthorizations", pendingAuthsRevoked, 1.0)
+stats.Inc("RevokedAuthorizations", authsRevoked)
+stats.Inc("RevokedPendingAuthorizations", pendingAuthsRevoked)

 default:
 usage()
@@ -18,6 +18,7 @@ import (
 "github.com/letsencrypt/boulder/core"
 "github.com/letsencrypt/boulder/goodkey"
 bgrpc "github.com/letsencrypt/boulder/grpc"
+"github.com/letsencrypt/boulder/metrics"
 "github.com/letsencrypt/boulder/policy"
 pubPB "github.com/letsencrypt/boulder/publisher/proto"
 "github.com/letsencrypt/boulder/rpc"
@@ -132,6 +133,7 @@ func main() {
 go cmd.DebugServer(c.CA.DebugAddr)

 stats, logger := cmd.StatsAndLogging(c.Statsd, c.Syslog)
+scope := metrics.NewStatsdScope(stats, "CA")
 defer logger.AuditPanic()
 logger.Info(cmd.VersionString(clientName))
@@ -152,17 +154,17 @@ func main() {
 cai, err := ca.NewCertificateAuthorityImpl(
 c.CA,
 clock.Default(),
-stats,
+scope,
 issuers,
 goodkey.NewKeyPolicy(),
 logger)
 cmd.FailOnError(err, "Failed to create CA impl")
 cai.PA = pa

-go cmd.ProfileCmd("CA", stats)
+go cmd.ProfileCmd(scope)

 amqpConf := c.CA.AMQP
-cai.SA, err = rpc.NewStorageAuthorityClient(clientName, amqpConf, stats)
+cai.SA, err = rpc.NewStorageAuthorityClient(clientName, amqpConf, scope)
 cmd.FailOnError(err, "Failed to create SA client")

 if c.CA.PublisherService != nil {
@@ -170,11 +172,11 @@ func main() {
 cmd.FailOnError(err, "Failed to load credentials and create connection to service")
 cai.Publisher = bgrpc.NewPublisherClientWrapper(pubPB.NewPublisherClient(conn), c.CA.PublisherService.Timeout.Duration)
 } else {
-cai.Publisher, err = rpc.NewPublisherClient(clientName, amqpConf, stats)
+cai.Publisher, err = rpc.NewPublisherClient(clientName, amqpConf, scope)
 cmd.FailOnError(err, "Failed to create Publisher client")
 }

-cas, err := rpc.NewAmqpRPCServer(amqpConf, c.CA.MaxConcurrentRPCServerRequests, stats, logger)
+cas, err := rpc.NewAmqpRPCServer(amqpConf, c.CA.MaxConcurrentRPCServerRequests, scope, logger)
 cmd.FailOnError(err, "Unable to create CA RPC server")
 err = rpc.NewCertificateAuthorityServer(cas, cai)
 cmd.FailOnError(err, "Failed to create Certificate Authority RPC server")
@@ -51,6 +51,7 @@ func main() {
 go cmd.DebugServer(c.Publisher.DebugAddr)

 stats, logger := cmd.StatsAndLogging(c.Statsd, c.Syslog)
+scope := metrics.NewStatsdScope(stats, "Publisher")
 defer logger.AuditPanic()
 logger.Info(cmd.VersionString(clientName))
@@ -72,11 +73,9 @@ func main() {
 }

 amqpConf := c.Publisher.AMQP
-sa, err := rpc.NewStorageAuthorityClient(clientName, amqpConf, stats)
+sa, err := rpc.NewStorageAuthorityClient(clientName, amqpConf, scope)
 cmd.FailOnError(err, "Unable to create SA client")

-scope := metrics.NewStatsdScope(stats, "Publisher")
-
 pubi := publisher.New(
 bundle,
 logs,
@@ -85,7 +84,7 @@ func main() {
 scope,
 sa)

-go cmd.ProfileCmd("Publisher", stats)
+go cmd.ProfileCmd(scope)

 if c.Publisher.GRPC != nil {
 s, l, err := bgrpc.NewServer(c.Publisher.GRPC, scope)
@@ -98,7 +97,7 @@ func main() {
 }()
 }

-pubs, err := rpc.NewAmqpRPCServer(amqpConf, c.Publisher.MaxConcurrentRPCServerRequests, stats, logger)
+pubs, err := rpc.NewAmqpRPCServer(amqpConf, c.Publisher.MaxConcurrentRPCServerRequests, scope, logger)
 cmd.FailOnError(err, "Unable to create Publisher RPC server")
 err = rpc.NewPublisherServer(pubs, pubi)
 cmd.FailOnError(err, "Unable to setup Publisher RPC server")
@@ -92,6 +92,7 @@ func main() {
 go cmd.DebugServer(c.RA.DebugAddr)

 stats, logger := cmd.StatsAndLogging(c.Statsd, c.Syslog)
+scope := metrics.NewStatsdScope(stats, "RA")
 defer logger.AuditPanic()
 logger.Info(cmd.VersionString(clientName))
@@ -107,7 +108,7 @@ func main() {
 err = pa.SetHostnamePolicyFile(c.RA.HostnamePolicyFile)
 cmd.FailOnError(err, "Couldn't load hostname policy file")

-go cmd.ProfileCmd("RA", stats)
+go cmd.ProfileCmd(scope)

 amqpConf := c.RA.AMQP
 var vac core.ValidationAuthority
@@ -116,14 +117,14 @@ func main() {
 cmd.FailOnError(err, "Unable to create VA client")
 vac = bgrpc.NewValidationAuthorityGRPCClient(conn)
 } else {
-vac, err = rpc.NewValidationAuthorityClient(clientName, amqpConf, stats)
+vac, err = rpc.NewValidationAuthorityClient(clientName, amqpConf, scope)
 cmd.FailOnError(err, "Unable to create VA client")
 }

-cac, err := rpc.NewCertificateAuthorityClient(clientName, amqpConf, stats)
+cac, err := rpc.NewCertificateAuthorityClient(clientName, amqpConf, scope)
 cmd.FailOnError(err, "Unable to create CA client")

-sac, err := rpc.NewStorageAuthorityClient(clientName, amqpConf, stats)
+sac, err := rpc.NewStorageAuthorityClient(clientName, amqpConf, scope)
 cmd.FailOnError(err, "Unable to create SA client")

 // TODO(patf): remove once RA.authorizationLifetimeDays is deployed
@@ -141,7 +142,7 @@ func main() {
 rai := ra.NewRegistrationAuthorityImpl(
 clock.Default(),
 logger,
-stats,
+scope,
 c.RA.MaxContactsPerRegistration,
 goodkey.NewKeyPolicy(),
 c.RA.MaxNames,
@@ -156,7 +157,6 @@ func main() {
 raDNSTimeout, err := time.ParseDuration(c.Common.DNSTimeout)
 cmd.FailOnError(err, "Couldn't parse RA DNS timeout")
-scoped := metrics.NewStatsdScope(stats, "RA", "DNS")
 dnsTries := c.RA.DNSTries
 if dnsTries < 1 {
 dnsTries = 1
@@ -166,14 +166,14 @@ func main() {
 raDNSTimeout,
 []string{c.Common.DNSResolver},
 nil,
-scoped,
+scope,
 clock.Default(),
 dnsTries)
 } else {
 rai.DNSResolver = bdns.NewTestDNSResolverImpl(
 raDNSTimeout,
 []string{c.Common.DNSResolver},
-scoped,
+scope,
 clock.Default(),
 dnsTries)
 }
@@ -182,7 +182,7 @@ func main() {
 rai.CA = cac
 rai.SA = sac

-ras, err := rpc.NewAmqpRPCServer(amqpConf, c.RA.MaxConcurrentRPCServerRequests, stats, logger)
+ras, err := rpc.NewAmqpRPCServer(amqpConf, c.RA.MaxConcurrentRPCServerRequests, scope, logger)
 cmd.FailOnError(err, "Unable to create RA RPC server")
 err = rpc.NewRegistrationAuthorityServer(ras, rai, logger)
 cmd.FailOnError(err, "Unable to setup RA RPC server")
@@ -42,6 +42,7 @@ func main() {
 go cmd.DebugServer(c.SA.DebugAddr)

 stats, logger := cmd.StatsAndLogging(c.Statsd, c.Syslog)
+scope := metrics.NewStatsdScope(stats, "SA")
 defer logger.AuditPanic()
 logger.Info(cmd.VersionString(clientName))
@@ -53,15 +54,15 @@ func main() {
 dbMap, err := sa.NewDbMap(dbURL, saConf.DBConfig.MaxDBConns)
 cmd.FailOnError(err, "Couldn't connect to SA database")

-go sa.ReportDbConnCount(dbMap, metrics.NewStatsdScope(stats, "SA"))
+go sa.ReportDbConnCount(dbMap, scope)

 sai, err := sa.NewSQLStorageAuthority(dbMap, clock.Default(), logger)
 cmd.FailOnError(err, "Failed to create SA impl")

-go cmd.ProfileCmd("SA", stats)
+go cmd.ProfileCmd(scope)

 amqpConf := saConf.AMQP
-sas, err := rpc.NewAmqpRPCServer(amqpConf, c.SA.MaxConcurrentRPCServerRequests, stats, logger)
+sas, err := rpc.NewAmqpRPCServer(amqpConf, c.SA.MaxConcurrentRPCServerRequests, scope, logger)
 cmd.FailOnError(err, "Unable to create SA RPC server")

 err = rpc.NewStorageAuthorityServer(sas, sai)
@@ -74,10 +74,11 @@ func main() {
 go cmd.DebugServer(c.VA.DebugAddr)

 stats, logger := cmd.StatsAndLogging(c.Statsd, c.Syslog)
+scope := metrics.NewStatsdScope(stats, "VA")
 defer logger.AuditPanic()
 logger.Info(cmd.VersionString(clientName))

-go cmd.ProfileCmd("VA", stats)
+go cmd.ProfileCmd(scope)

 pc := &cmd.PortConfig{
 HTTPPort: 80,
@@ -101,14 +102,13 @@ func main() {
 caaClient = caaPB.NewCAACheckerClient(conn)
 }

-scoped := metrics.NewStatsdScope(stats, "VA", "DNS")
 sbc := newGoogleSafeBrowsing(c.VA.GoogleSafeBrowsing)

 var cdrClient *cdr.CAADistributedResolver
 if c.VA.CAADistributedResolver != nil {
 var err error
 cdrClient, err = cdr.New(
-scoped,
+scope,
 c.VA.CAADistributedResolver.Timeout.Duration,
 c.VA.CAADistributedResolver.MaxFailures,
 c.VA.CAADistributedResolver.Proxies,
@@ -131,13 +131,13 @@ func main() {
 dnsTimeout,
 []string{c.Common.DNSResolver},
 caaSERVFAILExceptions,
-scoped,
+scope,
 clk,
 dnsTries)
 r.LookupIPv6 = c.VA.LookupIPv6
 resolver = r
 } else {
-r := bdns.NewTestDNSResolverImpl(dnsTimeout, []string{c.Common.DNSResolver}, scoped, clk, dnsTries)
+r := bdns.NewTestDNSResolverImpl(dnsTimeout, []string{c.Common.DNSResolver}, scope, clk, dnsTries)
 r.LookupIPv6 = c.VA.LookupIPv6
 resolver = r
 }
@@ -150,13 +150,13 @@ func main() {
 resolver,
 c.VA.UserAgent,
 c.VA.IssuerDomain,
-stats,
+scope,
 clk,
 logger)

 amqpConf := c.VA.AMQP
 if c.VA.GRPC != nil {
-s, l, err := bgrpc.NewServer(c.VA.GRPC, metrics.NewStatsdScope(stats, "VA"))
+s, l, err := bgrpc.NewServer(c.VA.GRPC, scope)
 cmd.FailOnError(err, "Unable to setup VA gRPC server")
 err = bgrpc.RegisterValidationAuthorityGRPCServer(s, vai)
 cmd.FailOnError(err, "Unable to register VA gRPC server")
@@ -166,7 +166,7 @@ func main() {
 }()
 }

-vas, err := rpc.NewAmqpRPCServer(amqpConf, c.VA.MaxConcurrentRPCServerRequests, stats, logger)
+vas, err := rpc.NewAmqpRPCServer(amqpConf, c.VA.MaxConcurrentRPCServerRequests, scope, logger)
 cmd.FailOnError(err, "Unable to create VA RPC server")
 err = rpc.NewValidationAuthorityServer(vas, vai)
 cmd.FailOnError(err, "Unable to setup VA RPC server")
@@ -54,7 +54,7 @@ type config struct {
 }
 }

-func setupWFE(c config, logger blog.Logger, stats metrics.Statter) (*rpc.RegistrationAuthorityClient, *rpc.StorageAuthorityClient) {
+func setupWFE(c config, logger blog.Logger, stats metrics.Scope) (*rpc.RegistrationAuthorityClient, *rpc.StorageAuthorityClient) {
 amqpConf := c.WFE.AMQP
 rac, err := rpc.NewRegistrationAuthorityClient(clientName, amqpConf, stats)
 cmd.FailOnError(err, "Unable to create RA client")
@@ -80,12 +80,13 @@ func main() {
 go cmd.DebugServer(c.WFE.DebugAddr)

 stats, logger := cmd.StatsAndLogging(c.Statsd, c.Syslog)
+scope := metrics.NewStatsdScope(stats, "WFE")
 defer logger.AuditPanic()
 logger.Info(cmd.VersionString(clientName))

-wfe, err := wfe.NewWebFrontEndImpl(stats, clock.Default(), goodkey.NewKeyPolicy(), logger)
+wfe, err := wfe.NewWebFrontEndImpl(scope, clock.Default(), goodkey.NewKeyPolicy(), logger)
 cmd.FailOnError(err, "Unable to create WFE")
-rac, sac := setupWFE(c, logger, stats)
+rac, sac := setupWFE(c, logger, scope)
 wfe.RA = rac
 wfe.SA = sac
@@ -111,14 +112,14 @@ func main() {
 logger.Info(fmt.Sprintf("WFE using key policy: %#v", goodkey.NewKeyPolicy()))

-go cmd.ProfileCmd("WFE", stats)
+go cmd.ProfileCmd(scope)

 // Set up paths
 wfe.BaseURL = c.Common.BaseURL
 h, err := wfe.Handler()
 cmd.FailOnError(err, "Problem setting up HTTP handlers")

-httpMonitor := metrics.NewHTTPMonitor(stats, h, "WFE")
+httpMonitor := metrics.NewHTTPMonitor(scope, h)

 logger.Info(fmt.Sprintf("Server running, listening on %s...\n", c.WFE.ListenAddress))
 srv := &http.Server{
@@ -129,7 +130,7 @@ func main() {
 hd := &httpdown.HTTP{
 StopTimeout: c.WFE.ShutdownStopTimeout.Duration,
 KillTimeout: c.WFE.ShutdownKillTimeout.Duration,
-Stats: metrics.NewFBAdapter(stats, "WFE", clock.Default()),
+Stats: metrics.NewFBAdapter(scope, clock.Default()),
 }
 err = httpdown.ListenAndServe(srv, hd)
 cmd.FailOnError(err, "Error starting HTTP server")
@@ -230,7 +230,7 @@ func main() {
 stats, err := statsd.NewClient(c.StatsdServer, c.StatsdPrefix)
 cmd.FailOnError(err, "Failed to create StatsD client")
-scope := metrics.NewStatsdScope(stats, "caa-service")
+scope := metrics.NewStatsdScope(stats, "CAAService")

 caaSERVFAILExceptions, err := bdns.ReadHostList(c.CAASERVFAILExceptions)
 cmd.FailOnError(err, "Couldn't read CAASERVFAILExceptions file")
@@ -18,7 +18,6 @@ import (
 "golang.org/x/net/context"

-"github.com/cactus/go-statsd-client/statsd"
 "github.com/jmhodges/clock"
 "gopkg.in/gorp.v1"
@@ -44,7 +43,7 @@ type regStore interface {
 }

 type mailer struct {
-stats statsd.Statter
+stats metrics.Scope
 log blog.Logger
 dbMap *gorp.DbMap
 rs regStore
@@ -107,7 +106,7 @@ func (m *mailer) sendNags(contacts []string, certs []*x509.Certificate) error {
 msgBuf := new(bytes.Buffer)
 err := m.emailTemplate.Execute(msgBuf, email)
 if err != nil {
-m.stats.Inc("Mailer.Expiration.Errors.SendingNag.TemplateFailure", 1, 1.0)
+m.stats.Inc("Errors.SendingNag.TemplateFailure", 1)
 return err
 }
 startSending := m.clk.Now()
@@ -117,7 +116,7 @@ func (m *mailer) sendNags(contacts []string, certs []*x509.Certificate) error {
 }
 finishSending := m.clk.Now()
 elapsed := finishSending.Sub(startSending)
-m.stats.TimingDuration("Mailer.Expiration.SendLatency", elapsed, 1.0)
+m.stats.TimingDuration("SendLatency", elapsed)
 return nil
 }
@@ -160,7 +159,7 @@ func (m *mailer) processCerts(allCerts []core.Certificate) {
 reg, err := m.rs.GetRegistration(ctx, regID)
 if err != nil {
 m.log.AuditErr(fmt.Sprintf("Error fetching registration %d: %s", regID, err))
-m.stats.Inc("Mailer.Expiration.Errors.GetRegistration", 1, 1.0)
+m.stats.Inc("Errors.GetRegistration", 1)
 continue
 }
@@ -170,7 +169,7 @@ func (m *mailer) processCerts(allCerts []core.Certificate) {
 if err != nil {
 // TODO(#1420): tell registration about this error
 m.log.AuditErr(fmt.Sprintf("Error parsing certificate %s: %s", cert.Serial, err))
-m.stats.Inc("Mailer.Expiration.Errors.ParseCertificate", 1, 1.0)
+m.stats.Inc("Errors.ParseCertificate", 1)
 continue
 }
@@ -179,10 +178,10 @@ func (m *mailer) processCerts(allCerts []core.Certificate) {
 m.log.AuditErr(fmt.Sprintf("expiration-mailer: error fetching renewal state: %v", err))
 // assume not renewed
 } else if renewed {
-m.stats.Inc("Mailer.Expiration.Renewed", 1, 1.0)
+m.stats.Inc("Renewed", 1)
 if err := m.updateCertStatus(cert.Serial); err != nil {
 m.log.AuditErr(fmt.Sprintf("Error updating certificate status for %s: %s", cert.Serial, err))
-m.stats.Inc("Mailer.Expiration.Errors.UpdateCertificateStatus", 1, 1.0)
+m.stats.Inc("Errors.UpdateCertificateStatus", 1)
 }
 continue
 }
@@ -209,7 +208,7 @@ func (m *mailer) processCerts(allCerts []core.Certificate) {
 err = m.updateCertStatus(serial)
 if err != nil {
 m.log.AuditErr(fmt.Sprintf("Error updating certificate status for %s: %s", serial, err))
-m.stats.Inc("Mailer.Expiration.Errors.UpdateCertificateStatus", 1, 1.0)
+m.stats.Inc("Errors.UpdateCertificateStatus", 1)
 continue
 }
 }
@@ -271,15 +270,15 @@ func (m *mailer) findExpiringCertificates() error {
 "nag group %s expiring certificates at configured capacity (cert limit %d)\n",
 expiresIn.String(),
 m.limit))
-statName := fmt.Sprintf("Mailer.Expiration.Errors.Nag-%s.AtCapacity", expiresIn.String())
-m.stats.Inc(statName, 1, 1.0)
+statName := fmt.Sprintf("Errors.Nag-%s.AtCapacity", expiresIn.String())
+m.stats.Inc(statName, 1)
 }

 processingStarted := m.clk.Now()
 m.processCerts(certs)
 processingEnded := m.clk.Now()
 elapsed := processingEnded.Sub(processingStarted)
-m.stats.TimingDuration("Mailer.Expiration.ProcessingCertificatesLatency", elapsed, 1.0)
+m.stats.TimingDuration("ProcessingCertificatesLatency", elapsed)
 }

 return nil
@@ -299,7 +298,7 @@ func (ds durationSlice) Swap(a, b int) {
 ds[a], ds[b] = ds[b], ds[a]
 }

-const clientName = "ExpirationMailer"
+const clientName = "Expiration"

 type config struct {
 Mailer struct {
@@ -345,6 +344,7 @@ func main() {
 go cmd.DebugServer(c.Mailer.DebugAddr)

 stats, logger := cmd.StatsAndLogging(c.Statsd, c.Syslog)
+scope := metrics.NewStatsdScope(stats, "Expiration")
 defer logger.AuditPanic()
 logger.Info(clientName)
@@ -362,10 +362,10 @@ func main() {
 dbMap, err := sa.NewDbMap(dbURL, c.Mailer.DBConfig.MaxDBConns)
 sa.SetSQLDebug(dbMap, logger)
 cmd.FailOnError(err, "Could not connect to database")
-go sa.ReportDbConnCount(dbMap, metrics.NewStatsdScope(stats, "ExpirationMailer"))
+go sa.ReportDbConnCount(dbMap, scope)

 amqpConf := c.Mailer.AMQP
-sac, err := rpc.NewStorageAuthorityClient(clientName, amqpConf, stats)
+sac, err := rpc.NewStorageAuthorityClient(clientName, amqpConf, scope)
 cmd.FailOnError(err, "Failed to create SA client")

 // Load email template
@@ -386,7 +386,7 @@ func main() {
 smtpPassword,
 *fromAddress,
 logger,
-stats,
+scope,
 *reconnBase,
 *reconnMax)
 err = mailClient.Connect()
@@ -421,7 +421,7 @@ func main() {
 subject = c.Mailer.Subject
 }
 m := mailer{
-stats: stats,
+stats: scope,
 subject: subject,
 log: logger,
 dbMap: dbMap,
@@ -17,7 +17,6 @@ import (
 "golang.org/x/net/context"

-"github.com/cactus/go-statsd-client/statsd"
 "github.com/golang/mock/gomock"
 "github.com/jmhodges/clock"
 "github.com/square/go-jose"
@@ -101,7 +100,7 @@ var (
 )

 func TestSendNags(t *testing.T) {
-stats, _ := statsd.NewNoopClient(nil)
+stats := metrics.NewNoopScope()
 mc := mocks.Mailer{}
 rs := newFakeRegStore()
 fc := newFakeClock(t)
@@ -422,7 +421,8 @@ func TestFindCertsAtCapacity(t *testing.T) {
 ctrl := gomock.NewController(t)
 defer ctrl.Finish()
 statter := metrics.NewMockStatter(ctrl)
-testCtx.m.stats = statter
+stats := metrics.NewStatsdScope(statter, "Expiration")
+testCtx.m.stats = stats

 // Set the limit to 1 so we are "at capacity" with one result
 testCtx.m.limit = 1
@@ -431,14 +431,14 @@ func TestFindCertsAtCapacity(t *testing.T) {
 // Note: this is not the 24h0m0s nag as you would expect sending time.Hour
 // * 24 to setup() for the nag duration. This is because all of the nags are
 // offset by defaultNagCheckInterval, which is 24hrs.
-statter.EXPECT().Inc("Mailer.Expiration.Errors.Nag-48h0m0s.AtCapacity",
+statter.EXPECT().Inc("Expiration.Errors.Nag-48h0m0s.AtCapacity",
 int64(1), float32(1.0))

 // findExpiringCertificates() ends up invoking sendNags which calls
 // TimingDuration so we need to EXPECT that with the mock
-statter.EXPECT().TimingDuration("Mailer.Expiration.SendLatency", time.Duration(0), float32(1.0))
+statter.EXPECT().TimingDuration("Expiration.SendLatency", time.Duration(0), float32(1.0))
 // Similarly, findExpiringCerticates() sends its latency as well
-statter.EXPECT().TimingDuration("Mailer.Expiration.ProcessingCertificatesLatency", time.Duration(0), float32(1.0))
+statter.EXPECT().TimingDuration("Expiration.ProcessingCertificatesLatency", time.Duration(0), float32(1.0))

 err := testCtx.m.findExpiringCertificates()
 test.AssertNotError(t, err, "Failed to find expiring certs")
@@ -855,7 +855,7 @@ func setup(t *testing.T, nagTimes []time.Duration) *testCtx {
 }
 cleanUp := test.ResetSATestDatabase(t)

-stats, _ := statsd.NewNoopClient(nil)
+stats := metrics.NewNoopScope()
 mc := &mocks.Mailer{}

 offsetNags := make([]time.Duration, len(nagTimes))
@@ -10,7 +10,6 @@ import (
 "strings"
 "time"

-"github.com/cactus/go-statsd-client/statsd"
 "github.com/jmhodges/clock"
 "gopkg.in/gorp.v1"
@@ -33,7 +32,7 @@ type eapConfig struct {
 }

 type expiredAuthzPurger struct {
-stats statsd.Statter
+stats metrics.Scope
 log blog.Logger
 clk clock.Clock
 db *gorp.DbMap
@@ -85,7 +84,7 @@ func (p *expiredAuthzPurger) purgeAuthzs(purgeBefore time.Time, yes bool) (int64
 return rowsAffected, err
 }

-p.stats.Inc("PendingAuthzDeleted", rows, 1.0)
+p.stats.Inc("PendingAuthzDeleted", rows)
 rowsAffected += rows
 p.log.Info(fmt.Sprintf("Progress: Deleted %d (%d total) expired pending authorizations", rows, rowsAffected))
@@ -113,6 +112,7 @@ func main() {
 // Set up logging
 stats, auditlogger := cmd.StatsAndLogging(config.ExpiredAuthzPurger.Statsd, config.ExpiredAuthzPurger.Syslog)
+scope := metrics.NewStatsdScope(stats, "AuthzPurger")
 auditlogger.Info(cmd.Version())

 defer auditlogger.AuditPanic()
@@ -122,10 +122,10 @@ func main() {
 cmd.FailOnError(err, "Couldn't load DB URL")
 dbMap, err := sa.NewDbMap(dbURL, config.ExpiredAuthzPurger.DBConfig.MaxDBConns)
 cmd.FailOnError(err, "Could not connect to database")
-go sa.ReportDbConnCount(dbMap, metrics.NewStatsdScope(stats, "AuthzPurger"))
+go sa.ReportDbConnCount(dbMap, scope)

 purger := &expiredAuthzPurger{
-stats: stats,
+stats: scope,
 log: auditlogger,
 clk: cmd.Clock(),
 db: dbMap,
@@ -4,12 +4,12 @@ import (
 "testing"
 "time"

-"github.com/cactus/go-statsd-client/statsd"
 "github.com/jmhodges/clock"
 "golang.org/x/net/context"

 "github.com/letsencrypt/boulder/core"
 blog "github.com/letsencrypt/boulder/log"
+"github.com/letsencrypt/boulder/metrics"
 "github.com/letsencrypt/boulder/sa"
 "github.com/letsencrypt/boulder/sa/satest"
 "github.com/letsencrypt/boulder/test"
@@ -30,7 +30,7 @@ func TestPurgeAuthzs(t *testing.T) {
 }
 cleanUp := test.ResetSATestDatabase(t)
 defer cleanUp()
-stats, _ := statsd.NewNoopClient(nil)
+stats := metrics.NewNoopScope()

 p := expiredAuthzPurger{stats, log, fc, dbMap, 1}
@@ -300,13 +300,14 @@ func main() {
 cmd.FailOnError(err, "Unmarshaling config")

 stats, log := cmd.StatsAndLogging(cfg.Statsd, cfg.Syslog)
+scope := metrics.NewStatsdScope(stats, "NotificationMailer")
 defer log.AuditPanic()

 dbURL, err := cfg.NotifyMailer.DBConfig.URL()
 cmd.FailOnError(err, "Couldn't load DB URL")
 dbMap, err := sa.NewDbMap(dbURL, 10)
 cmd.FailOnError(err, "Could not connect to database")
-go sa.ReportDbConnCount(dbMap, metrics.NewStatsdScope(stats, "NotificationMailer"))
+go sa.ReportDbConnCount(dbMap, scope)

 // Load email body
 body, err := ioutil.ReadFile(*bodyFile)
@@ -336,7 +337,7 @@ func main() {
 smtpPassword,
 *address,
 log,
-stats,
+scope,
 *reconnBase,
 *reconnMax)
 }
@@ -13,7 +13,6 @@ import (
 "strings"
 "time"

-"github.com/cactus/go-statsd-client/statsd"
 cfocsp "github.com/cloudflare/cfssl/ocsp"
 "github.com/facebookgo/httpdown"
 "github.com/jmhodges/clock"
@@ -166,10 +165,11 @@ func main() {
 go cmd.DebugServer(c.OCSPResponder.DebugAddr)

 stats, logger := cmd.StatsAndLogging(c.Statsd, c.Syslog)
+scope := metrics.NewStatsdScope(stats, "OCSPResponder")
 defer logger.AuditPanic()
 logger.Info(cmd.VersionString("ocsp-responder"))

-go cmd.ProfileCmd("OCSP", stats)
+go cmd.ProfileCmd(scope)

 config := c.OCSPResponder
 var source cfocsp.Source
@@ -196,7 +196,7 @@ func main() {
 dbMap, err := sa.NewDbMap(dbConnect, config.DBConfig.MaxDBConns)
 cmd.FailOnError(err, "Could not connect to database")
 sa.SetSQLDebug(dbMap, logger)
-go sa.ReportDbConnCount(dbMap, metrics.NewStatsdScope(stats, "OCSPResponder"))
+go sa.ReportDbConnCount(dbMap, scope)
 source, err = makeDBSource(dbMap, c.Common.IssuerCert, logger)
 cmd.FailOnError(err, "Couldn't load OCSP DB")
 }
@@ -205,7 +205,7 @@ func main() {
 cmd.FailOnError(err, "Couldn't parse shutdown stop timeout")
 killTimeout, err := time.ParseDuration(c.OCSPResponder.ShutdownKillTimeout)
 cmd.FailOnError(err, "Couldn't parse shutdown kill timeout")
-m := mux(stats, c.OCSPResponder.Path, source)
+m := mux(scope, c.OCSPResponder.Path, source)
 srv := &http.Server{
 Addr: c.OCSPResponder.ListenAddress,
 Handler: m,
@@ -214,13 +214,13 @@ func main() {
 hd := &httpdown.HTTP{
 StopTimeout: stopTimeout,
 KillTimeout: killTimeout,
-Stats: metrics.NewFBAdapter(stats, "OCSP", clock.Default()),
+Stats: metrics.NewFBAdapter(scope, clock.Default()),
 }
 err = httpdown.ListenAndServe(srv, hd)
 cmd.FailOnError(err, "Error starting HTTP server")
 }

-func mux(stats statsd.Statter, responderPath string, source cfocsp.Source) http.Handler {
+func mux(scope metrics.Scope, responderPath string, source cfocsp.Source) http.Handler {
 m := http.StripPrefix(responderPath, cfocsp.NewResponder(source))
 h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 if r.Method == "GET" && r.URL.Path == "/" {
@@ -230,5 +230,5 @@ func mux(stats statsd.Statter, responderPath string, source cfocsp.Source) http.
 }
 m.ServeHTTP(w, r)
 })
-return metrics.NewHTTPMonitor(stats, h, "OCSP")
+return metrics.NewHTTPMonitor(scope, h)
 }
@@ -10,17 +10,18 @@ import (
 "testing"
 "time"

-"github.com/cactus/go-statsd-client/statsd"
-"golang.org/x/crypto/ocsp"

 cfocsp "github.com/cloudflare/cfssl/ocsp"
 blog "github.com/letsencrypt/boulder/log"
+"github.com/letsencrypt/boulder/metrics"
 "github.com/letsencrypt/boulder/test"
+"golang.org/x/crypto/ocsp"
 )

 var (
-req = mustRead("./testdata/ocsp.req")
-resp = dbResponse{mustRead("./testdata/ocsp.resp"), time.Now()}
-stats, _ = statsd.NewNoopClient()
+req = mustRead("./testdata/ocsp.req")
+resp = dbResponse{mustRead("./testdata/ocsp.resp"), time.Now()}
+stats = metrics.NewNoopScope()
 )

 func TestMux(t *testing.T) {
@@ -11,7 +11,6 @@ import (
 "path"
 "time"

-"github.com/cactus/go-statsd-client/statsd"
 "github.com/jmhodges/clock"
 "golang.org/x/crypto/ocsp"
 "golang.org/x/net/context"
@@ -40,7 +39,7 @@ type ocspDB interface {
 // OCSPUpdater contains the useful objects for the Updater
 type OCSPUpdater struct {
-stats statsd.Statter
+stats metrics.Scope
 log blog.Logger
 clk clock.Clock
@@ -66,7 +65,7 @@ type OCSPUpdater struct {
 // This is somewhat gross but can be pared down a bit once the publisher and this
 // are fully smooshed together
 func newUpdater(
-stats statsd.Statter,
+stats metrics.Scope,
 clk clock.Clock,
 dbMap ocspDB,
 ca core.CertificateAuthority,
@@ -105,7 +104,7 @@ func newUpdater(
 updater.loops = []*looper{
 {
 clk: clk,
-stats: stats,
+stats: stats.NewScope("NewCertificates"),
 batchSize: config.NewCertificateBatchSize,
 tickDur: config.NewCertificateWindow.Duration,
 tickFunc: updater.newCertificateTick,
@@ -115,7 +114,7 @@ func newUpdater(
 },
 {
 clk: clk,
-stats: stats,
+stats: stats.NewScope("OldOCSPResponses"),
 batchSize: config.OldOCSPBatchSize,
 tickDur: config.OldOCSPWindow.Duration,
 tickFunc: updater.oldOCSPResponsesTick,
@@ -127,7 +126,7 @@ func newUpdater(
 // failureBackoffMax as it doesn't make any calls to the CA
 {
 clk: clk,
-stats: stats,
+stats: stats.NewScope("MissingSCTReceipts"),
 batchSize: config.MissingSCTBatchSize,
 tickDur: config.MissingSCTWindow.Duration,
 tickFunc: updater.missingReceiptsTick,
@@ -342,7 +341,7 @@ func (updater *OCSPUpdater) newCertificateTick(ctx context.Context, batchSize in
 // OCSP responses
 statuses, err := updater.getCertificatesWithMissingResponses(batchSize)
 if err != nil {
-updater.stats.Inc("OCSP.Errors.FindMissingResponses", 1, 1.0)
+updater.stats.Inc("Errors.FindMissingResponses", 1)
 updater.log.AuditErr(fmt.Sprintf("Failed to find certificates with missing OCSP responses: %s", err))
 return err
 }
@@ -369,7 +368,7 @@ func (updater *OCSPUpdater) findRevokedCertificatesToUpdate(batchSize int) ([]co
 func (updater *OCSPUpdater) revokedCertificatesTick(ctx context.Context, batchSize int) error {
 statuses, err := updater.findRevokedCertificatesToUpdate(batchSize)
 if err != nil {
-updater.stats.Inc("OCSP.Errors.FindRevokedCertificates", 1, 1.0)
+updater.stats.Inc("Errors.FindRevokedCertificates", 1)
 updater.log.AuditErr(fmt.Sprintf("Failed to find revoked certificates: %s", err))
 return err
 }
@@ -378,12 +377,12 @@ func (updater *OCSPUpdater) revokedCertificatesTick(ctx context.Context, batchSi
 meta, err := updater.generateRevokedResponse(ctx, status)
 if err != nil {
 updater.log.AuditErr(fmt.Sprintf("Failed to generate revoked OCSP response: %s", err))
-updater.stats.Inc("OCSP.Errors.RevokedResponseGeneration", 1, 1.0)
+updater.stats.Inc("Errors.RevokedResponseGeneration", 1)
 return err
 }
 err = updater.storeResponse(meta)
 if err != nil {
-updater.stats.Inc("OCSP.Errors.StoreRevokedResponse", 1, 1.0)
+updater.stats.Inc("Errors.StoreRevokedResponse", 1)
 updater.log.AuditErr(fmt.Sprintf("Failed to store OCSP response: %s", err))
 continue
 }
@@ -396,17 +395,17 @@ func (updater *OCSPUpdater) generateOCSPResponses(ctx context.Context, statuses
 meta, err := updater.generateResponse(ctx, status)
 if err != nil {
 updater.log.AuditErr(fmt.Sprintf("Failed to generate OCSP response: %s", err))
-updater.stats.Inc("OCSP.Errors.ResponseGeneration", 1, 1.0)
+updater.stats.Inc("Errors.ResponseGeneration", 1)
 return err
 }
-updater.stats.Inc("OCSP.GeneratedResponses", 1, 1.0)
+updater.stats.Inc("GeneratedResponses", 1)
 err = updater.storeResponse(meta)
 if err != nil {
 updater.log.AuditErr(fmt.Sprintf("Failed to store OCSP response: %s", err))
-updater.stats.Inc("OCSP.Errors.StoreResponse", 1, 1.0)
+updater.stats.Inc("Errors.StoreResponse", 1)
 continue
 }
-updater.stats.Inc("OCSP.StoredResponses", 1, 1.0)
+updater.stats.Inc("StoredResponses", 1)
 }
 return nil
 }
@@ -417,7 +416,7 @@ func (updater *OCSPUpdater) oldOCSPResponsesTick(ctx context.Context, batchSize
 now := time.Now()
 statuses, err := updater.findStaleOCSPResponses(now.Add(-updater.ocspMinTimeToExpiry), batchSize)
 if err != nil {
-updater.stats.Inc("OCSP.Errors.FindStaleResponses", 1, 1.0)
+updater.stats.Inc("Errors.FindStaleResponses", 1)
 updater.log.AuditErr(fmt.Sprintf("Failed to find stale OCSP responses: %s", err))
 return err
 }
@@ -498,7 +497,7 @@ func (updater *OCSPUpdater) missingReceiptsTick(ctx context.Context, batchSize i
 type looper struct {
 clk clock.Clock
-stats statsd.Statter
+stats metrics.Scope
 batchSize int
 tickDur time.Duration
 tickFunc func(context.Context, int) error
@@ -512,12 +511,12 @@ func (l *looper) tick() {
 tickStart := l.clk.Now()
 ctx := context.TODO()
 err := l.tickFunc(ctx, l.batchSize)
-l.stats.TimingDuration(fmt.Sprintf("OCSP.%s.TickDuration", l.name), time.Since(tickStart), 1.0)
-l.stats.Inc(fmt.Sprintf("OCSP.%s.Ticks", l.name), 1, 1.0)
+l.stats.TimingDuration("TickDuration", time.Since(tickStart))
+l.stats.Inc("Ticks", 1)
 tickEnd := tickStart.Add(time.Since(tickStart))
 expectedTickEnd := tickStart.Add(l.tickDur)
 if tickEnd.After(expectedTickEnd) {
-l.stats.Inc(fmt.Sprintf("OCSP.%s.LongTicks", l.name), 1, 1.0)
+l.stats.Inc("LongTicks", 1)
 }

 // After we have all the stats stuff out of the way let's check if the tick
@@ -525,7 +524,7 @@ func (l *looper) tick() {
 // sleepDur using the exponentially increasing duration returned by core.RetryBackoff.
 sleepDur := expectedTickEnd.Sub(tickEnd)
 if err != nil {
-l.stats.Inc(fmt.Sprintf("OCSP.%s.FailedTicks", l.name), 1, 1.0)
+l.stats.Inc("FailedTicks", 1)
 l.failures++
 sleepDur = core.RetryBackoff(l.failures, l.tickDur, l.failureBackoffMax, l.failureBackoffFactor)
 } else if l.failures > 0 {
@@ -564,7 +563,7 @@ type config struct {
 }
 }

-func setupClients(c cmd.OCSPUpdaterConfig, stats metrics.Statter) (
+func setupClients(c cmd.OCSPUpdaterConfig, stats metrics.Scope) (
 core.CertificateAuthority,
 core.Publisher,
 core.StorageAuthority,
@@ -604,22 +603,23 @@ func main() {
 go cmd.DebugServer(conf.DebugAddr)

 stats, auditlogger := cmd.StatsAndLogging(c.Statsd, c.Syslog)
+scope := metrics.NewStatsdScope(stats, "OCSPUpdater")
 defer auditlogger.AuditPanic()
 auditlogger.Info(cmd.VersionString(clientName))

-go cmd.ProfileCmd("OCSP-Updater", stats)
+go cmd.ProfileCmd(scope)

 // Configure DB
 dbURL, err := conf.DBConfig.URL()
 cmd.FailOnError(err, "Couldn't load DB URL")
 dbMap, err := sa.NewDbMap(dbURL, conf.DBConfig.MaxDBConns)
 cmd.FailOnError(err, "Could not connect to database")
-go sa.ReportDbConnCount(dbMap, metrics.NewStatsdScope(stats, "OCSPUpdater"))
+go sa.ReportDbConnCount(dbMap, scope)

-cac, pubc, sac := setupClients(conf, stats)
+cac, pubc, sac := setupClients(conf, scope)

 updater, err := newUpdater(
-stats,
+scope,
 clock.Default(),
 dbMap,
 cac,
@@ -9,13 +9,13 @@ import (
 "golang.org/x/net/context"

-"github.com/cactus/go-statsd-client/statsd"
 "github.com/jmhodges/clock"
 "gopkg.in/gorp.v1"

 "github.com/letsencrypt/boulder/cmd"
 "github.com/letsencrypt/boulder/core"
 blog "github.com/letsencrypt/boulder/log"
+"github.com/letsencrypt/boulder/metrics"
 "github.com/letsencrypt/boulder/revocation"
 "github.com/letsencrypt/boulder/sa"
 "github.com/letsencrypt/boulder/sa/satest"
@@ -72,10 +72,8 @@ func setup(t *testing.T) (*OCSPUpdater, core.StorageAuthority, *gorp.DbMap, cloc
 cleanUp := test.ResetSATestDatabase(t)

-stats, _ := statsd.NewNoopClient(nil)
-
 updater, err := newUpdater(
-stats,
+metrics.NewNoopScope(),
 fc,
 dbMap,
 &mockCA{},
@@ -402,10 +400,9 @@ func TestStoreResponseGuard(t *testing.T) {
 func TestLoopTickBackoff(t *testing.T) {
 fc := clock.NewFake()
-stats, _ := statsd.NewNoopClient(nil)
 l := looper{
 clk: fc,
-stats: stats,
+stats: metrics.NewNoopScope(),
 failureBackoffFactor: 1.5,
 failureBackoffMax: 10 * time.Minute,
 tickDur: time.Minute,
@@ -14,11 +14,10 @@ import (
 "golang.org/x/net/context"

-"github.com/cactus/go-statsd-client/statsd"
-
 "github.com/letsencrypt/boulder/cmd"
 "github.com/letsencrypt/boulder/core"
 blog "github.com/letsencrypt/boulder/log"
+"github.com/letsencrypt/boulder/metrics"
 "github.com/letsencrypt/boulder/rpc"
 )
@@ -111,16 +110,17 @@ func parseLogLine(sa certificateStorage, logger blog.Logger, line string) (found
 return true, true
 }

-func setup(configFile string) (statsd.Statter, blog.Logger, *rpc.StorageAuthorityClient) {
+func setup(configFile string) (metrics.Scope, blog.Logger, *rpc.StorageAuthorityClient) {
 configJSON, err := ioutil.ReadFile(configFile)
 cmd.FailOnError(err, "Failed to read config file")
 var conf config
 err = json.Unmarshal(configJSON, &conf)
 cmd.FailOnError(err, "Failed to parse config file")
 stats, logger := cmd.StatsAndLogging(conf.Statsd, conf.Syslog)
-sa, err := rpc.NewStorageAuthorityClient("orphan-finder", &conf.AMQP, stats)
+scope := metrics.NewStatsdScope(stats, "OrphanFinder")
+sa, err := rpc.NewStorageAuthorityClient("orphan-finder", &conf.AMQP, scope)
 cmd.FailOnError(err, "Failed to create SA client")
-return stats, logger, sa
+return scope, logger, sa
 }

 func main() {
@@ -170,9 +170,9 @@ func main() {
 }
 }
 logger.Info(fmt.Sprintf("Found %d orphans and added %d to the database\n", orphansFound, orphansAdded))
-stats.Inc("orphaned-certificates.found", orphansFound, 1.0)
-stats.Inc("orphaned-certificates.added", orphansAdded, 1.0)
-stats.Inc("orphaned-certificates.adding-failed", orphansFound-orphansAdded, 1.0)
+stats.Inc("Found", orphansFound)
+stats.Inc("Added", orphansAdded)
+stats.Inc("AddingFailed", orphansFound-orphansAdded)

 case "parse-der":
 ctx := context.Background()
28
cmd/shell.go
28
cmd/shell.go
|
@@ -33,9 +33,8 @@ import (
"runtime"
"time"

- "github.com/go-sql-driver/mysql"
-
cfsslLog "github.com/cloudflare/cfssl/log"
+ "github.com/go-sql-driver/mysql"

"github.com/letsencrypt/boulder/core"
blog "github.com/letsencrypt/boulder/log"

@@ -121,7 +120,8 @@ func FailOnError(err error, msg string) {
}

// ProfileCmd runs forever, sending Go runtime statistics to StatsD.
- func ProfileCmd(profileName string, stats metrics.Statter) {
+ func ProfileCmd(stats metrics.Scope) {
+ stats = stats.NewScope("Gostats")
var memoryStats runtime.MemStats
prevNumGC := int64(0)
c := time.Tick(1 * time.Second)

@@ -129,14 +129,14 @@ func ProfileCmd(profileName string, stats metrics.Statter) {
runtime.ReadMemStats(&memoryStats)

// Gather goroutine count
- stats.Gauge(fmt.Sprintf("%s.Gostats.Goroutines", profileName), int64(runtime.NumGoroutine()), 1.0)
+ stats.Gauge("Goroutines", int64(runtime.NumGoroutine()))

// Gather various heap metrics
- stats.Gauge(fmt.Sprintf("%s.Gostats.Heap.Alloc", profileName), int64(memoryStats.HeapAlloc), 1.0)
- stats.Gauge(fmt.Sprintf("%s.Gostats.Heap.Objects", profileName), int64(memoryStats.HeapObjects), 1.0)
- stats.Gauge(fmt.Sprintf("%s.Gostats.Heap.Idle", profileName), int64(memoryStats.HeapIdle), 1.0)
- stats.Gauge(fmt.Sprintf("%s.Gostats.Heap.InUse", profileName), int64(memoryStats.HeapInuse), 1.0)
- stats.Gauge(fmt.Sprintf("%s.Gostats.Heap.Released", profileName), int64(memoryStats.HeapReleased), 1.0)
+ stats.Gauge("Heap.Alloc", int64(memoryStats.HeapAlloc))
+ stats.Gauge("Heap.Objects", int64(memoryStats.HeapObjects))
+ stats.Gauge("Heap.Idle", int64(memoryStats.HeapIdle))
+ stats.Gauge("Heap.InUse", int64(memoryStats.HeapInuse))
+ stats.Gauge("Heap.Released", int64(memoryStats.HeapReleased))

// Gather various GC related metrics
if memoryStats.NumGC > 0 {

@@ -150,16 +150,16 @@ func ProfileCmd(profileName string, stats metrics.Statter) {
}
gcPauseAvg := totalRecentGC / uint64(realBufSize)
lastGC := memoryStats.PauseNs[(memoryStats.NumGC+255)%256]
- stats.Timing(fmt.Sprintf("%s.Gostats.Gc.PauseAvg", profileName), int64(gcPauseAvg), 1.0)
- stats.Gauge(fmt.Sprintf("%s.Gostats.Gc.LastPause", profileName), int64(lastGC), 1.0)
+ stats.Timing("Gc.PauseAvg", int64(gcPauseAvg))
+ stats.Gauge("Gc.LastPause", int64(lastGC))
}
- stats.Gauge(fmt.Sprintf("%s.Gostats.Gc.NextAt", profileName), int64(memoryStats.NextGC), 1.0)
+ stats.Gauge("Gc.NextAt", int64(memoryStats.NextGC))
// Send both a counter and a gauge here we can much more easily observe
// the GC rate (versus the raw number of GCs) in graphing tools that don't
// like deltas
- stats.Gauge(fmt.Sprintf("%s.Gostats.Gc.Count", profileName), int64(memoryStats.NumGC), 1.0)
+ stats.Gauge("Gc.Count", int64(memoryStats.NumGC))
gcInc := int64(memoryStats.NumGC) - prevNumGC
- stats.Inc(fmt.Sprintf("%s.Gostats.Gc.Rate", profileName), gcInc, 1.0)
+ stats.Inc("Gc.Rate", gcInc)
prevNumGC += gcInc
}
}
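For callers, the ProfileCmd change means the component name now travels in the scope rather than as a string argument. A rough wiring sketch under that assumption; the statsd address and the "WFE" prefix are placeholders, and error handling is elided.

package example

import (
	"github.com/cactus/go-statsd-client/statsd"
	"github.com/letsencrypt/boulder/cmd"
	"github.com/letsencrypt/boulder/metrics"
)

func startRuntimeProfiling() {
	statter, _ := statsd.NewClient("localhost:8125", "Boulder")
	// ProfileCmd adds its own "Gostats" sub-scope, so a gauge such as
	// Heap.Alloc should come out rooted at Boulder.WFE.Gostats.
	scope := metrics.NewStatsdScope(statter, "WFE")
	go cmd.ProfileCmd(scope)
}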
|
||||
|
|
|
@ -17,7 +17,6 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cactus/go-statsd-client/statsd"
|
||||
"github.com/jmhodges/clock"
|
||||
|
||||
"github.com/letsencrypt/boulder/core"
|
||||
|
@ -57,7 +56,7 @@ type MailerImpl struct {
|
|||
client smtpClient
|
||||
clk clock.Clock
|
||||
csprgSource idGenerator
|
||||
stats *metrics.StatsdScope
|
||||
stats metrics.Scope
|
||||
reconnectBase time.Duration
|
||||
reconnectMax time.Duration
|
||||
}
|
||||
|
@ -113,7 +112,7 @@ func New(
|
|||
password string,
|
||||
from mail.Address,
|
||||
logger blog.Logger,
|
||||
stats statsd.Statter,
|
||||
stats metrics.Scope,
|
||||
reconnectBase time.Duration,
|
||||
reconnectMax time.Duration) *MailerImpl {
|
||||
return &MailerImpl{
|
||||
|
@ -127,7 +126,7 @@ func New(
|
|||
from: from,
|
||||
clk: clock.Default(),
|
||||
csprgSource: realSource{},
|
||||
stats: metrics.NewStatsdScope(stats, "Mailer"),
|
||||
stats: stats.NewScope("Mailer"),
|
||||
reconnectBase: reconnectBase,
|
||||
reconnectMax: reconnectMax,
|
||||
}
|
||||
|
@ -136,8 +135,7 @@ func New(
|
|||
// New constructs a Mailer suitable for doing a dry run. It simply logs each
|
||||
// command that would have been run, at debug level.
|
||||
func NewDryRun(from mail.Address, logger blog.Logger) *MailerImpl {
|
||||
statter, _ := statsd.NewNoopClient(nil)
|
||||
stats := metrics.NewStatsdScope(statter)
|
||||
stats := metrics.NewNoopScope()
|
||||
return &MailerImpl{
|
||||
dialer: dryRunClient{logger},
|
||||
from: from,
|
||||
|
|
|
@ -11,10 +11,10 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/cactus/go-statsd-client/statsd"
|
||||
"github.com/jmhodges/clock"
|
||||
|
||||
blog "github.com/letsencrypt/boulder/log"
|
||||
"github.com/letsencrypt/boulder/metrics"
|
||||
"github.com/letsencrypt/boulder/test"
|
||||
)
|
||||
|
||||
|
@ -26,7 +26,7 @@ func (f fakeSource) generate() *big.Int {
|
|||
|
||||
func TestGenerateMessage(t *testing.T) {
|
||||
fc := clock.NewFake()
|
||||
stats, _ := statsd.NewNoopClient(nil)
|
||||
stats := metrics.NewNoopScope()
|
||||
fromAddress, _ := mail.ParseAddress("happy sender <send@email.com>")
|
||||
log := blog.UseMock()
|
||||
m := New("", "", "", "", *fromAddress, log, stats, 0, 0)
|
||||
|
@ -52,7 +52,7 @@ func TestGenerateMessage(t *testing.T) {
|
|||
|
||||
func TestFailNonASCIIAddress(t *testing.T) {
|
||||
log := blog.UseMock()
|
||||
stats, _ := statsd.NewNoopClient(nil)
|
||||
stats := metrics.NewNoopScope()
|
||||
fromAddress, _ := mail.ParseAddress("send@email.com")
|
||||
m := New("", "", "", "", *fromAddress, log, stats, 0, 0)
|
||||
_, err := m.generateMessage([]string{"遗憾@email.com"}, "test subject", "this is the body\n")
|
||||
|
@ -160,7 +160,7 @@ func disconnectHandler(closeFirst int) connHandler {
|
|||
|
||||
func setup(t *testing.T) (*MailerImpl, net.Listener, func()) {
|
||||
const port = "16632"
|
||||
stats, _ := statsd.NewNoopClient(nil)
|
||||
stats := metrics.NewNoopScope()
|
||||
fromAddress, _ := mail.ParseAddress("you-are-a-winner@example.com")
|
||||
log := blog.UseMock()
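The test-side pattern is the same across the commit: a silent Scope from metrics.NewNoopScope() replaces statsd.NewNoopClient. A sketch of the mailer test setup under the constructor signature shown above, assuming the mailer lives in the boulder mail package; the SMTP parameters are empty placeholders as in the real test, and the bmail alias exists only for this example.

package example

import (
	"net/mail"

	blog "github.com/letsencrypt/boulder/log"
	bmail "github.com/letsencrypt/boulder/mail"
	"github.com/letsencrypt/boulder/metrics"
)

func newTestMailer() *bmail.MailerImpl {
	stats := metrics.NewNoopScope() // replaces statsd.NewNoopClient(nil)
	from, _ := mail.ParseAddress("sender@example.com")
	log := blog.UseMock()
	return bmail.New("", "", "", "", *from, log, stats, 0, 0)
}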
|
||||
|
||||
|
|
|
@ -13,65 +13,62 @@ import (
|
|||
|
||||
// HTTPMonitor stores some server state
|
||||
type HTTPMonitor struct {
|
||||
stats statsd.Statter
|
||||
statsPrefix string
|
||||
stats Scope
|
||||
handler http.Handler
|
||||
connectionsInFlight int64
|
||||
}
|
||||
|
||||
// NewHTTPMonitor returns a new initialized HTTPMonitor
|
||||
func NewHTTPMonitor(stats statsd.Statter, handler http.Handler, prefix string) *HTTPMonitor {
|
||||
func NewHTTPMonitor(stats Scope, handler http.Handler) *HTTPMonitor {
|
||||
return &HTTPMonitor{
|
||||
stats: stats,
|
||||
handler: handler,
|
||||
statsPrefix: prefix,
|
||||
connectionsInFlight: 0,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *HTTPMonitor) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
h.stats.Inc(fmt.Sprintf("%s.HTTP.Rate", h.statsPrefix), 1, 1.0)
|
||||
h.stats.Inc("HTTP.Rate", 1)
|
||||
inFlight := atomic.AddInt64(&h.connectionsInFlight, 1)
|
||||
h.stats.Gauge(fmt.Sprintf("%s.HTTP.OpenConnections", h.statsPrefix), inFlight, 1.0)
|
||||
h.stats.Gauge("HTTP.OpenConnections", inFlight)
|
||||
|
||||
h.handler.ServeHTTP(w, r)
|
||||
|
||||
inFlight = atomic.AddInt64(&h.connectionsInFlight, -1)
|
||||
h.stats.Gauge(fmt.Sprintf("%s.HTTP.ConnectionsInFlight", h.statsPrefix), inFlight, 1.0)
|
||||
h.stats.Gauge("HTTP.ConnectionsInFlight", inFlight)
|
||||
}
|
||||
|
||||
// FBAdapter provides a facebookgo/stats client interface that sends metrics via
|
||||
// a StatsD client
|
||||
type FBAdapter struct {
|
||||
stats statsd.Statter
|
||||
prefix string
|
||||
clk clock.Clock
|
||||
stats Scope
|
||||
clk clock.Clock
|
||||
}
|
||||
|
||||
// NewFBAdapter returns a new adapter
|
||||
func NewFBAdapter(stats statsd.Statter, prefix string, clock clock.Clock) FBAdapter {
|
||||
return FBAdapter{stats: stats, prefix: prefix, clk: clock}
|
||||
func NewFBAdapter(stats Scope, clock clock.Clock) FBAdapter {
|
||||
return FBAdapter{stats: stats, clk: clock}
|
||||
}
|
||||
|
||||
// BumpAvg is essentially statsd.Statter.Gauge
|
||||
func (fba FBAdapter) BumpAvg(key string, val float64) {
|
||||
fba.stats.Gauge(fmt.Sprintf("%s.%s", fba.prefix, key), int64(val), 1.0)
|
||||
fba.stats.Gauge(key, int64(val))
|
||||
}
|
||||
|
||||
// BumpSum is essentially statsd.Statter.Inc (httpdown only ever uses positive
|
||||
// deltas)
|
||||
func (fba FBAdapter) BumpSum(key string, val float64) {
|
||||
fba.stats.Inc(fmt.Sprintf("%s.%s", fba.prefix, key), int64(val), 1.0)
|
||||
fba.stats.Inc(key, int64(val))
|
||||
}
|
||||
|
||||
type btHolder struct {
|
||||
key string
|
||||
stats statsd.Statter
|
||||
stats Scope
|
||||
started time.Time
|
||||
}
|
||||
|
||||
func (bth btHolder) End() {
|
||||
bth.stats.TimingDuration(bth.key, time.Since(bth.started), 1.0)
|
||||
bth.stats.TimingDuration(bth.key, time.Since(bth.started))
|
||||
}
|
||||
|
||||
// BumpTime is essentially a (much better) statsd.Statter.TimingDuration
|
||||
|
@ -79,7 +76,7 @@ func (fba FBAdapter) BumpTime(key string) interface {
|
|||
End()
|
||||
} {
|
||||
return btHolder{
|
||||
key: fmt.Sprintf("%s.%s", fba.prefix, key),
|
||||
key: key,
|
||||
started: fba.clk.Now(),
|
||||
stats: fba.stats,
|
||||
}
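Both helpers in this file now lean on the scope for prefixing. A sketch of how they might be wired after this change, assuming the constructors shown above; the listen address and the scope names are placeholders.

package example

import (
	"net/http"

	"github.com/jmhodges/clock"
	"github.com/letsencrypt/boulder/metrics"
)

func serveWithMonitoring(scope metrics.Scope, mux http.Handler) error {
	// No prefix argument any more: the scope already carries it.
	monitored := metrics.NewHTTPMonitor(scope.NewScope("WFE"), mux)

	// FBAdapter exposes the facebookgo-style BumpAvg/BumpSum/BumpTime calls on
	// top of the same scope, for libraries such as httpdown.
	_ = metrics.NewFBAdapter(scope.NewScope("HTTPDown"), clock.Default())

	return http.ListenAndServe(":8080", monitored)
}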
|
||||
|
|
|
@@ -32,7 +32,8 @@ type NonceService struct {
}

// NewNonceService constructs a NonceService with defaults
- func NewNonceService(parent metrics.Scope) (*NonceService, error) {
+ func NewNonceService(scope metrics.Scope) (*NonceService, error) {
+ scope = scope.NewScope("NonceService")
key := make([]byte, 16)
if _, err := rand.Read(key); err != nil {
return nil, err

@@ -53,7 +54,7 @@ func NewNonceService(parent metrics.Scope) (*NonceService, error) {
used: make(map[int64]bool, MaxUsed),
gcm: gcm,
maxUsed: MaxUsed,
- stats: parent.NewScope("NonceService"),
+ stats: scope,
}, nil
}
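Because the service now scopes itself, callers just hand over whatever Scope they already hold (the WFE later in this diff passes its scope straight through). A minimal sketch against a no-op scope, assuming the signature in this hunk.

package example

import (
	"github.com/letsencrypt/boulder/metrics"
	"github.com/letsencrypt/boulder/nonce"
)

func newNonceService() (*nonce.NonceService, error) {
	// NewNonceService now derives its own "NonceService" sub-scope internally,
	// so the caller no longer pre-builds it.
	return nonce.NewNonceService(metrics.NewNoopScope())
}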
|
||||
|
||||
|
|
50  ra/ra.go
|
@ -14,20 +14,19 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/cactus/go-statsd-client/statsd"
|
||||
"github.com/jmhodges/clock"
|
||||
"github.com/letsencrypt/boulder/goodkey"
|
||||
"github.com/letsencrypt/boulder/metrics"
|
||||
"github.com/letsencrypt/boulder/probs"
|
||||
"github.com/letsencrypt/boulder/reloader"
|
||||
"github.com/weppos/publicsuffix-go/publicsuffix"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/letsencrypt/boulder/bdns"
|
||||
"github.com/letsencrypt/boulder/core"
|
||||
csrlib "github.com/letsencrypt/boulder/csr"
|
||||
"github.com/letsencrypt/boulder/goodkey"
|
||||
blog "github.com/letsencrypt/boulder/log"
|
||||
"github.com/letsencrypt/boulder/metrics"
|
||||
"github.com/letsencrypt/boulder/probs"
|
||||
"github.com/letsencrypt/boulder/ratelimit"
|
||||
"github.com/letsencrypt/boulder/reloader"
|
||||
"github.com/letsencrypt/boulder/revocation"
|
||||
vaPB "github.com/letsencrypt/boulder/va/proto"
|
||||
)
|
||||
|
@ -47,7 +46,7 @@ type RegistrationAuthorityImpl struct {
|
|||
VA core.ValidationAuthority
|
||||
SA core.StorageAuthority
|
||||
PA core.PolicyAuthority
|
||||
stats statsd.Statter
|
||||
stats metrics.Scope
|
||||
DNSResolver bdns.DNSResolver
|
||||
clk clock.Clock
|
||||
log blog.Logger
|
||||
|
@ -74,7 +73,7 @@ type RegistrationAuthorityImpl struct {
|
|||
func NewRegistrationAuthorityImpl(
|
||||
clk clock.Clock,
|
||||
logger blog.Logger,
|
||||
stats statsd.Statter,
|
||||
stats metrics.Scope,
|
||||
maxContactsPerReg int,
|
||||
keyPolicy goodkey.KeyPolicy,
|
||||
maxNames int,
|
||||
|
@ -83,7 +82,6 @@ func NewRegistrationAuthorityImpl(
|
|||
authorizationLifetime time.Duration,
|
||||
pendingAuthorizationLifetime time.Duration,
|
||||
) *RegistrationAuthorityImpl {
|
||||
scope := metrics.NewStatsdScope(stats, "RA")
|
||||
ra := &RegistrationAuthorityImpl{
|
||||
stats: stats,
|
||||
clk: clk,
|
||||
|
@ -97,10 +95,10 @@ func NewRegistrationAuthorityImpl(
|
|||
maxNames: maxNames,
|
||||
forceCNFromSAN: forceCNFromSAN,
|
||||
reuseValidAuthz: reuseValidAuthz,
|
||||
regByIPStats: scope.NewScope("RA", "RateLimit", "RegistrationsByIP"),
|
||||
pendAuthByRegIDStats: scope.NewScope("RA", "RateLimit", "PendingAuthorizationsByRegID"),
|
||||
certsForDomainStats: scope.NewScope("RA", "RateLimit", "CertificatesForDomain"),
|
||||
totalCertsStats: scope.NewScope("RA", "RateLimit", "TotalCertificates"),
|
||||
regByIPStats: stats.NewScope("RA", "RateLimit", "RegistrationsByIP"),
|
||||
pendAuthByRegIDStats: stats.NewScope("RA", "RateLimit", "PendingAuthorizationsByRegID"),
|
||||
certsForDomainStats: stats.NewScope("RA", "RateLimit", "CertificatesForDomain"),
|
||||
totalCertsStats: stats.NewScope("RA", "RateLimit", "TotalCertificates"),
|
||||
}
|
||||
return ra
|
||||
}
|
||||
|
@ -279,7 +277,7 @@ func (ra *RegistrationAuthorityImpl) NewRegistration(ctx context.Context, init c
|
|||
err = core.InternalServerError(err.Error())
|
||||
}
|
||||
|
||||
ra.stats.Inc("RA.NewRegistrations", 1, 1.0)
|
||||
ra.stats.Inc("NewRegistrations", 1)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -309,14 +307,14 @@ func (ra *RegistrationAuthorityImpl) validateContacts(ctx context.Context, conta
|
|||
}
|
||||
|
||||
start := ra.clk.Now()
|
||||
ra.stats.Inc("RA.ValidateEmail.Calls", 1, 1.0)
|
||||
ra.stats.Inc("ValidateEmail.Calls", 1)
|
||||
problem := validateEmail(ctx, parsed.Opaque, ra.DNSResolver)
|
||||
ra.stats.TimingDuration("RA.ValidateEmail.Latency", ra.clk.Now().Sub(start), 1.0)
|
||||
ra.stats.TimingDuration("ValidateEmail.Latency", ra.clk.Now().Sub(start))
|
||||
if problem != nil {
|
||||
ra.stats.Inc("RA.ValidateEmail.Errors", 1, 1.0)
|
||||
ra.stats.Inc("ValidateEmail.Errors", 1)
|
||||
return problem
|
||||
}
|
||||
ra.stats.Inc("RA.ValidateEmail.Successes", 1, 1.0)
|
||||
ra.stats.Inc("ValidateEmail.Successes", 1)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -394,7 +392,7 @@ func (ra *RegistrationAuthorityImpl) NewAuthorization(ctx context.Context, reque
|
|||
// it to be OK for reuse
|
||||
reuseCutOff := ra.clk.Now().Add(time.Hour * 24)
|
||||
if populatedAuthz.Expires.After(reuseCutOff) {
|
||||
ra.stats.Inc("RA.ReusedValidAuthz", 1, 1.0)
|
||||
ra.stats.Inc("ReusedValidAuthz", 1)
|
||||
return populatedAuthz, nil
|
||||
}
|
||||
}
|
||||
|
@ -650,7 +648,7 @@ func (ra *RegistrationAuthorityImpl) NewCertificate(ctx context.Context, req cor
|
|||
logEventResult = "successful"
|
||||
|
||||
issuanceExpvar.Set(now.Unix())
|
||||
ra.stats.Inc("RA.NewCertificates", 1, 1.0)
|
||||
ra.stats.Inc("NewCertificates", 1)
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
|
@ -795,7 +793,7 @@ func (ra *RegistrationAuthorityImpl) UpdateRegistration(ctx context.Context, bas
|
|||
return core.Registration{}, err
|
||||
}
|
||||
|
||||
ra.stats.Inc("RA.UpdatedRegistrations", 1, 1.0)
|
||||
ra.stats.Inc("UpdatedRegistrations", 1)
|
||||
return base, nil
|
||||
}
|
||||
|
||||
|
@ -869,7 +867,7 @@ func (ra *RegistrationAuthorityImpl) UpdateAuthorization(ctx context.Context, ba
|
|||
|
||||
if response.Type != "" && ch.Type != response.Type {
|
||||
// TODO(riking): Check the rate on this, uncomment error return if negligible
|
||||
ra.stats.Inc("RA.StartChallengeWrongType", 1, 1.0)
|
||||
ra.stats.Inc("StartChallengeWrongType", 1)
|
||||
// err = core.MalformedRequestError(fmt.Sprintf("Invalid update to challenge - provided type was %s but actual type is %s", response.Type, ch.Type))
|
||||
// return
|
||||
}
|
||||
|
@ -880,7 +878,7 @@ func (ra *RegistrationAuthorityImpl) UpdateAuthorization(ctx context.Context, ba
|
|||
// the overall authorization is already good! We increment a stat for this
|
||||
// case and return early.
|
||||
if ra.reuseValidAuthz && authz.Status == core.StatusValid {
|
||||
ra.stats.Inc("RA.ReusedValidAuthzChallenge", 1, 1.0)
|
||||
ra.stats.Inc("ReusedValidAuthzChallenge", 1)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -919,7 +917,7 @@ func (ra *RegistrationAuthorityImpl) UpdateAuthorization(ctx context.Context, ba
|
|||
err = core.MalformedRequestError("Challenge data was corrupted")
|
||||
return
|
||||
}
|
||||
ra.stats.Inc("RA.NewPendingAuthorizations", 1, 1.0)
|
||||
ra.stats.Inc("NewPendingAuthorizations", 1)
|
||||
|
||||
// Dispatch to the VA for service
|
||||
|
||||
|
@ -955,7 +953,7 @@ func (ra *RegistrationAuthorityImpl) UpdateAuthorization(ctx context.Context, ba
|
|||
ra.log.AuditErr(fmt.Sprintf("Could not record updated validation: err=[%s] regID=[%d]", err, authz.RegistrationID))
|
||||
}
|
||||
}()
|
||||
ra.stats.Inc("RA.UpdatedPendingAuthorizations", 1, 1.0)
|
||||
ra.stats.Inc("UpdatedPendingAuthorizations", 1)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -1029,7 +1027,7 @@ func (ra *RegistrationAuthorityImpl) AdministrativelyRevokeCertificate(ctx conte
|
|||
}
|
||||
|
||||
state = "Success"
|
||||
ra.stats.Inc("RA.RevokedCertificates", 1, 1.0)
|
||||
ra.stats.Inc("RevokedCertificates", 1)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1072,7 +1070,7 @@ func (ra *RegistrationAuthorityImpl) onValidationUpdate(ctx context.Context, aut
|
|||
return err
|
||||
}
|
||||
|
||||
ra.stats.Inc("RA.FinalizedAuthorizations", 1, 1.0)
|
||||
ra.stats.Inc("FinalizedAuthorizations", 1)
|
||||
return nil
|
||||
}
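One pattern worth noting from the RA constructor above is the nested rate-limit scopes. A hypothetical sketch of how those names compose, assuming NewScope joins its arguments with "."; the "Exceeded" counter is made up for illustration and is not a metric the RA emits.

package example

import (
	"github.com/cactus/go-statsd-client/statsd"
	"github.com/letsencrypt/boulder/metrics"
)

func rateLimitScopeExample() {
	statter, _ := statsd.NewClient("localhost:8125", "Boulder")
	stats := metrics.NewStatsdScope(statter)

	// Mirrors regByIPStats in the constructor above.
	regByIP := stats.NewScope("RA", "RateLimit", "RegistrationsByIP")

	// Hypothetical counter, only to show where a nested scope puts the name:
	// it should land under Boulder.RA.RateLimit.RegistrationsByIP.
	regByIP.Inc("Exceeded", 1)
}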
|
||||
|
||||
|
|
|
@ -15,7 +15,6 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/cactus/go-statsd-client/statsd"
|
||||
"github.com/jmhodges/clock"
|
||||
jose "github.com/square/go-jose"
|
||||
"golang.org/x/net/context"
|
||||
|
@ -25,6 +24,7 @@ import (
|
|||
"github.com/letsencrypt/boulder/core"
|
||||
"github.com/letsencrypt/boulder/goodkey"
|
||||
blog "github.com/letsencrypt/boulder/log"
|
||||
"github.com/letsencrypt/boulder/metrics"
|
||||
"github.com/letsencrypt/boulder/mocks"
|
||||
"github.com/letsencrypt/boulder/policy"
|
||||
"github.com/letsencrypt/boulder/probs"
|
||||
|
@ -221,7 +221,7 @@ func initAuthorities(t *testing.T) (*DummyValidationAuthority, *sa.SQLStorageAut
|
|||
err = pa.SetHostnamePolicyFile("../test/hostname-policy.json")
|
||||
test.AssertNotError(t, err, "Couldn't set hostname policy")
|
||||
|
||||
stats, _ := statsd.NewNoopClient()
|
||||
stats := metrics.NewNoopScope()
|
||||
|
||||
ca := &mocks.MockCA{
|
||||
PEM: eeCertPEM,
|
||||
|
|
|
@ -17,16 +17,15 @@ import (
|
|||
"syscall"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/cactus/go-statsd-client/statsd"
|
||||
"github.com/jmhodges/clock"
|
||||
"github.com/letsencrypt/boulder/probs"
|
||||
"github.com/streadway/amqp"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/letsencrypt/boulder/cmd"
|
||||
"github.com/letsencrypt/boulder/core"
|
||||
blog "github.com/letsencrypt/boulder/log"
|
||||
"github.com/letsencrypt/boulder/metrics"
|
||||
"github.com/letsencrypt/boulder/probs"
|
||||
)
|
||||
|
||||
// TODO: AMQP-RPC messages should be wrapped in JWS. To implement that,
|
||||
|
@ -122,7 +121,7 @@ type AmqpRPCServer struct {
|
|||
currentGoroutines int64
|
||||
maxConcurrentRPCServerRequests int64
|
||||
tooManyRequestsResponse []byte
|
||||
stats statsd.Statter
|
||||
stats metrics.Scope
|
||||
clk clock.Clock
|
||||
}
|
||||
|
||||
|
@ -133,9 +132,10 @@ const wildcardRoutingKey = "#"
|
|||
func NewAmqpRPCServer(
|
||||
amqpConf *cmd.AMQPConfig,
|
||||
maxConcurrentRPCServerRequests int64,
|
||||
stats statsd.Statter,
|
||||
stats metrics.Scope,
|
||||
log blog.Logger,
|
||||
) (*AmqpRPCServer, error) {
|
||||
stats = stats.NewScope("RPC")
|
||||
|
||||
reconnectBase := amqpConf.ReconnectTimeouts.Base.Duration
|
||||
if reconnectBase == 0 {
|
||||
|
@ -412,13 +412,13 @@ func (rpc *AmqpRPCServer) Start(c *cmd.AMQPConfig) error {
|
|||
select {
|
||||
case msg, ok := <-rpc.connection.messages():
|
||||
if ok {
|
||||
rpc.stats.TimingDuration(fmt.Sprintf("RPC.MessageLag.%s", rpc.serverQueue), rpc.clk.Now().Sub(msg.Timestamp), 1.0)
|
||||
rpc.stats.TimingDuration(fmt.Sprintf("MessageLag.%s", rpc.serverQueue), rpc.clk.Now().Sub(msg.Timestamp))
|
||||
if rpc.maxConcurrentRPCServerRequests > 0 && atomic.LoadInt64(&rpc.currentGoroutines) >= rpc.maxConcurrentRPCServerRequests {
|
||||
_ = rpc.replyTooManyRequests(msg)
|
||||
rpc.stats.Inc(fmt.Sprintf("RPC.CallsDropped.%s", rpc.serverQueue), 1, 1.0)
|
||||
rpc.stats.Inc(fmt.Sprintf("CallsDropped.%s", rpc.serverQueue), 1)
|
||||
break // this breaks the select, not the for
|
||||
}
|
||||
rpc.stats.Inc(fmt.Sprintf("RPC.Traffic.Rx.%s", rpc.serverQueue), int64(len(msg.Body)), 1.0)
|
||||
rpc.stats.Inc(fmt.Sprintf("Traffic.Rx.%s", rpc.serverQueue), int64(len(msg.Body)))
|
||||
go func() {
|
||||
atomic.AddInt64(&rpc.currentGoroutines, 1)
|
||||
defer atomic.AddInt64(&rpc.currentGoroutines, -1)
|
||||
|
@ -428,7 +428,7 @@ func (rpc *AmqpRPCServer) Start(c *cmd.AMQPConfig) error {
|
|||
} else {
|
||||
rpc.processMessage(msg)
|
||||
}
|
||||
rpc.stats.TimingDuration(fmt.Sprintf("RPC.ServerProcessingLatency.%s", msg.Type), time.Since(startedProcessing), 1.0)
|
||||
rpc.stats.TimingDuration(fmt.Sprintf("ServerProcessingLatency.%s", msg.Type), time.Since(startedProcessing))
|
||||
}()
|
||||
} else {
|
||||
rpc.mu.RLock()
|
||||
|
@ -506,7 +506,7 @@ type AmqpRPCCLient struct {
|
|||
mu sync.RWMutex
|
||||
pending map[string]chan []byte
|
||||
|
||||
stats statsd.Statter
|
||||
stats metrics.Scope
|
||||
}
|
||||
|
||||
// NewAmqpRPCClient constructs an RPC client using AMQP
|
||||
|
@ -514,8 +514,9 @@ func NewAmqpRPCClient(
|
|||
clientQueuePrefix string,
|
||||
amqpConf *cmd.AMQPConfig,
|
||||
rpcConf *cmd.RPCServerConfig,
|
||||
stats statsd.Statter,
|
||||
stats metrics.Scope,
|
||||
) (rpc *AmqpRPCCLient, err error) {
|
||||
stats = stats.NewScope("RPC")
|
||||
hostname, err := os.Hostname()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -570,7 +571,7 @@ func NewAmqpRPCClient(
|
|||
if !present {
|
||||
// occurs when a request is timed out and the arrives
|
||||
// afterwards
|
||||
stats.Inc("RPC.AfterTimeoutResponseArrivals."+clientQueuePrefix, 1, 1.0)
|
||||
stats.Inc("AfterTimeoutResponseArrivals."+clientQueuePrefix, 1)
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -630,7 +631,7 @@ func (rpc *AmqpRPCCLient) dispatch(method string, body []byte) (string, chan []b
|
|||
|
||||
// DispatchSync sends a body to the destination, and blocks waiting on a response.
|
||||
func (rpc *AmqpRPCCLient) DispatchSync(method string, body []byte) (response []byte, err error) {
|
||||
rpc.stats.Inc(fmt.Sprintf("RPC.Traffic.Tx.%s", rpc.serverQueue), int64(len(body)), 1.0)
|
||||
rpc.stats.Inc(fmt.Sprintf("Traffic.Tx.%s", rpc.serverQueue), int64(len(body)))
|
||||
callStarted := time.Now()
|
||||
corrID, responseChan, err := rpc.dispatch(method, body)
|
||||
if err != nil {
|
||||
|
@ -646,14 +647,14 @@ func (rpc *AmqpRPCCLient) DispatchSync(method string, body []byte) (response []b
|
|||
}
|
||||
err = unwrapError(rpcResponse.Error)
|
||||
if err != nil {
|
||||
rpc.stats.Inc(fmt.Sprintf("RPC.ClientCallLatency.%s.Error", method), 1, 1.0)
|
||||
rpc.stats.Inc(fmt.Sprintf("ClientCallLatency.%s.Error", method), 1)
|
||||
return nil, err
|
||||
}
|
||||
rpc.stats.TimingDuration(fmt.Sprintf("RPC.ClientCallLatency.%s.Success", method), time.Since(callStarted), 1.0)
|
||||
rpc.stats.TimingDuration(fmt.Sprintf("ClientCallLatency.%s.Success", method), time.Since(callStarted))
|
||||
response = rpcResponse.ReturnVal
|
||||
return response, nil
|
||||
case <-time.After(rpc.timeout):
|
||||
rpc.stats.TimingDuration(fmt.Sprintf("RPC.ClientCallLatency.%s.Timeout", method), time.Since(callStarted), 1.0)
|
||||
rpc.stats.TimingDuration(fmt.Sprintf("ClientCallLatency.%s.Timeout", method), time.Since(callStarted))
|
||||
rpc.log.Warning(fmt.Sprintf(" [c!][%s] AMQP-RPC timeout [%s]", rpc.clientQueue, method))
|
||||
rpc.mu.Lock()
|
||||
delete(rpc.pending, corrID)
|
||||
|
|
|
@ -8,13 +8,13 @@ import (
|
|||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/cactus/go-statsd-client/statsd"
|
||||
jose "github.com/square/go-jose"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/letsencrypt/boulder/cmd"
|
||||
"github.com/letsencrypt/boulder/core"
|
||||
blog "github.com/letsencrypt/boulder/log"
|
||||
"github.com/letsencrypt/boulder/metrics"
|
||||
"github.com/letsencrypt/boulder/probs"
|
||||
"github.com/letsencrypt/boulder/revocation"
|
||||
vaPB "github.com/letsencrypt/boulder/va/proto"
|
||||
|
@ -383,7 +383,7 @@ type RegistrationAuthorityClient struct {
|
|||
}
|
||||
|
||||
// NewRegistrationAuthorityClient constructs an RPC client
|
||||
func NewRegistrationAuthorityClient(clientName string, amqpConf *cmd.AMQPConfig, stats statsd.Statter) (*RegistrationAuthorityClient, error) {
|
||||
func NewRegistrationAuthorityClient(clientName string, amqpConf *cmd.AMQPConfig, stats metrics.Scope) (*RegistrationAuthorityClient, error) {
|
||||
client, err := NewAmqpRPCClient(clientName+"->RA", amqpConf, amqpConf.RA, stats)
|
||||
return &RegistrationAuthorityClient{rpc: client}, err
|
||||
}
|
||||
|
@ -572,7 +572,7 @@ type ValidationAuthorityClient struct {
|
|||
}
|
||||
|
||||
// NewValidationAuthorityClient constructs an RPC client
|
||||
func NewValidationAuthorityClient(clientName string, amqpConf *cmd.AMQPConfig, stats statsd.Statter) (*ValidationAuthorityClient, error) {
|
||||
func NewValidationAuthorityClient(clientName string, amqpConf *cmd.AMQPConfig, stats metrics.Scope) (*ValidationAuthorityClient, error) {
|
||||
client, err := NewAmqpRPCClient(clientName+"->VA", amqpConf, amqpConf.VA, stats)
|
||||
return &ValidationAuthorityClient{rpc: client}, err
|
||||
}
|
||||
|
@ -636,7 +636,7 @@ type PublisherClient struct {
|
|||
}
|
||||
|
||||
// NewPublisherClient constructs an RPC client
|
||||
func NewPublisherClient(clientName string, amqpConf *cmd.AMQPConfig, stats statsd.Statter) (*PublisherClient, error) {
|
||||
func NewPublisherClient(clientName string, amqpConf *cmd.AMQPConfig, stats metrics.Scope) (*PublisherClient, error) {
|
||||
client, err := NewAmqpRPCClient(clientName+"->Publisher", amqpConf, amqpConf.Publisher, stats)
|
||||
return &PublisherClient{rpc: client}, err
|
||||
}
|
||||
|
@ -705,7 +705,7 @@ type CertificateAuthorityClient struct {
|
|||
}
|
||||
|
||||
// NewCertificateAuthorityClient constructs an RPC client
|
||||
func NewCertificateAuthorityClient(clientName string, amqpConf *cmd.AMQPConfig, stats statsd.Statter) (*CertificateAuthorityClient, error) {
|
||||
func NewCertificateAuthorityClient(clientName string, amqpConf *cmd.AMQPConfig, stats metrics.Scope) (*CertificateAuthorityClient, error) {
|
||||
client, err := NewAmqpRPCClient(clientName+"->CA", amqpConf, amqpConf.CA, stats)
|
||||
return &CertificateAuthorityClient{rpc: client}, err
|
||||
}
|
||||
|
@ -1121,7 +1121,7 @@ type StorageAuthorityClient struct {
|
|||
}
|
||||
|
||||
// NewStorageAuthorityClient constructs an RPC client
|
||||
func NewStorageAuthorityClient(clientName string, amqpConf *cmd.AMQPConfig, stats statsd.Statter) (*StorageAuthorityClient, error) {
|
||||
func NewStorageAuthorityClient(clientName string, amqpConf *cmd.AMQPConfig, stats metrics.Scope) (*StorageAuthorityClient, error) {
|
||||
client, err := NewAmqpRPCClient(clientName+"->SA", amqpConf, amqpConf.SA, stats)
|
||||
return &StorageAuthorityClient{rpc: client}, err
|
||||
}
|
||||
|
|
15  va/gsb.go
|
@@ -25,29 +25,30 @@ func (va *ValidationAuthorityImpl) IsSafeDomain(ctx context.Context, req *vaPB.I
if req == nil || req.Domain == nil {
return nil, bgrpc.ErrMissingParameters
}
- va.stats.Inc("VA.IsSafeDomain.Requests", 1, 1.0)
+ stats := va.stats.NewScope("IsSafeDomain")
+ stats.Inc("IsSafeDomain.Requests", 1)
if va.safeBrowsing == nil {
- va.stats.Inc("VA.IsSafeDomain.Skips", 1, 1.0)
+ stats.Inc("IsSafeDomain.Skips", 1)
status := true
return &vaPB.IsDomainSafe{IsSafe: &status}, nil
}

list, err := va.safeBrowsing.IsListed(*req.Domain)
if err != nil {
- va.stats.Inc("VA.IsSafeDomain.Errors", 1, 1.0)
+ stats.Inc("IsSafeDomain.Errors", 1)
if err == safebrowsing.ErrOutOfDateHashes {
- va.stats.Inc("VA.IsSafeDomain.OutOfDateHashErrors", 1, 1.0)
+ stats.Inc("IsSafeDomain.OutOfDateHashErrors", 1)
status := true
return &vaPB.IsDomainSafe{IsSafe: &status}, nil
}
return nil, err
}
- va.stats.Inc("VA.IsSafeDomain.Successes", 1, 1.0)
+ stats.Inc("IsSafeDomain.Successes", 1)
status := (list == "")
if status {
- va.stats.Inc("VA.IsSafeDomain.Status.Good", 1, 1.0)
+ stats.Inc("IsSafeDomain.Status.Good", 1)
} else {
- va.stats.Inc("VA.IsSafeDomain.Status.Bad", 1, 1.0)
+ stats.Inc("IsSafeDomain.Status.Bad", 1)
}
return &vaPB.IsDomainSafe{IsSafe: &status}, nil
}
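The sub-scope created at the top of IsSafeDomain is the same pattern used elsewhere in the commit: derive a child scope once, then record related counters against it. A small illustrative sketch with a caller-supplied scope; the counter names here are illustrative, not the exact ones the VA emits.

package example

import "github.com/letsencrypt/boulder/metrics"

func recordSafeBrowsingResult(va metrics.Scope, listed bool) {
	stats := va.NewScope("IsSafeDomain") // child scope shared by the counters below
	stats.Inc("Requests", 1)
	if listed {
		stats.Inc("Status.Bad", 1)
	} else {
		stats.Inc("Status.Good", 1)
	}
}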
|
||||
|
|
|
@ -4,13 +4,13 @@ import (
|
|||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/cactus/go-statsd-client/statsd"
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/jmhodges/clock"
|
||||
safebrowsing "github.com/letsencrypt/go-safe-browsing-api"
|
||||
|
||||
"github.com/letsencrypt/boulder/cmd"
|
||||
blog "github.com/letsencrypt/boulder/log"
|
||||
"github.com/letsencrypt/boulder/metrics"
|
||||
vaPB "github.com/letsencrypt/boulder/va/proto"
|
||||
)
|
||||
|
||||
|
@ -21,7 +21,7 @@ func TestIsSafeDomain(t *testing.T) {
|
|||
// we rely on is a little funny and overcomplicated, but still hasn't
|
||||
// learned out how not make HTTP requests in tests.
|
||||
|
||||
stats, _ := statsd.NewNoopClient()
|
||||
stats := metrics.NewNoopScope()
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
|
@ -80,7 +80,7 @@ func TestIsSafeDomain(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllowNilInIsSafeDomain(t *testing.T) {
|
||||
stats, _ := statsd.NewNoopClient()
|
||||
stats := metrics.NewNoopScope()
|
||||
va := NewValidationAuthorityImpl(
|
||||
&cmd.PortConfig{},
|
||||
nil,
|
||||
|
|
22  va/va.go
|
@ -17,7 +17,6 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/cactus/go-statsd-client/statsd"
|
||||
"github.com/jmhodges/clock"
|
||||
"github.com/miekg/dns"
|
||||
"golang.org/x/net/context"
|
||||
|
@ -28,6 +27,7 @@ import (
|
|||
"github.com/letsencrypt/boulder/core"
|
||||
bgrpc "github.com/letsencrypt/boulder/grpc"
|
||||
blog "github.com/letsencrypt/boulder/log"
|
||||
"github.com/letsencrypt/boulder/metrics"
|
||||
"github.com/letsencrypt/boulder/probs"
|
||||
|
||||
caaPB "github.com/letsencrypt/boulder/cmd/caa-checker/proto"
|
||||
|
@ -54,7 +54,7 @@ type ValidationAuthorityImpl struct {
|
|||
httpsPort int
|
||||
tlsPort int
|
||||
userAgent string
|
||||
stats statsd.Statter
|
||||
stats metrics.Scope
|
||||
clk clock.Clock
|
||||
caaClient caaPB.CAACheckerClient
|
||||
caaDR *cdr.CAADistributedResolver
|
||||
|
@ -69,7 +69,7 @@ func NewValidationAuthorityImpl(
|
|||
resolver bdns.DNSResolver,
|
||||
userAgent string,
|
||||
issuerDomain string,
|
||||
stats statsd.Statter,
|
||||
stats metrics.Scope,
|
||||
clk clock.Clock,
|
||||
logger blog.Logger,
|
||||
) *ValidationAuthorityImpl {
|
||||
|
@ -589,7 +589,7 @@ func (va *ValidationAuthorityImpl) PerformValidation(ctx context.Context, domain
|
|||
|
||||
logEvent.Challenge = challenge
|
||||
|
||||
va.stats.TimingDuration(fmt.Sprintf("VA.Validations.%s.%s", challenge.Type, challenge.Status), time.Since(vStart), 1.0)
|
||||
va.stats.TimingDuration(fmt.Sprintf("Validations.%s.%s", challenge.Type, challenge.Status), time.Since(vStart))
|
||||
|
||||
va.log.AuditObject("Validation result", logEvent)
|
||||
va.log.Info(fmt.Sprintf("Validations: %+v", authz))
|
||||
|
@ -715,23 +715,23 @@ func (va *ValidationAuthorityImpl) checkCAARecords(ctx context.Context, identifi
|
|||
func (va *ValidationAuthorityImpl) validateCAASet(caaSet *CAASet) (present, valid bool) {
|
||||
if caaSet == nil {
|
||||
// No CAA records found, can issue
|
||||
va.stats.Inc("VA.CAA.None", 1, 1.0)
|
||||
va.stats.Inc("CAA.None", 1)
|
||||
return false, true
|
||||
}
|
||||
|
||||
// Record stats on directives not currently processed.
|
||||
if len(caaSet.Iodef) > 0 {
|
||||
va.stats.Inc("VA.CAA.WithIodef", 1, 1.0)
|
||||
va.stats.Inc("CAA.WithIodef", 1)
|
||||
}
|
||||
|
||||
if caaSet.criticalUnknown() {
|
||||
// Contains unknown critical directives.
|
||||
va.stats.Inc("VA.CAA.UnknownCritical", 1, 1.0)
|
||||
va.stats.Inc("CAA.UnknownCritical", 1)
|
||||
return true, false
|
||||
}
|
||||
|
||||
if len(caaSet.Unknown) > 0 {
|
||||
va.stats.Inc("VA.CAA.WithUnknownNoncritical", 1, 1.0)
|
||||
va.stats.Inc("CAA.WithUnknownNoncritical", 1)
|
||||
}
|
||||
|
||||
if len(caaSet.Issue) == 0 {
|
||||
|
@ -739,7 +739,7 @@ func (va *ValidationAuthorityImpl) validateCAASet(caaSet *CAASet) (present, vali
|
|||
// (e.g. there is only an issuewild directive, but we are checking for a
|
||||
// non-wildcard identifier, or there is only an iodef or non-critical unknown
|
||||
// directive.)
|
||||
va.stats.Inc("VA.CAA.NoneRelevant", 1, 1.0)
|
||||
va.stats.Inc("CAA.NoneRelevant", 1)
|
||||
return true, true
|
||||
}
|
||||
|
||||
|
@ -750,13 +750,13 @@ func (va *ValidationAuthorityImpl) validateCAASet(caaSet *CAASet) (present, vali
|
|||
// Our CAA identity must be found in the chosen checkSet.
|
||||
for _, caa := range caaSet.Issue {
|
||||
if extractIssuerDomain(caa) == va.issuerDomain {
|
||||
va.stats.Inc("VA.CAA.Authorized", 1, 1.0)
|
||||
va.stats.Inc("CAA.Authorized", 1)
|
||||
return true, true
|
||||
}
|
||||
}
|
||||
|
||||
// The list of authorized issuers is non-empty, but we are not in it. Fail.
|
||||
va.stats.Inc("VA.CAA.Unauthorized", 1, 1.0)
|
||||
va.stats.Inc("CAA.Unauthorized", 1)
|
||||
return true, false
|
||||
}
|
||||
|
||||
|
|
|
@ -835,6 +835,7 @@ func TestLimitedReader(t *testing.T) {
|
|||
|
||||
func setup() (*ValidationAuthorityImpl, *mocks.Statter, *blog.Mock) {
|
||||
stats := mocks.NewStatter()
|
||||
scope := metrics.NewStatsdScope(stats, "VA")
|
||||
logger := blog.NewMock()
|
||||
va := NewValidationAuthorityImpl(
|
||||
&cmd.PortConfig{},
|
||||
|
@ -844,7 +845,7 @@ func setup() (*ValidationAuthorityImpl, *mocks.Statter, *blog.Mock) {
|
|||
&bdns.MockDNSResolver{},
|
||||
"user agent 1.0",
|
||||
"letsencrypt.org",
|
||||
stats,
|
||||
scope,
|
||||
clock.Default(),
|
||||
logger)
|
||||
return va, stats, logger
|
||||
|
@ -855,6 +856,7 @@ func TestCheckCAAFallback(t *testing.T) {
|
|||
defer testSrv.Close()
|
||||
|
||||
stats := mocks.NewStatter()
|
||||
scope := metrics.NewStatsdScope(stats, "VA")
|
||||
logger := blog.NewMock()
|
||||
caaDR, err := cdr.New(metrics.NewNoopScope(), time.Second, 1, nil, blog.NewMock())
|
||||
test.AssertNotError(t, err, "Failed to create CAADistributedResolver")
|
||||
|
@ -868,7 +870,7 @@ func TestCheckCAAFallback(t *testing.T) {
|
|||
&bdns.MockDNSResolver{},
|
||||
"user agent 1.0",
|
||||
"ca.com",
|
||||
stats,
|
||||
scope,
|
||||
clock.Default(),
|
||||
logger)
|
||||
|
||||
|
|
46  wfe/wfe.go
|
@ -15,7 +15,6 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cactus/go-statsd-client/statsd"
|
||||
"github.com/jmhodges/clock"
|
||||
jose "github.com/square/go-jose"
|
||||
"golang.org/x/net/context"
|
||||
|
@ -52,7 +51,7 @@ const (
|
|||
type WebFrontEndImpl struct {
|
||||
RA core.RegistrationAuthority
|
||||
SA core.StorageGetter
|
||||
stats statsd.Statter
|
||||
stats metrics.Scope
|
||||
log blog.Logger
|
||||
clk clock.Clock
|
||||
|
||||
|
@ -91,13 +90,12 @@ type WebFrontEndImpl struct {
|
|||
|
||||
// NewWebFrontEndImpl constructs a web service for Boulder
|
||||
func NewWebFrontEndImpl(
|
||||
stats statsd.Statter,
|
||||
stats metrics.Scope,
|
||||
clk clock.Clock,
|
||||
keyPolicy goodkey.KeyPolicy,
|
||||
logger blog.Logger,
|
||||
) (WebFrontEndImpl, error) {
|
||||
scope := metrics.NewStatsdScope(stats, "WFE")
|
||||
nonceService, err := nonce.NewNonceService(scope)
|
||||
nonceService, err := nonce.NewNonceService(stats)
|
||||
if err != nil {
|
||||
return WebFrontEndImpl{}, err
|
||||
}
|
||||
|
@ -364,21 +362,21 @@ func (wfe *WebFrontEndImpl) verifyPOST(ctx context.Context, logEvent *requestEve
|
|||
reg := core.Registration{ID: 0}
|
||||
|
||||
if _, ok := request.Header["Content-Length"]; !ok {
|
||||
wfe.stats.Inc("WFE.HTTP.ClientErrors.LengthRequiredError", 1, 1.0)
|
||||
wfe.stats.Inc("HTTP.ClientErrors.LengthRequiredError", 1)
|
||||
logEvent.AddError("missing Content-Length header on POST")
|
||||
return nil, nil, reg, probs.ContentLengthRequired()
|
||||
}
|
||||
|
||||
// Read body
|
||||
if request.Body == nil {
|
||||
wfe.stats.Inc("WFE.Errors.NoPOSTBody", 1, 1.0)
|
||||
wfe.stats.Inc("Errors.NoPOSTBody", 1)
|
||||
logEvent.AddError("no body on POST")
|
||||
return nil, nil, reg, probs.Malformed("No body on POST")
|
||||
}
|
||||
|
||||
bodyBytes, err := ioutil.ReadAll(request.Body)
|
||||
if err != nil {
|
||||
wfe.stats.Inc("WFE.Errors.UnableToReadRequestBody", 1, 1.0)
|
||||
wfe.stats.Inc("Errors.UnableToReadRequestBody", 1)
|
||||
logEvent.AddError("unable to read request body")
|
||||
return nil, nil, reg, probs.ServerInternal("unable to read request body")
|
||||
}
|
||||
|
@ -387,7 +385,7 @@ func (wfe *WebFrontEndImpl) verifyPOST(ctx context.Context, logEvent *requestEve
|
|||
// Parse as JWS
|
||||
parsedJws, err := jose.ParseSigned(body)
|
||||
if err != nil {
|
||||
wfe.stats.Inc("WFE.Errors.UnableToParseJWS", 1, 1.0)
|
||||
wfe.stats.Inc("Errors.UnableToParseJWS", 1)
|
||||
logEvent.AddError("could not JSON parse body into JWS: %s", err)
|
||||
return nil, nil, reg, probs.Malformed("Parse error reading JWS")
|
||||
}
|
||||
|
@ -399,25 +397,25 @@ func (wfe *WebFrontEndImpl) verifyPOST(ctx context.Context, logEvent *requestEve
|
|||
// *anyway*, so it could always lie about what key was used by faking
|
||||
// the signature itself.
|
||||
if len(parsedJws.Signatures) > 1 {
|
||||
wfe.stats.Inc("WFE.Errors.TooManyJWSSignaturesInPOST", 1, 1.0)
|
||||
wfe.stats.Inc("Errors.TooManyJWSSignaturesInPOST", 1)
|
||||
logEvent.AddError("too many signatures in POST body: %d", len(parsedJws.Signatures))
|
||||
return nil, nil, reg, probs.Malformed("Too many signatures in POST body")
|
||||
}
|
||||
if len(parsedJws.Signatures) == 0 {
|
||||
wfe.stats.Inc("WFE.Errors.JWSNotSignedInPOST", 1, 1.0)
|
||||
wfe.stats.Inc("Errors.JWSNotSignedInPOST", 1)
|
||||
logEvent.AddError("no signatures in POST body")
|
||||
return nil, nil, reg, probs.Malformed("POST JWS not signed")
|
||||
}
|
||||
|
||||
submittedKey := parsedJws.Signatures[0].Header.JsonWebKey
|
||||
if submittedKey == nil {
|
||||
wfe.stats.Inc("WFE.Errors.NoJWKInJWSSignatureHeader", 1, 1.0)
|
||||
wfe.stats.Inc("Errors.NoJWKInJWSSignatureHeader", 1)
|
||||
logEvent.AddError("no JWK in JWS signature header in POST body")
|
||||
return nil, nil, reg, probs.Malformed("No JWK in JWS header")
|
||||
}
|
||||
|
||||
if !submittedKey.Valid() {
|
||||
wfe.stats.Inc("WFE.Errors.InvalidJWK", 1, 1.0)
|
||||
wfe.stats.Inc("Errors.InvalidJWK", 1)
|
||||
logEvent.AddError("invalid JWK in JWS signature header in POST body")
|
||||
return nil, nil, reg, probs.Malformed("Invalid JWK in JWS header")
|
||||
}
|
||||
|
@ -432,14 +430,14 @@ func (wfe *WebFrontEndImpl) verifyPOST(ctx context.Context, logEvent *requestEve
|
|||
// are "good". But when we are verifying against any submitted key, we want
|
||||
// to check its quality before doing the verify.
|
||||
if err = wfe.keyPolicy.GoodKey(submittedKey.Key); err != nil {
|
||||
wfe.stats.Inc("WFE.Errors.JWKRejectedByGoodKey", 1, 1.0)
|
||||
wfe.stats.Inc("Errors.JWKRejectedByGoodKey", 1)
|
||||
logEvent.AddError("JWK in request was rejected by GoodKey: %s", err)
|
||||
return nil, nil, reg, probs.Malformed(err.Error())
|
||||
}
|
||||
key = submittedKey
|
||||
} else if err != nil {
|
||||
// For all other errors, or if regCheck is true, return error immediately.
|
||||
wfe.stats.Inc("WFE.Errors.UnableToGetRegistrationByKey", 1, 1.0)
|
||||
wfe.stats.Inc("Errors.UnableToGetRegistrationByKey", 1)
|
||||
logEvent.AddError("unable to fetch registration by the given JWK: %s", err)
|
||||
if _, ok := err.(core.NoSuchRegistrationError); ok {
|
||||
return nil, nil, reg, probs.Unauthorized(unknownKey)
|
||||
|
@ -454,13 +452,13 @@ func (wfe *WebFrontEndImpl) verifyPOST(ctx context.Context, logEvent *requestEve
|
|||
}
|
||||
|
||||
if statName, err := checkAlgorithm(key, parsedJws); err != nil {
|
||||
wfe.stats.Inc(statName, 1, 1.0)
|
||||
wfe.stats.Inc(statName, 1)
|
||||
return nil, nil, reg, probs.Malformed(err.Error())
|
||||
}
|
||||
|
||||
payload, err := parsedJws.Verify(key)
|
||||
if err != nil {
|
||||
wfe.stats.Inc("WFE.Errors.JWSVerificationFailed", 1, 1.0)
|
||||
wfe.stats.Inc("Errors.JWSVerificationFailed", 1)
|
||||
n := len(body)
|
||||
if n > 100 {
|
||||
n = 100
|
||||
|
@ -473,11 +471,11 @@ func (wfe *WebFrontEndImpl) verifyPOST(ctx context.Context, logEvent *requestEve
|
|||
nonce := parsedJws.Signatures[0].Header.Nonce
|
||||
logEvent.RequestNonce = nonce
|
||||
if len(nonce) == 0 {
|
||||
wfe.stats.Inc("WFE.Errors.JWSMissingNonce", 1, 1.0)
|
||||
wfe.stats.Inc("Errors.JWSMissingNonce", 1)
|
||||
logEvent.AddError("JWS is missing an anti-replay nonce")
|
||||
return nil, nil, reg, probs.BadNonce("JWS has no anti-replay nonce")
|
||||
} else if !wfe.nonceService.Valid(nonce) {
|
||||
wfe.stats.Inc("WFE.Errors.JWSInvalidNonce", 1, 1.0)
|
||||
wfe.stats.Inc("Errors.JWSInvalidNonce", 1)
|
||||
logEvent.AddError("JWS has an invalid anti-replay nonce: %s", nonce)
|
||||
return nil, nil, reg, probs.BadNonce(fmt.Sprintf("JWS has invalid anti-replay nonce %v", nonce))
|
||||
}
|
||||
|
@ -488,16 +486,16 @@ func (wfe *WebFrontEndImpl) verifyPOST(ctx context.Context, logEvent *requestEve
|
|||
}
|
||||
err = json.Unmarshal([]byte(payload), &parsedRequest)
|
||||
if err != nil {
|
||||
wfe.stats.Inc("WFE.Errors.UnparsableJWSPayload", 1, 1.0)
|
||||
wfe.stats.Inc("Errors.UnparsableJWSPayload", 1)
|
||||
logEvent.AddError("unable to JSON parse resource from JWS payload: %s", err)
|
||||
return nil, nil, reg, probs.Malformed("Request payload did not parse as JSON")
|
||||
}
|
||||
if parsedRequest.Resource == "" {
|
||||
wfe.stats.Inc("WFE.Errors.NoResourceInJWSPayload", 1, 1.0)
|
||||
wfe.stats.Inc("Errors.NoResourceInJWSPayload", 1)
|
||||
logEvent.AddError("JWS request payload does not specify a resource")
|
||||
return nil, nil, reg, probs.Malformed("Request payload does not specify a resource")
|
||||
} else if resource != core.AcmeResource(parsedRequest.Resource) {
|
||||
wfe.stats.Inc("WFE.Errors.MismatchedResourceInJWSPayload", 1, 1.0)
|
||||
wfe.stats.Inc("Errors.MismatchedResourceInJWSPayload", 1)
|
||||
logEvent.AddError("JWS request payload does not match resource")
|
||||
return nil, nil, reg, probs.Malformed("JWS resource payload does not match the HTTP resource: %s != %s", parsedRequest.Resource, resource)
|
||||
}
|
||||
|
@ -532,10 +530,10 @@ func (wfe *WebFrontEndImpl) sendError(response http.ResponseWriter, logEvent *re
|
|||
response.WriteHeader(code)
|
||||
response.Write(problemDoc)
|
||||
|
||||
wfe.stats.Inc(fmt.Sprintf("WFE.HTTP.ErrorCodes.%d", code), 1, 1.0)
|
||||
wfe.stats.Inc(fmt.Sprintf("HTTP.ErrorCodes.%d", code), 1)
|
||||
problemSegments := strings.Split(string(prob.Type), ":")
|
||||
if len(problemSegments) > 0 {
|
||||
wfe.stats.Inc(fmt.Sprintf("WFE.HTTP.ProblemTypes.%s", problemSegments[len(problemSegments)-1]), 1, 1.0)
|
||||
wfe.stats.Inc(fmt.Sprintf("HTTP.ProblemTypes.%s", problemSegments[len(problemSegments)-1]), 1)
|
||||
}
|
||||
}
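On the WFE side the constructor now takes a Scope directly and forwards it to the nonce service. A caller-side sketch in the spirit of the test below, assuming the four-argument signature shown above; the zero-value key policy is a stand-in, not production wiring.

package example

import (
	"github.com/jmhodges/clock"
	"github.com/letsencrypt/boulder/goodkey"
	blog "github.com/letsencrypt/boulder/log"
	"github.com/letsencrypt/boulder/metrics"
	"github.com/letsencrypt/boulder/wfe"
)

func newTestWFE() (wfe.WebFrontEndImpl, error) {
	stats := metrics.NewNoopScope()
	// The WFE hands this scope to nonce.NewNonceService, which adds its own
	// "NonceService" sub-scope (see the nonce hunk earlier in this diff).
	return wfe.NewWebFrontEndImpl(stats, clock.NewFake(), goodkey.KeyPolicy{}, blog.NewMock())
}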
|
||||
|
||||
|
|
|
@ -20,7 +20,6 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/cactus/go-statsd-client/statsd"
|
||||
"github.com/jmhodges/clock"
|
||||
"github.com/square/go-jose"
|
||||
"golang.org/x/net/context"
|
||||
|
@ -28,6 +27,7 @@ import (
|
|||
"github.com/letsencrypt/boulder/core"
|
||||
"github.com/letsencrypt/boulder/goodkey"
|
||||
blog "github.com/letsencrypt/boulder/log"
|
||||
"github.com/letsencrypt/boulder/metrics"
|
||||
"github.com/letsencrypt/boulder/mocks"
|
||||
"github.com/letsencrypt/boulder/nonce"
|
||||
"github.com/letsencrypt/boulder/probs"
|
||||
|
@ -219,7 +219,7 @@ var ctx = context.Background()
|
|||
|
||||
func setupWFE(t *testing.T) (WebFrontEndImpl, clock.FakeClock) {
|
||||
fc := clock.NewFake()
|
||||
stats, _ := statsd.NewNoopClient()
|
||||
stats := metrics.NewNoopScope()
|
||||
|
||||
wfe, err := NewWebFrontEndImpl(stats, fc, testKeyPolicy, blog.NewMock())
|
||||
test.AssertNotError(t, err, "Unable to create WFE")
|
||||
|
@ -228,7 +228,6 @@ func setupWFE(t *testing.T) (WebFrontEndImpl, clock.FakeClock) {
|
|||
|
||||
wfe.RA = &MockRegistrationAuthority{}
|
||||
wfe.SA = mocks.NewStorageAuthority(fc)
|
||||
wfe.stats, _ = statsd.NewNoopClient()
|
||||
|
||||
return wfe, fc
|
||||
}
|
||||
|
@ -621,7 +620,7 @@ func TestIssueCertificate(t *testing.T) {
|
|||
|
||||
// TODO: Use a mock RA so we can test various conditions of authorized, not
|
||||
// authorized, etc.
|
||||
stats, _ := statsd.NewNoopClient(nil)
|
||||
stats := metrics.NewNoopScope()
|
||||
ra := ra.NewRegistrationAuthorityImpl(
|
||||
fc,
|
||||
wfe.log,
|
||||
|
@ -1294,7 +1293,6 @@ func TestRevokeCertificateAlreadyRevoked(t *testing.T) {
|
|||
wfe, fc := setupWFE(t)
|
||||
|
||||
wfe.SA = &mockSANoSuchRegistration{mocks.NewStorageAuthority(fc)}
|
||||
wfe.stats, _ = statsd.NewNoopClient()
|
||||
responseWriter := httptest.NewRecorder()
|
||||
responseWriter.Body.Reset()
|
||||
signer.SetNonceSource(wfe.nonceService)
|
||||
|
|