Merge branch 'master' into exact-name-rl

Commit bee236af3a
Godeps/Godeps.json

@@ -25,11 +25,6 @@
       "Comment": "1.1.0-355-g3f3fa68",
       "Rev": "3f3fa68e8d6ce6ceace60ea86461f8be41fa477b"
     },
-    {
-      "ImportPath": "github.com/cloudflare/cfssl/crypto/pkcs11key",
-      "Comment": "1.1.0-355-g3f3fa68",
-      "Rev": "3f3fa68e8d6ce6ceace60ea86461f8be41fa477b"
-    },
     {
       "ImportPath": "github.com/cloudflare/cfssl/crypto/pkcs7",
       "Comment": "1.1.0-355-g3f3fa68",
@@ -114,23 +109,23 @@
     },
     {
       "ImportPath": "github.com/google/certificate-transparency/go",
-      "Rev": "72d5367bd7ff1f4401c5649817dca766b668e322"
+      "Rev": "c78982664ca89efe5204eb83a91b15866b04b1f1"
     },
     {
       "ImportPath": "github.com/google/certificate-transparency/go/asn1",
-      "Rev": "72d5367bd7ff1f4401c5649817dca766b668e322"
+      "Rev": "c78982664ca89efe5204eb83a91b15866b04b1f1"
     },
     {
       "ImportPath": "github.com/google/certificate-transparency/go/client",
-      "Rev": "72d5367bd7ff1f4401c5649817dca766b668e322"
+      "Rev": "c78982664ca89efe5204eb83a91b15866b04b1f1"
     },
     {
       "ImportPath": "github.com/google/certificate-transparency/go/x509",
-      "Rev": "72d5367bd7ff1f4401c5649817dca766b668e322"
+      "Rev": "c78982664ca89efe5204eb83a91b15866b04b1f1"
     },
     {
       "ImportPath": "github.com/google/certificate-transparency/go/x509/pkix",
-      "Rev": "72d5367bd7ff1f4401c5649817dca766b668e322"
+      "Rev": "c78982664ca89efe5204eb83a91b15866b04b1f1"
     },
     {
       "ImportPath": "github.com/jmhodges/clock",
@@ -153,6 +148,10 @@
       "ImportPath": "github.com/letsencrypt/net/publicsuffix",
       "Rev": "7eb31e4ef6d2609d3ef5597cbc65cb68c8027f62"
     },
+    {
+      "ImportPath": "github.com/letsencrypt/pkcs11key",
+      "Rev": "2c364245d9f3b4f744ed6f8b37c3b27107f3a72a"
+    },
     {
       "ImportPath": "github.com/miekg/dns",
       "Rev": "d27455715200c7d3e321a1e5cadb27c9ee0b0f02"
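Illustrative note (not part of the commit): the three Godeps.json hunks above drop the vendored cloudflare/cfssl copy of pkcs11key, re-pin every certificate-transparency package to a newer upstream revision, and add a standalone github.com/letsencrypt/pkcs11key dependency. Below is a minimal Go sketch of what each manifest entry records; the struct is illustrative only and is not taken from the godep tool's source.

package main

import "fmt"

// Field names mirror the JSON keys shown in the hunks above.
type godepDependency struct {
    ImportPath string // upstream package path that Boulder imports
    Comment    string // human-readable version such as "1.1.0-355-g3f3fa68" (git-describe style: nearest tag, commits since it, short hash)
    Rev        string // exact upstream git commit vendored under Godeps/_workspace
}

func main() {
    added := godepDependency{
        ImportPath: "github.com/letsencrypt/pkcs11key",
        Rev:        "2c364245d9f3b4f744ed6f8b37c3b27107f3a72a",
    }
    fmt.Printf("%+v\n", added)
}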
Godeps/_workspace/src/github.com/cloudflare/cfssl/helpers/pkcs11uri/pkcs11uri.go (generated, vendored; 2 changed lines)
@@ -10,8 +10,8 @@ import (
     "net/url"
     "strings"

-    "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/crypto/pkcs11key"
     "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/errors"
+    "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/letsencrypt/pkcs11key"
 )

 func setIfPresent(val url.Values, k string, target *string) {
@@ -3,7 +3,7 @@
 package config

 import (
-    "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/crypto/pkcs11key"
+    "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/letsencrypt/pkcs11key"
     "time"
 )

@@ -5,12 +5,12 @@
 package pkcs11

 import (
-    "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/crypto/pkcs11key"
     "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/errors"
     "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/helpers"
     "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/log"
     "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/ocsp"
     ocspConfig "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/ocsp/config"
+    "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/letsencrypt/pkcs11key"
     "io/ioutil"
 )

@@ -6,12 +6,12 @@ import (
     "io/ioutil"

     "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/config"
-    "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/crypto/pkcs11key"
     "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/errors"
     "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/helpers"
     "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/log"
     "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/signer"
     "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/signer/local"
+    "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/letsencrypt/pkcs11key"
 )

 // Enabled is set to true if PKCS #11 support is present.
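Illustrative note (not part of the commit): the four source hunks above make the same mechanical substitution, moving the pkcs11key import from the vendored cloudflare/cfssl tree to the newly vendored letsencrypt/pkcs11key package. Both packages use the package name pkcs11key, so call sites compile unchanged and only the import lines move. A hypothetical file showing the swapped import path (a blank import is used so the sketch compiles without assuming anything about the package's API):

package example

// Removed form (old import line in the hunks above):
//   "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/crypto/pkcs11key"
// Added form (new import line in the hunks above):
import (
    _ "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/letsencrypt/pkcs11key"
)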
Godeps/_workspace/src/github.com/google/certificate-transparency/LICENSE (generated, vendored, new file; 202 lines)

@@ -0,0 +1,202 @@
(Full, unmodified text of the Apache License, Version 2.0, January 2004, http://www.apache.org/licenses/, including the standard appendix describing how to apply the license to a work.)
Godeps/_workspace/src/github.com/google/certificate-transparency/go/client/logclient.go (generated, vendored; 86 changed lines)
@@ -18,6 +18,7 @@ import (

     "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go"
     "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/mreiferson/go-httpclient"
+    "github.com/letsencrypt/boulder/Godeps/_workspace/src/golang.org/x/net/context"
 )

 // URI paths for CT Log endpoints
@@ -178,44 +179,65 @@ func (c *LogClient) postAndParse(uri string, req interface{}, res interface{}) (
    return resp, string(body), nil
}

func backoffForRetry(ctx context.Context, d time.Duration) error {
    backoffTimer := time.NewTimer(d)
    if ctx != nil {
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-backoffTimer.C:
        }
    } else {
        <-backoffTimer.C
    }
    return nil
}

// Attempts to add |chain| to the log, using the api end-point specified by
// |path|.
func (c *LogClient) addChainWithRetry(path string, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
// |path|. If provided context expires before submission is complete an
// error will be returned.
func (c *LogClient) addChainWithRetry(ctx context.Context, path string, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
    var resp addChainResponse
    var req addChainRequest
    for _, link := range chain {
        req.Chain = append(req.Chain, base64.StdEncoding.EncodeToString(link))
    }
    done := false
    httpStatus := "Unknown"
    backoffSeconds := 0
    done := false
    for !done {
        backoffSeconds := 0
        if backoffSeconds > 0 {
            log.Printf("Got %s, backing-off %d seconds", httpStatus, backoffSeconds)
        }
        err := backoffForRetry(ctx, time.Second*time.Duration(backoffSeconds))
        if err != nil {
            return nil, err
        }
        if backoffSeconds > 0 {
            backoffSeconds = 0
        }
        httpResp, errorBody, err := c.postAndParse(c.uri+path, &req, &resp)
        if err != nil {
            log.Printf("Got %s, backing off.", err)
            backoffSeconds = 10
        } else {
            switch {
            case httpResp.StatusCode == 200:
                done = true
                break
            case httpResp.StatusCode == 408:
            case httpResp.StatusCode == 503:
                // Retry
                backoffSeconds = 10
                if retryAfter := httpResp.Header.Get("Retry-After"); retryAfter != "" {
                    if seconds, err := strconv.Atoi(retryAfter); err != nil {
                        backoffSeconds = seconds
                    }
                }
            default:
                return nil, fmt.Errorf("Got HTTP Status %s: %s", httpResp.Status, errorBody)
            }
            httpStatus = httpResp.Status
            continue
        }
        // Now back-off before retrying
        log.Printf("Got %s, backing-off %d seconds.", httpStatus, backoffSeconds)
        time.Sleep(time.Duration(backoffSeconds) * time.Second)
        switch {
        case httpResp.StatusCode == 200:
            done = true
        case httpResp.StatusCode == 408:
            // request timeout, retry immediately
        case httpResp.StatusCode == 503:
            // Retry
            backoffSeconds = 10
            if retryAfter := httpResp.Header.Get("Retry-After"); retryAfter != "" {
                if seconds, err := strconv.Atoi(retryAfter); err == nil {
                    backoffSeconds = seconds
                }
            }
        default:
            return nil, fmt.Errorf("got HTTP Status %s: %s", httpResp.Status, errorBody)
        }
        httpStatus = httpResp.Status
    }

    rawLogID, err := base64.StdEncoding.DecodeString(resp.ID)
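Illustrative note (not part of the commit): the hunk above interleaves the removed and added retry loops. The older loop slept unconditionally with time.Sleep between attempts; the updated addChainWithRetry threads a context.Context through the new backoffForRetry helper so a caller can abandon a submission early. Below is a self-contained sketch of the same wait-for-timer-or-cancellation pattern; the helper name is mine, and only the standard library plus the vendored golang.org/x/net/context package are assumed.

package main

import (
    "fmt"
    "time"

    "golang.org/x/net/context"
)

// sleepOrCancel waits for the back-off duration d, but returns early with the
// context's error if ctx is cancelled or times out first. A nil context
// degrades to a plain, uninterruptible wait, mirroring backoffForRetry above.
func sleepOrCancel(ctx context.Context, d time.Duration) error {
    timer := time.NewTimer(d)
    defer timer.Stop()
    if ctx == nil {
        <-timer.C
        return nil
    }
    select {
    case <-ctx.Done():
        return ctx.Err()
    case <-timer.C:
        return nil
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
    defer cancel()
    // The nominal 10-second back-off is cut short by the 100ms deadline.
    fmt.Println(sleepOrCancel(ctx, 10*time.Second)) // context deadline exceeded
}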
@@ -242,12 +264,18 @@ func (c *LogClient) addChainWithRetry(path string, chain []ct.ASN1Cert) (*ct.Sig

 // AddChain adds the (DER represented) X509 |chain| to the log.
 func (c *LogClient) AddChain(chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
-    return c.addChainWithRetry(AddChainPath, chain)
+    return c.addChainWithRetry(nil, AddChainPath, chain)
 }

 // AddPreChain adds the (DER represented) Precertificate |chain| to the log.
 func (c *LogClient) AddPreChain(chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
-    return c.addChainWithRetry(AddPreChainPath, chain)
+    return c.addChainWithRetry(nil, AddPreChainPath, chain)
 }

+// AddChainWithContext adds the (DER represented) X509 |chain| to the log and
+// fails if the provided context expires before the chain is submitted.
+func (c *LogClient) AddChainWithContext(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
+    return c.addChainWithRetry(ctx, AddChainPath, chain)
+}
+
 // GetSTH retrieves the current STH from the log.
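Illustrative note (not part of the commit): AddChain and AddPreChain keep their signatures by passing a nil context, while the new AddChainWithContext lets a caller bound the total time spent retrying. A hypothetical caller sketch follows; client.New as the constructor and the log URI are assumptions, and the chain contents are elided.

package main

import (
    "log"
    "time"

    ct "github.com/google/certificate-transparency/go"
    ctclient "github.com/google/certificate-transparency/go/client"
    "golang.org/x/net/context"
)

func main() {
    logClient := ctclient.New("https://ct.example.net") // constructor name is an assumption
    chain := []ct.ASN1Cert{ /* DER-encoded certificates, elided */ }

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    // Unlike AddChain, this call stops retrying once ctx expires.
    sct, err := logClient.AddChainWithContext(ctx, chain)
    if err != nil {
        log.Fatalf("submission failed or timed out: %v", err)
    }
    log.Printf("got SCT with timestamp %d", sct.Timestamp)
}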
@@ -299,7 +327,7 @@ func (c *LogClient) GetEntries(start, end int64) ([]ct.LogEntry, error) {
     if err != nil {
         return nil, err
     }
-    entries := make([]ct.LogEntry, end-start+1, end-start+1)
+    entries := make([]ct.LogEntry, len(resp.Entries))
     for index, entry := range resp.Entries {
         leafBytes, err := base64.StdEncoding.DecodeString(entry.LeafInput)
         leaf, err := ct.ReadMerkleTreeLeaf(bytes.NewBuffer(leafBytes))
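Illustrative note (not part of the commit): the GetEntries change sizes the result slice from what the log actually returned rather than from the requested range; RFC 6962 permits a log to return fewer entries than asked for, and the old sizing left zero-valued padding entries at the tail of the slice in that case. A small sketch of the difference:

package main

import "fmt"

type logEntry struct{ Index int64 }

// collect mirrors the slice-sizing change above: size the output from the
// response, not from the requested range.
func collect(start, end int64, returned []logEntry) []logEntry {
    // Old: entries := make([]logEntry, end-start+1) -- padded with zero values
    // when the server returns fewer rows than requested.
    entries := make([]logEntry, len(returned))
    copy(entries, returned)
    return entries
}

func main() {
    got := collect(0, 9, []logEntry{{0}, {1}, {2}}) // server returned 3 of 10
    fmt.Println(len(got))                           // 3, not 10
}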
Godeps/_workspace/src/github.com/google/certificate-transparency/go/gossip/handler.go (generated, vendored; 143 lines deleted)

@@ -1,143 +0,0 @@
(Entire vendored gossip HTTP handler removed: the clock interface and realClock implementation, the SignatureVerifierMap type keyed by log ID, the Handler struct holding a *Storage, the verifier map, and a clock, the HandleSCTFeedback and HandleSTHPollination POST endpoints that decode JSON bodies, drop STHs whose signatures do not verify against known logs, persist the data via Storage, and return a random set of pollination entries no older than 14 days, and the NewHandler and newHandlerWithClock constructors.)
@@ -1,73 +0,0 @@
(Deleted in full, 73 lines: the vendored gossip server's package main, with flags --database, --listen, and --log_public_keys, a createVerifiers helper that loads comma-separated PEM files of trusted log public keys into a gossip.SignatureVerifierMap, and a main function that opens the SQLite-backed gossip.Storage, wires /.well-known/ct/v1/sct-feedback and /.well-known/ct/v1/sth-pollination to the handler, and serves HTTP on the configured address.)
Godeps/_workspace/src/github.com/google/certificate-transparency/go/gossip/storage.go (generated, vendored; 377 lines deleted)

@@ -1,377 +0,0 @@
(Entire vendored SQLite3-backed gossip storage layer removed: the schema for the sths, scts, chains, and sct_feedback tables, the prepared insert/select statements, Open and Close, insert-or-select-ID helpers that treat sqlite3.ErrConstraint as an already-present row, AddSCTFeedback and AddSTHPollination with commit-or-rollback transaction handling, GetRandomSTHPollination returning a random sample of STHs newer than a cutoff, and the unexported count and existence helpers.)
Godeps/_workspace/src/github.com/google/certificate-transparency/go/gossip/types.go (generated, vendored; 30 lines deleted)

@@ -1,30 +0,0 @@
-package gossip
-
-import (
-	ct "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go"
-)
-
-// STHVersion reflects the STH Version field in RFC6862[-bis]
-type STHVersion int
-
-// STHVersion constants
-const (
-	STHVersion0 = 0
-	STHVersion1 = 1
-)
-
-// SCTFeedbackEntry represents a single piece of SCT feedback.
-type SCTFeedbackEntry struct {
-	X509Chain []string `json:"x509_chain"`
-	SCTData   []string `json:"sct_data"`
-}
-
-// SCTFeedback represents a collection of SCTFeedback which a client might send together.
-type SCTFeedback struct {
-	Feedback []SCTFeedbackEntry `json:"sct_feedback"`
-}
-
-// STHPollination represents a collection of STH pollination entries which a client might send together.
-type STHPollination struct {
-	STHs []ct.SignedTreeHead `json:"sths"`
-}
@@ -1,131 +0,0 @@
(Deleted in full, 131 lines: the vendored cgo wrapper around the C++ CT MerkleTree library. It defined CPPMerkleTree with LeafCount, LevelCount, AddLeaf, AddLeafHash, LeafHash, CurrentRoot, RootAtSnapshot, PathToCurrentRoot, PathToRootAtSnapshot, and SnapshotConsistency, a splitSlice helper for unpacking fixed-size node buffers, and NewCPPMerkleTree and DeletePeer for managing the C++ peer object.)
@@ -1,154 +0,0 @@
(Deleted in full, 154 lines: the C++ side of the cgo bridge, an extern "C" block of trampoline functions (NewSha256Hasher, NewMerkleTree, DeleteMerkleTree, NodeSize, LeafCount, LeafHash, LevelCount, AddLeaf, AddLeafHash, CurrentRoot, RootAtSnapshot, PathToCurrentRoot, PathToRootAtSnapshot, SnapshotConsistency) that cast the opaque TREE, HASHER, and BYTE_SLICE handles back to MerkleTree, Sha256Hasher, and GoSlice and copy results into Go-allocated buffers via CopyNodesToSlice.)
@ -1,74 +0,0 @@
#include <stdbool.h>
#include <sys/types.h>

#ifndef GO_MERKLETREE_MERKLE_TREE_H_
#define GO_MERKLETREE_MERKLE_TREE_H_

// These types & functions provide a trampoline to call the C++ MerkleTree
// implementation from within Go code.
//
// Generally we try to jump through hoops to not allocate memory from the C++
// side, but rather have Go allocate it inside its GC memory such that we don't
// have to worry about leaks. Apart from the obvious benefit of doing it this
// way, it usually also means one less memcpy() too which is nice.

#ifdef __cplusplus
extern "C" {
#endif

// The _cgo_export.h file doesn't appear to exist when this header is pulled in
// to the .go file, because of this we can't use types like GoSlice here and so
// we end up with void* everywhere; we'll at least typedef them so that the
// source is a _little_ more readable.
// Grumble grumble.
typedef void* HASHER;
typedef void* TREE;
typedef void* BYTE_SLICE;

// Allocators & deallocators:

// Creates a new Sha256Hasher
HASHER NewSha256Hasher();

// Creates a new MerkleTree passing in |hasher|.
// The MerkleTree takes ownership of |hasher|.
TREE NewMerkleTree(HASHER hasher);

// Deletes the passed in |tree|.
void DeleteMerkleTree(TREE tree);

// MerkleTree methods below.
// See the comments in ../../merkletree/merkle_tree.h for details

size_t NodeSize(TREE tree);
size_t LeafCount(TREE tree);
bool LeafHash(TREE tree, BYTE_SLICE out, size_t leaf);
size_t LevelCount(TREE tree);
size_t AddLeaf(TREE tree, BYTE_SLICE leaf);
size_t AddLeafHash(TREE tree, BYTE_SLICE hash);
bool CurrentRoot(TREE tree, BYTE_SLICE out);
bool RootAtSnapshot(TREE tree, BYTE_SLICE out, size_t snapshot);

// |out| must contain sufficent space to hold all of the path elements
// sequentially.
// |num_entries| is set to the number of actual elements stored in |out|.
bool PathToCurrentRoot(TREE tree, BYTE_SLICE out, size_t* num_entries,
                       size_t leaf);

// |out| must contain sufficent space to hold all of the path elements
// sequentially.
// |num_entries| is set to the number of actual elements stored in |out|.
bool PathToRootAtSnapshot(TREE tree, BYTE_SLICE out, size_t* num_entries,
                          size_t leaf, size_t snapshot);

// |out| must contain sufficent space to hold all of the path elements
// sequentially.
// |num_entries| is set to the number of actual elements stored in |out|.
bool SnapshotConsistency(TREE tree, BYTE_SLICE out, size_t* num_entries,
                         size_t snapshot1, size_t snapshot2);

#ifdef __cplusplus
}
#endif

#endif // GO_MERKLETREE_MERKLE_TREE_H_

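The comment block at the top of this header captures the trampoline's central convention: Go owns every buffer, and the C++ side only copies fixed-size nodes into space the caller has already sized (hence the repeated notes that |out| must be pre-sized and the |num_entries| out-parameter). A minimal pure-Go sketch of that convention, with invented names that are not part of the vendored API, might look like:

package main

import "fmt"

// copyNodes mimics CopyNodesToSlice above: it copies fixed-size nodes into a
// caller-provided buffer and reports how many nodes it wrote, refusing (and
// copying nothing) when the buffer is too small.
func copyNodes(dst []byte, nodes [][]byte, nodeSize int) (int, bool) {
    if len(dst) < len(nodes)*nodeSize {
        return 0, false
    }
    for i, n := range nodes {
        copy(dst[i*nodeSize:(i+1)*nodeSize], n)
    }
    return len(nodes), true
}

func main() {
    const nodeSize = 4
    path := [][]byte{[]byte("aaaa"), []byte("bbbb"), []byte("cccc")}

    // The caller sizes the output up front, as the header requires of |out|.
    out := make([]byte, len(path)*nodeSize)
    if n, ok := copyNodes(out, path, nodeSize); ok {
        fmt.Printf("copied %d nodes: %q\n", n, out)
    }
}
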
@ -1,38 +0,0 @@
package merkletree

// MerkleTreeInterface represents the common interface for basic MerkleTree functions.
type MerkleTreeInterface interface {
    // LeafCount returns the number of leaves in the tree
    LeafCount() uint64

    // LevelCount returns the number of levels in the tree
    LevelCount() uint64

    // AddLeaf adds the hash of |leaf| to the tree and returns the newly added
    // leaf index
    AddLeaf(leaf []byte) uint64

    // LeafHash returns the hash of the leaf at index |leaf| or a non-nil error.
    LeafHash(leaf uint64) ([]byte, error)

    // CurrentRoot returns the current root hash of the merkle tree.
    CurrentRoot() ([]byte, error)
}

// FullMerkleTreeInterface extends MerkleTreeInterface to the full range of
// operations that only a non-compact tree representation can implement.
type FullMerkleTreeInterface interface {
    MerkleTreeInterface

    // RootAtSnapshot returns the root hash at the tree size |snapshot|
    // which must be <= than the current tree size.
    RootAtSnapshot(snapshot uint64) ([]byte, error)

    // PathToCurrentRoot returns the Merkle path (or inclusion proof) from the
    // leaf hash at index |leaf| to the current root.
    PathToCurrentRoot(leaf uint64) ([]byte, error)

    // SnapshotConsistency returns a consistency proof between the two tree
    // sizes specified in |snapshot1| and |snapshot2|.
    SnapshotConsistency(snapshot1, snapshot2 uint64) ([]byte, error)
}

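These two interfaces are all the rest of the package needs from a tree implementation. As a rough illustration of the calling pattern of the basic interface, here is a toy in-memory type whose methods match its shape; the hashing is deliberately not RFC 6962 and the names are invented for the sketch, so treat it as illustrative only:

package main

import (
    "crypto/sha256"
    "errors"
    "fmt"
    "math/bits"
)

// toyTree is a stand-in with the same method set as MerkleTreeInterface.
// It hashes leaves with SHA-256 and derives a "root" by hashing the
// concatenated leaf hashes, which is NOT the RFC 6962 tree hash.
type toyTree struct {
    leafHashes [][]byte
}

func (t *toyTree) LeafCount() uint64 { return uint64(len(t.leafHashes)) }

// LevelCount reports how many levels a binary tree over the current leaves
// would have (1 for a single leaf, 2 for two leaves, and so on).
func (t *toyTree) LevelCount() uint64 {
    n := len(t.leafHashes)
    if n == 0 {
        return 0
    }
    return uint64(bits.Len(uint(n-1)) + 1)
}

// AddLeaf hashes |leaf| and returns the 1-based index of the new leaf.
func (t *toyTree) AddLeaf(leaf []byte) uint64 {
    h := sha256.Sum256(leaf)
    t.leafHashes = append(t.leafHashes, h[:])
    return uint64(len(t.leafHashes))
}

func (t *toyTree) LeafHash(leaf uint64) ([]byte, error) {
    if leaf == 0 || leaf > uint64(len(t.leafHashes)) {
        return nil, errors.New("leaf index out of range")
    }
    return t.leafHashes[leaf-1], nil
}

func (t *toyTree) CurrentRoot() ([]byte, error) {
    if len(t.leafHashes) == 0 {
        return nil, errors.New("empty tree")
    }
    h := sha256.New()
    for _, lh := range t.leafHashes {
        h.Write(lh)
    }
    return h.Sum(nil), nil
}

func main() {
    var t toyTree
    t.AddLeaf([]byte("hello"))
    t.AddLeaf([]byte("world"))
    root, _ := t.CurrentRoot()
    fmt.Printf("leaves=%d levels=%d root=%x\n", t.LeafCount(), t.LevelCount(), root)
}
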
@ -1 +0,0 @@
#include "merkletree/merkle_tree.cc"

@ -1 +0,0 @@
#include "merkletree/merkle_tree_math.cc"

@ -1 +0,0 @@
#include "merkletree/serial_hasher.cc"

@ -1 +0,0 @@
#include "merkletree/tree_hasher.cc"

@ -1,57 +0,0 @@
package main

import (
    "compress/zlib"
    "encoding/gob"
    "flag"
    "io"
    "log"
    "os"

    "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/preload"
)

var sctFile = flag.String("sct_file", "", "File to load SCTs & leaf data from")

func main() {
    flag.Parse()
    var sctReader io.ReadCloser
    if *sctFile == "" {
        log.Fatal("Must specify --sct_file")
    }

    sctFileReader, err := os.Open(*sctFile)
    if err != nil {
        log.Fatal(err)
    }
    sctReader, err = zlib.NewReader(sctFileReader)
    if err != nil {
        log.Fatal(err)
    }
    defer func() {
        err := sctReader.Close()
        if err != nil && err != io.EOF {
            log.Fatalf("Error closing file: %s", err)
        }
    }()

    // TODO(alcutter) should probably store this stuff in a protobuf really.
    decoder := gob.NewDecoder(sctReader)
    var addedCert preload.AddedCert
    numAdded := 0
    numFailed := 0
    for {
        err = decoder.Decode(&addedCert)
        if err != nil {
            break
        }
        if addedCert.AddedOk {
            log.Println(addedCert.SignedCertificateTimestamp)
            numAdded++
        } else {
            log.Printf("Cert was not added: %s", addedCert.ErrorMessage)
            numFailed++
        }
    }
    log.Printf("Num certs added: %d, num failed: %d\n", numAdded, numFailed)
}

197  Godeps/_workspace/src/github.com/google/certificate-transparency/go/preload/main/preload.go  (generated) (vendored)

@ -1,197 +0,0 @@
package main

import (
    "compress/zlib"
    "encoding/gob"
    "flag"
    "io"
    "io/ioutil"
    "log"
    "os"
    "regexp"
    "sync"

    "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go"
    "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/client"
    "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/preload"
    "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/scanner"
)

const (
    // A regex which cannot match any input
    MatchesNothingRegex = "a^"
)

var sourceLogUri = flag.String("source_log_uri", "http://ct.googleapis.com/aviator", "CT log base URI to fetch entries from")
var targetLogUri = flag.String("target_log_uri", "http://example.com/ct", "CT log base URI to add entries to")
var batchSize = flag.Int("batch_size", 1000, "Max number of entries to request at per call to get-entries")
var numWorkers = flag.Int("num_workers", 2, "Number of concurrent matchers")
var parallelFetch = flag.Int("parallel_fetch", 2, "Number of concurrent GetEntries fetches")
var parallelSubmit = flag.Int("parallel_submit", 2, "Number of concurrent add-[pre]-chain requests")
var startIndex = flag.Int64("start_index", 0, "Log index to start scanning at")
var quiet = flag.Bool("quiet", false, "Don't print out extra logging messages, only matches.")
var sctInputFile = flag.String("sct_file", "", "File to save SCTs & leaf data to")
var precertsOnly = flag.Bool("precerts_only", false, "Only match precerts")

func createMatcher() (scanner.Matcher, error) {
    // Make a "match everything" regex matcher
    precertRegex := regexp.MustCompile(".*")
    var certRegex *regexp.Regexp
    if *precertsOnly {
        certRegex = regexp.MustCompile(MatchesNothingRegex)
    } else {
        certRegex = precertRegex
    }
    return scanner.MatchSubjectRegex{
        CertificateSubjectRegex:    certRegex,
        PrecertificateSubjectRegex: precertRegex}, nil
}

func recordSct(addedCerts chan<- *preload.AddedCert, certDer ct.ASN1Cert, sct *ct.SignedCertificateTimestamp) {
    addedCert := preload.AddedCert{
        CertDER:                    certDer,
        SignedCertificateTimestamp: *sct,
        AddedOk:                    true,
    }
    addedCerts <- &addedCert
}

func recordFailure(addedCerts chan<- *preload.AddedCert, certDer ct.ASN1Cert, addError error) {
    addedCert := preload.AddedCert{
        CertDER:      certDer,
        AddedOk:      false,
        ErrorMessage: addError.Error(),
    }
    addedCerts <- &addedCert
}

func sctWriterJob(addedCerts <-chan *preload.AddedCert, sctWriter io.Writer, wg *sync.WaitGroup) {
    encoder := gob.NewEncoder(sctWriter)

    numAdded := 0
    numFailed := 0

    for c := range addedCerts {
        if c.AddedOk {
            numAdded++
        } else {
            numFailed++
        }
        if encoder != nil {
            err := encoder.Encode(c)
            if err != nil {
                log.Fatalf("failed to encode to %s: %v", *sctInputFile, err)
            }
        }
    }
    log.Printf("Added %d certs, %d failed, total: %d\n", numAdded, numFailed, numAdded+numFailed)
    wg.Done()
}

func certSubmitterJob(addedCerts chan<- *preload.AddedCert, log_client *client.LogClient, certs <-chan *ct.LogEntry,
    wg *sync.WaitGroup) {
    for c := range certs {
        chain := make([]ct.ASN1Cert, len(c.Chain)+1)
        chain[0] = c.X509Cert.Raw
        copy(chain[1:], c.Chain)
        sct, err := log_client.AddChain(chain)
        if err != nil {
            log.Printf("failed to add chain with CN %s: %v\n", c.X509Cert.Subject.CommonName, err)
            recordFailure(addedCerts, chain[0], err)
            continue
        }
        recordSct(addedCerts, chain[0], sct)
        if !*quiet {
            log.Printf("Added chain for CN '%s', SCT: %s\n", c.X509Cert.Subject.CommonName, sct)
        }
    }
    wg.Done()
}

func precertSubmitterJob(addedCerts chan<- *preload.AddedCert, log_client *client.LogClient,
    precerts <-chan *ct.LogEntry,
    wg *sync.WaitGroup) {
    for c := range precerts {
        sct, err := log_client.AddPreChain(c.Chain)
        if err != nil {
            log.Printf("failed to add pre-chain with CN %s: %v", c.Precert.TBSCertificate.Subject.CommonName, err)
            recordFailure(addedCerts, c.Chain[0], err)
            continue
        }
        recordSct(addedCerts, c.Chain[0], sct)
        if !*quiet {
            log.Printf("Added precert chain for CN '%s', SCT: %s\n", c.Precert.TBSCertificate.Subject.CommonName, sct)
        }
    }
    wg.Done()
}

func main() {
    flag.Parse()
    var sctFileWriter io.Writer
    var err error
    if *sctInputFile != "" {
        sctFileWriter, err = os.Create(*sctInputFile)
        if err != nil {
            log.Fatal(err)
        }
    } else {
        sctFileWriter = ioutil.Discard
    }

    sctWriter := zlib.NewWriter(sctFileWriter)
    defer func() {
        err := sctWriter.Close()
        if err != nil {
            log.Fatal(err)
        }
    }()

    fetchLogClient := client.New(*sourceLogUri)
    matcher, err := createMatcher()
    if err != nil {
        log.Fatal(err)
    }

    opts := scanner.ScannerOptions{
        Matcher:       matcher,
        BatchSize:     *batchSize,
        NumWorkers:    *numWorkers,
        ParallelFetch: *parallelFetch,
        StartIndex:    *startIndex,
        Quiet:         *quiet,
    }
    scanner := scanner.NewScanner(fetchLogClient, opts)

    certs := make(chan *ct.LogEntry, *batchSize**parallelFetch)
    precerts := make(chan *ct.LogEntry, *batchSize**parallelFetch)
    addedCerts := make(chan *preload.AddedCert, *batchSize**parallelFetch)

    var sctWriterWG sync.WaitGroup
    sctWriterWG.Add(1)
    go sctWriterJob(addedCerts, sctWriter, &sctWriterWG)

    submitLogClient := client.New(*targetLogUri)

    var submitterWG sync.WaitGroup
    for w := 0; w < *parallelSubmit; w++ {
        submitterWG.Add(2)
        go certSubmitterJob(addedCerts, submitLogClient, certs, &submitterWG)
        go precertSubmitterJob(addedCerts, submitLogClient, precerts, &submitterWG)
    }

    addChainFunc := func(entry *ct.LogEntry) {
        certs <- entry
    }
    addPreChainFunc := func(entry *ct.LogEntry) {
        precerts <- entry
    }

    scanner.Scan(addChainFunc, addPreChainFunc)

    close(certs)
    close(precerts)
    submitterWG.Wait()
    close(addedCerts)
    sctWriterWG.Wait()
}

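The tail of main() above is easy to misread: channels have to be closed and WaitGroups drained in a specific order, or submissions and SCT records can be dropped. The following standalone sketch (invented names, not part of the vendored code) shows the same ordering in miniature: stop feeding work, wait for the submit workers, then close the results channel and wait for the single collector.

package main

import (
    "fmt"
    "sync"
)

// A stripped-down version of the shutdown ordering used in preload's main():
// workers drain the work channel, a single collector drains results, and the
// closes/waits happen in the same order so nothing is lost.
func main() {
    work := make(chan int, 8)
    results := make(chan string, 8)

    var collectorWG sync.WaitGroup
    collectorWG.Add(1)
    go func() {
        defer collectorWG.Done()
        for r := range results {
            fmt.Println(r)
        }
    }()

    var workerWG sync.WaitGroup
    for w := 0; w < 2; w++ {
        workerWG.Add(1)
        go func(id int) {
            defer workerWG.Done()
            for item := range work {
                results <- fmt.Sprintf("worker %d handled item %d", id, item)
            }
        }(w)
    }

    for i := 0; i < 5; i++ {
        work <- i
    }

    // Same ordering as above: stop feeding work, wait for the workers,
    // then close the results channel and wait for the collector.
    close(work)
    workerWG.Wait()
    close(results)
    collectorWG.Wait()
}
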
12  Godeps/_workspace/src/github.com/google/certificate-transparency/go/preload/types.go  (generated) (vendored)

@ -1,12 +0,0 @@
package preload

import (
    "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go"
)

type AddedCert struct {
    CertDER                    ct.ASN1Cert
    SignedCertificateTimestamp ct.SignedCertificateTimestamp
    AddedOk                    bool
    ErrorMessage               string
}

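AddedCert is the record that ties the two tools above together: preload gob-encodes one per submission into a zlib-compressed file, and dumpscts decodes them back out. A self-contained sketch of that round trip, using a stand-in struct rather than the vendored type, could look like:

package main

import (
    "bytes"
    "compress/zlib"
    "encoding/gob"
    "fmt"
    "io"
    "log"
)

// record stands in for preload.AddedCert; the real struct carries the DER
// certificate and the SCT, but the wire format is the same: gob records
// wrapped in a zlib stream.
type record struct {
    CertDER      []byte
    AddedOk      bool
    ErrorMessage string
}

func main() {
    // Write side (what preload does with sctWriter).
    var buf bytes.Buffer
    zw := zlib.NewWriter(&buf)
    enc := gob.NewEncoder(zw)
    for i := 0; i < 3; i++ {
        if err := enc.Encode(record{CertDER: []byte{byte(i)}, AddedOk: i%2 == 0}); err != nil {
            log.Fatal(err)
        }
    }
    if err := zw.Close(); err != nil {
        log.Fatal(err)
    }

    // Read side (what dumpscts does): decode records until the stream ends.
    zr, err := zlib.NewReader(&buf)
    if err != nil {
        log.Fatal(err)
    }
    defer zr.Close()
    dec := gob.NewDecoder(zr)
    for {
        var r record
        if err := dec.Decode(&r); err != nil {
            if err != io.EOF {
                log.Printf("decode stopped: %v", err)
            }
            break
        }
        fmt.Printf("got record: ok=%v der=%v\n", r.AddedOk, r.CertDER)
    }
}
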
@ -1,86 +0,0 @@
package main

import (
    "flag"
    "fmt"
    "log"
    "math/big"
    "regexp"

    "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go"
    "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/client"
    "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/scanner"
)

const (
    // A regex which cannot match any input
    MatchesNothingRegex = "a^"
)

var logUri = flag.String("log_uri", "http://ct.googleapis.com/aviator", "CT log base URI")
var matchSubjectRegex = flag.String("match_subject_regex", ".*", "Regex to match CN/SAN")
var precertsOnly = flag.Bool("precerts_only", false, "Only match precerts")
var serialNumber = flag.String("serial_number", "", "Serial number of certificate of interest")
var batchSize = flag.Int("batch_size", 1000, "Max number of entries to request at per call to get-entries")
var numWorkers = flag.Int("num_workers", 2, "Number of concurrent matchers")
var parallelFetch = flag.Int("parallel_fetch", 2, "Number of concurrent GetEntries fetches")
var startIndex = flag.Int64("start_index", 0, "Log index to start scanning at")
var quiet = flag.Bool("quiet", false, "Don't print out extra logging messages, only matches.")

// Prints out a short bit of info about |cert|, found at |index| in the
// specified log
func logCertInfo(entry *ct.LogEntry) {
    log.Printf("Interesting cert at index %d: CN: '%s'", entry.Index, entry.X509Cert.Subject.CommonName)
}

// Prints out a short bit of info about |precert|, found at |index| in the
// specified log
func logPrecertInfo(entry *ct.LogEntry) {
    log.Printf("Interesting precert at index %d: CN: '%s' Issuer: %s", entry.Index,
        entry.Precert.TBSCertificate.Subject.CommonName, entry.Precert.TBSCertificate.Issuer.CommonName)
}

func createMatcherFromFlags() (scanner.Matcher, error) {
    if *serialNumber != "" {
        log.Printf("Using SerialNumber matcher on %s", *serialNumber)
        var sn big.Int
        _, success := sn.SetString(*serialNumber, 0)
        if !success {
            return nil, fmt.Errorf("Invalid serialNumber %s", *serialNumber)
        }
        return scanner.MatchSerialNumber{SerialNumber: sn}, nil
    } else {
        // Make a regex matcher
        var certRegex *regexp.Regexp
        precertRegex := regexp.MustCompile(*matchSubjectRegex)
        switch *precertsOnly {
        case true:
            certRegex = regexp.MustCompile(MatchesNothingRegex)
        case false:
            certRegex = precertRegex
        }
        return scanner.MatchSubjectRegex{
            CertificateSubjectRegex:    certRegex,
            PrecertificateSubjectRegex: precertRegex}, nil
    }
}

func main() {
    flag.Parse()
    logClient := client.New(*logUri)
    matcher, err := createMatcherFromFlags()
    if err != nil {
        log.Fatal(err)
    }

    opts := scanner.ScannerOptions{
        Matcher:       matcher,
        BatchSize:     *batchSize,
        NumWorkers:    *numWorkers,
        ParallelFetch: *parallelFetch,
        StartIndex:    *startIndex,
        Quiet:         *quiet,
    }
    scanner := scanner.NewScanner(logClient, opts)
    scanner.Scan(logCertInfo, logPrecertInfo)
}

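createMatcherFromFlags above only chooses between the scanner's serial-number and subject-regex matchers. To make the subject matching concrete, here is a standalone sketch against the standard library's crypto/x509 (the vendored scanner uses the certificate-transparency x509 fork, so this is illustrative only): a certificate counts as interesting when the regex hits the CN or any DNS SAN.

package main

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "crypto/x509"
    "crypto/x509/pkix"
    "fmt"
    "log"
    "math/big"
    "regexp"
    "time"
)

// subjectMatches mirrors the idea behind scanner.MatchSubjectRegex, but for
// the standard library's x509.Certificate.
func subjectMatches(re *regexp.Regexp, c *x509.Certificate) bool {
    if re.MatchString(c.Subject.CommonName) {
        return true
    }
    for _, name := range c.DNSNames {
        if re.MatchString(name) {
            return true
        }
    }
    return false
}

func main() {
    // Build a throwaway self-signed cert to run the matcher against.
    key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    if err != nil {
        log.Fatal(err)
    }
    tmpl := &x509.Certificate{
        SerialNumber: big.NewInt(1),
        Subject:      pkix.Name{CommonName: "mail.example.com"},
        DNSNames:     []string{"mail.example.com", "www.example.com"},
        NotBefore:    time.Now(),
        NotAfter:     time.Now().Add(time.Hour),
    }
    der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
    if err != nil {
        log.Fatal(err)
    }
    cert, err := x509.ParseCertificate(der)
    if err != nil {
        log.Fatal(err)
    }

    re := regexp.MustCompile(`\.example\.com$`)
    fmt.Println("matches:", subjectMatches(re, cert))
}
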
399  Godeps/_workspace/src/github.com/google/certificate-transparency/go/scanner/scanner.go  (generated) (vendored)

@ -1,399 +0,0 @@
|
|||
package scanner
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/big"
|
||||
"regexp"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go"
|
||||
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/client"
|
||||
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509"
|
||||
)
|
||||
|
||||
// Clients wishing to implement their own Matchers should implement this interface:
|
||||
type Matcher interface {
|
||||
// CertificateMatches is called by the scanner for each X509 Certificate found in the log.
|
||||
// The implementation should return |true| if the passed Certificate is interesting, and |false| otherwise.
|
||||
CertificateMatches(*x509.Certificate) bool
|
||||
|
||||
// PrecertificateMatches is called by the scanner for each CT Precertificate found in the log.
|
||||
// The implementation should return |true| if the passed Precertificate is interesting, and |false| otherwise.
|
||||
PrecertificateMatches(*ct.Precertificate) bool
|
||||
}
|
||||
|
||||
// MatchAll is a Matcher which will match every possible Certificate and Precertificate.
|
||||
type MatchAll struct{}
|
||||
|
||||
func (m MatchAll) CertificateMatches(_ *x509.Certificate) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (m MatchAll) PrecertificateMatches(_ *ct.Precertificate) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// MatchNone is a Matcher which will never match any Certificate or Precertificate.
|
||||
type MatchNone struct{}
|
||||
|
||||
func (m MatchNone) CertificateMatches(_ *x509.Certificate) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (m MatchNone) PrecertificateMatches(_ *ct.Precertificate) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
type MatchSerialNumber struct {
|
||||
SerialNumber big.Int
|
||||
}
|
||||
|
||||
func (m MatchSerialNumber) CertificateMatches(c *x509.Certificate) bool {
|
||||
return c.SerialNumber.String() == m.SerialNumber.String()
|
||||
}
|
||||
|
||||
func (m MatchSerialNumber) PrecertificateMatches(p *ct.Precertificate) bool {
|
||||
return p.TBSCertificate.SerialNumber.String() == m.SerialNumber.String()
|
||||
}
|
||||
|
||||
// MatchSubjectRegex is a Matcher which will use |CertificateSubjectRegex| and |PrecertificateSubjectRegex|
|
||||
// to determine whether Certificates and Precertificates are interesting.
|
||||
// The two regexes are tested against Subject Common Name as well as all
|
||||
// Subject Alternative Names
|
||||
type MatchSubjectRegex struct {
|
||||
CertificateSubjectRegex *regexp.Regexp
|
||||
PrecertificateSubjectRegex *regexp.Regexp
|
||||
}
|
||||
|
||||
// Returns true if either CN or any SAN of |c| matches |CertificateSubjectRegex|.
|
||||
func (m MatchSubjectRegex) CertificateMatches(c *x509.Certificate) bool {
|
||||
if m.CertificateSubjectRegex.FindStringIndex(c.Subject.CommonName) != nil {
|
||||
return true
|
||||
}
|
||||
for _, alt := range c.DNSNames {
|
||||
if m.CertificateSubjectRegex.FindStringIndex(alt) != nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Returns true if either CN or any SAN of |p| matches |PrecertificatesubjectRegex|.
|
||||
func (m MatchSubjectRegex) PrecertificateMatches(p *ct.Precertificate) bool {
|
||||
if m.PrecertificateSubjectRegex.FindStringIndex(p.TBSCertificate.Subject.CommonName) != nil {
|
||||
return true
|
||||
}
|
||||
for _, alt := range p.TBSCertificate.DNSNames {
|
||||
if m.PrecertificateSubjectRegex.FindStringIndex(alt) != nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ScannerOptions holds configuration options for the Scanner
|
||||
type ScannerOptions struct {
|
||||
// Custom matcher for x509 Certificates, functor will be called for each
|
||||
// Certificate found during scanning.
|
||||
Matcher Matcher
|
||||
|
||||
// Match precerts only (Matcher still applies to precerts)
|
||||
PrecertOnly bool
|
||||
|
||||
// Number of entries to request in one batch from the Log
|
||||
BatchSize int
|
||||
|
||||
// Number of concurrent matchers to run
|
||||
NumWorkers int
|
||||
|
||||
// Number of concurrent fethers to run
|
||||
ParallelFetch int
|
||||
|
||||
// Log entry index to start fetching & matching at
|
||||
StartIndex int64
|
||||
|
||||
// Don't print any status messages to stdout
|
||||
Quiet bool
|
||||
}
|
||||
|
||||
// Creates a new ScannerOptions struct with sensible defaults
|
||||
func DefaultScannerOptions() *ScannerOptions {
|
||||
return &ScannerOptions{
|
||||
Matcher: &MatchAll{},
|
||||
PrecertOnly: false,
|
||||
BatchSize: 1000,
|
||||
NumWorkers: 1,
|
||||
ParallelFetch: 1,
|
||||
StartIndex: 0,
|
||||
Quiet: false,
|
||||
}
|
||||
}
|
||||
|
||||
// Scanner is a tool to scan all the entries in a CT Log.
|
||||
type Scanner struct {
|
||||
// Client used to talk to the CT log instance
|
||||
logClient *client.LogClient
|
||||
|
||||
// Configuration options for this Scanner instance
|
||||
opts ScannerOptions
|
||||
|
||||
// Counter of the number of certificates scanned
|
||||
certsProcessed int64
|
||||
|
||||
// Counter of the number of precertificates encountered during the scan.
|
||||
precertsSeen int64
|
||||
|
||||
unparsableEntries int64
|
||||
entriesWithNonFatalErrors int64
|
||||
}
|
||||
|
||||
// matcherJob represents the context for an individual matcher job.
|
||||
type matcherJob struct {
|
||||
// The log entry returned by the log server
|
||||
entry ct.LogEntry
|
||||
// The index of the entry containing the LeafInput in the log
|
||||
index int64
|
||||
}
|
||||
|
||||
// fetchRange represents a range of certs to fetch from a CT log
|
||||
type fetchRange struct {
|
||||
start int64
|
||||
end int64
|
||||
}
|
||||
|
||||
// Takes the error returned by either x509.ParseCertificate() or
|
||||
// x509.ParseTBSCertificate() and determines if it's non-fatal or otherwise.
|
||||
// In the case of non-fatal errors, the error will be logged,
|
||||
// entriesWithNonFatalErrors will be incremented, and the return value will be
|
||||
// nil.
|
||||
// Fatal errors will be logged, unparsableEntires will be incremented, and the
|
||||
// fatal error itself will be returned.
|
||||
// When |err| is nil, this method does nothing.
|
||||
func (s *Scanner) handleParseEntryError(err error, entryType ct.LogEntryType, index int64) error {
|
||||
if err == nil {
|
||||
// No error to handle
|
||||
return nil
|
||||
}
|
||||
switch err.(type) {
|
||||
case x509.NonFatalErrors:
|
||||
s.entriesWithNonFatalErrors++
|
||||
// We'll make a note, but continue.
|
||||
s.Log(fmt.Sprintf("Non-fatal error in %+v at index %d: %s", entryType, index, err.Error()))
|
||||
default:
|
||||
s.unparsableEntries++
|
||||
s.Log(fmt.Sprintf("Failed to parse in %+v at index %d : %s", entryType, index, err.Error()))
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Processes the given |entry| in the specified log.
|
||||
func (s *Scanner) processEntry(entry ct.LogEntry, foundCert func(*ct.LogEntry), foundPrecert func(*ct.LogEntry)) {
|
||||
atomic.AddInt64(&s.certsProcessed, 1)
|
||||
switch entry.Leaf.TimestampedEntry.EntryType {
|
||||
case ct.X509LogEntryType:
|
||||
if s.opts.PrecertOnly {
|
||||
// Only interested in precerts and this is an X.509 cert, early-out.
|
||||
return
|
||||
}
|
||||
cert, err := x509.ParseCertificate(entry.Leaf.TimestampedEntry.X509Entry)
|
||||
if err = s.handleParseEntryError(err, entry.Leaf.TimestampedEntry.EntryType, entry.Index); err != nil {
|
||||
// We hit an unparseable entry, already logged inside handleParseEntryError()
|
||||
return
|
||||
}
|
||||
if s.opts.Matcher.CertificateMatches(cert) {
|
||||
entry.X509Cert = cert
|
||||
foundCert(&entry)
|
||||
}
|
||||
case ct.PrecertLogEntryType:
|
||||
c, err := x509.ParseTBSCertificate(entry.Leaf.TimestampedEntry.PrecertEntry.TBSCertificate)
|
||||
if err = s.handleParseEntryError(err, entry.Leaf.TimestampedEntry.EntryType, entry.Index); err != nil {
|
||||
// We hit an unparseable entry, already logged inside handleParseEntryError()
|
||||
return
|
||||
}
|
||||
precert := &ct.Precertificate{
|
||||
Raw: entry.Chain[0],
|
||||
TBSCertificate: *c,
|
||||
IssuerKeyHash: entry.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash}
|
||||
if s.opts.Matcher.PrecertificateMatches(precert) {
|
||||
entry.Precert = precert
|
||||
foundPrecert(&entry)
|
||||
}
|
||||
s.precertsSeen++
|
||||
}
|
||||
}
|
||||
|
||||
// Worker function to match certs.
|
||||
// Accepts MatcherJobs over the |entries| channel, and processes them.
|
||||
// Returns true over the |done| channel when the |entries| channel is closed.
|
||||
func (s *Scanner) matcherJob(id int, entries <-chan matcherJob, foundCert func(*ct.LogEntry), foundPrecert func(*ct.LogEntry), wg *sync.WaitGroup) {
|
||||
for e := range entries {
|
||||
s.processEntry(e.entry, foundCert, foundPrecert)
|
||||
}
|
||||
s.Log(fmt.Sprintf("Matcher %d finished", id))
|
||||
wg.Done()
|
||||
}
|
||||
|
||||
// Worker function for fetcher jobs.
|
||||
// Accepts cert ranges to fetch over the |ranges| channel, and if the fetch is
|
||||
// successful sends the individual LeafInputs out (as MatcherJobs) into the
|
||||
// |entries| channel for the matchers to chew on.
|
||||
// Will retry failed attempts to retrieve ranges indefinitely.
|
||||
// Sends true over the |done| channel when the |ranges| channel is closed.
|
||||
func (s *Scanner) fetcherJob(id int, ranges <-chan fetchRange, entries chan<- matcherJob, wg *sync.WaitGroup) {
|
||||
for r := range ranges {
|
||||
success := false
|
||||
// TODO(alcutter): give up after a while:
|
||||
for !success {
|
||||
logEntries, err := s.logClient.GetEntries(r.start, r.end)
|
||||
if err != nil {
|
||||
s.Log(fmt.Sprintf("Problem fetching from log: %s", err.Error()))
|
||||
continue
|
||||
}
|
||||
for _, logEntry := range logEntries {
|
||||
logEntry.Index = r.start
|
||||
entries <- matcherJob{logEntry, r.start}
|
||||
r.start++
|
||||
}
|
||||
if r.start > r.end {
|
||||
// Only complete if we actually got all the leaves we were
|
||||
// expecting -- Logs MAY return fewer than the number of
|
||||
// leaves requested.
|
||||
success = true
|
||||
}
|
||||
}
|
||||
}
|
||||
s.Log(fmt.Sprintf("Fetcher %d finished", id))
|
||||
wg.Done()
|
||||
}
|
||||
|
||||
// Returns the smaller of |a| and |b|
|
||||
func min(a int64, b int64) int64 {
|
||||
if a < b {
|
||||
return a
|
||||
} else {
|
||||
return b
|
||||
}
|
||||
}
|
||||
|
||||
// Returns the larger of |a| and |b|
|
||||
func max(a int64, b int64) int64 {
|
||||
if a > b {
|
||||
return a
|
||||
} else {
|
||||
return b
|
||||
}
|
||||
}
|
||||
|
||||
// Pretty prints the passed in number of |seconds| into a more human readable
|
||||
// string.
|
||||
func humanTime(seconds int) string {
|
||||
nanos := time.Duration(seconds) * time.Second
|
||||
hours := int(nanos / (time.Hour))
|
||||
nanos %= time.Hour
|
||||
minutes := int(nanos / time.Minute)
|
||||
nanos %= time.Minute
|
||||
seconds = int(nanos / time.Second)
|
||||
s := ""
|
||||
if hours > 0 {
|
||||
s += fmt.Sprintf("%d hours ", hours)
|
||||
}
|
||||
if minutes > 0 {
|
||||
s += fmt.Sprintf("%d minutes ", minutes)
|
||||
}
|
||||
if seconds > 0 {
|
||||
s += fmt.Sprintf("%d seconds ", seconds)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (s Scanner) Log(msg string) {
|
||||
if !s.opts.Quiet {
|
||||
log.Print(msg)
|
||||
}
|
||||
}
|
||||
|
||||
// Performs a scan against the Log.
|
||||
// For each x509 certificate found, |foundCert| will be called with the
|
||||
// index of the entry and certificate itself as arguments. For each precert
|
||||
// found, |foundPrecert| will be called with the index of the entry and the raw
|
||||
// precert string as the arguments.
|
||||
//
|
||||
// This method blocks until the scan is complete.
|
||||
func (s *Scanner) Scan(foundCert func(*ct.LogEntry),
|
||||
foundPrecert func(*ct.LogEntry)) error {
|
||||
s.Log("Starting up...\n")
|
||||
s.certsProcessed = 0
|
||||
s.precertsSeen = 0
|
||||
s.unparsableEntries = 0
|
||||
s.entriesWithNonFatalErrors = 0
|
||||
|
||||
latestSth, err := s.logClient.GetSTH()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.Log(fmt.Sprintf("Got STH with %d certs", latestSth.TreeSize))
|
||||
|
||||
ticker := time.NewTicker(time.Second)
|
||||
startTime := time.Now()
|
||||
fetches := make(chan fetchRange, 1000)
|
||||
jobs := make(chan matcherJob, 100000)
|
||||
go func() {
|
||||
for range ticker.C {
|
||||
throughput := float64(s.certsProcessed) / time.Since(startTime).Seconds()
|
||||
remainingCerts := int64(latestSth.TreeSize) - int64(s.opts.StartIndex) - s.certsProcessed
|
||||
remainingSeconds := int(float64(remainingCerts) / throughput)
|
||||
remainingString := humanTime(remainingSeconds)
|
||||
s.Log(fmt.Sprintf("Processed: %d certs (to index %d). Throughput: %3.2f ETA: %s\n", s.certsProcessed,
|
||||
s.opts.StartIndex+int64(s.certsProcessed), throughput, remainingString))
|
||||
}
|
||||
}()
|
||||
|
||||
var ranges list.List
|
||||
for start := s.opts.StartIndex; start < int64(latestSth.TreeSize); {
|
||||
end := min(start+int64(s.opts.BatchSize), int64(latestSth.TreeSize)) - 1
|
||||
ranges.PushBack(fetchRange{start, end})
|
||||
start = end + 1
|
||||
}
|
||||
var fetcherWG sync.WaitGroup
|
||||
var matcherWG sync.WaitGroup
|
||||
// Start matcher workers
|
||||
for w := 0; w < s.opts.NumWorkers; w++ {
|
||||
matcherWG.Add(1)
|
||||
go s.matcherJob(w, jobs, foundCert, foundPrecert, &matcherWG)
|
||||
}
|
||||
// Start fetcher workers
|
||||
for w := 0; w < s.opts.ParallelFetch; w++ {
|
||||
fetcherWG.Add(1)
|
||||
go s.fetcherJob(w, fetches, jobs, &fetcherWG)
|
||||
}
|
||||
for r := ranges.Front(); r != nil; r = r.Next() {
|
||||
fetches <- r.Value.(fetchRange)
|
||||
}
|
||||
close(fetches)
|
||||
fetcherWG.Wait()
|
||||
close(jobs)
|
||||
matcherWG.Wait()
|
||||
|
||||
s.Log(fmt.Sprintf("Completed %d certs in %s", s.certsProcessed, humanTime(int(time.Since(startTime).Seconds()))))
|
||||
s.Log(fmt.Sprintf("Saw %d precerts", s.precertsSeen))
|
||||
s.Log(fmt.Sprintf("%d unparsable entries, %d non-fatal errors", s.unparsableEntries, s.entriesWithNonFatalErrors))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Creates a new Scanner instance using |client| to talk to the log, and taking
|
||||
// configuration options from |opts|.
|
||||
func NewScanner(client *client.LogClient, opts ScannerOptions) *Scanner {
|
||||
var scanner Scanner
|
||||
scanner.logClient = client
|
||||
// Set a default match-everything regex if none was provided:
|
||||
if opts.Matcher == nil {
|
||||
opts.Matcher = &MatchAll{}
|
||||
}
|
||||
scanner.opts = opts
|
||||
return &scanner
|
||||
}
|
||||
|
|
@ -1,379 +0,0 @@
|
|||
package scanner
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"log"
|
||||
)
|
||||
|
||||
const (
|
||||
// TODO(alcutter): this signature is syntactically correct, but invalid.
|
||||
FourEntrySTH = "{" +
|
||||
"\"tree_size\":4,\"timestamp\":1396877652123,\"sha256_root_hash\":\"0JBu0CkZnKXc1niEndDaqqgCRHucCfVt1/WBAXs/5T8=\",\"tree_head_signature\":\"AAAACXNpZ25hdHVyZQ==\"}"
|
||||
FourEntries = "{\"entries\":[{\"leaf_input\":\"AAAAAAE9pCDoYwAAAAOGMIIDgjCCAuu" +
|
||||
"gAwIBAgIKFIT5BQAAAAB9PDANBgkqhkiG9w0BAQUFADBGMQswCQYDVQQGEwJVUzETMBEGA1UEChMKR29" +
|
||||
"vZ2xlIEluYzEiMCAGA1UEAxMZR29vZ2xlIEludGVybmV0IEF1dGhvcml0eTAeFw0xMzAyMjAxMzM0NTF" +
|
||||
"aFw0xMzA2MDcxOTQzMjdaMGkxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQ" +
|
||||
"HEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQKEwpHb29nbGUgSW5jMRgwFgYDVQQDEw9tYWlsLmdvb2dsZS5" +
|
||||
"jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOD1FbMyG0IT8JOi2El6RVciBJp4ENfTkpJ2vn/" +
|
||||
"HUq+gjprmUNxLSvcK+D8vBpkq8N41Qv+82PyTuZIB0pg2CJfs07C5+ZAQnwm01DiQjM/j2jKb5GegOBR" +
|
||||
"YngbRkAPSGCufzJy+QBWbd1htqceIREEI/JH7pUGgg90XUQgBddBbAgMBAAGjggFSMIIBTjAdBgNVHSU" +
|
||||
"EFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0OBBYEFAgZmgKeyK8PXIGOAU+/5r/xNy5hMB8GA1U" +
|
||||
"dIwQYMBaAFL/AMOv1QxE+Z7qekfv8atrjaxIkMFsGA1UdHwRUMFIwUKBOoEyGSmh0dHA6Ly93d3cuZ3N" +
|
||||
"0YXRpYy5jb20vR29vZ2xlSW50ZXJuZXRBdXRob3JpdHkvR29vZ2xlSW50ZXJuZXRBdXRob3JpdHkuY3J" +
|
||||
"sMGYGCCsGAQUFBwEBBFowWDBWBggrBgEFBQcwAoZKaHR0cDovL3d3dy5nc3RhdGljLmNvbS9Hb29nbGV" +
|
||||
"JbnRlcm5ldEF1dGhvcml0eS9Hb29nbGVJbnRlcm5ldEF1dGhvcml0eS5jcnQwDAYDVR0TAQH/BAIwADA" +
|
||||
"aBgNVHREEEzARgg9tYWlsLmdvb2dsZS5jb20wDQYJKoZIhvcNAQEFBQADgYEAX0lVXCymPXGdCwvn2kp" +
|
||||
"qJw5Q+Hf8gzGhxDG6aMlO5wj2wf8qPWABDRwHdb4mdSmRMuwhzCJhE3PceXLNf3pOlR/Prt18mDY/r6c" +
|
||||
"LwfldIXgTOYkw/uckGwvb0BwMsEi2FDE/T3d3SOo+lHvqPX9sOVa2uyA0wmIYnbT+5uQY6m0AAA==\"," +
|
||||
"\"extra_data\":\"AAXeAAK0MIICsDCCAhmgAwIBAgIDC2dxMA0GCSqGSIb3DQEBBQUAME4xCzAJBgN" +
|
||||
"VBAYTAlVTMRAwDgYDVQQKEwdFcXVpZmF4MS0wKwYDVQQLEyRFcXVpZmF4IFNlY3VyZSBDZXJ0aWZpY2F" +
|
||||
"0ZSBBdXRob3JpdHkwHhcNMDkwNjA4MjA0MzI3WhcNMTMwNjA3MTk0MzI3WjBGMQswCQYDVQQGEwJVUzE" +
|
||||
"TMBEGA1UEChMKR29vZ2xlIEluYzEiMCAGA1UEAxMZR29vZ2xlIEludGVybmV0IEF1dGhvcml0eTCBnzA" +
|
||||
"NBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAye23pIucV+eEPkB9hPSP0XFjU5nneXQUr0SZMyCSjXvlKAy" +
|
||||
"6rWxJfoNfNFlOCnowzdDXxFdF7dWq1nMmzq0yE7jXDx07393cCDaob1FEm8rWIFJztyaHNWrbqeXUWaU" +
|
||||
"r/GcZOfqTGBhs3t0lig4zFEfC7wFQeeT9adGnwKziV28CAwEAAaOBozCBoDAOBgNVHQ8BAf8EBAMCAQY" +
|
||||
"wHQYDVR0OBBYEFL/AMOv1QxE+Z7qekfv8atrjaxIkMB8GA1UdIwQYMBaAFEjmaPkr0rKV10fYIyAQTzO" +
|
||||
"YkJ/UMBIGA1UdEwEB/wQIMAYBAf8CAQAwOgYDVR0fBDMwMTAvoC2gK4YpaHR0cDovL2NybC5nZW90cnV" +
|
||||
"zdC5jb20vY3Jscy9zZWN1cmVjYS5jcmwwDQYJKoZIhvcNAQEFBQADgYEAuIojxkiWsRF8YHdeBZqrocb" +
|
||||
"6ghwYB8TrgbCoZutJqOkM0ymt9e8kTP3kS8p/XmOrmSfLnzYhLLkQYGfN0rTw8Ktx5YtaiScRhKqOv5n" +
|
||||
"wnQkhClIZmloJ0pC3+gz4fniisIWvXEyZ2VxVKfmlUUIuOss4jHg7y/j7lYe8vJD5UDIAAyQwggMgMII" +
|
||||
"CiaADAgECAgQ13vTPMA0GCSqGSIb3DQEBBQUAME4xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFcXVpZmF" +
|
||||
"4MS0wKwYDVQQLEyRFcXVpZmF4IFNlY3VyZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNOTgwODIyMTY" +
|
||||
"0MTUxWhcNMTgwODIyMTY0MTUxWjBOMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1U" +
|
||||
"ECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUAA4G" +
|
||||
"NADCBiQKBgQDBXbFYZwhi7qCaLR8IbZEUaJgKHv7aBG8ThGIhw9F8zp8F4LgB8E407OKKlQRkrPFrU18" +
|
||||
"Fs8tngL9CAo7+3QEJ7OEAFE/8+/AM3UO6WyvhH4BwmRVXkxbxD5dqt8JoIxzMTVkwrFEeO68r1u5jRXv" +
|
||||
"F2V9Q0uNQDzqI578U/eDHuQIDAQABo4IBCTCCAQUwcAYDVR0fBGkwZzBloGOgYaRfMF0xCzAJBgNVBAY" +
|
||||
"TAlVTMRAwDgYDVQQKEwdFcXVpZmF4MS0wKwYDVQQLEyRFcXVpZmF4IFNlY3VyZSBDZXJ0aWZpY2F0ZSB" +
|
||||
"BdXRob3JpdHkxDTALBgNVBAMTBENSTDEwGgYDVR0QBBMwEYEPMjAxODA4MjIxNjQxNTFaMAsGA1UdDwQ" +
|
||||
"EAwIBBjAfBgNVHSMEGDAWgBRI5mj5K9KylddH2CMgEE8zmJCf1DAdBgNVHQ4EFgQUSOZo+SvSspXXR9g" +
|
||||
"jIBBPM5iQn9QwDAYDVR0TBAUwAwEB/zAaBgkqhkiG9n0HQQAEDTALGwVWMy4wYwMCBsAwDQYJKoZIhvc" +
|
||||
"NAQEFBQADgYEAWM4p6vz33rXOArkXtYXRuePglcwlMQ0AppJuf7aSY55QldGab+QR3mOFbpjuqP9ayNN" +
|
||||
"VsmZxV97AIes9KqcjSQEEhkJ7/O5/ohZStWdn00DbOyZYsih3Pa4Ud2HW+ipmJ6AN+qdzXOpw8ZQhZUR" +
|
||||
"f+vzvKWipood573nvT6wHdzg=\"},{\"leaf_input\":\"AAAAAAE9pe0GcwAAAATWMIIE0jCCA7qgA" +
|
||||
"wIBAgIDAPY6MA0GCSqGSIb3DQEBBQUAMEAxCzAJBgNVBAYTAlVTMRcwFQYDVQQKEw5HZW9UcnVzdCwgS" +
|
||||
"W5jLjEYMBYGA1UEAxMPR2VvVHJ1c3QgU1NMIENBMB4XDTExMTAyMTExMDUwNloXDTEzMTEyMjA0MzI0N" +
|
||||
"1owgc4xKTAnBgNVBAUTIFRqbGZoUTB0cXp3WmtNa0svNXFNdGZqbjJ6aWRVNzRoMQswCQYDVQQGEwJVU" +
|
||||
"zEXMBUGA1UECBMOU291dGggQ2Fyb2xpbmExEzARBgNVBAcTCkNoYXJsZXN0b24xFzAVBgNVBAoTDkJsY" +
|
||||
"WNrYmF1ZCBJbmMuMRAwDgYDVQQLEwdIb3N0aW5nMTswOQYDVQQDEzJ3d3cuc3RydWxlYXJ0c2NlbnRyZ" +
|
||||
"S5wdXJjaGFzZS10aWNrZXRzLW9ubGluZS5jby51azCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCg" +
|
||||
"gEBAJtkbcF8x3TtIARHC8BDRtoIAdh9HO9fo+5UUDtoc8f4xq7Rb2xbWOiEX29JqZOdsuucYTuYbbDf0" +
|
||||
"uBYcJpkwhEg4Vg5skyfp0jAd6pXm1euQ+RiRShzEQYKJ8y4/IjZHttA/8HSzEKWJnuidsYrl/twFhlX5" +
|
||||
"WIZq3BUVQ9GVqGe9n1r2eIFTs6FxYUpaVzTkc6OLh1qSz+cnDDPigLUoUOK/KqN7ybmJxSefJw9WpFW/" +
|
||||
"pIn6M0gFAbu0egFgDybQ3JwUAEh8ddzpKRCqGq1mdZAKpKFHcqmi5nG5aFD4p1NFmPjDVQXohXLQvwtm" +
|
||||
"wwKS2Zo+tnulPnEe9jjET/f+MUCAwEAAaOCAUQwggFAMB8GA1UdIwQYMBaAFEJ5VBthzVUrPmPVPEhX9" +
|
||||
"Z/7Rc5KMA4GA1UdDwEB/wQEAwIEsDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwPQYDVR0RB" +
|
||||
"DYwNIIyd3d3LnN0cnVsZWFydHNjZW50cmUucHVyY2hhc2UtdGlja2V0cy1vbmxpbmUuY28udWswPQYDV" +
|
||||
"R0fBDYwNDAyoDCgLoYsaHR0cDovL2d0c3NsLWNybC5nZW90cnVzdC5jb20vY3Jscy9ndHNzbC5jcmwwH" +
|
||||
"QYDVR0OBBYEFDIdT1lJ84lcDpGuBOuAXrP0AlBVMAwGA1UdEwEB/wQCMAAwQwYIKwYBBQUHAQEENzA1M" +
|
||||
"DMGCCsGAQUFBzAChidodHRwOi8vZ3Rzc2wtYWlhLmdlb3RydXN0LmNvbS9ndHNzbC5jcnQwDQYJKoZIh" +
|
||||
"vcNAQEFBQADggEBAFhFfVTB5NWG3rVaq1jM72uGneGCjGk4qV4uKtEFn+zTJe9W2N/u8V2+mLvWQfDGP" +
|
||||
"r8X5u8KzBOQ+fl6aRxvI71EM3kjMu6UuJkUwXsoocK1c/iVBwWSpqem20t/2Z2n5oIN54QsKZX6tQd9J" +
|
||||
"HQ95YwtlyC7H4VeDKtJZ5x9UhJi8v35C+UgYPmiU5PdeoTdwxCf285FoQL9fBAPbv+EGek1XVaVg2yJK" +
|
||||
"ptG2OeM8AaynHsFcK/OcZJtsiGhtu2s9F910OBpoU+lhnPylwxOf4k35JcLaqHJ3BbLUtybbduNqtf3+" +
|
||||
"sYhkvp5IcCypoJy/Rk4fHgD8VTNiNWj7KGuHRYAAA==\",\"extra_data\":\"AAqLAAPdMIID2TCCA" +
|
||||
"sGgAwIBAgIDAjbQMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzd" +
|
||||
"CBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9iYWwgQ0EwHhcNMTAwMjE5MjIzOTI2WhcNMjAwMjE4M" +
|
||||
"jIzOTI2WjBAMQswCQYDVQQGEwJVUzEXMBUGA1UEChMOR2VvVHJ1c3QsIEluYy4xGDAWBgNVBAMTD0dlb" +
|
||||
"1RydXN0IFNTTCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJCzgMHk5UatcGA9uuUU3" +
|
||||
"Z6KXot1WubKbUGlI+g5hSZ6p1V3mkihkn46HhrxJ6ujTDnMyz1Hr4GuFmpcN+9FQf37mpc8oEOdxt8XI" +
|
||||
"dGKolbCA0mEEoE+yQpUYGa5jFTk+eb5lPHgX3UR8im55IaisYmtph6DKWOy8FQchQt65+EuDa+kvc3ns" +
|
||||
"VrXjAVaDktzKIt1XTTYdwvhdGLicTBi2LyKBeUxY0pUiWozeKdOVSQdl+8a5BLGDzAYtDRN4dgjOyFbL" +
|
||||
"TAZJQ5096QhS6CkIMlszZhWwPKoXz4mdaAN+DaIiixafWcwqQ/RmXAueOFRJq9VeiS+jDkNd53eAsMMv" +
|
||||
"R8CAwEAAaOB2TCB1jAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFEJ5VBthzVUrPmPVPEhX9Z/7Rc5KM" +
|
||||
"B8GA1UdIwQYMBaAFMB6mGiNifurBWQMEX2qfWW4ysxOMBIGA1UdEwEB/wQIMAYBAf8CAQAwOgYDVR0fB" +
|
||||
"DMwMTAvoC2gK4YpaHR0cDovL2NybC5nZW90cnVzdC5jb20vY3Jscy9ndGdsb2JhbC5jcmwwNAYIKwYBB" +
|
||||
"QUHAQEEKDAmMCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5nZW90cnVzdC5jb20wDQYJKoZIhvcNAQEFB" +
|
||||
"QADggEBANTvU4ToGr2hiwTAqfVfoRB4RV2yV2pOJMtlTjGXkZrUJPjiJ2ZwMZzBYlQG55cdOprApClIC" +
|
||||
"q8kx6jEmlTBfEx4TCtoLF0XplR4TEbigMMfOHES0tdT41SFULgCy+5jOvhWiU1Vuy7AyBh3hjELC3Dwf" +
|
||||
"jWDpCoTZFZnNF0WX3OsewYk2k9QbSqr0E1TQcKOu3EDSSmGGM8hQkx0YlEVxW+o78Qn5Rsz3VqI138S0" +
|
||||
"adhJR/V4NwdzxoQ2KDLX4z6DOW/cf/lXUQdpj6HR/oaToODEj+IZpWYeZqF6wJHzSXj8gYETpnKXKBue" +
|
||||
"rvdo5AaRTPvvz7SBMS24CqFZUE+ENQAA4EwggN9MIIC5qADAgECAgMSu+YwDQYJKoZIhvcNAQEFBQAwT" +
|
||||
"jELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VxdWlmYXgxLTArBgNVBAsTJEVxdWlmYXggU2VjdXJlIENlc" +
|
||||
"nRpZmljYXRlIEF1dGhvcml0eTAeFw0wMjA1MjEwNDAwMDBaFw0xODA4MjEwNDAwMDBaMEIxCzAJBgNVB" +
|
||||
"AYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9iYWwgQ0Ewg" +
|
||||
"gEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDazBhjMP30FyMaVn5b3zxsOORxt3iR1Lyh2Ez4q" +
|
||||
"EO2A+lNIQcIiNpYL2Y5Kb0FeIudOOgFt2p+caTmxGCmsO+A5IkoD54l1u2D862mkceYyUIYNRSdrZhGk" +
|
||||
"i5PyvGHQ8EWlVctUO+JLYB6V63y7l9r0gCNuRT4FBU12cBGo3tyyJG/yVUrzdCXPpwmZMzfzoMZccpO5" +
|
||||
"tTVe6kZzVXeyOzSXjhT5VxPjC3+UCM2/Gbmy46kORkAt5UCOZELDv44LtEdBZr2TT5vDwcdrywej2A54" +
|
||||
"vo2UxM51F4mK9s9qBS9MusYAyhSBHHlqzM94Ti7BzaEYpx56hYw9F/AK+hxa+T5AgMBAAGjgfAwge0wH" +
|
||||
"wYDVR0jBBgwFoAUSOZo+SvSspXXR9gjIBBPM5iQn9QwHQYDVR0OBBYEFMB6mGiNifurBWQMEX2qfWW4y" +
|
||||
"sxOMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMDoGA1UdHwQzMDEwL6AtoCuGKWh0dHA6L" +
|
||||
"y9jcmwuZ2VvdHJ1c3QuY29tL2NybHMvc2VjdXJlY2EuY3JsME4GA1UdIARHMEUwQwYEVR0gADA7MDkGC" +
|
||||
"CsGAQUFBwIBFi1odHRwczovL3d3dy5nZW90cnVzdC5jb20vcmVzb3VyY2VzL3JlcG9zaXRvcnkwDQYJK" +
|
||||
"oZIhvcNAQEFBQADgYEAduESbk5LFhKGMAaygQjP8AjHx3F+Zu7C7dQ7H//w8MhO1kM4sLkwfRjQVYOia" +
|
||||
"ss2EZzoSGajbX+4E9RH/otaXHP8rtkbMhk4q5c0FKqW0uujHBQISba75ZHvgzbrHVZvytq8c2OQ5H97P" +
|
||||
"iLLPQftXzh0nOMDUE6hr5juYfKEPxIAAyQwggMgMIICiaADAgECAgQ13vTPMA0GCSqGSIb3DQEBBQUAM" +
|
||||
"E4xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFcXVpZmF4MS0wKwYDVQQLEyRFcXVpZmF4IFNlY3VyZSBDZ" +
|
||||
"XJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNOTgwODIyMTY0MTUxWhcNMTgwODIyMTY0MTUxWjBOMQswCQYDV" +
|
||||
"QQGEwJVUzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhd" +
|
||||
"GUgQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDBXbFYZwhi7qCaLR8IbZEUaJgKH" +
|
||||
"v7aBG8ThGIhw9F8zp8F4LgB8E407OKKlQRkrPFrU18Fs8tngL9CAo7+3QEJ7OEAFE/8+/AM3UO6WyvhH" +
|
||||
"4BwmRVXkxbxD5dqt8JoIxzMTVkwrFEeO68r1u5jRXvF2V9Q0uNQDzqI578U/eDHuQIDAQABo4IBCTCCA" +
|
||||
"QUwcAYDVR0fBGkwZzBloGOgYaRfMF0xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFcXVpZmF4MS0wKwYDV" +
|
||||
"QQLEyRFcXVpZmF4IFNlY3VyZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxDTALBgNVBAMTBENSTDEwGgYDV" +
|
||||
"R0QBBMwEYEPMjAxODA4MjIxNjQxNTFaMAsGA1UdDwQEAwIBBjAfBgNVHSMEGDAWgBRI5mj5K9KylddH2" +
|
||||
"CMgEE8zmJCf1DAdBgNVHQ4EFgQUSOZo+SvSspXXR9gjIBBPM5iQn9QwDAYDVR0TBAUwAwEB/zAaBgkqh" +
|
||||
"kiG9n0HQQAEDTALGwVWMy4wYwMCBsAwDQYJKoZIhvcNAQEFBQADgYEAWM4p6vz33rXOArkXtYXRuePgl" +
|
||||
"cwlMQ0AppJuf7aSY55QldGab+QR3mOFbpjuqP9ayNNVsmZxV97AIes9KqcjSQEEhkJ7/O5/ohZStWdn0" +
|
||||
"0DbOyZYsih3Pa4Ud2HW+ipmJ6AN+qdzXOpw8ZQhZURf+vzvKWipood573nvT6wHdzg=\"},{\"leaf_i" +
|
||||
"nput\":\"AAAAAAE9pe0GcwAAAATjMIIE3zCCA8egAwIBAgIUCimKXmNJ+wiDS2zJvg6LC2cvrvQwDQY" +
|
||||
"JKoZIhvcNAQEFBQAwWjELMAkGA1UEBhMCSlAxIzAhBgNVBAoMGkN5YmVydHJ1c3QgSmFwYW4gQ28uLCB" +
|
||||
"MdGQuMSYwJAYDVQQDDB1DeWJlcnRydXN0IEphcGFuIFB1YmxpYyBDQSBHMjAeFw0xMjAzMTkwMzE0MzN" +
|
||||
"aFw0xNTAzMzExNDU5MDBaMIGKMQswCQYDVQQGEwJKUDEOMAwGA1UECBMFVG9reW8xEDAOBgNVBAcTB0N" +
|
||||
"odW8ta3UxHjAcBgNVBAoTFU5ldCBEcmVhbWVycyBDby4sTHRkLjEeMBwGA1UECxMVTWVnYSBNZWRpYSB" +
|
||||
"EZXBhcnRtZW50MRkwFwYDVQQDExB3d3cubmV0a2VpYmEuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8" +
|
||||
"AMIIBCgKCAQEA2to03F4GdlRiGljXrSmT08/WrY59UWaoe/H4wQN6S5eQKVtaLjBWUF5Ro4sm/kND7au" +
|
||||
"fyDqXUePxiZkphupV+VO7PeKp9e5yqEijK4z2XoFQhrCH5kkn1GDrTNzonxyAQtiBJ/k6gVTJV5fn4s7" +
|
||||
"I6bZ2aXiJLIlTCFwMDNkrB3fj9py86WwymXaypSHkmo9Sx6PFiIOwPH6vXRK4UyAfFpXPiLGJENEWOY2" +
|
||||
"AtzMJiIoupgAuyvmoY0G0Vk34mA9gOIOrKE2QmVSR3AtA31UpNZ33qvimfz96rHtCeiZj5HNxZRBMGBs" +
|
||||
"HTlu5e49xypiYCCV41jQvmfZOShan3R3o2QIDAQABo4IBajCCAWYwCQYDVR0TBAIwADCBuAYDVR0gBIG" +
|
||||
"wMIGtMIGqBggqgwiMmxEBATCBnTBXBggrBgEFBQcCAjBLGklGb3IgbW9yZSBkZXRhaWxzLCBwbGVhc2U" +
|
||||
"gdmlzaXQgb3VyIHdlYnNpdGUgaHR0cHM6Ly93d3cuY3liZXJ0cnVzdC5uZS5qcCAuMEIGCCsGAQUFBwI" +
|
||||
"BFjZodHRwczovL3d3dy5jeWJlcnRydXN0Lm5lLmpwL3NzbC9yZXBvc2l0b3J5L2luZGV4Lmh0bWwwGwY" +
|
||||
"DVR0RBBQwEoIQd3d3Lm5ldGtlaWJhLmNvbTALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwE" +
|
||||
"GCCsGAQUFBwMCMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly9zdXJlc2VyaWVzLWNybC5jeWJlcnRydXN" +
|
||||
"0Lm5lLmpwL1N1cmVTZXJ2ZXIvY3RqcHViY2FnMi9jZHAuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQAw8sX" +
|
||||
"P2ecKp5QGXtzcxKwkkznqocaddzoG69atcyzwshySLfo0ElMHP5WG9TpVrb6XSh2a1edwduAWBVAHQsH" +
|
||||
"i4bt4wX9e9DBMnQx/jelcJevABQsXJPGc86diisXYDkHKQesi+8CvWvE0GmbVJRoq0RDo14WASQszuqT" +
|
||||
"NW993walCzNTg88s7MniFgmgFd8n31SVls6QhY2Fmlr13JLDtzVDQDbj6MCPuwG8DdmR1bCM/ugcnk0a" +
|
||||
"7ZVy3d4yTjdhKpocToFklhHtHg0AINghPXIqU0njjUsy3ujNYIYo1TaZ3835Bo0lDwdvKK68Jka24Cfc" +
|
||||
"m+vfUfHKB56sIzquxAAA=\",\"extra_data\":\"AArbAAQ4MIIENDCCAxygAwIBAgIEBydcJjANBgk" +
|
||||
"qhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJRTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJ" +
|
||||
"lclRydXN0MSIwIAYDVQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTExMDgxODE4MzYzM1o" +
|
||||
"XDTE4MDgwOTE4MzU0OVowWjELMAkGA1UEBhMCSlAxIzAhBgNVBAoMGkN5YmVydHJ1c3QgSmFwYW4gQ28" +
|
||||
"uLCBMdGQuMSYwJAYDVQQDDB1DeWJlcnRydXN0IEphcGFuIFB1YmxpYyBDQSBHMjCCASIwDQYJKoZIhvc" +
|
||||
"NAQEBBQADggEPADCCAQoCggEBALbcdvu5RPsSfFSwu0F1dPA1R54nukNERWAZzUQKsnjl+h4kOwIfaHd" +
|
||||
"g9OsiBQo3btv3FSC7PVPU0BGO1OtnvtjdBTeUQSUj75oQo8P3AL26JpJngVCpT56RPE4gulJ//0xNjqq" +
|
||||
"tTl+8J5cCKf2Vg0m/CrqxNRg1qXOIYlGsFBc0UOefxvOTXbnFAE83kHqBD9T1cinojGKscTvzLt8qXOm" +
|
||||
"+51Ykgiiavz39cUL9xXtrNwlHUD5ykao7xU+dEm49gANUSUEVPPKGRHQo9bmjG9t2x+oDiaBg6VH2oWQ" +
|
||||
"+dJvbKssYPMHnaBiJ7Ks4LlC5b24VMygdL9WAF4Yi8x0M4IcCAwEAAaOCAQAwgf0wEgYDVR0TAQH/BAg" +
|
||||
"wBgEB/wIBADBTBgNVHSAETDBKMEgGCSsGAQQBsT4BADA7MDkGCCsGAQUFBwIBFi1odHRwOi8vY3liZXJ" +
|
||||
"0cnVzdC5vbW5pcm9vdC5jb20vcmVwb3NpdG9yeS5jZm0wDgYDVR0PAQH/BAQDAgEGMB8GA1UdIwQYMBa" +
|
||||
"AFOWdWTCCR1jMrPoIVDaGezq1BE3wMEIGA1UdHwQ7MDkwN6A1oDOGMWh0dHA6Ly9jZHAxLnB1YmxpYy1" +
|
||||
"0cnVzdC5jb20vQ1JML09tbmlyb290MjAyNS5jcmwwHQYDVR0OBBYEFBvkje86cWsSZWjPtpG8OUMBjXX" +
|
||||
"JMA0GCSqGSIb3DQEBBQUAA4IBAQBtK+3pj7Yp1rYwuuZttcNT0sm4Ck5In/E/Oiq0+3SW5r0YvKd5wHj" +
|
||||
"BObog406A0iTVpXt/YqPa1A8NqZ2qxem8CMlIZpiewPneq23lsDPCcNCW1x5vmAQVY0i7moVdG2nztE/" +
|
||||
"zpnAWDyEZf62wAzlJhoyic06T3CEBaLDvDXAaeqKyzCJCkVS9rHAEjUxc/Dqikvb5KhJAzXa3ZvTX0qv" +
|
||||
"ejizZ3Qk1NydWC662rpqDYPBff/Ctsxz6uHRfx+zADq3Yw8+f0jAOXFEfPhniwdKpkA/mV7mvBHai8gg" +
|
||||
"EJQo1u3MEMdCYRn82wWEWo4qMmd4QBfLe7aUJZJeEj0KoeyLEAAQ8MIIEODCCA6GgAwIBAgIEBydtuTA" +
|
||||
"NBgkqhkiG9w0BAQUFADB1MQswCQYDVQQGEwJVUzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQY" +
|
||||
"DVQQLEx5HVEUgQ3liZXJUcnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN" +
|
||||
"0IEdsb2JhbCBSb290MB4XDTEwMTEzMDE2MzUyMVoXDTE4MDgxMDE1MzQyNlowWjELMAkGA1UEBhMCSUU" +
|
||||
"xEjAQBgNVBAoTCUJhbHRpbW9yZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3J" +
|
||||
"lIEN5YmVyVHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKrmD1X6CZ" +
|
||||
"ymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjrIZ3AQSsBUnuId9Mcj8e6uYi" +
|
||||
"1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeKmpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dn" +
|
||||
"KM3p31vjsufFoREJIE9LAwqSuXmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xX" +
|
||||
"tabz5OTZydc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/yejl0qhqd" +
|
||||
"NkNwnGjkCAwEAAaOCAWowggFmMBIGA1UdEwEB/wQIMAYBAf8CAQMwTgYDVR0gBEcwRTBDBgRVHSAAMDs" +
|
||||
"wOQYIKwYBBQUHAgEWLWh0dHA6Ly9jeWJlcnRydXN0Lm9tbmlyb290LmNvbS9yZXBvc2l0b3J5LmNmbTA" +
|
||||
"OBgNVHQ8BAf8EBAMCAQYwgYkGA1UdIwSBgTB/oXmkdzB1MQswCQYDVQQGEwJVUzEYMBYGA1UEChMPR1R" +
|
||||
"FIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJUcnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgN" +
|
||||
"VBAMTGkdURSBDeWJlclRydXN0IEdsb2JhbCBSb290ggIBpTBFBgNVHR8EPjA8MDqgOKA2hjRodHRwOi8" +
|
||||
"vd3d3LnB1YmxpYy10cnVzdC5jb20vY2dpLWJpbi9DUkwvMjAxOC9jZHAuY3JsMB0GA1UdDgQWBBTlnVk" +
|
||||
"wgkdYzKz6CFQ2hns6tQRN8DANBgkqhkiG9w0BAQUFAAOBgQAWtCzJ8V7honubeCB6SnBwhhkAtwUq6Mk" +
|
||||
"lOQ/DZDx1CdmJFYAHwo28KaVkUM9xdUcjvU3Yf3eaURBuTh8gPEecQ3R/loQQTBNDvvjgci7/v648CgN" +
|
||||
"ggktv+ZrFHvavkDufYTs+3psFGsYsPFchCA9U+ihjbOgbnA/P3TBEE7lX/gACXjCCAlowggHDAgIBpTA" +
|
||||
"NBgkqhkiG9w0BAQQFADB1MQswCQYDVQQGEwJVUzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQY" +
|
||||
"DVQQLEx5HVEUgQ3liZXJUcnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN" +
|
||||
"0IEdsb2JhbCBSb290MB4XDTk4MDgxMzAwMjkwMFoXDTE4MDgxMzIzNTkwMFowdTELMAkGA1UEBhMCVVM" +
|
||||
"xGDAWBgNVBAoTD0dURSBDb3Jwb3JhdGlvbjEnMCUGA1UECxMeR1RFIEN5YmVyVHJ1c3QgU29sdXRpb25" +
|
||||
"zLCBJbmMuMSMwIQYDVQQDExpHVEUgQ3liZXJUcnVzdCBHbG9iYWwgUm9vdDCBnzANBgkqhkiG9w0BAQE" +
|
||||
"FAAOBjQAwgYkCgYEAlQ+gtvBQnOh6x4jN3RcOLrCU0Bs9DvaUwIqUxwbIkJfIuGQaen5sPFPhNyhzYH+" +
|
||||
"yl1MHn1P5bViU0q+NbYhngObtspXPcjHKpRxyulwC52RC5/mpLNY6DayNQqokATnmnD8BhVcNWIdF+NO" +
|
||||
"FqpNpJoVwSIA/EhXHebQfBS87YpkCAwEAATANBgkqhkiG9w0BAQQFAAOBgQBt6xsJ6V7ZUdtnImGkKjx" +
|
||||
"Id+OgfKbec6IUA4U9+6sOMMWDFjOBEwieezRO30DIdNe5fdz0dlV9m2NUGOnw6vNcsdmLQh65wJVOuvr" +
|
||||
"V4nz1aGG/juwFl19bsNejhTTEJKcND5WT78uU2J4fnVyFbceqrk8fIrXNla26p8z5qwt6fw==\"},{\"" +
|
||||
"leaf_input\":\"AAAAAAE9pe0GdAAAAAWmMIIFojCCBIqgAwIBAgISESE1Pz3s7WxTnxbUXmwjh7QhM" +
|
||||
"A0GCSqGSIb3DQEBBQUAMFkxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMS8wL" +
|
||||
"QYDVQQDEyZHbG9iYWxTaWduIEV4dGVuZGVkIFZhbGlkYXRpb24gQ0EgLSBHMjAeFw0xMTEwMTAxNDE2M" +
|
||||
"zdaFw0xMzEwMTAxNDE2MzdaMIHpMR0wGwYDVQQPDBRQcml2YXRlIE9yZ2FuaXphdGlvbjERMA8GA1UEB" +
|
||||
"RMIMDIzOTczNzMxEzARBgsrBgEEAYI3PAIBAxMCR0IxCzAJBgNVBAYTAkdCMRQwEgYDVQQIEwtPeGZvc" +
|
||||
"mRzaGlyZTEPMA0GA1UEBxMGT3hmb3JkMRgwFgYDVQQJEw9CZWF1bW9udCBTdHJlZXQxCzAJBgNVBAsTA" +
|
||||
"klUMSMwIQYDVQQKExpUaGUgT3hmb3JkIFBsYXlob3VzZSBUcnVzdDEgMB4GA1UEAxMXd3d3Lm94Zm9yZ" +
|
||||
"HBsYXlob3VzZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC2VgUJx+QIlEn4vMq5Y" +
|
||||
"ajmJEk1Lv5Kwc95oqEb2EbQMVhCJct0OA0wKJbnFGaNIo5DJHIouuz98JoHixMB54EwZi5I64wvqyq1o" +
|
||||
"hquTrUk4CS/4Y4odDw61dIqE2UZCxJYui9y4fTkptjNWmTaytw3LpGkt4Yx+AIcB+Oc7c7IPjTZEvR6L" +
|
||||
"5lK9WqfZmrS/Y+Tgflz6W79rpgUb2CyfqLUX0Hxohw5/Zp197y4XhOwou/f+Vaju3j/Gt1WBAbWrKxpK" +
|
||||
"AROVesfqT/H7Y/iOJ6jkPt5rqrLosStbGMpPUNNGRY0a8F1HBAUUzjTrRAE6CGZAPgBbcloYFc1zUsxP" +
|
||||
"LcZAgMBAAGjggHRMIIBzTAOBgNVHQ8BAf8EBAMCBaAwTAYDVR0gBEUwQzBBBgkrBgEEAaAyAQEwNDAyB" +
|
||||
"ggrBgEFBQcCARYmaHR0cHM6Ly93d3cuZ2xvYmFsc2lnbi5jb20vcmVwb3NpdG9yeS8wNwYDVR0RBDAwL" +
|
||||
"oIXd3d3Lm94Zm9yZHBsYXlob3VzZS5jb22CE294Zm9yZHBsYXlob3VzZS5jb20wCQYDVR0TBAIwADAdB" +
|
||||
"gNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwPwYDVR0fBDgwNjA0oDKgMIYuaHR0cDovL2NybC5nb" +
|
||||
"G9iYWxzaWduLmNvbS9ncy9nc2V4dGVuZHZhbGcyLmNybDCBiAYIKwYBBQUHAQEEfDB6MEEGCCsGAQUFB" +
|
||||
"zAChjVodHRwOi8vc2VjdXJlLmdsb2JhbHNpZ24uY29tL2NhY2VydC9nc2V4dGVuZHZhbGcyLmNydDA1B" +
|
||||
"ggrBgEFBQcwAYYpaHR0cDovL29jc3AyLmdsb2JhbHNpZ24uY29tL2dzZXh0ZW5kdmFsZzIwHQYDVR0OB" +
|
||||
"BYEFNp+MVYdHILBfTE6JM8O6Ul+Xwx3MB8GA1UdIwQYMBaAFLCwSv0cdSj4HGGqE/b6wZA9axajMA0GC" +
|
||||
"SqGSIb3DQEBBQUAA4IBAQALHuvJlSvi3OqKwDiXBrsx0zb7DGGLAzwQCyr60iwJuc1S8SkWURlM0CKIq" +
|
||||
"0Qupj5vYIAY2g6gDWxdf/JFMh/Rxzv90JE/xZm9YlnMh2Evz3glLLQ5y2x1ddc0RU9YFoeOmJcgDOROI" +
|
||||
"8aQvhcn9Jdj1Yk7BkKhbQv/pM9ETqtSro3Xbv/qcwPTG/oRysMCrN/DUxedUr95dFjrS3zpo+6Hr7Jab" +
|
||||
"TcaAak40ksY+vHEQWbqm4YluJ4/c+6qfpsTTUih6//7xs92UxObeSMtWPaxySxedXekTPYrGt5X8XXPY" +
|
||||
"oTKJnuJrxlkEBv0K7wozbn5Km2dpOqCAaqbf8WKa3mvAAA=\",\"extra_data\":\"AAgjAARfMIIEW" +
|
||||
"zCCA0OgAwIBAgILBAAAAAABL07hW2MwDQYJKoZIhvcNAQEFBQAwTDEgMB4GA1UECxMXR2xvYmFsU2lnb" +
|
||||
"iBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNM" +
|
||||
"TEwNDEzMTAwMDAwWhcNMjIwNDEzMTAwMDAwWjBZMQswCQYDVQQGEwJCRTEZMBcGA1UEChMQR2xvYmFsU" +
|
||||
"2lnbiBudi1zYTEvMC0GA1UEAxMmR2xvYmFsU2lnbiBFeHRlbmRlZCBWYWxpZGF0aW9uIENBIC0gRzIwg" +
|
||||
"gEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDNoUbMUpq4pbR/WNnN2EugcgyXW6aIIMO5PUbc0" +
|
||||
"FxSMPb6WU+FX7DbiLSpXysjSKyr9ZJ4FLYyD/tcaoVbAJDgu2X1WvlPZ37HbCnsk8ArysRe2LDb1r4/m" +
|
||||
"wvAj6ldrvcAAqT8umYROHf+IyAlVRDFvYK5TLFoxuJwe4NcE2fBofN8C6iZmtDimyUxyCuNQPZSY7Ggr" +
|
||||
"Vou9Xk2bTUsDt0F5NDiB0i3KF4r1VjVbNAMoQFGAVqPxq9kx1UBXeHRxmxQJaAFrQCrDI1la93rwnJUy" +
|
||||
"Q88ABeHIu/buYZ4FlGud9mmKE3zWI2DZ7k0JZscUYBR84OSaqOuR5rW5IsbwO2xAgMBAAGjggEvMIIBK" +
|
||||
"zAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQUsLBK/Rx1KPgcYaoT9" +
|
||||
"vrBkD1rFqMwRwYDVR0gBEAwPjA8BgRVHSAAMDQwMgYIKwYBBQUHAgEWJmh0dHBzOi8vd3d3Lmdsb2Jhb" +
|
||||
"HNpZ24uY29tL3JlcG9zaXRvcnkvMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwuZ2xvYmFsc2lnb" +
|
||||
"i5uZXQvcm9vdC1yMi5jcmwwRAYIKwYBBQUHAQEEODA2MDQGCCsGAQUFBzABhihodHRwOi8vb2NzcC5nb" +
|
||||
"G9iYWxzaWduLmNvbS9FeHRlbmRlZFNTTENBMB8GA1UdIwQYMBaAFJviB1dnHB7AagbeWbSaLd/cGYYuM" +
|
||||
"A0GCSqGSIb3DQEBBQUAA4IBAQBfKJAMLekgsjB8iKtABfqxnVwik9WdyjUx+izqHZNZGcSgDfsJQDHaZ" +
|
||||
"FbNUr7nGGbobQmbstuUPu42RR4kVLYgBZO1MRq4ZFfm0ywBTDmWef63BJgS77cuWnf+R/N5mELdFr5ba" +
|
||||
"SvJJsgpaHfmrPZOkBMoZwTsciUf16cKUH84DnIYsSm4/66h1FS4Zk2g1c/T76kyKsWXYtKEzLCg2Jipy" +
|
||||
"jjkzEQ1b2EmsC6Ycvk4Mg20oWIKIWIV3rttkxA2UztKIXvC9b4u9gIT6a5McOkq9h/Di+Wf4I0qKOgZL" +
|
||||
"LNl3ffxb5c1ntuSNWOB1yfkK2Kq+mKhcZKMCha3PbVKZVsCAAO+MIIDujCCAqKgAwIBAgILBAAAAAABD" +
|
||||
"4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4GA1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARB" +
|
||||
"gNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExM" +
|
||||
"jE1MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEGA1UEChMKR2xvY" +
|
||||
"mFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBA" +
|
||||
"KbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPLv4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isU" +
|
||||
"oh7SqbKSaZeqKeMWhG8eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfX" +
|
||||
"klqtTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzdC9XZzPnqJworc" +
|
||||
"5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pazq+r1feqCapgvdzZX99yqWATXgABy" +
|
||||
"Ur6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/" +
|
||||
"zAdBgNVHQ4EFgQUm+IHV2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2Nyb" +
|
||||
"C5nbG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG3lm0mi3f3BmGL" +
|
||||
"jANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4GsJ0/WwbgcQ3izDJr86iw8bmEbTUsp9" +
|
||||
"Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiP" +
|
||||
"qFbQfXf5WRDLenVOavSot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMN" +
|
||||
"YxdAfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7TBj0/VLZjmmx6" +
|
||||
"BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg==\"}]}"
|
||||
|
||||
Entry0 = "AAAAAAE9pCDoYwAAAAOGMIIDgjCCAuugAwIBAgIKFIT5BQAA" +
|
||||
"AAB9PDANBgkqhkiG9w0BAQUFADBGMQswCQYDVQQGEwJVUzETMBEGA1UEChMKR29vZ2xlIEluYzEiMCAG" +
|
||||
"A1UEAxMZR29vZ2xlIEludGVybmV0IEF1dGhvcml0eTAeFw0xMzAyMjAxMzM0NTFaFw0xMzA2MDcxOTQz" +
|
||||
"MjdaMGkxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1Nb3VudGFpbiBW" +
"aWV3MRMwEQYDVQQKEwpHb29nbGUgSW5jMRgwFgYDVQQDEw9tYWlsLmdvb2dsZS5jb20wgZ8wDQYJKoZI" +
"hvcNAQEBBQADgY0AMIGJAoGBAOD1FbMyG0IT8JOi2El6RVciBJp4ENfTkpJ2vn/HUq+gjprmUNxLSvcK" +
"+D8vBpkq8N41Qv+82PyTuZIB0pg2CJfs07C5+ZAQnwm01DiQjM/j2jKb5GegOBRYngbRkAPSGCufzJy+" +
"QBWbd1htqceIREEI/JH7pUGgg90XUQgBddBbAgMBAAGjggFSMIIBTjAdBgNVHSUEFjAUBggrBgEFBQcD" +
"AQYIKwYBBQUHAwIwHQYDVR0OBBYEFAgZmgKeyK8PXIGOAU+/5r/xNy5hMB8GA1UdIwQYMBaAFL/AMOv1" +
"QxE+Z7qekfv8atrjaxIkMFsGA1UdHwRUMFIwUKBOoEyGSmh0dHA6Ly93d3cuZ3N0YXRpYy5jb20vR29v" +
"Z2xlSW50ZXJuZXRBdXRob3JpdHkvR29vZ2xlSW50ZXJuZXRBdXRob3JpdHkuY3JsMGYGCCsGAQUFBwEB" +
"BFowWDBWBggrBgEFBQcwAoZKaHR0cDovL3d3dy5nc3RhdGljLmNvbS9Hb29nbGVJbnRlcm5ldEF1dGhv" +
"cml0eS9Hb29nbGVJbnRlcm5ldEF1dGhvcml0eS5jcnQwDAYDVR0TAQH/BAIwADAaBgNVHREEEzARgg9t" +
"YWlsLmdvb2dsZS5jb20wDQYJKoZIhvcNAQEFBQADgYEAX0lVXCymPXGdCwvn2kpqJw5Q+Hf8gzGhxDG6" +
"aMlO5wj2wf8qPWABDRwHdb4mdSmRMuwhzCJhE3PceXLNf3pOlR/Prt18mDY/r6cLwfldIXgTOYkw/uck" +
"Gwvb0BwMsEi2FDE/T3d3SOo+lHvqPX9sOVa2uyA0wmIYnbT+5uQY6m0AAA=="

Entry1 = "AAAAAAE9pe0GcwAAAATWMIIE0jCCA7qgAwIBAgIDAPY6MA0GCS" +
"qGSIb3DQEBBQUAMEAxCzAJBgNVBAYTAlVTMRcwFQYDVQQKEw5HZW9UcnVzdCwgSW5jLjEYMBYGA1UEAx" +
"MPR2VvVHJ1c3QgU1NMIENBMB4XDTExMTAyMTExMDUwNloXDTEzMTEyMjA0MzI0N1owgc4xKTAnBgNVBA" +
"UTIFRqbGZoUTB0cXp3WmtNa0svNXFNdGZqbjJ6aWRVNzRoMQswCQYDVQQGEwJVUzEXMBUGA1UECBMOU2" +
"91dGggQ2Fyb2xpbmExEzARBgNVBAcTCkNoYXJsZXN0b24xFzAVBgNVBAoTDkJsYWNrYmF1ZCBJbmMuMR" +
"AwDgYDVQQLEwdIb3N0aW5nMTswOQYDVQQDEzJ3d3cuc3RydWxlYXJ0c2NlbnRyZS5wdXJjaGFzZS10aW" +
"NrZXRzLW9ubGluZS5jby51azCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJtkbcF8x3TtIA" +
"RHC8BDRtoIAdh9HO9fo+5UUDtoc8f4xq7Rb2xbWOiEX29JqZOdsuucYTuYbbDf0uBYcJpkwhEg4Vg5sk" +
"yfp0jAd6pXm1euQ+RiRShzEQYKJ8y4/IjZHttA/8HSzEKWJnuidsYrl/twFhlX5WIZq3BUVQ9GVqGe9n" +
"1r2eIFTs6FxYUpaVzTkc6OLh1qSz+cnDDPigLUoUOK/KqN7ybmJxSefJw9WpFW/pIn6M0gFAbu0egFgD" +
"ybQ3JwUAEh8ddzpKRCqGq1mdZAKpKFHcqmi5nG5aFD4p1NFmPjDVQXohXLQvwtmwwKS2Zo+tnulPnEe9" +
"jjET/f+MUCAwEAAaOCAUQwggFAMB8GA1UdIwQYMBaAFEJ5VBthzVUrPmPVPEhX9Z/7Rc5KMA4GA1UdDw" +
"EB/wQEAwIEsDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwPQYDVR0RBDYwNIIyd3d3LnN0cn" +
"VsZWFydHNjZW50cmUucHVyY2hhc2UtdGlja2V0cy1vbmxpbmUuY28udWswPQYDVR0fBDYwNDAyoDCgLo" +
"YsaHR0cDovL2d0c3NsLWNybC5nZW90cnVzdC5jb20vY3Jscy9ndHNzbC5jcmwwHQYDVR0OBBYEFDIdT1" +
"lJ84lcDpGuBOuAXrP0AlBVMAwGA1UdEwEB/wQCMAAwQwYIKwYBBQUHAQEENzA1MDMGCCsGAQUFBzAChi" +
"dodHRwOi8vZ3Rzc2wtYWlhLmdlb3RydXN0LmNvbS9ndHNzbC5jcnQwDQYJKoZIhvcNAQEFBQADggEBAF" +
"hFfVTB5NWG3rVaq1jM72uGneGCjGk4qV4uKtEFn+zTJe9W2N/u8V2+mLvWQfDGPr8X5u8KzBOQ+fl6aR" +
"xvI71EM3kjMu6UuJkUwXsoocK1c/iVBwWSpqem20t/2Z2n5oIN54QsKZX6tQd9JHQ95YwtlyC7H4VeDK" +
"tJZ5x9UhJi8v35C+UgYPmiU5PdeoTdwxCf285FoQL9fBAPbv+EGek1XVaVg2yJKptG2OeM8AaynHsFcK" +
"/OcZJtsiGhtu2s9F910OBpoU+lhnPylwxOf4k35JcLaqHJ3BbLUtybbduNqtf3+sYhkvp5IcCypoJy/R" +
"k4fHgD8VTNiNWj7KGuHRYAAA=="

Entry2 = "AAAAAAE9pe0GcwAAAATjMIIE3zCCA8egAwIBAgIUCimKXmNJ+wiDS2zJvg6LC2cvr" +
"vQwDQYJKoZIhvcNAQEFBQAwWjELMAkGA1UEBhMCSlAxIzAhBgNVBAoMGkN5YmVydHJ1c3QgSmFwYW4gQ" +
"28uLCBMdGQuMSYwJAYDVQQDDB1DeWJlcnRydXN0IEphcGFuIFB1YmxpYyBDQSBHMjAeFw0xMjAzMTkwM" +
"zE0MzNaFw0xNTAzMzExNDU5MDBaMIGKMQswCQYDVQQGEwJKUDEOMAwGA1UECBMFVG9reW8xEDAOBgNVB" +
"AcTB0NodW8ta3UxHjAcBgNVBAoTFU5ldCBEcmVhbWVycyBDby4sTHRkLjEeMBwGA1UECxMVTWVnYSBNZ" +
"WRpYSBEZXBhcnRtZW50MRkwFwYDVQQDExB3d3cubmV0a2VpYmEuY29tMIIBIjANBgkqhkiG9w0BAQEFA" +
"AOCAQ8AMIIBCgKCAQEA2to03F4GdlRiGljXrSmT08/WrY59UWaoe/H4wQN6S5eQKVtaLjBWUF5Ro4sm/" +
"kND7aufyDqXUePxiZkphupV+VO7PeKp9e5yqEijK4z2XoFQhrCH5kkn1GDrTNzonxyAQtiBJ/k6gVTJV" +
"5fn4s7I6bZ2aXiJLIlTCFwMDNkrB3fj9py86WwymXaypSHkmo9Sx6PFiIOwPH6vXRK4UyAfFpXPiLGJE" +
"NEWOY2AtzMJiIoupgAuyvmoY0G0Vk34mA9gOIOrKE2QmVSR3AtA31UpNZ33qvimfz96rHtCeiZj5HNxZ" +
"RBMGBsHTlu5e49xypiYCCV41jQvmfZOShan3R3o2QIDAQABo4IBajCCAWYwCQYDVR0TBAIwADCBuAYDV" +
"R0gBIGwMIGtMIGqBggqgwiMmxEBATCBnTBXBggrBgEFBQcCAjBLGklGb3IgbW9yZSBkZXRhaWxzLCBwb" +
"GVhc2UgdmlzaXQgb3VyIHdlYnNpdGUgaHR0cHM6Ly93d3cuY3liZXJ0cnVzdC5uZS5qcCAuMEIGCCsGA" +
"QUFBwIBFjZodHRwczovL3d3dy5jeWJlcnRydXN0Lm5lLmpwL3NzbC9yZXBvc2l0b3J5L2luZGV4Lmh0b" +
"WwwGwYDVR0RBBQwEoIQd3d3Lm5ldGtlaWJhLmNvbTALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBB" +
"QUHAwEGCCsGAQUFBwMCMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly9zdXJlc2VyaWVzLWNybC5jeWJlc" +
"nRydXN0Lm5lLmpwL1N1cmVTZXJ2ZXIvY3RqcHViY2FnMi9jZHAuY3JsMA0GCSqGSIb3DQEBBQUAA4IBA" +
"QAw8sXP2ecKp5QGXtzcxKwkkznqocaddzoG69atcyzwshySLfo0ElMHP5WG9TpVrb6XSh2a1edwduAWB" +
"VAHQsHi4bt4wX9e9DBMnQx/jelcJevABQsXJPGc86diisXYDkHKQesi+8CvWvE0GmbVJRoq0RDo14WAS" +
"QszuqTNW993walCzNTg88s7MniFgmgFd8n31SVls6QhY2Fmlr13JLDtzVDQDbj6MCPuwG8DdmR1bCM/u" +
"gcnk0a7ZVy3d4yTjdhKpocToFklhHtHg0AINghPXIqU0njjUsy3ujNYIYo1TaZ3835Bo0lDwdvKK68Jk" +
"a24Cfcm+vfUfHKB56sIzquxAAA="

Entry3 = "AAAAAAE9pe0GdAAAAAWmMIIFojCCBIqgAwIBAgISESE1Pz3s7WxTnxbUXmwjh7Q" +
"hMA0GCSqGSIb3DQEBBQUAMFkxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMS8" +
"wLQYDVQQDEyZHbG9iYWxTaWduIEV4dGVuZGVkIFZhbGlkYXRpb24gQ0EgLSBHMjAeFw0xMTEwMTAxNDE" +
"2MzdaFw0xMzEwMTAxNDE2MzdaMIHpMR0wGwYDVQQPDBRQcml2YXRlIE9yZ2FuaXphdGlvbjERMA8GA1U" +
"EBRMIMDIzOTczNzMxEzARBgsrBgEEAYI3PAIBAxMCR0IxCzAJBgNVBAYTAkdCMRQwEgYDVQQIEwtPeGZ" +
"vcmRzaGlyZTEPMA0GA1UEBxMGT3hmb3JkMRgwFgYDVQQJEw9CZWF1bW9udCBTdHJlZXQxCzAJBgNVBAs" +
"TAklUMSMwIQYDVQQKExpUaGUgT3hmb3JkIFBsYXlob3VzZSBUcnVzdDEgMB4GA1UEAxMXd3d3Lm94Zm9" +
"yZHBsYXlob3VzZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC2VgUJx+QIlEn4vMq" +
"5YajmJEk1Lv5Kwc95oqEb2EbQMVhCJct0OA0wKJbnFGaNIo5DJHIouuz98JoHixMB54EwZi5I64wvqyq" +
"1ohquTrUk4CS/4Y4odDw61dIqE2UZCxJYui9y4fTkptjNWmTaytw3LpGkt4Yx+AIcB+Oc7c7IPjTZEvR" +
"6L5lK9WqfZmrS/Y+Tgflz6W79rpgUb2CyfqLUX0Hxohw5/Zp197y4XhOwou/f+Vaju3j/Gt1WBAbWrKx" +
"pKAROVesfqT/H7Y/iOJ6jkPt5rqrLosStbGMpPUNNGRY0a8F1HBAUUzjTrRAE6CGZAPgBbcloYFc1zUs" +
"xPLcZAgMBAAGjggHRMIIBzTAOBgNVHQ8BAf8EBAMCBaAwTAYDVR0gBEUwQzBBBgkrBgEEAaAyAQEwNDA" +
"yBggrBgEFBQcCARYmaHR0cHM6Ly93d3cuZ2xvYmFsc2lnbi5jb20vcmVwb3NpdG9yeS8wNwYDVR0RBDA" +
"wLoIXd3d3Lm94Zm9yZHBsYXlob3VzZS5jb22CE294Zm9yZHBsYXlob3VzZS5jb20wCQYDVR0TBAIwADA" +
"dBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwPwYDVR0fBDgwNjA0oDKgMIYuaHR0cDovL2NybC5" +
"nbG9iYWxzaWduLmNvbS9ncy9nc2V4dGVuZHZhbGcyLmNybDCBiAYIKwYBBQUHAQEEfDB6MEEGCCsGAQU" +
"FBzAChjVodHRwOi8vc2VjdXJlLmdsb2JhbHNpZ24uY29tL2NhY2VydC9nc2V4dGVuZHZhbGcyLmNydDA" +
"1BggrBgEFBQcwAYYpaHR0cDovL29jc3AyLmdsb2JhbHNpZ24uY29tL2dzZXh0ZW5kdmFsZzIwHQYDVR0" +
"OBBYEFNp+MVYdHILBfTE6JM8O6Ul+Xwx3MB8GA1UdIwQYMBaAFLCwSv0cdSj4HGGqE/b6wZA9axajMA0" +
"GCSqGSIb3DQEBBQUAA4IBAQALHuvJlSvi3OqKwDiXBrsx0zb7DGGLAzwQCyr60iwJuc1S8SkWURlM0CK" +
"Iq0Qupj5vYIAY2g6gDWxdf/JFMh/Rxzv90JE/xZm9YlnMh2Evz3glLLQ5y2x1ddc0RU9YFoeOmJcgDOR" +
"OI8aQvhcn9Jdj1Yk7BkKhbQv/pM9ETqtSro3Xbv/qcwPTG/oRysMCrN/DUxedUr95dFjrS3zpo+6Hr7J" +
"abTcaAak40ksY+vHEQWbqm4YluJ4/c+6qfpsTTUih6//7xs92UxObeSMtWPaxySxedXekTPYrGt5X8XX" +
"PYoTKJnuJrxlkEBv0K7wozbn5Km2dpOqCAaqbf8WKa3mvAAA="
)

func makeParent(a []byte, b []byte) [sha256.Size]byte {
	if len(a) != len(b) {
		log.Fatalf("a & b are different lengths: %d vs %d", len(a), len(b))
	}
	if len(a) != sha256.Size {
		log.Fatalf("a & b incorrect length for Sha256 hash")
	}
	var r [sha256.Size * 2]byte
	copy(r[:sha256.Size], a)
	copy(r[sha256.Size:], b)
	return sha256.Sum256(r[:])
}

func CalcRootHash() {
	e0, err := base64.StdEncoding.DecodeString(Entry0)
	if err != nil {
		log.Fatal(err)
	}
	h0 := sha256.Sum256(e0)
	e1, err := base64.StdEncoding.DecodeString(Entry1)
	if err != nil {
		log.Fatal(err)
	}
	h1 := sha256.Sum256(e1)
	e2, err := base64.StdEncoding.DecodeString(Entry2)
	if err != nil {
		log.Fatal(err)
	}
	h2 := sha256.Sum256(e2)
	e3, err := base64.StdEncoding.DecodeString(Entry3)
	if err != nil {
		log.Fatal(err)
	}
	h3 := sha256.Sum256(e3)

	hash01 := makeParent(h0[:], h1[:])
	hash23 := makeParent(h2[:], h3[:])
	root := makeParent(hash01[:], hash23[:])
	log.Println(base64.StdEncoding.EncodeToString(root[:]))
}
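For reference, RFC 6962 computes the Merkle Tree Hash with domain-separation prefixes: a leaf hash is SHA-256(0x00 || entry) and an interior node is SHA-256(0x01 || left || right), whereas the helpers above hash the raw concatenation. A minimal sketch of the prefixed variant, reusing only crypto/sha256 from this file's imports (illustration only, not part of this commit):

// rfc6962LeafHash returns SHA-256(0x00 || entry), the RFC 6962 leaf hash.
func rfc6962LeafHash(entry []byte) [sha256.Size]byte {
	return sha256.Sum256(append([]byte{0x00}, entry...))
}

// rfc6962NodeHash returns SHA-256(0x01 || left || right), the RFC 6962
// interior-node hash.
func rfc6962NodeHash(left, right [sha256.Size]byte) [sha256.Size]byte {
	buf := make([]byte, 0, 1+2*sha256.Size)
	buf = append(buf, 0x01)
	buf = append(buf, left[:]...)
	buf = append(buf, right[:]...)
	return sha256.Sum256(buf)
}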

@ -0,0 +1,5 @@
language: go

go:
- 1.5
- 1.6
@ -0,0 +1,25 @@
Some of this code is Copyright (c) 2014 CloudFlare Inc., some is Copyright (c)
2015 Internet Security Research Group.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.

Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@ -1,5 +1,3 @@
// +build !nopkcs11

// Package pkcs11key implements crypto.Signer for PKCS #11 private
// keys. Currently, only RSA keys are supported.
// See ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-11/v2-30/pkcs-11v2-30b-d6.pdf for
@ -1,5 +1,3 @@
// +build !nopkcs11

package pkcs11key

import (
4	Makefile

@ -1,7 +1,7 @@
# This Makefile also tricks Travis into not running 'go get' for our
# build. See http://docs.travis-ci.com/user/languages/go/

OBJDIR ?= ./bin
OBJDIR ?= $(shell pwd)/bin
DESTDIR ?= /usr/local/bin
ARCHIVEDIR ?= /tmp

@ -11,7 +11,7 @@ MAINTAINER ?= "Community"

CMDS = $(shell find ./cmd -maxdepth 1 -mindepth 1 -type d | grep -v testdata)
CMD_BASENAMES = $(shell echo $(CMDS) | xargs -n1 basename)
CMD_BINS = $(addprefix $(OBJDIR)/, $(CMD_BASENAMES) )
CMD_BINS = $(addprefix bin/, $(CMD_BASENAMES) )
OBJECTS = $(CMD_BINS)

# Build environment variables (referencing core/util.go)
27	README.md

@ -1,7 +1,7 @@
Boulder - An ACME CA
====================

This is an initial implementation of an ACME-based CA. The [ACME protocol](https://github.com/letsencrypt/acme-spec/) allows the CA to automatically verify that an applicant for a certificate actually controls an identifier, and allows domain holders to issue and revoke certificates for their domains.
This is an implementation of an ACME-based CA. The [ACME protocol](https://github.com/letsencrypt/acme-spec/) allows the CA to automatically verify that an applicant for a certificate actually controls an identifier, and allows domain holders to issue and revoke certificates for their domains.

[](https://travis-ci.org/letsencrypt/boulder)
[](https://coveralls.io/r/letsencrypt/boulder)

@ -22,6 +22,10 @@ Slow start
This approach is better if you intend to develop on Boulder frequently, because
it's challenging to develop inside the Docker container.

We recommend setting git's [fsckObjects
setting](https://groups.google.com/forum/#!topic/binary-transparency/f-BI4o8HZW0/discussion)
for better integrity guarantees when getting updates.

Boulder requires an installation of RabbitMQ, libtool-ltdl, goose, and
MariaDB 10 to work correctly. On Ubuntu and CentOS, you may have to
install RabbitMQ from https://rabbitmq.com/download.html to get a

@ -55,7 +59,7 @@ or

Resolve Go-dependencies, set up a database and RabbitMQ:

> ./test/setup.sh
./test/setup.sh

**Note**: `setup.sh` calls `create_db.sh`, which uses the root MariaDB
user with the default password, so if you have disabled that account

@ -63,15 +67,21 @@ or changed the password you may have to adjust the file or recreate the commands

Start each boulder component with test configs (Ctrl-C kills all):

> ./start.py
./start.py

Run tests:

> ./test.sh
./test.sh

Working with a client:

Check out the official Let's Encrypt client from https://github.com/letsencrypt/letsencrypt/ and follow the setup instructions there.
Check out the official Let's Encrypt client from https://github.com/letsencrypt/letsencrypt/ and follow the setup instructions there. Once you've got the client set up, you'll probably want to run it against your local Boulder. There are a number of command line flags that are necessary to run the client against a local Boulder, and without root access. The simplest way to run the client locally is to source a file that provides an alias for letsencrypt that has all those flags:

source ~/letsencrypt/tests/integration/_common.sh
letsencrypt_test certonly -a standalone -d example.com

Your local Boulder instance uses a fake DNS server that returns 127.0.0.1 for
any query, so you can use any value for the -d flag.

Component Model
---------------

@ -109,7 +119,7 @@ The full details of how the various ACME operations happen in Boulder are laid o
Dependencies
------------

All Go dependencies are vendorized under the Godeps directory,
All Go dependencies are vendored under the Godeps directory,
to [make dependency management easier](https://groups.google.com/forum/m/#!topic/golang-dev/nMWoEAG55v8).

Local development also requires a RabbitMQ installation and MariaDB

@ -135,8 +145,3 @@ godep save -r ./...
git add Godeps
git commit
```

TODO
----

See [the issues list](https://github.com/letsencrypt/boulder/issues)
@ -485,7 +485,7 @@ func (ca *CertificateAuthorityImpl) IssueCertificate(csr x509.CertificateRequest
if err != nil {
err = core.InternalServerError(err.Error())
// AUDIT[ Error Conditions ] 9cc4d537-8534-4970-8665-4b382abe82f3
ca.log.Audit(fmt.Sprintf("Serial randomness failed, err=[%v]", err))
ca.log.AuditErr(fmt.Errorf("Serial randomness failed, err=[%v]", err))
return emptyCert, err
}
serialBigInt := big.NewInt(0)

@ -524,14 +524,14 @@ func (ca *CertificateAuthorityImpl) IssueCertificate(csr x509.CertificateRequest
if err != nil {
err = core.InternalServerError(err.Error())
// AUDIT[ Error Conditions ] 9cc4d537-8534-4970-8665-4b382abe82f3
ca.log.Audit(fmt.Sprintf("Signer failed, rolling back: serial=[%s] err=[%v]", serialHex, err))
ca.log.AuditErr(fmt.Errorf("Signer failed, rolling back: serial=[%s] err=[%v]", serialHex, err))
return emptyCert, err
}

if len(certPEM) == 0 {
err = core.InternalServerError("No certificate returned by server")
// AUDIT[ Error Conditions ] 9cc4d537-8534-4970-8665-4b382abe82f3
ca.log.Audit(fmt.Sprintf("PEM empty from Signer, rolling back: serial=[%s] err=[%v]", serialHex, err))
ca.log.AuditErr(fmt.Errorf("PEM empty from Signer, rolling back: serial=[%s] err=[%v]", serialHex, err))
return emptyCert, err
}

@ -539,7 +539,7 @@ func (ca *CertificateAuthorityImpl) IssueCertificate(csr x509.CertificateRequest
if block == nil || block.Type != "CERTIFICATE" {
err = core.InternalServerError("Invalid certificate value returned")
// AUDIT[ Error Conditions ] 9cc4d537-8534-4970-8665-4b382abe82f3
ca.log.Audit(fmt.Sprintf("PEM decode error, aborting and rolling back issuance: pem=[%s] err=[%v]", certPEM, err))
ca.log.AuditErr(fmt.Errorf("PEM decode error, aborting and rolling back issuance: pem=[%s] err=[%v]", certPEM, err))
return emptyCert, err
}
certDER := block.Bytes

@ -552,7 +552,7 @@ func (ca *CertificateAuthorityImpl) IssueCertificate(csr x509.CertificateRequest
if err != nil {
err = core.InternalServerError(err.Error())
// AUDIT[ Error Conditions ] 9cc4d537-8534-4970-8665-4b382abe82f3
ca.log.Audit(fmt.Sprintf("Uncaught error, aborting and rolling back issuance: pem=[%s] err=[%v]", certPEM, err))
ca.log.AuditErr(fmt.Errorf("Uncaught error, aborting and rolling back issuance: pem=[%s] err=[%v]", certPEM, err))
return emptyCert, err
}

@ -561,7 +561,7 @@ func (ca *CertificateAuthorityImpl) IssueCertificate(csr x509.CertificateRequest
if err != nil {
err = core.InternalServerError(err.Error())
// AUDIT[ Error Conditions ] 9cc4d537-8534-4970-8665-4b382abe82f3
ca.log.Audit(fmt.Sprintf(
ca.log.AuditErr(fmt.Errorf(
"Failed RPC to store at SA, orphaning certificate: b64der=[%s] err=[%v], regID=[%d]",
base64.StdEncoding.EncodeToString(certDER),
err,
@ -12,9 +12,9 @@ import (
"io/ioutil"

"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cactus/go-statsd-client/statsd"
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/crypto/pkcs11key"
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/helpers"
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/letsencrypt/pkcs11key"
"github.com/letsencrypt/boulder/ca"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/core"
@ -41,7 +41,7 @@ func main() {
bundle = append(bundle, ct.ASN1Cert(cert.Raw))
}

pubi := publisher.New(bundle, logs)
pubi := publisher.New(bundle, logs, c.Publisher.SubmissionTimeout.Duration)

go cmd.DebugServer(c.Publisher.DebugAddr)
go cmd.ProfileCmd("Publisher", stats)

@ -52,7 +52,7 @@ func main() {

pubs, err := rpc.NewAmqpRPCServer(amqpConf, c.Publisher.MaxConcurrentRPCServerRequests, stats)
cmd.FailOnError(err, "Unable to create Publisher RPC server")
rpc.NewPublisherServer(pubs, &pubi)
rpc.NewPublisherServer(pubs, pubi)

err = pubs.Start(amqpConf)
cmd.FailOnError(err, "Unable to run Publisher RPC server")
@ -14,7 +14,7 @@ import (
"time"

cfsslConfig "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/config"
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/crypto/pkcs11key"
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/letsencrypt/pkcs11key"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/va"
)

@ -155,6 +155,7 @@ type Config struct {

Publisher struct {
ServiceConfig
SubmissionTimeout ConfigDuration
MaxConcurrentRPCServerRequests int64
}
@ -14,8 +14,8 @@ import (

"github.com/letsencrypt/boulder/cmd"

"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/crypto/pkcs11key"
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/codegangsta/cli"
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/letsencrypt/pkcs11key"
"github.com/letsencrypt/boulder/Godeps/_workspace/src/golang.org/x/crypto/ocsp"
)
@ -12,7 +12,7 @@ import (
"math/big"
"time"

"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/crypto/pkcs11key"
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/letsencrypt/pkcs11key"
)

const pkcs11FileName = "pkcs11.json"
@ -188,10 +188,10 @@ func caller(level int) string {
func (log *AuditLogger) AuditPanic() {
if err := recover(); err != nil {
buf := make([]byte, 8192)
log.Audit(fmt.Sprintf("Panic caused by err: %s", err))
log.AuditErr(fmt.Errorf("Panic caused by err: %s", err))

runtime.Stack(buf, false)
log.Audit(fmt.Sprintf("Stack Trace (Current frame) %s", buf))
log.AuditErr(fmt.Errorf("Stack Trace (Current frame) %s", buf))

runtime.Stack(buf, true)
log.Warning(fmt.Sprintf("Stack Trace (All frames): %s", buf))

@ -243,9 +243,9 @@ func (log *AuditLogger) Notice(msg string) (err error) {
return log.logAtLevel(syslog.LOG_NOTICE, msg)
}

// Audit sends a NOTICE-severity message that is prefixed with the
// AuditNotice sends a NOTICE-severity message that is prefixed with the
// audit tag, for special handling at the upstream system logger.
func (log *AuditLogger) Audit(msg string) (err error) {
func (log *AuditLogger) AuditNotice(msg string) (err error) {
return log.auditAtLevel(syslog.LOG_NOTICE, msg)
}
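As a usage note on the rename, a minimal sketch of how callers invoke the two audit helpers after this change; GetAuditLogger, AuditNotice, and AuditErr all appear in hunks of this diff, and the sketch assumes the process has already configured the audit logger during startup:

package main

import (
	"errors"
	"fmt"

	blog "github.com/letsencrypt/boulder/log"
)

func main() {
	// Assumes an audit logger was set up earlier in process startup.
	logger := blog.GetAuditLogger()
	// NOTICE-severity message carrying the audit tag (formerly Audit).
	logger.AuditNotice(fmt.Sprintf("Checked CAA records for %s", "example.com"))
	// ERR-severity message carrying the audit tag; takes an error value.
	logger.AuditErr(errors.New("Signer failed, rolling back"))
}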
@ -70,16 +70,16 @@ func TestConstructionNil(t *testing.T) {

func TestEmit(t *testing.T) {
t.Parallel()
audit := setup(t)
log := setup(t)

audit.Audit("test message")
log.AuditNotice("test message")
}

func TestEmitEmpty(t *testing.T) {
t.Parallel()
audit := setup(t)
log := setup(t)

audit.Audit("")
log.AuditNotice("")
}

func TestEmitErrors(t *testing.T) {

@ -94,7 +94,8 @@ func TestSyslogMethods(t *testing.T) {
t.Parallel()
audit := setup(t)

audit.Audit("audit-logger_test.go: audit-notice")
audit.AuditNotice("audit-logger_test.go: audit-notice")
audit.AuditErr(errors.New("audit-logger_test.go: audit-err"))
audit.Crit("audit-logger_test.go: critical")
audit.Debug("audit-logger_test.go: debug")
audit.Emerg("audit-logger_test.go: emerg")

@ -178,7 +179,11 @@ func TestTransmission(t *testing.T) {

data := make([]byte, 128)

audit.Audit("audit-logger_test.go: audit-notice")
audit.AuditNotice("audit-logger_test.go: audit-notice")
_, _, err = l.ReadFrom(data)
test.AssertNotError(t, err, "Failed to find packet")

audit.AuditErr(errors.New("audit-logger_test.go: audit-err"))
_, _, err = l.ReadFrom(data)
test.AssertNotError(t, err, "Failed to find packet")
@ -54,13 +54,10 @@ func New(dbMap *gorp.DbMap, enforceWhitelist bool, challengeTypes map[string]boo
const (
maxLabels = 10

// DNS defines max label length as 63 characters. Some implementations allow
// more, but we will be conservative.
maxLabelLength = 63

// This is based off maxLabels * maxLabelLength, but is also a restriction based
// on the max size of indexed storage in the issuedNames table.
maxDNSIdentifierLength = 640
// RFC 1034 says DNS labels have a max of 63 octets, and names have a max of 255
// octets: https://tools.ietf.org/html/rfc1035#page-10
maxLabelLength = 63
maxDNSIdentifierLength = 255

// whitelistedPartnerRegID is the registration ID we check for to see if we need
// to skip the domain whitelist (but not the blacklist). This is for an

@ -146,7 +143,7 @@ func (pa AuthorityImpl) WillingToIssue(id core.AcmeIdentifier, regID int64) erro
}
}

if len(domain) > 255 {
if len(domain) > maxDNSIdentifierLength {
return errNameTooLong
}
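For context on the tightened constants, a small self-contained sketch of the length checks they imply; checkNameLength is a hypothetical helper for illustration only, not the repo's WillingToIssue:

package main

import (
	"fmt"
	"strings"
)

const (
	maxLabelLength         = 63  // RFC 1035: a label is at most 63 octets
	maxDNSIdentifierLength = 255 // RFC 1035: a name is at most 255 octets
)

// checkNameLength shows how the two limits apply to a candidate domain name.
func checkNameLength(domain string) error {
	if len(domain) > maxDNSIdentifierLength {
		return fmt.Errorf("name too long: %d octets", len(domain))
	}
	for _, label := range strings.Split(domain, ".") {
		if len(label) > maxLabelLength {
			return fmt.Errorf("label too long: %q", label)
		}
	}
	return nil
}

func main() {
	fmt.Println(checkNameLength("www.example.com")) // <nil>
}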

@ -47,7 +47,7 @@ func ProblemDetailsToStatusCode(prob *ProblemDetails) int {
return prob.HTTPStatus
}
switch prob.Type {
case ConnectionProblem, MalformedProblem, TLSProblem, UnknownHostProblem, BadNonceProblem:
case ConnectionProblem, MalformedProblem, TLSProblem, UnknownHostProblem, BadNonceProblem, InvalidEmailProblem:
return http.StatusBadRequest
case ServerInternalProblem:
return http.StatusInternalServerError
@ -4,6 +4,7 @@ import (
"testing"

"github.com/letsencrypt/boulder/test"
"net/http"
)

func TestProblemDetails(t *testing.T) {

@ -14,3 +15,30 @@ func TestProblemDetails(t *testing.T) {
}
test.AssertEquals(t, pd.Error(), "urn:acme:error:malformed :: Wat? o.O")
}

func TestProblemDetailsToStatusCode(t *testing.T) {
testCases := []struct {
pb *ProblemDetails
statusCode int
}{
{&ProblemDetails{Type: ConnectionProblem}, http.StatusBadRequest},
{&ProblemDetails{Type: MalformedProblem}, http.StatusBadRequest},
{&ProblemDetails{Type: ServerInternalProblem}, http.StatusInternalServerError},
{&ProblemDetails{Type: TLSProblem}, http.StatusBadRequest},
{&ProblemDetails{Type: UnauthorizedProblem}, http.StatusForbidden},
{&ProblemDetails{Type: UnknownHostProblem}, http.StatusBadRequest},
{&ProblemDetails{Type: RateLimitedProblem}, statusTooManyRequests},
{&ProblemDetails{Type: BadNonceProblem}, http.StatusBadRequest},
{&ProblemDetails{Type: InvalidEmailProblem}, http.StatusBadRequest},
{&ProblemDetails{Type: "foo"}, http.StatusInternalServerError},
{&ProblemDetails{Type: "foo", HTTPStatus: 200}, 200},
{&ProblemDetails{Type: ConnectionProblem, HTTPStatus: 200}, 200},
}

for _, c := range testCases {
p := ProblemDetailsToStatusCode(c.pb)
if c.statusCode != p {
t.Errorf("Incorrect status code for %s. Expected %d, got %d", c.pb.Type, c.statusCode, p)
}
}
}
@ -11,9 +11,11 @@ import (
"fmt"
"net/http"
"strings"
"time"

ct "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go"
ctClient "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/client"
"github.com/letsencrypt/boulder/Godeps/_workspace/src/golang.org/x/net/context"

"github.com/letsencrypt/boulder/core"
blog "github.com/letsencrypt/boulder/log"

@ -55,25 +57,30 @@ type ctSubmissionRequest struct {

// Impl defines a Publisher
type Impl struct {
log *blog.AuditLogger
client *http.Client
issuerBundle []ct.ASN1Cert
ctLogs []*Log
log *blog.AuditLogger
client *http.Client
issuerBundle []ct.ASN1Cert
ctLogs []*Log
submissionTimeout time.Duration

SA core.StorageAuthority
}

// New creates a Publisher that will submit certificates
// to any CT logs configured in CTConfig
func New(bundle []ct.ASN1Cert, logs []*Log) (pub Impl) {
func New(bundle []ct.ASN1Cert, logs []*Log, submissionTimeout time.Duration) *Impl {
logger := blog.GetAuditLogger()
logger.Notice("Publisher Authority Starting")

pub.issuerBundle = bundle
pub.log = logger
pub.ctLogs = logs

return
if submissionTimeout == 0 {
submissionTimeout = time.Hour * 12
}
return &Impl{
submissionTimeout: submissionTimeout,
issuerBundle: bundle,
ctLogs: logs,
log: logger,
}
}

// SubmitToCT will submit the certificate represented by certDER to any CT

@ -81,16 +88,18 @@ func New(bundle []ct.ASN1Cert, logs []*Log) (pub Impl) {
func (pub *Impl) SubmitToCT(der []byte) error {
cert, err := x509.ParseCertificate(der)
if err != nil {
pub.log.Audit(fmt.Sprintf("Failed to parse certificate: %s", err))
pub.log.AuditErr(fmt.Errorf("Failed to parse certificate: %s", err))
return err
}

chain := append([]ct.ASN1Cert{der}, pub.issuerBundle...)
for _, ctLog := range pub.ctLogs {
sct, err := ctLog.client.AddChain(chain)
ctx, cancel := context.WithTimeout(context.Background(), pub.submissionTimeout)
defer cancel()
sct, err := ctLog.client.AddChainWithContext(ctx, chain)
if err != nil {
// AUDIT[ Error Conditions ] 9cc4d537-8534-4970-8665-4b382abe82f3
pub.log.Audit(fmt.Sprintf("Failed to submit certificate to CT log: %s", err))
pub.log.AuditErr(fmt.Errorf("Failed to submit certificate to CT log: %s", err))
continue
}

@ -105,21 +114,21 @@ func (pub *Impl) SubmitToCT(der []byte) error {
})
if err != nil {
// AUDIT[ Error Conditions ] 9cc4d537-8534-4970-8665-4b382abe82f3
pub.log.Audit(fmt.Sprintf("Failed to verify SCT receipt: %s", err))
pub.log.AuditErr(fmt.Errorf("Failed to verify SCT receipt: %s", err))
continue
}

internalSCT, err := sctToInternal(sct, core.SerialToString(cert.SerialNumber))
if err != nil {
// AUDIT[ Error Conditions ] 9cc4d537-8534-4970-8665-4b382abe82f3
pub.log.Audit(fmt.Sprintf("Failed to convert SCT receipt: %s", err))
pub.log.AuditErr(fmt.Errorf("Failed to convert SCT receipt: %s", err))
continue
}

err = pub.SA.AddSCTReceipt(internalSCT)
if err != nil {
// AUDIT[ Error Conditions ] 9cc4d537-8534-4970-8665-4b382abe82f3
pub.log.Audit(fmt.Sprintf("Failed to store SCT receipt in database: %s", err))
pub.log.AuditErr(fmt.Errorf("Failed to store SCT receipt in database: %s", err))
continue
}
}
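The loop above now bounds each log submission with a context deadline. A minimal, self-contained sketch of the same timeout pattern under an assumed client interface (the method name and signature here are placeholders, the standard library context package is used for brevity, and each cancel is released per iteration rather than deferred inside the loop):

package main

import (
	"context"
	"fmt"
	"time"
)

// chainAdder stands in for a CT log client; the method name and signature
// are assumptions for illustration only.
type chainAdder interface {
	AddChainWithContext(ctx context.Context, chain [][]byte) error
}

// slowLog simulates a log that takes a fixed time to accept a chain.
type slowLog struct{ delay time.Duration }

func (s slowLog) AddChainWithContext(ctx context.Context, chain [][]byte) error {
	select {
	case <-time.After(s.delay):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func submitAll(logs []chainAdder, chain [][]byte, timeout time.Duration) {
	for _, l := range logs {
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		err := l.AddChainWithContext(ctx, chain)
		cancel() // release the timer as soon as this log's submission finishes
		if err == context.DeadlineExceeded {
			fmt.Println("submission timed out")
			continue
		}
		fmt.Println("submission result:", err)
	}
}

func main() {
	logs := []chainAdder{slowLog{delay: 2 * time.Second}, slowLog{delay: 100 * time.Millisecond}}
	submitAll(logs, nil, time.Second)
}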

@ -222,7 +222,8 @@ func retryableLogSrv(leaf []byte, k *ecdsa.PrivateKey, retries int, after *int)
} else {
hits++
if after != nil {
w.Header().Set("Retry-After", fmt.Sprintf("%d", *after))
w.Header().Add("Retry-After", fmt.Sprintf("%d", *after))
w.WriteHeader(503)
}
w.WriteHeader(http.StatusRequestTimeout)
}

@ -256,7 +257,7 @@ func badLogSrv() *httptest.Server {
func setup(t *testing.T) (*Impl, *x509.Certificate, *ecdsa.PrivateKey) {
intermediatePEM, _ := pem.Decode([]byte(testIntermediate))

pub := New(nil, nil)
pub := New(nil, nil, 0)
pub.issuerBundle = append(pub.issuerBundle, ct.ASN1Cert(intermediatePEM.Bytes))
pub.SA = mocks.NewStorageAuthority(clock.NewFake())

@ -267,7 +268,7 @@ func setup(t *testing.T) (*Impl, *x509.Certificate, *ecdsa.PrivateKey) {
k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
test.AssertNotError(t, err, "Couldn't generate test key")

return &pub, leaf, k
return pub, leaf, k
}

func addLog(t *testing.T, pub *Impl, port int, pubKey *ecdsa.PublicKey) {

@ -346,8 +347,25 @@ func TestRetryAfter(t *testing.T) {
err = pub.SubmitToCT(leaf.Raw)
test.AssertNotError(t, err, "Certificate submission failed")
test.AssertEquals(t, len(log.GetAllMatching("Failed to.*")), 0)
test.Assert(t, time.Since(startedWaiting) > time.Duration(retryAfter*2)*time.Second, fmt.Sprintf("Submitter retried submission too fast: %s", time.Since(startedWaiting)))
}

test.Assert(t, time.Since(startedWaiting) < time.Duration(retryAfter*2)*time.Second, fmt.Sprintf("Submitter retried submission too fast: %s", time.Since(startedWaiting)))
func TestRetryAfterContext(t *testing.T) {
pub, leaf, k := setup(t)

retryAfter := 2
server := retryableLogSrv(leaf.Raw, k, 2, &retryAfter)
defer server.Close()
port, err := getPort(server)
test.AssertNotError(t, err, "Failed to get test server port")
addLog(t, pub, port, &k.PublicKey)

pub.submissionTimeout = time.Second
s := time.Now()
pub.SubmitToCT(leaf.Raw)
took := time.Since(s)
test.Assert(t, len(log.GetAllMatching(".*Failed to submit certificate to CT log: context deadline exceeded.*")) == 1, "Submission didn't timeout")
test.Assert(t, took >= time.Second, fmt.Sprintf("Submission took too long to timeout: %s", took))
}

func TestMultiLog(t *testing.T) {
@ -786,7 +786,7 @@ func (ra *RegistrationAuthorityImpl) RevokeCertificateWithReg(cert x509.Certific
// Revocation reason
// Registration ID of requester
// Error (if there was one)
ra.log.Audit(fmt.Sprintf(
ra.log.AuditNotice(fmt.Sprintf(
"%s, Request by registration ID: %d",
revokeEvent(state, serialString, cert.Subject.CommonName, cert.DNSNames, revocationCode),
regID,

@ -819,7 +819,7 @@ func (ra *RegistrationAuthorityImpl) AdministrativelyRevokeCertificate(cert x509
// Revocation reason
// Name of admin-revoker user
// Error (if there was one)
ra.log.Audit(fmt.Sprintf(
ra.log.AuditNotice(fmt.Sprintf(
"%s, admin-revoker user: %s",
revokeEvent(state, serialString, cert.Subject.CommonName, cert.DNSNames, revocationCode),
user,
@ -377,7 +377,7 @@ func (rpc *AmqpRPCServer) processMessage(msg amqp.Delivery) {
rpc.log.Debug(fmt.Sprintf(" [s<][%s][%s] received %s(%s) [%s]", rpc.serverQueue, msg.ReplyTo, msg.Type, safeDER(msg.Body), msg.CorrelationId))
if !present {
// AUDIT[ Misrouted Messages ] f523f21f-12d2-4c31-b2eb-ee4b7d96d60e
rpc.log.Audit(fmt.Sprintf(" [s<][%s][%s] Misrouted message: %s - %s - %s", rpc.serverQueue, msg.ReplyTo, msg.Type, safeDER(msg.Body), msg.CorrelationId))
rpc.log.AuditErr(fmt.Errorf(" [s<][%s][%s] Misrouted message: %s - %s - %s", rpc.serverQueue, msg.ReplyTo, msg.Type, safeDER(msg.Body), msg.CorrelationId))
return
}
var response rpcResponse

@ -387,7 +387,7 @@ func (rpc *AmqpRPCServer) processMessage(msg amqp.Delivery) {
jsonResponse, err := json.Marshal(response)
if err != nil {
// AUDIT[ Error Conditions ] 9cc4d537-8534-4970-8665-4b382abe82f3
rpc.log.Audit(fmt.Sprintf(" [s>][%s][%s] Error condition marshalling RPC response %s [%s]", rpc.serverQueue, msg.ReplyTo, msg.Type, msg.CorrelationId))
rpc.log.AuditErr(fmt.Errorf(" [s>][%s][%s] Error condition marshalling RPC response %s [%s]", rpc.serverQueue, msg.ReplyTo, msg.Type, msg.CorrelationId))
return
}
rpc.log.Debug(fmt.Sprintf(" [s>][%s][%s] replying %s: %s [%s]", rpc.serverQueue, msg.ReplyTo, msg.Type, response.debugString(), msg.CorrelationId))
@ -193,11 +193,11 @@ type countFQDNSetsResponse struct {

func improperMessage(method string, err error, obj interface{}) {
log := blog.GetAuditLogger()
log.Audit(fmt.Sprintf("Improper message. method: %s err: %s data: %+v", method, err, obj))
log.AuditErr(fmt.Errorf("Improper message. method: %s err: %s data: %+v", method, err, obj))
}
func errorCondition(method string, err error, obj interface{}) {
log := blog.GetAuditLogger()
log.Audit(fmt.Sprintf("Error condition. method: %s err: %s data: %+v", method, err, obj))
log.AuditErr(fmt.Errorf("Error condition. method: %s err: %s data: %+v", method, err, obj))
}

// NewRegistrationAuthorityServer constructs an RPC server
@ -312,6 +312,7 @@

"publisher": {
"maxConcurrentRPCServerRequests": 16,
"submissionTimeout": "5s",
"debugAddr": "localhost:8009",
"amqp": {
"serverURLFile": "test/secrets/amqp_url",
@ -164,7 +164,7 @@ func (va *ValidationAuthorityImpl) fetchHTTP(ctx context.Context, identifier cor
}

// AUDIT[ Certificate Requests ] 11917fa4-10ef-4e0d-9105-bacbe7836a3c
va.log.Audit(fmt.Sprintf("Attempting to validate %s for %s", challenge.Type, url))
va.log.AuditErr(fmt.Errorf("Attempting to validate %s for %s", challenge.Type, url))
httpRequest, err := http.NewRequest("GET", url.String(), nil)
if err != nil {
va.log.Debug(fmt.Sprintf("%s [%s] HTTP failure: %s", challenge.Type, identifier, err))

@ -463,7 +463,7 @@ func (va *ValidationAuthorityImpl) checkCAA(ctx context.Context, identifier core
return bdns.ProblemDetailsFromDNSError(err)
}
// AUDIT[ Certificate Requests ] 11917fa4-10ef-4e0d-9105-bacbe7836a3c
va.log.Audit(fmt.Sprintf("Checked CAA records for %s, registration ID %d [Present: %t, Valid for issuance: %t]", identifier.Value, regID, present, valid))
va.log.AuditNotice(fmt.Sprintf("Checked CAA records for %s, registration ID %d [Present: %t, Valid for issuance: %t]", identifier.Value, regID, present, valid))
if !valid {
return &probs.ProblemDetails{
Type: probs.ConnectionProblem,
@ -449,13 +449,13 @@ func (wfe *WebFrontEndImpl) sendError(response http.ResponseWriter, logEvent *re
// auditable events.
if prob.Type == probs.ServerInternalProblem {
// AUDIT[ Error Conditions ] 9cc4d537-8534-4970-8665-4b382abe82f3
wfe.log.Audit(fmt.Sprintf("Internal error - %s - %s", prob.Detail, ierr))
wfe.log.AuditErr(fmt.Errorf("Internal error - %s - %s", prob.Detail, ierr))
}

problemDoc, err := json.Marshal(prob)
if err != nil {
// AUDIT[ Error Conditions ] 9cc4d537-8534-4970-8665-4b382abe82f3
wfe.log.Audit(fmt.Sprintf("Could not marshal error message: %s - %+v", err, prob))
wfe.log.AuditErr(fmt.Errorf("Could not marshal error message: %s - %+v", err, prob))
problemDoc = []byte("{\"detail\": \"Problem marshalling error message.\"}")
}