Run godep update and godep save -r.

Also, remove cache-control code from ocsp-responder, since caching headers are now handled in cfssl.

parent: cb2f7bc057
commit: f008c46a77
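For reference, the refresh described above corresponds to a two-step godep workflow; the package arguments below are illustrative, not taken from this commit:

    godep update golang.org/x/crypto/...
    godep save -r ./...

godep save -r records each dependency's revision in Godeps/Godeps.json and rewrites import paths, both in the project's own source and in the vendored copies, to the Godeps/_workspace/src/... prefix. That is what produces both the Godeps.json hunks and the import-path hunks below.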
Godeps/Godeps.json

@@ -70,10 +70,6 @@
 			"Comment": "1.2.0-64-ge1712f3",
 			"Rev": "e1712f381785e32046927f64a7c86fe569203196"
 		},
-		{
-			"ImportPath": "github.com/dgryski/go-rc2",
-			"Rev": "fd90a5fcd260ebe709a689d0bdca2043afffabfa"
-		},
 		{
 			"ImportPath": "github.com/facebookgo/clock",
 			"Rev": "600d898af40aa09a7a93ecb9265d87b0504b6f03"
@@ -99,6 +95,10 @@
 			"ImportPath": "github.com/golang/protobuf/proto",
 			"Rev": "a1dfa5ef89a13a0aa4be5a6f81179db10bfeea36"
 		},
+		{
+			"ImportPath": "github.com/google/certificate-transparency/go",
+			"Rev": "72d5367bd7ff1f4401c5649817dca766b668e322"
+		},
 		{
 			"ImportPath": "github.com/jmhodges/clock",
 			"Rev": "3c4ebd218625c9364c33db6d39c276d80c3090c6"
@@ -124,13 +124,21 @@
 			"ImportPath": "github.com/miekg/pkcs11",
 			"Rev": "88c9f842544e629ec046105d7fb50d5daafae737"
 		},
 		{
 			"ImportPath": "github.com/mreiferson/go-httpclient",
 			"Rev": "63fe23f7434723dc904c901043af07931f293c47"
 		},
 		{
 			"ImportPath": "github.com/streadway/amqp",
 			"Rev": "150b7f24d6ad507e6026c13d85ce1f1391ac7400"
 		},
 		{
 			"ImportPath": "golang.org/x/crypto/ocsp",
-			"Rev": "287a1d87db5d649b01d6193bd9d07e909f08094c"
+			"Rev": "beef0f4390813b96e8e68fd78570396d0f4751fc"
 		},
+		{
+			"ImportPath": "golang.org/x/crypto/pkcs12",
+			"Rev": "beef0f4390813b96e8e68fd78570396d0f4751fc"
+		},
+		{
+			"ImportPath": "golang.org/x/net/publicsuffix",
@@ -21,7 +21,7 @@ import (
 	cferr "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/errors"
 	"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/helpers/derhelpers"
 	"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/log"
-	"golang.org/x/crypto/pkcs12"
+	"github.com/letsencrypt/boulder/Godeps/_workspace/src/golang.org/x/crypto/pkcs12"
 )

 // OneYear is a time.Duration representing a year's worth of seconds.
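Note that godep's rewrite only changes the import path, not the package name, so call sites keep compiling unchanged. Below is a minimal sketch of that point; the file and helper names are hypothetical, while pkcs12.Decode is the standard golang.org/x/crypto/pkcs12 entry point:

package helpers

import (
	// Only the path changes after godep save -r; the package is still named pkcs12.
	"github.com/letsencrypt/boulder/Godeps/_workspace/src/golang.org/x/crypto/pkcs12"
)

// decodePKCS12 is a hypothetical wrapper showing that call sites are untouched
// by the import-path rewrite.
func decodePKCS12(data []byte, password string) (interface{}, error) {
	key, cert, err := pkcs12.Decode(data, password)
	if err != nil {
		return nil, err
	}
	_ = cert // the certificate is ignored in this sketch
	return key, nil
}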
@@ -26,8 +26,8 @@ import (
 	"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/log"
 	"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/signer"

-	"github.com/google/certificate-transparency/go"
-	"github.com/google/certificate-transparency/go/client"
+	"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go"
+	"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/client"
 )

 // Signer contains a signer that uses the standard library to
@@ -1,40 +0,0 @@ (deleted benchmark file from the vendored github.com/dgryski/go-rc2 package)
package rc2

import (
	"testing"

	ebfe "github.com/ebfe/rc2"
)

func BenchmarkEncrypt(b *testing.B) {
	r, _ := New([]byte{0, 0, 0, 0, 0, 0, 0, 0}, 64)
	b.ResetTimer()
	var src [8]byte
	for i := 0; i < b.N; i++ {
		r.Encrypt(src[:], src[:])
	}
}

func BenchmarkEbfeEncrypt(b *testing.B) {
	r, _ := ebfe.NewCipher([]byte{0, 0, 0, 0, 0, 0, 0, 0})
	b.ResetTimer()
	var src [8]byte
	for i := 0; i < b.N; i++ {
		r.Encrypt(src[:], src[:])
	}
}

func BenchmarkDecrypt(b *testing.B) {
	r, _ := New([]byte{0, 0, 0, 0, 0, 0, 0, 0}, 64)
	b.ResetTimer()
	var src [8]byte
	for i := 0; i < b.N; i++ {
		r.Decrypt(src[:], src[:])
	}
}

func BenchmarkEbfeDecrypt(b *testing.B) {
	r, _ := ebfe.NewCipher([]byte{0, 0, 0, 0, 0, 0, 0, 0})
	b.ResetTimer()
	var src [8]byte
	for i := 0; i < b.N; i++ {
		r.Decrypt(src[:], src[:])
	}
}
@@ -1,89 +0,0 @@ (deleted test file from the vendored github.com/dgryski/go-rc2 package)
package rc2

import (
	"bytes"
	"encoding/hex"
	"testing"
)

func TestEncryptDecrypt(t *testing.T) {

	// TODO(dgryski): add the rest of the test vectors from the RFC
	var tests = []struct {
		key    string
		plain  string
		cipher string
		t1     int
	}{
		{
			"0000000000000000",
			"0000000000000000",
			"ebb773f993278eff",
			63,
		},
		{
			"ffffffffffffffff",
			"ffffffffffffffff",
			"278b27e42e2f0d49",
			64,
		},
		{
			"3000000000000000",
			"1000000000000001",
			"30649edf9be7d2c2",
			64,
		},
		{
			"88",
			"0000000000000000",
			"61a8a244adacccf0",
			64,
		},
		{
			"88bca90e90875a",
			"0000000000000000",
			"6ccf4308974c267f",
			64,
		},
		{
			"88bca90e90875a7f0f79c384627bafb2",
			"0000000000000000",
			"1a807d272bbe5db1",
			64,
		},
		{
			"88bca90e90875a7f0f79c384627bafb2",
			"0000000000000000",
			"2269552ab0f85ca6",
			128,
		},
		{
			"88bca90e90875a7f0f79c384627bafb216f80a6f85920584c42fceb0be255daf1e",
			"0000000000000000",
			"5b78d3a43dfff1f1",
			129,
		},
	}

	for _, tt := range tests {
		k, _ := hex.DecodeString(tt.key)
		p, _ := hex.DecodeString(tt.plain)
		c, _ := hex.DecodeString(tt.cipher)

		b, _ := New(k, tt.t1)

		var dst [8]byte

		b.Encrypt(dst[:], p)

		if !bytes.Equal(dst[:], c) {
			t.Errorf("encrypt failed: got % 2x wanted % 2x\n", dst, c)
		}

		b.Decrypt(dst[:], c)

		if !bytes.Equal(dst[:], p) {
			t.Errorf("decrypt failed: got % 2x wanted % 2x\n", dst, p)
		}
	}
}
Godeps/_workspace/src/github.com/google/certificate-transparency/go/README.md (generated, vendored, new file, 25 lines)
@@ -0,0 +1,25 @@
This is the really early beginnings of a certificate transparency log
client written in Go, along with a log scanner tool.

You'll need go v1.1 or higher to compile.

# Installation

This go code must be imported into your go workspace before you can
use it, which can be done with:

   go get github.com/google/certificate-transparency/go/client
   go get github.com/google/certificate-transparency/go/scanner
   etc.

# Building the binaries

To compile the log scanner run:

   go build github.com/google/certificate-transparency/go/scanner/main/scanner.go

# Contributing

When sending pull requests, please ensure that everything's been run
through ```gofmt``` beforehand so we can keep everything nice and
tidy.
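For orientation, the vendored package is a CT log client; a rough usage sketch follows. The names client.New, GetSTH, and TreeSize follow the package's commonly documented public API and may differ slightly at this pinned revision, so treat this as an assumption rather than code from the diff:

package main

import (
	"fmt"
	"log"

	"github.com/google/certificate-transparency/go/client"
)

func main() {
	// Assumed API: connect to a public CT log and fetch its current Signed Tree Head.
	logClient := client.New("https://ct.googleapis.com/pilot")
	sth, err := logClient.GetSTH()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("current tree size: %d\n", sth.TreeSize)
}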
Godeps/_workspace/src/github.com/google/certificate-transparency/go/asn1/asn1.go (generated, vendored, new file, 956 lines)

@@ -0,0 +1,956 @@
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package asn1 implements parsing of DER-encoded ASN.1 data structures,
|
||||
// as defined in ITU-T Rec X.690.
|
||||
//
|
||||
// See also ``A Layman's Guide to a Subset of ASN.1, BER, and DER,''
|
||||
// http://luca.ntop.org/Teaching/Appunti/asn1.html.
|
||||
//
|
||||
// START CT CHANGES
|
||||
// This is a fork of the Go standard library ASN.1 implementation
|
||||
// (encoding/asn1). The main difference is that this version tries to correct
|
||||
// for errors (e.g. use of tagPrintableString when the string data is really
|
||||
// ISO8859-1 - a common error present in many x509 certificates in the wild.)
|
||||
// END CT CHANGES
|
||||
package asn1
|
||||
|
||||
// ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER etc
|
||||
// are different encoding formats for those objects. Here, we'll be dealing
|
||||
// with DER, the Distinguished Encoding Rules. DER is used in X.509 because
|
||||
// it's fast to parse and, unlike BER, has a unique encoding for every object.
|
||||
// When calculating hashes over objects, it's important that the resulting
|
||||
// bytes be the same at both ends and DER removes this margin of error.
|
||||
//
|
||||
// ASN.1 is very complex and this package doesn't attempt to implement
|
||||
// everything by any means.
|
||||
|
||||
import (
|
||||
// START CT CHANGES
|
||||
"errors"
|
||||
"fmt"
|
||||
// END CT CHANGES
|
||||
"math/big"
|
||||
"reflect"
|
||||
// START CT CHANGES
|
||||
"strings"
|
||||
// END CT CHANGES
|
||||
"time"
|
||||
)
|
||||
|
||||
// A StructuralError suggests that the ASN.1 data is valid, but the Go type
|
||||
// which is receiving it doesn't match.
|
||||
type StructuralError struct {
|
||||
Msg string
|
||||
}
|
||||
|
||||
func (e StructuralError) Error() string { return "asn1: structure error: " + e.Msg }
|
||||
|
||||
// A SyntaxError suggests that the ASN.1 data is invalid.
|
||||
type SyntaxError struct {
|
||||
Msg string
|
||||
}
|
||||
|
||||
func (e SyntaxError) Error() string { return "asn1: syntax error: " + e.Msg }
|
||||
|
||||
// We start by dealing with each of the primitive types in turn.
|
||||
|
||||
// BOOLEAN
|
||||
|
||||
func parseBool(bytes []byte) (ret bool, err error) {
|
||||
if len(bytes) != 1 {
|
||||
err = SyntaxError{"invalid boolean"}
|
||||
return
|
||||
}
|
||||
|
||||
// DER demands that "If the encoding represents the boolean value TRUE,
|
||||
// its single contents octet shall have all eight bits set to one."
|
||||
// Thus only 0 and 255 are valid encoded values.
|
||||
switch bytes[0] {
|
||||
case 0:
|
||||
ret = false
|
||||
case 0xff:
|
||||
ret = true
|
||||
default:
|
||||
err = SyntaxError{"invalid boolean"}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// INTEGER
|
||||
|
||||
// parseInt64 treats the given bytes as a big-endian, signed integer and
|
||||
// returns the result.
|
||||
func parseInt64(bytes []byte) (ret int64, err error) {
|
||||
if len(bytes) > 8 {
|
||||
// We'll overflow an int64 in this case.
|
||||
err = StructuralError{"integer too large"}
|
||||
return
|
||||
}
|
||||
for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
|
||||
ret <<= 8
|
||||
ret |= int64(bytes[bytesRead])
|
||||
}
|
||||
|
||||
// Shift up and down in order to sign extend the result.
|
||||
ret <<= 64 - uint8(len(bytes))*8
|
||||
ret >>= 64 - uint8(len(bytes))*8
|
||||
return
|
||||
}
|
||||
|
||||
// parseInt treats the given bytes as a big-endian, signed integer and returns
|
||||
// the result.
|
||||
func parseInt32(bytes []byte) (int32, error) {
|
||||
ret64, err := parseInt64(bytes)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if ret64 != int64(int32(ret64)) {
|
||||
return 0, StructuralError{"integer too large"}
|
||||
}
|
||||
return int32(ret64), nil
|
||||
}
|
||||
|
||||
var bigOne = big.NewInt(1)
|
||||
|
||||
// parseBigInt treats the given bytes as a big-endian, signed integer and returns
|
||||
// the result.
|
||||
func parseBigInt(bytes []byte) *big.Int {
|
||||
ret := new(big.Int)
|
||||
if len(bytes) > 0 && bytes[0]&0x80 == 0x80 {
|
||||
// This is a negative number.
|
||||
notBytes := make([]byte, len(bytes))
|
||||
for i := range notBytes {
|
||||
notBytes[i] = ^bytes[i]
|
||||
}
|
||||
ret.SetBytes(notBytes)
|
||||
ret.Add(ret, bigOne)
|
||||
ret.Neg(ret)
|
||||
return ret
|
||||
}
|
||||
ret.SetBytes(bytes)
|
||||
return ret
|
||||
}
|
||||
|
||||
// BIT STRING
|
||||
|
||||
// BitString is the structure to use when you want an ASN.1 BIT STRING type. A
|
||||
// bit string is padded up to the nearest byte in memory and the number of
|
||||
// valid bits is recorded. Padding bits will be zero.
|
||||
type BitString struct {
|
||||
Bytes []byte // bits packed into bytes.
|
||||
BitLength int // length in bits.
|
||||
}
|
||||
|
||||
// At returns the bit at the given index. If the index is out of range it
|
||||
// returns false.
|
||||
func (b BitString) At(i int) int {
|
||||
if i < 0 || i >= b.BitLength {
|
||||
return 0
|
||||
}
|
||||
x := i / 8
|
||||
y := 7 - uint(i%8)
|
||||
return int(b.Bytes[x]>>y) & 1
|
||||
}
|
||||
|
||||
// RightAlign returns a slice where the padding bits are at the beginning. The
|
||||
// slice may share memory with the BitString.
|
||||
func (b BitString) RightAlign() []byte {
|
||||
shift := uint(8 - (b.BitLength % 8))
|
||||
if shift == 8 || len(b.Bytes) == 0 {
|
||||
return b.Bytes
|
||||
}
|
||||
|
||||
a := make([]byte, len(b.Bytes))
|
||||
a[0] = b.Bytes[0] >> shift
|
||||
for i := 1; i < len(b.Bytes); i++ {
|
||||
a[i] = b.Bytes[i-1] << (8 - shift)
|
||||
a[i] |= b.Bytes[i] >> shift
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
// parseBitString parses an ASN.1 bit string from the given byte slice and returns it.
|
||||
func parseBitString(bytes []byte) (ret BitString, err error) {
|
||||
if len(bytes) == 0 {
|
||||
err = SyntaxError{"zero length BIT STRING"}
|
||||
return
|
||||
}
|
||||
paddingBits := int(bytes[0])
|
||||
if paddingBits > 7 ||
|
||||
len(bytes) == 1 && paddingBits > 0 ||
|
||||
bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0 {
|
||||
err = SyntaxError{"invalid padding bits in BIT STRING"}
|
||||
return
|
||||
}
|
||||
ret.BitLength = (len(bytes)-1)*8 - paddingBits
|
||||
ret.Bytes = bytes[1:]
|
||||
return
|
||||
}
|
||||
|
||||
// OBJECT IDENTIFIER
|
||||
|
||||
// An ObjectIdentifier represents an ASN.1 OBJECT IDENTIFIER.
|
||||
type ObjectIdentifier []int
|
||||
|
||||
// Equal reports whether oi and other represent the same identifier.
|
||||
func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool {
|
||||
if len(oi) != len(other) {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < len(oi); i++ {
|
||||
if oi[i] != other[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
|
||||
// returns it. An object identifier is a sequence of variable length integers
|
||||
// that are assigned in a hierarchy.
|
||||
func parseObjectIdentifier(bytes []byte) (s []int, err error) {
|
||||
if len(bytes) == 0 {
|
||||
err = SyntaxError{"zero length OBJECT IDENTIFIER"}
|
||||
return
|
||||
}
|
||||
|
||||
// In the worst case, we get two elements from the first byte (which is
|
||||
// encoded differently) and then every varint is a single byte long.
|
||||
s = make([]int, len(bytes)+1)
|
||||
|
||||
// The first varint is 40*value1 + value2:
|
||||
// According to this packing, value1 can take the values 0, 1 and 2 only.
|
||||
// When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2,
|
||||
// then there are no restrictions on value2.
|
||||
v, offset, err := parseBase128Int(bytes, 0)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if v < 80 {
|
||||
s[0] = v / 40
|
||||
s[1] = v % 40
|
||||
} else {
|
||||
s[0] = 2
|
||||
s[1] = v - 80
|
||||
}
|
||||
|
||||
i := 2
|
||||
for ; offset < len(bytes); i++ {
|
||||
v, offset, err = parseBase128Int(bytes, offset)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
s[i] = v
|
||||
}
|
||||
s = s[0:i]
|
||||
return
|
||||
}
|
||||
|
||||
// ENUMERATED
|
||||
|
||||
// An Enumerated is represented as a plain int.
|
||||
type Enumerated int
|
||||
|
||||
// FLAG
|
||||
|
||||
// A Flag accepts any data and is set to true if present.
|
||||
type Flag bool
|
||||
|
||||
// parseBase128Int parses a base-128 encoded int from the given offset in the
|
||||
// given byte slice. It returns the value and the new offset.
|
||||
func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, err error) {
|
||||
offset = initOffset
|
||||
for shifted := 0; offset < len(bytes); shifted++ {
|
||||
if shifted > 4 {
|
||||
err = StructuralError{"base 128 integer too large"}
|
||||
return
|
||||
}
|
||||
ret <<= 7
|
||||
b := bytes[offset]
|
||||
ret |= int(b & 0x7f)
|
||||
offset++
|
||||
if b&0x80 == 0 {
|
||||
return
|
||||
}
|
||||
}
|
||||
err = SyntaxError{"truncated base 128 integer"}
|
||||
return
|
||||
}
|
||||
|
||||
// UTCTime
|
||||
|
||||
func parseUTCTime(bytes []byte) (ret time.Time, err error) {
|
||||
s := string(bytes)
|
||||
ret, err = time.Parse("0601021504Z0700", s)
|
||||
if err != nil {
|
||||
ret, err = time.Parse("060102150405Z0700", s)
|
||||
}
|
||||
if err == nil && ret.Year() >= 2050 {
|
||||
// UTCTime only encodes times prior to 2050. See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
|
||||
ret = ret.AddDate(-100, 0, 0)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// parseGeneralizedTime parses the GeneralizedTime from the given byte slice
|
||||
// and returns the resulting time.
|
||||
func parseGeneralizedTime(bytes []byte) (ret time.Time, err error) {
|
||||
return time.Parse("20060102150405Z0700", string(bytes))
|
||||
}
|
||||
|
||||
// PrintableString
|
||||
|
||||
// parsePrintableString parses a ASN.1 PrintableString from the given byte
|
||||
// array and returns it.
|
||||
func parsePrintableString(bytes []byte) (ret string, err error) {
|
||||
for _, b := range bytes {
|
||||
if !isPrintable(b) {
|
||||
err = SyntaxError{"PrintableString contains invalid character"}
|
||||
return
|
||||
}
|
||||
}
|
||||
ret = string(bytes)
|
||||
return
|
||||
}
|
||||
|
||||
// isPrintable returns true iff the given b is in the ASN.1 PrintableString set.
|
||||
func isPrintable(b byte) bool {
|
||||
return 'a' <= b && b <= 'z' ||
|
||||
'A' <= b && b <= 'Z' ||
|
||||
'0' <= b && b <= '9' ||
|
||||
'\'' <= b && b <= ')' ||
|
||||
'+' <= b && b <= '/' ||
|
||||
b == ' ' ||
|
||||
b == ':' ||
|
||||
b == '=' ||
|
||||
b == '?' ||
|
||||
// This is technically not allowed in a PrintableString.
|
||||
// However, x509 certificates with wildcard strings don't
|
||||
// always use the correct string type so we permit it.
|
||||
b == '*'
|
||||
}
|
||||
|
||||
// IA5String
|
||||
|
||||
// parseIA5String parses a ASN.1 IA5String (ASCII string) from the given
|
||||
// byte slice and returns it.
|
||||
func parseIA5String(bytes []byte) (ret string, err error) {
|
||||
for _, b := range bytes {
|
||||
if b >= 0x80 {
|
||||
err = SyntaxError{"IA5String contains invalid character"}
|
||||
return
|
||||
}
|
||||
}
|
||||
ret = string(bytes)
|
||||
return
|
||||
}
|
||||
|
||||
// T61String
|
||||
|
||||
// parseT61String parses a ASN.1 T61String (8-bit clean string) from the given
|
||||
// byte slice and returns it.
|
||||
func parseT61String(bytes []byte) (ret string, err error) {
|
||||
return string(bytes), nil
|
||||
}
|
||||
|
||||
// UTF8String
|
||||
|
||||
// parseUTF8String parses a ASN.1 UTF8String (raw UTF-8) from the given byte
|
||||
// array and returns it.
|
||||
func parseUTF8String(bytes []byte) (ret string, err error) {
|
||||
return string(bytes), nil
|
||||
}
|
||||
|
||||
// A RawValue represents an undecoded ASN.1 object.
|
||||
type RawValue struct {
|
||||
Class, Tag int
|
||||
IsCompound bool
|
||||
Bytes []byte
|
||||
FullBytes []byte // includes the tag and length
|
||||
}
|
||||
|
||||
// RawContent is used to signal that the undecoded, DER data needs to be
|
||||
// preserved for a struct. To use it, the first field of the struct must have
|
||||
// this type. It's an error for any of the other fields to have this type.
|
||||
type RawContent []byte
|
||||
|
||||
// Tagging
|
||||
|
||||
// parseTagAndLength parses an ASN.1 tag and length pair from the given offset
|
||||
// into a byte slice. It returns the parsed data and the new offset. SET and
|
||||
// SET OF (tag 17) are mapped to SEQUENCE and SEQUENCE OF (tag 16) since we
|
||||
// don't distinguish between ordered and unordered objects in this code.
|
||||
func parseTagAndLength(bytes []byte, initOffset int) (ret tagAndLength, offset int, err error) {
|
||||
offset = initOffset
|
||||
b := bytes[offset]
|
||||
offset++
|
||||
ret.class = int(b >> 6)
|
||||
ret.isCompound = b&0x20 == 0x20
|
||||
ret.tag = int(b & 0x1f)
|
||||
|
||||
// If the bottom five bits are set, then the tag number is actually base 128
|
||||
// encoded afterwards
|
||||
if ret.tag == 0x1f {
|
||||
ret.tag, offset, err = parseBase128Int(bytes, offset)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if offset >= len(bytes) {
|
||||
err = SyntaxError{"truncated tag or length"}
|
||||
return
|
||||
}
|
||||
b = bytes[offset]
|
||||
offset++
|
||||
if b&0x80 == 0 {
|
||||
// The length is encoded in the bottom 7 bits.
|
||||
ret.length = int(b & 0x7f)
|
||||
} else {
|
||||
// Bottom 7 bits give the number of length bytes to follow.
|
||||
numBytes := int(b & 0x7f)
|
||||
if numBytes == 0 {
|
||||
err = SyntaxError{"indefinite length found (not DER)"}
|
||||
return
|
||||
}
|
||||
ret.length = 0
|
||||
for i := 0; i < numBytes; i++ {
|
||||
if offset >= len(bytes) {
|
||||
err = SyntaxError{"truncated tag or length"}
|
||||
return
|
||||
}
|
||||
b = bytes[offset]
|
||||
offset++
|
||||
if ret.length >= 1<<23 {
|
||||
// We can't shift ret.length up without
|
||||
// overflowing.
|
||||
err = StructuralError{"length too large"}
|
||||
return
|
||||
}
|
||||
ret.length <<= 8
|
||||
ret.length |= int(b)
|
||||
if ret.length == 0 {
|
||||
// DER requires that lengths be minimal.
|
||||
err = StructuralError{"superfluous leading zeros in length"}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse
|
||||
// a number of ASN.1 values from the given byte slice and returns them as a
|
||||
// slice of Go values of the given type.
|
||||
func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type) (ret reflect.Value, err error) {
|
||||
expectedTag, compoundType, ok := getUniversalType(elemType)
|
||||
if !ok {
|
||||
err = StructuralError{"unknown Go type for slice"}
|
||||
return
|
||||
}
|
||||
|
||||
// First we iterate over the input and count the number of elements,
|
||||
// checking that the types are correct in each case.
|
||||
numElements := 0
|
||||
for offset := 0; offset < len(bytes); {
|
||||
var t tagAndLength
|
||||
t, offset, err = parseTagAndLength(bytes, offset)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// We pretend that GENERAL STRINGs are PRINTABLE STRINGs so
|
||||
// that a sequence of them can be parsed into a []string.
|
||||
if t.tag == tagGeneralString {
|
||||
t.tag = tagPrintableString
|
||||
}
|
||||
if t.class != classUniversal || t.isCompound != compoundType || t.tag != expectedTag {
|
||||
err = StructuralError{"sequence tag mismatch"}
|
||||
return
|
||||
}
|
||||
if invalidLength(offset, t.length, len(bytes)) {
|
||||
err = SyntaxError{"truncated sequence"}
|
||||
return
|
||||
}
|
||||
offset += t.length
|
||||
numElements++
|
||||
}
|
||||
ret = reflect.MakeSlice(sliceType, numElements, numElements)
|
||||
params := fieldParameters{}
|
||||
offset := 0
|
||||
for i := 0; i < numElements; i++ {
|
||||
offset, err = parseField(ret.Index(i), bytes, offset, params)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
bitStringType = reflect.TypeOf(BitString{})
|
||||
objectIdentifierType = reflect.TypeOf(ObjectIdentifier{})
|
||||
enumeratedType = reflect.TypeOf(Enumerated(0))
|
||||
flagType = reflect.TypeOf(Flag(false))
|
||||
timeType = reflect.TypeOf(time.Time{})
|
||||
rawValueType = reflect.TypeOf(RawValue{})
|
||||
rawContentsType = reflect.TypeOf(RawContent(nil))
|
||||
bigIntType = reflect.TypeOf(new(big.Int))
|
||||
)
|
||||
|
||||
// invalidLength returns true iff offset + length > sliceLength, or if the
|
||||
// addition would overflow.
|
||||
func invalidLength(offset, length, sliceLength int) bool {
|
||||
return offset+length < offset || offset+length > sliceLength
|
||||
}
|
||||
|
||||
// START CT CHANGES
|
||||
|
||||
// Tests whether the data in |bytes| would be a valid ISO8859-1 string.
|
||||
// Clearly, a sequence of bytes comprised solely of valid ISO8859-1
|
||||
// codepoints does not imply that the encoding MUST be ISO8859-1, rather that
|
||||
// you would not encounter an error trying to interpret the data as such.
|
||||
func couldBeISO8859_1(bytes []byte) bool {
|
||||
for _, b := range bytes {
|
||||
if b < 0x20 || (b >= 0x7F && b < 0xA0) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Checks whether the data in |bytes| would be a valid T.61 string.
|
||||
// Clearly, a sequence of bytes comprised solely of valid T.61
|
||||
// codepoints does not imply that the encoding MUST be T.61, rather that
|
||||
// you would not encounter an error trying to interpret the data as such.
|
||||
func couldBeT61(bytes []byte) bool {
|
||||
for _, b := range bytes {
|
||||
switch b {
|
||||
case 0x00:
|
||||
// Since we're guessing at (incorrect) encodings for a
|
||||
// PrintableString, we'll err on the side of caution and disallow
|
||||
// strings with a NUL in them, don't want to re-create a PayPal NUL
|
||||
// situation in monitors.
|
||||
fallthrough
|
||||
case 0x23, 0x24, 0x5C, 0x5E, 0x60, 0x7B, 0x7D, 0x7E, 0xA5, 0xA6, 0xAC, 0xAD, 0xAE, 0xAF,
|
||||
0xB9, 0xBA, 0xC0, 0xC9, 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9,
|
||||
0xDA, 0xDB, 0xDC, 0xDE, 0xDF, 0xE5, 0xFF:
|
||||
// These are all invalid code points in T.61, so it can't be a T.61 string.
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Converts the data in |bytes| to the equivalent UTF-8 string.
|
||||
func iso8859_1ToUTF8(bytes []byte) string {
|
||||
buf := make([]rune, len(bytes))
|
||||
for i, b := range bytes {
|
||||
buf[i] = rune(b)
|
||||
}
|
||||
return string(buf)
|
||||
}
|
||||
|
||||
// END CT CHANGES
|
||||
|
||||
// parseField is the main parsing function. Given a byte slice and an offset
|
||||
// into the array, it will try to parse a suitable ASN.1 value out and store it
|
||||
// in the given Value.
|
||||
func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err error) {
|
||||
offset = initOffset
|
||||
fieldType := v.Type()
|
||||
|
||||
// If we have run out of data, it may be that there are optional elements at the end.
|
||||
if offset == len(bytes) {
|
||||
if !setDefaultValue(v, params) {
|
||||
err = SyntaxError{"sequence truncated"}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Deal with raw values.
|
||||
if fieldType == rawValueType {
|
||||
var t tagAndLength
|
||||
t, offset, err = parseTagAndLength(bytes, offset)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if invalidLength(offset, t.length, len(bytes)) {
|
||||
err = SyntaxError{"data truncated"}
|
||||
return
|
||||
}
|
||||
result := RawValue{t.class, t.tag, t.isCompound, bytes[offset : offset+t.length], bytes[initOffset : offset+t.length]}
|
||||
offset += t.length
|
||||
v.Set(reflect.ValueOf(result))
|
||||
return
|
||||
}
|
||||
|
||||
// Deal with the ANY type.
|
||||
if ifaceType := fieldType; ifaceType.Kind() == reflect.Interface && ifaceType.NumMethod() == 0 {
|
||||
var t tagAndLength
|
||||
t, offset, err = parseTagAndLength(bytes, offset)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if invalidLength(offset, t.length, len(bytes)) {
|
||||
err = SyntaxError{"data truncated"}
|
||||
return
|
||||
}
|
||||
var result interface{}
|
||||
if !t.isCompound && t.class == classUniversal {
|
||||
innerBytes := bytes[offset : offset+t.length]
|
||||
switch t.tag {
|
||||
case tagPrintableString:
|
||||
result, err = parsePrintableString(innerBytes)
|
||||
// START CT CHANGES
|
||||
if err != nil && strings.Contains(err.Error(), "PrintableString contains invalid character") {
|
||||
// Probably an ISO8859-1 string stuffed in, check if it
|
||||
// would be valid and assume that's what's happened if so,
|
||||
// otherwise try T.61, failing that give up and just assign
|
||||
// the bytes
|
||||
switch {
|
||||
case couldBeISO8859_1(innerBytes):
|
||||
result, err = iso8859_1ToUTF8(innerBytes), nil
|
||||
case couldBeT61(innerBytes):
|
||||
result, err = parseT61String(innerBytes)
|
||||
default:
|
||||
result = nil
|
||||
err = errors.New("PrintableString contains invalid character, but couldn't determine correct String type.")
|
||||
}
|
||||
}
|
||||
// END CT CHANGES
|
||||
case tagIA5String:
|
||||
result, err = parseIA5String(innerBytes)
|
||||
case tagT61String:
|
||||
result, err = parseT61String(innerBytes)
|
||||
case tagUTF8String:
|
||||
result, err = parseUTF8String(innerBytes)
|
||||
case tagInteger:
|
||||
result, err = parseInt64(innerBytes)
|
||||
case tagBitString:
|
||||
result, err = parseBitString(innerBytes)
|
||||
case tagOID:
|
||||
result, err = parseObjectIdentifier(innerBytes)
|
||||
case tagUTCTime:
|
||||
result, err = parseUTCTime(innerBytes)
|
||||
case tagOctetString:
|
||||
result = innerBytes
|
||||
default:
|
||||
// If we don't know how to handle the type, we just leave Value as nil.
|
||||
}
|
||||
}
|
||||
offset += t.length
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if result != nil {
|
||||
v.Set(reflect.ValueOf(result))
|
||||
}
|
||||
return
|
||||
}
|
||||
universalTag, compoundType, ok1 := getUniversalType(fieldType)
|
||||
if !ok1 {
|
||||
err = StructuralError{fmt.Sprintf("unknown Go type: %v", fieldType)}
|
||||
return
|
||||
}
|
||||
|
||||
t, offset, err := parseTagAndLength(bytes, offset)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if params.explicit {
|
||||
expectedClass := classContextSpecific
|
||||
if params.application {
|
||||
expectedClass = classApplication
|
||||
}
|
||||
if t.class == expectedClass && t.tag == *params.tag && (t.length == 0 || t.isCompound) {
|
||||
if t.length > 0 {
|
||||
t, offset, err = parseTagAndLength(bytes, offset)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
if fieldType != flagType {
|
||||
err = StructuralError{"zero length explicit tag was not an asn1.Flag"}
|
||||
return
|
||||
}
|
||||
v.SetBool(true)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// The tags didn't match, it might be an optional element.
|
||||
ok := setDefaultValue(v, params)
|
||||
if ok {
|
||||
offset = initOffset
|
||||
} else {
|
||||
err = StructuralError{"explicitly tagged member didn't match"}
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Special case for strings: all the ASN.1 string types map to the Go
|
||||
// type string. getUniversalType returns the tag for PrintableString
|
||||
// when it sees a string, so if we see a different string type on the
|
||||
// wire, we change the universal type to match.
|
||||
if universalTag == tagPrintableString {
|
||||
switch t.tag {
|
||||
case tagIA5String, tagGeneralString, tagT61String, tagUTF8String:
|
||||
universalTag = t.tag
|
||||
}
|
||||
}
|
||||
|
||||
// Special case for time: UTCTime and GeneralizedTime both map to the
|
||||
// Go type time.Time.
|
||||
if universalTag == tagUTCTime && t.tag == tagGeneralizedTime {
|
||||
universalTag = tagGeneralizedTime
|
||||
}
|
||||
|
||||
expectedClass := classUniversal
|
||||
expectedTag := universalTag
|
||||
|
||||
if !params.explicit && params.tag != nil {
|
||||
expectedClass = classContextSpecific
|
||||
expectedTag = *params.tag
|
||||
}
|
||||
|
||||
if !params.explicit && params.application && params.tag != nil {
|
||||
expectedClass = classApplication
|
||||
expectedTag = *params.tag
|
||||
}
|
||||
|
||||
// We have unwrapped any explicit tagging at this point.
|
||||
if t.class != expectedClass || t.tag != expectedTag || t.isCompound != compoundType {
|
||||
// Tags don't match. Again, it could be an optional element.
|
||||
ok := setDefaultValue(v, params)
|
||||
if ok {
|
||||
offset = initOffset
|
||||
} else {
|
||||
err = StructuralError{fmt.Sprintf("tags don't match (%d vs %+v) %+v %s @%d", expectedTag, t, params, fieldType.Name(), offset)}
|
||||
}
|
||||
return
|
||||
}
|
||||
if invalidLength(offset, t.length, len(bytes)) {
|
||||
err = SyntaxError{"data truncated"}
|
||||
return
|
||||
}
|
||||
innerBytes := bytes[offset : offset+t.length]
|
||||
offset += t.length
|
||||
|
||||
// We deal with the structures defined in this package first.
|
||||
switch fieldType {
|
||||
case objectIdentifierType:
|
||||
newSlice, err1 := parseObjectIdentifier(innerBytes)
|
||||
v.Set(reflect.MakeSlice(v.Type(), len(newSlice), len(newSlice)))
|
||||
if err1 == nil {
|
||||
reflect.Copy(v, reflect.ValueOf(newSlice))
|
||||
}
|
||||
err = err1
|
||||
return
|
||||
case bitStringType:
|
||||
bs, err1 := parseBitString(innerBytes)
|
||||
if err1 == nil {
|
||||
v.Set(reflect.ValueOf(bs))
|
||||
}
|
||||
err = err1
|
||||
return
|
||||
case timeType:
|
||||
var time time.Time
|
||||
var err1 error
|
||||
if universalTag == tagUTCTime {
|
||||
time, err1 = parseUTCTime(innerBytes)
|
||||
} else {
|
||||
time, err1 = parseGeneralizedTime(innerBytes)
|
||||
}
|
||||
if err1 == nil {
|
||||
v.Set(reflect.ValueOf(time))
|
||||
}
|
||||
err = err1
|
||||
return
|
||||
case enumeratedType:
|
||||
parsedInt, err1 := parseInt32(innerBytes)
|
||||
if err1 == nil {
|
||||
v.SetInt(int64(parsedInt))
|
||||
}
|
||||
err = err1
|
||||
return
|
||||
case flagType:
|
||||
v.SetBool(true)
|
||||
return
|
||||
case bigIntType:
|
||||
parsedInt := parseBigInt(innerBytes)
|
||||
v.Set(reflect.ValueOf(parsedInt))
|
||||
return
|
||||
}
|
||||
switch val := v; val.Kind() {
|
||||
case reflect.Bool:
|
||||
parsedBool, err1 := parseBool(innerBytes)
|
||||
if err1 == nil {
|
||||
val.SetBool(parsedBool)
|
||||
}
|
||||
err = err1
|
||||
return
|
||||
case reflect.Int, reflect.Int32, reflect.Int64:
|
||||
if val.Type().Size() == 4 {
|
||||
parsedInt, err1 := parseInt32(innerBytes)
|
||||
if err1 == nil {
|
||||
val.SetInt(int64(parsedInt))
|
||||
}
|
||||
err = err1
|
||||
} else {
|
||||
parsedInt, err1 := parseInt64(innerBytes)
|
||||
if err1 == nil {
|
||||
val.SetInt(parsedInt)
|
||||
}
|
||||
err = err1
|
||||
}
|
||||
return
|
||||
// TODO(dfc) Add support for the remaining integer types
|
||||
case reflect.Struct:
|
||||
structType := fieldType
|
||||
|
||||
if structType.NumField() > 0 &&
|
||||
structType.Field(0).Type == rawContentsType {
|
||||
bytes := bytes[initOffset:offset]
|
||||
val.Field(0).Set(reflect.ValueOf(RawContent(bytes)))
|
||||
}
|
||||
|
||||
innerOffset := 0
|
||||
for i := 0; i < structType.NumField(); i++ {
|
||||
field := structType.Field(i)
|
||||
if i == 0 && field.Type == rawContentsType {
|
||||
continue
|
||||
}
|
||||
innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, parseFieldParameters(field.Tag.Get("asn1")))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
// We allow extra bytes at the end of the SEQUENCE because
|
||||
// adding elements to the end has been used in X.509 as the
|
||||
// version numbers have increased.
|
||||
return
|
||||
case reflect.Slice:
|
||||
sliceType := fieldType
|
||||
if sliceType.Elem().Kind() == reflect.Uint8 {
|
||||
val.Set(reflect.MakeSlice(sliceType, len(innerBytes), len(innerBytes)))
|
||||
reflect.Copy(val, reflect.ValueOf(innerBytes))
|
||||
return
|
||||
}
|
||||
newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem())
|
||||
if err1 == nil {
|
||||
val.Set(newSlice)
|
||||
}
|
||||
err = err1
|
||||
return
|
||||
case reflect.String:
|
||||
var v string
|
||||
switch universalTag {
|
||||
case tagPrintableString:
|
||||
v, err = parsePrintableString(innerBytes)
|
||||
case tagIA5String:
|
||||
v, err = parseIA5String(innerBytes)
|
||||
case tagT61String:
|
||||
v, err = parseT61String(innerBytes)
|
||||
case tagUTF8String:
|
||||
v, err = parseUTF8String(innerBytes)
|
||||
case tagGeneralString:
|
||||
// GeneralString is specified in ISO-2022/ECMA-35,
|
||||
// A brief review suggests that it includes structures
|
||||
// that allow the encoding to change midstring and
|
||||
// such. We give up and pass it as an 8-bit string.
|
||||
v, err = parseT61String(innerBytes)
|
||||
default:
|
||||
err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag)}
|
||||
}
|
||||
if err == nil {
|
||||
val.SetString(v)
|
||||
}
|
||||
return
|
||||
}
|
||||
err = StructuralError{"unsupported: " + v.Type().String()}
|
||||
return
|
||||
}
|
||||
|
||||
// setDefaultValue is used to install a default value, from a tag string, into
|
||||
// a Value. It is successful is the field was optional, even if a default value
|
||||
// wasn't provided or it failed to install it into the Value.
|
||||
func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) {
|
||||
if !params.optional {
|
||||
return
|
||||
}
|
||||
ok = true
|
||||
if params.defaultValue == nil {
|
||||
return
|
||||
}
|
||||
switch val := v; val.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
val.SetInt(*params.defaultValue)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Unmarshal parses the DER-encoded ASN.1 data structure b
|
||||
// and uses the reflect package to fill in an arbitrary value pointed at by val.
|
||||
// Because Unmarshal uses the reflect package, the structs
|
||||
// being written to must use upper case field names.
|
||||
//
|
||||
// An ASN.1 INTEGER can be written to an int, int32, int64,
|
||||
// or *big.Int (from the math/big package).
|
||||
// If the encoded value does not fit in the Go type,
|
||||
// Unmarshal returns a parse error.
|
||||
//
|
||||
// An ASN.1 BIT STRING can be written to a BitString.
|
||||
//
|
||||
// An ASN.1 OCTET STRING can be written to a []byte.
|
||||
//
|
||||
// An ASN.1 OBJECT IDENTIFIER can be written to an
|
||||
// ObjectIdentifier.
|
||||
//
|
||||
// An ASN.1 ENUMERATED can be written to an Enumerated.
|
||||
//
|
||||
// An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a time.Time.
|
||||
//
|
||||
// An ASN.1 PrintableString or IA5String can be written to a string.
|
||||
//
|
||||
// Any of the above ASN.1 values can be written to an interface{}.
|
||||
// The value stored in the interface has the corresponding Go type.
|
||||
// For integers, that type is int64.
|
||||
//
|
||||
// An ASN.1 SEQUENCE OF x or SET OF x can be written
|
||||
// to a slice if an x can be written to the slice's element type.
|
||||
//
|
||||
// An ASN.1 SEQUENCE or SET can be written to a struct
|
||||
// if each of the elements in the sequence can be
|
||||
// written to the corresponding element in the struct.
|
||||
//
|
||||
// The following tags on struct fields have special meaning to Unmarshal:
|
||||
//
|
||||
// optional marks the field as ASN.1 OPTIONAL
|
||||
// [explicit] tag:x specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC
|
||||
// default:x sets the default value for optional integer fields
|
||||
//
|
||||
// If the type of the first field of a structure is RawContent then the raw
|
||||
// ASN1 contents of the struct will be stored in it.
|
||||
//
|
||||
// Other ASN.1 types are not supported; if it encounters them,
|
||||
// Unmarshal returns a parse error.
|
||||
func Unmarshal(b []byte, val interface{}) (rest []byte, err error) {
|
||||
return UnmarshalWithParams(b, val, "")
|
||||
}
|
||||
|
||||
// UnmarshalWithParams allows field parameters to be specified for the
|
||||
// top-level element. The form of the params is the same as the field tags.
|
||||
func UnmarshalWithParams(b []byte, val interface{}, params string) (rest []byte, err error) {
|
||||
v := reflect.ValueOf(val).Elem()
|
||||
offset, err := parseField(v, b, 0, parseFieldParameters(params))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[offset:], nil
|
||||
}
|
||||
Godeps/_workspace/src/github.com/google/certificate-transparency/go/asn1/common.go (generated, vendored, new file, 163 lines)

@@ -0,0 +1,163 @@
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package asn1
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ASN.1 objects have metadata preceding them:
|
||||
// the tag: the type of the object
|
||||
// a flag denoting if this object is compound or not
|
||||
// the class type: the namespace of the tag
|
||||
// the length of the object, in bytes
|
||||
|
||||
// Here are some standard tags and classes
|
||||
|
||||
const (
|
||||
tagBoolean = 1
|
||||
tagInteger = 2
|
||||
tagBitString = 3
|
||||
tagOctetString = 4
|
||||
tagOID = 6
|
||||
tagEnum = 10
|
||||
tagUTF8String = 12
|
||||
tagSequence = 16
|
||||
tagSet = 17
|
||||
tagPrintableString = 19
|
||||
tagT61String = 20
|
||||
tagIA5String = 22
|
||||
tagUTCTime = 23
|
||||
tagGeneralizedTime = 24
|
||||
tagGeneralString = 27
|
||||
)
|
||||
|
||||
const (
|
||||
classUniversal = 0
|
||||
classApplication = 1
|
||||
classContextSpecific = 2
|
||||
classPrivate = 3
|
||||
)
|
||||
|
||||
type tagAndLength struct {
|
||||
class, tag, length int
|
||||
isCompound bool
|
||||
}
|
||||
|
||||
// ASN.1 has IMPLICIT and EXPLICIT tags, which can be translated as "instead
|
||||
// of" and "in addition to". When not specified, every primitive type has a
|
||||
// default tag in the UNIVERSAL class.
|
||||
//
|
||||
// For example: a BIT STRING is tagged [UNIVERSAL 3] by default (although ASN.1
|
||||
// doesn't actually have a UNIVERSAL keyword). However, by saying [IMPLICIT
|
||||
// CONTEXT-SPECIFIC 42], that means that the tag is replaced by another.
|
||||
//
|
||||
// On the other hand, if it said [EXPLICIT CONTEXT-SPECIFIC 10], then an
|
||||
// /additional/ tag would wrap the default tag. This explicit tag will have the
|
||||
// compound flag set.
|
||||
//
|
||||
// (This is used in order to remove ambiguity with optional elements.)
|
||||
//
|
||||
// You can layer EXPLICIT and IMPLICIT tags to an arbitrary depth, however we
|
||||
// don't support that here. We support a single layer of EXPLICIT or IMPLICIT
|
||||
// tagging with tag strings on the fields of a structure.
|
||||
|
||||
// fieldParameters is the parsed representation of tag string from a structure field.
|
||||
type fieldParameters struct {
|
||||
optional bool // true iff the field is OPTIONAL
|
||||
explicit bool // true iff an EXPLICIT tag is in use.
|
||||
application bool // true iff an APPLICATION tag is in use.
|
||||
defaultValue *int64 // a default value for INTEGER typed fields (maybe nil).
|
||||
tag *int // the EXPLICIT or IMPLICIT tag (maybe nil).
|
||||
stringType int // the string tag to use when marshaling.
|
||||
set bool // true iff this should be encoded as a SET
|
||||
omitEmpty bool // true iff this should be omitted if empty when marshaling.
|
||||
|
||||
// Invariants:
|
||||
// if explicit is set, tag is non-nil.
|
||||
}
|
||||
|
||||
// Given a tag string with the format specified in the package comment,
|
||||
// parseFieldParameters will parse it into a fieldParameters structure,
|
||||
// ignoring unknown parts of the string.
|
||||
func parseFieldParameters(str string) (ret fieldParameters) {
|
||||
for _, part := range strings.Split(str, ",") {
|
||||
switch {
|
||||
case part == "optional":
|
||||
ret.optional = true
|
||||
case part == "explicit":
|
||||
ret.explicit = true
|
||||
if ret.tag == nil {
|
||||
ret.tag = new(int)
|
||||
}
|
||||
case part == "ia5":
|
||||
ret.stringType = tagIA5String
|
||||
case part == "printable":
|
||||
ret.stringType = tagPrintableString
|
||||
case part == "utf8":
|
||||
ret.stringType = tagUTF8String
|
||||
case strings.HasPrefix(part, "default:"):
|
||||
i, err := strconv.ParseInt(part[8:], 10, 64)
|
||||
if err == nil {
|
||||
ret.defaultValue = new(int64)
|
||||
*ret.defaultValue = i
|
||||
}
|
||||
case strings.HasPrefix(part, "tag:"):
|
||||
i, err := strconv.Atoi(part[4:])
|
||||
if err == nil {
|
||||
ret.tag = new(int)
|
||||
*ret.tag = i
|
||||
}
|
||||
case part == "set":
|
||||
ret.set = true
|
||||
case part == "application":
|
||||
ret.application = true
|
||||
if ret.tag == nil {
|
||||
ret.tag = new(int)
|
||||
}
|
||||
case part == "omitempty":
|
||||
ret.omitEmpty = true
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Given a reflected Go type, getUniversalType returns the default tag number
|
||||
// and expected compound flag.
|
||||
func getUniversalType(t reflect.Type) (tagNumber int, isCompound, ok bool) {
|
||||
switch t {
|
||||
case objectIdentifierType:
|
||||
return tagOID, false, true
|
||||
case bitStringType:
|
||||
return tagBitString, false, true
|
||||
case timeType:
|
||||
return tagUTCTime, false, true
|
||||
case enumeratedType:
|
||||
return tagEnum, false, true
|
||||
case bigIntType:
|
||||
return tagInteger, false, true
|
||||
}
|
||||
switch t.Kind() {
|
||||
case reflect.Bool:
|
||||
return tagBoolean, false, true
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return tagInteger, false, true
|
||||
case reflect.Struct:
|
||||
return tagSequence, true, true
|
||||
case reflect.Slice:
|
||||
if t.Elem().Kind() == reflect.Uint8 {
|
||||
return tagOctetString, false, true
|
||||
}
|
||||
if strings.HasSuffix(t.Name(), "SET") {
|
||||
return tagSet, true, true
|
||||
}
|
||||
return tagSequence, true, true
|
||||
case reflect.String:
|
||||
return tagPrintableString, false, true
|
||||
}
|
||||
return 0, false, false
|
||||
}
|
||||
Godeps/_workspace/src/github.com/google/certificate-transparency/go/asn1/marshal.go (generated, vendored, new file, 581 lines)

@@ -0,0 +1,581 @@
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package asn1
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// A forkableWriter is an in-memory buffer that can be
|
||||
// 'forked' to create new forkableWriters that bracket the
|
||||
// original. After
|
||||
// pre, post := w.fork();
|
||||
// the overall sequence of bytes represented is logically w+pre+post.
|
||||
type forkableWriter struct {
|
||||
*bytes.Buffer
|
||||
pre, post *forkableWriter
|
||||
}
|
||||
|
||||
func newForkableWriter() *forkableWriter {
|
||||
return &forkableWriter{new(bytes.Buffer), nil, nil}
|
||||
}
|
||||
|
||||
func (f *forkableWriter) fork() (pre, post *forkableWriter) {
|
||||
if f.pre != nil || f.post != nil {
|
||||
panic("have already forked")
|
||||
}
|
||||
f.pre = newForkableWriter()
|
||||
f.post = newForkableWriter()
|
||||
return f.pre, f.post
|
||||
}
|
||||
|
||||
func (f *forkableWriter) Len() (l int) {
|
||||
l += f.Buffer.Len()
|
||||
if f.pre != nil {
|
||||
l += f.pre.Len()
|
||||
}
|
||||
if f.post != nil {
|
||||
l += f.post.Len()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (f *forkableWriter) writeTo(out io.Writer) (n int, err error) {
|
||||
n, err = out.Write(f.Bytes())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var nn int
|
||||
|
||||
if f.pre != nil {
|
||||
nn, err = f.pre.writeTo(out)
|
||||
n += nn
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if f.post != nil {
|
||||
nn, err = f.post.writeTo(out)
|
||||
n += nn
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func marshalBase128Int(out *forkableWriter, n int64) (err error) {
|
||||
if n == 0 {
|
||||
err = out.WriteByte(0)
|
||||
return
|
||||
}
|
||||
|
||||
l := 0
|
||||
for i := n; i > 0; i >>= 7 {
|
||||
l++
|
||||
}
|
||||
|
||||
for i := l - 1; i >= 0; i-- {
|
||||
o := byte(n >> uint(i*7))
|
||||
o &= 0x7f
|
||||
if i != 0 {
|
||||
o |= 0x80
|
||||
}
|
||||
err = out.WriteByte(o)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func marshalInt64(out *forkableWriter, i int64) (err error) {
|
||||
n := int64Length(i)
|
||||
|
||||
for ; n > 0; n-- {
|
||||
err = out.WriteByte(byte(i >> uint((n-1)*8)))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func int64Length(i int64) (numBytes int) {
|
||||
numBytes = 1
|
||||
|
||||
for i > 127 {
|
||||
numBytes++
|
||||
i >>= 8
|
||||
}
|
||||
|
||||
for i < -128 {
|
||||
numBytes++
|
||||
i >>= 8
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func marshalBigInt(out *forkableWriter, n *big.Int) (err error) {
|
||||
if n.Sign() < 0 {
|
||||
// A negative number has to be converted to two's-complement
|
||||
// form. So we'll subtract 1 and invert. If the
|
||||
// most-significant-bit isn't set then we'll need to pad the
|
||||
// beginning with 0xff in order to keep the number negative.
|
||||
nMinus1 := new(big.Int).Neg(n)
|
||||
nMinus1.Sub(nMinus1, bigOne)
|
||||
bytes := nMinus1.Bytes()
|
||||
for i := range bytes {
|
||||
bytes[i] ^= 0xff
|
||||
}
|
||||
if len(bytes) == 0 || bytes[0]&0x80 == 0 {
|
||||
err = out.WriteByte(0xff)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
_, err = out.Write(bytes)
|
||||
} else if n.Sign() == 0 {
|
||||
// Zero is written as a single 0 zero rather than no bytes.
|
||||
err = out.WriteByte(0x00)
|
||||
} else {
|
||||
bytes := n.Bytes()
|
||||
if len(bytes) > 0 && bytes[0]&0x80 != 0 {
|
||||
// We'll have to pad this with 0x00 in order to stop it
|
||||
// looking like a negative number.
|
||||
err = out.WriteByte(0)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
_, err = out.Write(bytes)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func marshalLength(out *forkableWriter, i int) (err error) {
|
||||
n := lengthLength(i)
|
||||
|
||||
for ; n > 0; n-- {
|
||||
err = out.WriteByte(byte(i >> uint((n-1)*8)))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func lengthLength(i int) (numBytes int) {
|
||||
numBytes = 1
|
||||
for i > 255 {
|
||||
numBytes++
|
||||
i >>= 8
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func marshalTagAndLength(out *forkableWriter, t tagAndLength) (err error) {
|
||||
b := uint8(t.class) << 6
|
||||
if t.isCompound {
|
||||
b |= 0x20
|
||||
}
|
||||
if t.tag >= 31 {
|
||||
b |= 0x1f
|
||||
err = out.WriteByte(b)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = marshalBase128Int(out, int64(t.tag))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
b |= uint8(t.tag)
|
||||
err = out.WriteByte(b)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if t.length >= 128 {
|
||||
l := lengthLength(t.length)
|
||||
err = out.WriteByte(0x80 | byte(l))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = marshalLength(out, t.length)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
err = out.WriteByte(byte(t.length))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func marshalBitString(out *forkableWriter, b BitString) (err error) {
|
||||
paddingBits := byte((8 - b.BitLength%8) % 8)
|
||||
err = out.WriteByte(paddingBits)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, err = out.Write(b.Bytes)
|
||||
return
|
||||
}
|
||||
|
||||
func marshalObjectIdentifier(out *forkableWriter, oid []int) (err error) {
|
||||
if len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) {
|
||||
return StructuralError{"invalid object identifier"}
|
||||
}
|
||||
|
||||
err = marshalBase128Int(out, int64(oid[0]*40+oid[1]))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for i := 2; i < len(oid); i++ {
|
||||
err = marshalBase128Int(out, int64(oid[i]))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func marshalPrintableString(out *forkableWriter, s string) (err error) {
|
||||
b := []byte(s)
|
||||
for _, c := range b {
|
||||
if !isPrintable(c) {
|
||||
return StructuralError{"PrintableString contains invalid character"}
|
||||
}
|
||||
}
|
||||
|
||||
_, err = out.Write(b)
|
||||
return
|
||||
}
|
||||
|
||||
func marshalIA5String(out *forkableWriter, s string) (err error) {
|
||||
b := []byte(s)
|
||||
for _, c := range b {
|
||||
if c > 127 {
|
||||
return StructuralError{"IA5String contains invalid character"}
|
||||
}
|
||||
}
|
||||
|
||||
_, err = out.Write(b)
|
||||
return
|
||||
}
|
||||
|
||||
func marshalUTF8String(out *forkableWriter, s string) (err error) {
|
||||
_, err = out.Write([]byte(s))
|
||||
return
|
||||
}
|
||||
|
||||
func marshalTwoDigits(out *forkableWriter, v int) (err error) {
|
||||
err = out.WriteByte(byte('0' + (v/10)%10))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return out.WriteByte(byte('0' + v%10))
|
||||
}
|
||||
|
||||
func marshalUTCTime(out *forkableWriter, t time.Time) (err error) {
|
||||
year, month, day := t.Date()
|
||||
|
||||
switch {
|
||||
case 1950 <= year && year < 2000:
|
||||
err = marshalTwoDigits(out, int(year-1900))
|
||||
case 2000 <= year && year < 2050:
|
||||
err = marshalTwoDigits(out, int(year-2000))
|
||||
default:
|
||||
return StructuralError{"cannot represent time as UTCTime"}
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = marshalTwoDigits(out, int(month))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = marshalTwoDigits(out, day)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
hour, min, sec := t.Clock()
|
||||
|
||||
err = marshalTwoDigits(out, hour)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = marshalTwoDigits(out, min)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = marshalTwoDigits(out, sec)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
_, offset := t.Zone()
|
||||
|
||||
switch {
|
||||
case offset/60 == 0:
|
||||
err = out.WriteByte('Z')
|
||||
return
|
||||
case offset > 0:
|
||||
err = out.WriteByte('+')
|
||||
case offset < 0:
|
||||
err = out.WriteByte('-')
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
offsetMinutes := offset / 60
|
||||
if offsetMinutes < 0 {
|
||||
offsetMinutes = -offsetMinutes
|
||||
}
|
||||
|
||||
err = marshalTwoDigits(out, offsetMinutes/60)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = marshalTwoDigits(out, offsetMinutes%60)
|
||||
return
|
||||
}
|
||||
|
||||
func stripTagAndLength(in []byte) []byte {
|
||||
_, offset, err := parseTagAndLength(in, 0)
|
||||
if err != nil {
|
||||
return in
|
||||
}
|
||||
return in[offset:]
|
||||
}
|
||||
|
||||
func marshalBody(out *forkableWriter, value reflect.Value, params fieldParameters) (err error) {
|
||||
switch value.Type() {
|
||||
case timeType:
|
||||
return marshalUTCTime(out, value.Interface().(time.Time))
|
||||
case bitStringType:
|
||||
return marshalBitString(out, value.Interface().(BitString))
|
||||
case objectIdentifierType:
|
||||
return marshalObjectIdentifier(out, value.Interface().(ObjectIdentifier))
|
||||
case bigIntType:
|
||||
return marshalBigInt(out, value.Interface().(*big.Int))
|
||||
}
|
||||
|
||||
switch v := value; v.Kind() {
|
||||
case reflect.Bool:
|
||||
if v.Bool() {
|
||||
return out.WriteByte(255)
|
||||
} else {
|
||||
return out.WriteByte(0)
|
||||
}
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return marshalInt64(out, int64(v.Int()))
|
||||
case reflect.Struct:
|
||||
t := v.Type()
|
||||
|
||||
startingField := 0
|
||||
|
||||
// If the first element of the structure is a non-empty
|
||||
// RawContents, then we don't bother serializing the rest.
|
||||
if t.NumField() > 0 && t.Field(0).Type == rawContentsType {
|
||||
s := v.Field(0)
|
||||
if s.Len() > 0 {
|
||||
bytes := make([]byte, s.Len())
|
||||
for i := 0; i < s.Len(); i++ {
|
||||
bytes[i] = uint8(s.Index(i).Uint())
|
||||
}
|
||||
/* The RawContents will contain the tag and
|
||||
* length fields but we'll also be writing
|
||||
* those ourselves, so we strip them out of
|
||||
* bytes */
|
||||
_, err = out.Write(stripTagAndLength(bytes))
|
||||
return
|
||||
} else {
|
||||
startingField = 1
|
||||
}
|
||||
}
|
||||
|
||||
for i := startingField; i < t.NumField(); i++ {
|
||||
var pre *forkableWriter
|
||||
pre, out = out.fork()
|
||||
err = marshalField(pre, v.Field(i), parseFieldParameters(t.Field(i).Tag.Get("asn1")))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
case reflect.Slice:
|
||||
sliceType := v.Type()
|
||||
if sliceType.Elem().Kind() == reflect.Uint8 {
|
||||
bytes := make([]byte, v.Len())
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
bytes[i] = uint8(v.Index(i).Uint())
|
||||
}
|
||||
_, err = out.Write(bytes)
|
||||
return
|
||||
}
|
||||
|
||||
var fp fieldParameters
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
var pre *forkableWriter
|
||||
pre, out = out.fork()
|
||||
err = marshalField(pre, v.Index(i), fp)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
case reflect.String:
|
||||
switch params.stringType {
|
||||
case tagIA5String:
|
||||
return marshalIA5String(out, v.String())
|
||||
case tagPrintableString:
|
||||
return marshalPrintableString(out, v.String())
|
||||
default:
|
||||
return marshalUTF8String(out, v.String())
|
||||
}
|
||||
}
|
||||
|
||||
return StructuralError{"unknown Go type"}
|
||||
}
|
||||
|
||||
func marshalField(out *forkableWriter, v reflect.Value, params fieldParameters) (err error) {
|
||||
// If the field is an interface{} then recurse into it.
|
||||
if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 {
|
||||
return marshalField(out, v.Elem(), params)
|
||||
}
|
||||
|
||||
if v.Kind() == reflect.Slice && v.Len() == 0 && params.omitEmpty {
|
||||
return
|
||||
}
|
||||
|
||||
if params.optional && reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) {
|
||||
return
|
||||
}
|
||||
|
||||
if v.Type() == rawValueType {
|
||||
rv := v.Interface().(RawValue)
|
||||
if len(rv.FullBytes) != 0 {
|
||||
_, err = out.Write(rv.FullBytes)
|
||||
} else {
|
||||
err = marshalTagAndLength(out, tagAndLength{rv.Class, rv.Tag, len(rv.Bytes), rv.IsCompound})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, err = out.Write(rv.Bytes)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
tag, isCompound, ok := getUniversalType(v.Type())
|
||||
if !ok {
|
||||
err = StructuralError{fmt.Sprintf("unknown Go type: %v", v.Type())}
|
||||
return
|
||||
}
|
||||
class := classUniversal
|
||||
|
||||
if params.stringType != 0 && tag != tagPrintableString {
|
||||
return StructuralError{"explicit string type given to non-string member"}
|
||||
}
|
||||
|
||||
if tag == tagPrintableString {
|
||||
if params.stringType == 0 {
|
||||
// This is a string without an explicit string type. We'll use
|
||||
// a PrintableString if the character set in the string is
|
||||
// sufficiently limited, otherwise we'll use a UTF8String.
|
||||
for _, r := range v.String() {
|
||||
if r >= utf8.RuneSelf || !isPrintable(byte(r)) {
|
||||
if !utf8.ValidString(v.String()) {
|
||||
return errors.New("asn1: string not valid UTF-8")
|
||||
}
|
||||
tag = tagUTF8String
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
tag = params.stringType
|
||||
}
|
||||
}
|
||||
|
||||
if params.set {
|
||||
if tag != tagSequence {
|
||||
return StructuralError{"non sequence tagged as set"}
|
||||
}
|
||||
tag = tagSet
|
||||
}
|
||||
|
||||
tags, body := out.fork()
|
||||
|
||||
err = marshalBody(body, v, params)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
bodyLen := body.Len()
|
||||
|
||||
var explicitTag *forkableWriter
|
||||
if params.explicit {
|
||||
explicitTag, tags = tags.fork()
|
||||
}
|
||||
|
||||
if !params.explicit && params.tag != nil {
|
||||
// implicit tag.
|
||||
tag = *params.tag
|
||||
class = classContextSpecific
|
||||
}
|
||||
|
||||
err = marshalTagAndLength(tags, tagAndLength{class, tag, bodyLen, isCompound})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if params.explicit {
|
||||
err = marshalTagAndLength(explicitTag, tagAndLength{
|
||||
class: classContextSpecific,
|
||||
tag: *params.tag,
|
||||
length: bodyLen + tags.Len(),
|
||||
isCompound: true,
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Marshal returns the ASN.1 encoding of val.
|
||||
func Marshal(val interface{}) ([]byte, error) {
|
||||
var out bytes.Buffer
|
||||
v := reflect.ValueOf(val)
|
||||
f := newForkableWriter()
|
||||
err := marshalField(f, v, fieldParameters{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = f.writeTo(&out)
|
||||
return out.Bytes(), nil
|
||||
}
|
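For reference, a minimal sketch of calling the Marshal entry point above. The API mirrors the standard library's encoding/asn1, so the sketch imports that package; the struct and its field values are purely illustrative, not taken from this diff.

package main

import (
	"encoding/asn1"
	"fmt"
	"log"
	"time"
)

// example is a hypothetical struct, not part of the vendored code.
type example struct {
	Serial  int64
	Issued  time.Time
	Comment string `asn1:"utf8"`
}

func main() {
	// Marshal walks the struct via marshalField/marshalBody and emits DER.
	der, err := asn1.Marshal(example{Serial: 42, Issued: time.Now().UTC(), Comment: "demo"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("encoded %d DER bytes\n", len(der))
}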
||||
330 Godeps/_workspace/src/github.com/google/certificate-transparency/go/client/logclient.go generated vendored Normal file
|
|
@@ -0,0 +1,330 @@
|
|||
// Package client is a CT log client implementation and contains types and code
|
||||
// for interacting with RFC6962-compliant CT Log instances.
|
||||
// See http://tools.ietf.org/html/rfc6962 for details
|
||||
package client
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go"
|
||||
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/mreiferson/go-httpclient"
|
||||
)
|
||||
|
||||
// URI paths for CT Log endpoints
|
||||
const (
|
||||
AddChainPath = "/ct/v1/add-chain"
|
||||
AddPreChainPath = "/ct/v1/add-pre-chain"
|
||||
GetSTHPath = "/ct/v1/get-sth"
|
||||
GetEntriesPath = "/ct/v1/get-entries"
|
||||
)
|
||||
|
||||
// LogClient represents a client for a given CT Log instance
|
||||
type LogClient struct {
|
||||
uri string // the base URI of the log. e.g. http://ct.googleapis.com/pilot
|
||||
httpClient *http.Client // used to interact with the log via HTTP
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////
|
||||
// JSON structures follow.
|
||||
// These represent the structures returned by the CT Log server.
|
||||
//////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// addChainRequest represents the JSON request body sent to the add-chain CT
|
||||
// method.
|
||||
type addChainRequest struct {
|
||||
Chain []string `json:"chain"`
|
||||
}
|
||||
|
||||
// addChainResponse represents the JSON response to the add-chain CT method.
|
||||
// An SCT represents a Log's promise to integrate a [pre-]certificate into the
|
||||
// log within a defined period of time.
|
||||
type addChainResponse struct {
|
||||
SCTVersion ct.Version `json:"sct_version"` // SCT structure version
|
||||
ID string `json:"id"` // Log ID
|
||||
Timestamp uint64 `json:"timestamp"` // Timestamp of issuance
|
||||
Extensions string `json:"extensions"` // Holder for any CT extensions
|
||||
Signature string `json:"signature"` // Log signature for this SCT
|
||||
}
|
||||
|
||||
// getSTHResponse represents the JSON response to the get-sth CT method
|
||||
type getSTHResponse struct {
|
||||
TreeSize uint64 `json:"tree_size"` // Number of certs in the current tree
|
||||
Timestamp uint64 `json:"timestamp"` // Time that the tree was created
|
||||
SHA256RootHash string `json:"sha256_root_hash"` // Root hash of the tree
|
||||
TreeHeadSignature string `json:"tree_head_signature"` // Log signature for this STH
|
||||
}
|
||||
|
||||
// base64LeafEntry represents a Base64-encoded leaf entry
|
||||
type base64LeafEntry struct {
|
||||
LeafInput string `json:"leaf_input"`
|
||||
ExtraData string `json:"extra_data"`
|
||||
}
|
||||
|
||||
// getEntriesResponse represents the JSON response to the CT get-entries method
|
||||
type getEntriesResponse struct {
|
||||
Entries []base64LeafEntry `json:"entries"` // the list of returned entries
|
||||
}
|
||||
|
||||
// getConsistencyProofResponse represents the JSON response to the CT get-consistency-proof method
|
||||
type getConsistencyProofResponse struct {
|
||||
Consistency []string `json:"consistency"`
|
||||
}
|
||||
|
||||
// getAuditProofResponse represents the JSON response to the CT get-audit-proof method
|
||||
type getAuditProofResponse struct {
|
||||
Hash []string `json:"hash"` // the hashes which make up the proof
|
||||
TreeSize uint64 `json:"tree_size"` // the tree size against which this proof is constructed
|
||||
}
|
||||
|
||||
// getAcceptedRootsResponse represents the JSON response to the CT get-roots method.
|
||||
type getAcceptedRootsResponse struct {
|
||||
Certificates []string `json:"certificates"`
|
||||
}
|
||||
|
||||
// getEntryAndProofResponse represents the JSON response to the CT get-entry-and-proof method
|
||||
type getEntryAndProofResponse struct {
|
||||
LeafInput string `json:"leaf_input"` // the entry itself
|
||||
ExtraData string `json:"extra_data"` // any chain provided when the entry was added to the log
|
||||
AuditPath []string `json:"audit_path"` // the corresponding proof
|
||||
}
|
||||
|
||||
// New constructs a new LogClient instance.
|
||||
// |uri| is the base URI of the CT log instance to interact with, e.g.
|
||||
// http://ct.googleapis.com/pilot
|
||||
func New(uri string) *LogClient {
|
||||
var c LogClient
|
||||
c.uri = uri
|
||||
transport := &httpclient.Transport{
|
||||
ConnectTimeout: 10 * time.Second,
|
||||
RequestTimeout: 30 * time.Second,
|
||||
ResponseHeaderTimeout: 30 * time.Second,
|
||||
MaxIdleConnsPerHost: 10,
|
||||
DisableKeepAlives: false,
|
||||
}
|
||||
c.httpClient = &http.Client{Transport: transport}
|
||||
return &c
|
||||
}
|
||||
|
||||
// Makes an HTTP call to |uri|, and attempts to parse the response as a JSON
|
||||
// representation of the structure in |res|.
|
||||
// Returns a non-nil |error| if there was a problem.
|
||||
func (c *LogClient) fetchAndParse(uri string, res interface{}) error {
|
||||
req, err := http.NewRequest("GET", uri, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Keep-Alive", "timeout=15, max=100")
|
||||
resp, err := c.httpClient.Do(req)
|
||||
var body []byte
|
||||
if resp != nil {
|
||||
body, err = ioutil.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = json.Unmarshal(body, &res); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Makes an HTTP POST call to |uri|, and attempts to parse the response as a JSON
|
||||
// representation of the structure in |res|.
|
||||
// Returns a non-nil |error| if there was a problem.
|
||||
func (c *LogClient) postAndParse(uri string, req interface{}, res interface{}) (*http.Response, string, error) {
|
||||
postBody, err := json.Marshal(req)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
httpReq, err := http.NewRequest("POST", uri, bytes.NewReader(postBody))
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
httpReq.Header.Set("Keep-Alive", "timeout=15, max=100")
|
||||
httpReq.Header.Set("Content-Type", "application/json")
|
||||
resp, err := c.httpClient.Do(httpReq)
|
||||
// Read all of the body, if there is one, so that the http.Client can do
|
||||
// Keep-Alive:
|
||||
var body []byte
|
||||
if resp != nil {
|
||||
body, err = ioutil.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
}
|
||||
if err != nil {
|
||||
return resp, string(body), err
|
||||
}
|
||||
if resp.StatusCode == 200 {
|
||||
if err != nil {
|
||||
return resp, string(body), err
|
||||
}
|
||||
if err = json.Unmarshal(body, &res); err != nil {
|
||||
return resp, string(body), err
|
||||
}
|
||||
}
|
||||
return resp, string(body), nil
|
||||
}
|
||||
|
||||
// Attempts to add |chain| to the log, using the API endpoint specified by
|
||||
// |path|.
|
||||
func (c *LogClient) addChainWithRetry(path string, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
|
||||
var resp addChainResponse
|
||||
var req addChainRequest
|
||||
for _, link := range chain {
|
||||
req.Chain = append(req.Chain, base64.StdEncoding.EncodeToString(link))
|
||||
}
|
||||
done := false
|
||||
httpStatus := "Unknown"
|
||||
for !done {
|
||||
backoffSeconds := 0
|
||||
httpResp, errorBody, err := c.postAndParse(c.uri+path, &req, &resp)
|
||||
if err != nil {
|
||||
log.Printf("Got %s, backing off.", err)
|
||||
backoffSeconds = 10
|
||||
} else {
|
||||
switch {
|
||||
case httpResp.StatusCode == 200:
|
||||
done = true
|
||||
break
|
||||
case httpResp.StatusCode == 408:
|
||||
case httpResp.StatusCode == 503:
|
||||
// Retry
|
||||
backoffSeconds = 10
|
||||
if retryAfter := httpResp.Header.Get("Retry-After"); retryAfter != "" {
|
||||
if seconds, err := strconv.Atoi(retryAfter); err == nil {
|
||||
backoffSeconds = seconds
|
||||
}
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("Got HTTP Status %s: %s", httpResp.Status, errorBody)
|
||||
}
|
||||
httpStatus = httpResp.Status
|
||||
}
|
||||
// Now back-off before retrying
|
||||
log.Printf("Got %s, backing-off %d seconds.", httpStatus, backoffSeconds)
|
||||
time.Sleep(time.Duration(backoffSeconds) * time.Second)
|
||||
}
|
||||
|
||||
rawLogID, err := base64.StdEncoding.DecodeString(resp.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rawSignature, err := base64.StdEncoding.DecodeString(resp.Signature)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ds, err := ct.UnmarshalDigitallySigned(bytes.NewReader(rawSignature))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var logID ct.SHA256Hash
|
||||
copy(logID[:], rawLogID)
|
||||
return &ct.SignedCertificateTimestamp{
|
||||
SCTVersion: resp.SCTVersion,
|
||||
LogID: logID,
|
||||
Timestamp: resp.Timestamp,
|
||||
Extensions: ct.CTExtensions(resp.Extensions),
|
||||
Signature: *ds}, nil
|
||||
}
|
||||
|
||||
// AddChain adds the (DER represented) X509 |chain| to the log.
|
||||
func (c *LogClient) AddChain(chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
|
||||
return c.addChainWithRetry(AddChainPath, chain)
|
||||
}
|
||||
|
||||
// AddPreChain adds the (DER represented) Precertificate |chain| to the log.
|
||||
func (c *LogClient) AddPreChain(chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
|
||||
return c.addChainWithRetry(AddPreChainPath, chain)
|
||||
}
|
||||
|
||||
// GetSTH retrieves the current STH from the log.
|
||||
// Returns a populated SignedTreeHead, or a non-nil error.
|
||||
func (c *LogClient) GetSTH() (sth *ct.SignedTreeHead, err error) {
|
||||
var resp getSTHResponse
|
||||
if err = c.fetchAndParse(c.uri+GetSTHPath, &resp); err != nil {
|
||||
return
|
||||
}
|
||||
sth = &ct.SignedTreeHead{
|
||||
TreeSize: resp.TreeSize,
|
||||
Timestamp: resp.Timestamp,
|
||||
}
|
||||
|
||||
rawRootHash, err := base64.StdEncoding.DecodeString(resp.SHA256RootHash)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid base64 encoding in sha256_root_hash: %v", err)
|
||||
}
|
||||
if len(rawRootHash) != sha256.Size {
|
||||
return nil, fmt.Errorf("sha256_root_hash is invalid length, expected %d got %d", sha256.Size, len(rawRootHash))
|
||||
}
|
||||
copy(sth.SHA256RootHash[:], rawRootHash)
|
||||
|
||||
rawSignature, err := base64.StdEncoding.DecodeString(resp.TreeHeadSignature)
|
||||
if err != nil {
|
||||
return nil, errors.New("invalid base64 encoding in tree_head_signature")
|
||||
}
|
||||
ds, err := ct.UnmarshalDigitallySigned(bytes.NewReader(rawSignature))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// TODO(alcutter): Verify signature
|
||||
sth.TreeHeadSignature = *ds
|
||||
return
|
||||
}
|
||||
|
||||
// GetEntries attempts to retrieve the entries in the sequence [|start|, |end|] from the CT
|
||||
// log server. (see section 4.6.)
|
||||
// Returns a slice of LeafInputs or a non-nil error.
|
||||
func (c *LogClient) GetEntries(start, end int64) ([]ct.LogEntry, error) {
|
||||
if end < 0 {
|
||||
return nil, errors.New("end should be >= 0")
|
||||
}
|
||||
if end < start {
|
||||
return nil, errors.New("start should be <= end")
|
||||
}
|
||||
var resp getEntriesResponse
|
||||
err := c.fetchAndParse(fmt.Sprintf("%s%s?start=%d&end=%d", c.uri, GetEntriesPath, start, end), &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entries := make([]ct.LogEntry, end-start+1, end-start+1)
|
||||
for index, entry := range resp.Entries {
|
||||
leafBytes, err := base64.StdEncoding.DecodeString(entry.LeafInput)
|
||||
leaf, err := ct.ReadMerkleTreeLeaf(bytes.NewBuffer(leafBytes))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entries[index].Leaf = *leaf
|
||||
chainBytes, err := base64.StdEncoding.DecodeString(entry.ExtraData)
|
||||
|
||||
var chain []ct.ASN1Cert
|
||||
switch leaf.TimestampedEntry.EntryType {
|
||||
case ct.X509LogEntryType:
|
||||
chain, err = ct.UnmarshalX509ChainArray(chainBytes)
|
||||
|
||||
case ct.PrecertLogEntryType:
|
||||
chain, err = ct.UnmarshalPrecertChainArray(chainBytes)
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("saw unknown entry type: %v", leaf.TimestampedEntry.EntryType)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entries[index].Chain = chain
|
||||
entries[index].Index = start + int64(index)
|
||||
}
|
||||
return entries, nil
|
||||
}
|
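For reference, a short sketch of driving the LogClient above; the pilot log URI comes from the comments in this file, and the entry range and error handling are illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/client"
)

func main() {
	c := client.New("http://ct.googleapis.com/pilot")

	// Fetch the log's current signed tree head.
	sth, err := c.GetSTH()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("tree size %d at timestamp %d\n", sth.TreeSize, sth.Timestamp)

	// Fetch a handful of entries (RFC 6962 section 4.6).
	entries, err := c.GetEntries(0, 3)
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Println(e.Index, e.Leaf.TimestampedEntry.EntryType)
	}
}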
||||
143 Godeps/_workspace/src/github.com/google/certificate-transparency/go/gossip/handler.go generated vendored Normal file
|
|
@@ -0,0 +1,143 @@
|
|||
package gossip
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
ct "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go"
|
||||
)
|
||||
|
||||
var defaultNumPollinationsToReturn = flag.Int("default_num_pollinations_to_return", 10,
|
||||
"Number of randomly selected STH pollination entries to return for sth-pollination requests.")
|
||||
|
||||
type clock interface {
|
||||
Now() time.Time
|
||||
}
|
||||
|
||||
type realClock struct{}
|
||||
|
||||
func (realClock) Now() time.Time {
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
// SignatureVerifierMap is a map of SignatureVerifier by LogID
|
||||
type SignatureVerifierMap map[ct.SHA256Hash]ct.SignatureVerifier
|
||||
|
||||
// Handler for the gossip HTTP requests.
|
||||
type Handler struct {
|
||||
storage *Storage
|
||||
verifiers SignatureVerifierMap
|
||||
clock clock
|
||||
}
|
||||
|
||||
func writeWrongMethodResponse(rw *http.ResponseWriter, allowed string) {
|
||||
(*rw).Header().Add("Allow", allowed)
|
||||
(*rw).WriteHeader(http.StatusMethodNotAllowed)
|
||||
}
|
||||
|
||||
func writeErrorResponse(rw *http.ResponseWriter, status int, body string) {
|
||||
(*rw).WriteHeader(status)
|
||||
(*rw).Write([]byte(body))
|
||||
}
|
||||
|
||||
// HandleSCTFeedback handles requests POSTed to .../sct-feedback.
|
||||
// It attempts to store the provided SCT Feedback
|
||||
func (h *Handler) HandleSCTFeedback(rw http.ResponseWriter, req *http.Request) {
|
||||
if req.Method != "POST" {
|
||||
writeWrongMethodResponse(&rw, "POST")
|
||||
return
|
||||
}
|
||||
|
||||
decoder := json.NewDecoder(req.Body)
|
||||
var feedback SCTFeedback
|
||||
if err := decoder.Decode(&feedback); err != nil {
|
||||
writeErrorResponse(&rw, http.StatusBadRequest, fmt.Sprintf("Invalid SCT Feedback received: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
// TODO(alcutter): 5.1.1 Validate leaf chains up to a trusted root
|
||||
// TODO(alcutter): 5.1.1/2 Verify each SCT is valid and from a known log, discard those which aren't
|
||||
// TODO(alcutter): 5.1.1/3 Discard leaves for domains other than ours.
|
||||
if err := h.storage.AddSCTFeedback(feedback); err != nil {
|
||||
writeErrorResponse(&rw, http.StatusInternalServerError, fmt.Sprintf("Unable to store feedback: %v", err))
|
||||
return
|
||||
}
|
||||
rw.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
// HandleSTHPollination handles requests POSTed to .../sth-pollination.
|
||||
// It attempts to store the provided pollination info, and returns a random set of
|
||||
// pollination data from the last 14 days (i.e. "fresh" by the definition of the gossip RFC.)
|
||||
func (h *Handler) HandleSTHPollination(rw http.ResponseWriter, req *http.Request) {
|
||||
if req.Method != "POST" {
|
||||
writeWrongMethodResponse(&rw, "POST")
|
||||
return
|
||||
}
|
||||
|
||||
decoder := json.NewDecoder(req.Body)
|
||||
var p STHPollination
|
||||
if err := decoder.Decode(&p); err != nil {
|
||||
writeErrorResponse(&rw, http.StatusBadRequest, fmt.Sprintf("Invalid STH Pollination received: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
sthToKeep := make([]ct.SignedTreeHead, 0, len(p.STHs))
|
||||
for _, sth := range p.STHs {
|
||||
v, found := h.verifiers[sth.LogID]
|
||||
if !found {
|
||||
log.Printf("Pollination entry for unknown logID: %s", sth.LogID.Base64String())
|
||||
continue
|
||||
}
|
||||
if err := v.VerifySTHSignature(sth); err != nil {
|
||||
log.Printf("Failed to verify STH, dropping: %v", err)
|
||||
continue
|
||||
}
|
||||
sthToKeep = append(sthToKeep, sth)
|
||||
}
|
||||
p.STHs = sthToKeep
|
||||
|
||||
err := h.storage.AddSTHPollination(p)
|
||||
if err != nil {
|
||||
writeErrorResponse(&rw, http.StatusInternalServerError, fmt.Sprintf("Couldn't store pollination: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
freshTime := h.clock.Now().AddDate(0, 0, -14)
|
||||
rp, err := h.storage.GetRandomSTHPollination(freshTime, *defaultNumPollinationsToReturn)
|
||||
if err != nil {
|
||||
writeErrorResponse(&rw, http.StatusInternalServerError, fmt.Sprintf("Couldn't fetch pollination to return: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
json := json.NewEncoder(rw)
|
||||
if err := json.Encode(*rp); err != nil {
|
||||
writeErrorResponse(&rw, http.StatusInternalServerError, fmt.Sprintf("Couldn't encode pollination to return: %v", err))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// NewHandler creates a new Handler object, taking a pointer to a Storage object to
|
||||
// use for storing and retrieving feedback and pollination data, and a
|
||||
// SignatureVerifierMap for verifying signatures from known logs.
|
||||
func NewHandler(s *Storage, v SignatureVerifierMap) Handler {
|
||||
return Handler{
|
||||
storage: s,
|
||||
verifiers: v,
|
||||
clock: realClock{},
|
||||
}
|
||||
}
|
||||
|
||||
// newHandlerWithClock creates a new Handler object like NewHandler, but
|
||||
// additionally takes a clock to use as its time source, which allows the
|
||||
// handler to be tested with a fixed time.
|
||||
func newHandlerWithClock(s *Storage, v SignatureVerifierMap, c clock) Handler {
|
||||
return Handler{
|
||||
storage: s,
|
||||
verifiers: v,
|
||||
clock: c,
|
||||
}
|
||||
}
|
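A hedged sketch of an in-package test for the handlers above, using the fixed-clock constructor; the temporary database path and the empty verifier map are assumptions, not values from this diff.

package gossip

import (
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
	"time"
)

// fixedClock satisfies the package's clock interface with a constant time.
type fixedClock struct{ t time.Time }

func (c fixedClock) Now() time.Time { return c.t }

func TestPollinationRoundTrip(t *testing.T) {
	var s Storage
	if err := s.Open("/tmp/gossip_handler_test.sq3"); err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	h := newHandlerWithClock(&s, SignatureVerifierMap{}, fixedClock{t: time.Now()})

	// POST an empty pollination body and expect a 200 with fresh pollen back.
	req, err := http.NewRequest("POST", "/.well-known/ct/v1/sth-pollination", strings.NewReader(`{"sths":[]}`))
	if err != nil {
		t.Fatal(err)
	}
	rec := httptest.NewRecorder()
	h.HandleSTHPollination(rec, req)
	if rec.Code != http.StatusOK {
		t.Fatalf("got status %d: %s", rec.Code, rec.Body.String())
	}
}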
||||
73 Godeps/_workspace/src/github.com/google/certificate-transparency/go/gossip/main/gossip_server.go generated vendored Normal file
|
|
@@ -0,0 +1,73 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
ct "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go"
|
||||
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/gossip"
|
||||
)
|
||||
|
||||
var dbPath = flag.String("database", "/tmp/gossip.sq3", "Path to database.")
|
||||
var listenAddress = flag.String("listen", ":8080", "Listen address:port for HTTP server.")
|
||||
var logKeys = flag.String("log_public_keys", "", "Comma separated list of files containing trusted Logs' public keys in PEM format")
|
||||
|
||||
func createVerifiers() (*gossip.SignatureVerifierMap, error) {
|
||||
m := make(gossip.SignatureVerifierMap)
|
||||
if len(*logKeys) == 0 {
|
||||
return nil, errors.New("--log_public_keys is empty")
|
||||
}
|
||||
keys := strings.Split(*logKeys, ",")
|
||||
for _, k := range keys {
|
||||
pem, err := ioutil.ReadFile(k)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read specified PEM file %s: %v", k, err)
|
||||
}
|
||||
for len(pem) > 0 {
|
||||
key, id, rest, err := ct.PublicKeyFromPEM(pem)
|
||||
pem = rest
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read public key from PEM in file %s: %v", k, err)
|
||||
}
|
||||
sv, err := ct.NewSignatureVerifier(key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to create new SignatureVerifier: %v", err)
|
||||
}
|
||||
m[id] = *sv
|
||||
log.Printf("Loaded key for LogID %v", id)
|
||||
}
|
||||
}
|
||||
return &m, nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
verifierMap, err := createVerifiers()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to load log public keys: %v", err)
|
||||
}
|
||||
log.Print("Starting gossip server.")
|
||||
|
||||
storage := gossip.Storage{}
|
||||
if err := storage.Open(*dbPath); err != nil {
|
||||
log.Fatalf("Failed to open storage: %v", err)
|
||||
}
|
||||
defer storage.Close()
|
||||
|
||||
handler := gossip.NewHandler(&storage, *verifierMap)
|
||||
serveMux := http.NewServeMux()
|
||||
serveMux.HandleFunc("/.well-known/ct/v1/sct-feedback", handler.HandleSCTFeedback)
|
||||
serveMux.HandleFunc("/.well-known/ct/v1/sth-pollination", handler.HandleSTHPollination)
|
||||
server := &http.Server{
|
||||
Addr: *listenAddress,
|
||||
Handler: serveMux,
|
||||
}
|
||||
if err := server.ListenAndServe(); err != nil {
|
||||
log.Printf("Error serving: %v", err)
|
||||
}
|
||||
}
|
||||
377 Godeps/_workspace/src/github.com/google/certificate-transparency/go/gossip/storage.go generated vendored Normal file
|
|
@@ -0,0 +1,377 @@
|
|||
package gossip
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
ct "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go"
|
||||
"github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
const schema = `
|
||||
CREATE TABLE IF NOT EXISTS sths (
|
||||
version INTEGER NOT NULL,
|
||||
tree_size INTEGER NOT NULL,
|
||||
timestamp INTEGER NOT NULL,
|
||||
root_hash BYTES NOT NULL,
|
||||
signature BYTES NOT NULL,
|
||||
log_id BYTES NOT NULL,
|
||||
PRIMARY KEY (version, tree_size, timestamp, root_hash, log_id)
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS scts (
|
||||
sct_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
|
||||
sct BYTES NOT NULL UNIQUE
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS chains (
|
||||
chain_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
|
||||
chain STRING NOT NULL UNIQUE
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS sct_feedback (
|
||||
chain_id INTEGER NOT NULL REFERENCES chains(chain_id),
|
||||
sct_id INTEGER NOT NULL REFERENCES scts(sct_id),
|
||||
PRIMARY KEY (chain_id, sct_id)
|
||||
|
||||
);`
|
||||
|
||||
const insertChain = `INSERT INTO chains(chain) VALUES ($1);`
|
||||
const insertSCT = `INSERT INTO scts(sct) VALUES ($1);`
|
||||
const insertSCTFeedback = `INSERT INTO sct_feedback(chain_id, sct_id) VALUES ($1, $2);`
|
||||
const insertSTHPollination = `INSERT INTO sths(version, tree_size, timestamp, root_hash, signature, log_id) VALUES($1, $2, $3, $4, $5, $6);`
|
||||
|
||||
const selectChainID = `SELECT chain_id FROM chains WHERE chain = $1;`
|
||||
|
||||
// Selects at most $2 rows from the sths table whose timestamp is newer than $1.
|
||||
const selectRandomRecentPollination = `SELECT version, tree_size, timestamp, root_hash, signature, log_id FROM sths
|
||||
WHERE timestamp >= $1 ORDER BY random() LIMIT $2;`
|
||||
const selectSCTID = `SELECT sct_id FROM scts WHERE sct = $1;`
|
||||
|
||||
const selectNumSCTs = `SELECT COUNT(*) FROM scts;`
|
||||
const selectNumChains = `SELECT COUNT(*) FROM chains;`
|
||||
const selectNumFeedback = `SELECT COUNT(*) FROM sct_feedback;`
|
||||
const selectNumSTHs = `SELECT COUNT(*) FROM sths;`
|
||||
|
||||
const selectFeedback = `SELECT COUNT(*) FROM sct_feedback WHERE chain_id = $1 AND sct_id = $2;`
|
||||
const selectSTH = `SELECT COUNT(*) FROM sths WHERE version = $1 AND tree_size = $2 AND timestamp = $3 AND root_hash = $4 AND signature = $5 AND log_id = $6;`
|
||||
|
||||
// Storage provides an SQLite3-backed method for persisting gossip data
|
||||
type Storage struct {
|
||||
db *sql.DB
|
||||
dbPath string
|
||||
insertChain *sql.Stmt
|
||||
insertSCT *sql.Stmt
|
||||
insertSCTFeedback *sql.Stmt
|
||||
insertSTHPollination *sql.Stmt
|
||||
selectChainID *sql.Stmt
|
||||
selectRandomRecentPollination *sql.Stmt
|
||||
selectSCTID *sql.Stmt
|
||||
|
||||
selectNumChains *sql.Stmt
|
||||
selectNumFeedback *sql.Stmt
|
||||
selectNumSCTs *sql.Stmt
|
||||
selectNumSTHs *sql.Stmt
|
||||
|
||||
selectFeedback *sql.Stmt
|
||||
selectSTH *sql.Stmt
|
||||
}
|
||||
|
||||
type statementSQLPair struct {
|
||||
Statement **sql.Stmt
|
||||
SQL string
|
||||
}
|
||||
|
||||
func prepareStatement(db *sql.DB, s statementSQLPair) error {
|
||||
stmt, err := db.Prepare(s.SQL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*(s.Statement) = stmt
|
||||
return nil
|
||||
}
|
||||
|
||||
// Open opens the underlying persistent data store.
|
||||
// Should be called before attempting to use any of the store or search methods.
|
||||
func (s *Storage) Open(dbPath string) error {
|
||||
var err error
|
||||
if s.db != nil {
|
||||
return errors.New("attempting to call Open() on an already Open()'d Storage")
|
||||
}
|
||||
if len(dbPath) == 0 {
|
||||
return errors.New("attempting to call Open() with an empty file name")
|
||||
}
|
||||
s.dbPath = dbPath
|
||||
s.db, err = sql.Open("sqlite3", s.dbPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := s.db.Exec(schema); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, p := range []statementSQLPair{
|
||||
{&s.insertChain, insertChain},
|
||||
{&s.insertSCT, insertSCT},
|
||||
{&s.insertSCTFeedback, insertSCTFeedback},
|
||||
{&s.insertSTHPollination, insertSTHPollination},
|
||||
{&s.selectChainID, selectChainID},
|
||||
{&s.selectRandomRecentPollination, selectRandomRecentPollination},
|
||||
{&s.selectSCTID, selectSCTID},
|
||||
{&s.selectNumChains, selectNumChains},
|
||||
{&s.selectNumFeedback, selectNumFeedback},
|
||||
{&s.selectNumSCTs, selectNumSCTs},
|
||||
{&s.selectNumSTHs, selectNumSTHs},
|
||||
{&s.selectFeedback, selectFeedback},
|
||||
{&s.selectSTH, selectSTH}} {
|
||||
if err := prepareStatement(s.db, p); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes the underlying DB storage.
|
||||
func (s *Storage) Close() error {
|
||||
return s.db.Close()
|
||||
}
|
||||
|
||||
func selectThingID(getID *sql.Stmt, thing interface{}) (int64, error) {
|
||||
rows, err := getID.Query(thing)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
if !rows.Next() {
|
||||
return -1, fmt.Errorf("couldn't look up ID for %v", thing)
|
||||
}
|
||||
var id int64
|
||||
if err = rows.Scan(&id); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
return id, nil
|
||||
}
|
||||
|
||||
// insertThingOrSelectID will attempt to execute the insert Statement (under transaction tx); if that fails due to
|
||||
// a unique primary key constraint, it will look up that primary key by executing the getID Statement.
|
||||
// Returns the ID associated with persistent thing, or an error describing the failure.
|
||||
func insertThingOrSelectID(tx *sql.Tx, insert *sql.Stmt, getID *sql.Stmt, thing interface{}) (int64, error) {
|
||||
txInsert := tx.Stmt(insert)
|
||||
txGetID := tx.Stmt(getID)
|
||||
r, err := txInsert.Exec(thing)
|
||||
if err != nil {
|
||||
switch e := err.(type) {
|
||||
case sqlite3.Error:
|
||||
if e.Code == sqlite3.ErrConstraint {
|
||||
return selectThingID(txGetID, thing)
|
||||
}
|
||||
}
|
||||
return -1, err
|
||||
}
|
||||
id, err := r.LastInsertId()
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
return id, nil
|
||||
}
|
||||
|
||||
func (s *Storage) addChainIfNotExists(tx *sql.Tx, chain []string) (int64, error) {
|
||||
flatChain := strings.Join(chain, "")
|
||||
return insertThingOrSelectID(tx, s.insertChain, s.selectChainID, flatChain)
|
||||
}
|
||||
|
||||
func (s *Storage) addSCTIfNotExists(tx *sql.Tx, sct string) (int64, error) {
|
||||
return insertThingOrSelectID(tx, s.insertSCT, s.selectSCTID, sct)
|
||||
}
|
||||
|
||||
func (s *Storage) addSCTFeedbackIfNotExists(tx *sql.Tx, chainID, sctID int64) error {
|
||||
stmt := tx.Stmt(s.insertSCTFeedback)
|
||||
_, err := stmt.Exec(chainID, sctID)
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
case sqlite3.Error:
|
||||
// If this is a dupe that's fine, no need to return an error
|
||||
if err.(sqlite3.Error).Code != sqlite3.ErrConstraint {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddSCTFeedback stores the passed in feedback object.
|
||||
func (s *Storage) AddSCTFeedback(feedback SCTFeedback) (err error) {
|
||||
tx, err := s.db.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// If we return a non-nil error, then rollback the transaction.
|
||||
defer func() {
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return
|
||||
}
|
||||
err = tx.Commit()
|
||||
}()
|
||||
|
||||
for _, f := range feedback.Feedback {
|
||||
chainID, err := s.addChainIfNotExists(tx, f.X509Chain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, sct := range f.SCTData {
|
||||
sctID, err := s.addSCTIfNotExists(tx, sct)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = s.addSCTFeedbackIfNotExists(tx, chainID, sctID); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Storage) addSTHIfNotExists(tx *sql.Tx, sth ct.SignedTreeHead) error {
|
||||
stmt := tx.Stmt(s.insertSTHPollination)
|
||||
sigB64, err := sth.TreeHeadSignature.Base64String()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to base64 sth signature: %v", err)
|
||||
}
|
||||
_, err = stmt.Exec(sth.Version, sth.TreeSize, sth.Timestamp, sth.SHA256RootHash.Base64String(), sigB64, sth.LogID.Base64String())
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
case sqlite3.Error:
|
||||
// If this is a dupe that's fine, no need to return an error
|
||||
if err.(sqlite3.Error).Code != sqlite3.ErrConstraint {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetRandomSTHPollination returns a random selection of "fresh" (i.e. at most 14 days old) STHs from the pool.
|
||||
func (s *Storage) GetRandomSTHPollination(newerThan time.Time, limit int) (*STHPollination, error) {
|
||||
// Occasionally this fails to select the pollen which was added by the
|
||||
// AddSTHPollination request which went on to trigger this query, even though
|
||||
// the transaction committed successfully. Attempting this query under a
|
||||
// transaction doesn't fix it. /sadface
|
||||
// Still, that shouldn't really matter too much in practice.
|
||||
r, err := s.selectRandomRecentPollination.Query(newerThan.Unix()*1000, limit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var pollination STHPollination
|
||||
for r.Next() {
|
||||
var entry ct.SignedTreeHead
|
||||
var rootB64, sigB64, idB64 string
|
||||
if err := r.Scan(&entry.Version, &entry.TreeSize, &entry.Timestamp, &rootB64, &sigB64, &idB64); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := entry.SHA256RootHash.FromBase64String(rootB64); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := entry.TreeHeadSignature.FromBase64String(sigB64); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := entry.LogID.FromBase64String(idB64); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pollination.STHs = append(pollination.STHs, entry)
|
||||
}
|
||||
// If there are no entries to return, wedge an empty array in there so that the json encoder returns something valid.
|
||||
if pollination.STHs == nil {
|
||||
pollination.STHs = make([]ct.SignedTreeHead, 0)
|
||||
}
|
||||
return &pollination, nil
|
||||
}
|
||||
|
||||
// AddSTHPollination stores the passed in pollination object.
|
||||
func (s *Storage) AddSTHPollination(pollination STHPollination) error {
|
||||
tx, err := s.db.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// If we return a non-nil error, then rollback the transaction.
|
||||
defer func() {
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return
|
||||
}
|
||||
err = tx.Commit()
|
||||
}()
|
||||
|
||||
for _, sth := range pollination.STHs {
|
||||
if err := s.addSTHIfNotExists(tx, sth); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Storage) getSCTID(sct string) (int64, error) {
|
||||
return selectThingID(s.selectSCTID, sct)
|
||||
}
|
||||
|
||||
func (s *Storage) getChainID(chain []string) (int64, error) {
|
||||
flatChain := strings.Join(chain, "")
|
||||
return selectThingID(s.selectChainID, flatChain)
|
||||
}
|
||||
|
||||
func getNumThings(getCount *sql.Stmt) (int64, error) {
|
||||
r, err := getCount.Query()
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
if !r.Next() {
|
||||
return -1, fmt.Errorf("Empty scan returned while querying %v", getCount)
|
||||
}
|
||||
var count int64
|
||||
if err := r.Scan(&count); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
return count, nil
|
||||
}
|
||||
|
||||
func (s *Storage) getNumChains() (int64, error) {
|
||||
return getNumThings(s.selectNumChains)
|
||||
}
|
||||
|
||||
func (s *Storage) getNumFeedback() (int64, error) {
|
||||
return getNumThings(s.selectNumFeedback)
|
||||
}
|
||||
|
||||
func (s *Storage) getNumSCTs() (int64, error) {
|
||||
return getNumThings(s.selectNumSCTs)
|
||||
}
|
||||
|
||||
func (s *Storage) getNumSTHs() (int64, error) {
|
||||
return getNumThings(s.selectNumSTHs)
|
||||
}
|
||||
|
||||
func (s *Storage) hasFeedback(sctID, chainID int64) bool {
|
||||
r, err := s.selectFeedback.Query(sctID, chainID)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return r.Next()
|
||||
}
|
||||
|
||||
func (s *Storage) hasSTH(sth ct.SignedTreeHead) bool {
|
||||
sigB64, err := sth.TreeHeadSignature.Base64String()
|
||||
if err != nil {
|
||||
log.Printf("%v", err)
|
||||
return false
|
||||
}
|
||||
r, err := s.selectSTH.Query(sth.Version, sth.TreeSize, sth.Timestamp, sth.SHA256RootHash.Base64String(), sigB64, sth.LogID.Base64String())
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return r.Next()
|
||||
}
|
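A compact sketch of the Storage lifecycle above (same package); the sqlite path and feedback contents are assumptions, and SCTFeedback/SCTFeedbackEntry are the types defined in types.go below.

package gossip

import "log"

// exampleStorageUse is illustrative only; the path and data are made up.
func exampleStorageUse() {
	var s Storage
	if err := s.Open("/tmp/gossip_example.sq3"); err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	fb := SCTFeedback{
		Feedback: []SCTFeedbackEntry{{
			X509Chain: []string{"base64-leaf-cert", "base64-intermediate"},
			SCTData:   []string{"base64-encoded-sct"},
		}},
	}
	if err := s.AddSCTFeedback(fb); err != nil {
		log.Fatal(err)
	}
}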
||||
30 Godeps/_workspace/src/github.com/google/certificate-transparency/go/gossip/types.go generated vendored Normal file
|
|
@@ -0,0 +1,30 @@
|
|||
package gossip
|
||||
|
||||
import (
|
||||
ct "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go"
|
||||
)
|
||||
|
||||
// STHVersion reflects the STH Version field in RFC6962[-bis]
|
||||
type STHVersion int
|
||||
|
||||
// STHVersion constants
|
||||
const (
|
||||
STHVersion0 = 0
|
||||
STHVersion1 = 1
|
||||
)
|
||||
|
||||
// SCTFeedbackEntry represents a single piece of SCT feedback.
|
||||
type SCTFeedbackEntry struct {
|
||||
X509Chain []string `json:"x509_chain"`
|
||||
SCTData []string `json:"sct_data"`
|
||||
}
|
||||
|
||||
// SCTFeedback represents a collection of SCTFeedback which a client might send together.
|
||||
type SCTFeedback struct {
|
||||
Feedback []SCTFeedbackEntry `json:"sct_feedback"`
|
||||
}
|
||||
|
||||
// STHPollination represents a collection of STH pollination entries which a client might send together.
|
||||
type STHPollination struct {
|
||||
STHs []ct.SignedTreeHead `json:"sths"`
|
||||
}
|
||||
131 Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/merkle_tree.go generated vendored Normal file
|
|
@@ -0,0 +1,131 @@
|
|||
package merkletree
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS: -lcrypto
|
||||
#cgo CPPFLAGS: -I../../cpp
|
||||
#cgo CXXFLAGS: -std=c++11
|
||||
#include "merkle_tree_go.h"
|
||||
*/
|
||||
import "C"
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// CPPMerkleTree provides an interface to the C++ CT MerkleTree library.
|
||||
// See the go/README file for details on how to build this.
|
||||
type CPPMerkleTree struct {
|
||||
FullMerkleTreeInterface
|
||||
|
||||
// The C++ MerkleTree handle
|
||||
peer C.TREE
|
||||
|
||||
// nodeSize contains the size in bytes of the nodes in the MerkleTree
|
||||
// referenced by |peer|.
|
||||
nodeSize C.size_t
|
||||
}
|
||||
|
||||
func (m *CPPMerkleTree) LeafCount() uint64 {
|
||||
return uint64(C.LeafCount(m.peer))
|
||||
}
|
||||
|
||||
func (m *CPPMerkleTree) LevelCount() uint64 {
|
||||
return uint64(C.LevelCount(m.peer))
|
||||
}
|
||||
|
||||
func (m *CPPMerkleTree) AddLeaf(leaf []byte) uint64 {
|
||||
return uint64(C.AddLeaf(m.peer, C.BYTE_SLICE(&leaf)))
|
||||
}
|
||||
|
||||
func (m *CPPMerkleTree) AddLeafHash(hash []byte) uint64 {
|
||||
return uint64(C.AddLeafHash(m.peer, C.BYTE_SLICE(&hash)))
|
||||
}
|
||||
|
||||
func (m *CPPMerkleTree) LeafHash(leaf uint64) ([]byte, error) {
|
||||
hash := make([]byte, m.nodeSize)
|
||||
success := C.LeafHash(m.peer, C.BYTE_SLICE(&hash), C.size_t(leaf))
|
||||
if !success {
|
||||
return nil, fmt.Errorf("failed to get leafhash of leaf %d", leaf)
|
||||
}
|
||||
return hash, nil
|
||||
}
|
||||
|
||||
func (m *CPPMerkleTree) CurrentRoot() ([]byte, error) {
|
||||
hash := make([]byte, m.nodeSize)
|
||||
success := C.CurrentRoot(m.peer, C.BYTE_SLICE(&hash))
|
||||
if !success {
|
||||
return nil, errors.New("failed to get current root")
|
||||
}
|
||||
return hash, nil
|
||||
}
|
||||
|
||||
func (m *CPPMerkleTree) RootAtSnapshot(snapshot uint64) ([]byte, error) {
|
||||
hash := make([]byte, m.nodeSize)
|
||||
success := C.RootAtSnapshot(m.peer, C.BYTE_SLICE(&hash), C.size_t(snapshot))
|
||||
if !success {
|
||||
return nil, fmt.Errorf("failed to get root at snapshot %d", snapshot)
|
||||
}
|
||||
return hash, nil
|
||||
}
|
||||
|
||||
func splitSlice(slice []byte, chunkSize int) ([][]byte, error) {
|
||||
if len(slice)%chunkSize != 0 {
|
||||
return nil, fmt.Errorf("slice len %d is not a multiple of chunkSize %d", len(slice), chunkSize)
|
||||
}
|
||||
numEntries := len(slice) / chunkSize
|
||||
ret := make([][]byte, numEntries)
|
||||
for i := 0; i < numEntries; i++ {
|
||||
start := i * chunkSize
|
||||
end := start + chunkSize
|
||||
ret[i] = slice[start:end]
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (m *CPPMerkleTree) PathToCurrentRoot(leaf uint64) ([][]byte, error) {
|
||||
var numEntries C.size_t
|
||||
entryBuffer := make([]byte, C.size_t(m.LevelCount())*m.nodeSize)
|
||||
success := C.PathToCurrentRoot(m.peer, C.BYTE_SLICE(&entryBuffer), &numEntries, C.size_t(leaf))
|
||||
if !success {
|
||||
return nil, fmt.Errorf("failed to get path to current root from leaf %d", leaf)
|
||||
}
|
||||
return splitSlice(entryBuffer, int(m.nodeSize))
|
||||
}
|
||||
|
||||
func (m *CPPMerkleTree) PathToRootAtSnapshot(leaf, snapshot uint64) ([][]byte, error) {
|
||||
var num_entries C.size_t
|
||||
entryBuffer := make([]byte, C.size_t(m.LevelCount())*m.nodeSize)
|
||||
success := C.PathToRootAtSnapshot(m.peer, C.BYTE_SLICE(&entryBuffer), &num_entries, C.size_t(leaf), C.size_t(snapshot))
|
||||
if !success {
|
||||
return nil, fmt.Errorf("failed to get path to root at snapshot %d from leaf %d", snapshot, leaf)
|
||||
}
|
||||
return splitSlice(entryBuffer, int(m.nodeSize))
|
||||
}
|
||||
|
||||
func (m *CPPMerkleTree) SnapshotConsistency(snapshot1, snapshot2 uint64) ([][]byte, error) {
|
||||
var num_entries C.size_t
|
||||
entryBuffer := make([]byte, C.size_t(m.LevelCount())*m.nodeSize)
|
||||
success := C.SnapshotConsistency(m.peer, C.BYTE_SLICE(&entryBuffer), &num_entries, C.size_t(snapshot1), C.size_t(snapshot2))
|
||||
if !success {
|
||||
return nil, fmt.Errorf("failed to get path to snapshot consistency from %d to %d", snapshot1, snapshot2)
|
||||
}
|
||||
return splitSlice(entryBuffer, int(m.nodeSize))
|
||||
}
|
||||
|
||||
// NewCPPMerkleTree returns a new wrapped C++ MerkleTree, using the
|
||||
// Sha256Hasher.
|
||||
// It is the caller's responsibility to call DeletePeer() when finished with
|
||||
// the tree to deallocate its resources.
|
||||
func NewCPPMerkleTree() *CPPMerkleTree {
|
||||
m := &CPPMerkleTree{
|
||||
peer: C.NewMerkleTree(C.NewSha256Hasher()),
|
||||
}
|
||||
m.nodeSize = C.size_t(C.NodeSize(m.peer))
|
||||
return m
|
||||
}
|
||||
|
||||
// DeletePeer deallocates the memory used by the C++ MerkleTree peer.
|
||||
func (m *CPPMerkleTree) DeletePeer() {
|
||||
C.DeleteMerkleTree(m.peer)
|
||||
m.peer = nil
|
||||
}
|
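A brief sketch of using the cgo-backed tree above; it assumes the C++ MerkleTree library has been built as the package comment describes, and the leaf data is illustrative.

package main

import (
	"crypto/sha256"
	"fmt"
	"log"

	"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree"
)

func main() {
	t := merkletree.NewCPPMerkleTree()
	defer t.DeletePeer()

	// Add a few pre-hashed leaves.
	for i := 0; i < 4; i++ {
		h := sha256.Sum256([]byte{byte(i)})
		t.AddLeafHash(h[:])
	}

	root, err := t.CurrentRoot()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("leaves=%d root=%x\n", t.LeafCount(), root)
}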
||||
154 Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/merkle_tree_go.cc generated vendored Normal file
|
|
@@ -0,0 +1,154 @@
|
|||
#include "merkletree/merkle_tree.h"
|
||||
|
||||
#include <assert.h>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <vector>
|
||||
|
||||
#include "_cgo_export.h"
|
||||
#include "merkle_tree_go.h"
|
||||
|
||||
extern "C" {
|
||||
// Some hollow functions to cast the void* types into what they really
|
||||
// are; they're only really here to provide a little bit of type
|
||||
// safety. Hopefully these should all be optimized away into oblivion
|
||||
// by the compiler.
|
||||
static inline MerkleTree* MT(TREE tree) {
|
||||
assert(tree);
|
||||
return static_cast<MerkleTree*>(tree);
|
||||
}
|
||||
static inline Sha256Hasher* H(HASHER hasher) {
|
||||
assert(hasher);
|
||||
return static_cast<Sha256Hasher*>(hasher);
|
||||
}
|
||||
static inline GoSlice* BS(BYTE_SLICE slice) {
|
||||
assert(slice);
|
||||
return static_cast<GoSlice*>(slice);
|
||||
}
|
||||
|
||||
HASHER NewSha256Hasher() {
|
||||
return new Sha256Hasher;
|
||||
}
|
||||
|
||||
TREE NewMerkleTree(HASHER hasher) {
|
||||
return new MerkleTree(H(hasher));
|
||||
}
|
||||
|
||||
void DeleteMerkleTree(TREE tree) {
|
||||
delete MT(tree);
|
||||
}
|
||||
|
||||
size_t NodeSize(TREE tree) {
|
||||
return MT(tree)->NodeSize();
|
||||
}
|
||||
|
||||
size_t LeafCount(TREE tree) {
|
||||
return MT(tree)->LeafCount();
|
||||
}
|
||||
|
||||
bool LeafHash(TREE tree, BYTE_SLICE out, size_t leaf) {
|
||||
GoSlice* slice(BS(out));
|
||||
const MerkleTree* t(MT(tree));
|
||||
const size_t nodesize(t->NodeSize());
|
||||
if (slice->data == NULL || slice->cap < nodesize) {
|
||||
return false;
|
||||
}
|
||||
const std::string& hash = t->LeafHash(leaf);
|
||||
assert(nodesize == hash.size());
|
||||
memcpy(slice->data, hash.data(), nodesize);
|
||||
slice->len = nodesize;
|
||||
return true;
|
||||
}
|
||||
|
||||
size_t LevelCount(TREE tree) {
|
||||
const MerkleTree* t(MT(tree));
|
||||
return t->LevelCount();
|
||||
}
|
||||
|
||||
size_t AddLeaf(TREE tree, BYTE_SLICE leaf) {
|
||||
GoSlice* slice(BS(leaf));
|
||||
MerkleTree* t(MT(tree));
|
||||
return t->AddLeaf(std::string(static_cast<char*>(slice->data), slice->len));
|
||||
}
|
||||
|
||||
size_t AddLeafHash(TREE tree, BYTE_SLICE hash) {
|
||||
GoSlice* slice(BS(hash));
|
||||
MerkleTree* t(MT(tree));
|
||||
return t->AddLeafHash(
|
||||
std::string(static_cast<char*>(slice->data), slice->len));
|
||||
}
|
||||
|
||||
bool CurrentRoot(TREE tree, BYTE_SLICE out) {
|
||||
GoSlice* slice(BS(out));
|
||||
MerkleTree* t(MT(tree));
|
||||
const size_t nodesize(t->NodeSize());
|
||||
if (slice->data == NULL || slice->len != nodesize) {
|
||||
return false;
|
||||
}
|
||||
const std::string& hash = t->CurrentRoot();
|
||||
assert(nodesize == hash.size());
|
||||
memcpy(slice->data, hash.data(), nodesize);
|
||||
slice->len = nodesize;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool RootAtSnapshot(TREE tree, BYTE_SLICE out, size_t snapshot) {
|
||||
GoSlice* slice(BS(out));
|
||||
MerkleTree* t(MT(tree));
|
||||
const size_t nodesize(t->NodeSize());
|
||||
if (slice->data == NULL || slice->len != nodesize) {
|
||||
return false;
|
||||
}
|
||||
const std::string& hash = t->RootAtSnapshot(snapshot);
|
||||
assert(nodesize == hash.size());
|
||||
memcpy(slice->data, hash.data(), nodesize);
|
||||
slice->len = nodesize;
|
||||
return true;
|
||||
}
|
||||
|
||||
// Copies the fixed-length entries from |path| into the GoSlice
|
||||
// pointed to by |dst|, one after the other in the same order.
|
||||
// |num_copied| is set to the number of entries copied.
|
||||
bool CopyNodesToSlice(const std::vector<std::string>& path, GoSlice* dst,
|
||||
size_t nodesize, size_t* num_copied) {
|
||||
assert(dst);
|
||||
assert(num_copied);
|
||||
if (dst->cap < path.size() * nodesize) {
|
||||
*num_copied = 0;
|
||||
return false;
|
||||
}
|
||||
char* e = static_cast<char*>(dst->data);
|
||||
for (int i = 0; i < path.size(); ++i) {
|
||||
assert(nodesize == path[i].size());
|
||||
memcpy(e, path[i].data(), nodesize);
|
||||
e += nodesize;
|
||||
}
|
||||
dst->len = path.size() * nodesize;
|
||||
*num_copied = path.size();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PathToCurrentRoot(TREE tree, BYTE_SLICE out, size_t* num_entries,
|
||||
size_t leaf) {
|
||||
MerkleTree* t(MT(tree));
|
||||
const std::vector<std::string> path = t->PathToCurrentRoot(leaf);
|
||||
return CopyNodesToSlice(path, BS(out), t->NodeSize(), num_entries);
|
||||
}
|
||||
|
||||
bool PathToRootAtSnapshot(TREE tree, BYTE_SLICE out, size_t* num_entries,
|
||||
size_t leaf, size_t snapshot) {
|
||||
MerkleTree* t(MT(tree));
|
||||
const std::vector<std::string> path =
|
||||
t->PathToRootAtSnapshot(leaf, snapshot);
|
||||
return CopyNodesToSlice(path, BS(out), t->NodeSize(), num_entries);
|
||||
}
|
||||
|
||||
bool SnapshotConsistency(TREE tree, BYTE_SLICE out, size_t* num_entries,
|
||||
size_t snapshot1, size_t snapshot2) {
|
||||
MerkleTree* t(MT(tree));
|
||||
const std::vector<std::string> path =
|
||||
t->SnapshotConsistency(snapshot1, snapshot2);
|
||||
return CopyNodesToSlice(path, BS(out), t->NodeSize(), num_entries);
|
||||
}
|
||||
|
||||
} // extern "C"
|
||||
74 Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/merkle_tree_go.h generated vendored Normal file
|
|
@@ -0,0 +1,74 @@
|
|||
#include <stdbool.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
#ifndef GO_MERKLETREE_MERKLE_TREE_H_
|
||||
#define GO_MERKLETREE_MERKLE_TREE_H_
|
||||
|
||||
// These types & functions provide a trampoline to call the C++ MerkleTree
|
||||
// implementation from within Go code.
|
||||
//
|
||||
// Generally we try to jump through hoops to not allocate memory from the C++
|
||||
// side, but rather have Go allocate it inside its GC memory such that we don't
|
||||
// have to worry about leaks. Apart from the obvious benefit of doing it this
|
||||
// way, it usually also means one less memcpy() too which is nice.
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
// The _cgo_export.h file doesn't appear to exist when this header is pulled in
|
||||
// to the .go file; because of this we can't use types like GoSlice here and so
|
||||
// we end up with void* everywhere; we'll at least typedef them so that the
|
||||
// source is a _little_ more readable.
|
||||
// Grumble grumble.
|
||||
typedef void* HASHER;
|
||||
typedef void* TREE;
|
||||
typedef void* BYTE_SLICE;
|
||||
|
||||
// Allocators & deallocators:
|
||||
|
||||
// Creates a new Sha256Hasher
|
||||
HASHER NewSha256Hasher();
|
||||
|
||||
// Creates a new MerkleTree passing in |hasher|.
|
||||
// The MerkleTree takes ownership of |hasher|.
|
||||
TREE NewMerkleTree(HASHER hasher);
|
||||
|
||||
// Deletes the passed in |tree|.
|
||||
void DeleteMerkleTree(TREE tree);
|
||||
|
||||
// MerkleTree methods below.
|
||||
// See the comments in ../../merkletree/merkle_tree.h for details
|
||||
|
||||
size_t NodeSize(TREE tree);
|
||||
size_t LeafCount(TREE tree);
|
||||
bool LeafHash(TREE tree, BYTE_SLICE out, size_t leaf);
|
||||
size_t LevelCount(TREE tree);
|
||||
size_t AddLeaf(TREE tree, BYTE_SLICE leaf);
|
||||
size_t AddLeafHash(TREE tree, BYTE_SLICE hash);
|
||||
bool CurrentRoot(TREE tree, BYTE_SLICE out);
|
||||
bool RootAtSnapshot(TREE tree, BYTE_SLICE out, size_t snapshot);
|
||||
|
||||
// |out| must contain sufficient space to hold all of the path elements
|
||||
// sequentially.
|
||||
// |num_entries| is set to the number of actual elements stored in |out|.
|
||||
bool PathToCurrentRoot(TREE tree, BYTE_SLICE out, size_t* num_entries,
|
||||
size_t leaf);
|
||||
|
||||
// |out| must contain sufficient space to hold all of the path elements
|
||||
// sequentially.
|
||||
// |num_entries| is set to the number of actual elements stored in |out|.
|
||||
bool PathToRootAtSnapshot(TREE tree, BYTE_SLICE out, size_t* num_entries,
|
||||
size_t leaf, size_t snapshot);
|
||||
|
||||
// |out| must contain sufficient space to hold all of the path elements
|
||||
// sequentially.
|
||||
// |num_entries| is set to the number of actual elements stored in |out|.
|
||||
bool SnapshotConsistency(TREE tree, BYTE_SLICE out, size_t* num_entries,
|
||||
size_t snapshot1, size_t snapshot2);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif // GO_MERKLETREE_MERKLE_TREE_H_
|
||||
38 Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/merkle_tree_interface.go generated vendored Normal file
|
|
@@ -0,0 +1,38 @@
|
|||
package merkletree
|
||||
|
||||
// MerkleTreeInterface represents the common interface for basic MerkleTree functions.
|
||||
type MerkleTreeInterface interface {
|
||||
// LeafCount returns the number of leaves in the tree
|
||||
LeafCount() uint64
|
||||
|
||||
// LevelCount returns the number of levels in the tree
|
||||
LevelCount() uint64
|
||||
|
||||
// AddLeaf adds the hash of |leaf| to the tree and returns the newly added
|
||||
// leaf index
|
||||
AddLeaf(leaf []byte) uint64
|
||||
|
||||
// LeafHash returns the hash of the leaf at index |leaf| or a non-nil error.
|
||||
LeafHash(leaf uint64) ([]byte, error)
|
||||
|
||||
// CurrentRoot returns the current root hash of the merkle tree.
|
||||
CurrentRoot() ([]byte, error)
|
||||
}
|
||||
|
||||
// FullMerkleTreeInterface extends MerkleTreeInterface to the full range of
|
||||
// operations that only a non-compact tree representation can implement.
|
||||
type FullMerkleTreeInterface interface {
|
||||
MerkleTreeInterface
|
||||
|
||||
// RootAtSnapshot returns the root hash at the tree size |snapshot|
|
||||
// which must be <= the current tree size.
|
||||
RootAtSnapshot(snapshot uint64) ([]byte, error)
|
||||
|
||||
// PathToCurrentRoot returns the Merkle path (or inclusion proof) from the
|
||||
// leaf hash at index |leaf| to the current root.
|
||||
PathToCurrentRoot(leaf uint64) ([][]byte, error)
|
||||
|
||||
// SnapshotConsistency returns a consistency proof between the two tree
|
||||
// sizes specified in |snapshot1| and |snapshot2|.
|
||||
SnapshotConsistency(snapshot1, snapshot2 uint64) ([][]byte, error)
|
||||
}
|
||||
1 Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/wrap_merkle_tree.cc generated vendored Normal file
|
|
@@ -0,0 +1 @@
|
|||
#include "merkletree/merkle_tree.cc"
|
||||
1 Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/wrap_merkle_tree_math.cc generated vendored Normal file
|
|
@@ -0,0 +1 @@
|
|||
#include "merkletree/merkle_tree_math.cc"
|
||||
1 Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/wrap_serial_hasher.cc generated vendored Normal file
|
|
@@ -0,0 +1 @@
|
|||
#include "merkletree/serial_hasher.cc"
|
||||
1 Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/wrap_tree_hasher.cc generated vendored Normal file
@@ -0,0 +1 @@
#include "merkletree/tree_hasher.cc"
57 Godeps/_workspace/src/github.com/google/certificate-transparency/go/preload/dumpscts/main/dumpscts.go generated vendored Normal file
@@ -0,0 +1,57 @@
package main

import (
	"compress/zlib"
	"encoding/gob"
	"flag"
	"io"
	"log"
	"os"

	"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/preload"
)

var sctFile = flag.String("sct_file", "", "File to load SCTs & leaf data from")

func main() {
	flag.Parse()
	var sctReader io.ReadCloser
	if *sctFile == "" {
		log.Fatal("Must specify --sct_file")
	}

	sctFileReader, err := os.Open(*sctFile)
	if err != nil {
		log.Fatal(err)
	}
	sctReader, err = zlib.NewReader(sctFileReader)
	if err != nil {
		log.Fatal(err)
	}
	defer func() {
		err := sctReader.Close()
		if err != nil && err != io.EOF {
			log.Fatalf("Error closing file: %s", err)
		}
	}()

	// TODO(alcutter) should probably store this stuff in a protobuf really.
	decoder := gob.NewDecoder(sctReader)
	var addedCert preload.AddedCert
	numAdded := 0
	numFailed := 0
	for {
		err = decoder.Decode(&addedCert)
		if err != nil {
			break
		}
		if addedCert.AddedOk {
			log.Println(addedCert.SignedCertificateTimestamp)
			numAdded++
		} else {
			log.Printf("Cert was not added: %s", addedCert.ErrorMessage)
			numFailed++
		}
	}
	log.Printf("Num certs added: %d, num failed: %d\n", numAdded, numFailed)
}
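An SCT file that this tool can read is simply a zlib-compressed stream of gob-encoded preload.AddedCert records, the same format the preload tool below writes. A hedged sketch of producing such a file (writeSCTFile is a hypothetical helper, not part of the package):

package main

import (
	"compress/zlib"
	"encoding/gob"
	"os"

	"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/preload"
)

// writeSCTFile is a hypothetical helper that writes AddedCert records in the
// layout dumpscts expects: gob records wrapped in a single zlib stream.
func writeSCTFile(path string, certs []preload.AddedCert) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	zw := zlib.NewWriter(f)
	defer zw.Close() // runs before f.Close, flushing the zlib stream

	enc := gob.NewEncoder(zw)
	for _, c := range certs {
		if err := enc.Encode(&c); err != nil {
			return err
		}
	}
	return nil
}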
197 Godeps/_workspace/src/github.com/google/certificate-transparency/go/preload/main/preload.go generated vendored Normal file
@@ -0,0 +1,197 @@
package main
|
||||
|
||||
import (
|
||||
"compress/zlib"
|
||||
"encoding/gob"
|
||||
"flag"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"regexp"
|
||||
"sync"
|
||||
|
||||
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go"
|
||||
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/client"
|
||||
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/preload"
|
||||
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/scanner"
|
||||
)
|
||||
|
||||
const (
|
||||
// A regex which cannot match any input
|
||||
MatchesNothingRegex = "a^"
|
||||
)
|
||||
|
||||
var sourceLogUri = flag.String("source_log_uri", "http://ct.googleapis.com/aviator", "CT log base URI to fetch entries from")
|
||||
var targetLogUri = flag.String("target_log_uri", "http://example.com/ct", "CT log base URI to add entries to")
|
||||
var batchSize = flag.Int("batch_size", 1000, "Max number of entries to request per call to get-entries")
|
||||
var numWorkers = flag.Int("num_workers", 2, "Number of concurrent matchers")
|
||||
var parallelFetch = flag.Int("parallel_fetch", 2, "Number of concurrent GetEntries fetches")
|
||||
var parallelSubmit = flag.Int("parallel_submit", 2, "Number of concurrent add-[pre]-chain requests")
|
||||
var startIndex = flag.Int64("start_index", 0, "Log index to start scanning at")
|
||||
var quiet = flag.Bool("quiet", false, "Don't print out extra logging messages, only matches.")
|
||||
var sctInputFile = flag.String("sct_file", "", "File to save SCTs & leaf data to")
|
||||
var precertsOnly = flag.Bool("precerts_only", false, "Only match precerts")
|
||||
|
||||
func createMatcher() (scanner.Matcher, error) {
|
||||
// Make a "match everything" regex matcher
|
||||
precertRegex := regexp.MustCompile(".*")
|
||||
var certRegex *regexp.Regexp
|
||||
if *precertsOnly {
|
||||
certRegex = regexp.MustCompile(MatchesNothingRegex)
|
||||
} else {
|
||||
certRegex = precertRegex
|
||||
}
|
||||
return scanner.MatchSubjectRegex{
|
||||
CertificateSubjectRegex: certRegex,
|
||||
PrecertificateSubjectRegex: precertRegex}, nil
|
||||
}
|
||||
|
||||
func recordSct(addedCerts chan<- *preload.AddedCert, certDer ct.ASN1Cert, sct *ct.SignedCertificateTimestamp) {
|
||||
addedCert := preload.AddedCert{
|
||||
CertDER: certDer,
|
||||
SignedCertificateTimestamp: *sct,
|
||||
AddedOk: true,
|
||||
}
|
||||
addedCerts <- &addedCert
|
||||
}
|
||||
|
||||
func recordFailure(addedCerts chan<- *preload.AddedCert, certDer ct.ASN1Cert, addError error) {
|
||||
addedCert := preload.AddedCert{
|
||||
CertDER: certDer,
|
||||
AddedOk: false,
|
||||
ErrorMessage: addError.Error(),
|
||||
}
|
||||
addedCerts <- &addedCert
|
||||
}
|
||||
|
||||
func sctWriterJob(addedCerts <-chan *preload.AddedCert, sctWriter io.Writer, wg *sync.WaitGroup) {
|
||||
encoder := gob.NewEncoder(sctWriter)
|
||||
|
||||
numAdded := 0
|
||||
numFailed := 0
|
||||
|
||||
for c := range addedCerts {
|
||||
if c.AddedOk {
|
||||
numAdded++
|
||||
} else {
|
||||
numFailed++
|
||||
}
|
||||
if encoder != nil {
|
||||
err := encoder.Encode(c)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to encode to %s: %v", *sctInputFile, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
log.Printf("Added %d certs, %d failed, total: %d\n", numAdded, numFailed, numAdded+numFailed)
|
||||
wg.Done()
|
||||
}
|
||||
|
||||
func certSubmitterJob(addedCerts chan<- *preload.AddedCert, log_client *client.LogClient, certs <-chan *ct.LogEntry,
|
||||
wg *sync.WaitGroup) {
|
||||
for c := range certs {
|
||||
chain := make([]ct.ASN1Cert, len(c.Chain)+1)
|
||||
chain[0] = c.X509Cert.Raw
|
||||
copy(chain[1:], c.Chain)
|
||||
sct, err := log_client.AddChain(chain)
|
||||
if err != nil {
|
||||
log.Printf("failed to add chain with CN %s: %v\n", c.X509Cert.Subject.CommonName, err)
|
||||
recordFailure(addedCerts, chain[0], err)
|
||||
continue
|
||||
}
|
||||
recordSct(addedCerts, chain[0], sct)
|
||||
if !*quiet {
|
||||
log.Printf("Added chain for CN '%s', SCT: %s\n", c.X509Cert.Subject.CommonName, sct)
|
||||
}
|
||||
}
|
||||
wg.Done()
|
||||
}
|
||||
|
||||
func precertSubmitterJob(addedCerts chan<- *preload.AddedCert, log_client *client.LogClient,
|
||||
precerts <-chan *ct.LogEntry,
|
||||
wg *sync.WaitGroup) {
|
||||
for c := range precerts {
|
||||
sct, err := log_client.AddPreChain(c.Chain)
|
||||
if err != nil {
|
||||
log.Printf("failed to add pre-chain with CN %s: %v", c.Precert.TBSCertificate.Subject.CommonName, err)
|
||||
recordFailure(addedCerts, c.Chain[0], err)
|
||||
continue
|
||||
}
|
||||
recordSct(addedCerts, c.Chain[0], sct)
|
||||
if !*quiet {
|
||||
log.Printf("Added precert chain for CN '%s', SCT: %s\n", c.Precert.TBSCertificate.Subject.CommonName, sct)
|
||||
}
|
||||
}
|
||||
wg.Done()
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
var sctFileWriter io.Writer
|
||||
var err error
|
||||
if *sctInputFile != "" {
|
||||
sctFileWriter, err = os.Create(*sctInputFile)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
sctFileWriter = ioutil.Discard
|
||||
}
|
||||
|
||||
sctWriter := zlib.NewWriter(sctFileWriter)
|
||||
defer func() {
|
||||
err := sctWriter.Close()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
fetchLogClient := client.New(*sourceLogUri)
|
||||
matcher, err := createMatcher()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
opts := scanner.ScannerOptions{
|
||||
Matcher: matcher,
|
||||
BatchSize: *batchSize,
|
||||
NumWorkers: *numWorkers,
|
||||
ParallelFetch: *parallelFetch,
|
||||
StartIndex: *startIndex,
|
||||
Quiet: *quiet,
|
||||
}
|
||||
scanner := scanner.NewScanner(fetchLogClient, opts)
|
||||
|
||||
certs := make(chan *ct.LogEntry, *batchSize**parallelFetch)
|
||||
precerts := make(chan *ct.LogEntry, *batchSize**parallelFetch)
|
||||
addedCerts := make(chan *preload.AddedCert, *batchSize**parallelFetch)
|
||||
|
||||
var sctWriterWG sync.WaitGroup
|
||||
sctWriterWG.Add(1)
|
||||
go sctWriterJob(addedCerts, sctWriter, &sctWriterWG)
|
||||
|
||||
submitLogClient := client.New(*targetLogUri)
|
||||
|
||||
var submitterWG sync.WaitGroup
|
||||
for w := 0; w < *parallelSubmit; w++ {
|
||||
submitterWG.Add(2)
|
||||
go certSubmitterJob(addedCerts, submitLogClient, certs, &submitterWG)
|
||||
go precertSubmitterJob(addedCerts, submitLogClient, precerts, &submitterWG)
|
||||
}
|
||||
|
||||
addChainFunc := func(entry *ct.LogEntry) {
|
||||
certs <- entry
|
||||
}
|
||||
addPreChainFunc := func(entry *ct.LogEntry) {
|
||||
precerts <- entry
|
||||
}
|
||||
|
||||
scanner.Scan(addChainFunc, addPreChainFunc)
|
||||
|
||||
close(certs)
|
||||
close(precerts)
|
||||
submitterWG.Wait()
|
||||
close(addedCerts)
|
||||
sctWriterWG.Wait()
|
||||
}
|
||||
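The main function above is a fan-out/fan-in pipeline: fetched log entries are distributed to parallel submitter goroutines over buffered channels, their results funnel into a single SCT-writer goroutine, and shutdown proceeds by closing each channel only after the upstream WaitGroup has drained. A stripped-down sketch of that same ordering, with hypothetical names standing in for the real stages:

package main

import (
	"fmt"
	"sync"
)

// runPipeline is a hypothetical miniature of the preload pipeline:
// producers -> workers -> one collector, shut down in the same order main()
// uses (close input, wait for workers, close output, wait for collector).
func runPipeline(items []int, parallel int) {
	work := make(chan int, len(items))
	results := make(chan int, len(items))

	var workerWG sync.WaitGroup
	for w := 0; w < parallel; w++ {
		workerWG.Add(1)
		go func() {
			defer workerWG.Done()
			for i := range work {
				results <- i * i // stand-in for AddChain / AddPreChain
			}
		}()
	}

	var collectorWG sync.WaitGroup
	collectorWG.Add(1)
	go func() {
		defer collectorWG.Done()
		for r := range results {
			fmt.Println(r) // stand-in for the gob/zlib SCT writer
		}
	}()

	for _, i := range items {
		work <- i
	}
	close(work)        // no more input
	workerWG.Wait()    // all submitters finished
	close(results)     // collector can now drain and exit
	collectorWG.Wait() // writer flushed
}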
12 Godeps/_workspace/src/github.com/google/certificate-transparency/go/preload/types.go generated vendored Normal file
@@ -0,0 +1,12 @@
package preload

import (
	"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go"
)

type AddedCert struct {
	CertDER                    ct.ASN1Cert
	SignedCertificateTimestamp ct.SignedCertificateTimestamp
	AddedOk                    bool
	ErrorMessage               string
}
86 Godeps/_workspace/src/github.com/google/certificate-transparency/go/scanner/main/scanner.go generated vendored Normal file
@@ -0,0 +1,86 @@
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/big"
|
||||
"regexp"
|
||||
|
||||
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go"
|
||||
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/client"
|
||||
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/scanner"
|
||||
)
|
||||
|
||||
const (
|
||||
// A regex which cannot match any input
|
||||
MatchesNothingRegex = "a^"
|
||||
)
|
||||
|
||||
var logUri = flag.String("log_uri", "http://ct.googleapis.com/aviator", "CT log base URI")
|
||||
var matchSubjectRegex = flag.String("match_subject_regex", ".*", "Regex to match CN/SAN")
|
||||
var precertsOnly = flag.Bool("precerts_only", false, "Only match precerts")
|
||||
var serialNumber = flag.String("serial_number", "", "Serial number of certificate of interest")
|
||||
var batchSize = flag.Int("batch_size", 1000, "Max number of entries to request per call to get-entries")
|
||||
var numWorkers = flag.Int("num_workers", 2, "Number of concurrent matchers")
|
||||
var parallelFetch = flag.Int("parallel_fetch", 2, "Number of concurrent GetEntries fetches")
|
||||
var startIndex = flag.Int64("start_index", 0, "Log index to start scanning at")
|
||||
var quiet = flag.Bool("quiet", false, "Don't print out extra logging messages, only matches.")
|
||||
|
||||
// Prints out a short bit of info about |cert|, found at |index| in the
|
||||
// specified log
|
||||
func logCertInfo(entry *ct.LogEntry) {
|
||||
log.Printf("Interesting cert at index %d: CN: '%s'", entry.Index, entry.X509Cert.Subject.CommonName)
|
||||
}
|
||||
|
||||
// Prints out a short bit of info about |precert|, found at |index| in the
|
||||
// specified log
|
||||
func logPrecertInfo(entry *ct.LogEntry) {
|
||||
log.Printf("Interesting precert at index %d: CN: '%s' Issuer: %s", entry.Index,
|
||||
entry.Precert.TBSCertificate.Subject.CommonName, entry.Precert.TBSCertificate.Issuer.CommonName)
|
||||
}
|
||||
|
||||
func createMatcherFromFlags() (scanner.Matcher, error) {
|
||||
if *serialNumber != "" {
|
||||
log.Printf("Using SerialNumber matcher on %s", *serialNumber)
|
||||
var sn big.Int
|
||||
_, success := sn.SetString(*serialNumber, 0)
|
||||
if !success {
|
||||
return nil, fmt.Errorf("Invalid serialNumber %s", *serialNumber)
|
||||
}
|
||||
return scanner.MatchSerialNumber{SerialNumber: sn}, nil
|
||||
} else {
|
||||
// Make a regex matcher
|
||||
var certRegex *regexp.Regexp
|
||||
precertRegex := regexp.MustCompile(*matchSubjectRegex)
|
||||
switch *precertsOnly {
|
||||
case true:
|
||||
certRegex = regexp.MustCompile(MatchesNothingRegex)
|
||||
case false:
|
||||
certRegex = precertRegex
|
||||
}
|
||||
return scanner.MatchSubjectRegex{
|
||||
CertificateSubjectRegex: certRegex,
|
||||
PrecertificateSubjectRegex: precertRegex}, nil
|
||||
}
|
||||
}
|
||||
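createMatcherFromFlags above hands --serial_number to big.Int.SetString with base 0, so the numeric base is taken from the string's prefix: bare digits parse as decimal and an 0x prefix parses as hexadecimal. A small self-contained illustration (the serial value is made up for the example):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	var a, b big.Int
	a.SetString("1234567890", 0) // base 0 + no prefix: decimal
	b.SetString("0x499602d2", 0) // base 0 + 0x prefix: hexadecimal
	fmt.Println(a.String() == b.String()) // true: the same serial number
}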
|
||||
func main() {
|
||||
flag.Parse()
|
||||
logClient := client.New(*logUri)
|
||||
matcher, err := createMatcherFromFlags()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
opts := scanner.ScannerOptions{
|
||||
Matcher: matcher,
|
||||
BatchSize: *batchSize,
|
||||
NumWorkers: *numWorkers,
|
||||
ParallelFetch: *parallelFetch,
|
||||
StartIndex: *startIndex,
|
||||
Quiet: *quiet,
|
||||
}
|
||||
scanner := scanner.NewScanner(logClient, opts)
|
||||
scanner.Scan(logCertInfo, logPrecertInfo)
|
||||
}
|
||||
399 Godeps/_workspace/src/github.com/google/certificate-transparency/go/scanner/scanner.go generated vendored Normal file
@@ -0,0 +1,399 @@
package scanner
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/big"
|
||||
"regexp"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go"
|
||||
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/client"
|
||||
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509"
|
||||
)
|
||||
|
||||
// Clients wishing to implement their own Matchers should implement this interface:
|
||||
type Matcher interface {
|
||||
// CertificateMatches is called by the scanner for each X509 Certificate found in the log.
|
||||
// The implementation should return |true| if the passed Certificate is interesting, and |false| otherwise.
|
||||
CertificateMatches(*x509.Certificate) bool
|
||||
|
||||
// PrecertificateMatches is called by the scanner for each CT Precertificate found in the log.
|
||||
// The implementation should return |true| if the passed Precertificate is interesting, and |false| otherwise.
|
||||
PrecertificateMatches(*ct.Precertificate) bool
|
||||
}
|
||||
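Any type with these two methods can be plugged into the scanner. A hedged sketch of a custom matcher that flags soon-to-expire certificates; the type is hypothetical and assumes TBSCertificate exposes the same NotAfter field as the x509.Certificate type used elsewhere in this file:

package scanner

import (
	"time"

	"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go"
	"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509"
)

// MatchExpiringBefore is a hypothetical Matcher that reports certificates
// whose NotAfter falls before a cutoff time.
type MatchExpiringBefore struct {
	Cutoff time.Time
}

func (m MatchExpiringBefore) CertificateMatches(c *x509.Certificate) bool {
	return c.NotAfter.Before(m.Cutoff)
}

func (m MatchExpiringBefore) PrecertificateMatches(p *ct.Precertificate) bool {
	return p.TBSCertificate.NotAfter.Before(m.Cutoff)
}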
|
||||
// MatchAll is a Matcher which will match every possible Certificate and Precertificate.
|
||||
type MatchAll struct{}
|
||||
|
||||
func (m MatchAll) CertificateMatches(_ *x509.Certificate) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (m MatchAll) PrecertificateMatches(_ *ct.Precertificate) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// MatchNone is a Matcher which will never match any Certificate or Precertificate.
|
||||
type MatchNone struct{}
|
||||
|
||||
func (m MatchNone) CertificateMatches(_ *x509.Certificate) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (m MatchNone) PrecertificateMatches(_ *ct.Precertificate) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
type MatchSerialNumber struct {
|
||||
SerialNumber big.Int
|
||||
}
|
||||
|
||||
func (m MatchSerialNumber) CertificateMatches(c *x509.Certificate) bool {
|
||||
return c.SerialNumber.String() == m.SerialNumber.String()
|
||||
}
|
||||
|
||||
func (m MatchSerialNumber) PrecertificateMatches(p *ct.Precertificate) bool {
|
||||
return p.TBSCertificate.SerialNumber.String() == m.SerialNumber.String()
|
||||
}
|
||||
|
||||
// MatchSubjectRegex is a Matcher which will use |CertificateSubjectRegex| and |PrecertificateSubjectRegex|
|
||||
// to determine whether Certificates and Precertificates are interesting.
|
||||
// The two regexes are tested against Subject Common Name as well as all
|
||||
// Subject Alternative Names
|
||||
type MatchSubjectRegex struct {
|
||||
CertificateSubjectRegex *regexp.Regexp
|
||||
PrecertificateSubjectRegex *regexp.Regexp
|
||||
}
|
||||
|
||||
// Returns true if either CN or any SAN of |c| matches |CertificateSubjectRegex|.
|
||||
func (m MatchSubjectRegex) CertificateMatches(c *x509.Certificate) bool {
|
||||
if m.CertificateSubjectRegex.FindStringIndex(c.Subject.CommonName) != nil {
|
||||
return true
|
||||
}
|
||||
for _, alt := range c.DNSNames {
|
||||
if m.CertificateSubjectRegex.FindStringIndex(alt) != nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Returns true if either CN or any SAN of |p| matches |PrecertificateSubjectRegex|.
|
||||
func (m MatchSubjectRegex) PrecertificateMatches(p *ct.Precertificate) bool {
|
||||
if m.PrecertificateSubjectRegex.FindStringIndex(p.TBSCertificate.Subject.CommonName) != nil {
|
||||
return true
|
||||
}
|
||||
for _, alt := range p.TBSCertificate.DNSNames {
|
||||
if m.PrecertificateSubjectRegex.FindStringIndex(alt) != nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ScannerOptions holds configuration options for the Scanner
|
||||
type ScannerOptions struct {
|
||||
// Custom matcher for x509 Certificates, functor will be called for each
|
||||
// Certificate found during scanning.
|
||||
Matcher Matcher
|
||||
|
||||
// Match precerts only (Matcher still applies to precerts)
|
||||
PrecertOnly bool
|
||||
|
||||
// Number of entries to request in one batch from the Log
|
||||
BatchSize int
|
||||
|
||||
// Number of concurrent matchers to run
|
||||
NumWorkers int
|
||||
|
||||
// Number of concurrent fetchers to run
|
||||
ParallelFetch int
|
||||
|
||||
// Log entry index to start fetching & matching at
|
||||
StartIndex int64
|
||||
|
||||
// Don't print any status messages to stdout
|
||||
Quiet bool
|
||||
}
|
||||
|
||||
// Creates a new ScannerOptions struct with sensible defaults
|
||||
func DefaultScannerOptions() *ScannerOptions {
|
||||
return &ScannerOptions{
|
||||
Matcher: &MatchAll{},
|
||||
PrecertOnly: false,
|
||||
BatchSize: 1000,
|
||||
NumWorkers: 1,
|
||||
ParallelFetch: 1,
|
||||
StartIndex: 0,
|
||||
Quiet: false,
|
||||
}
|
||||
}
|
||||
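For completeness, here is a hedged sketch of driving a scan from client code, starting from the DefaultScannerOptions above and using the NewScanner and Scan entry points defined later in this file; the log URL is just the same example default the command-line tools use:

package main

import (
	"log"

	"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go"
	"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/client"
	"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/scanner"
)

func main() {
	logClient := client.New("http://ct.googleapis.com/aviator")

	// Start from the defaults and tweak only what we care about.
	opts := scanner.DefaultScannerOptions()
	opts.BatchSize = 500
	opts.ParallelFetch = 4

	s := scanner.NewScanner(logClient, *opts)
	err := s.Scan(
		func(e *ct.LogEntry) { log.Printf("cert at index %d", e.Index) },
		func(e *ct.LogEntry) { log.Printf("precert at index %d", e.Index) },
	)
	if err != nil {
		log.Fatal(err)
	}
}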
|
||||
// Scanner is a tool to scan all the entries in a CT Log.
|
||||
type Scanner struct {
|
||||
// Client used to talk to the CT log instance
|
||||
logClient *client.LogClient
|
||||
|
||||
// Configuration options for this Scanner instance
|
||||
opts ScannerOptions
|
||||
|
||||
// Counter of the number of certificates scanned
|
||||
certsProcessed int64
|
||||
|
||||
// Counter of the number of precertificates encountered during the scan.
|
||||
precertsSeen int64
|
||||
|
||||
unparsableEntries int64
|
||||
entriesWithNonFatalErrors int64
|
||||
}
|
||||
|
||||
// matcherJob represents the context for an individual matcher job.
|
||||
type matcherJob struct {
|
||||
// The log entry returned by the log server
|
||||
entry ct.LogEntry
|
||||
// The index of the entry containing the LeafInput in the log
|
||||
index int64
|
||||
}
|
||||
|
||||
// fetchRange represents a range of certs to fetch from a CT log
|
||||
type fetchRange struct {
|
||||
start int64
|
||||
end int64
|
||||
}
|
||||
|
||||
// Takes the error returned by either x509.ParseCertificate() or
|
||||
// x509.ParseTBSCertificate() and determines if it's non-fatal or otherwise.
|
||||
// In the case of non-fatal errors, the error will be logged,
|
||||
// entriesWithNonFatalErrors will be incremented, and the return value will be
|
||||
// nil.
|
||||
// Fatal errors will be logged, unparsableEntries will be incremented, and the
|
||||
// fatal error itself will be returned.
|
||||
// When |err| is nil, this method does nothing.
|
||||
func (s *Scanner) handleParseEntryError(err error, entryType ct.LogEntryType, index int64) error {
|
||||
if err == nil {
|
||||
// No error to handle
|
||||
return nil
|
||||
}
|
||||
switch err.(type) {
|
||||
case x509.NonFatalErrors:
|
||||
s.entriesWithNonFatalErrors++
|
||||
// We'll make a note, but continue.
|
||||
s.Log(fmt.Sprintf("Non-fatal error in %+v at index %d: %s", entryType, index, err.Error()))
|
||||
default:
|
||||
s.unparsableEntries++
|
||||
s.Log(fmt.Sprintf("Failed to parse in %+v at index %d : %s", entryType, index, err.Error()))
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Processes the given |entry| in the specified log.
|
||||
func (s *Scanner) processEntry(entry ct.LogEntry, foundCert func(*ct.LogEntry), foundPrecert func(*ct.LogEntry)) {
|
||||
atomic.AddInt64(&s.certsProcessed, 1)
|
||||
switch entry.Leaf.TimestampedEntry.EntryType {
|
||||
case ct.X509LogEntryType:
|
||||
if s.opts.PrecertOnly {
|
||||
// Only interested in precerts and this is an X.509 cert, early-out.
|
||||
return
|
||||
}
|
||||
cert, err := x509.ParseCertificate(entry.Leaf.TimestampedEntry.X509Entry)
|
||||
if err = s.handleParseEntryError(err, entry.Leaf.TimestampedEntry.EntryType, entry.Index); err != nil {
|
||||
// We hit an unparseable entry, already logged inside handleParseEntryError()
|
||||
return
|
||||
}
|
||||
if s.opts.Matcher.CertificateMatches(cert) {
|
||||
entry.X509Cert = cert
|
||||
foundCert(&entry)
|
||||
}
|
||||
case ct.PrecertLogEntryType:
|
||||
c, err := x509.ParseTBSCertificate(entry.Leaf.TimestampedEntry.PrecertEntry.TBSCertificate)
|
||||
if err = s.handleParseEntryError(err, entry.Leaf.TimestampedEntry.EntryType, entry.Index); err != nil {
|
||||
// We hit an unparseable entry, already logged inside handleParseEntryError()
|
||||
return
|
||||
}
|
||||
precert := &ct.Precertificate{
|
||||
Raw: entry.Chain[0],
|
||||
TBSCertificate: *c,
|
||||
IssuerKeyHash: entry.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash}
|
||||
if s.opts.Matcher.PrecertificateMatches(precert) {
|
||||
entry.Precert = precert
|
||||
foundPrecert(&entry)
|
||||
}
|
||||
s.precertsSeen++
|
||||
}
|
||||
}
|
||||
|
||||
// Worker function to match certs.
|
||||
// Accepts MatcherJobs over the |entries| channel, and processes them.
|
||||
// Returns true over the |done| channel when the |entries| channel is closed.
|
||||
func (s *Scanner) matcherJob(id int, entries <-chan matcherJob, foundCert func(*ct.LogEntry), foundPrecert func(*ct.LogEntry), wg *sync.WaitGroup) {
|
||||
for e := range entries {
|
||||
s.processEntry(e.entry, foundCert, foundPrecert)
|
||||
}
|
||||
s.Log(fmt.Sprintf("Matcher %d finished", id))
|
||||
wg.Done()
|
||||
}
|
||||
|
||||
// Worker function for fetcher jobs.
|
||||
// Accepts cert ranges to fetch over the |ranges| channel, and if the fetch is
|
||||
// successful sends the individual LeafInputs out (as MatcherJobs) into the
|
||||
// |entries| channel for the matchers to chew on.
|
||||
// Will retry failed attempts to retrieve ranges indefinitely.
|
||||
// Sends true over the |done| channel when the |ranges| channel is closed.
|
||||
func (s *Scanner) fetcherJob(id int, ranges <-chan fetchRange, entries chan<- matcherJob, wg *sync.WaitGroup) {
|
||||
for r := range ranges {
|
||||
success := false
|
||||
// TODO(alcutter): give up after a while:
|
||||
for !success {
|
||||
logEntries, err := s.logClient.GetEntries(r.start, r.end)
|
||||
if err != nil {
|
||||
s.Log(fmt.Sprintf("Problem fetching from log: %s", err.Error()))
|
||||
continue
|
||||
}
|
||||
for _, logEntry := range logEntries {
|
||||
logEntry.Index = r.start
|
||||
entries <- matcherJob{logEntry, r.start}
|
||||
r.start++
|
||||
}
|
||||
if r.start > r.end {
|
||||
// Only complete if we actually got all the leaves we were
|
||||
// expecting -- Logs MAY return fewer than the number of
|
||||
// leaves requested.
|
||||
success = true
|
||||
}
|
||||
}
|
||||
}
|
||||
s.Log(fmt.Sprintf("Fetcher %d finished", id))
|
||||
wg.Done()
|
||||
}
|
||||
|
||||
// Returns the smaller of |a| and |b|
|
||||
func min(a int64, b int64) int64 {
|
||||
if a < b {
|
||||
return a
|
||||
} else {
|
||||
return b
|
||||
}
|
||||
}
|
||||
|
||||
// Returns the larger of |a| and |b|
|
||||
func max(a int64, b int64) int64 {
|
||||
if a > b {
|
||||
return a
|
||||
} else {
|
||||
return b
|
||||
}
|
||||
}
|
||||
|
||||
// Pretty prints the passed in number of |seconds| into a more human readable
|
||||
// string.
|
||||
func humanTime(seconds int) string {
|
||||
nanos := time.Duration(seconds) * time.Second
|
||||
hours := int(nanos / (time.Hour))
|
||||
nanos %= time.Hour
|
||||
minutes := int(nanos / time.Minute)
|
||||
nanos %= time.Minute
|
||||
seconds = int(nanos / time.Second)
|
||||
s := ""
|
||||
if hours > 0 {
|
||||
s += fmt.Sprintf("%d hours ", hours)
|
||||
}
|
||||
if minutes > 0 {
|
||||
s += fmt.Sprintf("%d minutes ", minutes)
|
||||
}
|
||||
if seconds > 0 {
|
||||
s += fmt.Sprintf("%d seconds ", seconds)
|
||||
}
|
||||
return s
|
||||
}
|
||||
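humanTime only breaks a duration into hours, minutes and seconds (never days) and leaves a trailing space after the last unit, so for example:

// humanTime(3725)  == "1 hours 2 minutes 5 seconds "
// humanTime(86400) == "24 hours "
// humanTime(59)    == "59 seconds "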
|
||||
func (s Scanner) Log(msg string) {
|
||||
if !s.opts.Quiet {
|
||||
log.Print(msg)
|
||||
}
|
||||
}
|
||||
|
||||
// Performs a scan against the Log.
|
||||
// For each x509 certificate found, |foundCert| will be called with the
|
||||
// index of the entry and certificate itself as arguments. For each precert
|
||||
// found, |foundPrecert| will be called with the index of the entry and the raw
|
||||
// precert string as the arguments.
|
||||
//
|
||||
// This method blocks until the scan is complete.
|
||||
func (s *Scanner) Scan(foundCert func(*ct.LogEntry),
|
||||
foundPrecert func(*ct.LogEntry)) error {
|
||||
s.Log("Starting up...\n")
|
||||
s.certsProcessed = 0
|
||||
s.precertsSeen = 0
|
||||
s.unparsableEntries = 0
|
||||
s.entriesWithNonFatalErrors = 0
|
||||
|
||||
latestSth, err := s.logClient.GetSTH()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.Log(fmt.Sprintf("Got STH with %d certs", latestSth.TreeSize))
|
||||
|
||||
ticker := time.NewTicker(time.Second)
|
||||
startTime := time.Now()
|
||||
fetches := make(chan fetchRange, 1000)
|
||||
jobs := make(chan matcherJob, 100000)
|
||||
go func() {
|
||||
for range ticker.C {
|
||||
throughput := float64(s.certsProcessed) / time.Since(startTime).Seconds()
|
||||
remainingCerts := int64(latestSth.TreeSize) - int64(s.opts.StartIndex) - s.certsProcessed
|
||||
remainingSeconds := int(float64(remainingCerts) / throughput)
|
||||
remainingString := humanTime(remainingSeconds)
|
||||
s.Log(fmt.Sprintf("Processed: %d certs (to index %d). Throughput: %3.2f ETA: %s\n", s.certsProcessed,
|
||||
s.opts.StartIndex+int64(s.certsProcessed), throughput, remainingString))
|
||||
}
|
||||
}()
|
||||
|
||||
var ranges list.List
|
||||
for start := s.opts.StartIndex; start < int64(latestSth.TreeSize); {
|
||||
end := min(start+int64(s.opts.BatchSize), int64(latestSth.TreeSize)) - 1
|
||||
ranges.PushBack(fetchRange{start, end})
|
||||
start = end + 1
|
||||
}
|
||||
var fetcherWG sync.WaitGroup
|
||||
var matcherWG sync.WaitGroup
|
||||
// Start matcher workers
|
||||
for w := 0; w < s.opts.NumWorkers; w++ {
|
||||
matcherWG.Add(1)
|
||||
go s.matcherJob(w, jobs, foundCert, foundPrecert, &matcherWG)
|
||||
}
|
||||
// Start fetcher workers
|
||||
for w := 0; w < s.opts.ParallelFetch; w++ {
|
||||
fetcherWG.Add(1)
|
||||
go s.fetcherJob(w, fetches, jobs, &fetcherWG)
|
||||
}
|
||||
for r := ranges.Front(); r != nil; r = r.Next() {
|
||||
fetches <- r.Value.(fetchRange)
|
||||
}
|
||||
close(fetches)
|
||||
fetcherWG.Wait()
|
||||
close(jobs)
|
||||
matcherWG.Wait()
|
||||
|
||||
s.Log(fmt.Sprintf("Completed %d certs in %s", s.certsProcessed, humanTime(int(time.Since(startTime).Seconds()))))
|
||||
s.Log(fmt.Sprintf("Saw %d precerts", s.precertsSeen))
|
||||
s.Log(fmt.Sprintf("%d unparsable entries, %d non-fatal errors", s.unparsableEntries, s.entriesWithNonFatalErrors))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Creates a new Scanner instance using |client| to talk to the log, and taking
|
||||
// configuration options from |opts|.
|
||||
func NewScanner(client *client.LogClient, opts ScannerOptions) *Scanner {
|
||||
var scanner Scanner
|
||||
scanner.logClient = client
|
||||
// Set a default match-everything regex if none was provided:
|
||||
if opts.Matcher == nil {
|
||||
opts.Matcher = &MatchAll{}
|
||||
}
|
||||
scanner.opts = opts
|
||||
return &scanner
|
||||
}
|
||||
379 Godeps/_workspace/src/github.com/google/certificate-transparency/go/scanner/scanner_test_data.go generated vendored Normal file
@@ -0,0 +1,379 @@
package scanner
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"log"
|
||||
)
|
||||
|
||||
const (
|
||||
// TODO(alcutter): this signature is syntactically correct, but invalid.
|
||||
FourEntrySTH = "{" +
|
||||
"\"tree_size\":4,\"timestamp\":1396877652123,\"sha256_root_hash\":\"0JBu0CkZnKXc1niEndDaqqgCRHucCfVt1/WBAXs/5T8=\",\"tree_head_signature\":\"AAAACXNpZ25hdHVyZQ==\"}"
|
||||
FourEntries = "{\"entries\":[{\"leaf_input\":\"AAAAAAE9pCDoYwAAAAOGMIIDgjCCAuu" +
|
||||
"gAwIBAgIKFIT5BQAAAAB9PDANBgkqhkiG9w0BAQUFADBGMQswCQYDVQQGEwJVUzETMBEGA1UEChMKR29" +
|
||||
"vZ2xlIEluYzEiMCAGA1UEAxMZR29vZ2xlIEludGVybmV0IEF1dGhvcml0eTAeFw0xMzAyMjAxMzM0NTF" +
|
||||
"aFw0xMzA2MDcxOTQzMjdaMGkxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQ" +
|
||||
"HEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQKEwpHb29nbGUgSW5jMRgwFgYDVQQDEw9tYWlsLmdvb2dsZS5" +
|
||||
"jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOD1FbMyG0IT8JOi2El6RVciBJp4ENfTkpJ2vn/" +
|
||||
"HUq+gjprmUNxLSvcK+D8vBpkq8N41Qv+82PyTuZIB0pg2CJfs07C5+ZAQnwm01DiQjM/j2jKb5GegOBR" +
|
||||
"YngbRkAPSGCufzJy+QBWbd1htqceIREEI/JH7pUGgg90XUQgBddBbAgMBAAGjggFSMIIBTjAdBgNVHSU" +
|
||||
"EFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0OBBYEFAgZmgKeyK8PXIGOAU+/5r/xNy5hMB8GA1U" +
|
||||
"dIwQYMBaAFL/AMOv1QxE+Z7qekfv8atrjaxIkMFsGA1UdHwRUMFIwUKBOoEyGSmh0dHA6Ly93d3cuZ3N" +
|
||||
"0YXRpYy5jb20vR29vZ2xlSW50ZXJuZXRBdXRob3JpdHkvR29vZ2xlSW50ZXJuZXRBdXRob3JpdHkuY3J" +
|
||||
"sMGYGCCsGAQUFBwEBBFowWDBWBggrBgEFBQcwAoZKaHR0cDovL3d3dy5nc3RhdGljLmNvbS9Hb29nbGV" +
|
||||
"JbnRlcm5ldEF1dGhvcml0eS9Hb29nbGVJbnRlcm5ldEF1dGhvcml0eS5jcnQwDAYDVR0TAQH/BAIwADA" +
|
||||
"aBgNVHREEEzARgg9tYWlsLmdvb2dsZS5jb20wDQYJKoZIhvcNAQEFBQADgYEAX0lVXCymPXGdCwvn2kp" +
|
||||
"qJw5Q+Hf8gzGhxDG6aMlO5wj2wf8qPWABDRwHdb4mdSmRMuwhzCJhE3PceXLNf3pOlR/Prt18mDY/r6c" +
|
||||
"LwfldIXgTOYkw/uckGwvb0BwMsEi2FDE/T3d3SOo+lHvqPX9sOVa2uyA0wmIYnbT+5uQY6m0AAA==\"," +
|
||||
"\"extra_data\":\"AAXeAAK0MIICsDCCAhmgAwIBAgIDC2dxMA0GCSqGSIb3DQEBBQUAME4xCzAJBgN" +
|
||||
"VBAYTAlVTMRAwDgYDVQQKEwdFcXVpZmF4MS0wKwYDVQQLEyRFcXVpZmF4IFNlY3VyZSBDZXJ0aWZpY2F" +
|
||||
"0ZSBBdXRob3JpdHkwHhcNMDkwNjA4MjA0MzI3WhcNMTMwNjA3MTk0MzI3WjBGMQswCQYDVQQGEwJVUzE" +
|
||||
"TMBEGA1UEChMKR29vZ2xlIEluYzEiMCAGA1UEAxMZR29vZ2xlIEludGVybmV0IEF1dGhvcml0eTCBnzA" +
|
||||
"NBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAye23pIucV+eEPkB9hPSP0XFjU5nneXQUr0SZMyCSjXvlKAy" +
|
||||
"6rWxJfoNfNFlOCnowzdDXxFdF7dWq1nMmzq0yE7jXDx07393cCDaob1FEm8rWIFJztyaHNWrbqeXUWaU" +
|
||||
"r/GcZOfqTGBhs3t0lig4zFEfC7wFQeeT9adGnwKziV28CAwEAAaOBozCBoDAOBgNVHQ8BAf8EBAMCAQY" +
|
||||
"wHQYDVR0OBBYEFL/AMOv1QxE+Z7qekfv8atrjaxIkMB8GA1UdIwQYMBaAFEjmaPkr0rKV10fYIyAQTzO" +
|
||||
"YkJ/UMBIGA1UdEwEB/wQIMAYBAf8CAQAwOgYDVR0fBDMwMTAvoC2gK4YpaHR0cDovL2NybC5nZW90cnV" +
|
||||
"zdC5jb20vY3Jscy9zZWN1cmVjYS5jcmwwDQYJKoZIhvcNAQEFBQADgYEAuIojxkiWsRF8YHdeBZqrocb" +
|
||||
"6ghwYB8TrgbCoZutJqOkM0ymt9e8kTP3kS8p/XmOrmSfLnzYhLLkQYGfN0rTw8Ktx5YtaiScRhKqOv5n" +
|
||||
"wnQkhClIZmloJ0pC3+gz4fniisIWvXEyZ2VxVKfmlUUIuOss4jHg7y/j7lYe8vJD5UDIAAyQwggMgMII" +
|
||||
"CiaADAgECAgQ13vTPMA0GCSqGSIb3DQEBBQUAME4xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFcXVpZmF" +
|
||||
"4MS0wKwYDVQQLEyRFcXVpZmF4IFNlY3VyZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNOTgwODIyMTY" +
|
||||
"0MTUxWhcNMTgwODIyMTY0MTUxWjBOMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1U" +
|
||||
"ECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUAA4G" +
|
||||
"NADCBiQKBgQDBXbFYZwhi7qCaLR8IbZEUaJgKHv7aBG8ThGIhw9F8zp8F4LgB8E407OKKlQRkrPFrU18" +
|
||||
"Fs8tngL9CAo7+3QEJ7OEAFE/8+/AM3UO6WyvhH4BwmRVXkxbxD5dqt8JoIxzMTVkwrFEeO68r1u5jRXv" +
|
||||
"F2V9Q0uNQDzqI578U/eDHuQIDAQABo4IBCTCCAQUwcAYDVR0fBGkwZzBloGOgYaRfMF0xCzAJBgNVBAY" +
|
||||
"TAlVTMRAwDgYDVQQKEwdFcXVpZmF4MS0wKwYDVQQLEyRFcXVpZmF4IFNlY3VyZSBDZXJ0aWZpY2F0ZSB" +
|
||||
"BdXRob3JpdHkxDTALBgNVBAMTBENSTDEwGgYDVR0QBBMwEYEPMjAxODA4MjIxNjQxNTFaMAsGA1UdDwQ" +
|
||||
"EAwIBBjAfBgNVHSMEGDAWgBRI5mj5K9KylddH2CMgEE8zmJCf1DAdBgNVHQ4EFgQUSOZo+SvSspXXR9g" +
|
||||
"jIBBPM5iQn9QwDAYDVR0TBAUwAwEB/zAaBgkqhkiG9n0HQQAEDTALGwVWMy4wYwMCBsAwDQYJKoZIhvc" +
|
||||
"NAQEFBQADgYEAWM4p6vz33rXOArkXtYXRuePglcwlMQ0AppJuf7aSY55QldGab+QR3mOFbpjuqP9ayNN" +
|
||||
"VsmZxV97AIes9KqcjSQEEhkJ7/O5/ohZStWdn00DbOyZYsih3Pa4Ud2HW+ipmJ6AN+qdzXOpw8ZQhZUR" +
|
||||
"f+vzvKWipood573nvT6wHdzg=\"},{\"leaf_input\":\"AAAAAAE9pe0GcwAAAATWMIIE0jCCA7qgA" +
|
||||
"wIBAgIDAPY6MA0GCSqGSIb3DQEBBQUAMEAxCzAJBgNVBAYTAlVTMRcwFQYDVQQKEw5HZW9UcnVzdCwgS" +
|
||||
"W5jLjEYMBYGA1UEAxMPR2VvVHJ1c3QgU1NMIENBMB4XDTExMTAyMTExMDUwNloXDTEzMTEyMjA0MzI0N" +
|
||||
"1owgc4xKTAnBgNVBAUTIFRqbGZoUTB0cXp3WmtNa0svNXFNdGZqbjJ6aWRVNzRoMQswCQYDVQQGEwJVU" +
|
||||
"zEXMBUGA1UECBMOU291dGggQ2Fyb2xpbmExEzARBgNVBAcTCkNoYXJsZXN0b24xFzAVBgNVBAoTDkJsY" +
|
||||
"WNrYmF1ZCBJbmMuMRAwDgYDVQQLEwdIb3N0aW5nMTswOQYDVQQDEzJ3d3cuc3RydWxlYXJ0c2NlbnRyZ" +
|
||||
"S5wdXJjaGFzZS10aWNrZXRzLW9ubGluZS5jby51azCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCg" +
|
||||
"gEBAJtkbcF8x3TtIARHC8BDRtoIAdh9HO9fo+5UUDtoc8f4xq7Rb2xbWOiEX29JqZOdsuucYTuYbbDf0" +
|
||||
"uBYcJpkwhEg4Vg5skyfp0jAd6pXm1euQ+RiRShzEQYKJ8y4/IjZHttA/8HSzEKWJnuidsYrl/twFhlX5" +
|
||||
"WIZq3BUVQ9GVqGe9n1r2eIFTs6FxYUpaVzTkc6OLh1qSz+cnDDPigLUoUOK/KqN7ybmJxSefJw9WpFW/" +
|
||||
"pIn6M0gFAbu0egFgDybQ3JwUAEh8ddzpKRCqGq1mdZAKpKFHcqmi5nG5aFD4p1NFmPjDVQXohXLQvwtm" +
|
||||
"wwKS2Zo+tnulPnEe9jjET/f+MUCAwEAAaOCAUQwggFAMB8GA1UdIwQYMBaAFEJ5VBthzVUrPmPVPEhX9" +
|
||||
"Z/7Rc5KMA4GA1UdDwEB/wQEAwIEsDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwPQYDVR0RB" +
|
||||
"DYwNIIyd3d3LnN0cnVsZWFydHNjZW50cmUucHVyY2hhc2UtdGlja2V0cy1vbmxpbmUuY28udWswPQYDV" +
|
||||
"R0fBDYwNDAyoDCgLoYsaHR0cDovL2d0c3NsLWNybC5nZW90cnVzdC5jb20vY3Jscy9ndHNzbC5jcmwwH" +
|
||||
"QYDVR0OBBYEFDIdT1lJ84lcDpGuBOuAXrP0AlBVMAwGA1UdEwEB/wQCMAAwQwYIKwYBBQUHAQEENzA1M" +
|
||||
"DMGCCsGAQUFBzAChidodHRwOi8vZ3Rzc2wtYWlhLmdlb3RydXN0LmNvbS9ndHNzbC5jcnQwDQYJKoZIh" +
|
||||
"vcNAQEFBQADggEBAFhFfVTB5NWG3rVaq1jM72uGneGCjGk4qV4uKtEFn+zTJe9W2N/u8V2+mLvWQfDGP" +
|
||||
"r8X5u8KzBOQ+fl6aRxvI71EM3kjMu6UuJkUwXsoocK1c/iVBwWSpqem20t/2Z2n5oIN54QsKZX6tQd9J" +
|
||||
"HQ95YwtlyC7H4VeDKtJZ5x9UhJi8v35C+UgYPmiU5PdeoTdwxCf285FoQL9fBAPbv+EGek1XVaVg2yJK" +
|
||||
"ptG2OeM8AaynHsFcK/OcZJtsiGhtu2s9F910OBpoU+lhnPylwxOf4k35JcLaqHJ3BbLUtybbduNqtf3+" +
|
||||
"sYhkvp5IcCypoJy/Rk4fHgD8VTNiNWj7KGuHRYAAA==\",\"extra_data\":\"AAqLAAPdMIID2TCCA" +
|
||||
"sGgAwIBAgIDAjbQMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzd" +
|
||||
"CBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9iYWwgQ0EwHhcNMTAwMjE5MjIzOTI2WhcNMjAwMjE4M" +
|
||||
"jIzOTI2WjBAMQswCQYDVQQGEwJVUzEXMBUGA1UEChMOR2VvVHJ1c3QsIEluYy4xGDAWBgNVBAMTD0dlb" +
|
||||
"1RydXN0IFNTTCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJCzgMHk5UatcGA9uuUU3" +
|
||||
"Z6KXot1WubKbUGlI+g5hSZ6p1V3mkihkn46HhrxJ6ujTDnMyz1Hr4GuFmpcN+9FQf37mpc8oEOdxt8XI" +
|
||||
"dGKolbCA0mEEoE+yQpUYGa5jFTk+eb5lPHgX3UR8im55IaisYmtph6DKWOy8FQchQt65+EuDa+kvc3ns" +
|
||||
"VrXjAVaDktzKIt1XTTYdwvhdGLicTBi2LyKBeUxY0pUiWozeKdOVSQdl+8a5BLGDzAYtDRN4dgjOyFbL" +
|
||||
"TAZJQ5096QhS6CkIMlszZhWwPKoXz4mdaAN+DaIiixafWcwqQ/RmXAueOFRJq9VeiS+jDkNd53eAsMMv" +
|
||||
"R8CAwEAAaOB2TCB1jAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFEJ5VBthzVUrPmPVPEhX9Z/7Rc5KM" +
|
||||
"B8GA1UdIwQYMBaAFMB6mGiNifurBWQMEX2qfWW4ysxOMBIGA1UdEwEB/wQIMAYBAf8CAQAwOgYDVR0fB" +
|
||||
"DMwMTAvoC2gK4YpaHR0cDovL2NybC5nZW90cnVzdC5jb20vY3Jscy9ndGdsb2JhbC5jcmwwNAYIKwYBB" +
|
||||
"QUHAQEEKDAmMCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5nZW90cnVzdC5jb20wDQYJKoZIhvcNAQEFB" +
|
||||
"QADggEBANTvU4ToGr2hiwTAqfVfoRB4RV2yV2pOJMtlTjGXkZrUJPjiJ2ZwMZzBYlQG55cdOprApClIC" +
|
||||
"q8kx6jEmlTBfEx4TCtoLF0XplR4TEbigMMfOHES0tdT41SFULgCy+5jOvhWiU1Vuy7AyBh3hjELC3Dwf" +
|
||||
"jWDpCoTZFZnNF0WX3OsewYk2k9QbSqr0E1TQcKOu3EDSSmGGM8hQkx0YlEVxW+o78Qn5Rsz3VqI138S0" +
|
||||
"adhJR/V4NwdzxoQ2KDLX4z6DOW/cf/lXUQdpj6HR/oaToODEj+IZpWYeZqF6wJHzSXj8gYETpnKXKBue" +
|
||||
"rvdo5AaRTPvvz7SBMS24CqFZUE+ENQAA4EwggN9MIIC5qADAgECAgMSu+YwDQYJKoZIhvcNAQEFBQAwT" +
|
||||
"jELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VxdWlmYXgxLTArBgNVBAsTJEVxdWlmYXggU2VjdXJlIENlc" +
|
||||
"nRpZmljYXRlIEF1dGhvcml0eTAeFw0wMjA1MjEwNDAwMDBaFw0xODA4MjEwNDAwMDBaMEIxCzAJBgNVB" +
|
||||
"AYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9iYWwgQ0Ewg" +
|
||||
"gEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDazBhjMP30FyMaVn5b3zxsOORxt3iR1Lyh2Ez4q" +
|
||||
"EO2A+lNIQcIiNpYL2Y5Kb0FeIudOOgFt2p+caTmxGCmsO+A5IkoD54l1u2D862mkceYyUIYNRSdrZhGk" +
|
||||
"i5PyvGHQ8EWlVctUO+JLYB6V63y7l9r0gCNuRT4FBU12cBGo3tyyJG/yVUrzdCXPpwmZMzfzoMZccpO5" +
|
||||
"tTVe6kZzVXeyOzSXjhT5VxPjC3+UCM2/Gbmy46kORkAt5UCOZELDv44LtEdBZr2TT5vDwcdrywej2A54" +
|
||||
"vo2UxM51F4mK9s9qBS9MusYAyhSBHHlqzM94Ti7BzaEYpx56hYw9F/AK+hxa+T5AgMBAAGjgfAwge0wH" +
|
||||
"wYDVR0jBBgwFoAUSOZo+SvSspXXR9gjIBBPM5iQn9QwHQYDVR0OBBYEFMB6mGiNifurBWQMEX2qfWW4y" +
|
||||
"sxOMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMDoGA1UdHwQzMDEwL6AtoCuGKWh0dHA6L" +
|
||||
"y9jcmwuZ2VvdHJ1c3QuY29tL2NybHMvc2VjdXJlY2EuY3JsME4GA1UdIARHMEUwQwYEVR0gADA7MDkGC" +
|
||||
"CsGAQUFBwIBFi1odHRwczovL3d3dy5nZW90cnVzdC5jb20vcmVzb3VyY2VzL3JlcG9zaXRvcnkwDQYJK" +
|
||||
"oZIhvcNAQEFBQADgYEAduESbk5LFhKGMAaygQjP8AjHx3F+Zu7C7dQ7H//w8MhO1kM4sLkwfRjQVYOia" +
|
||||
"ss2EZzoSGajbX+4E9RH/otaXHP8rtkbMhk4q5c0FKqW0uujHBQISba75ZHvgzbrHVZvytq8c2OQ5H97P" +
|
||||
"iLLPQftXzh0nOMDUE6hr5juYfKEPxIAAyQwggMgMIICiaADAgECAgQ13vTPMA0GCSqGSIb3DQEBBQUAM" +
|
||||
"E4xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFcXVpZmF4MS0wKwYDVQQLEyRFcXVpZmF4IFNlY3VyZSBDZ" +
|
||||
"XJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNOTgwODIyMTY0MTUxWhcNMTgwODIyMTY0MTUxWjBOMQswCQYDV" +
|
||||
"QQGEwJVUzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhd" +
|
||||
"GUgQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDBXbFYZwhi7qCaLR8IbZEUaJgKH" +
|
||||
"v7aBG8ThGIhw9F8zp8F4LgB8E407OKKlQRkrPFrU18Fs8tngL9CAo7+3QEJ7OEAFE/8+/AM3UO6WyvhH" +
|
||||
"4BwmRVXkxbxD5dqt8JoIxzMTVkwrFEeO68r1u5jRXvF2V9Q0uNQDzqI578U/eDHuQIDAQABo4IBCTCCA" +
|
||||
"QUwcAYDVR0fBGkwZzBloGOgYaRfMF0xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFcXVpZmF4MS0wKwYDV" +
|
||||
"QQLEyRFcXVpZmF4IFNlY3VyZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxDTALBgNVBAMTBENSTDEwGgYDV" +
|
||||
"R0QBBMwEYEPMjAxODA4MjIxNjQxNTFaMAsGA1UdDwQEAwIBBjAfBgNVHSMEGDAWgBRI5mj5K9KylddH2" +
|
||||
"CMgEE8zmJCf1DAdBgNVHQ4EFgQUSOZo+SvSspXXR9gjIBBPM5iQn9QwDAYDVR0TBAUwAwEB/zAaBgkqh" +
|
||||
"kiG9n0HQQAEDTALGwVWMy4wYwMCBsAwDQYJKoZIhvcNAQEFBQADgYEAWM4p6vz33rXOArkXtYXRuePgl" +
|
||||
"cwlMQ0AppJuf7aSY55QldGab+QR3mOFbpjuqP9ayNNVsmZxV97AIes9KqcjSQEEhkJ7/O5/ohZStWdn0" +
|
||||
"0DbOyZYsih3Pa4Ud2HW+ipmJ6AN+qdzXOpw8ZQhZURf+vzvKWipood573nvT6wHdzg=\"},{\"leaf_i" +
|
||||
"nput\":\"AAAAAAE9pe0GcwAAAATjMIIE3zCCA8egAwIBAgIUCimKXmNJ+wiDS2zJvg6LC2cvrvQwDQY" +
|
||||
"JKoZIhvcNAQEFBQAwWjELMAkGA1UEBhMCSlAxIzAhBgNVBAoMGkN5YmVydHJ1c3QgSmFwYW4gQ28uLCB" +
|
||||
"MdGQuMSYwJAYDVQQDDB1DeWJlcnRydXN0IEphcGFuIFB1YmxpYyBDQSBHMjAeFw0xMjAzMTkwMzE0MzN" +
|
||||
"aFw0xNTAzMzExNDU5MDBaMIGKMQswCQYDVQQGEwJKUDEOMAwGA1UECBMFVG9reW8xEDAOBgNVBAcTB0N" +
|
||||
"odW8ta3UxHjAcBgNVBAoTFU5ldCBEcmVhbWVycyBDby4sTHRkLjEeMBwGA1UECxMVTWVnYSBNZWRpYSB" +
|
||||
"EZXBhcnRtZW50MRkwFwYDVQQDExB3d3cubmV0a2VpYmEuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8" +
|
||||
"AMIIBCgKCAQEA2to03F4GdlRiGljXrSmT08/WrY59UWaoe/H4wQN6S5eQKVtaLjBWUF5Ro4sm/kND7au" +
|
||||
"fyDqXUePxiZkphupV+VO7PeKp9e5yqEijK4z2XoFQhrCH5kkn1GDrTNzonxyAQtiBJ/k6gVTJV5fn4s7" +
|
||||
"I6bZ2aXiJLIlTCFwMDNkrB3fj9py86WwymXaypSHkmo9Sx6PFiIOwPH6vXRK4UyAfFpXPiLGJENEWOY2" +
|
||||
"AtzMJiIoupgAuyvmoY0G0Vk34mA9gOIOrKE2QmVSR3AtA31UpNZ33qvimfz96rHtCeiZj5HNxZRBMGBs" +
|
||||
"HTlu5e49xypiYCCV41jQvmfZOShan3R3o2QIDAQABo4IBajCCAWYwCQYDVR0TBAIwADCBuAYDVR0gBIG" +
|
||||
"wMIGtMIGqBggqgwiMmxEBATCBnTBXBggrBgEFBQcCAjBLGklGb3IgbW9yZSBkZXRhaWxzLCBwbGVhc2U" +
|
||||
"gdmlzaXQgb3VyIHdlYnNpdGUgaHR0cHM6Ly93d3cuY3liZXJ0cnVzdC5uZS5qcCAuMEIGCCsGAQUFBwI" +
|
||||
"BFjZodHRwczovL3d3dy5jeWJlcnRydXN0Lm5lLmpwL3NzbC9yZXBvc2l0b3J5L2luZGV4Lmh0bWwwGwY" +
|
||||
"DVR0RBBQwEoIQd3d3Lm5ldGtlaWJhLmNvbTALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwE" +
|
||||
"GCCsGAQUFBwMCMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly9zdXJlc2VyaWVzLWNybC5jeWJlcnRydXN" +
|
||||
"0Lm5lLmpwL1N1cmVTZXJ2ZXIvY3RqcHViY2FnMi9jZHAuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQAw8sX" +
|
||||
"P2ecKp5QGXtzcxKwkkznqocaddzoG69atcyzwshySLfo0ElMHP5WG9TpVrb6XSh2a1edwduAWBVAHQsH" +
|
||||
"i4bt4wX9e9DBMnQx/jelcJevABQsXJPGc86diisXYDkHKQesi+8CvWvE0GmbVJRoq0RDo14WASQszuqT" +
|
||||
"NW993walCzNTg88s7MniFgmgFd8n31SVls6QhY2Fmlr13JLDtzVDQDbj6MCPuwG8DdmR1bCM/ugcnk0a" +
|
||||
"7ZVy3d4yTjdhKpocToFklhHtHg0AINghPXIqU0njjUsy3ujNYIYo1TaZ3835Bo0lDwdvKK68Jka24Cfc" +
|
||||
"m+vfUfHKB56sIzquxAAA=\",\"extra_data\":\"AArbAAQ4MIIENDCCAxygAwIBAgIEBydcJjANBgk" +
|
||||
"qhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJRTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJ" +
|
||||
"lclRydXN0MSIwIAYDVQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTExMDgxODE4MzYzM1o" +
|
||||
"XDTE4MDgwOTE4MzU0OVowWjELMAkGA1UEBhMCSlAxIzAhBgNVBAoMGkN5YmVydHJ1c3QgSmFwYW4gQ28" +
|
||||
"uLCBMdGQuMSYwJAYDVQQDDB1DeWJlcnRydXN0IEphcGFuIFB1YmxpYyBDQSBHMjCCASIwDQYJKoZIhvc" +
|
||||
"NAQEBBQADggEPADCCAQoCggEBALbcdvu5RPsSfFSwu0F1dPA1R54nukNERWAZzUQKsnjl+h4kOwIfaHd" +
|
||||
"g9OsiBQo3btv3FSC7PVPU0BGO1OtnvtjdBTeUQSUj75oQo8P3AL26JpJngVCpT56RPE4gulJ//0xNjqq" +
|
||||
"tTl+8J5cCKf2Vg0m/CrqxNRg1qXOIYlGsFBc0UOefxvOTXbnFAE83kHqBD9T1cinojGKscTvzLt8qXOm" +
|
||||
"+51Ykgiiavz39cUL9xXtrNwlHUD5ykao7xU+dEm49gANUSUEVPPKGRHQo9bmjG9t2x+oDiaBg6VH2oWQ" +
|
||||
"+dJvbKssYPMHnaBiJ7Ks4LlC5b24VMygdL9WAF4Yi8x0M4IcCAwEAAaOCAQAwgf0wEgYDVR0TAQH/BAg" +
|
||||
"wBgEB/wIBADBTBgNVHSAETDBKMEgGCSsGAQQBsT4BADA7MDkGCCsGAQUFBwIBFi1odHRwOi8vY3liZXJ" +
|
||||
"0cnVzdC5vbW5pcm9vdC5jb20vcmVwb3NpdG9yeS5jZm0wDgYDVR0PAQH/BAQDAgEGMB8GA1UdIwQYMBa" +
|
||||
"AFOWdWTCCR1jMrPoIVDaGezq1BE3wMEIGA1UdHwQ7MDkwN6A1oDOGMWh0dHA6Ly9jZHAxLnB1YmxpYy1" +
|
||||
"0cnVzdC5jb20vQ1JML09tbmlyb290MjAyNS5jcmwwHQYDVR0OBBYEFBvkje86cWsSZWjPtpG8OUMBjXX" +
|
||||
"JMA0GCSqGSIb3DQEBBQUAA4IBAQBtK+3pj7Yp1rYwuuZttcNT0sm4Ck5In/E/Oiq0+3SW5r0YvKd5wHj" +
|
||||
"BObog406A0iTVpXt/YqPa1A8NqZ2qxem8CMlIZpiewPneq23lsDPCcNCW1x5vmAQVY0i7moVdG2nztE/" +
|
||||
"zpnAWDyEZf62wAzlJhoyic06T3CEBaLDvDXAaeqKyzCJCkVS9rHAEjUxc/Dqikvb5KhJAzXa3ZvTX0qv" +
|
||||
"ejizZ3Qk1NydWC662rpqDYPBff/Ctsxz6uHRfx+zADq3Yw8+f0jAOXFEfPhniwdKpkA/mV7mvBHai8gg" +
|
||||
"EJQo1u3MEMdCYRn82wWEWo4qMmd4QBfLe7aUJZJeEj0KoeyLEAAQ8MIIEODCCA6GgAwIBAgIEBydtuTA" +
|
||||
"NBgkqhkiG9w0BAQUFADB1MQswCQYDVQQGEwJVUzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQY" +
|
||||
"DVQQLEx5HVEUgQ3liZXJUcnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN" +
|
||||
"0IEdsb2JhbCBSb290MB4XDTEwMTEzMDE2MzUyMVoXDTE4MDgxMDE1MzQyNlowWjELMAkGA1UEBhMCSUU" +
|
||||
"xEjAQBgNVBAoTCUJhbHRpbW9yZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3J" +
|
||||
"lIEN5YmVyVHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKrmD1X6CZ" +
|
||||
"ymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjrIZ3AQSsBUnuId9Mcj8e6uYi" +
|
||||
"1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeKmpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dn" +
|
||||
"KM3p31vjsufFoREJIE9LAwqSuXmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xX" +
|
||||
"tabz5OTZydc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/yejl0qhqd" +
|
||||
"NkNwnGjkCAwEAAaOCAWowggFmMBIGA1UdEwEB/wQIMAYBAf8CAQMwTgYDVR0gBEcwRTBDBgRVHSAAMDs" +
|
||||
"wOQYIKwYBBQUHAgEWLWh0dHA6Ly9jeWJlcnRydXN0Lm9tbmlyb290LmNvbS9yZXBvc2l0b3J5LmNmbTA" +
|
||||
"OBgNVHQ8BAf8EBAMCAQYwgYkGA1UdIwSBgTB/oXmkdzB1MQswCQYDVQQGEwJVUzEYMBYGA1UEChMPR1R" +
|
||||
"FIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJUcnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgN" +
|
||||
"VBAMTGkdURSBDeWJlclRydXN0IEdsb2JhbCBSb290ggIBpTBFBgNVHR8EPjA8MDqgOKA2hjRodHRwOi8" +
|
||||
"vd3d3LnB1YmxpYy10cnVzdC5jb20vY2dpLWJpbi9DUkwvMjAxOC9jZHAuY3JsMB0GA1UdDgQWBBTlnVk" +
|
||||
"wgkdYzKz6CFQ2hns6tQRN8DANBgkqhkiG9w0BAQUFAAOBgQAWtCzJ8V7honubeCB6SnBwhhkAtwUq6Mk" +
|
||||
"lOQ/DZDx1CdmJFYAHwo28KaVkUM9xdUcjvU3Yf3eaURBuTh8gPEecQ3R/loQQTBNDvvjgci7/v648CgN" +
|
||||
"ggktv+ZrFHvavkDufYTs+3psFGsYsPFchCA9U+ihjbOgbnA/P3TBEE7lX/gACXjCCAlowggHDAgIBpTA" +
|
||||
"NBgkqhkiG9w0BAQQFADB1MQswCQYDVQQGEwJVUzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQY" +
|
||||
"DVQQLEx5HVEUgQ3liZXJUcnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN" +
|
||||
"0IEdsb2JhbCBSb290MB4XDTk4MDgxMzAwMjkwMFoXDTE4MDgxMzIzNTkwMFowdTELMAkGA1UEBhMCVVM" +
|
||||
"xGDAWBgNVBAoTD0dURSBDb3Jwb3JhdGlvbjEnMCUGA1UECxMeR1RFIEN5YmVyVHJ1c3QgU29sdXRpb25" +
|
||||
"zLCBJbmMuMSMwIQYDVQQDExpHVEUgQ3liZXJUcnVzdCBHbG9iYWwgUm9vdDCBnzANBgkqhkiG9w0BAQE" +
|
||||
"FAAOBjQAwgYkCgYEAlQ+gtvBQnOh6x4jN3RcOLrCU0Bs9DvaUwIqUxwbIkJfIuGQaen5sPFPhNyhzYH+" +
|
||||
"yl1MHn1P5bViU0q+NbYhngObtspXPcjHKpRxyulwC52RC5/mpLNY6DayNQqokATnmnD8BhVcNWIdF+NO" +
|
||||
"FqpNpJoVwSIA/EhXHebQfBS87YpkCAwEAATANBgkqhkiG9w0BAQQFAAOBgQBt6xsJ6V7ZUdtnImGkKjx" +
|
||||
"Id+OgfKbec6IUA4U9+6sOMMWDFjOBEwieezRO30DIdNe5fdz0dlV9m2NUGOnw6vNcsdmLQh65wJVOuvr" +
|
||||
"V4nz1aGG/juwFl19bsNejhTTEJKcND5WT78uU2J4fnVyFbceqrk8fIrXNla26p8z5qwt6fw==\"},{\"" +
|
||||
"leaf_input\":\"AAAAAAE9pe0GdAAAAAWmMIIFojCCBIqgAwIBAgISESE1Pz3s7WxTnxbUXmwjh7QhM" +
|
||||
"A0GCSqGSIb3DQEBBQUAMFkxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMS8wL" +
|
||||
"QYDVQQDEyZHbG9iYWxTaWduIEV4dGVuZGVkIFZhbGlkYXRpb24gQ0EgLSBHMjAeFw0xMTEwMTAxNDE2M" +
|
||||
"zdaFw0xMzEwMTAxNDE2MzdaMIHpMR0wGwYDVQQPDBRQcml2YXRlIE9yZ2FuaXphdGlvbjERMA8GA1UEB" +
|
||||
"RMIMDIzOTczNzMxEzARBgsrBgEEAYI3PAIBAxMCR0IxCzAJBgNVBAYTAkdCMRQwEgYDVQQIEwtPeGZvc" +
|
||||
"mRzaGlyZTEPMA0GA1UEBxMGT3hmb3JkMRgwFgYDVQQJEw9CZWF1bW9udCBTdHJlZXQxCzAJBgNVBAsTA" +
|
||||
"klUMSMwIQYDVQQKExpUaGUgT3hmb3JkIFBsYXlob3VzZSBUcnVzdDEgMB4GA1UEAxMXd3d3Lm94Zm9yZ" +
|
||||
"HBsYXlob3VzZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC2VgUJx+QIlEn4vMq5Y" +
|
||||
"ajmJEk1Lv5Kwc95oqEb2EbQMVhCJct0OA0wKJbnFGaNIo5DJHIouuz98JoHixMB54EwZi5I64wvqyq1o" +
|
||||
"hquTrUk4CS/4Y4odDw61dIqE2UZCxJYui9y4fTkptjNWmTaytw3LpGkt4Yx+AIcB+Oc7c7IPjTZEvR6L" +
|
||||
"5lK9WqfZmrS/Y+Tgflz6W79rpgUb2CyfqLUX0Hxohw5/Zp197y4XhOwou/f+Vaju3j/Gt1WBAbWrKxpK" +
|
||||
"AROVesfqT/H7Y/iOJ6jkPt5rqrLosStbGMpPUNNGRY0a8F1HBAUUzjTrRAE6CGZAPgBbcloYFc1zUsxP" +
|
||||
"LcZAgMBAAGjggHRMIIBzTAOBgNVHQ8BAf8EBAMCBaAwTAYDVR0gBEUwQzBBBgkrBgEEAaAyAQEwNDAyB" +
|
||||
"ggrBgEFBQcCARYmaHR0cHM6Ly93d3cuZ2xvYmFsc2lnbi5jb20vcmVwb3NpdG9yeS8wNwYDVR0RBDAwL" +
|
||||
"oIXd3d3Lm94Zm9yZHBsYXlob3VzZS5jb22CE294Zm9yZHBsYXlob3VzZS5jb20wCQYDVR0TBAIwADAdB" +
|
||||
"gNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwPwYDVR0fBDgwNjA0oDKgMIYuaHR0cDovL2NybC5nb" +
|
||||
"G9iYWxzaWduLmNvbS9ncy9nc2V4dGVuZHZhbGcyLmNybDCBiAYIKwYBBQUHAQEEfDB6MEEGCCsGAQUFB" +
|
||||
"zAChjVodHRwOi8vc2VjdXJlLmdsb2JhbHNpZ24uY29tL2NhY2VydC9nc2V4dGVuZHZhbGcyLmNydDA1B" +
|
||||
"ggrBgEFBQcwAYYpaHR0cDovL29jc3AyLmdsb2JhbHNpZ24uY29tL2dzZXh0ZW5kdmFsZzIwHQYDVR0OB" +
|
||||
"BYEFNp+MVYdHILBfTE6JM8O6Ul+Xwx3MB8GA1UdIwQYMBaAFLCwSv0cdSj4HGGqE/b6wZA9axajMA0GC" +
|
||||
"SqGSIb3DQEBBQUAA4IBAQALHuvJlSvi3OqKwDiXBrsx0zb7DGGLAzwQCyr60iwJuc1S8SkWURlM0CKIq" +
|
||||
"0Qupj5vYIAY2g6gDWxdf/JFMh/Rxzv90JE/xZm9YlnMh2Evz3glLLQ5y2x1ddc0RU9YFoeOmJcgDOROI" +
|
||||
"8aQvhcn9Jdj1Yk7BkKhbQv/pM9ETqtSro3Xbv/qcwPTG/oRysMCrN/DUxedUr95dFjrS3zpo+6Hr7Jab" +
|
||||
"TcaAak40ksY+vHEQWbqm4YluJ4/c+6qfpsTTUih6//7xs92UxObeSMtWPaxySxedXekTPYrGt5X8XXPY" +
|
||||
"oTKJnuJrxlkEBv0K7wozbn5Km2dpOqCAaqbf8WKa3mvAAA=\",\"extra_data\":\"AAgjAARfMIIEW" +
|
||||
"zCCA0OgAwIBAgILBAAAAAABL07hW2MwDQYJKoZIhvcNAQEFBQAwTDEgMB4GA1UECxMXR2xvYmFsU2lnb" +
|
||||
"iBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNM" +
|
||||
"TEwNDEzMTAwMDAwWhcNMjIwNDEzMTAwMDAwWjBZMQswCQYDVQQGEwJCRTEZMBcGA1UEChMQR2xvYmFsU" +
|
||||
"2lnbiBudi1zYTEvMC0GA1UEAxMmR2xvYmFsU2lnbiBFeHRlbmRlZCBWYWxpZGF0aW9uIENBIC0gRzIwg" +
|
||||
"gEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDNoUbMUpq4pbR/WNnN2EugcgyXW6aIIMO5PUbc0" +
|
||||
"FxSMPb6WU+FX7DbiLSpXysjSKyr9ZJ4FLYyD/tcaoVbAJDgu2X1WvlPZ37HbCnsk8ArysRe2LDb1r4/m" +
|
||||
"wvAj6ldrvcAAqT8umYROHf+IyAlVRDFvYK5TLFoxuJwe4NcE2fBofN8C6iZmtDimyUxyCuNQPZSY7Ggr" +
|
||||
"Vou9Xk2bTUsDt0F5NDiB0i3KF4r1VjVbNAMoQFGAVqPxq9kx1UBXeHRxmxQJaAFrQCrDI1la93rwnJUy" +
|
||||
"Q88ABeHIu/buYZ4FlGud9mmKE3zWI2DZ7k0JZscUYBR84OSaqOuR5rW5IsbwO2xAgMBAAGjggEvMIIBK" +
|
||||
"zAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQUsLBK/Rx1KPgcYaoT9" +
|
||||
"vrBkD1rFqMwRwYDVR0gBEAwPjA8BgRVHSAAMDQwMgYIKwYBBQUHAgEWJmh0dHBzOi8vd3d3Lmdsb2Jhb" +
|
||||
"HNpZ24uY29tL3JlcG9zaXRvcnkvMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwuZ2xvYmFsc2lnb" +
|
||||
"i5uZXQvcm9vdC1yMi5jcmwwRAYIKwYBBQUHAQEEODA2MDQGCCsGAQUFBzABhihodHRwOi8vb2NzcC5nb" +
|
||||
"G9iYWxzaWduLmNvbS9FeHRlbmRlZFNTTENBMB8GA1UdIwQYMBaAFJviB1dnHB7AagbeWbSaLd/cGYYuM" +
|
||||
"A0GCSqGSIb3DQEBBQUAA4IBAQBfKJAMLekgsjB8iKtABfqxnVwik9WdyjUx+izqHZNZGcSgDfsJQDHaZ" +
|
||||
"FbNUr7nGGbobQmbstuUPu42RR4kVLYgBZO1MRq4ZFfm0ywBTDmWef63BJgS77cuWnf+R/N5mELdFr5ba" +
|
||||
"SvJJsgpaHfmrPZOkBMoZwTsciUf16cKUH84DnIYsSm4/66h1FS4Zk2g1c/T76kyKsWXYtKEzLCg2Jipy" +
|
||||
"jjkzEQ1b2EmsC6Ycvk4Mg20oWIKIWIV3rttkxA2UztKIXvC9b4u9gIT6a5McOkq9h/Di+Wf4I0qKOgZL" +
|
||||
"LNl3ffxb5c1ntuSNWOB1yfkK2Kq+mKhcZKMCha3PbVKZVsCAAO+MIIDujCCAqKgAwIBAgILBAAAAAABD" +
|
||||
"4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4GA1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARB" +
|
||||
"gNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExM" +
|
||||
"jE1MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEGA1UEChMKR2xvY" +
|
||||
"mFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBA" +
|
||||
"KbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPLv4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isU" +
|
||||
"oh7SqbKSaZeqKeMWhG8eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfX" +
|
||||
"klqtTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzdC9XZzPnqJworc" +
|
||||
"5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pazq+r1feqCapgvdzZX99yqWATXgABy" +
|
||||
"Ur6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/" +
|
||||
"zAdBgNVHQ4EFgQUm+IHV2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2Nyb" +
|
||||
"C5nbG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG3lm0mi3f3BmGL" +
|
||||
"jANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4GsJ0/WwbgcQ3izDJr86iw8bmEbTUsp9" +
|
||||
"Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiP" +
|
||||
"qFbQfXf5WRDLenVOavSot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMN" +
|
||||
"YxdAfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7TBj0/VLZjmmx6" +
|
||||
"BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg==\"}]}"
|
||||
|
||||
Entry0 = "AAAAAAE9pCDoYwAAAAOGMIIDgjCCAuugAwIBAgIKFIT5BQAA" +
|
||||
"AAB9PDANBgkqhkiG9w0BAQUFADBGMQswCQYDVQQGEwJVUzETMBEGA1UEChMKR29vZ2xlIEluYzEiMCAG" +
|
||||
"A1UEAxMZR29vZ2xlIEludGVybmV0IEF1dGhvcml0eTAeFw0xMzAyMjAxMzM0NTFaFw0xMzA2MDcxOTQz" +
|
||||
"MjdaMGkxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1Nb3VudGFpbiBW" +
|
||||
"aWV3MRMwEQYDVQQKEwpHb29nbGUgSW5jMRgwFgYDVQQDEw9tYWlsLmdvb2dsZS5jb20wgZ8wDQYJKoZI" +
|
||||
"hvcNAQEBBQADgY0AMIGJAoGBAOD1FbMyG0IT8JOi2El6RVciBJp4ENfTkpJ2vn/HUq+gjprmUNxLSvcK" +
|
||||
"+D8vBpkq8N41Qv+82PyTuZIB0pg2CJfs07C5+ZAQnwm01DiQjM/j2jKb5GegOBRYngbRkAPSGCufzJy+" +
|
||||
"QBWbd1htqceIREEI/JH7pUGgg90XUQgBddBbAgMBAAGjggFSMIIBTjAdBgNVHSUEFjAUBggrBgEFBQcD" +
|
||||
"AQYIKwYBBQUHAwIwHQYDVR0OBBYEFAgZmgKeyK8PXIGOAU+/5r/xNy5hMB8GA1UdIwQYMBaAFL/AMOv1" +
|
||||
"QxE+Z7qekfv8atrjaxIkMFsGA1UdHwRUMFIwUKBOoEyGSmh0dHA6Ly93d3cuZ3N0YXRpYy5jb20vR29v" +
|
||||
"Z2xlSW50ZXJuZXRBdXRob3JpdHkvR29vZ2xlSW50ZXJuZXRBdXRob3JpdHkuY3JsMGYGCCsGAQUFBwEB" +
|
||||
"BFowWDBWBggrBgEFBQcwAoZKaHR0cDovL3d3dy5nc3RhdGljLmNvbS9Hb29nbGVJbnRlcm5ldEF1dGhv" +
|
||||
"cml0eS9Hb29nbGVJbnRlcm5ldEF1dGhvcml0eS5jcnQwDAYDVR0TAQH/BAIwADAaBgNVHREEEzARgg9t" +
|
||||
"YWlsLmdvb2dsZS5jb20wDQYJKoZIhvcNAQEFBQADgYEAX0lVXCymPXGdCwvn2kpqJw5Q+Hf8gzGhxDG6" +
|
||||
"aMlO5wj2wf8qPWABDRwHdb4mdSmRMuwhzCJhE3PceXLNf3pOlR/Prt18mDY/r6cLwfldIXgTOYkw/uck" +
|
||||
"Gwvb0BwMsEi2FDE/T3d3SOo+lHvqPX9sOVa2uyA0wmIYnbT+5uQY6m0AAA=="
|
||||
|
||||
Entry1 = "AAAAAAE9pe0GcwAAAATWMIIE0jCCA7qgAwIBAgIDAPY6MA0GCS" +
|
||||
"qGSIb3DQEBBQUAMEAxCzAJBgNVBAYTAlVTMRcwFQYDVQQKEw5HZW9UcnVzdCwgSW5jLjEYMBYGA1UEAx" +
|
||||
"MPR2VvVHJ1c3QgU1NMIENBMB4XDTExMTAyMTExMDUwNloXDTEzMTEyMjA0MzI0N1owgc4xKTAnBgNVBA" +
|
||||
"UTIFRqbGZoUTB0cXp3WmtNa0svNXFNdGZqbjJ6aWRVNzRoMQswCQYDVQQGEwJVUzEXMBUGA1UECBMOU2" +
|
||||
"91dGggQ2Fyb2xpbmExEzARBgNVBAcTCkNoYXJsZXN0b24xFzAVBgNVBAoTDkJsYWNrYmF1ZCBJbmMuMR" +
|
||||
"AwDgYDVQQLEwdIb3N0aW5nMTswOQYDVQQDEzJ3d3cuc3RydWxlYXJ0c2NlbnRyZS5wdXJjaGFzZS10aW" +
|
||||
"NrZXRzLW9ubGluZS5jby51azCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJtkbcF8x3TtIA" +
|
||||
"RHC8BDRtoIAdh9HO9fo+5UUDtoc8f4xq7Rb2xbWOiEX29JqZOdsuucYTuYbbDf0uBYcJpkwhEg4Vg5sk" +
|
||||
"yfp0jAd6pXm1euQ+RiRShzEQYKJ8y4/IjZHttA/8HSzEKWJnuidsYrl/twFhlX5WIZq3BUVQ9GVqGe9n" +
|
||||
"1r2eIFTs6FxYUpaVzTkc6OLh1qSz+cnDDPigLUoUOK/KqN7ybmJxSefJw9WpFW/pIn6M0gFAbu0egFgD" +
|
||||
"ybQ3JwUAEh8ddzpKRCqGq1mdZAKpKFHcqmi5nG5aFD4p1NFmPjDVQXohXLQvwtmwwKS2Zo+tnulPnEe9" +
|
||||
"jjET/f+MUCAwEAAaOCAUQwggFAMB8GA1UdIwQYMBaAFEJ5VBthzVUrPmPVPEhX9Z/7Rc5KMA4GA1UdDw" +
|
||||
"EB/wQEAwIEsDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwPQYDVR0RBDYwNIIyd3d3LnN0cn" +
|
||||
"VsZWFydHNjZW50cmUucHVyY2hhc2UtdGlja2V0cy1vbmxpbmUuY28udWswPQYDVR0fBDYwNDAyoDCgLo" +
|
||||
"YsaHR0cDovL2d0c3NsLWNybC5nZW90cnVzdC5jb20vY3Jscy9ndHNzbC5jcmwwHQYDVR0OBBYEFDIdT1" +
|
||||
"lJ84lcDpGuBOuAXrP0AlBVMAwGA1UdEwEB/wQCMAAwQwYIKwYBBQUHAQEENzA1MDMGCCsGAQUFBzAChi" +
|
||||
"dodHRwOi8vZ3Rzc2wtYWlhLmdlb3RydXN0LmNvbS9ndHNzbC5jcnQwDQYJKoZIhvcNAQEFBQADggEBAF" +
|
||||
"hFfVTB5NWG3rVaq1jM72uGneGCjGk4qV4uKtEFn+zTJe9W2N/u8V2+mLvWQfDGPr8X5u8KzBOQ+fl6aR" +
|
||||
"xvI71EM3kjMu6UuJkUwXsoocK1c/iVBwWSpqem20t/2Z2n5oIN54QsKZX6tQd9JHQ95YwtlyC7H4VeDK" +
|
||||
"tJZ5x9UhJi8v35C+UgYPmiU5PdeoTdwxCf285FoQL9fBAPbv+EGek1XVaVg2yJKptG2OeM8AaynHsFcK" +
|
||||
"/OcZJtsiGhtu2s9F910OBpoU+lhnPylwxOf4k35JcLaqHJ3BbLUtybbduNqtf3+sYhkvp5IcCypoJy/R" +
|
||||
"k4fHgD8VTNiNWj7KGuHRYAAA=="
|
||||
|
||||
Entry2 = "AAAAAAE9pe0GcwAAAATjMIIE3zCCA8egAwIBAgIUCimKXmNJ+wiDS2zJvg6LC2cvr" +
|
||||
"vQwDQYJKoZIhvcNAQEFBQAwWjELMAkGA1UEBhMCSlAxIzAhBgNVBAoMGkN5YmVydHJ1c3QgSmFwYW4gQ" +
|
||||
"28uLCBMdGQuMSYwJAYDVQQDDB1DeWJlcnRydXN0IEphcGFuIFB1YmxpYyBDQSBHMjAeFw0xMjAzMTkwM" +
|
||||
"zE0MzNaFw0xNTAzMzExNDU5MDBaMIGKMQswCQYDVQQGEwJKUDEOMAwGA1UECBMFVG9reW8xEDAOBgNVB" +
|
||||
"AcTB0NodW8ta3UxHjAcBgNVBAoTFU5ldCBEcmVhbWVycyBDby4sTHRkLjEeMBwGA1UECxMVTWVnYSBNZ" +
|
||||
"WRpYSBEZXBhcnRtZW50MRkwFwYDVQQDExB3d3cubmV0a2VpYmEuY29tMIIBIjANBgkqhkiG9w0BAQEFA" +
|
||||
"AOCAQ8AMIIBCgKCAQEA2to03F4GdlRiGljXrSmT08/WrY59UWaoe/H4wQN6S5eQKVtaLjBWUF5Ro4sm/" +
|
||||
"kND7aufyDqXUePxiZkphupV+VO7PeKp9e5yqEijK4z2XoFQhrCH5kkn1GDrTNzonxyAQtiBJ/k6gVTJV" +
|
||||
"5fn4s7I6bZ2aXiJLIlTCFwMDNkrB3fj9py86WwymXaypSHkmo9Sx6PFiIOwPH6vXRK4UyAfFpXPiLGJE" +
|
||||
"NEWOY2AtzMJiIoupgAuyvmoY0G0Vk34mA9gOIOrKE2QmVSR3AtA31UpNZ33qvimfz96rHtCeiZj5HNxZ" +
|
||||
"RBMGBsHTlu5e49xypiYCCV41jQvmfZOShan3R3o2QIDAQABo4IBajCCAWYwCQYDVR0TBAIwADCBuAYDV" +
|
||||
"R0gBIGwMIGtMIGqBggqgwiMmxEBATCBnTBXBggrBgEFBQcCAjBLGklGb3IgbW9yZSBkZXRhaWxzLCBwb" +
|
||||
"GVhc2UgdmlzaXQgb3VyIHdlYnNpdGUgaHR0cHM6Ly93d3cuY3liZXJ0cnVzdC5uZS5qcCAuMEIGCCsGA" +
|
||||
"QUFBwIBFjZodHRwczovL3d3dy5jeWJlcnRydXN0Lm5lLmpwL3NzbC9yZXBvc2l0b3J5L2luZGV4Lmh0b" +
|
||||
"WwwGwYDVR0RBBQwEoIQd3d3Lm5ldGtlaWJhLmNvbTALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBB" +
|
||||
"QUHAwEGCCsGAQUFBwMCMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly9zdXJlc2VyaWVzLWNybC5jeWJlc" +
|
||||
"nRydXN0Lm5lLmpwL1N1cmVTZXJ2ZXIvY3RqcHViY2FnMi9jZHAuY3JsMA0GCSqGSIb3DQEBBQUAA4IBA" +
|
||||
"QAw8sXP2ecKp5QGXtzcxKwkkznqocaddzoG69atcyzwshySLfo0ElMHP5WG9TpVrb6XSh2a1edwduAWB" +
|
||||
"VAHQsHi4bt4wX9e9DBMnQx/jelcJevABQsXJPGc86diisXYDkHKQesi+8CvWvE0GmbVJRoq0RDo14WAS" +
|
||||
"QszuqTNW993walCzNTg88s7MniFgmgFd8n31SVls6QhY2Fmlr13JLDtzVDQDbj6MCPuwG8DdmR1bCM/u" +
|
||||
"gcnk0a7ZVy3d4yTjdhKpocToFklhHtHg0AINghPXIqU0njjUsy3ujNYIYo1TaZ3835Bo0lDwdvKK68Jk" +
|
||||
"a24Cfcm+vfUfHKB56sIzquxAAA="
|
||||
|
||||
Entry3 = "AAAAAAE9pe0GdAAAAAWmMIIFojCCBIqgAwIBAgISESE1Pz3s7WxTnxbUXmwjh7Q" +
|
||||
"hMA0GCSqGSIb3DQEBBQUAMFkxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMS8" +
|
||||
"wLQYDVQQDEyZHbG9iYWxTaWduIEV4dGVuZGVkIFZhbGlkYXRpb24gQ0EgLSBHMjAeFw0xMTEwMTAxNDE" +
|
||||
"2MzdaFw0xMzEwMTAxNDE2MzdaMIHpMR0wGwYDVQQPDBRQcml2YXRlIE9yZ2FuaXphdGlvbjERMA8GA1U" +
|
||||
"EBRMIMDIzOTczNzMxEzARBgsrBgEEAYI3PAIBAxMCR0IxCzAJBgNVBAYTAkdCMRQwEgYDVQQIEwtPeGZ" +
|
||||
"vcmRzaGlyZTEPMA0GA1UEBxMGT3hmb3JkMRgwFgYDVQQJEw9CZWF1bW9udCBTdHJlZXQxCzAJBgNVBAs" +
|
||||
"TAklUMSMwIQYDVQQKExpUaGUgT3hmb3JkIFBsYXlob3VzZSBUcnVzdDEgMB4GA1UEAxMXd3d3Lm94Zm9" +
|
||||
"yZHBsYXlob3VzZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC2VgUJx+QIlEn4vMq" +
|
||||
"5YajmJEk1Lv5Kwc95oqEb2EbQMVhCJct0OA0wKJbnFGaNIo5DJHIouuz98JoHixMB54EwZi5I64wvqyq" +
|
||||
"1ohquTrUk4CS/4Y4odDw61dIqE2UZCxJYui9y4fTkptjNWmTaytw3LpGkt4Yx+AIcB+Oc7c7IPjTZEvR" +
|
||||
"6L5lK9WqfZmrS/Y+Tgflz6W79rpgUb2CyfqLUX0Hxohw5/Zp197y4XhOwou/f+Vaju3j/Gt1WBAbWrKx" +
|
||||
"pKAROVesfqT/H7Y/iOJ6jkPt5rqrLosStbGMpPUNNGRY0a8F1HBAUUzjTrRAE6CGZAPgBbcloYFc1zUs" +
|
||||
"xPLcZAgMBAAGjggHRMIIBzTAOBgNVHQ8BAf8EBAMCBaAwTAYDVR0gBEUwQzBBBgkrBgEEAaAyAQEwNDA" +
|
||||
"yBggrBgEFBQcCARYmaHR0cHM6Ly93d3cuZ2xvYmFsc2lnbi5jb20vcmVwb3NpdG9yeS8wNwYDVR0RBDA" +
|
||||
"wLoIXd3d3Lm94Zm9yZHBsYXlob3VzZS5jb22CE294Zm9yZHBsYXlob3VzZS5jb20wCQYDVR0TBAIwADA" +
|
||||
"dBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwPwYDVR0fBDgwNjA0oDKgMIYuaHR0cDovL2NybC5" +
|
||||
"nbG9iYWxzaWduLmNvbS9ncy9nc2V4dGVuZHZhbGcyLmNybDCBiAYIKwYBBQUHAQEEfDB6MEEGCCsGAQU" +
|
||||
"FBzAChjVodHRwOi8vc2VjdXJlLmdsb2JhbHNpZ24uY29tL2NhY2VydC9nc2V4dGVuZHZhbGcyLmNydDA" +
|
||||
"1BggrBgEFBQcwAYYpaHR0cDovL29jc3AyLmdsb2JhbHNpZ24uY29tL2dzZXh0ZW5kdmFsZzIwHQYDVR0" +
|
||||
"OBBYEFNp+MVYdHILBfTE6JM8O6Ul+Xwx3MB8GA1UdIwQYMBaAFLCwSv0cdSj4HGGqE/b6wZA9axajMA0" +
|
||||
"GCSqGSIb3DQEBBQUAA4IBAQALHuvJlSvi3OqKwDiXBrsx0zb7DGGLAzwQCyr60iwJuc1S8SkWURlM0CK" +
|
||||
"Iq0Qupj5vYIAY2g6gDWxdf/JFMh/Rxzv90JE/xZm9YlnMh2Evz3glLLQ5y2x1ddc0RU9YFoeOmJcgDOR" +
|
||||
"OI8aQvhcn9Jdj1Yk7BkKhbQv/pM9ETqtSro3Xbv/qcwPTG/oRysMCrN/DUxedUr95dFjrS3zpo+6Hr7J" +
|
||||
"abTcaAak40ksY+vHEQWbqm4YluJ4/c+6qfpsTTUih6//7xs92UxObeSMtWPaxySxedXekTPYrGt5X8XX" +
|
||||
"PYoTKJnuJrxlkEBv0K7wozbn5Km2dpOqCAaqbf8WKa3mvAAA="
|
||||
)
|
||||
|
||||
func makeParent(a []byte, b []byte) [sha256.Size]byte {
|
||||
if len(a) != len(b) {
|
||||
log.Fatalf("a & b are different lengths: %d vs %d", len(a), len(b))
|
||||
}
|
||||
if len(a) != sha256.Size {
|
||||
log.Fatalf("a & b incorrect length for Sha256 hash")
|
||||
}
|
||||
var r [sha256.Size * 2]byte
|
||||
// concatenate the two child hashes (32 bytes each) before hashing
copy(r[0:sha256.Size], a)
copy(r[sha256.Size:], b)
|
||||
return sha256.Sum256(r[:])
|
||||
}
|
||||
|
||||
func CalcRootHash() {
|
||||
e0, err := base64.StdEncoding.DecodeString(Entry0)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
h0 := sha256.Sum256(e0)
|
||||
e1, err := base64.StdEncoding.DecodeString(Entry1)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
h1 := sha256.Sum256(e1)
|
||||
e2, err := base64.StdEncoding.DecodeString(Entry2)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
h2 := sha256.Sum256(e2)
|
||||
e3, err := base64.StdEncoding.DecodeString(Entry3)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
h3 := sha256.Sum256(e3)
|
||||
|
||||
hash01 := makeParent(h0[:], h1[:])
|
||||
hash23 := makeParent(h2[:], h3[:])
|
||||
root := makeParent(hash01[:], hash23[:])
|
||||
log.Println(base64.StdEncoding.EncodeToString(root[:]))
|
||||
}
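// Illustrative sketch, not part of the original tool: the same pairwise
// hashing generalizes to any power-of-two number of leaf hashes by folding
// each level with makeParent until a single root remains.
func calcRootFromLeafHashes(hashes [][sha256.Size]byte) [sha256.Size]byte {
	for len(hashes) > 1 {
		next := make([][sha256.Size]byte, 0, len(hashes)/2)
		for i := 0; i < len(hashes); i += 2 {
			next = append(next, makeParent(hashes[i][:], hashes[i+1][:]))
		}
		hashes = next
	}
	return hashes[0]
}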
|
||||
463 Godeps/_workspace/src/github.com/google/certificate-transparency/go/serialization.go generated vendored Normal file
@@ -0,0 +1,463 @@
package ct
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"container/list"
|
||||
"crypto"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Variable size structure prefix-header byte lengths
|
||||
const (
|
||||
CertificateLengthBytes = 3
|
||||
PreCertificateLengthBytes = 3
|
||||
ExtensionsLengthBytes = 2
|
||||
CertificateChainLengthBytes = 3
|
||||
SignatureLengthBytes = 2
|
||||
)
|
||||
|
||||
// Max lengths
|
||||
const (
|
||||
MaxCertificateLength = (1 << 24) - 1
|
||||
MaxExtensionsLength = (1 << 16) - 1
|
||||
)
|
||||
|
||||
func writeUint(w io.Writer, value uint64, numBytes int) error {
|
||||
buf := make([]uint8, numBytes)
|
||||
for i := 0; i < numBytes; i++ {
|
||||
buf[numBytes-i-1] = uint8(value & 0xff)
|
||||
value >>= 8
|
||||
}
|
||||
if value != 0 {
|
||||
return errors.New("numBytes was insufficiently large to represent value")
|
||||
}
|
||||
if _, err := w.Write(buf); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeVarBytes(w io.Writer, value []byte, numLenBytes int) error {
|
||||
if err := writeUint(w, uint64(len(value)), numLenBytes); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write(value); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func readUint(r io.Reader, numBytes int) (uint64, error) {
|
||||
var l uint64
|
||||
for i := 0; i < numBytes; i++ {
|
||||
l <<= 8
|
||||
var t uint8
|
||||
if err := binary.Read(r, binary.BigEndian, &t); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
l |= uint64(t)
|
||||
}
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// Reads a variable length array of bytes from |r|. |numLenBytes| specifies the
|
||||
// number of (BigEndian) prefix-bytes which contain the length of the actual
|
||||
// array data bytes that follow.
|
||||
// Allocates an array to hold the contents and returns a slice view into it if
|
||||
// the read was successful, or an error otherwise.
|
||||
func readVarBytes(r io.Reader, numLenBytes int) ([]byte, error) {
|
||||
switch {
|
||||
case numLenBytes > 8:
|
||||
return nil, fmt.Errorf("numLenBytes too large (%d)", numLenBytes)
|
||||
case numLenBytes == 0:
|
||||
return nil, errors.New("numLenBytes should be > 0")
|
||||
}
|
||||
l, err := readUint(r, numLenBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
data := make([]byte, l)
|
||||
n, err := r.Read(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if n != int(l) {
|
||||
return nil, fmt.Errorf("short read: expected %d but got %d", l, n)
|
||||
}
|
||||
return data, nil
|
||||
}
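// Usage sketch (not upstream code): writeVarBytes and readVarBytes round-trip
// a length-prefixed byte string; with a 2-byte prefix, "hello" is encoded as
// 0x00 0x05 followed by the five data bytes.
func exampleVarBytesRoundTrip() ([]byte, error) {
	var buf bytes.Buffer
	if err := writeVarBytes(&buf, []byte("hello"), ExtensionsLengthBytes); err != nil {
		return nil, err
	}
	return readVarBytes(&buf, ExtensionsLengthBytes)
}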
|
||||
|
||||
// Reads a list of ASN1Cert types from |r|
|
||||
func readASN1CertList(r io.Reader, totalLenBytes int, elementLenBytes int) ([]ASN1Cert, error) {
|
||||
listBytes, err := readVarBytes(r, totalLenBytes)
|
||||
if err != nil {
|
||||
return []ASN1Cert{}, err
|
||||
}
|
||||
list := list.New()
|
||||
listReader := bytes.NewReader(listBytes)
|
||||
var entry []byte
|
||||
for err == nil {
|
||||
entry, err = readVarBytes(listReader, elementLenBytes)
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
return []ASN1Cert{}, err
|
||||
}
|
||||
} else {
|
||||
list.PushBack(entry)
|
||||
}
|
||||
}
|
||||
ret := make([]ASN1Cert, list.Len())
|
||||
i := 0
|
||||
for e := list.Front(); e != nil; e = e.Next() {
|
||||
ret[i] = e.Value.([]byte)
|
||||
i++
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// ReadTimestampedEntryInto parses the byte-stream representation of a
|
||||
// TimestampedEntry from |r| and populates the struct |t| with the data. See
|
||||
// RFC section 3.4 for details on the format.
|
||||
// Returns a non-nil error if there was a problem.
|
||||
func ReadTimestampedEntryInto(r io.Reader, t *TimestampedEntry) error {
|
||||
var err error
|
||||
if err = binary.Read(r, binary.BigEndian, &t.Timestamp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = binary.Read(r, binary.BigEndian, &t.EntryType); err != nil {
|
||||
return err
|
||||
}
|
||||
switch t.EntryType {
|
||||
case X509LogEntryType:
|
||||
if t.X509Entry, err = readVarBytes(r, CertificateLengthBytes); err != nil {
|
||||
return err
|
||||
}
|
||||
case PrecertLogEntryType:
|
||||
if err := binary.Read(r, binary.BigEndian, &t.PrecertEntry.IssuerKeyHash); err != nil {
|
||||
return err
|
||||
}
|
||||
if t.PrecertEntry.TBSCertificate, err = readVarBytes(r, PreCertificateLengthBytes); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unknown EntryType: %d", t.EntryType)
|
||||
}
|
||||
if t.Extensions, err = readVarBytes(r, ExtensionsLengthBytes); err != nil {
return err
}
return nil
|
||||
}
|
||||
|
||||
// ReadMerkleTreeLeaf parses the byte-stream representation of a MerkleTreeLeaf
|
||||
// and returns a pointer to a new MerkleTreeLeaf structure containing the
|
||||
// parsed data.
|
||||
// See RFC section 3.4 for details on the format.
|
||||
// Returns a pointer to a new MerkleTreeLeaf or non-nil error if there was a
|
||||
// problem
|
||||
func ReadMerkleTreeLeaf(r io.Reader) (*MerkleTreeLeaf, error) {
|
||||
var m MerkleTreeLeaf
|
||||
if err := binary.Read(r, binary.BigEndian, &m.Version); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if m.Version != V1 {
|
||||
return nil, fmt.Errorf("unknown Version %d", m.Version)
|
||||
}
|
||||
if err := binary.Read(r, binary.BigEndian, &m.LeafType); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if m.LeafType != TimestampedEntryLeafType {
|
||||
return nil, fmt.Errorf("unknown LeafType %d", m.LeafType)
|
||||
}
|
||||
if err := ReadTimestampedEntryInto(r, &m.TimestampedEntry); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &m, nil
|
||||
}
|
||||
|
||||
// UnmarshalX509ChainArray unmarshalls the contents of the "chain:" entry in a
|
||||
// GetEntries response in the case where the entry refers to an X509 leaf.
|
||||
func UnmarshalX509ChainArray(b []byte) ([]ASN1Cert, error) {
|
||||
return readASN1CertList(bytes.NewReader(b), CertificateChainLengthBytes, CertificateLengthBytes)
|
||||
}
|
||||
|
||||
// UnmarshalPrecertChainArray unmarshalls the contents of the "chain:" entry in
|
||||
// a GetEntries response in the case where the entry refers to a Precertificate
|
||||
// leaf.
|
||||
func UnmarshalPrecertChainArray(b []byte) ([]ASN1Cert, error) {
|
||||
var chain []ASN1Cert
|
||||
|
||||
reader := bytes.NewReader(b)
|
||||
// read the pre-cert entry:
|
||||
precert, err := readVarBytes(reader, CertificateLengthBytes)
|
||||
if err != nil {
|
||||
return chain, err
|
||||
}
|
||||
chain = append(chain, precert)
|
||||
// and then read and return the chain up to the root:
|
||||
remainingChain, err := readASN1CertList(reader, CertificateChainLengthBytes, CertificateLengthBytes)
|
||||
if err != nil {
|
||||
return chain, err
|
||||
}
|
||||
chain = append(chain, remainingChain...)
|
||||
return chain, nil
|
||||
}
|
||||
|
||||
// UnmarshalDigitallySigned reconstructs a DigitallySigned structure from a Reader
|
||||
func UnmarshalDigitallySigned(r io.Reader) (*DigitallySigned, error) {
|
||||
var h byte
|
||||
if err := binary.Read(r, binary.BigEndian, &h); err != nil {
|
||||
return nil, fmt.Errorf("failed to read HashAlgorithm: %v", err)
|
||||
}
|
||||
|
||||
var s byte
|
||||
if err := binary.Read(r, binary.BigEndian, &s); err != nil {
|
||||
return nil, fmt.Errorf("failed to read SignatureAlgorithm: %v", err)
|
||||
}
|
||||
|
||||
sig, err := readVarBytes(r, SignatureLengthBytes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read Signature bytes: %v", err)
|
||||
}
|
||||
|
||||
return &DigitallySigned{
|
||||
HashAlgorithm: HashAlgorithm(h),
|
||||
SignatureAlgorithm: SignatureAlgorithm(s),
|
||||
Signature: sig,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// MarshalDigitallySigned marshalls a DigitallySigned structure into a byte array
|
||||
func MarshalDigitallySigned(ds DigitallySigned) ([]byte, error) {
|
||||
var b bytes.Buffer
|
||||
if err := b.WriteByte(byte(ds.HashAlgorithm)); err != nil {
|
||||
return nil, fmt.Errorf("failed to write HashAlgorithm: %v", err)
|
||||
}
|
||||
if err := b.WriteByte(byte(ds.SignatureAlgorithm)); err != nil {
|
||||
return nil, fmt.Errorf("failed to write SignatureAlgorithm: %v", err)
|
||||
}
|
||||
if err := writeVarBytes(&b, ds.Signature, SignatureLengthBytes); err != nil {
|
||||
return nil, fmt.Errorf("failed to write HashAlgorithm: %v", err)
|
||||
}
|
||||
return b.Bytes(), nil
|
||||
}
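// Round-trip sketch (not upstream code): a DigitallySigned value survives
// MarshalDigitallySigned followed by UnmarshalDigitallySigned. The signature
// bytes here are an arbitrary placeholder, not a real signature.
func exampleDigitallySignedRoundTrip() (*DigitallySigned, error) {
	ds := DigitallySigned{
		HashAlgorithm:      SHA256,
		SignatureAlgorithm: ECDSA,
		Signature:          []byte{0x01, 0x02, 0x03},
	}
	b, err := MarshalDigitallySigned(ds)
	if err != nil {
		return nil, err
	}
	return UnmarshalDigitallySigned(bytes.NewReader(b))
}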
|
||||
|
||||
func checkCertificateFormat(cert ASN1Cert) error {
|
||||
if len(cert) == 0 {
|
||||
return errors.New("certificate is zero length")
|
||||
}
|
||||
if len(cert) > MaxCertificateLength {
|
||||
return errors.New("certificate too large")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkExtensionsFormat(ext CTExtensions) error {
|
||||
if len(ext) > MaxExtensionsLength {
|
||||
return errors.New("extensions too large")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func serializeV1CertSCTSignatureInput(timestamp uint64, cert ASN1Cert, ext CTExtensions) ([]byte, error) {
|
||||
if err := checkCertificateFormat(cert); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := checkExtensionsFormat(ext); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if err := binary.Write(&buf, binary.BigEndian, V1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Write(&buf, binary.BigEndian, CertificateTimestampSignatureType); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Write(&buf, binary.BigEndian, timestamp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Write(&buf, binary.BigEndian, X509LogEntryType); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := writeVarBytes(&buf, cert, CertificateLengthBytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := writeVarBytes(&buf, ext, ExtensionsLengthBytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func serializeV1PrecertSCTSignatureInput(timestamp uint64, issuerKeyHash [issuerKeyHashLength]byte, tbs []byte, ext CTExtensions) ([]byte, error) {
|
||||
if err := checkCertificateFormat(tbs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := checkExtensionsFormat(ext); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if err := binary.Write(&buf, binary.BigEndian, V1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Write(&buf, binary.BigEndian, CertificateTimestampSignatureType); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Write(&buf, binary.BigEndian, timestamp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Write(&buf, binary.BigEndian, PrecertLogEntryType); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, err := buf.Write(issuerKeyHash[:]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := writeVarBytes(&buf, tbs, CertificateLengthBytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := writeVarBytes(&buf, ext, ExtensionsLengthBytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func serializeV1SCTSignatureInput(sct SignedCertificateTimestamp, entry LogEntry) ([]byte, error) {
|
||||
if sct.SCTVersion != V1 {
|
||||
return nil, fmt.Errorf("unsupported SCT version, expected V1, but got %s", sct.SCTVersion)
|
||||
}
|
||||
if entry.Leaf.LeafType != TimestampedEntryLeafType {
|
||||
return nil, fmt.Errorf("Unsupported leaf type %s", entry.Leaf.LeafType)
|
||||
}
|
||||
switch entry.Leaf.TimestampedEntry.EntryType {
|
||||
case X509LogEntryType:
|
||||
return serializeV1CertSCTSignatureInput(sct.Timestamp, entry.Leaf.TimestampedEntry.X509Entry, entry.Leaf.TimestampedEntry.Extensions)
|
||||
case PrecertLogEntryType:
|
||||
return serializeV1PrecertSCTSignatureInput(sct.Timestamp, entry.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash,
|
||||
entry.Leaf.TimestampedEntry.PrecertEntry.TBSCertificate,
|
||||
entry.Leaf.TimestampedEntry.Extensions)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown TimestampedEntryLeafType %s", entry.Leaf.TimestampedEntry.EntryType)
|
||||
}
|
||||
}
|
||||
|
||||
// SerializeSCTSignatureInput serializes the passed in sct and log entry into
|
||||
// the correct format for signing.
|
||||
func SerializeSCTSignatureInput(sct SignedCertificateTimestamp, entry LogEntry) ([]byte, error) {
|
||||
switch sct.SCTVersion {
|
||||
case V1:
|
||||
return serializeV1SCTSignatureInput(sct, entry)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion)
|
||||
}
|
||||
}
|
||||
|
||||
func serializeV1SCT(sct SignedCertificateTimestamp) ([]byte, error) {
|
||||
if err := checkExtensionsFormat(sct.Extensions); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if err := binary.Write(&buf, binary.BigEndian, V1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Write(&buf, binary.BigEndian, sct.LogID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Write(&buf, binary.BigEndian, sct.Timestamp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := writeVarBytes(&buf, sct.Extensions, ExtensionsLengthBytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sig, err := MarshalDigitallySigned(sct.Signature)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Write(&buf, binary.BigEndian, sig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// SerializeSCT serializes the passed in sct into the format specified
|
||||
// by RFC6962 section 3.2
|
||||
func SerializeSCT(sct SignedCertificateTimestamp) ([]byte, error) {
|
||||
switch sct.SCTVersion {
|
||||
case V1:
|
||||
return serializeV1SCT(sct)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion)
|
||||
}
|
||||
}
|
||||
|
||||
func deserializeSCTV1(r io.Reader, sct *SignedCertificateTimestamp) error {
|
||||
if err := binary.Read(r, binary.BigEndian, &sct.LogID); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := binary.Read(r, binary.BigEndian, &sct.Timestamp); err != nil {
|
||||
return err
|
||||
}
|
||||
ext, err := readVarBytes(r, ExtensionsLengthBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sct.Extensions = ext
|
||||
ds, err := UnmarshalDigitallySigned(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sct.Signature = *ds
|
||||
return nil
|
||||
}
|
||||
|
||||
func DeserializeSCT(r io.Reader) (*SignedCertificateTimestamp, error) {
|
||||
var sct SignedCertificateTimestamp
|
||||
if err := binary.Read(r, binary.BigEndian, &sct.SCTVersion); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch sct.SCTVersion {
|
||||
case V1:
|
||||
return &sct, deserializeSCTV1(r, &sct)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion)
|
||||
}
|
||||
}
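// Round-trip sketch (not upstream code): SerializeSCT and DeserializeSCT are
// inverses for a V1 SCT. LogID is left as its zero value and the signature
// bytes are placeholders.
func exampleSCTRoundTrip() (*SignedCertificateTimestamp, error) {
	sct := SignedCertificateTimestamp{
		SCTVersion: V1,
		Timestamp:  1423235200000,
		Signature: DigitallySigned{
			HashAlgorithm:      SHA256,
			SignatureAlgorithm: ECDSA,
			Signature:          []byte{0x01, 0x02, 0x03},
		},
	}
	b, err := SerializeSCT(sct)
	if err != nil {
		return nil, err
	}
	return DeserializeSCT(bytes.NewReader(b))
}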
|
||||
|
||||
func serializeV1STHSignatureInput(sth SignedTreeHead) ([]byte, error) {
|
||||
if sth.Version != V1 {
|
||||
return nil, fmt.Errorf("invalid STH version %d", sth.Version)
|
||||
}
|
||||
if sth.TreeSize < 0 {
|
||||
return nil, fmt.Errorf("invalid tree size %d", sth.TreeSize)
|
||||
}
|
||||
if len(sth.SHA256RootHash) != crypto.SHA256.Size() {
|
||||
return nil, fmt.Errorf("invalid TreeHash length, got %d expected %d", len(sth.SHA256RootHash), crypto.SHA256.Size())
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := binary.Write(&buf, binary.BigEndian, V1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Write(&buf, binary.BigEndian, TreeHashSignatureType); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Write(&buf, binary.BigEndian, sth.Timestamp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Write(&buf, binary.BigEndian, sth.TreeSize); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Write(&buf, binary.BigEndian, sth.SHA256RootHash); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// SerializeSTHSignatureInput serializes the passed in sth into the correct
|
||||
// format for signing.
|
||||
func SerializeSTHSignatureInput(sth SignedTreeHead) ([]byte, error) {
|
||||
switch sth.Version {
|
||||
case V1:
|
||||
return serializeV1STHSignatureInput(sth)
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported STH version %d", sth.Version)
|
||||
}
|
||||
}
|
||||
131 Godeps/_workspace/src/github.com/google/certificate-transparency/go/signatures.go generated vendored Normal file
@@ -0,0 +1,131 @@
package ct
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rsa"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
"encoding/asn1"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
var allowVerificationWithNonCompliantKeys = flag.Bool("allow_verification_with_non_compliant_keys", false,
|
||||
"Allow a SignatureVerifier to use keys which are technically non-compliant with RFC6962.")
|
||||
|
||||
// PublicKeyFromPEM parses a PEM formatted block and returns the public key contained within and any remaining unread bytes, or an error.
|
||||
func PublicKeyFromPEM(b []byte) (crypto.PublicKey, SHA256Hash, []byte, error) {
|
||||
p, rest := pem.Decode(b)
|
||||
if p == nil {
|
||||
return nil, [sha256.Size]byte{}, rest, fmt.Errorf("no PEM block found in %s", string(b))
|
||||
}
|
||||
k, err := x509.ParsePKIXPublicKey(p.Bytes)
|
||||
return k, sha256.Sum256(p.Bytes), rest, err
|
||||
}
|
||||
|
||||
// SignatureVerifier can verify signatures on SCTs and STHs
|
||||
type SignatureVerifier struct {
|
||||
pubKey crypto.PublicKey
|
||||
}
|
||||
|
||||
// NewSignatureVerifier creates a new SignatureVerifier using the passed in PublicKey.
|
||||
func NewSignatureVerifier(pk crypto.PublicKey) (*SignatureVerifier, error) {
|
||||
switch pkType := pk.(type) {
|
||||
case *rsa.PublicKey:
|
||||
if pkType.N.BitLen() < 2048 {
|
||||
e := fmt.Errorf("public key is RSA with < 2048 bits (size:%d)", pkType.N.BitLen())
|
||||
if !(*allowVerificationWithNonCompliantKeys) {
|
||||
return nil, e
|
||||
}
|
||||
log.Printf("WARNING: %v", e)
|
||||
}
|
||||
case *ecdsa.PublicKey:
|
||||
params := *(pkType.Params())
|
||||
if params != *elliptic.P256().Params() {
|
||||
e := fmt.Errorf("public is ECDSA, but not on the P256 curve")
|
||||
if !(*allowVerificationWithNonCompliantKeys) {
|
||||
return nil, e
|
||||
}
|
||||
log.Printf("WARNING: %v", e)
|
||||
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("Unsupported public key type %v", pkType)
|
||||
}
|
||||
|
||||
return &SignatureVerifier{
|
||||
pubKey: pk,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// verifySignature verifies that the passed in signature over data was created by our PublicKey.
|
||||
// Currently, only SHA256 is supported as a HashAlgorithm, and only ECDSA and RSA signatures are supported.
|
||||
func (s SignatureVerifier) verifySignature(data []byte, sig DigitallySigned) error {
|
||||
if sig.HashAlgorithm != SHA256 {
|
||||
return fmt.Errorf("unsupported HashAlgorithm in signature: %v", sig.HashAlgorithm)
|
||||
}
|
||||
|
||||
hasherType := crypto.SHA256
|
||||
hasher := hasherType.New()
|
||||
if _, err := hasher.Write(data); err != nil {
|
||||
return fmt.Errorf("failed to write to hasher: %v", err)
|
||||
}
|
||||
hash := hasher.Sum([]byte{})
|
||||
|
||||
switch sig.SignatureAlgorithm {
|
||||
case RSA:
|
||||
rsaKey, ok := s.pubKey.(*rsa.PublicKey)
|
||||
if !ok {
|
||||
return fmt.Errorf("cannot verify RSA signature with %T key", s.pubKey)
|
||||
}
|
||||
if err := rsa.VerifyPKCS1v15(rsaKey, hasherType, hash, sig.Signature); err != nil {
|
||||
return fmt.Errorf("failed to verify rsa signature: %v", err)
|
||||
}
|
||||
case ECDSA:
|
||||
ecdsaKey, ok := s.pubKey.(*ecdsa.PublicKey)
|
||||
if !ok {
|
||||
return fmt.Errorf("cannot verify ECDSA signature with %T key", s.pubKey)
|
||||
}
|
||||
var ecdsaSig struct {
|
||||
R, S *big.Int
|
||||
}
|
||||
rest, err := asn1.Unmarshal(sig.Signature, &ecdsaSig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unmarshal ECDSA signature: %v", err)
|
||||
}
|
||||
if len(rest) != 0 {
|
||||
log.Printf("Garbage following signature %v", rest)
|
||||
}
|
||||
|
||||
if !ecdsa.Verify(ecdsaKey, hash, ecdsaSig.R, ecdsaSig.S) {
|
||||
return errors.New("failed to verify ecdsa signature")
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unsupported signature type %v", sig.SignatureAlgorithm)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// VerifySCTSignature verifies that the SCT's signature is valid for the given LogEntry
|
||||
func (s SignatureVerifier) VerifySCTSignature(sct SignedCertificateTimestamp, entry LogEntry) error {
|
||||
sctData, err := SerializeSCTSignatureInput(sct, entry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return s.verifySignature(sctData, sct.Signature)
|
||||
}
|
||||
|
||||
// VerifySTHSignature verifies that the STH's signature is valid.
|
||||
func (s SignatureVerifier) VerifySTHSignature(sth SignedTreeHead) error {
|
||||
sthData, err := SerializeSTHSignatureInput(sth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return s.verifySignature(sthData, sth.TreeHeadSignature)
|
||||
}
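// End-to-end sketch (not upstream code): verify a log's STH signature given
// the log's PEM-encoded public key. The key and STH are caller-supplied;
// nothing here is tied to a particular log.
func exampleVerifySTH(logPubKeyPEM []byte, sth SignedTreeHead) error {
	pk, _, _, err := PublicKeyFromPEM(logPubKeyPEM)
	if err != nil {
		return err
	}
	v, err := NewSignatureVerifier(pk)
	if err != nil {
		return err
	}
	return v.VerifySTHSignature(sth)
}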
|
||||
344 Godeps/_workspace/src/github.com/google/certificate-transparency/go/types.go generated vendored Normal file
@@ -0,0 +1,344 @@
package ct
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509"
|
||||
)
|
||||
|
||||
const (
|
||||
issuerKeyHashLength = 32
|
||||
)
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// The following structures represent those outlined in the RFC6962 document:
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// LogEntryType represents the LogEntryType enum from section 3.1 of the RFC:
|
||||
// enum { x509_entry(0), precert_entry(1), (65535) } LogEntryType;
|
||||
type LogEntryType uint16
|
||||
|
||||
func (e LogEntryType) String() string {
|
||||
switch e {
|
||||
case X509LogEntryType:
|
||||
return "X509LogEntryType"
|
||||
case PrecertLogEntryType:
|
||||
return "PrecertLogEntryType"
|
||||
}
|
||||
panic(fmt.Sprintf("No string defined for LogEntryType constant value %d", e))
|
||||
}
|
||||
|
||||
// LogEntryType constants, see section 3.1 of RFC6962.
|
||||
const (
|
||||
X509LogEntryType LogEntryType = 0
|
||||
PrecertLogEntryType LogEntryType = 1
|
||||
)
|
||||
|
||||
// MerkleLeafType represents the MerkleLeafType enum from section 3.4 of the
|
||||
// RFC: enum { timestamped_entry(0), (255) } MerkleLeafType;
|
||||
type MerkleLeafType uint8
|
||||
|
||||
func (m MerkleLeafType) String() string {
|
||||
switch m {
|
||||
case TimestampedEntryLeafType:
|
||||
return "TimestampedEntryLeafType"
|
||||
default:
|
||||
return fmt.Sprintf("UnknownLeafType(%d)", m)
|
||||
}
|
||||
}
|
||||
|
||||
// MerkleLeafType constants, see section 3.4 of the RFC.
|
||||
const (
|
||||
TimestampedEntryLeafType MerkleLeafType = 0 // Entry type for an SCT
|
||||
)
|
||||
|
||||
// Version represents the Version enum from section 3.2 of the RFC:
|
||||
// enum { v1(0), (255) } Version;
|
||||
type Version uint8
|
||||
|
||||
func (v Version) String() string {
|
||||
switch v {
|
||||
case V1:
|
||||
return "V1"
|
||||
default:
|
||||
return fmt.Sprintf("UnknownVersion(%d)", v)
|
||||
}
|
||||
}
|
||||
|
||||
// CT Version constants, see section 3.2 of the RFC.
|
||||
const (
|
||||
V1 Version = 0
|
||||
)
|
||||
|
||||
// SignatureType differentiates STH signatures from SCT signatures, see RFC
|
||||
// section 3.2
|
||||
type SignatureType uint8
|
||||
|
||||
func (st SignatureType) String() string {
|
||||
switch st {
|
||||
case CertificateTimestampSignatureType:
|
||||
return "CertificateTimestamp"
|
||||
case TreeHashSignatureType:
|
||||
return "TreeHash"
|
||||
default:
|
||||
return fmt.Sprintf("UnknownSignatureType(%d)", st)
|
||||
}
|
||||
}
|
||||
|
||||
// SignatureType constants, see RFC section 3.2
|
||||
const (
|
||||
CertificateTimestampSignatureType SignatureType = 0
|
||||
TreeHashSignatureType SignatureType = 1
|
||||
)
|
||||
|
||||
// ASN1Cert type for holding the raw DER bytes of an ASN.1 Certificate
|
||||
// (section 3.1)
|
||||
type ASN1Cert []byte
|
||||
|
||||
// PreCert represents a Precertificate (section 3.2)
|
||||
type PreCert struct {
|
||||
IssuerKeyHash [issuerKeyHashLength]byte
|
||||
TBSCertificate []byte
|
||||
}
|
||||
|
||||
// CTExtensions is a representation of the raw bytes of any CtExtension
|
||||
// structure (see section 3.2)
|
||||
type CTExtensions []byte
|
||||
|
||||
// MerkleTreeNode represents an internal node in the CT tree
|
||||
type MerkleTreeNode []byte
|
||||
|
||||
// ConsistencyProof represents a CT consistency proof (see sections 2.1.2 and
|
||||
// 4.4)
|
||||
type ConsistencyProof []MerkleTreeNode
|
||||
|
||||
// AuditPath represents a CT inclusion proof (see sections 2.1.1 and 4.5)
|
||||
type AuditPath []MerkleTreeNode
|
||||
|
||||
// LeafInput represents a serialized MerkleTreeLeaf structure
|
||||
type LeafInput []byte
|
||||
|
||||
// HashAlgorithm from the DigitallySigned struct
|
||||
type HashAlgorithm byte
|
||||
|
||||
// HashAlgorithm constants
|
||||
const (
|
||||
None HashAlgorithm = 0
|
||||
MD5 HashAlgorithm = 1
|
||||
SHA1 HashAlgorithm = 2
|
||||
SHA224 HashAlgorithm = 3
|
||||
SHA256 HashAlgorithm = 4
|
||||
SHA384 HashAlgorithm = 5
|
||||
SHA512 HashAlgorithm = 6
|
||||
)
|
||||
|
||||
func (h HashAlgorithm) String() string {
|
||||
switch h {
|
||||
case None:
|
||||
return "None"
|
||||
case MD5:
|
||||
return "MD5"
|
||||
case SHA1:
|
||||
return "SHA1"
|
||||
case SHA224:
|
||||
return "SHA224"
|
||||
case SHA256:
|
||||
return "SHA256"
|
||||
case SHA384:
|
||||
return "SHA384"
|
||||
case SHA512:
|
||||
return "SHA512"
|
||||
default:
|
||||
return fmt.Sprintf("UNKNOWN(%d)", h)
|
||||
}
|
||||
}
|
||||
|
||||
// SignatureAlgorithm from the DigitallySigned struct
|
||||
type SignatureAlgorithm byte
|
||||
|
||||
// SignatureAlgorithm constants
|
||||
const (
|
||||
Anonymous SignatureAlgorithm = 0
|
||||
RSA SignatureAlgorithm = 1
|
||||
DSA SignatureAlgorithm = 2
|
||||
ECDSA SignatureAlgorithm = 3
|
||||
)
|
||||
|
||||
func (s SignatureAlgorithm) String() string {
|
||||
switch s {
|
||||
case Anonymous:
|
||||
return "Anonymous"
|
||||
case RSA:
|
||||
return "RSA"
|
||||
case DSA:
|
||||
return "DSA"
|
||||
case ECDSA:
|
||||
return "ECDSA"
|
||||
default:
|
||||
return fmt.Sprintf("UNKNOWN(%d)", s)
|
||||
}
|
||||
}
|
||||
|
||||
// DigitallySigned represents an RFC5246 DigitallySigned structure
|
||||
type DigitallySigned struct {
|
||||
HashAlgorithm HashAlgorithm
|
||||
SignatureAlgorithm SignatureAlgorithm
|
||||
Signature []byte
|
||||
}
|
||||
|
||||
// FromBase64String populates the DigitallySigned structure from the base64 data passed in.
|
||||
// Returns an error if the base64 data is invalid.
|
||||
func (d *DigitallySigned) FromBase64String(b64 string) error {
|
||||
raw, err := base64.StdEncoding.DecodeString(b64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unbase64 DigitallySigned: %v", err)
|
||||
}
|
||||
ds, err := UnmarshalDigitallySigned(bytes.NewReader(raw))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err)
|
||||
}
|
||||
*d = *ds
|
||||
return nil
|
||||
}
|
||||
|
||||
// Base64String returns the base64 representation of the DigitallySigned struct.
|
||||
func (d DigitallySigned) Base64String() (string, error) {
|
||||
b, err := MarshalDigitallySigned(d)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return base64.StdEncoding.EncodeToString(b), nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements the json.Marshaller interface.
|
||||
func (d DigitallySigned) MarshalJSON() ([]byte, error) {
|
||||
b64, err := d.Base64String()
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
return []byte(`"` + b64 + `"`), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface.
|
||||
func (d *DigitallySigned) UnmarshalJSON(b []byte) error {
|
||||
var content string
|
||||
if err := json.Unmarshal(b, &content); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err)
|
||||
}
|
||||
return d.FromBase64String(content)
|
||||
}
|
||||
|
||||
// LogEntry represents the contents of an entry in a CT log, see section 3.1.
|
||||
type LogEntry struct {
|
||||
Index int64
|
||||
Leaf MerkleTreeLeaf
|
||||
X509Cert *x509.Certificate
|
||||
Precert *Precertificate
|
||||
Chain []ASN1Cert
|
||||
}
|
||||
|
||||
// SHA256Hash represents the output from the SHA256 hash function.
|
||||
type SHA256Hash [sha256.Size]byte
|
||||
|
||||
// FromBase64String populates the SHA256 struct with the contents of the base64 data passed in.
|
||||
func (s *SHA256Hash) FromBase64String(b64 string) error {
|
||||
bs, err := base64.StdEncoding.DecodeString(b64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unbase64 LogID: %v", err)
|
||||
}
|
||||
if len(bs) != sha256.Size {
|
||||
return fmt.Errorf("invalid SHA256 length, expected 32 but got %d", len(bs))
|
||||
}
|
||||
copy(s[:], bs)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Base64String returns the base64 representation of this SHA256Hash.
|
||||
func (s SHA256Hash) Base64String() string {
|
||||
return base64.StdEncoding.EncodeToString(s[:])
|
||||
}
|
||||
|
||||
// MarshalJSON implements the json.Marshaller interface for SHA256Hash.
|
||||
func (s SHA256Hash) MarshalJSON() ([]byte, error) {
|
||||
return []byte(`"` + s.Base64String() + `"`), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaller interface.
|
||||
func (s *SHA256Hash) UnmarshalJSON(b []byte) error {
|
||||
var content string
|
||||
if err := json.Unmarshal(b, &content); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal SHA256Hash: %v", err)
|
||||
}
|
||||
return s.FromBase64String(content)
|
||||
}
|
||||
|
||||
// SignedTreeHead represents the structure returned by the get-sth CT method
|
||||
// after base64 decoding. See sections 3.5 and 4.3 in the RFC.
|
||||
type SignedTreeHead struct {
|
||||
Version Version `json:"sth_version"` // The version of the protocol to which the STH conforms
|
||||
TreeSize uint64 `json:"tree_size"` // The number of entries in the new tree
|
||||
Timestamp uint64 `json:"timestamp"` // The time at which the STH was created
|
||||
SHA256RootHash SHA256Hash `json:"sha256_root_hash"` // The root hash of the log's Merkle tree
|
||||
TreeHeadSignature DigitallySigned `json:"tree_head_signature"` // The Log's signature for this STH (see RFC section 3.5)
|
||||
LogID SHA256Hash `json:"log_id"` // The SHA256 hash of the log's public key
|
||||
}
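// Decoding sketch (not upstream code): a get-sth JSON response unmarshals
// directly into SignedTreeHead via the json tags above, since SHA256Hash and
// DigitallySigned implement json.Unmarshaler.
func exampleDecodeSTH(getSTHResponse []byte) (*SignedTreeHead, error) {
	var sth SignedTreeHead
	if err := json.Unmarshal(getSTHResponse, &sth); err != nil {
		return nil, err
	}
	return &sth, nil
}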
|
||||
|
||||
// SignedCertificateTimestamp represents the structure returned by the
|
||||
// add-chain and add-pre-chain methods after base64 decoding. (see RFC sections
|
||||
// 3.2, 4.1 and 4.2)
|
||||
type SignedCertificateTimestamp struct {
|
||||
SCTVersion Version // The version of the protocol to which the SCT conforms
|
||||
LogID SHA256Hash // the SHA-256 hash of the log's public key, calculated over
|
||||
// the DER encoding of the key represented as SubjectPublicKeyInfo.
|
||||
Timestamp uint64 // Timestamp (in ms since unix epoch) at which the SCT was issued
|
||||
Extensions CTExtensions // For future extensions to the protocol
|
||||
Signature DigitallySigned // The Log's signature for this SCT
|
||||
}
|
||||
|
||||
func (s SignedCertificateTimestamp) String() string {
|
||||
return fmt.Sprintf("{Version:%d LogId:%s Timestamp:%d Extensions:'%s' Signature:%v}", s.SCTVersion,
|
||||
base64.StdEncoding.EncodeToString(s.LogID[:]),
|
||||
s.Timestamp,
|
||||
s.Extensions,
|
||||
s.Signature)
|
||||
}
|
||||
|
||||
// TimestampedEntry is part of the MerkleTreeLeaf structure.
|
||||
// See RFC section 3.4
|
||||
type TimestampedEntry struct {
|
||||
Timestamp uint64
|
||||
EntryType LogEntryType
|
||||
X509Entry ASN1Cert
|
||||
PrecertEntry PreCert
|
||||
Extensions CTExtensions
|
||||
}
|
||||
|
||||
// MerkleTreeLeaf represents the deserialized structure of the hash input for the
|
||||
// leaves of a log's Merkle tree. See RFC section 3.4
|
||||
type MerkleTreeLeaf struct {
|
||||
Version Version // the version of the protocol to which the MerkleTreeLeaf corresponds
|
||||
LeafType MerkleLeafType // The type of the leaf input, currently only TimestampedEntry can exist
|
||||
TimestampedEntry TimestampedEntry // The entry data itself
|
||||
}
|
||||
|
||||
// Precertificate represents the parsed CT Precertificate structure.
|
||||
type Precertificate struct {
|
||||
// Raw DER bytes of the precert
|
||||
Raw []byte
|
||||
// SHA256 hash of the issuing key
|
||||
IssuerKeyHash [issuerKeyHashLength]byte
|
||||
// Parsed TBSCertificate structure (held in an x509.Certificate for ease of
|
||||
// access).
|
||||
TBSCertificate x509.Certificate
|
||||
}
|
||||
|
||||
// X509Certificate returns the X.509 Certificate contained within the
|
||||
// MerkleTreeLeaf.
|
||||
// Returns a pointer to an x509.Certificate or a non-nil error.
|
||||
func (m *MerkleTreeLeaf) X509Certificate() (*x509.Certificate, error) {
|
||||
return x509.ParseCertificate(m.TimestampedEntry.X509Entry)
|
||||
}
|
||||
116 Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/cert_pool.go generated vendored Normal file
@@ -0,0 +1,116 @@
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package x509
|
||||
|
||||
import (
|
||||
"encoding/pem"
|
||||
)
|
||||
|
||||
// CertPool is a set of certificates.
|
||||
type CertPool struct {
|
||||
bySubjectKeyId map[string][]int
|
||||
byName map[string][]int
|
||||
certs []*Certificate
|
||||
}
|
||||
|
||||
// NewCertPool returns a new, empty CertPool.
|
||||
func NewCertPool() *CertPool {
|
||||
return &CertPool{
|
||||
make(map[string][]int),
|
||||
make(map[string][]int),
|
||||
nil,
|
||||
}
|
||||
}
|
||||
|
||||
// findVerifiedParents attempts to find certificates in s which have signed the
|
||||
// given certificate. If any candidates were rejected then errCert will be set
|
||||
// to one of them, arbitrarily, and err will contain the reason that it was
|
||||
// rejected.
|
||||
func (s *CertPool) findVerifiedParents(cert *Certificate) (parents []int, errCert *Certificate, err error) {
|
||||
if s == nil {
|
||||
return
|
||||
}
|
||||
var candidates []int
|
||||
|
||||
if len(cert.AuthorityKeyId) > 0 {
|
||||
candidates = s.bySubjectKeyId[string(cert.AuthorityKeyId)]
|
||||
}
|
||||
if len(candidates) == 0 {
|
||||
candidates = s.byName[string(cert.RawIssuer)]
|
||||
}
|
||||
|
||||
for _, c := range candidates {
|
||||
if err = cert.CheckSignatureFrom(s.certs[c]); err == nil {
|
||||
parents = append(parents, c)
|
||||
} else {
|
||||
errCert = s.certs[c]
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// AddCert adds a certificate to a pool.
|
||||
func (s *CertPool) AddCert(cert *Certificate) {
|
||||
if cert == nil {
|
||||
panic("adding nil Certificate to CertPool")
|
||||
}
|
||||
|
||||
// Check that the certificate isn't being added twice.
|
||||
for _, c := range s.certs {
|
||||
if c.Equal(cert) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
n := len(s.certs)
|
||||
s.certs = append(s.certs, cert)
|
||||
|
||||
if len(cert.SubjectKeyId) > 0 {
|
||||
keyId := string(cert.SubjectKeyId)
|
||||
s.bySubjectKeyId[keyId] = append(s.bySubjectKeyId[keyId], n)
|
||||
}
|
||||
name := string(cert.RawSubject)
|
||||
s.byName[name] = append(s.byName[name], n)
|
||||
}
|
||||
|
||||
// AppendCertsFromPEM attempts to parse a series of PEM encoded certificates.
|
||||
// It appends any certificates found to s and returns true if any certificates
|
||||
// were successfully parsed.
|
||||
//
|
||||
// On many Linux systems, /etc/ssl/cert.pem will contain the system wide set
|
||||
// of root CAs in a format suitable for this function.
|
||||
func (s *CertPool) AppendCertsFromPEM(pemCerts []byte) (ok bool) {
|
||||
for len(pemCerts) > 0 {
|
||||
var block *pem.Block
|
||||
block, pemCerts = pem.Decode(pemCerts)
|
||||
if block == nil {
|
||||
break
|
||||
}
|
||||
if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
cert, err := ParseCertificate(block.Bytes)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
s.AddCert(cert)
|
||||
ok = true
|
||||
}
|
||||
|
||||
return
|
||||
}
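// Usage sketch (not upstream code): build a pool from a PEM bundle already
// read into memory, e.g. a file of concatenated root certificates.
func examplePoolFromPEM(pemBundle []byte) (*CertPool, bool) {
	pool := NewCertPool()
	ok := pool.AppendCertsFromPEM(pemBundle)
	return pool, ok
}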
|
||||
|
||||
// Subjects returns a list of the DER-encoded subjects of
|
||||
// all of the certificates in the pool.
|
||||
func (s *CertPool) Subjects() (res [][]byte) {
|
||||
res = make([][]byte, len(s.certs))
|
||||
for i, c := range s.certs {
|
||||
res[i] = c.RawSubject
|
||||
}
|
||||
return
|
||||
}
|
||||
233 Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/pem_decrypt.go generated vendored Normal file
@@ -0,0 +1,233 @@
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package x509
|
||||
|
||||
// RFC 1423 describes the encryption of PEM blocks. The algorithm used to
|
||||
// generate a key from the password was derived by looking at the OpenSSL
|
||||
// implementation.
|
||||
|
||||
import (
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/des"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type PEMCipher int
|
||||
|
||||
// Possible values for the EncryptPEMBlock encryption algorithm.
|
||||
const (
|
||||
_ PEMCipher = iota
|
||||
PEMCipherDES
|
||||
PEMCipher3DES
|
||||
PEMCipherAES128
|
||||
PEMCipherAES192
|
||||
PEMCipherAES256
|
||||
)
|
||||
|
||||
// rfc1423Algo holds a method for enciphering a PEM block.
|
||||
type rfc1423Algo struct {
|
||||
cipher PEMCipher
|
||||
name string
|
||||
cipherFunc func(key []byte) (cipher.Block, error)
|
||||
keySize int
|
||||
blockSize int
|
||||
}
|
||||
|
||||
// rfc1423Algos holds a slice of the possible ways to encrypt a PEM
|
||||
// block. The ivSize numbers were taken from the OpenSSL source.
|
||||
var rfc1423Algos = []rfc1423Algo{{
|
||||
cipher: PEMCipherDES,
|
||||
name: "DES-CBC",
|
||||
cipherFunc: des.NewCipher,
|
||||
keySize: 8,
|
||||
blockSize: des.BlockSize,
|
||||
}, {
|
||||
cipher: PEMCipher3DES,
|
||||
name: "DES-EDE3-CBC",
|
||||
cipherFunc: des.NewTripleDESCipher,
|
||||
keySize: 24,
|
||||
blockSize: des.BlockSize,
|
||||
}, {
|
||||
cipher: PEMCipherAES128,
|
||||
name: "AES-128-CBC",
|
||||
cipherFunc: aes.NewCipher,
|
||||
keySize: 16,
|
||||
blockSize: aes.BlockSize,
|
||||
}, {
|
||||
cipher: PEMCipherAES192,
|
||||
name: "AES-192-CBC",
|
||||
cipherFunc: aes.NewCipher,
|
||||
keySize: 24,
|
||||
blockSize: aes.BlockSize,
|
||||
}, {
|
||||
cipher: PEMCipherAES256,
|
||||
name: "AES-256-CBC",
|
||||
cipherFunc: aes.NewCipher,
|
||||
keySize: 32,
|
||||
blockSize: aes.BlockSize,
|
||||
},
|
||||
}
|
||||
|
||||
// deriveKey uses a key derivation function to stretch the password into a key
|
||||
// with the number of bits our cipher requires. This algorithm was derived from
|
||||
// the OpenSSL source.
|
||||
func (c rfc1423Algo) deriveKey(password, salt []byte) []byte {
|
||||
hash := md5.New()
|
||||
out := make([]byte, c.keySize)
|
||||
var digest []byte
|
||||
|
||||
for i := 0; i < len(out); i += len(digest) {
|
||||
hash.Reset()
|
||||
hash.Write(digest)
|
||||
hash.Write(password)
|
||||
hash.Write(salt)
|
||||
digest = hash.Sum(digest[:0])
|
||||
copy(out[i:], digest)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// IsEncryptedPEMBlock returns if the PEM block is password encrypted.
|
||||
func IsEncryptedPEMBlock(b *pem.Block) bool {
|
||||
_, ok := b.Headers["DEK-Info"]
|
||||
return ok
|
||||
}
|
||||
|
||||
// IncorrectPasswordError is returned when an incorrect password is detected.
|
||||
var IncorrectPasswordError = errors.New("x509: decryption password incorrect")
|
||||
|
||||
// DecryptPEMBlock takes a password encrypted PEM block and the password used to
|
||||
// encrypt it and returns a slice of decrypted DER encoded bytes. It inspects
|
||||
// the DEK-Info header to determine the algorithm used for decryption. If no
|
||||
// DEK-Info header is present, an error is returned. If an incorrect password
|
||||
// is detected an IncorrectPasswordError is returned.
|
||||
func DecryptPEMBlock(b *pem.Block, password []byte) ([]byte, error) {
|
||||
dek, ok := b.Headers["DEK-Info"]
|
||||
if !ok {
|
||||
return nil, errors.New("x509: no DEK-Info header in block")
|
||||
}
|
||||
|
||||
idx := strings.Index(dek, ",")
|
||||
if idx == -1 {
|
||||
return nil, errors.New("x509: malformed DEK-Info header")
|
||||
}
|
||||
|
||||
mode, hexIV := dek[:idx], dek[idx+1:]
|
||||
ciph := cipherByName(mode)
|
||||
if ciph == nil {
|
||||
return nil, errors.New("x509: unknown encryption mode")
|
||||
}
|
||||
iv, err := hex.DecodeString(hexIV)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(iv) != ciph.blockSize {
|
||||
return nil, errors.New("x509: incorrect IV size")
|
||||
}
|
||||
|
||||
// Based on the OpenSSL implementation. The salt is the first 8 bytes
|
||||
// of the initialization vector.
|
||||
key := ciph.deriveKey(password, iv[:8])
|
||||
block, err := ciph.cipherFunc(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data := make([]byte, len(b.Bytes))
|
||||
dec := cipher.NewCBCDecrypter(block, iv)
|
||||
dec.CryptBlocks(data, b.Bytes)
|
||||
|
||||
// Blocks are padded using a scheme where the last n bytes of padding are all
|
||||
// equal to n. It can pad from 1 to blocksize bytes inclusive. See RFC 1423.
|
||||
// For example:
|
||||
// [x y z 2 2]
|
||||
// [x y 7 7 7 7 7 7 7]
|
||||
// If we detect a bad padding, we assume it is an invalid password.
|
||||
dlen := len(data)
|
||||
if dlen == 0 || dlen%ciph.blockSize != 0 {
|
||||
return nil, errors.New("x509: invalid padding")
|
||||
}
|
||||
last := int(data[dlen-1])
|
||||
if dlen < last {
|
||||
return nil, IncorrectPasswordError
|
||||
}
|
||||
if last == 0 || last > ciph.blockSize {
|
||||
return nil, IncorrectPasswordError
|
||||
}
|
||||
for _, val := range data[dlen-last:] {
|
||||
if int(val) != last {
|
||||
return nil, IncorrectPasswordError
|
||||
}
|
||||
}
|
||||
return data[:dlen-last], nil
|
||||
}
|
||||
|
||||
// EncryptPEMBlock returns a PEM block of the specified type holding the
|
||||
// given DER-encoded data encrypted with the specified algorithm and
|
||||
// password.
|
||||
func EncryptPEMBlock(rand io.Reader, blockType string, data, password []byte, alg PEMCipher) (*pem.Block, error) {
|
||||
ciph := cipherByKey(alg)
|
||||
if ciph == nil {
|
||||
return nil, errors.New("x509: unknown encryption mode")
|
||||
}
|
||||
iv := make([]byte, ciph.blockSize)
|
||||
if _, err := io.ReadFull(rand, iv); err != nil {
|
||||
return nil, errors.New("x509: cannot generate IV: " + err.Error())
|
||||
}
|
||||
// The salt is the first 8 bytes of the initialization vector,
|
||||
// matching the key derivation in DecryptPEMBlock.
|
||||
key := ciph.deriveKey(password, iv[:8])
|
||||
block, err := ciph.cipherFunc(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
enc := cipher.NewCBCEncrypter(block, iv)
|
||||
pad := ciph.blockSize - len(data)%ciph.blockSize
|
||||
encrypted := make([]byte, len(data), len(data)+pad)
|
||||
// We could save this copy by encrypting all the whole blocks in
|
||||
// the data separately, but it doesn't seem worth the additional
|
||||
// code.
|
||||
copy(encrypted, data)
|
||||
// See RFC 1423, section 1.1
|
||||
for i := 0; i < pad; i++ {
|
||||
encrypted = append(encrypted, byte(pad))
|
||||
}
|
||||
enc.CryptBlocks(encrypted, encrypted)
|
||||
|
||||
return &pem.Block{
|
||||
Type: blockType,
|
||||
Headers: map[string]string{
|
||||
"Proc-Type": "4,ENCRYPTED",
|
||||
"DEK-Info": ciph.name + "," + hex.EncodeToString(iv),
|
||||
},
|
||||
Bytes: encrypted,
|
||||
}, nil
|
||||
}
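// Round-trip sketch (not upstream code): encrypt DER bytes into a PEM block
// and decrypt them again with the same password. The caller supplies the
// randomness source (crypto/rand.Reader in real use).
func examplePEMEncryptionRoundTrip(rand io.Reader, der, password []byte) ([]byte, error) {
	block, err := EncryptPEMBlock(rand, "RSA PRIVATE KEY", der, password, PEMCipherAES256)
	if err != nil {
		return nil, err
	}
	return DecryptPEMBlock(block, password)
}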
|
||||
|
||||
func cipherByName(name string) *rfc1423Algo {
|
||||
for i := range rfc1423Algos {
|
||||
alg := &rfc1423Algos[i]
|
||||
if alg.name == name {
|
||||
return alg
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func cipherByKey(key PEMCipher) *rfc1423Algo {
|
||||
for i := range rfc1423Algos {
|
||||
alg := &rfc1423Algos[i]
|
||||
if alg.cipher == key {
|
||||
return alg
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
124 Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/pkcs1.go generated vendored Normal file
@@ -0,0 +1,124 @@
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package x509
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
// START CT CHANGES
|
||||
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/asn1"
|
||||
// END CT CHANGES
|
||||
"errors"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// pkcs1PrivateKey is a structure which mirrors the PKCS#1 ASN.1 for an RSA private key.
|
||||
type pkcs1PrivateKey struct {
|
||||
Version int
|
||||
N *big.Int
|
||||
E int
|
||||
D *big.Int
|
||||
P *big.Int
|
||||
Q *big.Int
|
||||
// We ignore these values, if present, because rsa will calculate them.
|
||||
Dp *big.Int `asn1:"optional"`
|
||||
Dq *big.Int `asn1:"optional"`
|
||||
Qinv *big.Int `asn1:"optional"`
|
||||
|
||||
AdditionalPrimes []pkcs1AdditionalRSAPrime `asn1:"optional,omitempty"`
|
||||
}
|
||||
|
||||
type pkcs1AdditionalRSAPrime struct {
|
||||
Prime *big.Int
|
||||
|
||||
// We ignore these values because rsa will calculate them.
|
||||
Exp *big.Int
|
||||
Coeff *big.Int
|
||||
}
|
||||
|
||||
// ParsePKCS1PrivateKey returns an RSA private key from its ASN.1 PKCS#1 DER encoded form.
|
||||
func ParsePKCS1PrivateKey(der []byte) (key *rsa.PrivateKey, err error) {
|
||||
var priv pkcs1PrivateKey
|
||||
rest, err := asn1.Unmarshal(der, &priv)
|
||||
if len(rest) > 0 {
|
||||
err = asn1.SyntaxError{Msg: "trailing data"}
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if priv.Version > 1 {
|
||||
return nil, errors.New("x509: unsupported private key version")
|
||||
}
|
||||
|
||||
if priv.N.Sign() <= 0 || priv.D.Sign() <= 0 || priv.P.Sign() <= 0 || priv.Q.Sign() <= 0 {
|
||||
return nil, errors.New("x509: private key contains zero or negative value")
|
||||
}
|
||||
|
||||
key = new(rsa.PrivateKey)
|
||||
key.PublicKey = rsa.PublicKey{
|
||||
E: priv.E,
|
||||
N: priv.N,
|
||||
}
|
||||
|
||||
key.D = priv.D
|
||||
key.Primes = make([]*big.Int, 2+len(priv.AdditionalPrimes))
|
||||
key.Primes[0] = priv.P
|
||||
key.Primes[1] = priv.Q
|
||||
for i, a := range priv.AdditionalPrimes {
|
||||
if a.Prime.Sign() <= 0 {
|
||||
return nil, errors.New("x509: private key contains zero or negative prime")
|
||||
}
|
||||
key.Primes[i+2] = a.Prime
|
||||
// We ignore the other two values because rsa will calculate
|
||||
// them as needed.
|
||||
}
|
||||
|
||||
err = key.Validate()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key.Precompute()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalPKCS1PrivateKey converts a private key to ASN.1 DER encoded form.
|
||||
func MarshalPKCS1PrivateKey(key *rsa.PrivateKey) []byte {
|
||||
key.Precompute()
|
||||
|
||||
version := 0
|
||||
if len(key.Primes) > 2 {
|
||||
version = 1
|
||||
}
|
||||
|
||||
priv := pkcs1PrivateKey{
|
||||
Version: version,
|
||||
N: key.N,
|
||||
E: key.PublicKey.E,
|
||||
D: key.D,
|
||||
P: key.Primes[0],
|
||||
Q: key.Primes[1],
|
||||
Dp: key.Precomputed.Dp,
|
||||
Dq: key.Precomputed.Dq,
|
||||
Qinv: key.Precomputed.Qinv,
|
||||
}
|
||||
|
||||
priv.AdditionalPrimes = make([]pkcs1AdditionalRSAPrime, len(key.Precomputed.CRTValues))
|
||||
for i, values := range key.Precomputed.CRTValues {
|
||||
priv.AdditionalPrimes[i].Prime = key.Primes[2+i]
|
||||
priv.AdditionalPrimes[i].Exp = values.Exp
|
||||
priv.AdditionalPrimes[i].Coeff = values.Coeff
|
||||
}
|
||||
|
||||
b, _ := asn1.Marshal(priv)
|
||||
return b
|
||||
}
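// Round-trip sketch (not upstream code): an *rsa.PrivateKey survives
// MarshalPKCS1PrivateKey followed by ParsePKCS1PrivateKey.
func examplePKCS1RoundTrip(key *rsa.PrivateKey) (*rsa.PrivateKey, error) {
	return ParsePKCS1PrivateKey(MarshalPKCS1PrivateKey(key))
}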
|
||||
|
||||
// rsaPublicKey reflects the ASN.1 structure of a PKCS#1 public key.
|
||||
type rsaPublicKey struct {
|
||||
N *big.Int
|
||||
E int
|
||||
}
|
||||
56 Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/pkcs8.go generated vendored Normal file
@@ -0,0 +1,56 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package x509

import (
	// START CT CHANGES
	"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/asn1"
	"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/pkix"
	// END CT CHANGES
	"errors"
	"fmt"
)

// pkcs8 reflects an ASN.1, PKCS#8 PrivateKey. See
// ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-8/pkcs-8v1_2.asn
// and RFC5208.
type pkcs8 struct {
	Version    int
	Algo       pkix.AlgorithmIdentifier
	PrivateKey []byte
	// optional attributes omitted.
}

// ParsePKCS8PrivateKey parses an unencrypted, PKCS#8 private key. See
// http://www.rsa.com/rsalabs/node.asp?id=2130 and RFC5208.
func ParsePKCS8PrivateKey(der []byte) (key interface{}, err error) {
	var privKey pkcs8
	if _, err := asn1.Unmarshal(der, &privKey); err != nil {
		return nil, err
	}
	switch {
	case privKey.Algo.Algorithm.Equal(oidPublicKeyRSA):
		key, err = ParsePKCS1PrivateKey(privKey.PrivateKey)
		if err != nil {
			return nil, errors.New("x509: failed to parse RSA private key embedded in PKCS#8: " + err.Error())
		}
		return key, nil

	case privKey.Algo.Algorithm.Equal(oidPublicKeyECDSA):
		bytes := privKey.Algo.Parameters.FullBytes
		namedCurveOID := new(asn1.ObjectIdentifier)
		if _, err := asn1.Unmarshal(bytes, namedCurveOID); err != nil {
			namedCurveOID = nil
		}
		key, err = parseECPrivateKey(namedCurveOID, privKey.PrivateKey)
		if err != nil {
			return nil, errors.New("x509: failed to parse EC private key embedded in PKCS#8: " + err.Error())
		}
		return key, nil

	default:
		return nil, fmt.Errorf("x509: PKCS#8 wrapping contained private key with unknown algorithm: %v", privKey.Algo.Algorithm)
	}
}
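// Usage sketch (not upstream code): ParsePKCS8PrivateKey returns an
// interface{}, so callers inspect the concrete type of the result.
func examplePKCS8KeyType(der []byte) (string, error) {
	key, err := ParsePKCS8PrivateKey(der)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%T", key), nil
}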
|
||||
173 Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/pkix/pkix.go generated vendored Normal file
@@ -0,0 +1,173 @@
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package pkix contains shared, low level structures used for ASN.1 parsing
|
||||
// and serialization of X.509 certificates, CRL and OCSP.
|
||||
package pkix
|
||||
|
||||
import (
|
||||
// START CT CHANGES
|
||||
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/asn1"
|
||||
// END CT CHANGES
|
||||
"math/big"
|
||||
"time"
|
||||
)
|
||||
|
||||
// AlgorithmIdentifier represents the ASN.1 structure of the same name. See RFC
|
||||
// 5280, section 4.1.1.2.
|
||||
type AlgorithmIdentifier struct {
|
||||
Algorithm asn1.ObjectIdentifier
|
||||
Parameters asn1.RawValue `asn1:"optional"`
|
||||
}
|
||||
|
||||
type RDNSequence []RelativeDistinguishedNameSET
|
||||
|
||||
type RelativeDistinguishedNameSET []AttributeTypeAndValue
|
||||
|
||||
// AttributeTypeAndValue mirrors the ASN.1 structure of the same name in
|
||||
// http://tools.ietf.org/html/rfc5280#section-4.1.2.4
|
||||
type AttributeTypeAndValue struct {
|
||||
Type asn1.ObjectIdentifier
|
||||
Value interface{}
|
||||
}
|
||||
|
||||
// Extension represents the ASN.1 structure of the same name. See RFC
|
||||
// 5280, section 4.2.
|
||||
type Extension struct {
|
||||
Id asn1.ObjectIdentifier
|
||||
Critical bool `asn1:"optional"`
|
||||
Value []byte
|
||||
}
|
||||
|
||||
// Name represents an X.509 distinguished name. This only includes the common
|
||||
// elements of a DN. Additional elements in the name are ignored.
|
||||
type Name struct {
|
||||
Country, Organization, OrganizationalUnit []string
|
||||
Locality, Province []string
|
||||
StreetAddress, PostalCode []string
|
||||
SerialNumber, CommonName string
|
||||
|
||||
Names []AttributeTypeAndValue
|
||||
}
|
||||
|
||||
func (n *Name) FillFromRDNSequence(rdns *RDNSequence) {
|
||||
for _, rdn := range *rdns {
|
||||
if len(rdn) == 0 {
|
||||
continue
|
||||
}
|
||||
atv := rdn[0]
|
||||
n.Names = append(n.Names, atv)
|
||||
value, ok := atv.Value.(string)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
t := atv.Type
|
||||
if len(t) == 4 && t[0] == 2 && t[1] == 5 && t[2] == 4 {
|
||||
switch t[3] {
|
||||
case 3:
|
||||
n.CommonName = value
|
||||
case 5:
|
||||
n.SerialNumber = value
|
||||
case 6:
|
||||
n.Country = append(n.Country, value)
|
||||
case 7:
|
||||
n.Locality = append(n.Locality, value)
|
||||
case 8:
|
||||
n.Province = append(n.Province, value)
|
||||
case 9:
|
||||
n.StreetAddress = append(n.StreetAddress, value)
|
||||
case 10:
|
||||
n.Organization = append(n.Organization, value)
|
||||
case 11:
|
||||
n.OrganizationalUnit = append(n.OrganizationalUnit, value)
|
||||
case 17:
|
||||
n.PostalCode = append(n.PostalCode, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
oidCountry = []int{2, 5, 4, 6}
|
||||
oidOrganization = []int{2, 5, 4, 10}
|
||||
oidOrganizationalUnit = []int{2, 5, 4, 11}
|
||||
oidCommonName = []int{2, 5, 4, 3}
|
||||
oidSerialNumber = []int{2, 5, 4, 5}
|
||||
oidLocality = []int{2, 5, 4, 7}
|
||||
oidProvince = []int{2, 5, 4, 8}
|
||||
oidStreetAddress = []int{2, 5, 4, 9}
|
||||
oidPostalCode = []int{2, 5, 4, 17}
|
||||
)
|
||||
|
||||
// appendRDNs appends a relativeDistinguishedNameSET to the given RDNSequence
|
||||
// and returns the new value. The relativeDistinguishedNameSET contains an
|
||||
// attributeTypeAndValue for each of the given values. See RFC 5280, A.1, and
|
||||
// search for AttributeTypeAndValue.
|
||||
func appendRDNs(in RDNSequence, values []string, oid asn1.ObjectIdentifier) RDNSequence {
|
||||
if len(values) == 0 {
|
||||
return in
|
||||
}
|
||||
|
||||
s := make([]AttributeTypeAndValue, len(values))
|
||||
for i, value := range values {
|
||||
s[i].Type = oid
|
||||
s[i].Value = value
|
||||
}
|
||||
|
||||
return append(in, s)
|
||||
}
|
||||
|
||||
func (n Name) ToRDNSequence() (ret RDNSequence) {
|
||||
ret = appendRDNs(ret, n.Country, oidCountry)
|
||||
ret = appendRDNs(ret, n.Organization, oidOrganization)
|
||||
ret = appendRDNs(ret, n.OrganizationalUnit, oidOrganizationalUnit)
|
||||
ret = appendRDNs(ret, n.Locality, oidLocality)
|
||||
ret = appendRDNs(ret, n.Province, oidProvince)
|
||||
ret = appendRDNs(ret, n.StreetAddress, oidStreetAddress)
|
||||
ret = appendRDNs(ret, n.PostalCode, oidPostalCode)
|
||||
if len(n.CommonName) > 0 {
|
||||
ret = appendRDNs(ret, []string{n.CommonName}, oidCommonName)
|
||||
}
|
||||
if len(n.SerialNumber) > 0 {
|
||||
ret = appendRDNs(ret, []string{n.SerialNumber}, oidSerialNumber)
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
// CertificateList represents the ASN.1 structure of the same name. See RFC
|
||||
// 5280, section 5.1. Use Certificate.CheckCRLSignature to verify the
|
||||
// signature.
|
||||
type CertificateList struct {
|
||||
TBSCertList TBSCertificateList
|
||||
SignatureAlgorithm AlgorithmIdentifier
|
||||
SignatureValue asn1.BitString
|
||||
}
|
||||
|
||||
// HasExpired reports whether now is past the expiry time of certList.
|
||||
func (certList *CertificateList) HasExpired(now time.Time) bool {
|
||||
return now.After(certList.TBSCertList.NextUpdate)
|
||||
}
|
||||
|
||||
// TBSCertificateList represents the ASN.1 structure of the same name. See RFC
|
||||
// 5280, section 5.1.
|
||||
type TBSCertificateList struct {
|
||||
Raw asn1.RawContent
|
||||
Version int `asn1:"optional,default:2"`
|
||||
Signature AlgorithmIdentifier
|
||||
Issuer RDNSequence
|
||||
ThisUpdate time.Time
|
||||
NextUpdate time.Time
|
||||
RevokedCertificates []RevokedCertificate `asn1:"optional"`
|
||||
Extensions []Extension `asn1:"tag:0,optional,explicit"`
|
||||
}
|
||||
|
||||
// RevokedCertificate represents the ASN.1 structure of the same name. See RFC
|
||||
// 5280, section 5.1.
|
||||
type RevokedCertificate struct {
|
||||
SerialNumber *big.Int
|
||||
RevocationTime time.Time
|
||||
Extensions []Extension `asn1:"optional"`
|
||||
}
|
||||
Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root.go (17 lines, generated, vendored, new file)
@@ -0,0 +1,17 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package x509

import "sync"

var (
	once        sync.Once
	systemRoots *CertPool
)

func systemRootsPool() *CertPool {
	once.Do(initSystemRoots)
	return systemRoots
}
Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root_darwin.go (83 lines, generated, vendored, new file)
@@ -0,0 +1,83 @@
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin,cgo
|
||||
|
||||
package x509
|
||||
|
||||
/*
|
||||
#cgo CFLAGS: -mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1060
|
||||
#cgo LDFLAGS: -framework CoreFoundation -framework Security
|
||||
|
||||
#include <CoreFoundation/CoreFoundation.h>
|
||||
#include <Security/Security.h>
|
||||
|
||||
// FetchPEMRootsCTX509 fetches the system's list of trusted X.509 root certificates.
|
||||
//
|
||||
// On success it returns 0 and fills pemRoots with a CFDataRef that contains the extracted root
|
||||
// certificates of the system. On failure, the function returns -1.
|
||||
//
|
||||
// Note: The CFDataRef returned in pemRoots must be released (using CFRelease) after
|
||||
// we've consumed its content.
|
||||
int FetchPEMRootsCTX509(CFDataRef *pemRoots) {
|
||||
if (pemRoots == NULL) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
CFArrayRef certs = NULL;
|
||||
OSStatus err = SecTrustCopyAnchorCertificates(&certs);
|
||||
if (err != noErr) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
CFMutableDataRef combinedData = CFDataCreateMutable(kCFAllocatorDefault, 0);
|
||||
int i, ncerts = CFArrayGetCount(certs);
|
||||
for (i = 0; i < ncerts; i++) {
|
||||
CFDataRef data = NULL;
|
||||
SecCertificateRef cert = (SecCertificateRef)CFArrayGetValueAtIndex(certs, i);
|
||||
if (cert == NULL) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Note: SecKeychainItemExport is deprecated as of 10.7 in favor of SecItemExport.
|
||||
// Once we support weak imports via cgo we should prefer that, and fall back to this
|
||||
// for older systems.
|
||||
err = SecKeychainItemExport(cert, kSecFormatX509Cert, kSecItemPemArmour, NULL, &data);
|
||||
if (err != noErr) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (data != NULL) {
|
||||
CFDataAppendBytes(combinedData, CFDataGetBytePtr(data), CFDataGetLength(data));
|
||||
CFRelease(data);
|
||||
}
|
||||
}
|
||||
|
||||
CFRelease(certs);
|
||||
|
||||
*pemRoots = combinedData;
|
||||
return 0;
|
||||
}
|
||||
*/
|
||||
import "C"
|
||||
import "unsafe"
|
||||
|
||||
func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func initSystemRoots() {
|
||||
roots := NewCertPool()
|
||||
|
||||
var data C.CFDataRef = nil
|
||||
err := C.FetchPEMRootsCTX509(&data)
|
||||
if err == -1 {
|
||||
return
|
||||
}
|
||||
|
||||
defer C.CFRelease(C.CFTypeRef(data))
|
||||
buf := C.GoBytes(unsafe.Pointer(C.CFDataGetBytePtr(data)), C.int(C.CFDataGetLength(data)))
|
||||
roots.AppendCertsFromPEM(buf)
|
||||
systemRoots = roots
|
||||
}
|
||||
Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root_plan9.go (33 lines, generated, vendored, new file)
@@ -0,0 +1,33 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build plan9

package x509

import "io/ioutil"

// Possible certificate files; stop after finding one.
var certFiles = []string{
	"/sys/lib/tls/ca.pem",
}

func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
	return nil, nil
}

func initSystemRoots() {
	roots := NewCertPool()
	for _, file := range certFiles {
		data, err := ioutil.ReadFile(file)
		if err == nil {
			roots.AppendCertsFromPEM(data)
			systemRoots = roots
			return
		}
	}

	// All of the files failed to load. systemRoots will be nil which will
	// trigger a specific error at verification time.
}
Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root_stub.go (14 lines, generated, vendored, new file)
@@ -0,0 +1,14 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin,!cgo

package x509

func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
	return nil, nil
}

func initSystemRoots() {
}
Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root_unix.go (37 lines, generated, vendored, new file)
@@ -0,0 +1,37 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build dragonfly freebsd linux openbsd netbsd

package x509

import "io/ioutil"

// Possible certificate files; stop after finding one.
var certFiles = []string{
	"/etc/ssl/certs/ca-certificates.crt",     // Debian/Ubuntu/Gentoo etc.
	"/etc/pki/tls/certs/ca-bundle.crt",       // Fedora/RHEL
	"/etc/ssl/ca-bundle.pem",                 // OpenSUSE
	"/etc/ssl/cert.pem",                      // OpenBSD
	"/usr/local/share/certs/ca-root-nss.crt", // FreeBSD/DragonFly
}

func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
	return nil, nil
}

func initSystemRoots() {
	roots := NewCertPool()
	for _, file := range certFiles {
		data, err := ioutil.ReadFile(file)
		if err == nil {
			roots.AppendCertsFromPEM(data)
			systemRoots = roots
			return
		}
	}

	// All of the files failed to load. systemRoots will be nil which will
	// trigger a specific error at verification time.
}
Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root_windows.go (229 lines, generated, vendored, new file)
@@ -0,0 +1,229 @@
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package x509
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Creates a new *syscall.CertContext representing the leaf certificate in an in-memory
|
||||
// certificate store containing itself and all of the intermediate certificates specified
|
||||
// in the opts.Intermediates CertPool.
|
||||
//
|
||||
// A pointer to the in-memory store is available in the returned CertContext's Store field.
|
||||
// The store is automatically freed when the CertContext is freed using
|
||||
// syscall.CertFreeCertificateContext.
|
||||
func createStoreContext(leaf *Certificate, opts *VerifyOptions) (*syscall.CertContext, error) {
|
||||
var storeCtx *syscall.CertContext
|
||||
|
||||
leafCtx, err := syscall.CertCreateCertificateContext(syscall.X509_ASN_ENCODING|syscall.PKCS_7_ASN_ENCODING, &leaf.Raw[0], uint32(len(leaf.Raw)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer syscall.CertFreeCertificateContext(leafCtx)
|
||||
|
||||
handle, err := syscall.CertOpenStore(syscall.CERT_STORE_PROV_MEMORY, 0, 0, syscall.CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer syscall.CertCloseStore(handle, 0)
|
||||
|
||||
err = syscall.CertAddCertificateContextToStore(handle, leafCtx, syscall.CERT_STORE_ADD_ALWAYS, &storeCtx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if opts.Intermediates != nil {
|
||||
for _, intermediate := range opts.Intermediates.certs {
|
||||
ctx, err := syscall.CertCreateCertificateContext(syscall.X509_ASN_ENCODING|syscall.PKCS_7_ASN_ENCODING, &intermediate.Raw[0], uint32(len(intermediate.Raw)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = syscall.CertAddCertificateContextToStore(handle, ctx, syscall.CERT_STORE_ADD_ALWAYS, nil)
|
||||
syscall.CertFreeCertificateContext(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return storeCtx, nil
|
||||
}
|
||||
|
||||
// extractSimpleChain extracts the final certificate chain from a CertSimpleChain.
|
||||
func extractSimpleChain(simpleChain **syscall.CertSimpleChain, count int) (chain []*Certificate, err error) {
|
||||
if simpleChain == nil || count == 0 {
|
||||
return nil, errors.New("x509: invalid simple chain")
|
||||
}
|
||||
|
||||
simpleChains := (*[1 << 20]*syscall.CertSimpleChain)(unsafe.Pointer(simpleChain))[:]
|
||||
lastChain := simpleChains[count-1]
|
||||
elements := (*[1 << 20]*syscall.CertChainElement)(unsafe.Pointer(lastChain.Elements))[:]
|
||||
for i := 0; i < int(lastChain.NumElements); i++ {
|
||||
// Copy the buf, since ParseCertificate does not create its own copy.
|
||||
cert := elements[i].CertContext
|
||||
encodedCert := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:]
|
||||
buf := make([]byte, cert.Length)
|
||||
copy(buf, encodedCert[:])
|
||||
parsedCert, err := ParseCertificate(buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
chain = append(chain, parsedCert)
|
||||
}
|
||||
|
||||
return chain, nil
|
||||
}
|
||||
|
||||
// checkChainTrustStatus checks the trust status of the certificate chain, translating
|
||||
// any errors it finds into Go errors in the process.
|
||||
func checkChainTrustStatus(c *Certificate, chainCtx *syscall.CertChainContext) error {
|
||||
if chainCtx.TrustStatus.ErrorStatus != syscall.CERT_TRUST_NO_ERROR {
|
||||
status := chainCtx.TrustStatus.ErrorStatus
|
||||
switch status {
|
||||
case syscall.CERT_TRUST_IS_NOT_TIME_VALID:
|
||||
return CertificateInvalidError{c, Expired}
|
||||
default:
|
||||
return UnknownAuthorityError{c, nil, nil}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkChainSSLServerPolicy checks that the certificate chain in chainCtx is valid for
|
||||
// use as a certificate chain for a SSL/TLS server.
|
||||
func checkChainSSLServerPolicy(c *Certificate, chainCtx *syscall.CertChainContext, opts *VerifyOptions) error {
|
||||
servernamep, err := syscall.UTF16PtrFromString(opts.DNSName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sslPara := &syscall.SSLExtraCertChainPolicyPara{
|
||||
AuthType: syscall.AUTHTYPE_SERVER,
|
||||
ServerName: servernamep,
|
||||
}
|
||||
sslPara.Size = uint32(unsafe.Sizeof(*sslPara))
|
||||
|
||||
para := &syscall.CertChainPolicyPara{
|
||||
ExtraPolicyPara: uintptr(unsafe.Pointer(sslPara)),
|
||||
}
|
||||
para.Size = uint32(unsafe.Sizeof(*para))
|
||||
|
||||
status := syscall.CertChainPolicyStatus{}
|
||||
err = syscall.CertVerifyCertificateChainPolicy(syscall.CERT_CHAIN_POLICY_SSL, chainCtx, para, &status)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO(mkrautz): use the lChainIndex and lElementIndex fields
|
||||
// of the CertChainPolicyStatus to provide proper context, instead
|
||||
// using c.
|
||||
if status.Error != 0 {
|
||||
switch status.Error {
|
||||
case syscall.CERT_E_EXPIRED:
|
||||
return CertificateInvalidError{c, Expired}
|
||||
case syscall.CERT_E_CN_NO_MATCH:
|
||||
return HostnameError{c, opts.DNSName}
|
||||
case syscall.CERT_E_UNTRUSTEDROOT:
|
||||
return UnknownAuthorityError{c, nil, nil}
|
||||
default:
|
||||
return UnknownAuthorityError{c, nil, nil}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// systemVerify is like Verify, except that it uses CryptoAPI calls
|
||||
// to build certificate chains and verify them.
|
||||
func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
|
||||
hasDNSName := opts != nil && len(opts.DNSName) > 0
|
||||
|
||||
storeCtx, err := createStoreContext(c, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer syscall.CertFreeCertificateContext(storeCtx)
|
||||
|
||||
para := new(syscall.CertChainPara)
|
||||
para.Size = uint32(unsafe.Sizeof(*para))
|
||||
|
||||
// If there's a DNSName set in opts, assume we're verifying
|
||||
// a certificate from a TLS server.
|
||||
if hasDNSName {
|
||||
oids := []*byte{
|
||||
&syscall.OID_PKIX_KP_SERVER_AUTH[0],
|
||||
// Both IE and Chrome allow certificates with
|
||||
// Server Gated Crypto as well. Some certificates
|
||||
// in the wild require them.
|
||||
&syscall.OID_SERVER_GATED_CRYPTO[0],
|
||||
&syscall.OID_SGC_NETSCAPE[0],
|
||||
}
|
||||
para.RequestedUsage.Type = syscall.USAGE_MATCH_TYPE_OR
|
||||
para.RequestedUsage.Usage.Length = uint32(len(oids))
|
||||
para.RequestedUsage.Usage.UsageIdentifiers = &oids[0]
|
||||
} else {
|
||||
para.RequestedUsage.Type = syscall.USAGE_MATCH_TYPE_AND
|
||||
para.RequestedUsage.Usage.Length = 0
|
||||
para.RequestedUsage.Usage.UsageIdentifiers = nil
|
||||
}
|
||||
|
||||
var verifyTime *syscall.Filetime
|
||||
if opts != nil && !opts.CurrentTime.IsZero() {
|
||||
ft := syscall.NsecToFiletime(opts.CurrentTime.UnixNano())
|
||||
verifyTime = &ft
|
||||
}
|
||||
|
||||
// CertGetCertificateChain will traverse Windows's root stores
|
||||
// in an attempt to build a verified certificate chain. Once
|
||||
// it has found a verified chain, it stops. MSDN docs on
|
||||
// CERT_CHAIN_CONTEXT:
|
||||
//
|
||||
// When a CERT_CHAIN_CONTEXT is built, the first simple chain
|
||||
// begins with an end certificate and ends with a self-signed
|
||||
// certificate. If that self-signed certificate is not a root
|
||||
// or otherwise trusted certificate, an attempt is made to
|
||||
// build a new chain. CTLs are used to create the new chain
|
||||
// beginning with the self-signed certificate from the original
|
||||
// chain as the end certificate of the new chain. This process
|
||||
// continues building additional simple chains until the first
|
||||
// self-signed certificate is a trusted certificate or until
|
||||
// an additional simple chain cannot be built.
|
||||
//
|
||||
// The result is that we'll only get a single trusted chain to
|
||||
// return to our caller.
|
||||
var chainCtx *syscall.CertChainContext
|
||||
err = syscall.CertGetCertificateChain(syscall.Handle(0), storeCtx, verifyTime, storeCtx.Store, para, 0, 0, &chainCtx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer syscall.CertFreeCertificateChain(chainCtx)
|
||||
|
||||
err = checkChainTrustStatus(c, chainCtx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if hasDNSName {
|
||||
err = checkChainSSLServerPolicy(c, chainCtx, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
chain, err := extractSimpleChain(chainCtx.Chains, int(chainCtx.ChainCount))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
chains = append(chains, chain)
|
||||
|
||||
return chains, nil
|
||||
}
|
||||
|
||||
func initSystemRoots() {
|
||||
}
|
||||
Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/sec1.go (85 lines, generated, vendored, new file)
@@ -0,0 +1,85 @@
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package x509
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
// START CT CHANGES
|
||||
"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/asn1"
|
||||
// START CT CHANGES
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
const ecPrivKeyVersion = 1
|
||||
|
||||
// ecPrivateKey reflects an ASN.1 Elliptic Curve Private Key Structure.
|
||||
// References:
|
||||
// RFC5915
|
||||
// SEC1 - http://www.secg.org/download/aid-780/sec1-v2.pdf
|
||||
// Per RFC5915 the NamedCurveOID is marked as ASN.1 OPTIONAL, however in
|
||||
// most cases it is not.
|
||||
type ecPrivateKey struct {
|
||||
Version int
|
||||
PrivateKey []byte
|
||||
NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"`
|
||||
PublicKey asn1.BitString `asn1:"optional,explicit,tag:1"`
|
||||
}
|
||||
|
||||
// ParseECPrivateKey parses an ASN.1 Elliptic Curve Private Key Structure.
|
||||
func ParseECPrivateKey(der []byte) (key *ecdsa.PrivateKey, err error) {
|
||||
return parseECPrivateKey(nil, der)
|
||||
}
|
||||
|
||||
// MarshalECPrivateKey marshals an EC private key into ASN.1, DER format.
|
||||
func MarshalECPrivateKey(key *ecdsa.PrivateKey) ([]byte, error) {
|
||||
oid, ok := oidFromNamedCurve(key.Curve)
|
||||
if !ok {
|
||||
return nil, errors.New("x509: unknown elliptic curve")
|
||||
}
|
||||
return asn1.Marshal(ecPrivateKey{
|
||||
Version: 1,
|
||||
PrivateKey: key.D.Bytes(),
|
||||
NamedCurveOID: oid,
|
||||
PublicKey: asn1.BitString{Bytes: elliptic.Marshal(key.Curve, key.X, key.Y)},
|
||||
})
|
||||
}
|
||||
|
||||
// parseECPrivateKey parses an ASN.1 Elliptic Curve Private Key Structure.
|
||||
// The OID for the named curve may be provided from another source (such as
|
||||
// the PKCS8 container) - if it is provided then use this instead of the OID
|
||||
// that may exist in the EC private key structure.
|
||||
func parseECPrivateKey(namedCurveOID *asn1.ObjectIdentifier, der []byte) (key *ecdsa.PrivateKey, err error) {
|
||||
var privKey ecPrivateKey
|
||||
if _, err := asn1.Unmarshal(der, &privKey); err != nil {
|
||||
return nil, errors.New("x509: failed to parse EC private key: " + err.Error())
|
||||
}
|
||||
if privKey.Version != ecPrivKeyVersion {
|
||||
return nil, fmt.Errorf("x509: unknown EC private key version %d", privKey.Version)
|
||||
}
|
||||
|
||||
var curve elliptic.Curve
|
||||
if namedCurveOID != nil {
|
||||
curve = namedCurveFromOID(*namedCurveOID)
|
||||
} else {
|
||||
curve = namedCurveFromOID(privKey.NamedCurveOID)
|
||||
}
|
||||
if curve == nil {
|
||||
return nil, errors.New("x509: unknown elliptic curve")
|
||||
}
|
||||
|
||||
k := new(big.Int).SetBytes(privKey.PrivateKey)
|
||||
if k.Cmp(curve.Params().N) >= 0 {
|
||||
return nil, errors.New("x509: invalid elliptic curve private key value")
|
||||
}
|
||||
priv := new(ecdsa.PrivateKey)
|
||||
priv.Curve = curve
|
||||
priv.D = k
|
||||
priv.X, priv.Y = curve.ScalarBaseMult(privKey.PrivateKey)
|
||||
|
||||
return priv, nil
|
||||
}
|
||||
Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/verify.go (476 lines, generated, vendored, new file)
@@ -0,0 +1,476 @@
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package x509
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type InvalidReason int
|
||||
|
||||
const (
|
||||
// NotAuthorizedToSign results when a certificate is signed by another
|
||||
// which isn't marked as a CA certificate.
|
||||
NotAuthorizedToSign InvalidReason = iota
|
||||
// Expired results when a certificate has expired, based on the time
|
||||
// given in the VerifyOptions.
|
||||
Expired
|
||||
// CANotAuthorizedForThisName results when an intermediate or root
|
||||
// certificate has a name constraint which doesn't include the name
|
||||
// being checked.
|
||||
CANotAuthorizedForThisName
|
||||
// TooManyIntermediates results when a path length constraint is
|
||||
// violated.
|
||||
TooManyIntermediates
|
||||
// IncompatibleUsage results when the certificate's key usage indicates
|
||||
// that it may only be used for a different purpose.
|
||||
IncompatibleUsage
|
||||
)
|
||||
|
||||
// CertificateInvalidError results when an odd error occurs. Users of this
|
||||
// library probably want to handle all these errors uniformly.
|
||||
type CertificateInvalidError struct {
|
||||
Cert *Certificate
|
||||
Reason InvalidReason
|
||||
}
|
||||
|
||||
func (e CertificateInvalidError) Error() string {
|
||||
switch e.Reason {
|
||||
case NotAuthorizedToSign:
|
||||
return "x509: certificate is not authorized to sign other certificates"
|
||||
case Expired:
|
||||
return "x509: certificate has expired or is not yet valid"
|
||||
case CANotAuthorizedForThisName:
|
||||
return "x509: a root or intermediate certificate is not authorized to sign in this domain"
|
||||
case TooManyIntermediates:
|
||||
return "x509: too many intermediates for path length constraint"
|
||||
case IncompatibleUsage:
|
||||
return "x509: certificate specifies an incompatible key usage"
|
||||
}
|
||||
return "x509: unknown error"
|
||||
}
|
||||
|
||||
// HostnameError results when the set of authorized names doesn't match the
|
||||
// requested name.
|
||||
type HostnameError struct {
|
||||
Certificate *Certificate
|
||||
Host string
|
||||
}
|
||||
|
||||
func (h HostnameError) Error() string {
|
||||
c := h.Certificate
|
||||
|
||||
var valid string
|
||||
if ip := net.ParseIP(h.Host); ip != nil {
|
||||
// Trying to validate an IP
|
||||
if len(c.IPAddresses) == 0 {
|
||||
return "x509: cannot validate certificate for " + h.Host + " because it doesn't contain any IP SANs"
|
||||
}
|
||||
for _, san := range c.IPAddresses {
|
||||
if len(valid) > 0 {
|
||||
valid += ", "
|
||||
}
|
||||
valid += san.String()
|
||||
}
|
||||
} else {
|
||||
if len(c.DNSNames) > 0 {
|
||||
valid = strings.Join(c.DNSNames, ", ")
|
||||
} else {
|
||||
valid = c.Subject.CommonName
|
||||
}
|
||||
}
|
||||
return "x509: certificate is valid for " + valid + ", not " + h.Host
|
||||
}
|
||||
|
||||
// UnknownAuthorityError results when the certificate issuer is unknown
|
||||
type UnknownAuthorityError struct {
|
||||
cert *Certificate
|
||||
// hintErr contains an error that may be helpful in determining why an
|
||||
// authority wasn't found.
|
||||
hintErr error
|
||||
// hintCert contains a possible authority certificate that was rejected
|
||||
// because of the error in hintErr.
|
||||
hintCert *Certificate
|
||||
}
|
||||
|
||||
func (e UnknownAuthorityError) Error() string {
|
||||
s := "x509: certificate signed by unknown authority"
|
||||
if e.hintErr != nil {
|
||||
certName := e.hintCert.Subject.CommonName
|
||||
if len(certName) == 0 {
|
||||
if len(e.hintCert.Subject.Organization) > 0 {
|
||||
certName = e.hintCert.Subject.Organization[0]
|
||||
}
|
||||
certName = "serial:" + e.hintCert.SerialNumber.String()
|
||||
}
|
||||
s += fmt.Sprintf(" (possibly because of %q while trying to verify candidate authority certificate %q)", e.hintErr, certName)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// SystemRootsError results when we fail to load the system root certificates.
|
||||
type SystemRootsError struct {
|
||||
}
|
||||
|
||||
func (e SystemRootsError) Error() string {
|
||||
return "x509: failed to load system roots and no roots provided"
|
||||
}
|
||||
|
||||
// VerifyOptions contains parameters for Certificate.Verify. It's a structure
|
||||
// because other PKIX verification APIs have ended up needing many options.
|
||||
type VerifyOptions struct {
|
||||
DNSName string
|
||||
Intermediates *CertPool
|
||||
Roots *CertPool // if nil, the system roots are used
|
||||
CurrentTime time.Time // if zero, the current time is used
|
||||
DisableTimeChecks bool
|
||||
// KeyUsage specifies which Extended Key Usage values are acceptable.
|
||||
// An empty list means ExtKeyUsageServerAuth. Key usage is considered a
|
||||
// constraint down the chain which mirrors Windows CryptoAPI behaviour,
|
||||
// but not the spec. To accept any key usage, include ExtKeyUsageAny.
|
||||
KeyUsages []ExtKeyUsage
|
||||
}
|
||||
|
||||
const (
|
||||
leafCertificate = iota
|
||||
intermediateCertificate
|
||||
rootCertificate
|
||||
)
|
||||
|
||||
// isValid performs validity checks on the c.
|
||||
func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *VerifyOptions) error {
|
||||
if !opts.DisableTimeChecks {
|
||||
now := opts.CurrentTime
|
||||
if now.IsZero() {
|
||||
now = time.Now()
|
||||
}
|
||||
if now.Before(c.NotBefore) || now.After(c.NotAfter) {
|
||||
return CertificateInvalidError{c, Expired}
|
||||
}
|
||||
}
|
||||
|
||||
if len(c.PermittedDNSDomains) > 0 {
|
||||
ok := false
|
||||
for _, domain := range c.PermittedDNSDomains {
|
||||
if opts.DNSName == domain ||
|
||||
(strings.HasSuffix(opts.DNSName, domain) &&
|
||||
len(opts.DNSName) >= 1+len(domain) &&
|
||||
opts.DNSName[len(opts.DNSName)-len(domain)-1] == '.') {
|
||||
ok = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !ok {
|
||||
return CertificateInvalidError{c, CANotAuthorizedForThisName}
|
||||
}
|
||||
}
|
||||
|
||||
// KeyUsage status flags are ignored. From Engineering Security, Peter
|
||||
// Gutmann: A European government CA marked its signing certificates as
|
||||
// being valid for encryption only, but no-one noticed. Another
|
||||
// European CA marked its signature keys as not being valid for
|
||||
// signatures. A different CA marked its own trusted root certificate
|
||||
// as being invalid for certificate signing. Another national CA
|
||||
// distributed a certificate to be used to encrypt data for the
|
||||
// country’s tax authority that was marked as only being usable for
|
||||
// digital signatures but not for encryption. Yet another CA reversed
|
||||
// the order of the bit flags in the keyUsage due to confusion over
|
||||
// encoding endianness, essentially setting a random keyUsage in
|
||||
// certificates that it issued. Another CA created a self-invalidating
|
||||
// certificate by adding a certificate policy statement stipulating
|
||||
// that the certificate had to be used strictly as specified in the
|
||||
// keyUsage, and a keyUsage containing a flag indicating that the RSA
|
||||
// encryption key could only be used for Diffie-Hellman key agreement.
|
||||
|
||||
if certType == intermediateCertificate && (!c.BasicConstraintsValid || !c.IsCA) {
|
||||
return CertificateInvalidError{c, NotAuthorizedToSign}
|
||||
}
|
||||
|
||||
if c.BasicConstraintsValid && c.MaxPathLen >= 0 {
|
||||
numIntermediates := len(currentChain) - 1
|
||||
if numIntermediates > c.MaxPathLen {
|
||||
return CertificateInvalidError{c, TooManyIntermediates}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Verify attempts to verify c by building one or more chains from c to a
|
||||
// certificate in opts.Roots, using certificates in opts.Intermediates if
|
||||
// needed. If successful, it returns one or more chains where the first
|
||||
// element of the chain is c and the last element is from opts.Roots.
|
||||
//
|
||||
// WARNING: this doesn't do any revocation checking.
|
||||
func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err error) {
|
||||
// Use Windows's own verification and chain building.
|
||||
if opts.Roots == nil && runtime.GOOS == "windows" {
|
||||
return c.systemVerify(&opts)
|
||||
}
|
||||
|
||||
if opts.Roots == nil {
|
||||
opts.Roots = systemRootsPool()
|
||||
if opts.Roots == nil {
|
||||
return nil, SystemRootsError{}
|
||||
}
|
||||
}
|
||||
|
||||
err = c.isValid(leafCertificate, nil, &opts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if len(opts.DNSName) > 0 {
|
||||
err = c.VerifyHostname(opts.DNSName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
candidateChains, err := c.buildChains(make(map[int][][]*Certificate), []*Certificate{c}, &opts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
keyUsages := opts.KeyUsages
|
||||
if len(keyUsages) == 0 {
|
||||
keyUsages = []ExtKeyUsage{ExtKeyUsageServerAuth}
|
||||
}
|
||||
|
||||
// If any key usage is acceptable then we're done.
|
||||
for _, usage := range keyUsages {
|
||||
if usage == ExtKeyUsageAny {
|
||||
chains = candidateChains
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
for _, candidate := range candidateChains {
|
||||
if checkChainForKeyUsage(candidate, keyUsages) {
|
||||
chains = append(chains, candidate)
|
||||
}
|
||||
}
|
||||
|
||||
if len(chains) == 0 {
|
||||
err = CertificateInvalidError{c, IncompatibleUsage}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func appendToFreshChain(chain []*Certificate, cert *Certificate) []*Certificate {
|
||||
n := make([]*Certificate, len(chain)+1)
|
||||
copy(n, chain)
|
||||
n[len(chain)] = cert
|
||||
return n
|
||||
}
|
||||
|
||||
func (c *Certificate) buildChains(cache map[int][][]*Certificate, currentChain []*Certificate, opts *VerifyOptions) (chains [][]*Certificate, err error) {
|
||||
possibleRoots, failedRoot, rootErr := opts.Roots.findVerifiedParents(c)
|
||||
for _, rootNum := range possibleRoots {
|
||||
root := opts.Roots.certs[rootNum]
|
||||
err = root.isValid(rootCertificate, currentChain, opts)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
chains = append(chains, appendToFreshChain(currentChain, root))
|
||||
}
|
||||
|
||||
possibleIntermediates, failedIntermediate, intermediateErr := opts.Intermediates.findVerifiedParents(c)
|
||||
nextIntermediate:
|
||||
for _, intermediateNum := range possibleIntermediates {
|
||||
intermediate := opts.Intermediates.certs[intermediateNum]
|
||||
for _, cert := range currentChain {
|
||||
if cert == intermediate {
|
||||
continue nextIntermediate
|
||||
}
|
||||
}
|
||||
err = intermediate.isValid(intermediateCertificate, currentChain, opts)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
var childChains [][]*Certificate
|
||||
childChains, ok := cache[intermediateNum]
|
||||
if !ok {
|
||||
childChains, err = intermediate.buildChains(cache, appendToFreshChain(currentChain, intermediate), opts)
|
||||
cache[intermediateNum] = childChains
|
||||
}
|
||||
chains = append(chains, childChains...)
|
||||
}
|
||||
|
||||
if len(chains) > 0 {
|
||||
err = nil
|
||||
}
|
||||
|
||||
if len(chains) == 0 && err == nil {
|
||||
hintErr := rootErr
|
||||
hintCert := failedRoot
|
||||
if hintErr == nil {
|
||||
hintErr = intermediateErr
|
||||
hintCert = failedIntermediate
|
||||
}
|
||||
err = UnknownAuthorityError{c, hintErr, hintCert}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func matchHostnames(pattern, host string) bool {
|
||||
if len(pattern) == 0 || len(host) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
patternParts := strings.Split(pattern, ".")
|
||||
hostParts := strings.Split(host, ".")
|
||||
|
||||
if len(patternParts) != len(hostParts) {
|
||||
return false
|
||||
}
|
||||
|
||||
for i, patternPart := range patternParts {
|
||||
if patternPart == "*" {
|
||||
continue
|
||||
}
|
||||
if patternPart != hostParts[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// toLowerCaseASCII returns a lower-case version of in. See RFC 6125 6.4.1. We use
|
||||
// an explicitly ASCII function to avoid any sharp corners resulting from
|
||||
// performing Unicode operations on DNS labels.
|
||||
func toLowerCaseASCII(in string) string {
|
||||
// If the string is already lower-case then there's nothing to do.
|
||||
isAlreadyLowerCase := true
|
||||
for _, c := range in {
|
||||
if c == utf8.RuneError {
|
||||
// If we get a UTF-8 error then there might be
|
||||
// upper-case ASCII bytes in the invalid sequence.
|
||||
isAlreadyLowerCase = false
|
||||
break
|
||||
}
|
||||
if 'A' <= c && c <= 'Z' {
|
||||
isAlreadyLowerCase = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if isAlreadyLowerCase {
|
||||
return in
|
||||
}
|
||||
|
||||
out := []byte(in)
|
||||
for i, c := range out {
|
||||
if 'A' <= c && c <= 'Z' {
|
||||
out[i] += 'a' - 'A'
|
||||
}
|
||||
}
|
||||
return string(out)
|
||||
}
|
||||
|
||||
// VerifyHostname returns nil if c is a valid certificate for the named host.
|
||||
// Otherwise it returns an error describing the mismatch.
|
||||
func (c *Certificate) VerifyHostname(h string) error {
|
||||
// IP addresses may be written in [ ].
|
||||
candidateIP := h
|
||||
if len(h) >= 3 && h[0] == '[' && h[len(h)-1] == ']' {
|
||||
candidateIP = h[1 : len(h)-1]
|
||||
}
|
||||
if ip := net.ParseIP(candidateIP); ip != nil {
|
||||
// We only match IP addresses against IP SANs.
|
||||
// https://tools.ietf.org/html/rfc6125#appendix-B.2
|
||||
for _, candidate := range c.IPAddresses {
|
||||
if ip.Equal(candidate) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return HostnameError{c, candidateIP}
|
||||
}
|
||||
|
||||
lowered := toLowerCaseASCII(h)
|
||||
|
||||
if len(c.DNSNames) > 0 {
|
||||
for _, match := range c.DNSNames {
|
||||
if matchHostnames(toLowerCaseASCII(match), lowered) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
// If Subject Alt Name is given, we ignore the common name.
|
||||
} else if matchHostnames(toLowerCaseASCII(c.Subject.CommonName), lowered) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return HostnameError{c, h}
|
||||
}
|
||||
|
||||
func checkChainForKeyUsage(chain []*Certificate, keyUsages []ExtKeyUsage) bool {
|
||||
usages := make([]ExtKeyUsage, len(keyUsages))
|
||||
copy(usages, keyUsages)
|
||||
|
||||
if len(chain) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
usagesRemaining := len(usages)
|
||||
|
||||
// We walk down the list and cross out any usages that aren't supported
|
||||
// by each certificate. If we cross out all the usages, then the chain
|
||||
// is unacceptable.
|
||||
|
||||
for i := len(chain) - 1; i >= 0; i-- {
|
||||
cert := chain[i]
|
||||
if len(cert.ExtKeyUsage) == 0 && len(cert.UnknownExtKeyUsage) == 0 {
|
||||
// The certificate doesn't have any extended key usage specified.
|
||||
continue
|
||||
}
|
||||
|
||||
for _, usage := range cert.ExtKeyUsage {
|
||||
if usage == ExtKeyUsageAny {
|
||||
// The certificate is explicitly good for any usage.
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
const invalidUsage ExtKeyUsage = -1
|
||||
|
||||
NextRequestedUsage:
|
||||
for i, requestedUsage := range usages {
|
||||
if requestedUsage == invalidUsage {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, usage := range cert.ExtKeyUsage {
|
||||
if requestedUsage == usage {
|
||||
continue NextRequestedUsage
|
||||
} else if requestedUsage == ExtKeyUsageServerAuth &&
|
||||
(usage == ExtKeyUsageNetscapeServerGatedCrypto ||
|
||||
usage == ExtKeyUsageMicrosoftServerGatedCrypto) {
|
||||
// In order to support COMODO
|
||||
// certificate chains, we have to
|
||||
// accept Netscape or Microsoft SGC
|
||||
// usages as equal to ServerAuth.
|
||||
continue NextRequestedUsage
|
||||
}
|
||||
}
|
||||
|
||||
usages[i] = invalidUsage
|
||||
usagesRemaining--
|
||||
if usagesRemaining == 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/x509.go (1622 lines, generated, vendored, new file)
File diff suppressed because it is too large
Godeps/_workspace/src/github.com/mreiferson/go-httpclient/.gitignore (1 line, generated, vendored, new file)
@@ -0,0 +1 @@
*.sw[op]
Godeps/_workspace/src/github.com/mreiferson/go-httpclient/.travis.yml (11 lines, generated, vendored, new file)
@@ -0,0 +1,11 @@
language: go
go:
  - 1.1
install:
  - go get github.com/bmizerany/assert
script:
  - pushd $TRAVIS_BUILD_DIR
  - go test
  - popd
notifications:
  email: false
@@ -1,7 +1,3 @@
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 Damian Gryski <damian@gryski.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
@@ -9,13 +5,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
Godeps/_workspace/src/github.com/mreiferson/go-httpclient/README.md (41 lines, generated, vendored, new file)
@@ -0,0 +1,41 @@
## go-httpclient

**requires Go 1.1+** as of `v0.4.0` the API has been completely re-written for Go 1.1 (for a Go
1.0.x compatible release see [1adef50](https://github.com/mreiferson/go-httpclient/tree/1adef50))

[](http://travis-ci.org/mreiferson/go-httpclient)

Provides an HTTP Transport that implements the `RoundTripper` interface and
can be used as a built in replacement for the standard library's, providing:

* connection timeouts
* request timeouts

This is a thin wrapper around `http.Transport` that sets dial timeouts and uses
Go's internal timer scheduler to call the Go 1.1+ `CancelRequest()` API.

### Example

```go
transport := &httpclient.Transport{
    ConnectTimeout:        1*time.Second,
    RequestTimeout:        10*time.Second,
    ResponseHeaderTimeout: 5*time.Second,
}
defer transport.Close()

client := &http.Client{Transport: transport}
req, _ := http.NewRequest("GET", "http://127.0.0.1/test", nil)
resp, err := client.Do(req)
if err != nil {
    return err
}
defer resp.Body.Close()
```

*Note:* you will want to re-use a single client object rather than creating one for each request, otherwise you will end up [leaking connections](https://code.google.com/p/go/issues/detail?id=4049#c3).

### Reference Docs

For API docs see [godoc](http://godoc.org/github.com/mreiferson/go-httpclient).
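(Editor's sketch, not part of the vendored README or this diff: a minimal example of the re-use pattern the note above recommends, sharing one `Transport` and one `http.Client` across requests. The import path is an assumption based on the godep-rewritten paths used elsewhere in this commit; the `Transport` fields and `Close` method are the ones shown in `httpclient.go` below.)

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	// Assumed godep-rewritten import path for the vendored package.
	httpclient "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/mreiferson/go-httpclient"
)

func main() {
	transport := &httpclient.Transport{
		ConnectTimeout:        1 * time.Second,
		ResponseHeaderTimeout: 5 * time.Second,
		RequestTimeout:        10 * time.Second,
	}
	defer transport.Close()

	// One shared client for every request in the program, so keep-alive
	// connections are reused rather than leaked.
	client := &http.Client{Transport: transport}

	for _, url := range []string{"http://127.0.0.1/a", "http://127.0.0.1/b"} {
		resp, err := client.Get(url)
		if err != nil {
			fmt.Println(err)
			continue
		}
		resp.Body.Close() // closing the body lets the connection be reused
	}
}
```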
Godeps/_workspace/src/github.com/mreiferson/go-httpclient/httpclient.go (237 lines, generated, vendored, new file)
@@ -0,0 +1,237 @@
/*
|
||||
Provides an HTTP Transport that implements the `RoundTripper` interface and
|
||||
can be used as a built in replacement for the standard library's, providing:
|
||||
|
||||
* connection timeouts
|
||||
* request timeouts
|
||||
|
||||
This is a thin wrapper around `http.Transport` that sets dial timeouts and uses
|
||||
Go's internal timer scheduler to call the Go 1.1+ `CancelRequest()` API.
|
||||
*/
|
||||
package httpclient
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// returns the current version of the package
|
||||
func Version() string {
|
||||
return "0.4.1"
|
||||
}
|
||||
|
||||
// Transport implements the RoundTripper interface and can be used as a replacement
|
||||
// for Go's built in http.Transport implementing end-to-end request timeouts.
|
||||
//
|
||||
// transport := &httpclient.Transport{
|
||||
// ConnectTimeout: 1*time.Second,
|
||||
// ResponseHeaderTimeout: 5*time.Second,
|
||||
// RequestTimeout: 10*time.Second,
|
||||
// }
|
||||
// defer transport.Close()
|
||||
//
|
||||
// client := &http.Client{Transport: transport}
|
||||
// req, _ := http.NewRequest("GET", "http://127.0.0.1/test", nil)
|
||||
// resp, err := client.Do(req)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// defer resp.Body.Close()
|
||||
//
|
||||
type Transport struct {
|
||||
// Proxy specifies a function to return a proxy for a given
|
||||
// *http.Request. If the function returns a non-nil error, the
|
||||
// request is aborted with the provided error.
|
||||
// If Proxy is nil or returns a nil *url.URL, no proxy is used.
|
||||
Proxy func(*http.Request) (*url.URL, error)
|
||||
|
||||
// Dial specifies the dial function for creating TCP
|
||||
// connections. This will override the Transport's ConnectTimeout and
|
||||
// ReadWriteTimeout settings.
|
||||
// If Dial is nil, a dialer is generated on demand matching the Transport's
|
||||
// options.
|
||||
Dial func(network, addr string) (net.Conn, error)
|
||||
|
||||
// TLSClientConfig specifies the TLS configuration to use with
|
||||
// tls.Client. If nil, the default configuration is used.
|
||||
TLSClientConfig *tls.Config
|
||||
|
||||
// DisableKeepAlives, if true, prevents re-use of TCP connections
|
||||
// between different HTTP requests.
|
||||
DisableKeepAlives bool
|
||||
|
||||
// DisableCompression, if true, prevents the Transport from
|
||||
// requesting compression with an "Accept-Encoding: gzip"
|
||||
// request header when the Request contains no existing
|
||||
// Accept-Encoding value. If the Transport requests gzip on
|
||||
// its own and gets a gzipped response, it's transparently
|
||||
// decoded in the Response.Body. However, if the user
|
||||
// explicitly requested gzip it is not automatically
|
||||
// uncompressed.
|
||||
DisableCompression bool
|
||||
|
||||
// MaxIdleConnsPerHost, if non-zero, controls the maximum idle
|
||||
// (keep-alive) to keep per-host. If zero,
|
||||
// http.DefaultMaxIdleConnsPerHost is used.
|
||||
MaxIdleConnsPerHost int
|
||||
|
||||
// ConnectTimeout, if non-zero, is the maximum amount of time a dial will wait for
|
||||
// a connect to complete.
|
||||
ConnectTimeout time.Duration
|
||||
|
||||
// ResponseHeaderTimeout, if non-zero, specifies the amount of
|
||||
// time to wait for a server's response headers after fully
|
||||
// writing the request (including its body, if any). This
|
||||
// time does not include the time to read the response body.
|
||||
ResponseHeaderTimeout time.Duration
|
||||
|
||||
// RequestTimeout, if non-zero, specifies the amount of time for the entire
|
||||
// request to complete (including all of the above timeouts + entire response body).
|
||||
// This should never be less than the sum total of the above two timeouts.
|
||||
RequestTimeout time.Duration
|
||||
|
||||
// ReadWriteTimeout, if non-zero, will set a deadline for every Read and
|
||||
// Write operation on the request connection.
|
||||
ReadWriteTimeout time.Duration
|
||||
|
||||
// TCPWriteBufferSize, the size of the operating system's write
|
||||
// buffer associated with the connection.
|
||||
TCPWriteBufferSize int
|
||||
|
||||
// TCPReadBuffserSize, the size of the operating system's read
|
||||
// buffer associated with the connection.
|
||||
TCPReadBufferSize int
|
||||
|
||||
starter sync.Once
|
||||
transport *http.Transport
|
||||
}
|
||||
|
||||
// Close cleans up the Transport, currently a no-op
|
||||
func (t *Transport) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Transport) lazyStart() {
|
||||
if t.Dial == nil {
|
||||
t.Dial = func(netw, addr string) (net.Conn, error) {
|
||||
c, err := net.DialTimeout(netw, addr, t.ConnectTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if t.TCPReadBufferSize != 0 || t.TCPWriteBufferSize != 0 {
|
||||
if tcpCon, ok := c.(*net.TCPConn); ok {
|
||||
if t.TCPWriteBufferSize != 0 {
|
||||
if err = tcpCon.SetWriteBuffer(t.TCPWriteBufferSize); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if t.TCPReadBufferSize != 0 {
|
||||
if err = tcpCon.SetReadBuffer(t.TCPReadBufferSize); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
err = errors.New("Not Tcp Connection")
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if t.ReadWriteTimeout > 0 {
|
||||
timeoutConn := &rwTimeoutConn{
|
||||
TCPConn: c.(*net.TCPConn),
|
||||
rwTimeout: t.ReadWriteTimeout,
|
||||
}
|
||||
return timeoutConn, nil
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
}
|
||||
|
||||
t.transport = &http.Transport{
|
||||
Dial: t.Dial,
|
||||
Proxy: t.Proxy,
|
||||
TLSClientConfig: t.TLSClientConfig,
|
||||
DisableKeepAlives: t.DisableKeepAlives,
|
||||
DisableCompression: t.DisableCompression,
|
||||
MaxIdleConnsPerHost: t.MaxIdleConnsPerHost,
|
||||
ResponseHeaderTimeout: t.ResponseHeaderTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Transport) CancelRequest(req *http.Request) {
|
||||
t.starter.Do(t.lazyStart)
|
||||
|
||||
t.transport.CancelRequest(req)
|
||||
}
|
||||
|
||||
func (t *Transport) CloseIdleConnections() {
|
||||
t.starter.Do(t.lazyStart)
|
||||
|
||||
t.transport.CloseIdleConnections()
|
||||
}
|
||||
|
||||
func (t *Transport) RegisterProtocol(scheme string, rt http.RoundTripper) {
|
||||
t.starter.Do(t.lazyStart)
|
||||
|
||||
t.transport.RegisterProtocol(scheme, rt)
|
||||
}
|
||||
|
||||
func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
|
||||
t.starter.Do(t.lazyStart)
|
||||
|
||||
if t.RequestTimeout > 0 {
|
||||
timer := time.AfterFunc(t.RequestTimeout, func() {
|
||||
t.transport.CancelRequest(req)
|
||||
})
|
||||
|
||||
resp, err = t.transport.RoundTrip(req)
|
||||
if err != nil {
|
||||
timer.Stop()
|
||||
} else {
|
||||
resp.Body = &bodyCloseInterceptor{ReadCloser: resp.Body, timer: timer}
|
||||
}
|
||||
} else {
|
||||
resp, err = t.transport.RoundTrip(req)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
type bodyCloseInterceptor struct {
|
||||
io.ReadCloser
|
||||
timer *time.Timer
|
||||
}
|
||||
|
||||
func (bci *bodyCloseInterceptor) Close() error {
|
||||
bci.timer.Stop()
|
||||
return bci.ReadCloser.Close()
|
||||
}
|
||||
|
||||
// A net.Conn that sets a deadline for every Read or Write operation
|
||||
type rwTimeoutConn struct {
|
||||
*net.TCPConn
|
||||
rwTimeout time.Duration
|
||||
}
|
||||
|
||||
func (c *rwTimeoutConn) Read(b []byte) (int, error) {
|
||||
err := c.TCPConn.SetDeadline(time.Now().Add(c.rwTimeout))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return c.TCPConn.Read(b)
|
||||
}
|
||||
|
||||
func (c *rwTimeoutConn) Write(b []byte) (int, error) {
|
||||
err := c.TCPConn.SetDeadline(time.Now().Add(c.rwTimeout))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return c.TCPConn.Write(b)
|
||||
}
|
||||
@@ -1,453 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ocsp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/sha1"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"encoding/hex"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestOCSPDecode(t *testing.T) {
|
||||
responseBytes, _ := hex.DecodeString(ocspResponseHex)
|
||||
resp, err := ParseResponse(responseBytes, nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
expected := Response{
|
||||
Status: Good,
|
||||
SerialNumber: big.NewInt(0x1d0fa),
|
||||
RevocationReason: Unspecified,
|
||||
ThisUpdate: time.Date(2010, 7, 7, 15, 1, 5, 0, time.UTC),
|
||||
NextUpdate: time.Date(2010, 7, 7, 18, 35, 17, 0, time.UTC),
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(resp.ThisUpdate, expected.ThisUpdate) {
|
||||
t.Errorf("resp.ThisUpdate: got %d, want %d", resp.ThisUpdate, expected.ThisUpdate)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(resp.NextUpdate, expected.NextUpdate) {
|
||||
t.Errorf("resp.NextUpdate: got %d, want %d", resp.NextUpdate, expected.NextUpdate)
|
||||
}
|
||||
|
||||
if resp.Status != expected.Status {
|
||||
t.Errorf("resp.Status: got %d, want %d", resp.Status, expected.Status)
|
||||
}
|
||||
|
||||
if resp.SerialNumber.Cmp(expected.SerialNumber) != 0 {
|
||||
t.Errorf("resp.SerialNumber: got %x, want %x", resp.SerialNumber, expected.SerialNumber)
|
||||
}
|
||||
|
||||
if resp.RevocationReason != expected.RevocationReason {
|
||||
t.Errorf("resp.RevocationReason: got %d, want %d", resp.RevocationReason, expected.RevocationReason)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOCSPDecodeWithoutCert(t *testing.T) {
|
||||
responseBytes, _ := hex.DecodeString(ocspResponseWithoutCertHex)
|
||||
_, err := ParseResponse(responseBytes, nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOCSPSignature(t *testing.T) {
|
||||
issuerCert, _ := hex.DecodeString(startComHex)
|
||||
issuer, err := x509.ParseCertificate(issuerCert)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
response, _ := hex.DecodeString(ocspResponseHex)
|
||||
if _, err := ParseResponse(response, issuer); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOCSPRequest(t *testing.T) {
|
||||
leafCert, _ := hex.DecodeString(leafCertHex)
|
||||
cert, err := x509.ParseCertificate(leafCert)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
issuerCert, _ := hex.DecodeString(issuerCertHex)
|
||||
issuer, err := x509.ParseCertificate(issuerCert)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
request, err := CreateRequest(cert, issuer, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expectedBytes, _ := hex.DecodeString(ocspRequestHex)
|
||||
if !bytes.Equal(request, expectedBytes) {
|
||||
t.Errorf("request: got %x, wanted %x", request, expectedBytes)
|
||||
}
|
||||
|
||||
decodedRequest, err := ParseRequest(expectedBytes)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if decodedRequest.HashAlgorithm != crypto.SHA1 {
|
||||
t.Errorf("request.HashAlgorithm: got %v, want %v", decodedRequest.HashAlgorithm, crypto.SHA1)
|
||||
}
|
||||
|
||||
var publicKeyInfo struct {
|
||||
Algorithm pkix.AlgorithmIdentifier
|
||||
PublicKey asn1.BitString
|
||||
}
|
||||
_, err = asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
h := sha1.New()
|
||||
h.Write(publicKeyInfo.PublicKey.RightAlign())
|
||||
issuerKeyHash := h.Sum(nil)
|
||||
|
||||
h.Reset()
|
||||
h.Write(issuer.RawSubject)
|
||||
issuerNameHash := h.Sum(nil)
|
||||
|
||||
if got := decodedRequest.IssuerKeyHash; !bytes.Equal(got, issuerKeyHash) {
|
||||
t.Errorf("request.IssuerKeyHash: got %x, want %x", got, issuerKeyHash)
|
||||
}
|
||||
|
||||
if got := decodedRequest.IssuerNameHash; !bytes.Equal(got, issuerNameHash) {
|
||||
t.Errorf("request.IssuerKeyHash: got %x, want %x", got, issuerNameHash)
|
||||
}
|
||||
|
||||
if got := decodedRequest.SerialNumber; got.Cmp(cert.SerialNumber) != 0 {
|
||||
t.Errorf("request.SerialNumber: got %x, want %x", got, cert.SerialNumber)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOCSPResponse(t *testing.T) {
|
||||
leafCert, _ := hex.DecodeString(leafCertHex)
|
||||
leaf, err := x509.ParseCertificate(leafCert)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
issuerCert, _ := hex.DecodeString(issuerCertHex)
|
||||
issuer, err := x509.ParseCertificate(issuerCert)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
responderCert, _ := hex.DecodeString(responderCertHex)
|
||||
responder, err := x509.ParseCertificate(responderCert)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
responderPrivateKeyDER, _ := hex.DecodeString(responderPrivateKeyHex)
|
||||
responderPrivateKey, err := x509.ParsePKCS1PrivateKey(responderPrivateKeyDER)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
producedAt := time.Now().Truncate(time.Minute)
|
||||
thisUpdate := time.Date(2010, 7, 7, 15, 1, 5, 0, time.UTC)
|
||||
nextUpdate := time.Date(2010, 7, 7, 18, 35, 17, 0, time.UTC)
|
||||
template := Response{
|
||||
Status: Revoked,
|
||||
SerialNumber: leaf.SerialNumber,
|
||||
ThisUpdate: thisUpdate,
|
||||
NextUpdate: nextUpdate,
|
||||
RevokedAt: thisUpdate,
|
||||
RevocationReason: KeyCompromise,
|
||||
Certificate: responder,
|
||||
}
|
||||
|
||||
responseBytes, err := CreateResponse(issuer, responder, template, responderPrivateKey)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
resp, err := ParseResponse(responseBytes, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(resp.ThisUpdate, template.ThisUpdate) {
|
||||
t.Errorf("resp.ThisUpdate: got %d, want %d", resp.ThisUpdate, template.ThisUpdate)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(resp.NextUpdate, template.NextUpdate) {
|
||||
t.Errorf("resp.NextUpdate: got %d, want %d", resp.NextUpdate, template.NextUpdate)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(resp.RevokedAt, template.RevokedAt) {
|
||||
t.Errorf("resp.RevokedAt: got %d, want %d", resp.RevokedAt, template.RevokedAt)
|
||||
}
|
||||
|
||||
if !resp.ProducedAt.Equal(producedAt) {
|
||||
t.Errorf("resp.ProducedAt: got %d, want %d", resp.ProducedAt, producedAt)
|
||||
}
|
||||
|
||||
if resp.Status != template.Status {
|
||||
t.Errorf("resp.Status: got %d, want %d", resp.Status, template.Status)
|
||||
}
|
||||
|
||||
if resp.SerialNumber.Cmp(template.SerialNumber) != 0 {
|
||||
t.Errorf("resp.SerialNumber: got %x, want %x", resp.SerialNumber, template.SerialNumber)
|
||||
}
|
||||
|
||||
if resp.RevocationReason != template.RevocationReason {
|
||||
t.Errorf("resp.RevocationReason: got %d, want %d", resp.RevocationReason, template.RevocationReason)
|
||||
}
|
||||
}
|
||||
|
||||
// This OCSP response was taken from Thawte's public OCSP responder.
|
||||
// To recreate:
|
||||
// $ openssl s_client -tls1 -showcerts -servername www.google.com -connect www.google.com:443
|
||||
// Copy and paste the first certificate into /tmp/cert.crt and the second into
|
||||
// /tmp/intermediate.crt
|
||||
// $ openssl ocsp -issuer /tmp/intermediate.crt -cert /tmp/cert.crt -url http://ocsp.thawte.com -resp_text -respout /tmp/ocsp.der
|
||||
// Then hex encode the result:
|
||||
// $ python -c 'print file("/tmp/ocsp.der", "r").read().encode("hex")'
|
||||
|
||||
const ocspResponseHex = "308206bc0a0100a08206b5308206b106092b0601050507300101048206a23082069e3081" +
|
||||
"c9a14e304c310b300906035504061302494c31163014060355040a130d5374617274436f" +
|
||||
"6d204c74642e312530230603550403131c5374617274436f6d20436c6173732031204f43" +
|
||||
"5350205369676e6572180f32303130303730373137333531375a30663064303c30090605" +
|
||||
"2b0e03021a050004146568874f40750f016a3475625e1f5c93e5a26d580414eb4234d098" +
|
||||
"b0ab9ff41b6b08f7cc642eef0e2c45020301d0fa8000180f323031303037303731353031" +
|
||||
"30355aa011180f32303130303730373138333531375a300d06092a864886f70d01010505" +
|
||||
"000382010100ab557ff070d1d7cebbb5f0ec91a15c3fed22eb2e1b8244f1b84545f013a4" +
|
||||
"fb46214c5e3fbfbebb8a56acc2b9db19f68fd3c3201046b3824d5ba689f99864328710cb" +
|
||||
"467195eb37d84f539e49f859316b32964dc3e47e36814ce94d6c56dd02733b1d0802f7ff" +
|
||||
"4eebdbbd2927dcf580f16cbc290f91e81b53cb365e7223f1d6e20a88ea064104875e0145" +
|
||||
"672b20fc14829d51ca122f5f5d77d3ad6c83889c55c7dc43680ba2fe3cef8b05dbcabdc0" +
|
||||
"d3e09aaf9725597f8c858c2fa38c0d6aed2e6318194420dd1a1137445d13e1c97ab47896" +
|
||||
"17a4e08925f46f867b72e3a4dc1f08cb870b2b0717f7207faa0ac512e628a029aba7457a" +
|
||||
"e63dcf3281e2162d9349a08204ba308204b6308204b23082039aa003020102020101300d" +
|
||||
"06092a864886f70d010105050030818c310b300906035504061302494c31163014060355" +
|
||||
"040a130d5374617274436f6d204c74642e312b3029060355040b13225365637572652044" +
|
||||
"69676974616c204365727469666963617465205369676e696e6731383036060355040313" +
|
||||
"2f5374617274436f6d20436c6173732031205072696d61727920496e7465726d65646961" +
|
||||
"746520536572766572204341301e170d3037313032353030323330365a170d3132313032" +
|
||||
"333030323330365a304c310b300906035504061302494c31163014060355040a130d5374" +
|
||||
"617274436f6d204c74642e312530230603550403131c5374617274436f6d20436c617373" +
|
||||
"2031204f435350205369676e657230820122300d06092a864886f70d0101010500038201" +
|
||||
"0f003082010a0282010100b9561b4c45318717178084e96e178df2255e18ed8d8ecc7c2b" +
|
||||
"7b51a6c1c2e6bf0aa3603066f132fe10ae97b50e99fa24b83fc53dd2777496387d14e1c3" +
|
||||
"a9b6a4933e2ac12413d085570a95b8147414a0bc007c7bcf222446ef7f1a156d7ea1c577" +
|
||||
"fc5f0facdfd42eb0f5974990cb2f5cefebceef4d1bdc7ae5c1075c5a99a93171f2b0845b" +
|
||||
"4ff0864e973fcfe32f9d7511ff87a3e943410c90a4493a306b6944359340a9ca96f02b66" +
|
||||
"ce67f028df2980a6aaee8d5d5d452b8b0eb93f923cc1e23fcccbdbe7ffcb114d08fa7a6a" +
|
||||
"3c404f825d1a0e715935cf623a8c7b59670014ed0622f6089a9447a7a19010f7fe58f841" +
|
||||
"29a2765ea367824d1c3bb2fda308530203010001a382015c30820158300c0603551d1301" +
|
||||
"01ff04023000300b0603551d0f0404030203a8301e0603551d250417301506082b060105" +
|
||||
"0507030906092b0601050507300105301d0603551d0e0416041445e0a36695414c5dd449" +
|
||||
"bc00e33cdcdbd2343e173081a80603551d230481a030819d8014eb4234d098b0ab9ff41b" +
|
||||
"6b08f7cc642eef0e2c45a18181a47f307d310b300906035504061302494c311630140603" +
|
||||
"55040a130d5374617274436f6d204c74642e312b3029060355040b132253656375726520" +
|
||||
"4469676974616c204365727469666963617465205369676e696e67312930270603550403" +
|
||||
"13205374617274436f6d2043657274696669636174696f6e20417574686f726974798201" +
|
||||
"0a30230603551d12041c301a8618687474703a2f2f7777772e737461727473736c2e636f" +
|
||||
"6d2f302c06096086480186f842010d041f161d5374617274436f6d205265766f63617469" +
|
||||
"6f6e20417574686f72697479300d06092a864886f70d01010505000382010100182d2215" +
|
||||
"8f0fc0291324fa8574c49bb8ff2835085adcbf7b7fc4191c397ab6951328253fffe1e5ec" +
|
||||
"2a7da0d50fca1a404e6968481366939e666c0a6209073eca57973e2fefa9ed1718e8176f" +
|
||||
"1d85527ff522c08db702e3b2b180f1cbff05d98128252cf0f450f7dd2772f4188047f19d" +
|
||||
"c85317366f94bc52d60f453a550af58e308aaab00ced33040b62bf37f5b1ab2a4f7f0f80" +
|
||||
"f763bf4d707bc8841d7ad9385ee2a4244469260b6f2bf085977af9074796048ecc2f9d48" +
|
||||
"a1d24ce16e41a9941568fec5b42771e118f16c106a54ccc339a4b02166445a167902e75e" +
|
||||
"6d8620b0825dcd18a069b90fd851d10fa8effd409deec02860d26d8d833f304b10669b42"
|
||||
|
||||
const startComHex = "308206343082041ca003020102020118300d06092a864886f70d0101050500307d310b30" +
|
||||
"0906035504061302494c31163014060355040a130d5374617274436f6d204c74642e312b" +
|
||||
"3029060355040b1322536563757265204469676974616c20436572746966696361746520" +
|
||||
"5369676e696e6731293027060355040313205374617274436f6d20436572746966696361" +
|
||||
"74696f6e20417574686f72697479301e170d3037313032343230353431375a170d313731" +
|
||||
"3032343230353431375a30818c310b300906035504061302494c31163014060355040a13" +
|
||||
"0d5374617274436f6d204c74642e312b3029060355040b13225365637572652044696769" +
|
||||
"74616c204365727469666963617465205369676e696e67313830360603550403132f5374" +
|
||||
"617274436f6d20436c6173732031205072696d61727920496e7465726d65646961746520" +
|
||||
"53657276657220434130820122300d06092a864886f70d01010105000382010f00308201" +
|
||||
"0a0282010100b689c6acef09527807ac9263d0f44418188480561f91aee187fa3250b4d3" +
|
||||
"4706f0e6075f700e10f71dc0ce103634855a0f92ac83c6ac58523fba38e8fce7a724e240" +
|
||||
"a60876c0926e9e2a6d4d3f6e61200adb59ded27d63b33e46fefa215118d7cd30a6ed076e" +
|
||||
"3b7087b4f9faebee823c056f92f7a4dc0a301e9373fe07cad75f809d225852ae06da8b87" +
|
||||
"2369b0e42ad8ea83d2bdf371db705a280faf5a387045123f304dcd3baf17e50fcba0a95d" +
|
||||
"48aab16150cb34cd3c5cc30be810c08c9bf0030362feb26c3e720eee1c432ac9480e5739" +
|
||||
"c43121c810c12c87fe5495521f523c31129b7fe7c0a0a559d5e28f3ef0d5a8e1d77031a9" +
|
||||
"c4b3cfaf6d532f06f4a70203010001a38201ad308201a9300f0603551d130101ff040530" +
|
||||
"030101ff300e0603551d0f0101ff040403020106301d0603551d0e04160414eb4234d098" +
|
||||
"b0ab9ff41b6b08f7cc642eef0e2c45301f0603551d230418301680144e0bef1aa4405ba5" +
|
||||
"17698730ca346843d041aef2306606082b06010505070101045a3058302706082b060105" +
|
||||
"05073001861b687474703a2f2f6f6373702e737461727473736c2e636f6d2f6361302d06" +
|
||||
"082b060105050730028621687474703a2f2f7777772e737461727473736c2e636f6d2f73" +
|
||||
"667363612e637274305b0603551d1f045430523027a025a0238621687474703a2f2f7777" +
|
||||
"772e737461727473736c2e636f6d2f73667363612e63726c3027a025a023862168747470" +
|
||||
"3a2f2f63726c2e737461727473736c2e636f6d2f73667363612e63726c3081800603551d" +
|
||||
"20047930773075060b2b0601040181b5370102013066302e06082b060105050702011622" +
|
||||
"687474703a2f2f7777772e737461727473736c2e636f6d2f706f6c6963792e7064663034" +
|
||||
"06082b060105050702011628687474703a2f2f7777772e737461727473736c2e636f6d2f" +
|
||||
"696e7465726d6564696174652e706466300d06092a864886f70d01010505000382020100" +
|
||||
"2109493ea5886ee00b8b48da314d8ff75657a2e1d36257e9b556f38545753be5501f048b" +
|
||||
"e6a05a3ee700ae85d0fbff200364cbad02e1c69172f8a34dd6dee8cc3fa18aa2e37c37a7" +
|
||||
"c64f8f35d6f4d66e067bdd21d9cf56ffcb302249fe8904f385e5aaf1e71fe875904dddf9" +
|
||||
"46f74234f745580c110d84b0c6da5d3ef9019ee7e1da5595be741c7bfc4d144fac7e5547" +
|
||||
"7d7bf4a50d491e95e8f712c1ccff76a62547d0f37535be97b75816ebaa5c786fec5330af" +
|
||||
"ea044dcca902e3f0b60412f630b1113d904e5664d7dc3c435f7339ef4baf87ebf6fe6888" +
|
||||
"4472ead207c669b0c1a18bef1749d761b145485f3b2021e95bb2ccf4d7e931f50b15613b" +
|
||||
"7a94e3ebd9bc7f94ae6ae3626296a8647cb887f399327e92a252bebbf865cfc9f230fc8b" +
|
||||
"c1c2a696d75f89e15c3480f58f47072fb491bfb1a27e5f4b5ad05b9f248605515a690365" +
|
||||
"434971c5e06f94346bf61bd8a9b04c7e53eb8f48dfca33b548fa364a1a53a6330cd089cd" +
|
||||
"4915cd89313c90c072d7654b52358a461144b93d8e2865a63e799e5c084429adb035112e" +
|
||||
"214eb8d2e7103e5d8483b3c3c2e4d2c6fd094b7409ddf1b3d3193e800da20b19f038e7c5" +
|
||||
"c2afe223db61e29d5c6e2089492e236ab262c145b49faf8ba7f1223bf87de290d07a19fb" +
|
||||
"4a4ce3d27d5f4a8303ed27d6239e6b8db459a2d9ef6c8229dd75193c3f4c108defbb7527" +
|
||||
"d2ae83a7a8ce5ba7"
|
||||
|
||||
const ocspResponseWithoutCertHex = "308201d40a0100a08201cd308201c906092b0601050507300101048201ba3082" +
|
||||
"01b630819fa2160414884451ff502a695e2d88f421bad90cf2cecbea7c180f3230313330" +
|
||||
"3631383037323434335a30743072304a300906052b0e03021a0500041448b60d38238df8" +
|
||||
"456e4ee5843ea394111802979f0414884451ff502a695e2d88f421bad90cf2cecbea7c02" +
|
||||
"1100f78b13b946fc9635d8ab49de9d2148218000180f3230313330363138303732343433" +
|
||||
"5aa011180f32303133303632323037323434335a300d06092a864886f70d010105050003" +
|
||||
"82010100103e18b3d297a5e7a6c07a4fc52ac46a15c0eba96f3be17f0ffe84de5b8c8e05" +
|
||||
"5a8f577586a849dc4abd6440eb6fedde4622451e2823c1cbf3558b4e8184959c9fe96eff" +
|
||||
"8bc5f95866c58c6d087519faabfdae37e11d9874f1bc0db292208f645dd848185e4dd38b" +
|
||||
"6a8547dfa7b74d514a8470015719064d35476b95bebb03d4d2845c5ca15202d2784878f2" +
|
||||
"0f904c24f09736f044609e9c271381713400e563023d212db422236440c6f377bbf24b2b" +
|
||||
"9e7dec8698e36a8df68b7592ad3489fb2937afb90eb85d2aa96b81c94c25057dbd4759d9" +
|
||||
"20a1a65c7f0b6427a224b3c98edd96b9b61f706099951188b0289555ad30a216fb774651" +
|
||||
"5a35fca2e054dfa8"
|
||||
|
||||
const ocspRequestHex = "3051304f304d304b3049300906052b0e03021a05000414c0fe0278fc99188891b3f212e9" +
|
||||
"c7e1b21ab7bfc004140dfc1df0a9e0f01ce7f2b213177e6f8d157cd4f60210017f77deb3" +
|
||||
"bcbb235d44ccc7dba62e72"
|
||||
|
||||
const leafCertHex = "308203c830820331a0030201020210017f77deb3bcbb235d44ccc7dba62e72300d06092a" +
|
||||
"864886f70d01010505003081ba311f301d060355040a1316566572695369676e20547275" +
|
||||
"7374204e6574776f726b31173015060355040b130e566572695369676e2c20496e632e31" +
|
||||
"333031060355040b132a566572695369676e20496e7465726e6174696f6e616c20536572" +
|
||||
"766572204341202d20436c617373203331493047060355040b13407777772e7665726973" +
|
||||
"69676e2e636f6d2f43505320496e636f72702e6279205265662e204c494142494c495459" +
|
||||
"204c54442e286329393720566572695369676e301e170d3132303632313030303030305a" +
|
||||
"170d3133313233313233353935395a3068310b3009060355040613025553311330110603" +
|
||||
"550408130a43616c69666f726e6961311230100603550407130950616c6f20416c746f31" +
|
||||
"173015060355040a130e46616365626f6f6b2c20496e632e311730150603550403140e2a" +
|
||||
"2e66616365626f6f6b2e636f6d30819f300d06092a864886f70d010101050003818d0030" +
|
||||
"818902818100ae94b171e2deccc1693e051063240102e0689ae83c39b6b3e74b97d48d7b" +
|
||||
"23689100b0b496ee62f0e6d356bcf4aa0f50643402f5d1766aa972835a7564723f39bbef" +
|
||||
"5290ded9bcdbf9d3d55dfad23aa03dc604c54d29cf1d4b3bdbd1a809cfae47b44c7eae17" +
|
||||
"c5109bee24a9cf4a8d911bb0fd0415ae4c3f430aa12a557e2ae10203010001a382011e30" +
|
||||
"82011a30090603551d130402300030440603551d20043d303b3039060b6086480186f845" +
|
||||
"01071703302a302806082b06010505070201161c68747470733a2f2f7777772e76657269" +
|
||||
"7369676e2e636f6d2f727061303c0603551d1f043530333031a02fa02d862b687474703a" +
|
||||
"2f2f535652496e746c2d63726c2e766572697369676e2e636f6d2f535652496e746c2e63" +
|
||||
"726c301d0603551d250416301406082b0601050507030106082b06010505070302300b06" +
|
||||
"03551d0f0404030205a0303406082b0601050507010104283026302406082b0601050507" +
|
||||
"30018618687474703a2f2f6f6373702e766572697369676e2e636f6d30270603551d1104" +
|
||||
"20301e820e2a2e66616365626f6f6b2e636f6d820c66616365626f6f6b2e636f6d300d06" +
|
||||
"092a864886f70d0101050500038181005b6c2b75f8ed30aa51aad36aba595e555141951f" +
|
||||
"81a53b447910ac1f76ff78fc2781616b58f3122afc1c87010425e9ed43df1a7ba6498060" +
|
||||
"67e2688af03db58c7df4ee03309a6afc247ccb134dc33e54c6bc1d5133a532a73273b1d7" +
|
||||
"9cadc08e7e1a83116d34523340b0305427a21742827c98916698ee7eaf8c3bdd71700817"
|
||||
|
||||
const issuerCertHex = "30820383308202eca003020102021046fcebbab4d02f0f926098233f93078f300d06092a" +
|
||||
"864886f70d0101050500305f310b300906035504061302555331173015060355040a130e" +
|
||||
"566572695369676e2c20496e632e31373035060355040b132e436c617373203320507562" +
|
||||
"6c6963205072696d6172792043657274696669636174696f6e20417574686f7269747930" +
|
||||
"1e170d3937303431373030303030305a170d3136313032343233353935395a3081ba311f" +
|
||||
"301d060355040a1316566572695369676e205472757374204e6574776f726b3117301506" +
|
||||
"0355040b130e566572695369676e2c20496e632e31333031060355040b132a5665726953" +
|
||||
"69676e20496e7465726e6174696f6e616c20536572766572204341202d20436c61737320" +
|
||||
"3331493047060355040b13407777772e766572697369676e2e636f6d2f43505320496e63" +
|
||||
"6f72702e6279205265662e204c494142494c495459204c54442e28632939372056657269" +
|
||||
"5369676e30819f300d06092a864886f70d010101050003818d0030818902818100d88280" +
|
||||
"e8d619027d1f85183925a2652be1bfd405d3bce6363baaf04c6c5bb6e7aa3c734555b2f1" +
|
||||
"bdea9742ed9a340a15d4a95cf54025ddd907c132b2756cc4cabba3fe56277143aa63f530" +
|
||||
"3e9328e5faf1093bf3b74d4e39f75c495ab8c11dd3b28afe70309542cbfe2b518b5a3c3a" +
|
||||
"f9224f90b202a7539c4f34e7ab04b27b6f0203010001a381e33081e0300f0603551d1304" +
|
||||
"0830060101ff02010030440603551d20043d303b3039060b6086480186f8450107010130" +
|
||||
"2a302806082b06010505070201161c68747470733a2f2f7777772e766572697369676e2e" +
|
||||
"636f6d2f43505330340603551d25042d302b06082b0601050507030106082b0601050507" +
|
||||
"030206096086480186f8420401060a6086480186f845010801300b0603551d0f04040302" +
|
||||
"0106301106096086480186f842010104040302010630310603551d1f042a30283026a024" +
|
||||
"a0228620687474703a2f2f63726c2e766572697369676e2e636f6d2f706361332e63726c" +
|
||||
"300d06092a864886f70d010105050003818100408e4997968a73dd8e4def3e61b7caa062" +
|
||||
"adf40e0abb753de26ed82cc7bff4b98c369bcaa2d09c724639f6a682036511c4bcbf2da6" +
|
||||
"f5d93b0ab598fab378b91ef22b4c62d5fdb27a1ddf33fd73f9a5d82d8c2aead1fcb028b6" +
|
||||
"e94948134b838a1b487b24f738de6f4154b8ab576b06dfc7a2d4a9f6f136628088f28b75" +
|
||||
"d68071"
|
||||
|
||||
// Key and certificate for the OCSP responder were not taken from the Thawte
|
||||
// responder, since CreateResponse requires that we have the private key.
|
||||
// Instead, they were generated randomly.
|
||||
const responderPrivateKeyHex = "308204a40201000282010100e8155f2d3e6f2e8d14c62a788bd462f9f844e7a6977c83ef" +
|
||||
"1099f0f6616ec5265b56f356e62c5400f0b06a2e7945a82752c636df32a895152d6074df" +
|
||||
"1701dc6ccfbcbec75a70bd2b55ae2be7e6cad3b5fd4cd5b7790ab401a436d3f5f346074f" +
|
||||
"fde8a99d5b723350f0a112076614b12ef79c78991b119453445acf2416ab0046b540db14" +
|
||||
"c9fc0f27b8989ad0f63aa4b8aefc91aa8a72160c36307c60fec78a93d3fddf4259902aa7" +
|
||||
"7e7332971c7d285b6a04f648993c6922a3e9da9adf5f81508c3228791843e5d49f24db2f" +
|
||||
"1290bafd97e655b1049a199f652cd603c4fafa330c390b0da78fbbc67e8fa021cbd74eb9" +
|
||||
"6222b12ace31a77dcf920334dc94581b02030100010282010100bcf0b93d7238bda329a8" +
|
||||
"72e7149f61bcb37c154330ccb3f42a85c9002c2e2bdea039d77d8581cd19bed94078794e" +
|
||||
"56293d601547fc4bf6a2f9002fe5772b92b21b254403b403585e3130cc99ccf08f0ef81a" +
|
||||
"575b38f597ba4660448b54f44bfbb97072b5a2bf043bfeca828cf7741d13698e3f38162b" +
|
||||
"679faa646b82abd9a72c5c7d722c5fc577a76d2c2daac588accad18516d1bbad10b0dfa2" +
|
||||
"05cfe246b59e28608a43942e1b71b0c80498075121de5b900d727c31c42c78cf1db5c0aa" +
|
||||
"5b491e10ea4ed5c0962aaf2ae025dd81fa4ce490d9d6b4a4465411d8e542fc88617e5695" +
|
||||
"1aa4fc8ea166f2b4d0eb89ef17f2b206bd5f1014bf8fe0e71fe62f2cccf102818100f2dc" +
|
||||
"ddf878d553286daad68bac4070a82ffec3dc4666a2750f47879eec913f91836f1d976b60" +
|
||||
"daf9356e078446dafab5bd2e489e5d64f8572ba24a4ba4f3729b5e106c4dd831cc2497a7" +
|
||||
"e6c7507df05cb64aeb1bbc81c1e340d58b5964cf39cff84ea30c29ec5d3f005ee1362698" +
|
||||
"07395037955955655292c3e85f6187fa1f9502818100f4a33c102630840705f8c778a47b" +
|
||||
"87e8da31e68809af981ac5e5999cf1551685d761cdf0d6520361b99aebd5777a940fa64d" +
|
||||
"327c09fa63746fbb3247ec73a86edf115f1fe5c83598db803881ade71c33c6e956118345" +
|
||||
"497b98b5e07bb5be75971465ec78f2f9467e1b74956ca9d4c7c3e314e742a72d8b33889c" +
|
||||
"6c093a466cef0281801d3df0d02124766dd0be98349b19eb36a508c4e679e793ba0a8bef" +
|
||||
"4d786888c1e9947078b1ea28938716677b4ad8c5052af12eb73ac194915264a913709a0b" +
|
||||
"7b9f98d4a18edd781a13d49899f91c20dbd8eb2e61d991ba19b5cdc08893f5cb9d39e5a6" +
|
||||
"0629ea16d426244673b1b3ee72bd30e41fac8395acac40077403de5efd028180050731dd" +
|
||||
"d71b1a2b96c8d538ba90bb6b62c8b1c74c03aae9a9f59d21a7a82b0d572ef06fa9c807bf" +
|
||||
"c373d6b30d809c7871df96510c577421d9860c7383fda0919ece19996b3ca13562159193" +
|
||||
"c0c246471e287f975e8e57034e5136aaf44254e2650def3d51292474c515b1588969112e" +
|
||||
"0a85cc77073e9d64d2c2fc497844284b02818100d71d63eabf416cf677401ebf965f8314" +
|
||||
"120b568a57dd3bd9116c629c40dc0c6948bab3a13cc544c31c7da40e76132ef5dd3f7534" +
|
||||
"45a635930c74326ae3df0edd1bfb1523e3aa259873ac7cf1ac31151ec8f37b528c275622" +
|
||||
"48f99b8bed59fd4da2576aa6ee20d93a684900bf907e80c66d6e2261ae15e55284b4ed9d" +
|
||||
"6bdaa059"
|
||||
|
||||
const responderCertHex = "308202e2308201caa003020102020101300d06092a864886f70d01010b05003019311730" +
|
||||
"150603550403130e4f43535020526573706f6e646572301e170d31353031333031353530" +
|
||||
"33335a170d3136303133303135353033335a3019311730150603550403130e4f43535020" +
|
||||
"526573706f6e64657230820122300d06092a864886f70d01010105000382010f00308201" +
|
||||
"0a0282010100e8155f2d3e6f2e8d14c62a788bd462f9f844e7a6977c83ef1099f0f6616e" +
|
||||
"c5265b56f356e62c5400f0b06a2e7945a82752c636df32a895152d6074df1701dc6ccfbc" +
|
||||
"bec75a70bd2b55ae2be7e6cad3b5fd4cd5b7790ab401a436d3f5f346074ffde8a99d5b72" +
|
||||
"3350f0a112076614b12ef79c78991b119453445acf2416ab0046b540db14c9fc0f27b898" +
|
||||
"9ad0f63aa4b8aefc91aa8a72160c36307c60fec78a93d3fddf4259902aa77e7332971c7d" +
|
||||
"285b6a04f648993c6922a3e9da9adf5f81508c3228791843e5d49f24db2f1290bafd97e6" +
|
||||
"55b1049a199f652cd603c4fafa330c390b0da78fbbc67e8fa021cbd74eb96222b12ace31" +
|
||||
"a77dcf920334dc94581b0203010001a3353033300e0603551d0f0101ff04040302078030" +
|
||||
"130603551d25040c300a06082b06010505070309300c0603551d130101ff04023000300d" +
|
||||
"06092a864886f70d01010b05000382010100718012761b5063e18f0dc44644d8e6ab8612" +
|
||||
"31c15fd5357805425d82aec1de85bf6d3e30fce205e3e3b8b795bbe52e40a439286d2288" +
|
||||
"9064f4aeeb150359b9425f1da51b3a5c939018555d13ac42c565a0603786a919328f3267" +
|
||||
"09dce52c22ad958ecb7873b9771d1148b1c4be2efe80ba868919fc9f68b6090c2f33c156" +
|
||||
"d67156e42766a50b5d51e79637b7e58af74c2a951b1e642fa7741fec982cc937de37eff5" +
|
||||
"9e2005d5939bfc031589ca143e6e8ab83f40ee08cc20a6b4a95a318352c28d18528dcaf9" +
|
||||
"66705de17afa19d6e8ae91ddf33179d16ebb6ac2c69cae8373d408ebf8c55308be6c04d9" +
|
||||
"3a25439a94299a65a709756c7a3e568be049d5c38839"
|
||||
|
|
@ -0,0 +1,50 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pkcs12
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"unicode/utf16"
|
||||
)
|
||||
|
||||
// bmpString returns s encoded in UCS-2 with a zero terminator.
|
||||
func bmpString(s string) ([]byte, error) {
|
||||
// References:
|
||||
// https://tools.ietf.org/html/rfc7292#appendix-B.1
|
||||
// http://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane
|
||||
	// - non-BMP characters are encoded in UTF-16 using a surrogate pair of 16-bit code units
|
||||
// EncodeRune returns 0xfffd if the rune does not need special encoding
|
||||
// - the above RFC provides the info that BMPStrings are NULL terminated.
|
||||
|
||||
ret := make([]byte, 0, 2*len(s)+2)
|
||||
|
||||
for _, r := range s {
|
||||
if t, _ := utf16.EncodeRune(r); t != 0xfffd {
|
||||
return nil, errors.New("pkcs12: string contains characters that cannot be encoded in UCS-2")
|
||||
}
|
||||
ret = append(ret, byte(r/256), byte(r%256))
|
||||
}
|
||||
|
||||
return append(ret, 0, 0), nil
|
||||
}
|
||||
|
||||
func decodeBMPString(bmpString []byte) (string, error) {
|
||||
if len(bmpString)%2 != 0 {
|
||||
return "", errors.New("pkcs12: odd-length BMP string")
|
||||
}
|
||||
|
||||
// strip terminator if present
|
||||
if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 {
|
||||
bmpString = bmpString[:l-2]
|
||||
}
|
||||
|
||||
s := make([]uint16, 0, len(bmpString)/2)
|
||||
for len(bmpString) > 0 {
|
||||
s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1]))
|
||||
bmpString = bmpString[2:]
|
||||
}
|
||||
|
||||
return string(utf16.Decode(s)), nil
|
||||
}
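// A minimal round-trip sketch (illustrative only): bmpString encodes a
// BMP-only Go string as big-endian UCS-2 plus a two-byte terminator, and
// decodeBMPString strips the terminator and recovers the original text.
//
//	enc, err := bmpString("secret") // 00 73 00 65 00 63 00 72 00 65 00 74 00 00
//	if err == nil {
//		s, _ := decodeBMPString(enc) // s == "secret"
//	}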
|
||||
|
|
@ -0,0 +1,131 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pkcs12
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/cipher"
|
||||
"crypto/des"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"errors"
|
||||
|
||||
"github.com/letsencrypt/boulder/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2"
|
||||
)
|
||||
|
||||
var (
|
||||
oidPBEWithSHAAnd3KeyTripleDESCBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3})
|
||||
oidPBEWithSHAAnd40BitRC2CBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 6})
|
||||
)
|
||||
|
||||
// pbeCipher is an abstraction of a PKCS#12 cipher.
|
||||
type pbeCipher interface {
|
||||
// create returns a cipher.Block given a key.
|
||||
create(key []byte) (cipher.Block, error)
|
||||
// deriveKey returns a key derived from the given password and salt.
|
||||
deriveKey(salt, password []byte, iterations int) []byte
|
||||
	// deriveIV returns an IV derived from the given password and salt.
|
||||
deriveIV(salt, password []byte, iterations int) []byte
|
||||
}
|
||||
|
||||
type shaWithTripleDESCBC struct{}
|
||||
|
||||
func (shaWithTripleDESCBC) create(key []byte) (cipher.Block, error) {
|
||||
return des.NewTripleDESCipher(key)
|
||||
}
|
||||
|
||||
func (shaWithTripleDESCBC) deriveKey(salt, password []byte, iterations int) []byte {
|
||||
return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24)
|
||||
}
|
||||
|
||||
func (shaWithTripleDESCBC) deriveIV(salt, password []byte, iterations int) []byte {
|
||||
return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8)
|
||||
}
|
||||
|
||||
type shaWith40BitRC2CBC struct{}
|
||||
|
||||
func (shaWith40BitRC2CBC) create(key []byte) (cipher.Block, error) {
|
||||
return rc2.New(key, len(key)*8)
|
||||
}
|
||||
|
||||
func (shaWith40BitRC2CBC) deriveKey(salt, password []byte, iterations int) []byte {
|
||||
return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 5)
|
||||
}
|
||||
|
||||
func (shaWith40BitRC2CBC) deriveIV(salt, password []byte, iterations int) []byte {
|
||||
return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8)
|
||||
}
|
||||
|
||||
type pbeParams struct {
|
||||
Salt []byte
|
||||
Iterations int
|
||||
}
|
||||
|
||||
func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher.BlockMode, int, error) {
|
||||
var cipherType pbeCipher
|
||||
|
||||
switch {
|
||||
case algorithm.Algorithm.Equal(oidPBEWithSHAAnd3KeyTripleDESCBC):
|
||||
cipherType = shaWithTripleDESCBC{}
|
||||
case algorithm.Algorithm.Equal(oidPBEWithSHAAnd40BitRC2CBC):
|
||||
cipherType = shaWith40BitRC2CBC{}
|
||||
default:
|
||||
return nil, 0, NotImplementedError("algorithm " + algorithm.Algorithm.String() + " is not supported")
|
||||
}
|
||||
|
||||
var params pbeParams
|
||||
if err := unmarshal(algorithm.Parameters.FullBytes, ¶ms); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
key := cipherType.deriveKey(params.Salt, password, params.Iterations)
|
||||
iv := cipherType.deriveIV(params.Salt, password, params.Iterations)
|
||||
|
||||
block, err := cipherType.create(key)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
return cipher.NewCBCDecrypter(block, iv), block.BlockSize(), nil
|
||||
}
|
||||
|
||||
func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) {
|
||||
cbc, blockSize, err := pbDecrypterFor(info.Algorithm(), password)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
encrypted := info.Data()
|
||||
if len(encrypted) == 0 {
|
||||
return nil, errors.New("pkcs12: empty encrypted data")
|
||||
}
|
||||
if len(encrypted)%blockSize != 0 {
|
||||
return nil, errors.New("pkcs12: input is not a multiple of the block size")
|
||||
}
|
||||
decrypted = make([]byte, len(encrypted))
|
||||
cbc.CryptBlocks(decrypted, encrypted)
|
||||
|
||||
psLen := int(decrypted[len(decrypted)-1])
|
||||
if psLen == 0 || psLen > blockSize {
|
||||
return nil, ErrDecryption
|
||||
}
|
||||
|
||||
if len(decrypted) < psLen {
|
||||
return nil, ErrDecryption
|
||||
}
|
||||
ps := decrypted[len(decrypted)-psLen:]
|
||||
decrypted = decrypted[:len(decrypted)-psLen]
|
||||
if bytes.Compare(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) != 0 {
|
||||
return nil, ErrDecryption
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// decryptable abstracts an object that contains ciphertext.
|
||||
type decryptable interface {
|
||||
Algorithm() pkix.AlgorithmIdentifier
|
||||
Data() []byte
|
||||
}
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pkcs12
|
||||
|
||||
import "errors"
|
||||
|
||||
var (
|
||||
// ErrDecryption represents a failure to decrypt the input.
|
||||
ErrDecryption = errors.New("pkcs12: decryption error, incorrect padding")
|
||||
|
||||
// ErrIncorrectPassword is returned when an incorrect password is detected.
|
||||
// Usually, P12/PFX data is signed to be able to verify the password.
|
||||
ErrIncorrectPassword = errors.New("pkcs12: decryption password incorrect")
|
||||
)
|
||||
|
||||
// NotImplementedError indicates that the input is not currently supported.
|
||||
type NotImplementedError string
|
||||
|
||||
func (e NotImplementedError) Error() string {
|
||||
return "pkcs12: " + string(e)
|
||||
}
|
||||
|
|
@ -1,3 +1,7 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package rc2 implements the RC2 cipher
|
||||
/*
|
||||
https://www.ietf.org/rfc/rfc2268.txt
|
||||
|
|
@ -27,7 +31,7 @@ func New(key []byte, t1 int) (cipher.Block, error) {
|
|||
}, nil
|
||||
}
|
||||
|
||||
func (_ *rc2Cipher) BlockSize() int { return BlockSize }
|
||||
func (*rc2Cipher) BlockSize() int { return BlockSize }
|
||||
|
||||
var piTable = [256]byte{
|
||||
0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79, 0x4a, 0xa0, 0xd8, 0x9d,
|
||||
|
|
@ -0,0 +1,45 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pkcs12
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/sha1"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
)
|
||||
|
||||
type macData struct {
|
||||
Mac digestInfo
|
||||
MacSalt []byte
|
||||
Iterations int `asn1:"optional,default:1"`
|
||||
}
|
||||
|
||||
// from PKCS#7:
|
||||
type digestInfo struct {
|
||||
Algorithm pkix.AlgorithmIdentifier
|
||||
Digest []byte
|
||||
}
|
||||
|
||||
var (
|
||||
oidSHA1 = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26})
|
||||
)
|
||||
|
||||
func verifyMac(macData *macData, message, password []byte) error {
|
||||
if !macData.Mac.Algorithm.Algorithm.Equal(oidSHA1) {
|
||||
return NotImplementedError("unknown digest algorithm: " + macData.Mac.Algorithm.Algorithm.String())
|
||||
}
|
||||
|
||||
key := pbkdf(sha1Sum, 20, 64, macData.MacSalt, password, macData.Iterations, 3, 20)
|
||||
|
||||
mac := hmac.New(sha1.New, key)
|
||||
mac.Write(message)
|
||||
expectedMAC := mac.Sum(nil)
|
||||
|
||||
if !hmac.Equal(macData.Mac.Digest, expectedMAC) {
|
||||
return ErrIncorrectPassword
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -0,0 +1,170 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pkcs12
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha1"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
var (
|
||||
one = big.NewInt(1)
|
||||
)
|
||||
|
||||
// sha1Sum returns the SHA-1 hash of in.
|
||||
func sha1Sum(in []byte) []byte {
|
||||
sum := sha1.Sum(in)
|
||||
return sum[:]
|
||||
}
|
||||
|
||||
// fillWithRepeats returns v*ceiling(len(pattern) / v) bytes consisting of
|
||||
// repeats of pattern.
|
||||
func fillWithRepeats(pattern []byte, v int) []byte {
|
||||
if len(pattern) == 0 {
|
||||
return nil
|
||||
}
|
||||
outputLen := v * ((len(pattern) + v - 1) / v)
|
||||
return bytes.Repeat(pattern, (outputLen+len(pattern)-1)/len(pattern))[:outputLen]
|
||||
}
|
||||
|
||||
func pbkdf(hash func([]byte) []byte, u, v int, salt, password []byte, r int, ID byte, size int) (key []byte) {
|
||||
// implementation of https://tools.ietf.org/html/rfc7292#appendix-B.2 , RFC text verbatim in comments
|
||||
|
||||
// Let H be a hash function built around a compression function f:
|
||||
|
||||
// Z_2^u x Z_2^v -> Z_2^u
|
||||
|
||||
// (that is, H has a chaining variable and output of length u bits, and
|
||||
// the message input to the compression function of H is v bits). The
|
||||
// values for u and v are as follows:
|
||||
|
||||
// HASH FUNCTION VALUE u VALUE v
|
||||
// MD2, MD5 128 512
|
||||
// SHA-1 160 512
|
||||
// SHA-224 224 512
|
||||
// SHA-256 256 512
|
||||
// SHA-384 384 1024
|
||||
// SHA-512 512 1024
|
||||
// SHA-512/224 224 1024
|
||||
// SHA-512/256 256 1024
|
||||
|
||||
// Furthermore, let r be the iteration count.
|
||||
|
||||
// We assume here that u and v are both multiples of 8, as are the
|
||||
// lengths of the password and salt strings (which we denote by p and s,
|
||||
// respectively) and the number n of pseudorandom bits required. In
|
||||
// addition, u and v are of course non-zero.
|
||||
|
||||
// For information on security considerations for MD5 [19], see [25] and
|
||||
// [1], and on those for MD2, see [18].
|
||||
|
||||
// The following procedure can be used to produce pseudorandom bits for
|
||||
// a particular "purpose" that is identified by a byte called "ID".
|
||||
// This standard specifies 3 different values for the ID byte:
|
||||
|
||||
// 1. If ID=1, then the pseudorandom bits being produced are to be used
|
||||
// as key material for performing encryption or decryption.
|
||||
|
||||
// 2. If ID=2, then the pseudorandom bits being produced are to be used
|
||||
// as an IV (Initial Value) for encryption or decryption.
|
||||
|
||||
// 3. If ID=3, then the pseudorandom bits being produced are to be used
|
||||
// as an integrity key for MACing.
|
||||
|
||||
// 1. Construct a string, D (the "diversifier"), by concatenating v/8
|
||||
// copies of ID.
|
||||
var D []byte
|
||||
for i := 0; i < v; i++ {
|
||||
D = append(D, ID)
|
||||
}
|
||||
|
||||
// 2. Concatenate copies of the salt together to create a string S of
|
||||
// length v(ceiling(s/v)) bits (the final copy of the salt may be
|
||||
// truncated to create S). Note that if the salt is the empty
|
||||
// string, then so is S.
|
||||
|
||||
S := fillWithRepeats(salt, v)
|
||||
|
||||
// 3. Concatenate copies of the password together to create a string P
|
||||
// of length v(ceiling(p/v)) bits (the final copy of the password
|
||||
// may be truncated to create P). Note that if the password is the
|
||||
// empty string, then so is P.
|
||||
|
||||
P := fillWithRepeats(password, v)
|
||||
|
||||
// 4. Set I=S||P to be the concatenation of S and P.
|
||||
I := append(S, P...)
|
||||
|
||||
// 5. Set c=ceiling(n/u).
|
||||
c := (size + u - 1) / u
|
||||
|
||||
// 6. For i=1, 2, ..., c, do the following:
|
||||
A := make([]byte, c*20)
|
||||
var IjBuf []byte
|
||||
for i := 0; i < c; i++ {
|
||||
		// A. Set A_i=H^r(D||I). (i.e., the r-th hash of D||I,
		// H(H(H(... H(D||I))))
|
||||
Ai := hash(append(D, I...))
|
||||
for j := 1; j < r; j++ {
|
||||
Ai = hash(Ai)
|
||||
}
|
||||
copy(A[i*20:], Ai[:])
|
||||
|
||||
if i < c-1 { // skip on last iteration
|
||||
// B. Concatenate copies of Ai to create a string B of length v
|
||||
// bits (the final copy of Ai may be truncated to create B).
|
||||
var B []byte
|
||||
for len(B) < v {
|
||||
B = append(B, Ai[:]...)
|
||||
}
|
||||
B = B[:v]
|
||||
|
||||
// C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit
|
||||
// blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by
|
||||
// setting I_j=(I_j+B+1) mod 2^v for each j.
|
||||
{
|
||||
Bbi := new(big.Int).SetBytes(B)
|
||||
Ij := new(big.Int)
|
||||
|
||||
for j := 0; j < len(I)/v; j++ {
|
||||
Ij.SetBytes(I[j*v : (j+1)*v])
|
||||
Ij.Add(Ij, Bbi)
|
||||
Ij.Add(Ij, one)
|
||||
Ijb := Ij.Bytes()
|
||||
// We expect Ijb to be exactly v bytes,
|
||||
// if it is longer or shorter we must
|
||||
// adjust it accordingly.
|
||||
if len(Ijb) > v {
|
||||
Ijb = Ijb[len(Ijb)-v:]
|
||||
}
|
||||
if len(Ijb) < v {
|
||||
if IjBuf == nil {
|
||||
IjBuf = make([]byte, v)
|
||||
}
|
||||
bytesShort := v - len(Ijb)
|
||||
for i := 0; i < bytesShort; i++ {
|
||||
IjBuf[i] = 0
|
||||
}
|
||||
copy(IjBuf[bytesShort:], Ijb)
|
||||
Ijb = IjBuf
|
||||
}
|
||||
copy(I[j*v:(j+1)*v], Ijb)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// 7. Concatenate A_1, A_2, ..., A_c together to form a pseudorandom
|
||||
// bit string, A.
|
||||
|
||||
// 8. Use the first n bits of A as the output of this entire process.
|
||||
return A[:size]
|
||||
|
||||
// If the above process is being used to generate a DES key, the process
|
||||
// should be used to create 64 random bits, and the key's parity bits
|
||||
// should be set after the 64 bits have been produced. Similar concerns
|
||||
// hold for 2-key and 3-key triple-DES keys, for CDMF keys, and for any
|
||||
// similar keys with parity bits "built into them".
|
||||
}
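// A minimal sketch of how the ciphers above use pbkdf (illustrative only;
// salt, password and the iteration count 2048 are assumed example inputs):
//
//	key := pbkdf(sha1Sum, 20, 64, salt, password, 2048, 1, 24) // ID=1: 3DES key material
//	iv := pbkdf(sha1Sum, 20, 64, salt, password, 2048, 2, 8)   // ID=2: CBC IV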
|
||||
|
|
@ -0,0 +1,342 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package pkcs12 implements some of PKCS#12.
|
||||
//
|
||||
// This implementation is distilled from https://tools.ietf.org/html/rfc7292
|
||||
// and referenced documents. It is intended for decoding P12/PFX-stored
|
||||
// certificates and keys for use with the crypto/tls package.
|
||||
package pkcs12
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"encoding/hex"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
)
|
||||
|
||||
var (
|
||||
oidDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 1})
|
||||
oidEncryptedDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 6})
|
||||
|
||||
oidFriendlyName = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 20})
|
||||
oidLocalKeyID = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 21})
|
||||
oidMicrosoftCSPName = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 4, 1, 311, 17, 1})
|
||||
)
|
||||
|
||||
type pfxPdu struct {
|
||||
Version int
|
||||
AuthSafe contentInfo
|
||||
MacData macData `asn1:"optional"`
|
||||
}
|
||||
|
||||
type contentInfo struct {
|
||||
ContentType asn1.ObjectIdentifier
|
||||
Content asn1.RawValue `asn1:"tag:0,explicit,optional"`
|
||||
}
|
||||
|
||||
type encryptedData struct {
|
||||
Version int
|
||||
EncryptedContentInfo encryptedContentInfo
|
||||
}
|
||||
|
||||
type encryptedContentInfo struct {
|
||||
ContentType asn1.ObjectIdentifier
|
||||
ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
|
||||
EncryptedContent []byte `asn1:"tag:0,optional"`
|
||||
}
|
||||
|
||||
func (i encryptedContentInfo) Algorithm() pkix.AlgorithmIdentifier {
|
||||
return i.ContentEncryptionAlgorithm
|
||||
}
|
||||
|
||||
func (i encryptedContentInfo) Data() []byte { return i.EncryptedContent }
|
||||
|
||||
type safeBag struct {
|
||||
Id asn1.ObjectIdentifier
|
||||
Value asn1.RawValue `asn1:"tag:0,explicit"`
|
||||
Attributes []pkcs12Attribute `asn1:"set,optional"`
|
||||
}
|
||||
|
||||
type pkcs12Attribute struct {
|
||||
Id asn1.ObjectIdentifier
|
||||
	Value asn1.RawValue `asn1:"set"`
|
||||
}
|
||||
|
||||
type encryptedPrivateKeyInfo struct {
|
||||
AlgorithmIdentifier pkix.AlgorithmIdentifier
|
||||
EncryptedData []byte
|
||||
}
|
||||
|
||||
func (i encryptedPrivateKeyInfo) Algorithm() pkix.AlgorithmIdentifier {
|
||||
return i.AlgorithmIdentifier
|
||||
}
|
||||
|
||||
func (i encryptedPrivateKeyInfo) Data() []byte {
|
||||
return i.EncryptedData
|
||||
}
|
||||
|
||||
// PEM block types
|
||||
const (
|
||||
certificateType = "CERTIFICATE"
|
||||
privateKeyType = "PRIVATE KEY"
|
||||
)
|
||||
|
||||
// unmarshal calls asn1.Unmarshal, but also returns an error if there is any
|
||||
// trailing data after unmarshaling.
|
||||
func unmarshal(in []byte, out interface{}) error {
|
||||
trailing, err := asn1.Unmarshal(in, out)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(trailing) != 0 {
|
||||
return errors.New("pkcs12: trailing data found")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ToPEM converts all "safe bags" contained in pfxData to PEM blocks.
|
||||
func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) {
|
||||
encodedPassword, err := bmpString(password)
|
||||
if err != nil {
|
||||
return nil, ErrIncorrectPassword
|
||||
}
|
||||
|
||||
	bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword)
	if err != nil {
		return nil, err
	}
|
||||
|
||||
blocks := make([]*pem.Block, 0, len(bags))
|
||||
for _, bag := range bags {
|
||||
block, err := convertBag(&bag, encodedPassword)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blocks = append(blocks, block)
|
||||
}
|
||||
|
||||
return blocks, nil
|
||||
}
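// A minimal usage sketch for ToPEM (illustrative only; pfxData and the
// password are assumed inputs). Each returned block can be serialized with
// encoding/pem to build a PEM bundle of the contained keys and certificates.
//
//	blocks, err := ToPEM(pfxData, "password")
//	if err != nil {
//		return err
//	}
//	var pemData []byte
//	for _, b := range blocks {
//		pemData = append(pemData, pem.EncodeToMemory(b)...)
//	}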
|
||||
|
||||
func convertBag(bag *safeBag, password []byte) (*pem.Block, error) {
|
||||
block := &pem.Block{
|
||||
Headers: make(map[string]string),
|
||||
}
|
||||
|
||||
for _, attribute := range bag.Attributes {
|
||||
k, v, err := convertAttribute(&attribute)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
block.Headers[k] = v
|
||||
}
|
||||
|
||||
switch {
|
||||
case bag.Id.Equal(oidCertBag):
|
||||
block.Type = certificateType
|
||||
certsData, err := decodeCertBag(bag.Value.Bytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
block.Bytes = certsData
|
||||
case bag.Id.Equal(oidPKCS8ShroundedKeyBag):
|
||||
block.Type = privateKeyType
|
||||
|
||||
key, err := decodePkcs8ShroudedKeyBag(bag.Value.Bytes, password)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch key := key.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
block.Bytes = x509.MarshalPKCS1PrivateKey(key)
|
||||
case *ecdsa.PrivateKey:
|
||||
block.Bytes, err = x509.MarshalECPrivateKey(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, errors.New("found unknown private key type in PKCS#8 wrapping")
|
||||
}
|
||||
default:
|
||||
return nil, errors.New("don't know how to convert a safe bag of type " + bag.Id.String())
|
||||
}
|
||||
return block, nil
|
||||
}
|
||||
|
||||
func convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) {
|
||||
isString := false
|
||||
|
||||
switch {
|
||||
case attribute.Id.Equal(oidFriendlyName):
|
||||
key = "friendlyName"
|
||||
isString = true
|
||||
case attribute.Id.Equal(oidLocalKeyID):
|
||||
key = "localKeyId"
|
||||
case attribute.Id.Equal(oidMicrosoftCSPName):
|
||||
// This key is chosen to match OpenSSL.
|
||||
key = "Microsoft CSP Name"
|
||||
isString = true
|
||||
default:
|
||||
return "", "", errors.New("pkcs12: unknown attribute with OID " + attribute.Id.String())
|
||||
}
|
||||
|
||||
if isString {
|
||||
if err := unmarshal(attribute.Value.Bytes, &attribute.Value); err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
if value, err = decodeBMPString(attribute.Value.Bytes); err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
} else {
|
||||
var id []byte
|
||||
if err := unmarshal(attribute.Value.Bytes, &id); err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
value = hex.EncodeToString(id)
|
||||
}
|
||||
|
||||
return key, value, nil
|
||||
}
|
||||
|
||||
// Decode extracts a certificate and private key from pfxData. This function
|
||||
// assumes that there is only one certificate and only one private key in the
|
||||
// pfxData.
|
||||
func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) {
|
||||
encodedPassword, err := bmpString(password)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if len(bags) != 2 {
|
||||
err = errors.New("pkcs12: expected exactly two safe bags in the PFX PDU")
|
||||
return
|
||||
}
|
||||
|
||||
for _, bag := range bags {
|
||||
switch {
|
||||
case bag.Id.Equal(oidCertBag):
|
||||
if certificate != nil {
|
||||
err = errors.New("pkcs12: expected exactly one certificate bag")
|
||||
}
|
||||
|
||||
certsData, err := decodeCertBag(bag.Value.Bytes)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
certs, err := x509.ParseCertificates(certsData)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if len(certs) != 1 {
|
||||
err = errors.New("pkcs12: expected exactly one certificate in the certBag")
|
||||
return nil, nil, err
|
||||
}
|
||||
certificate = certs[0]
|
||||
|
||||
case bag.Id.Equal(oidPKCS8ShroundedKeyBag):
|
||||
if privateKey != nil {
|
||||
err = errors.New("pkcs12: expected exactly one key bag")
|
||||
}
|
||||
|
||||
if privateKey, err = decodePkcs8ShroudedKeyBag(bag.Value.Bytes, encodedPassword); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if certificate == nil {
|
||||
return nil, nil, errors.New("pkcs12: certificate missing")
|
||||
}
|
||||
if privateKey == nil {
|
||||
return nil, nil, errors.New("pkcs12: private key missing")
|
||||
}
|
||||
|
||||
return
|
||||
}
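// A minimal usage sketch for Decode (illustrative only; pfxData and the
// password are assumed inputs). The returned key and certificate can be
// combined into a crypto/tls certificate for use in a TLS config.
//
//	priv, cert, err := Decode(pfxData, "password")
//	if err != nil {
//		return err
//	}
//	tlsCert := tls.Certificate{
//		Certificate: [][]byte{cert.Raw},
//		PrivateKey:  priv,
//		Leaf:        cert,
//	}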
|
||||
|
||||
func getSafeContents(p12Data, password []byte) (bags []safeBag, updatedPassword []byte, err error) {
|
||||
pfx := new(pfxPdu)
|
||||
if err := unmarshal(p12Data, pfx); err != nil {
|
||||
return nil, nil, errors.New("pkcs12: error reading P12 data: " + err.Error())
|
||||
}
|
||||
|
||||
if pfx.Version != 3 {
|
||||
return nil, nil, NotImplementedError("can only decode v3 PFX PDU's")
|
||||
}
|
||||
|
||||
if !pfx.AuthSafe.ContentType.Equal(oidDataContentType) {
|
||||
return nil, nil, NotImplementedError("only password-protected PFX is implemented")
|
||||
}
|
||||
|
||||
// unmarshal the explicit bytes in the content for type 'data'
|
||||
if err := unmarshal(pfx.AuthSafe.Content.Bytes, &pfx.AuthSafe.Content); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if len(pfx.MacData.Mac.Algorithm.Algorithm) == 0 {
|
||||
return nil, nil, errors.New("pkcs12: no MAC in data")
|
||||
}
|
||||
|
||||
if err := verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password); err != nil {
|
||||
if err == ErrIncorrectPassword && len(password) == 2 && password[0] == 0 && password[1] == 0 {
|
||||
			// Some implementations use an empty byte array rather than the
			// two-byte terminator for the empty-string password; try the MAC
			// check once more with a nil password.
|
||||
password = nil
|
||||
err = verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var authenticatedSafe []contentInfo
|
||||
if err := unmarshal(pfx.AuthSafe.Content.Bytes, &authenticatedSafe); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if len(authenticatedSafe) != 2 {
|
||||
return nil, nil, NotImplementedError("expected exactly two items in the authenticated safe")
|
||||
}
|
||||
|
||||
for _, ci := range authenticatedSafe {
|
||||
var data []byte
|
||||
|
||||
switch {
|
||||
case ci.ContentType.Equal(oidDataContentType):
|
||||
if err := unmarshal(ci.Content.Bytes, &data); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
case ci.ContentType.Equal(oidEncryptedDataContentType):
|
||||
var encryptedData encryptedData
|
||||
if err := unmarshal(ci.Content.Bytes, &encryptedData); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if encryptedData.Version != 0 {
|
||||
return nil, nil, NotImplementedError("only version 0 of EncryptedData is supported")
|
||||
}
|
||||
if data, err = pbDecrypt(encryptedData.EncryptedContentInfo, password); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
default:
|
||||
return nil, nil, NotImplementedError("only data and encryptedData content types are supported in authenticated safe")
|
||||
}
|
||||
|
||||
var safeContents []safeBag
|
||||
if err := unmarshal(data, &safeContents); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
bags = append(bags, safeContents...)
|
||||
}
|
||||
|
||||
return bags, password, nil
|
||||
}
|
||||
|
|
@ -0,0 +1,57 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pkcs12
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"encoding/asn1"
|
||||
"errors"
|
||||
)
|
||||
|
||||
var (
|
||||
// see https://tools.ietf.org/html/rfc7292#appendix-D
|
||||
oidCertTypeX509Certificate = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 22, 1})
|
||||
oidPKCS8ShroundedKeyBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 2})
|
||||
oidCertBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 3})
|
||||
)
|
||||
|
||||
type certBag struct {
|
||||
Id asn1.ObjectIdentifier
|
||||
Data []byte `asn1:"tag:0,explicit"`
|
||||
}
|
||||
|
||||
func decodePkcs8ShroudedKeyBag(asn1Data, password []byte) (privateKey interface{}, err error) {
|
||||
pkinfo := new(encryptedPrivateKeyInfo)
|
||||
if err = unmarshal(asn1Data, pkinfo); err != nil {
|
||||
return nil, errors.New("pkcs12: error decoding PKCS#8 shrouded key bag: " + err.Error())
|
||||
}
|
||||
|
||||
pkData, err := pbDecrypt(pkinfo, password)
|
||||
if err != nil {
|
||||
return nil, errors.New("pkcs12: error decrypting PKCS#8 shrouded key bag: " + err.Error())
|
||||
}
|
||||
|
||||
ret := new(asn1.RawValue)
|
||||
if err = unmarshal(pkData, ret); err != nil {
|
||||
return nil, errors.New("pkcs12: error unmarshaling decrypted private key: " + err.Error())
|
||||
}
|
||||
|
||||
if privateKey, err = x509.ParsePKCS8PrivateKey(pkData); err != nil {
|
||||
return nil, errors.New("pkcs12: error parsing PKCS#8 private key: " + err.Error())
|
||||
}
|
||||
|
||||
return privateKey, nil
|
||||
}
|
||||
|
||||
func decodeCertBag(asn1Data []byte) (x509Certificates []byte, err error) {
|
||||
bag := new(certBag)
|
||||
if err := unmarshal(asn1Data, bag); err != nil {
|
||||
return nil, errors.New("pkcs12: error decoding cert bag: " + err.Error())
|
||||
}
|
||||
if !bag.Id.Equal(oidCertTypeX509Certificate) {
|
||||
return nil, NotImplementedError("only X509 certificates are supported")
|
||||
}
|
||||
return bag.Data, nil
|
||||
}
|
||||
|
|
@ -29,16 +29,6 @@ import (
|
|||
"github.com/letsencrypt/boulder/sa"
|
||||
)
|
||||
|
||||
type cacheCtrlHandler struct {
|
||||
http.Handler
|
||||
MaxAge time.Duration
|
||||
}
|
||||
|
||||
func (c *cacheCtrlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%d", c.MaxAge/time.Second))
|
||||
c.Handler.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
/*
|
||||
DBSource maps a given Database schema to a CA Key Hash, so we can pick
|
||||
from among them when presented with OCSP requests for different certs.
|
||||
|
|
@ -181,8 +171,7 @@ func main() {
|
|||
killTimeout, err := time.ParseDuration(c.OCSPResponder.ShutdownKillTimeout)
|
||||
cmd.FailOnError(err, "Couldn't parse shutdown kill timeout")
|
||||
|
||||
m := http.StripPrefix(c.OCSPResponder.Path,
|
||||
handler(source, c.OCSPResponder.MaxAge.Duration))
|
||||
m := http.StripPrefix(c.OCSPResponder.Path, cfocsp.NewResponder(source))
|
||||
|
||||
httpMonitor := metrics.NewHTTPMonitor(stats, m, "OCSP")
|
||||
srv := &http.Server{
|
||||
|
|
@ -201,10 +190,3 @@ func main() {
|
|||
|
||||
app.Run()
|
||||
}
|
||||
|
||||
func handler(src cfocsp.Source, maxAge time.Duration) http.Handler {
|
||||
return &cacheCtrlHandler{
|
||||
Handler: cfocsp.Responder{Source: src},
|
||||
MaxAge: maxAge,
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -21,22 +21,6 @@ import (
|
|||
"github.com/letsencrypt/boulder/test/vars"
|
||||
)
|
||||
|
||||
func TestCacheControl(t *testing.T) {
|
||||
src := make(cfocsp.InMemorySource)
|
||||
h := handler(src, 10*time.Second)
|
||||
w := httptest.NewRecorder()
|
||||
r, err := http.NewRequest("GET", "/", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
h.ServeHTTP(w, r)
|
||||
expected := "max-age=10"
|
||||
actual := w.Header().Get("Cache-Control")
|
||||
if actual != expected {
|
||||
t.Errorf("Cache-Control value: want %#v, got %#v", expected, actual)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
req = mustRead("./testdata/ocsp.req")
|
||||
resp = mustRead("./testdata/ocsp.resp")
|
||||
|
|
@ -50,7 +34,7 @@ func TestHandler(t *testing.T) {
|
|||
src := make(cfocsp.InMemorySource)
|
||||
src[ocspReq.SerialNumber.String()] = resp
|
||||
|
||||
h := handler(src, 10*time.Second)
|
||||
h := cfocsp.NewResponder(src)
|
||||
w := httptest.NewRecorder()
|
||||
r, err := http.NewRequest("POST", "/", bytes.NewReader(req))
|
||||
if err != nil {
|
||||
|
|
@ -93,7 +77,7 @@ func TestDBHandler(t *testing.T) {
|
|||
t.Fatalf("unable to insert response: %s", err)
|
||||
}
|
||||
|
||||
h := handler(src, 10*time.Second)
|
||||
h := cfocsp.NewResponder(src)
|
||||
w := httptest.NewRecorder()
|
||||
r, err := http.NewRequest("POST", "/", bytes.NewReader(req))
|
||||
if err != nil {
|
||||