diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index c562b73b2..dfd1e84fa 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -70,10 +70,6 @@ "Comment": "1.2.0-64-ge1712f3", "Rev": "e1712f381785e32046927f64a7c86fe569203196" }, - { - "ImportPath": "github.com/dgryski/go-rc2", - "Rev": "fd90a5fcd260ebe709a689d0bdca2043afffabfa" - }, { "ImportPath": "github.com/facebookgo/clock", "Rev": "600d898af40aa09a7a93ecb9265d87b0504b6f03" @@ -99,6 +95,10 @@ "ImportPath": "github.com/golang/protobuf/proto", "Rev": "a1dfa5ef89a13a0aa4be5a6f81179db10bfeea36" }, + { + "ImportPath": "github.com/google/certificate-transparency/go", + "Rev": "72d5367bd7ff1f4401c5649817dca766b668e322" + }, { "ImportPath": "github.com/jmhodges/clock", "Rev": "3c4ebd218625c9364c33db6d39c276d80c3090c6" @@ -124,13 +124,21 @@ "ImportPath": "github.com/miekg/pkcs11", "Rev": "88c9f842544e629ec046105d7fb50d5daafae737" }, + { + "ImportPath": "github.com/mreiferson/go-httpclient", + "Rev": "63fe23f7434723dc904c901043af07931f293c47" + }, { "ImportPath": "github.com/streadway/amqp", "Rev": "150b7f24d6ad507e6026c13d85ce1f1391ac7400" }, { "ImportPath": "golang.org/x/crypto/ocsp", - "Rev": "287a1d87db5d649b01d6193bd9d07e909f08094c" + "Rev": "beef0f4390813b96e8e68fd78570396d0f4751fc" + }, + { + "ImportPath": "golang.org/x/crypto/pkcs12", + "Rev": "beef0f4390813b96e8e68fd78570396d0f4751fc" }, { "ImportPath": "golang.org/x/net/publicsuffix", diff --git a/Godeps/_workspace/src/github.com/cloudflare/cfssl/helpers/helpers.go b/Godeps/_workspace/src/github.com/cloudflare/cfssl/helpers/helpers.go index f69b1db06..d03586d27 100644 --- a/Godeps/_workspace/src/github.com/cloudflare/cfssl/helpers/helpers.go +++ b/Godeps/_workspace/src/github.com/cloudflare/cfssl/helpers/helpers.go @@ -21,7 +21,7 @@ import ( cferr "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/errors" "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/helpers/derhelpers" "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/log" - "golang.org/x/crypto/pkcs12" + "github.com/letsencrypt/boulder/Godeps/_workspace/src/golang.org/x/crypto/pkcs12" ) // OneYear is a time.Duration representing a year's worth of seconds. 
diff --git a/Godeps/_workspace/src/github.com/cloudflare/cfssl/signer/local/local.go b/Godeps/_workspace/src/github.com/cloudflare/cfssl/signer/local/local.go index 5a4ba6d61..907ff5ed6 100644 --- a/Godeps/_workspace/src/github.com/cloudflare/cfssl/signer/local/local.go +++ b/Godeps/_workspace/src/github.com/cloudflare/cfssl/signer/local/local.go @@ -26,8 +26,8 @@ import ( "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/log" "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/cloudflare/cfssl/signer" - "github.com/google/certificate-transparency/go" - "github.com/google/certificate-transparency/go/client" + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go" + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/client" ) // Signer contains a signer that uses the standard library to diff --git a/Godeps/_workspace/src/github.com/dgryski/go-rc2/bench_test.go b/Godeps/_workspace/src/github.com/dgryski/go-rc2/bench_test.go deleted file mode 100644 index 639b4cb9c..000000000 --- a/Godeps/_workspace/src/github.com/dgryski/go-rc2/bench_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package rc2 - -import ( - "testing" - - ebfe "github.com/ebfe/rc2" -) - -func BenchmarkEncrypt(b *testing.B) { - r, _ := New([]byte{0, 0, 0, 0, 0, 0, 0, 0}, 64) - b.ResetTimer() - var src [8]byte - for i := 0; i < b.N; i++ { - r.Encrypt(src[:], src[:]) - } -} -func BenchmarkEbfeEncrypt(b *testing.B) { - r, _ := ebfe.NewCipher([]byte{0, 0, 0, 0, 0, 0, 0, 0}) - b.ResetTimer() - var src [8]byte - for i := 0; i < b.N; i++ { - r.Encrypt(src[:], src[:]) - } -} -func BenchmarkDecrypt(b *testing.B) { - r, _ := New([]byte{0, 0, 0, 0, 0, 0, 0, 0}, 64) - b.ResetTimer() - var src [8]byte - for i := 0; i < b.N; i++ { - r.Decrypt(src[:], src[:]) - } -} -func BenchmarkEbfeDecrypt(b *testing.B) { - r, _ := ebfe.NewCipher([]byte{0, 0, 0, 0, 0, 0, 0, 0}) - b.ResetTimer() - var src [8]byte - for i := 0; i < b.N; i++ { - r.Decrypt(src[:], src[:]) - } -} diff --git a/Godeps/_workspace/src/github.com/dgryski/go-rc2/rc2_test.go b/Godeps/_workspace/src/github.com/dgryski/go-rc2/rc2_test.go deleted file mode 100644 index 237c3d159..000000000 --- a/Godeps/_workspace/src/github.com/dgryski/go-rc2/rc2_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package rc2 - -import ( - "bytes" - "encoding/hex" - "testing" -) - -func TestEncryptDecrypt(t *testing.T) { - - // TODO(dgryski): add the rest of the test vectors from the RFC - var tests = []struct { - key string - plain string - cipher string - t1 int - }{ - { - "0000000000000000", - "0000000000000000", - "ebb773f993278eff", - 63, - }, - { - "ffffffffffffffff", - "ffffffffffffffff", - "278b27e42e2f0d49", - 64, - }, - { - "3000000000000000", - "1000000000000001", - "30649edf9be7d2c2", - 64, - }, - { - "88", - "0000000000000000", - "61a8a244adacccf0", - 64, - }, - { - "88bca90e90875a", - "0000000000000000", - "6ccf4308974c267f", - 64, - }, - { - "88bca90e90875a7f0f79c384627bafb2", - "0000000000000000", - "1a807d272bbe5db1", - 64, - }, - { - "88bca90e90875a7f0f79c384627bafb2", - "0000000000000000", - "2269552ab0f85ca6", - 128, - }, - { - "88bca90e90875a7f0f79c384627bafb216f80a6f85920584c42fceb0be255daf1e", - "0000000000000000", - "5b78d3a43dfff1f1", - 129, - }, - } - - for _, tt := range tests { - k, _ := hex.DecodeString(tt.key) - p, _ := hex.DecodeString(tt.plain) - c, _ := hex.DecodeString(tt.cipher) - - b, _ := New(k, tt.t1) - - var dst [8]byte - - b.Encrypt(dst[:], p) - - 
if !bytes.Equal(dst[:], c) { - t.Errorf("encrypt failed: got % 2x wanted % 2x\n", dst, c) - } - - b.Decrypt(dst[:], c) - - if !bytes.Equal(dst[:], p) { - t.Errorf("decrypt failed: got % 2x wanted % 2x\n", dst, p) - } - } -} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/README.md b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/README.md new file mode 100644 index 000000000..82c5d1b3a --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/README.md @@ -0,0 +1,25 @@ +This is the really early beginnings of a certificate transparency log +client written in Go, along with a log scanner tool. + +You'll need go v1.1 or higher to compile. + +# Installation + +This go code must be imported into your go workspace before you can +use it, which can be done with: + + go get github.com/google/certificate-transparency/go/client + go get github.com/google/certificate-transparency/go/scanner + etc. + +# Building the binaries + +To compile the log scanner run: + + go build github.com/google/certificate-transparency/go/scanner/main/scanner.go + +# Contributing + +When sending pull requests, please ensure that everything's been run +through ```gofmt``` beforehand so we can keep everything nice and +tidy. diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/asn1/asn1.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/asn1/asn1.go new file mode 100644 index 000000000..e98747795 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/asn1/asn1.go @@ -0,0 +1,956 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package asn1 implements parsing of DER-encoded ASN.1 data structures, +// as defined in ITU-T Rec X.690. +// +// See also ``A Layman's Guide to a Subset of ASN.1, BER, and DER,'' +// http://luca.ntop.org/Teaching/Appunti/asn1.html. +// +// START CT CHANGES +// This is a fork of the Go standard library ASN.1 implementation +// (encoding/asn1). The main difference is that this version tries to correct +// for errors (e.g. use of tagPrintableString when the string data is really +// ISO8859-1 - a common error present in many x509 certificates in the wild.) +// END CT CHANGES +package asn1 + +// ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER etc +// are different encoding formats for those objects. Here, we'll be dealing +// with DER, the Distinguished Encoding Rules. DER is used in X.509 because +// it's fast to parse and, unlike BER, has a unique encoding for every object. +// When calculating hashes over objects, it's important that the resulting +// bytes be the same at both ends and DER removes this margin of error. +// +// ASN.1 is very complex and this package doesn't attempt to implement +// everything by any means. + +import ( + // START CT CHANGES + "errors" + "fmt" + // END CT CHANGES + "math/big" + "reflect" + // START CT CHANGES + "strings" + // END CT CHANGES + "time" +) + +// A StructuralError suggests that the ASN.1 data is valid, but the Go type +// which is receiving it doesn't match. +type StructuralError struct { + Msg string +} + +func (e StructuralError) Error() string { return "asn1: structure error: " + e.Msg } + +// A SyntaxError suggests that the ASN.1 data is invalid. 
+type SyntaxError struct { + Msg string +} + +func (e SyntaxError) Error() string { return "asn1: syntax error: " + e.Msg } + +// We start by dealing with each of the primitive types in turn. + +// BOOLEAN + +func parseBool(bytes []byte) (ret bool, err error) { + if len(bytes) != 1 { + err = SyntaxError{"invalid boolean"} + return + } + + // DER demands that "If the encoding represents the boolean value TRUE, + // its single contents octet shall have all eight bits set to one." + // Thus only 0 and 255 are valid encoded values. + switch bytes[0] { + case 0: + ret = false + case 0xff: + ret = true + default: + err = SyntaxError{"invalid boolean"} + } + + return +} + +// INTEGER + +// parseInt64 treats the given bytes as a big-endian, signed integer and +// returns the result. +func parseInt64(bytes []byte) (ret int64, err error) { + if len(bytes) > 8 { + // We'll overflow an int64 in this case. + err = StructuralError{"integer too large"} + return + } + for bytesRead := 0; bytesRead < len(bytes); bytesRead++ { + ret <<= 8 + ret |= int64(bytes[bytesRead]) + } + + // Shift up and down in order to sign extend the result. + ret <<= 64 - uint8(len(bytes))*8 + ret >>= 64 - uint8(len(bytes))*8 + return +} + +// parseInt treats the given bytes as a big-endian, signed integer and returns +// the result. +func parseInt32(bytes []byte) (int32, error) { + ret64, err := parseInt64(bytes) + if err != nil { + return 0, err + } + if ret64 != int64(int32(ret64)) { + return 0, StructuralError{"integer too large"} + } + return int32(ret64), nil +} + +var bigOne = big.NewInt(1) + +// parseBigInt treats the given bytes as a big-endian, signed integer and returns +// the result. +func parseBigInt(bytes []byte) *big.Int { + ret := new(big.Int) + if len(bytes) > 0 && bytes[0]&0x80 == 0x80 { + // This is a negative number. + notBytes := make([]byte, len(bytes)) + for i := range notBytes { + notBytes[i] = ^bytes[i] + } + ret.SetBytes(notBytes) + ret.Add(ret, bigOne) + ret.Neg(ret) + return ret + } + ret.SetBytes(bytes) + return ret +} + +// BIT STRING + +// BitString is the structure to use when you want an ASN.1 BIT STRING type. A +// bit string is padded up to the nearest byte in memory and the number of +// valid bits is recorded. Padding bits will be zero. +type BitString struct { + Bytes []byte // bits packed into bytes. + BitLength int // length in bits. +} + +// At returns the bit at the given index. If the index is out of range it +// returns false. +func (b BitString) At(i int) int { + if i < 0 || i >= b.BitLength { + return 0 + } + x := i / 8 + y := 7 - uint(i%8) + return int(b.Bytes[x]>>y) & 1 +} + +// RightAlign returns a slice where the padding bits are at the beginning. The +// slice may share memory with the BitString. +func (b BitString) RightAlign() []byte { + shift := uint(8 - (b.BitLength % 8)) + if shift == 8 || len(b.Bytes) == 0 { + return b.Bytes + } + + a := make([]byte, len(b.Bytes)) + a[0] = b.Bytes[0] >> shift + for i := 1; i < len(b.Bytes); i++ { + a[i] = b.Bytes[i-1] << (8 - shift) + a[i] |= b.Bytes[i] >> shift + } + + return a +} + +// parseBitString parses an ASN.1 bit string from the given byte slice and returns it. 
+func parseBitString(bytes []byte) (ret BitString, err error) { + if len(bytes) == 0 { + err = SyntaxError{"zero length BIT STRING"} + return + } + paddingBits := int(bytes[0]) + if paddingBits > 7 || + len(bytes) == 1 && paddingBits > 0 || + bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0 { + err = SyntaxError{"invalid padding bits in BIT STRING"} + return + } + ret.BitLength = (len(bytes)-1)*8 - paddingBits + ret.Bytes = bytes[1:] + return +} + +// OBJECT IDENTIFIER + +// An ObjectIdentifier represents an ASN.1 OBJECT IDENTIFIER. +type ObjectIdentifier []int + +// Equal reports whether oi and other represent the same identifier. +func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool { + if len(oi) != len(other) { + return false + } + for i := 0; i < len(oi); i++ { + if oi[i] != other[i] { + return false + } + } + + return true +} + +// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and +// returns it. An object identifier is a sequence of variable length integers +// that are assigned in a hierarchy. +func parseObjectIdentifier(bytes []byte) (s []int, err error) { + if len(bytes) == 0 { + err = SyntaxError{"zero length OBJECT IDENTIFIER"} + return + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. + s = make([]int, len(bytes)+1) + + // The first byte is 40*value1 + value2: + s[0] = int(bytes[0]) / 40 + s[1] = int(bytes[0]) % 40 + i := 2 + for offset := 1; offset < len(bytes); i++ { + var v int + v, offset, err = parseBase128Int(bytes, offset) + if err != nil { + return + } + s[i] = v + } + s = s[0:i] + return +} + +// ENUMERATED + +// An Enumerated is represented as a plain int. +type Enumerated int + +// FLAG + +// A Flag accepts any data and is set to true if present. +type Flag bool + +// parseBase128Int parses a base-128 encoded int from the given offset in the +// given byte slice. It returns the value and the new offset. +func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, err error) { + offset = initOffset + for shifted := 0; offset < len(bytes); shifted++ { + if shifted > 4 { + err = StructuralError{"base 128 integer too large"} + return + } + ret <<= 7 + b := bytes[offset] + ret |= int(b & 0x7f) + offset++ + if b&0x80 == 0 { + return + } + } + err = SyntaxError{"truncated base 128 integer"} + return +} + +// UTCTime + +func parseUTCTime(bytes []byte) (ret time.Time, err error) { + s := string(bytes) + ret, err = time.Parse("0601021504Z0700", s) + if err != nil { + ret, err = time.Parse("060102150405Z0700", s) + } + if err == nil && ret.Year() >= 2050 { + // UTCTime only encodes times prior to 2050. See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1 + ret = ret.AddDate(-100, 0, 0) + } + + return +} + +// parseGeneralizedTime parses the GeneralizedTime from the given byte slice +// and returns the resulting time. +func parseGeneralizedTime(bytes []byte) (ret time.Time, err error) { + return time.Parse("20060102150405Z0700", string(bytes)) +} + +// PrintableString + +// parsePrintableString parses a ASN.1 PrintableString from the given byte +// array and returns it. +func parsePrintableString(bytes []byte) (ret string, err error) { + for _, b := range bytes { + if !isPrintable(b) { + err = SyntaxError{"PrintableString contains invalid character"} + return + } + } + ret = string(bytes) + return +} + +// isPrintable returns true iff the given b is in the ASN.1 PrintableString set. +func isPrintable(b byte) bool { + return 'a' <= b && b <= 'z' || + 'A' <= b && b <= 'Z' || + '0' <= b && b <= '9' || + '\'' <= b && b <= ')' || + '+' <= b && b <= '/' || + b == ' ' || + b == ':' || + b == '=' || + b == '?' || + // This is technically not allowed in a PrintableString. + // However, x509 certificates with wildcard strings don't + // always use the correct string type so we permit it. + b == '*' +} + +// IA5String + +// parseIA5String parses a ASN.1 IA5String (ASCII string) from the given +// byte slice and returns it. +func parseIA5String(bytes []byte) (ret string, err error) { + for _, b := range bytes { + if b >= 0x80 { + err = SyntaxError{"IA5String contains invalid character"} + return + } + } + ret = string(bytes) + return +} + +// T61String + +// parseT61String parses a ASN.1 T61String (8-bit clean string) from the given +// byte slice and returns it. +func parseT61String(bytes []byte) (ret string, err error) { + return string(bytes), nil +} + +// UTF8String + +// parseUTF8String parses a ASN.1 UTF8String (raw UTF-8) from the given byte +// array and returns it. +func parseUTF8String(bytes []byte) (ret string, err error) { + return string(bytes), nil +} + +// A RawValue represents an undecoded ASN.1 object. +type RawValue struct { + Class, Tag int + IsCompound bool + Bytes []byte + FullBytes []byte // includes the tag and length +} + +// RawContent is used to signal that the undecoded, DER data needs to be +// preserved for a struct. To use it, the first field of the struct must have +// this type. It's an error for any of the other fields to have this type. +type RawContent []byte + +// Tagging + +// parseTagAndLength parses an ASN.1 tag and length pair from the given offset +// into a byte slice. It returns the parsed data and the new offset.
SET and +// SET OF (tag 17) are mapped to SEQUENCE and SEQUENCE OF (tag 16) since we +// don't distinguish between ordered and unordered objects in this code. +func parseTagAndLength(bytes []byte, initOffset int) (ret tagAndLength, offset int, err error) { + offset = initOffset + b := bytes[offset] + offset++ + ret.class = int(b >> 6) + ret.isCompound = b&0x20 == 0x20 + ret.tag = int(b & 0x1f) + + // If the bottom five bits are set, then the tag number is actually base 128 + // encoded afterwards + if ret.tag == 0x1f { + ret.tag, offset, err = parseBase128Int(bytes, offset) + if err != nil { + return + } + } + if offset >= len(bytes) { + err = SyntaxError{"truncated tag or length"} + return + } + b = bytes[offset] + offset++ + if b&0x80 == 0 { + // The length is encoded in the bottom 7 bits. + ret.length = int(b & 0x7f) + } else { + // Bottom 7 bits give the number of length bytes to follow. + numBytes := int(b & 0x7f) + if numBytes == 0 { + err = SyntaxError{"indefinite length found (not DER)"} + return + } + ret.length = 0 + for i := 0; i < numBytes; i++ { + if offset >= len(bytes) { + err = SyntaxError{"truncated tag or length"} + return + } + b = bytes[offset] + offset++ + if ret.length >= 1<<23 { + // We can't shift ret.length up without + // overflowing. + err = StructuralError{"length too large"} + return + } + ret.length <<= 8 + ret.length |= int(b) + if ret.length == 0 { + // DER requires that lengths be minimal. + err = StructuralError{"superfluous leading zeros in length"} + return + } + } + } + + return +} + +// parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse +// a number of ASN.1 values from the given byte slice and returns them as a +// slice of Go values of the given type. +func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type) (ret reflect.Value, err error) { + expectedTag, compoundType, ok := getUniversalType(elemType) + if !ok { + err = StructuralError{"unknown Go type for slice"} + return + } + + // First we iterate over the input and count the number of elements, + // checking that the types are correct in each case. + numElements := 0 + for offset := 0; offset < len(bytes); { + var t tagAndLength + t, offset, err = parseTagAndLength(bytes, offset) + if err != nil { + return + } + // We pretend that GENERAL STRINGs are PRINTABLE STRINGs so + // that a sequence of them can be parsed into a []string. + if t.tag == tagGeneralString { + t.tag = tagPrintableString + } + if t.class != classUniversal || t.isCompound != compoundType || t.tag != expectedTag { + err = StructuralError{"sequence tag mismatch"} + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"truncated sequence"} + return + } + offset += t.length + numElements++ + } + ret = reflect.MakeSlice(sliceType, numElements, numElements) + params := fieldParameters{} + offset := 0 + for i := 0; i < numElements; i++ { + offset, err = parseField(ret.Index(i), bytes, offset, params) + if err != nil { + return + } + } + return +} + +var ( + bitStringType = reflect.TypeOf(BitString{}) + objectIdentifierType = reflect.TypeOf(ObjectIdentifier{}) + enumeratedType = reflect.TypeOf(Enumerated(0)) + flagType = reflect.TypeOf(Flag(false)) + timeType = reflect.TypeOf(time.Time{}) + rawValueType = reflect.TypeOf(RawValue{}) + rawContentsType = reflect.TypeOf(RawContent(nil)) + bigIntType = reflect.TypeOf(new(big.Int)) +) + +// invalidLength returns true iff offset + length > sliceLength, or if the +// addition would overflow. 
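+// The overflow check matters because a hostile length can make offset+length wrap to a negative value.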
+func invalidLength(offset, length, sliceLength int) bool { + return offset+length < offset || offset+length > sliceLength +} + +// START CT CHANGES + +// Tests whether the data in |bytes| would be a valid ISO8859-1 string. +// Clearly, a sequence of bytes comprised solely of valid ISO8859-1 +// codepoints does not imply that the encoding MUST be ISO8859-1, rather that +// you would not encounter an error trying to interpret the data as such. +func couldBeISO8859_1(bytes []byte) bool { + for _, b := range bytes { + if b < 0x20 || (b >= 0x7F && b < 0xA0) { + return false + } + } + return true +} + +// Checks whether the data in |bytes| would be a valid T.61 string. +// Clearly, a sequence of bytes comprised solely of valid T.61 +// codepoints does not imply that the encoding MUST be T.61, rather that +// you would not encounter an error trying to interpret the data as such. +func couldBeT61(bytes []byte) bool { + for _, b := range bytes { + switch b { + case 0x00: + // Since we're guessing at (incorrect) encodings for a + // PrintableString, we'll err on the side of caution and disallow + // strings with a NUL in them, don't want to re-create a PayPal NUL + // situation in monitors. + fallthrough + case 0x23, 0x24, 0x5C, 0x5E, 0x60, 0x7B, 0x7D, 0x7E, 0xA5, 0xA6, 0xAC, 0xAD, 0xAE, 0xAF, + 0xB9, 0xBA, 0xC0, 0xC9, 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, + 0xDA, 0xDB, 0xDC, 0xDE, 0xDF, 0xE5, 0xFF: + // These are all invalid code points in T.61, so it can't be a T.61 string. + return false + } + } + return true +} + +// Converts the data in |bytes| to the equivalent UTF-8 string. +func iso8859_1ToUTF8(bytes []byte) string { + buf := make([]rune, len(bytes)) + for i, b := range bytes { + buf[i] = rune(b) + } + return string(buf) +} + +// END CT CHANGES + +// parseField is the main parsing function. Given a byte slice and an offset +// into the array, it will try to parse a suitable ASN.1 value out and store it +// in the given Value. +func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err error) { + offset = initOffset + fieldType := v.Type() + + // If we have run out of data, it may be that there are optional elements at the end. + if offset == len(bytes) { + if !setDefaultValue(v, params) { + err = SyntaxError{"sequence truncated"} + } + return + } + + // Deal with raw values. + if fieldType == rawValueType { + var t tagAndLength + t, offset, err = parseTagAndLength(bytes, offset) + if err != nil { + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"data truncated"} + return + } + result := RawValue{t.class, t.tag, t.isCompound, bytes[offset : offset+t.length], bytes[initOffset : offset+t.length]} + offset += t.length + v.Set(reflect.ValueOf(result)) + return + } + + // Deal with the ANY type. 
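+// When the target is an empty interface, the value is decoded according to its universal wire tag (where recognised) and stored as the corresponding Go type.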
+ if ifaceType := fieldType; ifaceType.Kind() == reflect.Interface && ifaceType.NumMethod() == 0 { + var t tagAndLength + t, offset, err = parseTagAndLength(bytes, offset) + if err != nil { + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"data truncated"} + return + } + var result interface{} + if !t.isCompound && t.class == classUniversal { + innerBytes := bytes[offset : offset+t.length] + switch t.tag { + case tagPrintableString: + result, err = parsePrintableString(innerBytes) + // START CT CHANGES + if err != nil && strings.Contains(err.Error(), "PrintableString contains invalid character") { + // Probably an ISO8859-1 string stuffed in, check if it + // would be valid and assume that's what's happened if so, + // otherwise try T.61, failing that give up and just assign + // the bytes + switch { + case couldBeISO8859_1(innerBytes): + result, err = iso8859_1ToUTF8(innerBytes), nil + case couldBeT61(innerBytes): + result, err = parseT61String(innerBytes) + default: + result = nil + err = errors.New("PrintableString contains invalid character, but couldn't determine correct String type.") + } + } + // END CT CHANGES + case tagIA5String: + result, err = parseIA5String(innerBytes) + case tagT61String: + result, err = parseT61String(innerBytes) + case tagUTF8String: + result, err = parseUTF8String(innerBytes) + case tagInteger: + result, err = parseInt64(innerBytes) + case tagBitString: + result, err = parseBitString(innerBytes) + case tagOID: + result, err = parseObjectIdentifier(innerBytes) + case tagUTCTime: + result, err = parseUTCTime(innerBytes) + case tagOctetString: + result = innerBytes + default: + // If we don't know how to handle the type, we just leave Value as nil. + } + } + offset += t.length + if err != nil { + return + } + if result != nil { + v.Set(reflect.ValueOf(result)) + } + return + } + universalTag, compoundType, ok1 := getUniversalType(fieldType) + if !ok1 { + err = StructuralError{fmt.Sprintf("unknown Go type: %v", fieldType)} + return + } + + t, offset, err := parseTagAndLength(bytes, offset) + if err != nil { + return + } + if params.explicit { + expectedClass := classContextSpecific + if params.application { + expectedClass = classApplication + } + if t.class == expectedClass && t.tag == *params.tag && (t.length == 0 || t.isCompound) { + if t.length > 0 { + t, offset, err = parseTagAndLength(bytes, offset) + if err != nil { + return + } + } else { + if fieldType != flagType { + err = StructuralError{"zero length explicit tag was not an asn1.Flag"} + return + } + v.SetBool(true) + return + } + } else { + // The tags didn't match, it might be an optional element. + ok := setDefaultValue(v, params) + if ok { + offset = initOffset + } else { + err = StructuralError{"explicitly tagged member didn't match"} + } + return + } + } + + // Special case for strings: all the ASN.1 string types map to the Go + // type string. getUniversalType returns the tag for PrintableString + // when it sees a string, so if we see a different string type on the + // wire, we change the universal type to match. + if universalTag == tagPrintableString { + switch t.tag { + case tagIA5String, tagGeneralString, tagT61String, tagUTF8String: + universalTag = t.tag + } + } + + // Special case for time: UTCTime and GeneralizedTime both map to the + // Go type time.Time. 
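+// A GeneralizedTime tag on the wire is accepted by switching the expected universal tag before the comparison below.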
+ if universalTag == tagUTCTime && t.tag == tagGeneralizedTime { + universalTag = tagGeneralizedTime + } + + expectedClass := classUniversal + expectedTag := universalTag + + if !params.explicit && params.tag != nil { + expectedClass = classContextSpecific + expectedTag = *params.tag + } + + if !params.explicit && params.application && params.tag != nil { + expectedClass = classApplication + expectedTag = *params.tag + } + + // We have unwrapped any explicit tagging at this point. + if t.class != expectedClass || t.tag != expectedTag || t.isCompound != compoundType { + // Tags don't match. Again, it could be an optional element. + ok := setDefaultValue(v, params) + if ok { + offset = initOffset + } else { + err = StructuralError{fmt.Sprintf("tags don't match (%d vs %+v) %+v %s @%d", expectedTag, t, params, fieldType.Name(), offset)} + } + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"data truncated"} + return + } + innerBytes := bytes[offset : offset+t.length] + offset += t.length + + // We deal with the structures defined in this package first. + switch fieldType { + case objectIdentifierType: + newSlice, err1 := parseObjectIdentifier(innerBytes) + v.Set(reflect.MakeSlice(v.Type(), len(newSlice), len(newSlice))) + if err1 == nil { + reflect.Copy(v, reflect.ValueOf(newSlice)) + } + err = err1 + return + case bitStringType: + bs, err1 := parseBitString(innerBytes) + if err1 == nil { + v.Set(reflect.ValueOf(bs)) + } + err = err1 + return + case timeType: + var time time.Time + var err1 error + if universalTag == tagUTCTime { + time, err1 = parseUTCTime(innerBytes) + } else { + time, err1 = parseGeneralizedTime(innerBytes) + } + if err1 == nil { + v.Set(reflect.ValueOf(time)) + } + err = err1 + return + case enumeratedType: + parsedInt, err1 := parseInt32(innerBytes) + if err1 == nil { + v.SetInt(int64(parsedInt)) + } + err = err1 + return + case flagType: + v.SetBool(true) + return + case bigIntType: + parsedInt := parseBigInt(innerBytes) + v.Set(reflect.ValueOf(parsedInt)) + return + } + switch val := v; val.Kind() { + case reflect.Bool: + parsedBool, err1 := parseBool(innerBytes) + if err1 == nil { + val.SetBool(parsedBool) + } + err = err1 + return + case reflect.Int, reflect.Int32, reflect.Int64: + if val.Type().Size() == 4 { + parsedInt, err1 := parseInt32(innerBytes) + if err1 == nil { + val.SetInt(int64(parsedInt)) + } + err = err1 + } else { + parsedInt, err1 := parseInt64(innerBytes) + if err1 == nil { + val.SetInt(parsedInt) + } + err = err1 + } + return + // TODO(dfc) Add support for the remaining integer types + case reflect.Struct: + structType := fieldType + + if structType.NumField() > 0 && + structType.Field(0).Type == rawContentsType { + bytes := bytes[initOffset:offset] + val.Field(0).Set(reflect.ValueOf(RawContent(bytes))) + } + + innerOffset := 0 + for i := 0; i < structType.NumField(); i++ { + field := structType.Field(i) + if i == 0 && field.Type == rawContentsType { + continue + } + innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, parseFieldParameters(field.Tag.Get("asn1"))) + if err != nil { + return + } + } + // We allow extra bytes at the end of the SEQUENCE because + // adding elements to the end has been used in X.509 as the + // version numbers have increased. 
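+// Any bytes left over after the last field are therefore ignored rather than rejected.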
+ return + case reflect.Slice: + sliceType := fieldType + if sliceType.Elem().Kind() == reflect.Uint8 { + val.Set(reflect.MakeSlice(sliceType, len(innerBytes), len(innerBytes))) + reflect.Copy(val, reflect.ValueOf(innerBytes)) + return + } + newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem()) + if err1 == nil { + val.Set(newSlice) + } + err = err1 + return + case reflect.String: + var v string + switch universalTag { + case tagPrintableString: + v, err = parsePrintableString(innerBytes) + case tagIA5String: + v, err = parseIA5String(innerBytes) + case tagT61String: + v, err = parseT61String(innerBytes) + case tagUTF8String: + v, err = parseUTF8String(innerBytes) + case tagGeneralString: + // GeneralString is specified in ISO-2022/ECMA-35, + // A brief review suggests that it includes structures + // that allow the encoding to change midstring and + // such. We give up and pass it as an 8-bit string. + v, err = parseT61String(innerBytes) + default: + err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag)} + } + if err == nil { + val.SetString(v) + } + return + } + err = StructuralError{"unsupported: " + v.Type().String()} + return +} + +// setDefaultValue is used to install a default value, from a tag string, into +// a Value. It is successful is the field was optional, even if a default value +// wasn't provided or it failed to install it into the Value. +func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) { + if !params.optional { + return + } + ok = true + if params.defaultValue == nil { + return + } + switch val := v; val.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + val.SetInt(*params.defaultValue) + } + return +} + +// Unmarshal parses the DER-encoded ASN.1 data structure b +// and uses the reflect package to fill in an arbitrary value pointed at by val. +// Because Unmarshal uses the reflect package, the structs +// being written to must use upper case field names. +// +// An ASN.1 INTEGER can be written to an int, int32, int64, +// or *big.Int (from the math/big package). +// If the encoded value does not fit in the Go type, +// Unmarshal returns a parse error. +// +// An ASN.1 BIT STRING can be written to a BitString. +// +// An ASN.1 OCTET STRING can be written to a []byte. +// +// An ASN.1 OBJECT IDENTIFIER can be written to an +// ObjectIdentifier. +// +// An ASN.1 ENUMERATED can be written to an Enumerated. +// +// An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a time.Time. +// +// An ASN.1 PrintableString or IA5String can be written to a string. +// +// Any of the above ASN.1 values can be written to an interface{}. +// The value stored in the interface has the corresponding Go type. +// For integers, that type is int64. +// +// An ASN.1 SEQUENCE OF x or SET OF x can be written +// to a slice if an x can be written to the slice's element type. +// +// An ASN.1 SEQUENCE or SET can be written to a struct +// if each of the elements in the sequence can be +// written to the corresponding element in the struct. +// +// The following tags on struct fields have special meaning to Unmarshal: +// +// optional marks the field as ASN.1 OPTIONAL +// [explicit] tag:x specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC +// default:x sets the default value for optional integer fields +// +// If the type of the first field of a structure is RawContent then the raw +// ASN1 contents of the struct will be stored in it. 
+// +// Other ASN.1 types are not supported; if it encounters them, +// Unmarshal returns a parse error. +func Unmarshal(b []byte, val interface{}) (rest []byte, err error) { + return UnmarshalWithParams(b, val, "") +} + +// UnmarshalWithParams allows field parameters to be specified for the +// top-level element. The form of the params is the same as the field tags. +func UnmarshalWithParams(b []byte, val interface{}, params string) (rest []byte, err error) { + v := reflect.ValueOf(val).Elem() + offset, err := parseField(v, b, 0, parseFieldParameters(params)) + if err != nil { + return nil, err + } + return b[offset:], nil +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/asn1/common.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/asn1/common.go new file mode 100644 index 000000000..33a117ece --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/asn1/common.go @@ -0,0 +1,163 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package asn1 + +import ( + "reflect" + "strconv" + "strings" +) + +// ASN.1 objects have metadata preceding them: +// the tag: the type of the object +// a flag denoting if this object is compound or not +// the class type: the namespace of the tag +// the length of the object, in bytes + +// Here are some standard tags and classes + +const ( + tagBoolean = 1 + tagInteger = 2 + tagBitString = 3 + tagOctetString = 4 + tagOID = 6 + tagEnum = 10 + tagUTF8String = 12 + tagSequence = 16 + tagSet = 17 + tagPrintableString = 19 + tagT61String = 20 + tagIA5String = 22 + tagUTCTime = 23 + tagGeneralizedTime = 24 + tagGeneralString = 27 +) + +const ( + classUniversal = 0 + classApplication = 1 + classContextSpecific = 2 + classPrivate = 3 +) + +type tagAndLength struct { + class, tag, length int + isCompound bool +} + +// ASN.1 has IMPLICIT and EXPLICIT tags, which can be translated as "instead +// of" and "in addition to". When not specified, every primitive type has a +// default tag in the UNIVERSAL class. +// +// For example: a BIT STRING is tagged [UNIVERSAL 3] by default (although ASN.1 +// doesn't actually have a UNIVERSAL keyword). However, by saying [IMPLICIT +// CONTEXT-SPECIFIC 42], that means that the tag is replaced by another. +// +// On the other hand, if it said [EXPLICIT CONTEXT-SPECIFIC 10], then an +// /additional/ tag would wrap the default tag. This explicit tag will have the +// compound flag set. +// +// (This is used in order to remove ambiguity with optional elements.) +// +// You can layer EXPLICIT and IMPLICIT tags to an arbitrary depth, however we +// don't support that here. We support a single layer of EXPLICIT or IMPLICIT +// tagging with tag strings on the fields of a structure. + +// fieldParameters is the parsed representation of tag string from a structure field. +type fieldParameters struct { + optional bool // true iff the field is OPTIONAL + explicit bool // true iff an EXPLICIT tag is in use. + application bool // true iff an APPLICATION tag is in use. + defaultValue *int64 // a default value for INTEGER typed fields (maybe nil). + tag *int // the EXPLICIT or IMPLICIT tag (maybe nil). + stringType int // the string tag to use when marshaling. + set bool // true iff this should be encoded as a SET + omitEmpty bool // true iff this should be omitted if empty when marshaling. + + // Invariants: + // if explicit is set, tag is non-nil. 
+} + +// Given a tag string with the format specified in the package comment, +// parseFieldParameters will parse it into a fieldParameters structure, +// ignoring unknown parts of the string. +func parseFieldParameters(str string) (ret fieldParameters) { + for _, part := range strings.Split(str, ",") { + switch { + case part == "optional": + ret.optional = true + case part == "explicit": + ret.explicit = true + if ret.tag == nil { + ret.tag = new(int) + } + case part == "ia5": + ret.stringType = tagIA5String + case part == "printable": + ret.stringType = tagPrintableString + case part == "utf8": + ret.stringType = tagUTF8String + case strings.HasPrefix(part, "default:"): + i, err := strconv.ParseInt(part[8:], 10, 64) + if err == nil { + ret.defaultValue = new(int64) + *ret.defaultValue = i + } + case strings.HasPrefix(part, "tag:"): + i, err := strconv.Atoi(part[4:]) + if err == nil { + ret.tag = new(int) + *ret.tag = i + } + case part == "set": + ret.set = true + case part == "application": + ret.application = true + if ret.tag == nil { + ret.tag = new(int) + } + case part == "omitempty": + ret.omitEmpty = true + } + } + return +} + +// Given a reflected Go type, getUniversalType returns the default tag number +// and expected compound flag. +func getUniversalType(t reflect.Type) (tagNumber int, isCompound, ok bool) { + switch t { + case objectIdentifierType: + return tagOID, false, true + case bitStringType: + return tagBitString, false, true + case timeType: + return tagUTCTime, false, true + case enumeratedType: + return tagEnum, false, true + case bigIntType: + return tagInteger, false, true + } + switch t.Kind() { + case reflect.Bool: + return tagBoolean, false, true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return tagInteger, false, true + case reflect.Struct: + return tagSequence, true, true + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + return tagOctetString, false, true + } + if strings.HasSuffix(t.Name(), "SET") { + return tagSet, true, true + } + return tagSequence, true, true + case reflect.String: + return tagPrintableString, false, true + } + return 0, false, false +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/asn1/marshal.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/asn1/marshal.go new file mode 100644 index 000000000..ed17e41a5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/asn1/marshal.go @@ -0,0 +1,581 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package asn1 + +import ( + "bytes" + "errors" + "fmt" + "io" + "math/big" + "reflect" + "time" + "unicode/utf8" +) + +// A forkableWriter is an in-memory buffer that can be +// 'forked' to create new forkableWriters that bracket the +// original. After +// pre, post := w.fork(); +// the overall sequence of bytes represented is logically w+pre+post. 
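+// This lets the marshaller write a value's body first and fill in the preceding tag and length bytes afterwards, without copying the body.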
+type forkableWriter struct { + *bytes.Buffer + pre, post *forkableWriter +} + +func newForkableWriter() *forkableWriter { + return &forkableWriter{new(bytes.Buffer), nil, nil} +} + +func (f *forkableWriter) fork() (pre, post *forkableWriter) { + if f.pre != nil || f.post != nil { + panic("have already forked") + } + f.pre = newForkableWriter() + f.post = newForkableWriter() + return f.pre, f.post +} + +func (f *forkableWriter) Len() (l int) { + l += f.Buffer.Len() + if f.pre != nil { + l += f.pre.Len() + } + if f.post != nil { + l += f.post.Len() + } + return +} + +func (f *forkableWriter) writeTo(out io.Writer) (n int, err error) { + n, err = out.Write(f.Bytes()) + if err != nil { + return + } + + var nn int + + if f.pre != nil { + nn, err = f.pre.writeTo(out) + n += nn + if err != nil { + return + } + } + + if f.post != nil { + nn, err = f.post.writeTo(out) + n += nn + } + return +} + +func marshalBase128Int(out *forkableWriter, n int64) (err error) { + if n == 0 { + err = out.WriteByte(0) + return + } + + l := 0 + for i := n; i > 0; i >>= 7 { + l++ + } + + for i := l - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + err = out.WriteByte(o) + if err != nil { + return + } + } + + return nil +} + +func marshalInt64(out *forkableWriter, i int64) (err error) { + n := int64Length(i) + + for ; n > 0; n-- { + err = out.WriteByte(byte(i >> uint((n-1)*8))) + if err != nil { + return + } + } + + return nil +} + +func int64Length(i int64) (numBytes int) { + numBytes = 1 + + for i > 127 { + numBytes++ + i >>= 8 + } + + for i < -128 { + numBytes++ + i >>= 8 + } + + return +} + +func marshalBigInt(out *forkableWriter, n *big.Int) (err error) { + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement + // form. So we'll subtract 1 and invert. If the + // most-significant-bit isn't set then we'll need to pad the + // beginning with 0xff in order to keep the number negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + err = out.WriteByte(0xff) + if err != nil { + return + } + } + _, err = out.Write(bytes) + } else if n.Sign() == 0 { + // Zero is written as a single 0 zero rather than no bytes. + err = out.WriteByte(0x00) + } else { + bytes := n.Bytes() + if len(bytes) > 0 && bytes[0]&0x80 != 0 { + // We'll have to pad this with 0x00 in order to stop it + // looking like a negative number. 
+ err = out.WriteByte(0) + if err != nil { + return + } + } + _, err = out.Write(bytes) + } + return +} + +func marshalLength(out *forkableWriter, i int) (err error) { + n := lengthLength(i) + + for ; n > 0; n-- { + err = out.WriteByte(byte(i >> uint((n-1)*8))) + if err != nil { + return + } + } + + return nil +} + +func lengthLength(i int) (numBytes int) { + numBytes = 1 + for i > 255 { + numBytes++ + i >>= 8 + } + return +} + +func marshalTagAndLength(out *forkableWriter, t tagAndLength) (err error) { + b := uint8(t.class) << 6 + if t.isCompound { + b |= 0x20 + } + if t.tag >= 31 { + b |= 0x1f + err = out.WriteByte(b) + if err != nil { + return + } + err = marshalBase128Int(out, int64(t.tag)) + if err != nil { + return + } + } else { + b |= uint8(t.tag) + err = out.WriteByte(b) + if err != nil { + return + } + } + + if t.length >= 128 { + l := lengthLength(t.length) + err = out.WriteByte(0x80 | byte(l)) + if err != nil { + return + } + err = marshalLength(out, t.length) + if err != nil { + return + } + } else { + err = out.WriteByte(byte(t.length)) + if err != nil { + return + } + } + + return nil +} + +func marshalBitString(out *forkableWriter, b BitString) (err error) { + paddingBits := byte((8 - b.BitLength%8) % 8) + err = out.WriteByte(paddingBits) + if err != nil { + return + } + _, err = out.Write(b.Bytes) + return +} + +func marshalObjectIdentifier(out *forkableWriter, oid []int) (err error) { + if len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) { + return StructuralError{"invalid object identifier"} + } + + err = marshalBase128Int(out, int64(oid[0]*40+oid[1])) + if err != nil { + return + } + for i := 2; i < len(oid); i++ { + err = marshalBase128Int(out, int64(oid[i])) + if err != nil { + return + } + } + + return +} + +func marshalPrintableString(out *forkableWriter, s string) (err error) { + b := []byte(s) + for _, c := range b { + if !isPrintable(c) { + return StructuralError{"PrintableString contains invalid character"} + } + } + + _, err = out.Write(b) + return +} + +func marshalIA5String(out *forkableWriter, s string) (err error) { + b := []byte(s) + for _, c := range b { + if c > 127 { + return StructuralError{"IA5String contains invalid character"} + } + } + + _, err = out.Write(b) + return +} + +func marshalUTF8String(out *forkableWriter, s string) (err error) { + _, err = out.Write([]byte(s)) + return +} + +func marshalTwoDigits(out *forkableWriter, v int) (err error) { + err = out.WriteByte(byte('0' + (v/10)%10)) + if err != nil { + return + } + return out.WriteByte(byte('0' + v%10)) +} + +func marshalUTCTime(out *forkableWriter, t time.Time) (err error) { + year, month, day := t.Date() + + switch { + case 1950 <= year && year < 2000: + err = marshalTwoDigits(out, int(year-1900)) + case 2000 <= year && year < 2050: + err = marshalTwoDigits(out, int(year-2000)) + default: + return StructuralError{"cannot represent time as UTCTime"} + } + if err != nil { + return + } + + err = marshalTwoDigits(out, int(month)) + if err != nil { + return + } + + err = marshalTwoDigits(out, day) + if err != nil { + return + } + + hour, min, sec := t.Clock() + + err = marshalTwoDigits(out, hour) + if err != nil { + return + } + + err = marshalTwoDigits(out, min) + if err != nil { + return + } + + err = marshalTwoDigits(out, sec) + if err != nil { + return + } + + _, offset := t.Zone() + + switch { + case offset/60 == 0: + err = out.WriteByte('Z') + return + case offset > 0: + err = out.WriteByte('+') + case offset < 0: + err = out.WriteByte('-') + } + + if err != nil { + return + 
} + + offsetMinutes := offset / 60 + if offsetMinutes < 0 { + offsetMinutes = -offsetMinutes + } + + err = marshalTwoDigits(out, offsetMinutes/60) + if err != nil { + return + } + + err = marshalTwoDigits(out, offsetMinutes%60) + return +} + +func stripTagAndLength(in []byte) []byte { + _, offset, err := parseTagAndLength(in, 0) + if err != nil { + return in + } + return in[offset:] +} + +func marshalBody(out *forkableWriter, value reflect.Value, params fieldParameters) (err error) { + switch value.Type() { + case timeType: + return marshalUTCTime(out, value.Interface().(time.Time)) + case bitStringType: + return marshalBitString(out, value.Interface().(BitString)) + case objectIdentifierType: + return marshalObjectIdentifier(out, value.Interface().(ObjectIdentifier)) + case bigIntType: + return marshalBigInt(out, value.Interface().(*big.Int)) + } + + switch v := value; v.Kind() { + case reflect.Bool: + if v.Bool() { + return out.WriteByte(255) + } else { + return out.WriteByte(0) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return marshalInt64(out, int64(v.Int())) + case reflect.Struct: + t := v.Type() + + startingField := 0 + + // If the first element of the structure is a non-empty + // RawContents, then we don't bother serializing the rest. + if t.NumField() > 0 && t.Field(0).Type == rawContentsType { + s := v.Field(0) + if s.Len() > 0 { + bytes := make([]byte, s.Len()) + for i := 0; i < s.Len(); i++ { + bytes[i] = uint8(s.Index(i).Uint()) + } + /* The RawContents will contain the tag and + * length fields but we'll also be writing + * those ourselves, so we strip them out of + * bytes */ + _, err = out.Write(stripTagAndLength(bytes)) + return + } else { + startingField = 1 + } + } + + for i := startingField; i < t.NumField(); i++ { + var pre *forkableWriter + pre, out = out.fork() + err = marshalField(pre, v.Field(i), parseFieldParameters(t.Field(i).Tag.Get("asn1"))) + if err != nil { + return + } + } + return + case reflect.Slice: + sliceType := v.Type() + if sliceType.Elem().Kind() == reflect.Uint8 { + bytes := make([]byte, v.Len()) + for i := 0; i < v.Len(); i++ { + bytes[i] = uint8(v.Index(i).Uint()) + } + _, err = out.Write(bytes) + return + } + + var fp fieldParameters + for i := 0; i < v.Len(); i++ { + var pre *forkableWriter + pre, out = out.fork() + err = marshalField(pre, v.Index(i), fp) + if err != nil { + return + } + } + return + case reflect.String: + switch params.stringType { + case tagIA5String: + return marshalIA5String(out, v.String()) + case tagPrintableString: + return marshalPrintableString(out, v.String()) + default: + return marshalUTF8String(out, v.String()) + } + } + + return StructuralError{"unknown Go type"} +} + +func marshalField(out *forkableWriter, v reflect.Value, params fieldParameters) (err error) { + // If the field is an interface{} then recurse into it. 
+ if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 { + return marshalField(out, v.Elem(), params) + } + + if v.Kind() == reflect.Slice && v.Len() == 0 && params.omitEmpty { + return + } + + if params.optional && reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) { + return + } + + if v.Type() == rawValueType { + rv := v.Interface().(RawValue) + if len(rv.FullBytes) != 0 { + _, err = out.Write(rv.FullBytes) + } else { + err = marshalTagAndLength(out, tagAndLength{rv.Class, rv.Tag, len(rv.Bytes), rv.IsCompound}) + if err != nil { + return + } + _, err = out.Write(rv.Bytes) + } + return + } + + tag, isCompound, ok := getUniversalType(v.Type()) + if !ok { + err = StructuralError{fmt.Sprintf("unknown Go type: %v", v.Type())} + return + } + class := classUniversal + + if params.stringType != 0 && tag != tagPrintableString { + return StructuralError{"explicit string type given to non-string member"} + } + + if tag == tagPrintableString { + if params.stringType == 0 { + // This is a string without an explicit string type. We'll use + // a PrintableString if the character set in the string is + // sufficiently limited, otherwise we'll use a UTF8String. + for _, r := range v.String() { + if r >= utf8.RuneSelf || !isPrintable(byte(r)) { + if !utf8.ValidString(v.String()) { + return errors.New("asn1: string not valid UTF-8") + } + tag = tagUTF8String + break + } + } + } else { + tag = params.stringType + } + } + + if params.set { + if tag != tagSequence { + return StructuralError{"non sequence tagged as set"} + } + tag = tagSet + } + + tags, body := out.fork() + + err = marshalBody(body, v, params) + if err != nil { + return + } + + bodyLen := body.Len() + + var explicitTag *forkableWriter + if params.explicit { + explicitTag, tags = tags.fork() + } + + if !params.explicit && params.tag != nil { + // implicit tag. + tag = *params.tag + class = classContextSpecific + } + + err = marshalTagAndLength(tags, tagAndLength{class, tag, bodyLen, isCompound}) + if err != nil { + return + } + + if params.explicit { + err = marshalTagAndLength(explicitTag, tagAndLength{ + class: classContextSpecific, + tag: *params.tag, + length: bodyLen + tags.Len(), + isCompound: true, + }) + } + + return nil +} + +// Marshal returns the ASN.1 encoding of val. +func Marshal(val interface{}) ([]byte, error) { + var out bytes.Buffer + v := reflect.ValueOf(val) + f := newForkableWriter() + err := marshalField(f, v, fieldParameters{}) + if err != nil { + return nil, err + } + _, err = f.writeTo(&out) + return out.Bytes(), nil +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/client/logclient.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/client/logclient.go new file mode 100644 index 000000000..677860d0e --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/client/logclient.go @@ -0,0 +1,330 @@ +// Package client is a CT log client implementation and contains types and code +// for interacting with RFC6962-compliant CT Log instances. 
+// See http://tools.ietf.org/html/rfc6962 for details +package client + +import ( + "bytes" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "log" + "net/http" + "strconv" + "time" + + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go" + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/mreiferson/go-httpclient" +) + +// URI paths for CT Log endpoints +const ( + AddChainPath = "/ct/v1/add-chain" + AddPreChainPath = "/ct/v1/add-pre-chain" + GetSTHPath = "/ct/v1/get-sth" + GetEntriesPath = "/ct/v1/get-entries" +) + +// LogClient represents a client for a given CT Log instance +type LogClient struct { + uri string // the base URI of the log. e.g. http://ct.googleapis/pilot + httpClient *http.Client // used to interact with the log via HTTP +} + +////////////////////////////////////////////////////////////////////////////////// +// JSON structures follow. +// These represent the structures returned by the CT Log server. +////////////////////////////////////////////////////////////////////////////////// + +// addChainRequest represents the JSON request body sent to the add-chain CT +// method. +type addChainRequest struct { + Chain []string `json:"chain"` +} + +// addChainResponse represents the JSON response to the add-chain CT method. +// An SCT represents a Log's promise to integrate a [pre-]certificate into the +// log within a defined period of time. +type addChainResponse struct { + SCTVersion ct.Version `json:"sct_version"` // SCT structure version + ID string `json:"id"` // Log ID + Timestamp uint64 `json:"timestamp"` // Timestamp of issuance + Extensions string `json:"extensions"` // Holder for any CT extensions + Signature string `json:"signature"` // Log signature for this SCT +} + +// getSTHResponse respresents the JSON response to the get-sth CT method +type getSTHResponse struct { + TreeSize uint64 `json:"tree_size"` // Number of certs in the current tree + Timestamp uint64 `json:"timestamp"` // Time that the tree was created + SHA256RootHash string `json:"sha256_root_hash"` // Root hash of the tree + TreeHeadSignature string `json:"tree_head_signature"` // Log signature for this STH +} + +// base64LeafEntry respresents a Base64 encoded leaf entry +type base64LeafEntry struct { + LeafInput string `json:"leaf_input"` + ExtraData string `json:"extra_data"` +} + +// getEntriesReponse respresents the JSON response to the CT get-entries method +type getEntriesResponse struct { + Entries []base64LeafEntry `json:"entries"` // the list of returned entries +} + +// getConsistencyProofResponse represents the JSON response to the CT get-consistency-proof method +type getConsistencyProofResponse struct { + Consistency []string `json:"consistency"` +} + +// getAuditProofResponse represents the JSON response to the CT get-audit-proof method +type getAuditProofResponse struct { + Hash []string `json:"hash"` // the hashes which make up the proof + TreeSize uint64 `json:"tree_size"` // the tree size against which this proof is constructed +} + +// getAcceptedRootsResponse represents the JSON response to the CT get-roots method. 
+type getAcceptedRootsResponse struct { + Certificates []string `json:"certificates"` +} + +// getEntryAndProodReponse represents the JSON response to the CT get-entry-and-proof method +type getEntryAndProofResponse struct { + LeafInput string `json:"leaf_input"` // the entry itself + ExtraData string `json:"extra_data"` // any chain provided when the entry was added to the log + AuditPath []string `json:"audit_path"` // the corresponding proof +} + +// New constructs a new LogClient instance. +// |uri| is the base URI of the CT log instance to interact with, e.g. +// http://ct.googleapis.com/pilot +func New(uri string) *LogClient { + var c LogClient + c.uri = uri + transport := &httpclient.Transport{ + ConnectTimeout: 10 * time.Second, + RequestTimeout: 30 * time.Second, + ResponseHeaderTimeout: 30 * time.Second, + MaxIdleConnsPerHost: 10, + DisableKeepAlives: false, + } + c.httpClient = &http.Client{Transport: transport} + return &c +} + +// Makes a HTTP call to |uri|, and attempts to parse the response as a JSON +// representation of the structure in |res|. +// Returns a non-nil |error| if there was a problem. +func (c *LogClient) fetchAndParse(uri string, res interface{}) error { + req, err := http.NewRequest("GET", uri, nil) + if err != nil { + return err + } + req.Header.Set("Keep-Alive", "timeout=15, max=100") + resp, err := c.httpClient.Do(req) + var body []byte + if resp != nil { + body, err = ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return err + } + } + if err != nil { + return err + } + if err = json.Unmarshal(body, &res); err != nil { + return err + } + return nil +} + +// Makes a HTTP POST call to |uri|, and attempts to parse the response as a JSON +// representation of the structure in |res|. +// Returns a non-nil |error| if there was a problem. +func (c *LogClient) postAndParse(uri string, req interface{}, res interface{}) (*http.Response, string, error) { + postBody, err := json.Marshal(req) + if err != nil { + return nil, "", err + } + httpReq, err := http.NewRequest("POST", uri, bytes.NewReader(postBody)) + if err != nil { + return nil, "", err + } + httpReq.Header.Set("Keep-Alive", "timeout=15, max=100") + httpReq.Header.Set("Content-Type", "application/json") + resp, err := c.httpClient.Do(httpReq) + // Read all of the body, if there is one, so that the http.Client can do + // Keep-Alive: + var body []byte + if resp != nil { + body, err = ioutil.ReadAll(resp.Body) + resp.Body.Close() + } + if err != nil { + return resp, string(body), err + } + if resp.StatusCode == 200 { + if err != nil { + return resp, string(body), err + } + if err = json.Unmarshal(body, &res); err != nil { + return resp, string(body), err + } + } + return resp, string(body), nil +} + +// Attempts to add |chain| to the log, using the api end-point specified by +// |path|. 
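+// Retries on HTTP 408 and 503 responses (backing off on 503) and returns the parsed SCT once the log answers with 200.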
+func (c *LogClient) addChainWithRetry(path string, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
+	var resp addChainResponse
+	var req addChainRequest
+	for _, link := range chain {
+		req.Chain = append(req.Chain, base64.StdEncoding.EncodeToString(link))
+	}
+	done := false
+	httpStatus := "Unknown"
+	for !done {
+		backoffSeconds := 0
+		httpResp, errorBody, err := c.postAndParse(c.uri+path, &req, &resp)
+		if err != nil {
+			log.Printf("Got %s, backing off.", err)
+			backoffSeconds = 10
+		} else {
+			switch {
+			case httpResp.StatusCode == 200:
+				done = true
+				break
+			case httpResp.StatusCode == 408:
+			case httpResp.StatusCode == 503:
+				// Retry, honouring any Retry-After header the log sent.
+				backoffSeconds = 10
+				if retryAfter := httpResp.Header.Get("Retry-After"); retryAfter != "" {
+					if seconds, err := strconv.Atoi(retryAfter); err == nil {
+						backoffSeconds = seconds
+					}
+				}
+			default:
+				return nil, fmt.Errorf("Got HTTP Status %s: %s", httpResp.Status, errorBody)
+			}
+			httpStatus = httpResp.Status
+		}
+		// Now back-off before retrying
+		log.Printf("Got %s, backing-off %d seconds.", httpStatus, backoffSeconds)
+		time.Sleep(time.Duration(backoffSeconds) * time.Second)
+	}
+
+	rawLogID, err := base64.StdEncoding.DecodeString(resp.ID)
+	if err != nil {
+		return nil, err
+	}
+	rawSignature, err := base64.StdEncoding.DecodeString(resp.Signature)
+	if err != nil {
+		return nil, err
+	}
+	ds, err := ct.UnmarshalDigitallySigned(bytes.NewReader(rawSignature))
+	if err != nil {
+		return nil, err
+	}
+	var logID ct.SHA256Hash
+	copy(logID[:], rawLogID)
+	return &ct.SignedCertificateTimestamp{
+		SCTVersion: resp.SCTVersion,
+		LogID:      logID,
+		Timestamp:  resp.Timestamp,
+		Extensions: ct.CTExtensions(resp.Extensions),
+		Signature:  *ds}, nil
+}
+
+// AddChain adds the (DER represented) X509 |chain| to the log.
+func (c *LogClient) AddChain(chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
+	return c.addChainWithRetry(AddChainPath, chain)
+}
+
+// AddPreChain adds the (DER represented) Precertificate |chain| to the log.
+func (c *LogClient) AddPreChain(chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
+	return c.addChainWithRetry(AddPreChainPath, chain)
+}
+
+// GetSTH retrieves the current STH from the log.
+// Returns a populated SignedTreeHead, or a non-nil error.
+func (c *LogClient) GetSTH() (sth *ct.SignedTreeHead, err error) {
+	var resp getSTHResponse
+	if err = c.fetchAndParse(c.uri+GetSTHPath, &resp); err != nil {
+		return
+	}
+	sth = &ct.SignedTreeHead{
+		TreeSize:  resp.TreeSize,
+		Timestamp: resp.Timestamp,
+	}
+
+	rawRootHash, err := base64.StdEncoding.DecodeString(resp.SHA256RootHash)
+	if err != nil {
+		return nil, fmt.Errorf("invalid base64 encoding in sha256_root_hash: %v", err)
+	}
+	if len(rawRootHash) != sha256.Size {
+		return nil, fmt.Errorf("sha256_root_hash is invalid length, expected %d got %d", sha256.Size, len(rawRootHash))
+	}
+	copy(sth.SHA256RootHash[:], rawRootHash)
+
+	rawSignature, err := base64.StdEncoding.DecodeString(resp.TreeHeadSignature)
+	if err != nil {
+		return nil, errors.New("invalid base64 encoding in tree_head_signature")
+	}
+	ds, err := ct.UnmarshalDigitallySigned(bytes.NewReader(rawSignature))
+	if err != nil {
+		return nil, err
+	}
+	// TODO(alcutter): Verify signature
+	sth.TreeHeadSignature = *ds
+	return
+}
+
+// GetEntries attempts to retrieve the entries in the sequence [|start|, |end|] from the CT
+// log server. (see section 4.6.)
+// Returns a slice of LeafInputs or a non-nil error.
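+// An illustrative paging loop over a whole log (the log URI and batch size of
+// 1000 are assumptions, not requirements of this package):
+//
+//	c := New("https://ct.example.com")
+//	sth, err := c.GetSTH()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for start := int64(0); start < int64(sth.TreeSize); start += 1000 {
+//		end := start + 999
+//		if end >= int64(sth.TreeSize) {
+//			end = int64(sth.TreeSize) - 1
+//		}
+//		entries, err := c.GetEntries(start, end)
+//		// ... handle err and process entries ...
+//	}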
+func (c *LogClient) GetEntries(start, end int64) ([]ct.LogEntry, error) { + if end < 0 { + return nil, errors.New("end should be >= 0") + } + if end < start { + return nil, errors.New("start should be <= end") + } + var resp getEntriesResponse + err := c.fetchAndParse(fmt.Sprintf("%s%s?start=%d&end=%d", c.uri, GetEntriesPath, start, end), &resp) + if err != nil { + return nil, err + } + entries := make([]ct.LogEntry, end-start+1, end-start+1) + for index, entry := range resp.Entries { + leafBytes, err := base64.StdEncoding.DecodeString(entry.LeafInput) + leaf, err := ct.ReadMerkleTreeLeaf(bytes.NewBuffer(leafBytes)) + if err != nil { + return nil, err + } + entries[index].Leaf = *leaf + chainBytes, err := base64.StdEncoding.DecodeString(entry.ExtraData) + + var chain []ct.ASN1Cert + switch leaf.TimestampedEntry.EntryType { + case ct.X509LogEntryType: + chain, err = ct.UnmarshalX509ChainArray(chainBytes) + + case ct.PrecertLogEntryType: + chain, err = ct.UnmarshalPrecertChainArray(chainBytes) + + default: + return nil, fmt.Errorf("saw unknown entry type: %v", leaf.TimestampedEntry.EntryType) + } + if err != nil { + return nil, err + } + entries[index].Chain = chain + entries[index].Index = start + int64(index) + } + return entries, nil +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/gossip/handler.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/gossip/handler.go new file mode 100644 index 000000000..69065f9bd --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/gossip/handler.go @@ -0,0 +1,143 @@ +package gossip + +import ( + "encoding/json" + "flag" + "fmt" + "log" + "net/http" + "time" + + ct "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go" +) + +var defaultNumPollinationsToReturn = flag.Int("default_num_pollinations_to_return", 10, + "Number of randomly selected STH pollination entries to return for sth-pollination requests.") + +type clock interface { + Now() time.Time +} + +type realClock struct{} + +func (realClock) Now() time.Time { + return time.Now() +} + +// SignatureVerifierMap is a map of SignatureVerifier by LogID +type SignatureVerifierMap map[ct.SHA256Hash]ct.SignatureVerifier + +// Handler for the gossip HTTP requests. +type Handler struct { + storage *Storage + verifiers SignatureVerifierMap + clock clock +} + +func writeWrongMethodResponse(rw *http.ResponseWriter, allowed string) { + (*rw).Header().Add("Allow", allowed) + (*rw).WriteHeader(http.StatusMethodNotAllowed) +} + +func writeErrorResponse(rw *http.ResponseWriter, status int, body string) { + (*rw).WriteHeader(status) + (*rw).Write([]byte(body)) +} + +// HandleSCTFeedback handles requests POSTed to .../sct-feedback. +// It attempts to store the provided SCT Feedback +func (h *Handler) HandleSCTFeedback(rw http.ResponseWriter, req *http.Request) { + if req.Method != "POST" { + writeWrongMethodResponse(&rw, "POST") + return + } + + decoder := json.NewDecoder(req.Body) + var feedback SCTFeedback + if err := decoder.Decode(&feedback); err != nil { + writeErrorResponse(&rw, http.StatusBadRequest, fmt.Sprintf("Invalid SCT Feedback received: %v", err)) + return + } + + // TODO(alcutter): 5.1.1 Validate leaf chains up to a trusted root + // TODO(alcutter): 5.1.1/2 Verify each SCT is valid and from a known log, discard those which aren't + // TODO(alcutter): 5.1.1/3 Discard leaves for domains other than ours. 
+	if err := h.storage.AddSCTFeedback(feedback); err != nil {
+		writeErrorResponse(&rw, http.StatusInternalServerError, fmt.Sprintf("Unable to store feedback: %v", err))
+		return
+	}
+	rw.WriteHeader(http.StatusOK)
+}
+
+// HandleSTHPollination handles requests POSTed to .../sth-pollination.
+// It attempts to store the provided pollination info, and returns a random set of
+// pollination data from the last 14 days (i.e. "fresh" by the definition of the gossip RFC).
+func (h *Handler) HandleSTHPollination(rw http.ResponseWriter, req *http.Request) {
+	if req.Method != "POST" {
+		writeWrongMethodResponse(&rw, "POST")
+		return
+	}
+
+	decoder := json.NewDecoder(req.Body)
+	var p STHPollination
+	if err := decoder.Decode(&p); err != nil {
+		writeErrorResponse(&rw, http.StatusBadRequest, fmt.Sprintf("Invalid STH Pollination received: %v", err))
+		return
+	}
+
+	sthToKeep := make([]ct.SignedTreeHead, 0, len(p.STHs))
+	for _, sth := range p.STHs {
+		v, found := h.verifiers[sth.LogID]
+		if !found {
+			log.Printf("Pollination entry for unknown logID: %s", sth.LogID.Base64String())
+			continue
+		}
+		if err := v.VerifySTHSignature(sth); err != nil {
+			log.Printf("Failed to verify STH, dropping: %v", err)
+			continue
+		}
+		sthToKeep = append(sthToKeep, sth)
+	}
+	p.STHs = sthToKeep
+
+	err := h.storage.AddSTHPollination(p)
+	if err != nil {
+		writeErrorResponse(&rw, http.StatusInternalServerError, fmt.Sprintf("Couldn't store pollination: %v", err))
+		return
+	}
+
+	freshTime := h.clock.Now().AddDate(0, 0, -14)
+	rp, err := h.storage.GetRandomSTHPollination(freshTime, *defaultNumPollinationsToReturn)
+	if err != nil {
+		writeErrorResponse(&rw, http.StatusInternalServerError, fmt.Sprintf("Couldn't fetch pollination to return: %v", err))
+		return
+	}
+
+	json := json.NewEncoder(rw)
+	if err := json.Encode(*rp); err != nil {
+		writeErrorResponse(&rw, http.StatusInternalServerError, fmt.Sprintf("Couldn't encode pollination to return: %v", err))
+		return
+	}
+}
+
+// NewHandler creates a new Handler object, taking a pointer to a Storage object to
+// use for storing and retrieving feedback and pollination data, and a
+// SignatureVerifierMap for verifying signatures from known logs.
+func NewHandler(s *Storage, v SignatureVerifierMap) Handler {
+	return Handler{
+		storage:   s,
+		verifiers: v,
+		clock:     realClock{},
+	}
+}
+
+// newHandlerWithClock is like NewHandler, but additionally takes a clock
+// implementation so that callers (typically tests) can control the notion of
+// "now" used when selecting fresh pollination data.
+func newHandlerWithClock(s *Storage, v SignatureVerifierMap, c clock) Handler { + return Handler{ + storage: s, + verifiers: v, + clock: c, + } +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/gossip/main/gossip_server.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/gossip/main/gossip_server.go new file mode 100644 index 000000000..149e63381 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/gossip/main/gossip_server.go @@ -0,0 +1,73 @@ +package main + +import ( + "errors" + "flag" + "fmt" + "io/ioutil" + "log" + "net/http" + "strings" + + ct "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go" + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/gossip" +) + +var dbPath = flag.String("database", "/tmp/gossip.sq3", "Path to database.") +var listenAddress = flag.String("listen", ":8080", "Listen address:port for HTTP server.") +var logKeys = flag.String("log_public_keys", "", "Comma separated list of files containing trusted Logs' public keys in PEM format") + +func createVerifiers() (*gossip.SignatureVerifierMap, error) { + m := make(gossip.SignatureVerifierMap) + if len(*logKeys) == 0 { + return nil, errors.New("--log_public_keys is empty") + } + keys := strings.Split(*logKeys, ",") + for _, k := range keys { + pem, err := ioutil.ReadFile(k) + if err != nil { + return nil, fmt.Errorf("failed to read specified PEM file %s: %v", k, err) + } + for len(pem) > 0 { + key, id, rest, err := ct.PublicKeyFromPEM(pem) + pem = rest + if err != nil { + return nil, fmt.Errorf("failed to read public key from PEM in file %s: %v", k, err) + } + sv, err := ct.NewSignatureVerifier(key) + if err != nil { + return nil, fmt.Errorf("Failed to create new SignatureVerifier: %v", err) + } + m[id] = *sv + log.Printf("Loaded key for LogID %v", id) + } + } + return &m, nil +} + +func main() { + flag.Parse() + verifierMap, err := createVerifiers() + if err != nil { + log.Fatalf("Failed to load log public keys: %v", err) + } + log.Print("Starting gossip server.") + + storage := gossip.Storage{} + if err := storage.Open(*dbPath); err != nil { + log.Fatalf("Failed to open storage: %v", err) + } + defer storage.Close() + + handler := gossip.NewHandler(&storage, *verifierMap) + serveMux := http.NewServeMux() + serveMux.HandleFunc("/.well-known/ct/v1/sct-feedback", handler.HandleSCTFeedback) + serveMux.HandleFunc("/.well-known/ct/v1/sth-pollination", handler.HandleSTHPollination) + server := &http.Server{ + Addr: *listenAddress, + Handler: serveMux, + } + if err := server.ListenAndServe(); err != nil { + log.Printf("Error serving: %v", err) + } +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/gossip/storage.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/gossip/storage.go new file mode 100644 index 000000000..1ed1493be --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/gossip/storage.go @@ -0,0 +1,377 @@ +package gossip + +import ( + "database/sql" + "errors" + "fmt" + "log" + "strings" + "time" + + ct "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go" + "github.com/mattn/go-sqlite3" +) + +const schema = ` + CREATE TABLE IF NOT EXISTS sths ( + version INTEGER NOT NULL, + tree_size INTEGER NOT NULL, + timestamp INTEGER NOT NULL, + root_hash BYTES NOT NULL, + signature BYTES NOT NULL, + 
log_id BYTES NOT NULL, + PRIMARY KEY (version, tree_size, timestamp, root_hash, log_id) + ); + + CREATE TABLE IF NOT EXISTS scts ( + sct_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + sct BYTES NOT NULL UNIQUE + ); + + CREATE TABLE IF NOT EXISTS chains ( + chain_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + chain STRING NOT NULL UNIQUE + ); + + CREATE TABLE IF NOT EXISTS sct_feedback ( + chain_id INTEGER NOT NULL REFERENCES chains(chain_id), + sct_id INTEGER NOT NULL REFERENCES scts(sct_id), + PRIMARY KEY (chain_id, sct_id) + + );` + +const insertChain = `INSERT INTO chains(chain) VALUES ($1);` +const insertSCT = `INSERT INTO scts(sct) VALUES ($1);` +const insertSCTFeedback = `INSERT INTO sct_feedback(chain_id, sct_id) VALUES ($1, $2);` +const insertSTHPollination = `INSERT INTO sths(version, tree_size, timestamp, root_hash, signature, log_id) VALUES($1, $2, $3, $4, $5, $6);` + +const selectChainID = `SELECT chain_id FROM chains WHERE chain = $1;` + +// Selects at most $2 rows from the sths table whose timestamp is newer than $1. +const selectRandomRecentPollination = `SELECT version, tree_size, timestamp, root_hash, signature, log_id FROM sths + WHERE timestamp >= $1 ORDER BY random() LIMIT $2;` +const selectSCTID = `SELECT sct_id FROM scts WHERE sct = $1;` + +const selectNumSCTs = `SELECT COUNT(*) FROM scts;` +const selectNumChains = `SELECT COUNT(*) FROM chains;` +const selectNumFeedback = `SELECT COUNT(*) FROM sct_feedback;` +const selectNumSTHs = `SELECT COUNT(*) FROM sths;` + +const selectFeedback = `SELECT COUNT(*) FROM sct_feedback WHERE chain_id = $1 AND sct_id = $2;` +const selectSTH = `SELECT COUNT(*) FROM sths WHERE version = $1 AND tree_size = $2 AND timestamp = $3 AND root_hash = $4 AND signature = $5 AND log_id = $6;` + +// Storage provides an SQLite3-backed method for persisting gossip data +type Storage struct { + db *sql.DB + dbPath string + insertChain *sql.Stmt + insertSCT *sql.Stmt + insertSCTFeedback *sql.Stmt + insertSTHPollination *sql.Stmt + selectChainID *sql.Stmt + selectRandomRecentPollination *sql.Stmt + selectSCTID *sql.Stmt + + selectNumChains *sql.Stmt + selectNumFeedback *sql.Stmt + selectNumSCTs *sql.Stmt + selectNumSTHs *sql.Stmt + + selectFeedback *sql.Stmt + selectSTH *sql.Stmt +} + +type statementSQLPair struct { + Statement **sql.Stmt + SQL string +} + +func prepareStatement(db *sql.DB, s statementSQLPair) error { + stmt, err := db.Prepare(s.SQL) + if err != nil { + return err + } + *(s.Statement) = stmt + return nil +} + +// Open opens the underlying persistent data store. +// Should be called before attempting to use any of the store or search methods. 
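+// An illustrative lifecycle (sketch only; the path matches the default used by
+// gossip_server.go, and the feedback variable is an assumption):
+//
+//	var s Storage
+//	if err := s.Open("/tmp/gossip.sq3"); err != nil {
+//		log.Fatalf("failed to open storage: %v", err)
+//	}
+//	defer s.Close()
+//	if err := s.AddSCTFeedback(feedback); err != nil {
+//		log.Printf("failed to store feedback: %v", err)
+//	}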
+func (s *Storage) Open(dbPath string) error { + var err error + if s.db != nil { + return errors.New("attempting to call Open() on an already Open()'d Storage") + } + if len(dbPath) == 0 { + return errors.New("attempting to call Open() with an empty file name") + } + s.dbPath = dbPath + s.db, err = sql.Open("sqlite3", s.dbPath) + if err != nil { + return err + } + if _, err := s.db.Exec(schema); err != nil { + return err + } + for _, p := range []statementSQLPair{ + {&s.insertChain, insertChain}, + {&s.insertSCT, insertSCT}, + {&s.insertSCTFeedback, insertSCTFeedback}, + {&s.insertSTHPollination, insertSTHPollination}, + {&s.selectChainID, selectChainID}, + {&s.selectRandomRecentPollination, selectRandomRecentPollination}, + {&s.selectSCTID, selectSCTID}, + {&s.selectNumChains, selectNumChains}, + {&s.selectNumFeedback, selectNumFeedback}, + {&s.selectNumSCTs, selectNumSCTs}, + {&s.selectNumSTHs, selectNumSTHs}, + {&s.selectFeedback, selectFeedback}, + {&s.selectSTH, selectSTH}} { + if err := prepareStatement(s.db, p); err != nil { + return err + } + } + return nil +} + +// Close closes the underlying DB storage. +func (s *Storage) Close() error { + return s.db.Close() +} + +func selectThingID(getID *sql.Stmt, thing interface{}) (int64, error) { + rows, err := getID.Query(thing) + if err != nil { + return -1, err + } + if !rows.Next() { + return -1, fmt.Errorf("couldn't look up ID for %v", thing) + } + var id int64 + if err = rows.Scan(&id); err != nil { + return -1, err + } + return id, nil +} + +// insertThingOrSelectID will attempt to execute the insert Statement (under transaction tx), if that fails due to +// a unique primary key constraint, it will look up that primary key by executing the getID Statement. +// Returns the ID associated with persistent thing, or an error describing the failure. +func insertThingOrSelectID(tx *sql.Tx, insert *sql.Stmt, getID *sql.Stmt, thing interface{}) (int64, error) { + txInsert := tx.Stmt(insert) + txGetID := tx.Stmt(getID) + r, err := txInsert.Exec(thing) + if err != nil { + switch e := err.(type) { + case sqlite3.Error: + if e.Code == sqlite3.ErrConstraint { + return selectThingID(txGetID, thing) + } + } + return -1, err + } + id, err := r.LastInsertId() + if err != nil { + return -1, err + } + return id, nil +} + +func (s *Storage) addChainIfNotExists(tx *sql.Tx, chain []string) (int64, error) { + flatChain := strings.Join(chain, "") + return insertThingOrSelectID(tx, s.insertChain, s.selectChainID, flatChain) +} + +func (s *Storage) addSCTIfNotExists(tx *sql.Tx, sct string) (int64, error) { + return insertThingOrSelectID(tx, s.insertSCT, s.selectSCTID, sct) +} + +func (s *Storage) addSCTFeedbackIfNotExists(tx *sql.Tx, chainID, sctID int64) error { + stmt := tx.Stmt(s.insertSCTFeedback) + _, err := stmt.Exec(chainID, sctID) + if err != nil { + switch err.(type) { + case sqlite3.Error: + // If this is a dupe that's fine, no need to return an error + if err.(sqlite3.Error).Code != sqlite3.ErrConstraint { + return err + } + default: + return err + } + } + return nil +} + +// AddSCTFeedback stores the passed in feedback object. +func (s *Storage) AddSCTFeedback(feedback SCTFeedback) (err error) { + tx, err := s.db.Begin() + if err != nil { + return err + } + // If we return a non-nil error, then rollback the transaction. 
+ defer func() { + if err != nil { + tx.Rollback() + return + } + err = tx.Commit() + }() + + for _, f := range feedback.Feedback { + chainID, err := s.addChainIfNotExists(tx, f.X509Chain) + if err != nil { + return err + } + for _, sct := range f.SCTData { + sctID, err := s.addSCTIfNotExists(tx, sct) + if err != nil { + return err + } + if err = s.addSCTFeedbackIfNotExists(tx, chainID, sctID); err != nil { + return err + } + } + } + return nil +} + +func (s *Storage) addSTHIfNotExists(tx *sql.Tx, sth ct.SignedTreeHead) error { + stmt := tx.Stmt(s.insertSTHPollination) + sigB64, err := sth.TreeHeadSignature.Base64String() + if err != nil { + return fmt.Errorf("Failed to base64 sth signature: %v", err) + } + _, err = stmt.Exec(sth.Version, sth.TreeSize, sth.Timestamp, sth.SHA256RootHash.Base64String(), sigB64, sth.LogID.Base64String()) + if err != nil { + switch err.(type) { + case sqlite3.Error: + // If this is a dupe that's fine, no need to return an error + if err.(sqlite3.Error).Code != sqlite3.ErrConstraint { + return err + } + default: + return err + } + } + return nil +} + +// GetRandomSTHPollination returns a random selection of "fresh" (i.e. at most 14 days old) STHs from the pool. +func (s *Storage) GetRandomSTHPollination(newerThan time.Time, limit int) (*STHPollination, error) { + // Occasionally this fails to select the pollen which was added by the + // AddSTHPollination request which went on trigger this query, even though + // the transaction committed successfully. Attempting this query under a + // transaction doesn't fix it. /sadface + // Still, that shouldn't really matter too much in practice. + r, err := s.selectRandomRecentPollination.Query(newerThan.Unix()*1000, limit) + if err != nil { + return nil, err + } + var pollination STHPollination + for r.Next() { + var entry ct.SignedTreeHead + var rootB64, sigB64, idB64 string + if err := r.Scan(&entry.Version, &entry.TreeSize, &entry.Timestamp, &rootB64, &sigB64, &idB64); err != nil { + return nil, err + } + if err := entry.SHA256RootHash.FromBase64String(rootB64); err != nil { + return nil, err + } + if err := entry.TreeHeadSignature.FromBase64String(sigB64); err != nil { + return nil, err + } + if err := entry.LogID.FromBase64String(idB64); err != nil { + return nil, err + } + pollination.STHs = append(pollination.STHs, entry) + } + // If there are no entries to return, wedge an empty array in there so that the json encoder returns something valid. + if pollination.STHs == nil { + pollination.STHs = make([]ct.SignedTreeHead, 0) + } + return &pollination, nil +} + +// AddSTHPollination stores the passed in pollination object. +func (s *Storage) AddSTHPollination(pollination STHPollination) error { + tx, err := s.db.Begin() + if err != nil { + return err + } + // If we return a non-nil error, then rollback the transaction. 
+	defer func() {
+		if err != nil {
+			tx.Rollback()
+			return
+		}
+		err = tx.Commit()
+	}()
+
+	for _, sth := range pollination.STHs {
+		if err = s.addSTHIfNotExists(tx, sth); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (s *Storage) getSCTID(sct string) (int64, error) {
+	return selectThingID(s.selectSCTID, sct)
+}
+
+func (s *Storage) getChainID(chain []string) (int64, error) {
+	flatChain := strings.Join(chain, "")
+	return selectThingID(s.selectChainID, flatChain)
+}
+
+func getNumThings(getCount *sql.Stmt) (int64, error) {
+	r, err := getCount.Query()
+	if err != nil {
+		return -1, err
+	}
+	if !r.Next() {
+		return -1, fmt.Errorf("Empty scan returned while querying %v", getCount)
+	}
+	var count int64
+	if err := r.Scan(&count); err != nil {
+		return -1, err
+	}
+	return count, nil
+}
+
+func (s *Storage) getNumChains() (int64, error) {
+	return getNumThings(s.selectNumChains)
+}
+
+func (s *Storage) getNumFeedback() (int64, error) {
+	return getNumThings(s.selectNumFeedback)
+}
+
+func (s *Storage) getNumSCTs() (int64, error) {
+	return getNumThings(s.selectNumSCTs)
+}
+
+func (s *Storage) getNumSTHs() (int64, error) {
+	return getNumThings(s.selectNumSTHs)
+}
+
+func (s *Storage) hasFeedback(sctID, chainID int64) bool {
+	r, err := s.selectFeedback.Query(sctID, chainID)
+	if err != nil {
+		return false
+	}
+	return r.Next()
+}
+
+func (s *Storage) hasSTH(sth ct.SignedTreeHead) bool {
+	sigB64, err := sth.TreeHeadSignature.Base64String()
+	if err != nil {
+		log.Printf("%v", err)
+		return false
+	}
+	r, err := s.selectSTH.Query(sth.Version, sth.TreeSize, sth.Timestamp, sth.SHA256RootHash.Base64String(), sigB64, sth.LogID.Base64String())
+	if err != nil {
+		return false
+	}
+	return r.Next()
+}
diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/gossip/types.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/gossip/types.go
new file mode 100644
index 000000000..5e7e89be9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/gossip/types.go
@@ -0,0 +1,30 @@
+package gossip
+
+import (
+	ct "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go"
+)
+
+// STHVersion reflects the STH Version field in RFC 6962[-bis]
+type STHVersion int
+
+// STHVersion constants
+const (
+	STHVersion0 = 0
+	STHVersion1 = 1
+)
+
+// SCTFeedbackEntry represents a single piece of SCT feedback.
+type SCTFeedbackEntry struct {
+	X509Chain []string `json:"x509_chain"`
+	SCTData   []string `json:"sct_data"`
+}
+
+// SCTFeedback represents a collection of SCTFeedback which a client might send together.
+type SCTFeedback struct {
+	Feedback []SCTFeedbackEntry `json:"sct_feedback"`
+}
+
+// STHPollination represents a collection of STH pollination entries which a client might send together.
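+// An illustrative client-side submission against the handler wired up in
+// gossip_server.go (the host and the sth variable are assumptions):
+//
+//	body, _ := json.Marshal(STHPollination{STHs: []ct.SignedTreeHead{sth}})
+//	resp, err := http.Post("http://gossip.example.com/.well-known/ct/v1/sth-pollination",
+//		"application/json", bytes.NewReader(body))
+//	// ... check err and resp.StatusCode ...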
+type STHPollination struct { + STHs []ct.SignedTreeHead `json:"sths"` +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/merkle_tree.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/merkle_tree.go new file mode 100644 index 000000000..c134020e5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/merkle_tree.go @@ -0,0 +1,131 @@ +package merkletree + +/* +#cgo LDFLAGS: -lcrypto +#cgo CPPFLAGS: -I../../cpp +#cgo CXXFLAGS: -std=c++11 +#include "merkle_tree_go.h" +*/ +import "C" +import ( + "errors" + "fmt" +) + +// CPPMerkleTree provides an interface to the C++ CT MerkleTree library. +// See the go/README file for details on how to build this. +type CPPMerkleTree struct { + FullMerkleTreeInterface + + // The C++ MerkleTree handle + peer C.TREE + + // nodeSize contains the size in bytes of the nodes in the MerkleTree + // referenced by |peer|. + nodeSize C.size_t +} + +func (m *CPPMerkleTree) LeafCount() uint64 { + return uint64(C.LeafCount(m.peer)) +} + +func (m *CPPMerkleTree) LevelCount() uint64 { + return uint64(C.LevelCount(m.peer)) +} + +func (m *CPPMerkleTree) AddLeaf(leaf []byte) uint64 { + return uint64(C.AddLeaf(m.peer, C.BYTE_SLICE(&leaf))) +} + +func (m *CPPMerkleTree) AddLeafHash(hash []byte) uint64 { + return uint64(C.AddLeafHash(m.peer, C.BYTE_SLICE(&hash))) +} + +func (m *CPPMerkleTree) LeafHash(leaf uint64) ([]byte, error) { + hash := make([]byte, m.nodeSize) + success := C.LeafHash(m.peer, C.BYTE_SLICE(&hash), C.size_t(leaf)) + if !success { + return nil, fmt.Errorf("failed to get leafhash of leaf %d", leaf) + } + return hash, nil +} + +func (m *CPPMerkleTree) CurrentRoot() ([]byte, error) { + hash := make([]byte, m.nodeSize) + success := C.CurrentRoot(m.peer, C.BYTE_SLICE(&hash)) + if !success { + return nil, errors.New("failed to get current root") + } + return hash, nil +} + +func (m *CPPMerkleTree) RootAtSnapshot(snapshot uint64) ([]byte, error) { + hash := make([]byte, m.nodeSize) + success := C.RootAtSnapshot(m.peer, C.BYTE_SLICE(&hash), C.size_t(snapshot)) + if !success { + return nil, fmt.Errorf("failed to get root at snapshot %d", snapshot) + } + return hash, nil +} + +func splitSlice(slice []byte, chunkSize int) ([][]byte, error) { + if len(slice)%chunkSize != 0 { + return nil, fmt.Errorf("slice len %d is not a multiple of chunkSize %d", len(slice), chunkSize) + } + numEntries := len(slice) / chunkSize + ret := make([][]byte, numEntries) + for i := 0; i < numEntries; i++ { + start := i * chunkSize + end := start + chunkSize + ret[i] = slice[start:end] + } + return ret, nil +} + +func (m *CPPMerkleTree) PathToCurrentRoot(leaf uint64) ([][]byte, error) { + var numEntries C.size_t + entryBuffer := make([]byte, C.size_t(m.LevelCount())*m.nodeSize) + success := C.PathToCurrentRoot(m.peer, C.BYTE_SLICE(&entryBuffer), &numEntries, C.size_t(leaf)) + if !success { + return nil, fmt.Errorf("failed to get path to current root from leaf %d", leaf) + } + return splitSlice(entryBuffer, int(m.nodeSize)) +} + +func (m *CPPMerkleTree) PathToRootAtSnapshot(leaf, snapshot uint64) ([][]byte, error) { + var num_entries C.size_t + entryBuffer := make([]byte, C.size_t(m.LevelCount())*m.nodeSize) + success := C.PathToRootAtSnapshot(m.peer, C.BYTE_SLICE(&entryBuffer), &num_entries, C.size_t(leaf), C.size_t(snapshot)) + if !success { + return nil, fmt.Errorf("failed to get path to root at snapshot %d from leaf %d", snapshot, leaf) + } + return splitSlice(entryBuffer, 
int(m.nodeSize)) +} + +func (m *CPPMerkleTree) SnapshotConsistency(snapshot1, snapshot2 uint64) ([][]byte, error) { + var num_entries C.size_t + entryBuffer := make([]byte, C.size_t(m.LevelCount())*m.nodeSize) + success := C.SnapshotConsistency(m.peer, C.BYTE_SLICE(&entryBuffer), &num_entries, C.size_t(snapshot1), C.size_t(snapshot2)) + if !success { + return nil, fmt.Errorf("failed to get path to snapshot consistency from %d to %d", snapshot1, snapshot2) + } + return splitSlice(entryBuffer, int(m.nodeSize)) +} + +// NewCPPMerkleTree returns a new wrapped C++ MerkleTree, using the +// Sha256Hasher. +// It is the caller's responsibility to call DeletePeer() when finished with +// the tree to deallocate its resources. +func NewCPPMerkleTree() *CPPMerkleTree { + m := &CPPMerkleTree{ + peer: C.NewMerkleTree(C.NewSha256Hasher()), + } + m.nodeSize = C.size_t(C.NodeSize(m.peer)) + return m +} + +// DeletePeer deallocates the memory used by the C++ MerkleTree peer. +func (m *CPPMerkleTree) DeletePeer() { + C.DeleteMerkleTree(m.peer) + m.peer = nil +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/merkle_tree_go.cc b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/merkle_tree_go.cc new file mode 100644 index 000000000..488cb4c39 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/merkle_tree_go.cc @@ -0,0 +1,154 @@ +#include "merkletree/merkle_tree.h" + +#include +#include +#include +#include + +#include "_cgo_export.h" +#include "merkle_tree_go.h" + +extern "C" { +// Some hollow functions to cast the void* types into what they really +// are, they're only really here to provide a little bit of type +// safety. Hopefully these should all be optimized away into oblivion +// by the compiler. 
+static inline MerkleTree* MT(TREE tree) { + assert(tree); + return static_cast(tree); +} +static inline Sha256Hasher* H(HASHER hasher) { + assert(hasher); + return static_cast(hasher); +} +static inline GoSlice* BS(BYTE_SLICE slice) { + assert(slice); + return static_cast(slice); +} + +HASHER NewSha256Hasher() { + return new Sha256Hasher; +} + +TREE NewMerkleTree(HASHER hasher) { + return new MerkleTree(H(hasher)); +} + +void DeleteMerkleTree(TREE tree) { + delete MT(tree); +} + +size_t NodeSize(TREE tree) { + return MT(tree)->NodeSize(); +} + +size_t LeafCount(TREE tree) { + return MT(tree)->LeafCount(); +} + +bool LeafHash(TREE tree, BYTE_SLICE out, size_t leaf) { + GoSlice* slice(BS(out)); + const MerkleTree* t(MT(tree)); + const size_t nodesize(t->NodeSize()); + if (slice->data == NULL || slice->cap < nodesize) { + return false; + } + const std::string& hash = t->LeafHash(leaf); + assert(nodesize == hash.size()); + memcpy(slice->data, hash.data(), nodesize); + slice->len = nodesize; + return true; +} + +size_t LevelCount(TREE tree) { + const MerkleTree* t(MT(tree)); + return t->LevelCount(); +} + +size_t AddLeaf(TREE tree, BYTE_SLICE leaf) { + GoSlice* slice(BS(leaf)); + MerkleTree* t(MT(tree)); + return t->AddLeaf(std::string(static_cast(slice->data), slice->len)); +} + +size_t AddLeafHash(TREE tree, BYTE_SLICE hash) { + GoSlice* slice(BS(hash)); + MerkleTree* t(MT(tree)); + return t->AddLeafHash( + std::string(static_cast(slice->data), slice->len)); +} + +bool CurrentRoot(TREE tree, BYTE_SLICE out) { + GoSlice* slice(BS(out)); + MerkleTree* t(MT(tree)); + const size_t nodesize(t->NodeSize()); + if (slice->data == NULL || slice->len != nodesize) { + return false; + } + const std::string& hash = t->CurrentRoot(); + assert(nodesize == hash.size()); + memcpy(slice->data, hash.data(), nodesize); + slice->len = nodesize; + return true; +} + +bool RootAtSnapshot(TREE tree, BYTE_SLICE out, size_t snapshot) { + GoSlice* slice(BS(out)); + MerkleTree* t(MT(tree)); + const size_t nodesize(t->NodeSize()); + if (slice->data == NULL || slice->len != nodesize) { + return false; + } + const std::string& hash = t->RootAtSnapshot(snapshot); + assert(nodesize == hash.size()); + memcpy(slice->data, hash.data(), nodesize); + slice->len = nodesize; + return true; +} + +// Copies the fixed-length entries from |path| into the GoSlice +// pointed to by |dst|, one after the other in the same order. +// |num_copied| is set to the number of entries copied. 
+bool CopyNodesToSlice(const std::vector& path, GoSlice* dst, + size_t nodesize, size_t* num_copied) { + assert(dst); + assert(num_copied); + if (dst->cap < path.size() * nodesize) { + *num_copied = 0; + return false; + } + char* e = static_cast(dst->data); + for (int i = 0; i < path.size(); ++i) { + assert(nodesize == path[i].size()); + memcpy(e, path[i].data(), nodesize); + e += nodesize; + } + dst->len = path.size() * nodesize; + *num_copied = path.size(); + return true; +} + +bool PathToCurrentRoot(TREE tree, BYTE_SLICE out, size_t* num_entries, + size_t leaf) { + MerkleTree* t(MT(tree)); + const std::vector path = t->PathToCurrentRoot(leaf); + return CopyNodesToSlice(path, BS(out), t->NodeSize(), num_entries); +} + +bool PathToRootAtSnapshot(TREE tree, BYTE_SLICE out, size_t* num_entries, + size_t leaf, size_t snapshot) { + MerkleTree* t(MT(tree)); + const std::vector path = + t->PathToRootAtSnapshot(leaf, snapshot); + return CopyNodesToSlice(path, BS(out), t->NodeSize(), num_entries); +} + +bool SnapshotConsistency(TREE tree, BYTE_SLICE out, size_t* num_entries, + size_t snapshot1, size_t snapshot2) { + MerkleTree* t(MT(tree)); + const std::vector path = + t->SnapshotConsistency(snapshot1, snapshot2); + return CopyNodesToSlice(path, BS(out), t->NodeSize(), num_entries); +} + +} // extern "C" diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/merkle_tree_go.h b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/merkle_tree_go.h new file mode 100644 index 000000000..446d2a8a8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/merkle_tree_go.h @@ -0,0 +1,74 @@ +#include +#include + +#ifndef GO_MERKLETREE_MERKLE_TREE_H_ +#define GO_MERKLETREE_MERKLE_TREE_H_ + +// These types & functions provide a trampoline to call the C++ MerkleTree +// implementation from within Go code. +// +// Generally we try to jump through hoops to not allocate memory from the C++ +// side, but rather have Go allocate it inside its GC memory such that we don't +// have to worry about leaks. Apart from the obvious benefit of doing it this +// way, it usually also means one less memcpy() too which is nice. + +#ifdef __cplusplus +extern "C" { +#endif + +// The _cgo_export.h file doesn't appear to exist when this header is pulled in +// to the .go file, because of this we can't use types like GoSlice here and so +// we end up with void* everywhere; we'll at least typedef them so that the +// source is a _little_ more readable. +// Grumble grumble. +typedef void* HASHER; +typedef void* TREE; +typedef void* BYTE_SLICE; + +// Allocators & deallocators: + +// Creates a new Sha256Hasher +HASHER NewSha256Hasher(); + +// Creates a new MerkleTree passing in |hasher|. +// The MerkleTree takes ownership of |hasher|. +TREE NewMerkleTree(HASHER hasher); + +// Deletes the passed in |tree|. +void DeleteMerkleTree(TREE tree); + +// MerkleTree methods below. +// See the comments in ../../merkletree/merkle_tree.h for details + +size_t NodeSize(TREE tree); +size_t LeafCount(TREE tree); +bool LeafHash(TREE tree, BYTE_SLICE out, size_t leaf); +size_t LevelCount(TREE tree); +size_t AddLeaf(TREE tree, BYTE_SLICE leaf); +size_t AddLeafHash(TREE tree, BYTE_SLICE hash); +bool CurrentRoot(TREE tree, BYTE_SLICE out); +bool RootAtSnapshot(TREE tree, BYTE_SLICE out, size_t snapshot); + +// |out| must contain sufficent space to hold all of the path elements +// sequentially. 
+// |num_entries| is set to the number of actual elements stored in |out|. +bool PathToCurrentRoot(TREE tree, BYTE_SLICE out, size_t* num_entries, + size_t leaf); + +// |out| must contain sufficent space to hold all of the path elements +// sequentially. +// |num_entries| is set to the number of actual elements stored in |out|. +bool PathToRootAtSnapshot(TREE tree, BYTE_SLICE out, size_t* num_entries, + size_t leaf, size_t snapshot); + +// |out| must contain sufficent space to hold all of the path elements +// sequentially. +// |num_entries| is set to the number of actual elements stored in |out|. +bool SnapshotConsistency(TREE tree, BYTE_SLICE out, size_t* num_entries, + size_t snapshot1, size_t snapshot2); + +#ifdef __cplusplus +} +#endif + +#endif // GO_MERKLETREE_MERKLE_TREE_H_ diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/merkle_tree_interface.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/merkle_tree_interface.go new file mode 100644 index 000000000..85c6faa3d --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/merkle_tree_interface.go @@ -0,0 +1,38 @@ +package merkletree + +// MerkleTreeInterface represents the common interface for basic MerkleTree functions. +type MerkleTreeInterface interface { + // LeafCount returns the number of leaves in the tree + LeafCount() uint64 + + // LevelCount returns the number of levels in the tree + LevelCount() uint64 + + // AddLeaf adds the hash of |leaf| to the tree and returns the newly added + // leaf index + AddLeaf(leaf []byte) uint64 + + // LeafHash returns the hash of the leaf at index |leaf| or a non-nil error. + LeafHash(leaf uint64) ([]byte, error) + + // CurrentRoot returns the current root hash of the merkle tree. + CurrentRoot() ([]byte, error) +} + +// FullMerkleTreeInterface extends MerkleTreeInterface to the full range of +// operations that only a non-compact tree representation can implement. +type FullMerkleTreeInterface interface { + MerkleTreeInterface + + // RootAtSnapshot returns the root hash at the tree size |snapshot| + // which must be <= than the current tree size. + RootAtSnapshot(snapshot uint64) ([]byte, error) + + // PathToCurrentRoot returns the Merkle path (or inclusion proof) from the + // leaf hash at index |leaf| to the current root. + PathToCurrentRoot(leaf uint64) ([]byte, error) + + // SnapshotConsistency returns a consistency proof between the two tree + // sizes specified in |snapshot1| and |snapshot2|. 
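+	// For example (illustrative only), a proof that the tree at size 100 is a
+	// prefix of the tree at size 200 could be requested with:
+	//
+	//	proof, err := tree.SnapshotConsistency(100, 200)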
+ SnapshotConsistency(snapshot1, snapshot2 uint64) ([]byte, error) +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/wrap_merkle_tree.cc b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/wrap_merkle_tree.cc new file mode 100644 index 000000000..060d246ed --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/wrap_merkle_tree.cc @@ -0,0 +1 @@ +#include "merkletree/merkle_tree.cc" diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/wrap_merkle_tree_math.cc b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/wrap_merkle_tree_math.cc new file mode 100644 index 000000000..e8b44be94 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/wrap_merkle_tree_math.cc @@ -0,0 +1 @@ +#include "merkletree/merkle_tree_math.cc" diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/wrap_serial_hasher.cc b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/wrap_serial_hasher.cc new file mode 100644 index 000000000..bbcd621e9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/wrap_serial_hasher.cc @@ -0,0 +1 @@ +#include "merkletree/serial_hasher.cc" diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/wrap_tree_hasher.cc b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/wrap_tree_hasher.cc new file mode 100644 index 000000000..e798f8cd1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/merkletree/wrap_tree_hasher.cc @@ -0,0 +1 @@ +#include "merkletree/tree_hasher.cc" diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/preload/dumpscts/main/dumpscts.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/preload/dumpscts/main/dumpscts.go new file mode 100644 index 000000000..991178413 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/preload/dumpscts/main/dumpscts.go @@ -0,0 +1,57 @@ +package main + +import ( + "compress/zlib" + "encoding/gob" + "flag" + "io" + "log" + "os" + + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/preload" +) + +var sctFile = flag.String("sct_file", "", "File to load SCTs & leaf data from") + +func main() { + flag.Parse() + var sctReader io.ReadCloser + if *sctFile == "" { + log.Fatal("Must specify --sct_file") + } + + sctFileReader, err := os.Open(*sctFile) + if err != nil { + log.Fatal(err) + } + sctReader, err = zlib.NewReader(sctFileReader) + if err != nil { + log.Fatal(err) + } + defer func() { + err := sctReader.Close() + if err != nil && err != io.EOF { + log.Fatalf("Error closing file: %s", err) + } + }() + + // TODO(alcutter) should probably store this stuff in a protobuf really. 
+ decoder := gob.NewDecoder(sctReader) + var addedCert preload.AddedCert + numAdded := 0 + numFailed := 0 + for { + err = decoder.Decode(&addedCert) + if err != nil { + break + } + if addedCert.AddedOk { + log.Println(addedCert.SignedCertificateTimestamp) + numAdded++ + } else { + log.Printf("Cert was not added: %s", addedCert.ErrorMessage) + numFailed++ + } + } + log.Printf("Num certs added: %d, num failed: %d\n", numAdded, numFailed) +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/preload/main/preload.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/preload/main/preload.go new file mode 100644 index 000000000..189168b91 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/preload/main/preload.go @@ -0,0 +1,197 @@ +package main + +import ( + "compress/zlib" + "encoding/gob" + "flag" + "io" + "io/ioutil" + "log" + "os" + "regexp" + "sync" + + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go" + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/client" + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/preload" + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/scanner" +) + +const ( + // A regex which cannot match any input + MatchesNothingRegex = "a^" +) + +var sourceLogUri = flag.String("source_log_uri", "http://ct.googleapis.com/aviator", "CT log base URI to fetch entries from") +var targetLogUri = flag.String("target_log_uri", "http://example.com/ct", "CT log base URI to add entries to") +var batchSize = flag.Int("batch_size", 1000, "Max number of entries to request at per call to get-entries") +var numWorkers = flag.Int("num_workers", 2, "Number of concurrent matchers") +var parallelFetch = flag.Int("parallel_fetch", 2, "Number of concurrent GetEntries fetches") +var parallelSubmit = flag.Int("parallel_submit", 2, "Number of concurrent add-[pre]-chain requests") +var startIndex = flag.Int64("start_index", 0, "Log index to start scanning at") +var quiet = flag.Bool("quiet", false, "Don't print out extra logging messages, only matches.") +var sctInputFile = flag.String("sct_file", "", "File to save SCTs & leaf data to") +var precertsOnly = flag.Bool("precerts_only", false, "Only match precerts") + +func createMatcher() (scanner.Matcher, error) { + // Make a "match everything" regex matcher + precertRegex := regexp.MustCompile(".*") + var certRegex *regexp.Regexp + if *precertsOnly { + certRegex = regexp.MustCompile(MatchesNothingRegex) + } else { + certRegex = precertRegex + } + return scanner.MatchSubjectRegex{ + CertificateSubjectRegex: certRegex, + PrecertificateSubjectRegex: precertRegex}, nil +} + +func recordSct(addedCerts chan<- *preload.AddedCert, certDer ct.ASN1Cert, sct *ct.SignedCertificateTimestamp) { + addedCert := preload.AddedCert{ + CertDER: certDer, + SignedCertificateTimestamp: *sct, + AddedOk: true, + } + addedCerts <- &addedCert +} + +func recordFailure(addedCerts chan<- *preload.AddedCert, certDer ct.ASN1Cert, addError error) { + addedCert := preload.AddedCert{ + CertDER: certDer, + AddedOk: false, + ErrorMessage: addError.Error(), + } + addedCerts <- &addedCert +} + +func sctWriterJob(addedCerts <-chan *preload.AddedCert, sctWriter io.Writer, wg *sync.WaitGroup) { + encoder := gob.NewEncoder(sctWriter) + + numAdded := 0 + numFailed := 0 + + for c := range addedCerts { + if 
c.AddedOk { + numAdded++ + } else { + numFailed++ + } + if encoder != nil { + err := encoder.Encode(c) + if err != nil { + log.Fatalf("failed to encode to %s: %v", *sctInputFile, err) + } + } + } + log.Printf("Added %d certs, %d failed, total: %d\n", numAdded, numFailed, numAdded+numFailed) + wg.Done() +} + +func certSubmitterJob(addedCerts chan<- *preload.AddedCert, log_client *client.LogClient, certs <-chan *ct.LogEntry, + wg *sync.WaitGroup) { + for c := range certs { + chain := make([]ct.ASN1Cert, len(c.Chain)+1) + chain[0] = c.X509Cert.Raw + copy(chain[1:], c.Chain) + sct, err := log_client.AddChain(chain) + if err != nil { + log.Printf("failed to add chain with CN %s: %v\n", c.X509Cert.Subject.CommonName, err) + recordFailure(addedCerts, chain[0], err) + continue + } + recordSct(addedCerts, chain[0], sct) + if !*quiet { + log.Printf("Added chain for CN '%s', SCT: %s\n", c.X509Cert.Subject.CommonName, sct) + } + } + wg.Done() +} + +func precertSubmitterJob(addedCerts chan<- *preload.AddedCert, log_client *client.LogClient, + precerts <-chan *ct.LogEntry, + wg *sync.WaitGroup) { + for c := range precerts { + sct, err := log_client.AddPreChain(c.Chain) + if err != nil { + log.Printf("failed to add pre-chain with CN %s: %v", c.Precert.TBSCertificate.Subject.CommonName, err) + recordFailure(addedCerts, c.Chain[0], err) + continue + } + recordSct(addedCerts, c.Chain[0], sct) + if !*quiet { + log.Printf("Added precert chain for CN '%s', SCT: %s\n", c.Precert.TBSCertificate.Subject.CommonName, sct) + } + } + wg.Done() +} + +func main() { + flag.Parse() + var sctFileWriter io.Writer + var err error + if *sctInputFile != "" { + sctFileWriter, err = os.Create(*sctInputFile) + if err != nil { + log.Fatal(err) + } + } else { + sctFileWriter = ioutil.Discard + } + + sctWriter := zlib.NewWriter(sctFileWriter) + defer func() { + err := sctWriter.Close() + if err != nil { + log.Fatal(err) + } + }() + + fetchLogClient := client.New(*sourceLogUri) + matcher, err := createMatcher() + if err != nil { + log.Fatal(err) + } + + opts := scanner.ScannerOptions{ + Matcher: matcher, + BatchSize: *batchSize, + NumWorkers: *numWorkers, + ParallelFetch: *parallelFetch, + StartIndex: *startIndex, + Quiet: *quiet, + } + scanner := scanner.NewScanner(fetchLogClient, opts) + + certs := make(chan *ct.LogEntry, *batchSize**parallelFetch) + precerts := make(chan *ct.LogEntry, *batchSize**parallelFetch) + addedCerts := make(chan *preload.AddedCert, *batchSize**parallelFetch) + + var sctWriterWG sync.WaitGroup + sctWriterWG.Add(1) + go sctWriterJob(addedCerts, sctWriter, &sctWriterWG) + + submitLogClient := client.New(*targetLogUri) + + var submitterWG sync.WaitGroup + for w := 0; w < *parallelSubmit; w++ { + submitterWG.Add(2) + go certSubmitterJob(addedCerts, submitLogClient, certs, &submitterWG) + go precertSubmitterJob(addedCerts, submitLogClient, precerts, &submitterWG) + } + + addChainFunc := func(entry *ct.LogEntry) { + certs <- entry + } + addPreChainFunc := func(entry *ct.LogEntry) { + precerts <- entry + } + + scanner.Scan(addChainFunc, addPreChainFunc) + + close(certs) + close(precerts) + submitterWG.Wait() + close(addedCerts) + sctWriterWG.Wait() +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/preload/types.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/preload/types.go new file mode 100644 index 000000000..b7f1ed8e0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/preload/types.go @@ -0,0 +1,12 @@ +package 
preload + +import ( + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go" +) + +type AddedCert struct { + CertDER ct.ASN1Cert + SignedCertificateTimestamp ct.SignedCertificateTimestamp + AddedOk bool + ErrorMessage string +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/scanner/main/scanner.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/scanner/main/scanner.go new file mode 100644 index 000000000..55d4cb5cf --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/scanner/main/scanner.go @@ -0,0 +1,86 @@ +package main + +import ( + "flag" + "fmt" + "log" + "math/big" + "regexp" + + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go" + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/client" + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/scanner" +) + +const ( + // A regex which cannot match any input + MatchesNothingRegex = "a^" +) + +var logUri = flag.String("log_uri", "http://ct.googleapis.com/aviator", "CT log base URI") +var matchSubjectRegex = flag.String("match_subject_regex", ".*", "Regex to match CN/SAN") +var precertsOnly = flag.Bool("precerts_only", false, "Only match precerts") +var serialNumber = flag.String("serial_number", "", "Serial number of certificate of interest") +var batchSize = flag.Int("batch_size", 1000, "Max number of entries to request at per call to get-entries") +var numWorkers = flag.Int("num_workers", 2, "Number of concurrent matchers") +var parallelFetch = flag.Int("parallel_fetch", 2, "Number of concurrent GetEntries fetches") +var startIndex = flag.Int64("start_index", 0, "Log index to start scanning at") +var quiet = flag.Bool("quiet", false, "Don't print out extra logging messages, only matches.") + +// Prints out a short bit of info about |cert|, found at |index| in the +// specified log +func logCertInfo(entry *ct.LogEntry) { + log.Printf("Interesting cert at index %d: CN: '%s'", entry.Index, entry.X509Cert.Subject.CommonName) +} + +// Prints out a short bit of info about |precert|, found at |index| in the +// specified log +func logPrecertInfo(entry *ct.LogEntry) { + log.Printf("Interesting precert at index %d: CN: '%s' Issuer: %s", entry.Index, + entry.Precert.TBSCertificate.Subject.CommonName, entry.Precert.TBSCertificate.Issuer.CommonName) +} + +func createMatcherFromFlags() (scanner.Matcher, error) { + if *serialNumber != "" { + log.Printf("Using SerialNumber matcher on %s", *serialNumber) + var sn big.Int + _, success := sn.SetString(*serialNumber, 0) + if !success { + return nil, fmt.Errorf("Invalid serialNumber %s", *serialNumber) + } + return scanner.MatchSerialNumber{SerialNumber: sn}, nil + } else { + // Make a regex matcher + var certRegex *regexp.Regexp + precertRegex := regexp.MustCompile(*matchSubjectRegex) + switch *precertsOnly { + case true: + certRegex = regexp.MustCompile(MatchesNothingRegex) + case false: + certRegex = precertRegex + } + return scanner.MatchSubjectRegex{ + CertificateSubjectRegex: certRegex, + PrecertificateSubjectRegex: precertRegex}, nil + } +} + +func main() { + flag.Parse() + logClient := client.New(*logUri) + matcher, err := createMatcherFromFlags() + if err != nil { + log.Fatal(err) + } + + opts := scanner.ScannerOptions{ + Matcher: matcher, + BatchSize: *batchSize, + NumWorkers: *numWorkers, + ParallelFetch: 
*parallelFetch, + StartIndex: *startIndex, + Quiet: *quiet, + } + scanner := scanner.NewScanner(logClient, opts) + scanner.Scan(logCertInfo, logPrecertInfo) +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/scanner/scanner.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/scanner/scanner.go new file mode 100644 index 000000000..5b5e6cefa --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/scanner/scanner.go @@ -0,0 +1,399 @@ +package scanner + +import ( + "container/list" + "fmt" + "log" + "math/big" + "regexp" + "sync" + "sync/atomic" + "time" + + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go" + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/client" + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509" +) + +// Clients wishing to implement their own Matchers should implement this interface: +type Matcher interface { + // CertificateMatches is called by the scanner for each X509 Certificate found in the log. + // The implementation should return |true| if the passed Certificate is interesting, and |false| otherwise. + CertificateMatches(*x509.Certificate) bool + + // PrecertificateMatches is called by the scanner for each CT Precertificate found in the log. + // The implementation should return |true| if the passed Precertificate is interesting, and |false| otherwise. + PrecertificateMatches(*ct.Precertificate) bool +} + +// MatchAll is a Matcher which will match every possible Certificate and Precertificate. +type MatchAll struct{} + +func (m MatchAll) CertificateMatches(_ *x509.Certificate) bool { + return true +} + +func (m MatchAll) PrecertificateMatches(_ *ct.Precertificate) bool { + return true +} + +// MatchNone is a Matcher which will never match any Certificate or Precertificate. +type MatchNone struct{} + +func (m MatchNone) CertificateMatches(_ *x509.Certificate) bool { + return false +} + +func (m MatchNone) PrecertificateMatches(_ *ct.Precertificate) bool { + return false +} + +type MatchSerialNumber struct { + SerialNumber big.Int +} + +func (m MatchSerialNumber) CertificateMatches(c *x509.Certificate) bool { + return c.SerialNumber.String() == m.SerialNumber.String() +} + +func (m MatchSerialNumber) PrecertificateMatches(p *ct.Precertificate) bool { + return p.TBSCertificate.SerialNumber.String() == m.SerialNumber.String() +} + +// MatchSubjectRegex is a Matcher which will use |CertificateSubjectRegex| and |PrecertificateSubjectRegex| +// to determine whether Certificates and Precertificates are interesting. +// The two regexes are tested against Subject Common Name as well as all +// Subject Alternative Names +type MatchSubjectRegex struct { + CertificateSubjectRegex *regexp.Regexp + PrecertificateSubjectRegex *regexp.Regexp +} + +// Returns true if either CN or any SAN of |c| matches |CertificateSubjectRegex|. +func (m MatchSubjectRegex) CertificateMatches(c *x509.Certificate) bool { + if m.CertificateSubjectRegex.FindStringIndex(c.Subject.CommonName) != nil { + return true + } + for _, alt := range c.DNSNames { + if m.CertificateSubjectRegex.FindStringIndex(alt) != nil { + return true + } + } + return false +} + +// Returns true if either CN or any SAN of |p| matches |PrecertificatesubjectRegex|. 
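+// For context, an illustrative construction of this matcher (sketch only; the
+// domain is an assumption):
+//
+//	m := MatchSubjectRegex{
+//		CertificateSubjectRegex:    regexp.MustCompile(`\.example\.com$`),
+//		PrecertificateSubjectRegex: regexp.MustCompile(`\.example\.com$`),
+//	}
+//	// m can then be supplied as the Matcher in ScannerOptions.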
+func (m MatchSubjectRegex) PrecertificateMatches(p *ct.Precertificate) bool { + if m.PrecertificateSubjectRegex.FindStringIndex(p.TBSCertificate.Subject.CommonName) != nil { + return true + } + for _, alt := range p.TBSCertificate.DNSNames { + if m.PrecertificateSubjectRegex.FindStringIndex(alt) != nil { + return true + } + } + return false +} + +// ScannerOptions holds configuration options for the Scanner +type ScannerOptions struct { + // Custom matcher for x509 Certificates, functor will be called for each + // Certificate found during scanning. + Matcher Matcher + + // Match precerts only (Matcher still applies to precerts) + PrecertOnly bool + + // Number of entries to request in one batch from the Log + BatchSize int + + // Number of concurrent matchers to run + NumWorkers int + + // Number of concurrent fethers to run + ParallelFetch int + + // Log entry index to start fetching & matching at + StartIndex int64 + + // Don't print any status messages to stdout + Quiet bool +} + +// Creates a new ScannerOptions struct with sensible defaults +func DefaultScannerOptions() *ScannerOptions { + return &ScannerOptions{ + Matcher: &MatchAll{}, + PrecertOnly: false, + BatchSize: 1000, + NumWorkers: 1, + ParallelFetch: 1, + StartIndex: 0, + Quiet: false, + } +} + +// Scanner is a tool to scan all the entries in a CT Log. +type Scanner struct { + // Client used to talk to the CT log instance + logClient *client.LogClient + + // Configuration options for this Scanner instance + opts ScannerOptions + + // Counter of the number of certificates scanned + certsProcessed int64 + + // Counter of the number of precertificates encountered during the scan. + precertsSeen int64 + + unparsableEntries int64 + entriesWithNonFatalErrors int64 +} + +// matcherJob represents the context for an individual matcher job. +type matcherJob struct { + // The log entry returned by the log server + entry ct.LogEntry + // The index of the entry containing the LeafInput in the log + index int64 +} + +// fetchRange represents a range of certs to fetch from a CT log +type fetchRange struct { + start int64 + end int64 +} + +// Takes the error returned by either x509.ParseCertificate() or +// x509.ParseTBSCertificate() and determines if it's non-fatal or otherwise. +// In the case of non-fatal errors, the error will be logged, +// entriesWithNonFatalErrors will be incremented, and the return value will be +// nil. +// Fatal errors will be logged, unparsableEntires will be incremented, and the +// fatal error itself will be returned. +// When |err| is nil, this method does nothing. +func (s *Scanner) handleParseEntryError(err error, entryType ct.LogEntryType, index int64) error { + if err == nil { + // No error to handle + return nil + } + switch err.(type) { + case x509.NonFatalErrors: + s.entriesWithNonFatalErrors++ + // We'll make a note, but continue. + s.Log(fmt.Sprintf("Non-fatal error in %+v at index %d: %s", entryType, index, err.Error())) + default: + s.unparsableEntries++ + s.Log(fmt.Sprintf("Failed to parse in %+v at index %d : %s", entryType, index, err.Error())) + return err + } + return nil +} + +// Processes the given |entry| in the specified log. +func (s *Scanner) processEntry(entry ct.LogEntry, foundCert func(*ct.LogEntry), foundPrecert func(*ct.LogEntry)) { + atomic.AddInt64(&s.certsProcessed, 1) + switch entry.Leaf.TimestampedEntry.EntryType { + case ct.X509LogEntryType: + if s.opts.PrecertOnly { + // Only interested in precerts and this is an X.509 cert, early-out. 
+ return + } + cert, err := x509.ParseCertificate(entry.Leaf.TimestampedEntry.X509Entry) + if err = s.handleParseEntryError(err, entry.Leaf.TimestampedEntry.EntryType, entry.Index); err != nil { + // We hit an unparseable entry, already logged inside handleParseEntryError() + return + } + if s.opts.Matcher.CertificateMatches(cert) { + entry.X509Cert = cert + foundCert(&entry) + } + case ct.PrecertLogEntryType: + c, err := x509.ParseTBSCertificate(entry.Leaf.TimestampedEntry.PrecertEntry.TBSCertificate) + if err = s.handleParseEntryError(err, entry.Leaf.TimestampedEntry.EntryType, entry.Index); err != nil { + // We hit an unparseable entry, already logged inside handleParseEntryError() + return + } + precert := &ct.Precertificate{ + Raw: entry.Chain[0], + TBSCertificate: *c, + IssuerKeyHash: entry.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash} + if s.opts.Matcher.PrecertificateMatches(precert) { + entry.Precert = precert + foundPrecert(&entry) + } + s.precertsSeen++ + } +} + +// Worker function to match certs. +// Accepts MatcherJobs over the |entries| channel, and processes them. +// Returns true over the |done| channel when the |entries| channel is closed. +func (s *Scanner) matcherJob(id int, entries <-chan matcherJob, foundCert func(*ct.LogEntry), foundPrecert func(*ct.LogEntry), wg *sync.WaitGroup) { + for e := range entries { + s.processEntry(e.entry, foundCert, foundPrecert) + } + s.Log(fmt.Sprintf("Matcher %d finished", id)) + wg.Done() +} + +// Worker function for fetcher jobs. +// Accepts cert ranges to fetch over the |ranges| channel, and if the fetch is +// successful sends the individual LeafInputs out (as MatcherJobs) into the +// |entries| channel for the matchers to chew on. +// Will retry failed attempts to retrieve ranges indefinitely. +// Sends true over the |done| channel when the |ranges| channel is closed. +func (s *Scanner) fetcherJob(id int, ranges <-chan fetchRange, entries chan<- matcherJob, wg *sync.WaitGroup) { + for r := range ranges { + success := false + // TODO(alcutter): give up after a while: + for !success { + logEntries, err := s.logClient.GetEntries(r.start, r.end) + if err != nil { + s.Log(fmt.Sprintf("Problem fetching from log: %s", err.Error())) + continue + } + for _, logEntry := range logEntries { + logEntry.Index = r.start + entries <- matcherJob{logEntry, r.start} + r.start++ + } + if r.start > r.end { + // Only complete if we actually got all the leaves we were + // expecting -- Logs MAY return fewer than the number of + // leaves requested. + success = true + } + } + } + s.Log(fmt.Sprintf("Fetcher %d finished", id)) + wg.Done() +} + +// Returns the smaller of |a| and |b| +func min(a int64, b int64) int64 { + if a < b { + return a + } else { + return b + } +} + +// Returns the larger of |a| and |b| +func max(a int64, b int64) int64 { + if a > b { + return a + } else { + return b + } +} + +// Pretty prints the passed in number of |seconds| into a more human readable +// string. 
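// A minimal sketch of a custom Matcher (an illustrative sketch, not part of the
// vendored upstream file): it satisfies the Matcher interface declared near the
// top of this file and accepts only certificates and precertificates that expire
// before a caller-chosen cutoff, assuming the forked x509.Certificate keeps the
// standard NotAfter field.
type MatchExpiryBefore struct {
	Cutoff time.Time
}

func (m MatchExpiryBefore) CertificateMatches(c *x509.Certificate) bool {
	// Match leaf certificates whose validity ends before the cutoff.
	return c.NotAfter.Before(m.Cutoff)
}

func (m MatchExpiryBefore) PrecertificateMatches(p *ct.Precertificate) bool {
	// The precert's TBSCertificate carries the same validity fields.
	return p.TBSCertificate.NotAfter.Before(m.Cutoff)
}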
+func humanTime(seconds int) string { + nanos := time.Duration(seconds) * time.Second + hours := int(nanos / (time.Hour)) + nanos %= time.Hour + minutes := int(nanos / time.Minute) + nanos %= time.Minute + seconds = int(nanos / time.Second) + s := "" + if hours > 0 { + s += fmt.Sprintf("%d hours ", hours) + } + if minutes > 0 { + s += fmt.Sprintf("%d minutes ", minutes) + } + if seconds > 0 { + s += fmt.Sprintf("%d seconds ", seconds) + } + return s +} + +func (s Scanner) Log(msg string) { + if !s.opts.Quiet { + log.Print(msg) + } +} + +// Performs a scan against the Log. +// For each x509 certificate found, |foundCert| will be called with the +// index of the entry and certificate itself as arguments. For each precert +// found, |foundPrecert| will be called with the index of the entry and the raw +// precert string as the arguments. +// +// This method blocks until the scan is complete. +func (s *Scanner) Scan(foundCert func(*ct.LogEntry), + foundPrecert func(*ct.LogEntry)) error { + s.Log("Starting up...\n") + s.certsProcessed = 0 + s.precertsSeen = 0 + s.unparsableEntries = 0 + s.entriesWithNonFatalErrors = 0 + + latestSth, err := s.logClient.GetSTH() + if err != nil { + return err + } + s.Log(fmt.Sprintf("Got STH with %d certs", latestSth.TreeSize)) + + ticker := time.NewTicker(time.Second) + startTime := time.Now() + fetches := make(chan fetchRange, 1000) + jobs := make(chan matcherJob, 100000) + go func() { + for range ticker.C { + throughput := float64(s.certsProcessed) / time.Since(startTime).Seconds() + remainingCerts := int64(latestSth.TreeSize) - int64(s.opts.StartIndex) - s.certsProcessed + remainingSeconds := int(float64(remainingCerts) / throughput) + remainingString := humanTime(remainingSeconds) + s.Log(fmt.Sprintf("Processed: %d certs (to index %d). Throughput: %3.2f ETA: %s\n", s.certsProcessed, + s.opts.StartIndex+int64(s.certsProcessed), throughput, remainingString)) + } + }() + + var ranges list.List + for start := s.opts.StartIndex; start < int64(latestSth.TreeSize); { + end := min(start+int64(s.opts.BatchSize), int64(latestSth.TreeSize)) - 1 + ranges.PushBack(fetchRange{start, end}) + start = end + 1 + } + var fetcherWG sync.WaitGroup + var matcherWG sync.WaitGroup + // Start matcher workers + for w := 0; w < s.opts.NumWorkers; w++ { + matcherWG.Add(1) + go s.matcherJob(w, jobs, foundCert, foundPrecert, &matcherWG) + } + // Start fetcher workers + for w := 0; w < s.opts.ParallelFetch; w++ { + fetcherWG.Add(1) + go s.fetcherJob(w, fetches, jobs, &fetcherWG) + } + for r := ranges.Front(); r != nil; r = r.Next() { + fetches <- r.Value.(fetchRange) + } + close(fetches) + fetcherWG.Wait() + close(jobs) + matcherWG.Wait() + + s.Log(fmt.Sprintf("Completed %d certs in %s", s.certsProcessed, humanTime(int(time.Since(startTime).Seconds())))) + s.Log(fmt.Sprintf("Saw %d precerts", s.precertsSeen)) + s.Log(fmt.Sprintf("%d unparsable entries, %d non-fatal errors", s.unparsableEntries, s.entriesWithNonFatalErrors)) + return nil +} + +// Creates a new Scanner instance using |client| to talk to the log, and taking +// configuration options from |opts|. 
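// A minimal sketch of driving the Scanner above (an illustrative sketch, not
// part of the vendored upstream file): logClient is assumed to be an
// already-constructed CT log client, and the example.com regex is a
// placeholder. The callbacks simply log the index of each entry the Matcher
// accepted.
func exampleScanAll(logClient *client.LogClient) error {
	opts := DefaultScannerOptions()
	opts.Matcher = &MatchSubjectRegex{
		CertificateSubjectRegex:    regexp.MustCompile(`\.example\.com$`),
		PrecertificateSubjectRegex: regexp.MustCompile(`\.example\.com$`),
	}
	s := NewScanner(logClient, *opts)
	return s.Scan(
		func(e *ct.LogEntry) { log.Printf("matched cert at index %d", e.Index) },
		func(e *ct.LogEntry) { log.Printf("matched precert at index %d", e.Index) })
}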
+func NewScanner(client *client.LogClient, opts ScannerOptions) *Scanner { + var scanner Scanner + scanner.logClient = client + // Set a default match-everything regex if none was provided: + if opts.Matcher == nil { + opts.Matcher = &MatchAll{} + } + scanner.opts = opts + return &scanner +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/scanner/scanner_test_data.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/scanner/scanner_test_data.go new file mode 100644 index 000000000..d9432278e --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/scanner/scanner_test_data.go @@ -0,0 +1,379 @@ +package scanner + +import ( + "crypto/sha256" + "encoding/base64" + "log" +) + +const ( + // TODO(alcutter): this signature is syntactically correct, but invalid. + FourEntrySTH = "{" + + "\"tree_size\":4,\"timestamp\":1396877652123,\"sha256_root_hash\":\"0JBu0CkZnKXc1niEndDaqqgCRHucCfVt1/WBAXs/5T8=\",\"tree_head_signature\":\"AAAACXNpZ25hdHVyZQ==\"}" + FourEntries = "{\"entries\":[{\"leaf_input\":\"AAAAAAE9pCDoYwAAAAOGMIIDgjCCAuu" + + "gAwIBAgIKFIT5BQAAAAB9PDANBgkqhkiG9w0BAQUFADBGMQswCQYDVQQGEwJVUzETMBEGA1UEChMKR29" + + "vZ2xlIEluYzEiMCAGA1UEAxMZR29vZ2xlIEludGVybmV0IEF1dGhvcml0eTAeFw0xMzAyMjAxMzM0NTF" + + "aFw0xMzA2MDcxOTQzMjdaMGkxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQ" + + "HEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQKEwpHb29nbGUgSW5jMRgwFgYDVQQDEw9tYWlsLmdvb2dsZS5" + + "jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOD1FbMyG0IT8JOi2El6RVciBJp4ENfTkpJ2vn/" + + "HUq+gjprmUNxLSvcK+D8vBpkq8N41Qv+82PyTuZIB0pg2CJfs07C5+ZAQnwm01DiQjM/j2jKb5GegOBR" + + "YngbRkAPSGCufzJy+QBWbd1htqceIREEI/JH7pUGgg90XUQgBddBbAgMBAAGjggFSMIIBTjAdBgNVHSU" + + "EFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0OBBYEFAgZmgKeyK8PXIGOAU+/5r/xNy5hMB8GA1U" + + "dIwQYMBaAFL/AMOv1QxE+Z7qekfv8atrjaxIkMFsGA1UdHwRUMFIwUKBOoEyGSmh0dHA6Ly93d3cuZ3N" + + "0YXRpYy5jb20vR29vZ2xlSW50ZXJuZXRBdXRob3JpdHkvR29vZ2xlSW50ZXJuZXRBdXRob3JpdHkuY3J" + + "sMGYGCCsGAQUFBwEBBFowWDBWBggrBgEFBQcwAoZKaHR0cDovL3d3dy5nc3RhdGljLmNvbS9Hb29nbGV" + + "JbnRlcm5ldEF1dGhvcml0eS9Hb29nbGVJbnRlcm5ldEF1dGhvcml0eS5jcnQwDAYDVR0TAQH/BAIwADA" + + "aBgNVHREEEzARgg9tYWlsLmdvb2dsZS5jb20wDQYJKoZIhvcNAQEFBQADgYEAX0lVXCymPXGdCwvn2kp" + + "qJw5Q+Hf8gzGhxDG6aMlO5wj2wf8qPWABDRwHdb4mdSmRMuwhzCJhE3PceXLNf3pOlR/Prt18mDY/r6c" + + "LwfldIXgTOYkw/uckGwvb0BwMsEi2FDE/T3d3SOo+lHvqPX9sOVa2uyA0wmIYnbT+5uQY6m0AAA==\"," + + "\"extra_data\":\"AAXeAAK0MIICsDCCAhmgAwIBAgIDC2dxMA0GCSqGSIb3DQEBBQUAME4xCzAJBgN" + + "VBAYTAlVTMRAwDgYDVQQKEwdFcXVpZmF4MS0wKwYDVQQLEyRFcXVpZmF4IFNlY3VyZSBDZXJ0aWZpY2F" + + "0ZSBBdXRob3JpdHkwHhcNMDkwNjA4MjA0MzI3WhcNMTMwNjA3MTk0MzI3WjBGMQswCQYDVQQGEwJVUzE" + + "TMBEGA1UEChMKR29vZ2xlIEluYzEiMCAGA1UEAxMZR29vZ2xlIEludGVybmV0IEF1dGhvcml0eTCBnzA" + + "NBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAye23pIucV+eEPkB9hPSP0XFjU5nneXQUr0SZMyCSjXvlKAy" + + "6rWxJfoNfNFlOCnowzdDXxFdF7dWq1nMmzq0yE7jXDx07393cCDaob1FEm8rWIFJztyaHNWrbqeXUWaU" + + "r/GcZOfqTGBhs3t0lig4zFEfC7wFQeeT9adGnwKziV28CAwEAAaOBozCBoDAOBgNVHQ8BAf8EBAMCAQY" + + "wHQYDVR0OBBYEFL/AMOv1QxE+Z7qekfv8atrjaxIkMB8GA1UdIwQYMBaAFEjmaPkr0rKV10fYIyAQTzO" + + "YkJ/UMBIGA1UdEwEB/wQIMAYBAf8CAQAwOgYDVR0fBDMwMTAvoC2gK4YpaHR0cDovL2NybC5nZW90cnV" + + "zdC5jb20vY3Jscy9zZWN1cmVjYS5jcmwwDQYJKoZIhvcNAQEFBQADgYEAuIojxkiWsRF8YHdeBZqrocb" + + "6ghwYB8TrgbCoZutJqOkM0ymt9e8kTP3kS8p/XmOrmSfLnzYhLLkQYGfN0rTw8Ktx5YtaiScRhKqOv5n" + + "wnQkhClIZmloJ0pC3+gz4fniisIWvXEyZ2VxVKfmlUUIuOss4jHg7y/j7lYe8vJD5UDIAAyQwggMgMII" + + 
"CiaADAgECAgQ13vTPMA0GCSqGSIb3DQEBBQUAME4xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFcXVpZmF" + + "4MS0wKwYDVQQLEyRFcXVpZmF4IFNlY3VyZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNOTgwODIyMTY" + + "0MTUxWhcNMTgwODIyMTY0MTUxWjBOMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1U" + + "ECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUAA4G" + + "NADCBiQKBgQDBXbFYZwhi7qCaLR8IbZEUaJgKHv7aBG8ThGIhw9F8zp8F4LgB8E407OKKlQRkrPFrU18" + + "Fs8tngL9CAo7+3QEJ7OEAFE/8+/AM3UO6WyvhH4BwmRVXkxbxD5dqt8JoIxzMTVkwrFEeO68r1u5jRXv" + + "F2V9Q0uNQDzqI578U/eDHuQIDAQABo4IBCTCCAQUwcAYDVR0fBGkwZzBloGOgYaRfMF0xCzAJBgNVBAY" + + "TAlVTMRAwDgYDVQQKEwdFcXVpZmF4MS0wKwYDVQQLEyRFcXVpZmF4IFNlY3VyZSBDZXJ0aWZpY2F0ZSB" + + "BdXRob3JpdHkxDTALBgNVBAMTBENSTDEwGgYDVR0QBBMwEYEPMjAxODA4MjIxNjQxNTFaMAsGA1UdDwQ" + + "EAwIBBjAfBgNVHSMEGDAWgBRI5mj5K9KylddH2CMgEE8zmJCf1DAdBgNVHQ4EFgQUSOZo+SvSspXXR9g" + + "jIBBPM5iQn9QwDAYDVR0TBAUwAwEB/zAaBgkqhkiG9n0HQQAEDTALGwVWMy4wYwMCBsAwDQYJKoZIhvc" + + "NAQEFBQADgYEAWM4p6vz33rXOArkXtYXRuePglcwlMQ0AppJuf7aSY55QldGab+QR3mOFbpjuqP9ayNN" + + "VsmZxV97AIes9KqcjSQEEhkJ7/O5/ohZStWdn00DbOyZYsih3Pa4Ud2HW+ipmJ6AN+qdzXOpw8ZQhZUR" + + "f+vzvKWipood573nvT6wHdzg=\"},{\"leaf_input\":\"AAAAAAE9pe0GcwAAAATWMIIE0jCCA7qgA" + + "wIBAgIDAPY6MA0GCSqGSIb3DQEBBQUAMEAxCzAJBgNVBAYTAlVTMRcwFQYDVQQKEw5HZW9UcnVzdCwgS" + + "W5jLjEYMBYGA1UEAxMPR2VvVHJ1c3QgU1NMIENBMB4XDTExMTAyMTExMDUwNloXDTEzMTEyMjA0MzI0N" + + "1owgc4xKTAnBgNVBAUTIFRqbGZoUTB0cXp3WmtNa0svNXFNdGZqbjJ6aWRVNzRoMQswCQYDVQQGEwJVU" + + "zEXMBUGA1UECBMOU291dGggQ2Fyb2xpbmExEzARBgNVBAcTCkNoYXJsZXN0b24xFzAVBgNVBAoTDkJsY" + + "WNrYmF1ZCBJbmMuMRAwDgYDVQQLEwdIb3N0aW5nMTswOQYDVQQDEzJ3d3cuc3RydWxlYXJ0c2NlbnRyZ" + + "S5wdXJjaGFzZS10aWNrZXRzLW9ubGluZS5jby51azCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCg" + + "gEBAJtkbcF8x3TtIARHC8BDRtoIAdh9HO9fo+5UUDtoc8f4xq7Rb2xbWOiEX29JqZOdsuucYTuYbbDf0" + + "uBYcJpkwhEg4Vg5skyfp0jAd6pXm1euQ+RiRShzEQYKJ8y4/IjZHttA/8HSzEKWJnuidsYrl/twFhlX5" + + "WIZq3BUVQ9GVqGe9n1r2eIFTs6FxYUpaVzTkc6OLh1qSz+cnDDPigLUoUOK/KqN7ybmJxSefJw9WpFW/" + + "pIn6M0gFAbu0egFgDybQ3JwUAEh8ddzpKRCqGq1mdZAKpKFHcqmi5nG5aFD4p1NFmPjDVQXohXLQvwtm" + + "wwKS2Zo+tnulPnEe9jjET/f+MUCAwEAAaOCAUQwggFAMB8GA1UdIwQYMBaAFEJ5VBthzVUrPmPVPEhX9" + + "Z/7Rc5KMA4GA1UdDwEB/wQEAwIEsDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwPQYDVR0RB" + + "DYwNIIyd3d3LnN0cnVsZWFydHNjZW50cmUucHVyY2hhc2UtdGlja2V0cy1vbmxpbmUuY28udWswPQYDV" + + "R0fBDYwNDAyoDCgLoYsaHR0cDovL2d0c3NsLWNybC5nZW90cnVzdC5jb20vY3Jscy9ndHNzbC5jcmwwH" + + "QYDVR0OBBYEFDIdT1lJ84lcDpGuBOuAXrP0AlBVMAwGA1UdEwEB/wQCMAAwQwYIKwYBBQUHAQEENzA1M" + + "DMGCCsGAQUFBzAChidodHRwOi8vZ3Rzc2wtYWlhLmdlb3RydXN0LmNvbS9ndHNzbC5jcnQwDQYJKoZIh" + + "vcNAQEFBQADggEBAFhFfVTB5NWG3rVaq1jM72uGneGCjGk4qV4uKtEFn+zTJe9W2N/u8V2+mLvWQfDGP" + + "r8X5u8KzBOQ+fl6aRxvI71EM3kjMu6UuJkUwXsoocK1c/iVBwWSpqem20t/2Z2n5oIN54QsKZX6tQd9J" + + "HQ95YwtlyC7H4VeDKtJZ5x9UhJi8v35C+UgYPmiU5PdeoTdwxCf285FoQL9fBAPbv+EGek1XVaVg2yJK" + + "ptG2OeM8AaynHsFcK/OcZJtsiGhtu2s9F910OBpoU+lhnPylwxOf4k35JcLaqHJ3BbLUtybbduNqtf3+" + + "sYhkvp5IcCypoJy/Rk4fHgD8VTNiNWj7KGuHRYAAA==\",\"extra_data\":\"AAqLAAPdMIID2TCCA" + + "sGgAwIBAgIDAjbQMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzd" + + "CBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9iYWwgQ0EwHhcNMTAwMjE5MjIzOTI2WhcNMjAwMjE4M" + + "jIzOTI2WjBAMQswCQYDVQQGEwJVUzEXMBUGA1UEChMOR2VvVHJ1c3QsIEluYy4xGDAWBgNVBAMTD0dlb" + + "1RydXN0IFNTTCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJCzgMHk5UatcGA9uuUU3" + + "Z6KXot1WubKbUGlI+g5hSZ6p1V3mkihkn46HhrxJ6ujTDnMyz1Hr4GuFmpcN+9FQf37mpc8oEOdxt8XI" + + 
"dGKolbCA0mEEoE+yQpUYGa5jFTk+eb5lPHgX3UR8im55IaisYmtph6DKWOy8FQchQt65+EuDa+kvc3ns" + + "VrXjAVaDktzKIt1XTTYdwvhdGLicTBi2LyKBeUxY0pUiWozeKdOVSQdl+8a5BLGDzAYtDRN4dgjOyFbL" + + "TAZJQ5096QhS6CkIMlszZhWwPKoXz4mdaAN+DaIiixafWcwqQ/RmXAueOFRJq9VeiS+jDkNd53eAsMMv" + + "R8CAwEAAaOB2TCB1jAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFEJ5VBthzVUrPmPVPEhX9Z/7Rc5KM" + + "B8GA1UdIwQYMBaAFMB6mGiNifurBWQMEX2qfWW4ysxOMBIGA1UdEwEB/wQIMAYBAf8CAQAwOgYDVR0fB" + + "DMwMTAvoC2gK4YpaHR0cDovL2NybC5nZW90cnVzdC5jb20vY3Jscy9ndGdsb2JhbC5jcmwwNAYIKwYBB" + + "QUHAQEEKDAmMCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5nZW90cnVzdC5jb20wDQYJKoZIhvcNAQEFB" + + "QADggEBANTvU4ToGr2hiwTAqfVfoRB4RV2yV2pOJMtlTjGXkZrUJPjiJ2ZwMZzBYlQG55cdOprApClIC" + + "q8kx6jEmlTBfEx4TCtoLF0XplR4TEbigMMfOHES0tdT41SFULgCy+5jOvhWiU1Vuy7AyBh3hjELC3Dwf" + + "jWDpCoTZFZnNF0WX3OsewYk2k9QbSqr0E1TQcKOu3EDSSmGGM8hQkx0YlEVxW+o78Qn5Rsz3VqI138S0" + + "adhJR/V4NwdzxoQ2KDLX4z6DOW/cf/lXUQdpj6HR/oaToODEj+IZpWYeZqF6wJHzSXj8gYETpnKXKBue" + + "rvdo5AaRTPvvz7SBMS24CqFZUE+ENQAA4EwggN9MIIC5qADAgECAgMSu+YwDQYJKoZIhvcNAQEFBQAwT" + + "jELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VxdWlmYXgxLTArBgNVBAsTJEVxdWlmYXggU2VjdXJlIENlc" + + "nRpZmljYXRlIEF1dGhvcml0eTAeFw0wMjA1MjEwNDAwMDBaFw0xODA4MjEwNDAwMDBaMEIxCzAJBgNVB" + + "AYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9iYWwgQ0Ewg" + + "gEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDazBhjMP30FyMaVn5b3zxsOORxt3iR1Lyh2Ez4q" + + "EO2A+lNIQcIiNpYL2Y5Kb0FeIudOOgFt2p+caTmxGCmsO+A5IkoD54l1u2D862mkceYyUIYNRSdrZhGk" + + "i5PyvGHQ8EWlVctUO+JLYB6V63y7l9r0gCNuRT4FBU12cBGo3tyyJG/yVUrzdCXPpwmZMzfzoMZccpO5" + + "tTVe6kZzVXeyOzSXjhT5VxPjC3+UCM2/Gbmy46kORkAt5UCOZELDv44LtEdBZr2TT5vDwcdrywej2A54" + + "vo2UxM51F4mK9s9qBS9MusYAyhSBHHlqzM94Ti7BzaEYpx56hYw9F/AK+hxa+T5AgMBAAGjgfAwge0wH" + + "wYDVR0jBBgwFoAUSOZo+SvSspXXR9gjIBBPM5iQn9QwHQYDVR0OBBYEFMB6mGiNifurBWQMEX2qfWW4y" + + "sxOMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMDoGA1UdHwQzMDEwL6AtoCuGKWh0dHA6L" + + "y9jcmwuZ2VvdHJ1c3QuY29tL2NybHMvc2VjdXJlY2EuY3JsME4GA1UdIARHMEUwQwYEVR0gADA7MDkGC" + + "CsGAQUFBwIBFi1odHRwczovL3d3dy5nZW90cnVzdC5jb20vcmVzb3VyY2VzL3JlcG9zaXRvcnkwDQYJK" + + "oZIhvcNAQEFBQADgYEAduESbk5LFhKGMAaygQjP8AjHx3F+Zu7C7dQ7H//w8MhO1kM4sLkwfRjQVYOia" + + "ss2EZzoSGajbX+4E9RH/otaXHP8rtkbMhk4q5c0FKqW0uujHBQISba75ZHvgzbrHVZvytq8c2OQ5H97P" + + "iLLPQftXzh0nOMDUE6hr5juYfKEPxIAAyQwggMgMIICiaADAgECAgQ13vTPMA0GCSqGSIb3DQEBBQUAM" + + "E4xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFcXVpZmF4MS0wKwYDVQQLEyRFcXVpZmF4IFNlY3VyZSBDZ" + + "XJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNOTgwODIyMTY0MTUxWhcNMTgwODIyMTY0MTUxWjBOMQswCQYDV" + + "QQGEwJVUzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhd" + + "GUgQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDBXbFYZwhi7qCaLR8IbZEUaJgKH" + + "v7aBG8ThGIhw9F8zp8F4LgB8E407OKKlQRkrPFrU18Fs8tngL9CAo7+3QEJ7OEAFE/8+/AM3UO6WyvhH" + + "4BwmRVXkxbxD5dqt8JoIxzMTVkwrFEeO68r1u5jRXvF2V9Q0uNQDzqI578U/eDHuQIDAQABo4IBCTCCA" + + "QUwcAYDVR0fBGkwZzBloGOgYaRfMF0xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFcXVpZmF4MS0wKwYDV" + + "QQLEyRFcXVpZmF4IFNlY3VyZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxDTALBgNVBAMTBENSTDEwGgYDV" + + "R0QBBMwEYEPMjAxODA4MjIxNjQxNTFaMAsGA1UdDwQEAwIBBjAfBgNVHSMEGDAWgBRI5mj5K9KylddH2" + + "CMgEE8zmJCf1DAdBgNVHQ4EFgQUSOZo+SvSspXXR9gjIBBPM5iQn9QwDAYDVR0TBAUwAwEB/zAaBgkqh" + + "kiG9n0HQQAEDTALGwVWMy4wYwMCBsAwDQYJKoZIhvcNAQEFBQADgYEAWM4p6vz33rXOArkXtYXRuePgl" + + "cwlMQ0AppJuf7aSY55QldGab+QR3mOFbpjuqP9ayNNVsmZxV97AIes9KqcjSQEEhkJ7/O5/ohZStWdn0" + + "0DbOyZYsih3Pa4Ud2HW+ipmJ6AN+qdzXOpw8ZQhZURf+vzvKWipood573nvT6wHdzg=\"},{\"leaf_i" + + 
"nput\":\"AAAAAAE9pe0GcwAAAATjMIIE3zCCA8egAwIBAgIUCimKXmNJ+wiDS2zJvg6LC2cvrvQwDQY" + + "JKoZIhvcNAQEFBQAwWjELMAkGA1UEBhMCSlAxIzAhBgNVBAoMGkN5YmVydHJ1c3QgSmFwYW4gQ28uLCB" + + "MdGQuMSYwJAYDVQQDDB1DeWJlcnRydXN0IEphcGFuIFB1YmxpYyBDQSBHMjAeFw0xMjAzMTkwMzE0MzN" + + "aFw0xNTAzMzExNDU5MDBaMIGKMQswCQYDVQQGEwJKUDEOMAwGA1UECBMFVG9reW8xEDAOBgNVBAcTB0N" + + "odW8ta3UxHjAcBgNVBAoTFU5ldCBEcmVhbWVycyBDby4sTHRkLjEeMBwGA1UECxMVTWVnYSBNZWRpYSB" + + "EZXBhcnRtZW50MRkwFwYDVQQDExB3d3cubmV0a2VpYmEuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8" + + "AMIIBCgKCAQEA2to03F4GdlRiGljXrSmT08/WrY59UWaoe/H4wQN6S5eQKVtaLjBWUF5Ro4sm/kND7au" + + "fyDqXUePxiZkphupV+VO7PeKp9e5yqEijK4z2XoFQhrCH5kkn1GDrTNzonxyAQtiBJ/k6gVTJV5fn4s7" + + "I6bZ2aXiJLIlTCFwMDNkrB3fj9py86WwymXaypSHkmo9Sx6PFiIOwPH6vXRK4UyAfFpXPiLGJENEWOY2" + + "AtzMJiIoupgAuyvmoY0G0Vk34mA9gOIOrKE2QmVSR3AtA31UpNZ33qvimfz96rHtCeiZj5HNxZRBMGBs" + + "HTlu5e49xypiYCCV41jQvmfZOShan3R3o2QIDAQABo4IBajCCAWYwCQYDVR0TBAIwADCBuAYDVR0gBIG" + + "wMIGtMIGqBggqgwiMmxEBATCBnTBXBggrBgEFBQcCAjBLGklGb3IgbW9yZSBkZXRhaWxzLCBwbGVhc2U" + + "gdmlzaXQgb3VyIHdlYnNpdGUgaHR0cHM6Ly93d3cuY3liZXJ0cnVzdC5uZS5qcCAuMEIGCCsGAQUFBwI" + + "BFjZodHRwczovL3d3dy5jeWJlcnRydXN0Lm5lLmpwL3NzbC9yZXBvc2l0b3J5L2luZGV4Lmh0bWwwGwY" + + "DVR0RBBQwEoIQd3d3Lm5ldGtlaWJhLmNvbTALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwE" + + "GCCsGAQUFBwMCMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly9zdXJlc2VyaWVzLWNybC5jeWJlcnRydXN" + + "0Lm5lLmpwL1N1cmVTZXJ2ZXIvY3RqcHViY2FnMi9jZHAuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQAw8sX" + + "P2ecKp5QGXtzcxKwkkznqocaddzoG69atcyzwshySLfo0ElMHP5WG9TpVrb6XSh2a1edwduAWBVAHQsH" + + "i4bt4wX9e9DBMnQx/jelcJevABQsXJPGc86diisXYDkHKQesi+8CvWvE0GmbVJRoq0RDo14WASQszuqT" + + "NW993walCzNTg88s7MniFgmgFd8n31SVls6QhY2Fmlr13JLDtzVDQDbj6MCPuwG8DdmR1bCM/ugcnk0a" + + "7ZVy3d4yTjdhKpocToFklhHtHg0AINghPXIqU0njjUsy3ujNYIYo1TaZ3835Bo0lDwdvKK68Jka24Cfc" + + "m+vfUfHKB56sIzquxAAA=\",\"extra_data\":\"AArbAAQ4MIIENDCCAxygAwIBAgIEBydcJjANBgk" + + "qhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJRTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJ" + + "lclRydXN0MSIwIAYDVQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTExMDgxODE4MzYzM1o" + + "XDTE4MDgwOTE4MzU0OVowWjELMAkGA1UEBhMCSlAxIzAhBgNVBAoMGkN5YmVydHJ1c3QgSmFwYW4gQ28" + + "uLCBMdGQuMSYwJAYDVQQDDB1DeWJlcnRydXN0IEphcGFuIFB1YmxpYyBDQSBHMjCCASIwDQYJKoZIhvc" + + "NAQEBBQADggEPADCCAQoCggEBALbcdvu5RPsSfFSwu0F1dPA1R54nukNERWAZzUQKsnjl+h4kOwIfaHd" + + "g9OsiBQo3btv3FSC7PVPU0BGO1OtnvtjdBTeUQSUj75oQo8P3AL26JpJngVCpT56RPE4gulJ//0xNjqq" + + "tTl+8J5cCKf2Vg0m/CrqxNRg1qXOIYlGsFBc0UOefxvOTXbnFAE83kHqBD9T1cinojGKscTvzLt8qXOm" + + "+51Ykgiiavz39cUL9xXtrNwlHUD5ykao7xU+dEm49gANUSUEVPPKGRHQo9bmjG9t2x+oDiaBg6VH2oWQ" + + "+dJvbKssYPMHnaBiJ7Ks4LlC5b24VMygdL9WAF4Yi8x0M4IcCAwEAAaOCAQAwgf0wEgYDVR0TAQH/BAg" + + "wBgEB/wIBADBTBgNVHSAETDBKMEgGCSsGAQQBsT4BADA7MDkGCCsGAQUFBwIBFi1odHRwOi8vY3liZXJ" + + "0cnVzdC5vbW5pcm9vdC5jb20vcmVwb3NpdG9yeS5jZm0wDgYDVR0PAQH/BAQDAgEGMB8GA1UdIwQYMBa" + + "AFOWdWTCCR1jMrPoIVDaGezq1BE3wMEIGA1UdHwQ7MDkwN6A1oDOGMWh0dHA6Ly9jZHAxLnB1YmxpYy1" + + "0cnVzdC5jb20vQ1JML09tbmlyb290MjAyNS5jcmwwHQYDVR0OBBYEFBvkje86cWsSZWjPtpG8OUMBjXX" + + "JMA0GCSqGSIb3DQEBBQUAA4IBAQBtK+3pj7Yp1rYwuuZttcNT0sm4Ck5In/E/Oiq0+3SW5r0YvKd5wHj" + + "BObog406A0iTVpXt/YqPa1A8NqZ2qxem8CMlIZpiewPneq23lsDPCcNCW1x5vmAQVY0i7moVdG2nztE/" + + "zpnAWDyEZf62wAzlJhoyic06T3CEBaLDvDXAaeqKyzCJCkVS9rHAEjUxc/Dqikvb5KhJAzXa3ZvTX0qv" + + "ejizZ3Qk1NydWC662rpqDYPBff/Ctsxz6uHRfx+zADq3Yw8+f0jAOXFEfPhniwdKpkA/mV7mvBHai8gg" + + "EJQo1u3MEMdCYRn82wWEWo4qMmd4QBfLe7aUJZJeEj0KoeyLEAAQ8MIIEODCCA6GgAwIBAgIEBydtuTA" + + 
"NBgkqhkiG9w0BAQUFADB1MQswCQYDVQQGEwJVUzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQY" + + "DVQQLEx5HVEUgQ3liZXJUcnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN" + + "0IEdsb2JhbCBSb290MB4XDTEwMTEzMDE2MzUyMVoXDTE4MDgxMDE1MzQyNlowWjELMAkGA1UEBhMCSUU" + + "xEjAQBgNVBAoTCUJhbHRpbW9yZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3J" + + "lIEN5YmVyVHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKrmD1X6CZ" + + "ymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjrIZ3AQSsBUnuId9Mcj8e6uYi" + + "1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeKmpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dn" + + "KM3p31vjsufFoREJIE9LAwqSuXmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xX" + + "tabz5OTZydc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/yejl0qhqd" + + "NkNwnGjkCAwEAAaOCAWowggFmMBIGA1UdEwEB/wQIMAYBAf8CAQMwTgYDVR0gBEcwRTBDBgRVHSAAMDs" + + "wOQYIKwYBBQUHAgEWLWh0dHA6Ly9jeWJlcnRydXN0Lm9tbmlyb290LmNvbS9yZXBvc2l0b3J5LmNmbTA" + + "OBgNVHQ8BAf8EBAMCAQYwgYkGA1UdIwSBgTB/oXmkdzB1MQswCQYDVQQGEwJVUzEYMBYGA1UEChMPR1R" + + "FIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJUcnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgN" + + "VBAMTGkdURSBDeWJlclRydXN0IEdsb2JhbCBSb290ggIBpTBFBgNVHR8EPjA8MDqgOKA2hjRodHRwOi8" + + "vd3d3LnB1YmxpYy10cnVzdC5jb20vY2dpLWJpbi9DUkwvMjAxOC9jZHAuY3JsMB0GA1UdDgQWBBTlnVk" + + "wgkdYzKz6CFQ2hns6tQRN8DANBgkqhkiG9w0BAQUFAAOBgQAWtCzJ8V7honubeCB6SnBwhhkAtwUq6Mk" + + "lOQ/DZDx1CdmJFYAHwo28KaVkUM9xdUcjvU3Yf3eaURBuTh8gPEecQ3R/loQQTBNDvvjgci7/v648CgN" + + "ggktv+ZrFHvavkDufYTs+3psFGsYsPFchCA9U+ihjbOgbnA/P3TBEE7lX/gACXjCCAlowggHDAgIBpTA" + + "NBgkqhkiG9w0BAQQFADB1MQswCQYDVQQGEwJVUzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQY" + + "DVQQLEx5HVEUgQ3liZXJUcnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN" + + "0IEdsb2JhbCBSb290MB4XDTk4MDgxMzAwMjkwMFoXDTE4MDgxMzIzNTkwMFowdTELMAkGA1UEBhMCVVM" + + "xGDAWBgNVBAoTD0dURSBDb3Jwb3JhdGlvbjEnMCUGA1UECxMeR1RFIEN5YmVyVHJ1c3QgU29sdXRpb25" + + "zLCBJbmMuMSMwIQYDVQQDExpHVEUgQ3liZXJUcnVzdCBHbG9iYWwgUm9vdDCBnzANBgkqhkiG9w0BAQE" + + "FAAOBjQAwgYkCgYEAlQ+gtvBQnOh6x4jN3RcOLrCU0Bs9DvaUwIqUxwbIkJfIuGQaen5sPFPhNyhzYH+" + + "yl1MHn1P5bViU0q+NbYhngObtspXPcjHKpRxyulwC52RC5/mpLNY6DayNQqokATnmnD8BhVcNWIdF+NO" + + "FqpNpJoVwSIA/EhXHebQfBS87YpkCAwEAATANBgkqhkiG9w0BAQQFAAOBgQBt6xsJ6V7ZUdtnImGkKjx" + + "Id+OgfKbec6IUA4U9+6sOMMWDFjOBEwieezRO30DIdNe5fdz0dlV9m2NUGOnw6vNcsdmLQh65wJVOuvr" + + "V4nz1aGG/juwFl19bsNejhTTEJKcND5WT78uU2J4fnVyFbceqrk8fIrXNla26p8z5qwt6fw==\"},{\"" + + "leaf_input\":\"AAAAAAE9pe0GdAAAAAWmMIIFojCCBIqgAwIBAgISESE1Pz3s7WxTnxbUXmwjh7QhM" + + "A0GCSqGSIb3DQEBBQUAMFkxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMS8wL" + + "QYDVQQDEyZHbG9iYWxTaWduIEV4dGVuZGVkIFZhbGlkYXRpb24gQ0EgLSBHMjAeFw0xMTEwMTAxNDE2M" + + "zdaFw0xMzEwMTAxNDE2MzdaMIHpMR0wGwYDVQQPDBRQcml2YXRlIE9yZ2FuaXphdGlvbjERMA8GA1UEB" + + "RMIMDIzOTczNzMxEzARBgsrBgEEAYI3PAIBAxMCR0IxCzAJBgNVBAYTAkdCMRQwEgYDVQQIEwtPeGZvc" + + "mRzaGlyZTEPMA0GA1UEBxMGT3hmb3JkMRgwFgYDVQQJEw9CZWF1bW9udCBTdHJlZXQxCzAJBgNVBAsTA" + + "klUMSMwIQYDVQQKExpUaGUgT3hmb3JkIFBsYXlob3VzZSBUcnVzdDEgMB4GA1UEAxMXd3d3Lm94Zm9yZ" + + "HBsYXlob3VzZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC2VgUJx+QIlEn4vMq5Y" + + "ajmJEk1Lv5Kwc95oqEb2EbQMVhCJct0OA0wKJbnFGaNIo5DJHIouuz98JoHixMB54EwZi5I64wvqyq1o" + + "hquTrUk4CS/4Y4odDw61dIqE2UZCxJYui9y4fTkptjNWmTaytw3LpGkt4Yx+AIcB+Oc7c7IPjTZEvR6L" + + "5lK9WqfZmrS/Y+Tgflz6W79rpgUb2CyfqLUX0Hxohw5/Zp197y4XhOwou/f+Vaju3j/Gt1WBAbWrKxpK" + + "AROVesfqT/H7Y/iOJ6jkPt5rqrLosStbGMpPUNNGRY0a8F1HBAUUzjTrRAE6CGZAPgBbcloYFc1zUsxP" + + 
"LcZAgMBAAGjggHRMIIBzTAOBgNVHQ8BAf8EBAMCBaAwTAYDVR0gBEUwQzBBBgkrBgEEAaAyAQEwNDAyB" + + "ggrBgEFBQcCARYmaHR0cHM6Ly93d3cuZ2xvYmFsc2lnbi5jb20vcmVwb3NpdG9yeS8wNwYDVR0RBDAwL" + + "oIXd3d3Lm94Zm9yZHBsYXlob3VzZS5jb22CE294Zm9yZHBsYXlob3VzZS5jb20wCQYDVR0TBAIwADAdB" + + "gNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwPwYDVR0fBDgwNjA0oDKgMIYuaHR0cDovL2NybC5nb" + + "G9iYWxzaWduLmNvbS9ncy9nc2V4dGVuZHZhbGcyLmNybDCBiAYIKwYBBQUHAQEEfDB6MEEGCCsGAQUFB" + + "zAChjVodHRwOi8vc2VjdXJlLmdsb2JhbHNpZ24uY29tL2NhY2VydC9nc2V4dGVuZHZhbGcyLmNydDA1B" + + "ggrBgEFBQcwAYYpaHR0cDovL29jc3AyLmdsb2JhbHNpZ24uY29tL2dzZXh0ZW5kdmFsZzIwHQYDVR0OB" + + "BYEFNp+MVYdHILBfTE6JM8O6Ul+Xwx3MB8GA1UdIwQYMBaAFLCwSv0cdSj4HGGqE/b6wZA9axajMA0GC" + + "SqGSIb3DQEBBQUAA4IBAQALHuvJlSvi3OqKwDiXBrsx0zb7DGGLAzwQCyr60iwJuc1S8SkWURlM0CKIq" + + "0Qupj5vYIAY2g6gDWxdf/JFMh/Rxzv90JE/xZm9YlnMh2Evz3glLLQ5y2x1ddc0RU9YFoeOmJcgDOROI" + + "8aQvhcn9Jdj1Yk7BkKhbQv/pM9ETqtSro3Xbv/qcwPTG/oRysMCrN/DUxedUr95dFjrS3zpo+6Hr7Jab" + + "TcaAak40ksY+vHEQWbqm4YluJ4/c+6qfpsTTUih6//7xs92UxObeSMtWPaxySxedXekTPYrGt5X8XXPY" + + "oTKJnuJrxlkEBv0K7wozbn5Km2dpOqCAaqbf8WKa3mvAAA=\",\"extra_data\":\"AAgjAARfMIIEW" + + "zCCA0OgAwIBAgILBAAAAAABL07hW2MwDQYJKoZIhvcNAQEFBQAwTDEgMB4GA1UECxMXR2xvYmFsU2lnb" + + "iBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNM" + + "TEwNDEzMTAwMDAwWhcNMjIwNDEzMTAwMDAwWjBZMQswCQYDVQQGEwJCRTEZMBcGA1UEChMQR2xvYmFsU" + + "2lnbiBudi1zYTEvMC0GA1UEAxMmR2xvYmFsU2lnbiBFeHRlbmRlZCBWYWxpZGF0aW9uIENBIC0gRzIwg" + + "gEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDNoUbMUpq4pbR/WNnN2EugcgyXW6aIIMO5PUbc0" + + "FxSMPb6WU+FX7DbiLSpXysjSKyr9ZJ4FLYyD/tcaoVbAJDgu2X1WvlPZ37HbCnsk8ArysRe2LDb1r4/m" + + "wvAj6ldrvcAAqT8umYROHf+IyAlVRDFvYK5TLFoxuJwe4NcE2fBofN8C6iZmtDimyUxyCuNQPZSY7Ggr" + + "Vou9Xk2bTUsDt0F5NDiB0i3KF4r1VjVbNAMoQFGAVqPxq9kx1UBXeHRxmxQJaAFrQCrDI1la93rwnJUy" + + "Q88ABeHIu/buYZ4FlGud9mmKE3zWI2DZ7k0JZscUYBR84OSaqOuR5rW5IsbwO2xAgMBAAGjggEvMIIBK" + + "zAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQUsLBK/Rx1KPgcYaoT9" + + "vrBkD1rFqMwRwYDVR0gBEAwPjA8BgRVHSAAMDQwMgYIKwYBBQUHAgEWJmh0dHBzOi8vd3d3Lmdsb2Jhb" + + "HNpZ24uY29tL3JlcG9zaXRvcnkvMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwuZ2xvYmFsc2lnb" + + "i5uZXQvcm9vdC1yMi5jcmwwRAYIKwYBBQUHAQEEODA2MDQGCCsGAQUFBzABhihodHRwOi8vb2NzcC5nb" + + "G9iYWxzaWduLmNvbS9FeHRlbmRlZFNTTENBMB8GA1UdIwQYMBaAFJviB1dnHB7AagbeWbSaLd/cGYYuM" + + "A0GCSqGSIb3DQEBBQUAA4IBAQBfKJAMLekgsjB8iKtABfqxnVwik9WdyjUx+izqHZNZGcSgDfsJQDHaZ" + + "FbNUr7nGGbobQmbstuUPu42RR4kVLYgBZO1MRq4ZFfm0ywBTDmWef63BJgS77cuWnf+R/N5mELdFr5ba" + + "SvJJsgpaHfmrPZOkBMoZwTsciUf16cKUH84DnIYsSm4/66h1FS4Zk2g1c/T76kyKsWXYtKEzLCg2Jipy" + + "jjkzEQ1b2EmsC6Ycvk4Mg20oWIKIWIV3rttkxA2UztKIXvC9b4u9gIT6a5McOkq9h/Di+Wf4I0qKOgZL" + + "LNl3ffxb5c1ntuSNWOB1yfkK2Kq+mKhcZKMCha3PbVKZVsCAAO+MIIDujCCAqKgAwIBAgILBAAAAAABD" + + "4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4GA1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARB" + + "gNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExM" + + "jE1MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEGA1UEChMKR2xvY" + + "mFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBA" + + "KbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPLv4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isU" + + "oh7SqbKSaZeqKeMWhG8eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfX" + + "klqtTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzdC9XZzPnqJworc" + + "5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pazq+r1feqCapgvdzZX99yqWATXgABy" + + 
"Ur6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/" + + "zAdBgNVHQ4EFgQUm+IHV2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2Nyb" + + "C5nbG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG3lm0mi3f3BmGL" + + "jANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4GsJ0/WwbgcQ3izDJr86iw8bmEbTUsp9" + + "Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiP" + + "qFbQfXf5WRDLenVOavSot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMN" + + "YxdAfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7TBj0/VLZjmmx6" + + "BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg==\"}]}" + + Entry0 = "AAAAAAE9pCDoYwAAAAOGMIIDgjCCAuugAwIBAgIKFIT5BQAA" + + "AAB9PDANBgkqhkiG9w0BAQUFADBGMQswCQYDVQQGEwJVUzETMBEGA1UEChMKR29vZ2xlIEluYzEiMCAG" + + "A1UEAxMZR29vZ2xlIEludGVybmV0IEF1dGhvcml0eTAeFw0xMzAyMjAxMzM0NTFaFw0xMzA2MDcxOTQz" + + "MjdaMGkxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1Nb3VudGFpbiBW" + + "aWV3MRMwEQYDVQQKEwpHb29nbGUgSW5jMRgwFgYDVQQDEw9tYWlsLmdvb2dsZS5jb20wgZ8wDQYJKoZI" + + "hvcNAQEBBQADgY0AMIGJAoGBAOD1FbMyG0IT8JOi2El6RVciBJp4ENfTkpJ2vn/HUq+gjprmUNxLSvcK" + + "+D8vBpkq8N41Qv+82PyTuZIB0pg2CJfs07C5+ZAQnwm01DiQjM/j2jKb5GegOBRYngbRkAPSGCufzJy+" + + "QBWbd1htqceIREEI/JH7pUGgg90XUQgBddBbAgMBAAGjggFSMIIBTjAdBgNVHSUEFjAUBggrBgEFBQcD" + + "AQYIKwYBBQUHAwIwHQYDVR0OBBYEFAgZmgKeyK8PXIGOAU+/5r/xNy5hMB8GA1UdIwQYMBaAFL/AMOv1" + + "QxE+Z7qekfv8atrjaxIkMFsGA1UdHwRUMFIwUKBOoEyGSmh0dHA6Ly93d3cuZ3N0YXRpYy5jb20vR29v" + + "Z2xlSW50ZXJuZXRBdXRob3JpdHkvR29vZ2xlSW50ZXJuZXRBdXRob3JpdHkuY3JsMGYGCCsGAQUFBwEB" + + "BFowWDBWBggrBgEFBQcwAoZKaHR0cDovL3d3dy5nc3RhdGljLmNvbS9Hb29nbGVJbnRlcm5ldEF1dGhv" + + "cml0eS9Hb29nbGVJbnRlcm5ldEF1dGhvcml0eS5jcnQwDAYDVR0TAQH/BAIwADAaBgNVHREEEzARgg9t" + + "YWlsLmdvb2dsZS5jb20wDQYJKoZIhvcNAQEFBQADgYEAX0lVXCymPXGdCwvn2kpqJw5Q+Hf8gzGhxDG6" + + "aMlO5wj2wf8qPWABDRwHdb4mdSmRMuwhzCJhE3PceXLNf3pOlR/Prt18mDY/r6cLwfldIXgTOYkw/uck" + + "Gwvb0BwMsEi2FDE/T3d3SOo+lHvqPX9sOVa2uyA0wmIYnbT+5uQY6m0AAA==" + + Entry1 = "AAAAAAE9pe0GcwAAAATWMIIE0jCCA7qgAwIBAgIDAPY6MA0GCS" + + "qGSIb3DQEBBQUAMEAxCzAJBgNVBAYTAlVTMRcwFQYDVQQKEw5HZW9UcnVzdCwgSW5jLjEYMBYGA1UEAx" + + "MPR2VvVHJ1c3QgU1NMIENBMB4XDTExMTAyMTExMDUwNloXDTEzMTEyMjA0MzI0N1owgc4xKTAnBgNVBA" + + "UTIFRqbGZoUTB0cXp3WmtNa0svNXFNdGZqbjJ6aWRVNzRoMQswCQYDVQQGEwJVUzEXMBUGA1UECBMOU2" + + "91dGggQ2Fyb2xpbmExEzARBgNVBAcTCkNoYXJsZXN0b24xFzAVBgNVBAoTDkJsYWNrYmF1ZCBJbmMuMR" + + "AwDgYDVQQLEwdIb3N0aW5nMTswOQYDVQQDEzJ3d3cuc3RydWxlYXJ0c2NlbnRyZS5wdXJjaGFzZS10aW" + + "NrZXRzLW9ubGluZS5jby51azCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJtkbcF8x3TtIA" + + "RHC8BDRtoIAdh9HO9fo+5UUDtoc8f4xq7Rb2xbWOiEX29JqZOdsuucYTuYbbDf0uBYcJpkwhEg4Vg5sk" + + "yfp0jAd6pXm1euQ+RiRShzEQYKJ8y4/IjZHttA/8HSzEKWJnuidsYrl/twFhlX5WIZq3BUVQ9GVqGe9n" + + "1r2eIFTs6FxYUpaVzTkc6OLh1qSz+cnDDPigLUoUOK/KqN7ybmJxSefJw9WpFW/pIn6M0gFAbu0egFgD" + + "ybQ3JwUAEh8ddzpKRCqGq1mdZAKpKFHcqmi5nG5aFD4p1NFmPjDVQXohXLQvwtmwwKS2Zo+tnulPnEe9" + + "jjET/f+MUCAwEAAaOCAUQwggFAMB8GA1UdIwQYMBaAFEJ5VBthzVUrPmPVPEhX9Z/7Rc5KMA4GA1UdDw" + + "EB/wQEAwIEsDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwPQYDVR0RBDYwNIIyd3d3LnN0cn" + + "VsZWFydHNjZW50cmUucHVyY2hhc2UtdGlja2V0cy1vbmxpbmUuY28udWswPQYDVR0fBDYwNDAyoDCgLo" + + "YsaHR0cDovL2d0c3NsLWNybC5nZW90cnVzdC5jb20vY3Jscy9ndHNzbC5jcmwwHQYDVR0OBBYEFDIdT1" + + "lJ84lcDpGuBOuAXrP0AlBVMAwGA1UdEwEB/wQCMAAwQwYIKwYBBQUHAQEENzA1MDMGCCsGAQUFBzAChi" + + "dodHRwOi8vZ3Rzc2wtYWlhLmdlb3RydXN0LmNvbS9ndHNzbC5jcnQwDQYJKoZIhvcNAQEFBQADggEBAF" + + 
"hFfVTB5NWG3rVaq1jM72uGneGCjGk4qV4uKtEFn+zTJe9W2N/u8V2+mLvWQfDGPr8X5u8KzBOQ+fl6aR" + + "xvI71EM3kjMu6UuJkUwXsoocK1c/iVBwWSpqem20t/2Z2n5oIN54QsKZX6tQd9JHQ95YwtlyC7H4VeDK" + + "tJZ5x9UhJi8v35C+UgYPmiU5PdeoTdwxCf285FoQL9fBAPbv+EGek1XVaVg2yJKptG2OeM8AaynHsFcK" + + "/OcZJtsiGhtu2s9F910OBpoU+lhnPylwxOf4k35JcLaqHJ3BbLUtybbduNqtf3+sYhkvp5IcCypoJy/R" + + "k4fHgD8VTNiNWj7KGuHRYAAA==" + + Entry2 = "AAAAAAE9pe0GcwAAAATjMIIE3zCCA8egAwIBAgIUCimKXmNJ+wiDS2zJvg6LC2cvr" + + "vQwDQYJKoZIhvcNAQEFBQAwWjELMAkGA1UEBhMCSlAxIzAhBgNVBAoMGkN5YmVydHJ1c3QgSmFwYW4gQ" + + "28uLCBMdGQuMSYwJAYDVQQDDB1DeWJlcnRydXN0IEphcGFuIFB1YmxpYyBDQSBHMjAeFw0xMjAzMTkwM" + + "zE0MzNaFw0xNTAzMzExNDU5MDBaMIGKMQswCQYDVQQGEwJKUDEOMAwGA1UECBMFVG9reW8xEDAOBgNVB" + + "AcTB0NodW8ta3UxHjAcBgNVBAoTFU5ldCBEcmVhbWVycyBDby4sTHRkLjEeMBwGA1UECxMVTWVnYSBNZ" + + "WRpYSBEZXBhcnRtZW50MRkwFwYDVQQDExB3d3cubmV0a2VpYmEuY29tMIIBIjANBgkqhkiG9w0BAQEFA" + + "AOCAQ8AMIIBCgKCAQEA2to03F4GdlRiGljXrSmT08/WrY59UWaoe/H4wQN6S5eQKVtaLjBWUF5Ro4sm/" + + "kND7aufyDqXUePxiZkphupV+VO7PeKp9e5yqEijK4z2XoFQhrCH5kkn1GDrTNzonxyAQtiBJ/k6gVTJV" + + "5fn4s7I6bZ2aXiJLIlTCFwMDNkrB3fj9py86WwymXaypSHkmo9Sx6PFiIOwPH6vXRK4UyAfFpXPiLGJE" + + "NEWOY2AtzMJiIoupgAuyvmoY0G0Vk34mA9gOIOrKE2QmVSR3AtA31UpNZ33qvimfz96rHtCeiZj5HNxZ" + + "RBMGBsHTlu5e49xypiYCCV41jQvmfZOShan3R3o2QIDAQABo4IBajCCAWYwCQYDVR0TBAIwADCBuAYDV" + + "R0gBIGwMIGtMIGqBggqgwiMmxEBATCBnTBXBggrBgEFBQcCAjBLGklGb3IgbW9yZSBkZXRhaWxzLCBwb" + + "GVhc2UgdmlzaXQgb3VyIHdlYnNpdGUgaHR0cHM6Ly93d3cuY3liZXJ0cnVzdC5uZS5qcCAuMEIGCCsGA" + + "QUFBwIBFjZodHRwczovL3d3dy5jeWJlcnRydXN0Lm5lLmpwL3NzbC9yZXBvc2l0b3J5L2luZGV4Lmh0b" + + "WwwGwYDVR0RBBQwEoIQd3d3Lm5ldGtlaWJhLmNvbTALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBB" + + "QUHAwEGCCsGAQUFBwMCMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly9zdXJlc2VyaWVzLWNybC5jeWJlc" + + "nRydXN0Lm5lLmpwL1N1cmVTZXJ2ZXIvY3RqcHViY2FnMi9jZHAuY3JsMA0GCSqGSIb3DQEBBQUAA4IBA" + + "QAw8sXP2ecKp5QGXtzcxKwkkznqocaddzoG69atcyzwshySLfo0ElMHP5WG9TpVrb6XSh2a1edwduAWB" + + "VAHQsHi4bt4wX9e9DBMnQx/jelcJevABQsXJPGc86diisXYDkHKQesi+8CvWvE0GmbVJRoq0RDo14WAS" + + "QszuqTNW993walCzNTg88s7MniFgmgFd8n31SVls6QhY2Fmlr13JLDtzVDQDbj6MCPuwG8DdmR1bCM/u" + + "gcnk0a7ZVy3d4yTjdhKpocToFklhHtHg0AINghPXIqU0njjUsy3ujNYIYo1TaZ3835Bo0lDwdvKK68Jk" + + "a24Cfcm+vfUfHKB56sIzquxAAA=" + + Entry3 = "AAAAAAE9pe0GdAAAAAWmMIIFojCCBIqgAwIBAgISESE1Pz3s7WxTnxbUXmwjh7Q" + + "hMA0GCSqGSIb3DQEBBQUAMFkxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMS8" + + "wLQYDVQQDEyZHbG9iYWxTaWduIEV4dGVuZGVkIFZhbGlkYXRpb24gQ0EgLSBHMjAeFw0xMTEwMTAxNDE" + + "2MzdaFw0xMzEwMTAxNDE2MzdaMIHpMR0wGwYDVQQPDBRQcml2YXRlIE9yZ2FuaXphdGlvbjERMA8GA1U" + + "EBRMIMDIzOTczNzMxEzARBgsrBgEEAYI3PAIBAxMCR0IxCzAJBgNVBAYTAkdCMRQwEgYDVQQIEwtPeGZ" + + "vcmRzaGlyZTEPMA0GA1UEBxMGT3hmb3JkMRgwFgYDVQQJEw9CZWF1bW9udCBTdHJlZXQxCzAJBgNVBAs" + + "TAklUMSMwIQYDVQQKExpUaGUgT3hmb3JkIFBsYXlob3VzZSBUcnVzdDEgMB4GA1UEAxMXd3d3Lm94Zm9" + + "yZHBsYXlob3VzZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC2VgUJx+QIlEn4vMq" + + "5YajmJEk1Lv5Kwc95oqEb2EbQMVhCJct0OA0wKJbnFGaNIo5DJHIouuz98JoHixMB54EwZi5I64wvqyq" + + "1ohquTrUk4CS/4Y4odDw61dIqE2UZCxJYui9y4fTkptjNWmTaytw3LpGkt4Yx+AIcB+Oc7c7IPjTZEvR" + + "6L5lK9WqfZmrS/Y+Tgflz6W79rpgUb2CyfqLUX0Hxohw5/Zp197y4XhOwou/f+Vaju3j/Gt1WBAbWrKx" + + "pKAROVesfqT/H7Y/iOJ6jkPt5rqrLosStbGMpPUNNGRY0a8F1HBAUUzjTrRAE6CGZAPgBbcloYFc1zUs" + + "xPLcZAgMBAAGjggHRMIIBzTAOBgNVHQ8BAf8EBAMCBaAwTAYDVR0gBEUwQzBBBgkrBgEEAaAyAQEwNDA" + + "yBggrBgEFBQcCARYmaHR0cHM6Ly93d3cuZ2xvYmFsc2lnbi5jb20vcmVwb3NpdG9yeS8wNwYDVR0RBDA" + + "wLoIXd3d3Lm94Zm9yZHBsYXlob3VzZS5jb22CE294Zm9yZHBsYXlob3VzZS5jb20wCQYDVR0TBAIwADA" + + 
"dBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwPwYDVR0fBDgwNjA0oDKgMIYuaHR0cDovL2NybC5" + + "nbG9iYWxzaWduLmNvbS9ncy9nc2V4dGVuZHZhbGcyLmNybDCBiAYIKwYBBQUHAQEEfDB6MEEGCCsGAQU" + + "FBzAChjVodHRwOi8vc2VjdXJlLmdsb2JhbHNpZ24uY29tL2NhY2VydC9nc2V4dGVuZHZhbGcyLmNydDA" + + "1BggrBgEFBQcwAYYpaHR0cDovL29jc3AyLmdsb2JhbHNpZ24uY29tL2dzZXh0ZW5kdmFsZzIwHQYDVR0" + + "OBBYEFNp+MVYdHILBfTE6JM8O6Ul+Xwx3MB8GA1UdIwQYMBaAFLCwSv0cdSj4HGGqE/b6wZA9axajMA0" + + "GCSqGSIb3DQEBBQUAA4IBAQALHuvJlSvi3OqKwDiXBrsx0zb7DGGLAzwQCyr60iwJuc1S8SkWURlM0CK" + + "Iq0Qupj5vYIAY2g6gDWxdf/JFMh/Rxzv90JE/xZm9YlnMh2Evz3glLLQ5y2x1ddc0RU9YFoeOmJcgDOR" + + "OI8aQvhcn9Jdj1Yk7BkKhbQv/pM9ETqtSro3Xbv/qcwPTG/oRysMCrN/DUxedUr95dFjrS3zpo+6Hr7J" + + "abTcaAak40ksY+vHEQWbqm4YluJ4/c+6qfpsTTUih6//7xs92UxObeSMtWPaxySxedXekTPYrGt5X8XX" + + "PYoTKJnuJrxlkEBv0K7wozbn5Km2dpOqCAaqbf8WKa3mvAAA=" +) + +func makeParent(a []byte, b []byte) [sha256.Size]byte { + if len(a) != len(b) { + log.Fatalf("a & b are different lengths: %d vs %d", len(a), len(b)) + } + if len(a) != sha256.Size { + log.Fatalf("a & b incorrect length for Sha256 hash") + } + var r [sha256.Size * 2]byte + copy(r[0:31], a) + copy(r[32:63], b) + return sha256.Sum256(r[:]) +} + +func CalcRootHash() { + e0, err := base64.StdEncoding.DecodeString(Entry0) + if err != nil { + log.Fatal(err) + } + h0 := sha256.Sum256(e0) + e1, err := base64.StdEncoding.DecodeString(Entry1) + if err != nil { + log.Fatal(err) + } + h1 := sha256.Sum256(e1) + e2, err := base64.StdEncoding.DecodeString(Entry2) + if err != nil { + log.Fatal(err) + } + h2 := sha256.Sum256(e2) + e3, err := base64.StdEncoding.DecodeString(Entry3) + if err != nil { + log.Fatal(err) + } + h3 := sha256.Sum256(e3) + + hash01 := makeParent(h0[:], h1[:]) + hash23 := makeParent(h2[:], h3[:]) + root := makeParent(hash01[:], hash23[:]) + log.Println(base64.StdEncoding.EncodeToString(root[:])) +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/serialization.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/serialization.go new file mode 100644 index 000000000..e383ea588 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/serialization.go @@ -0,0 +1,463 @@ +package ct + +import ( + "bytes" + "container/list" + "crypto" + "encoding/binary" + "errors" + "fmt" + "io" +) + +// Variable size structure prefix-header byte lengths +const ( + CertificateLengthBytes = 3 + PreCertificateLengthBytes = 3 + ExtensionsLengthBytes = 2 + CertificateChainLengthBytes = 3 + SignatureLengthBytes = 2 +) + +// Max lengths +const ( + MaxCertificateLength = (1 << 24) - 1 + MaxExtensionsLength = (1 << 16) - 1 +) + +func writeUint(w io.Writer, value uint64, numBytes int) error { + buf := make([]uint8, numBytes) + for i := 0; i < numBytes; i++ { + buf[numBytes-i-1] = uint8(value & 0xff) + value >>= 8 + } + if value != 0 { + return errors.New("numBytes was insufficiently large to represent value") + } + if _, err := w.Write(buf); err != nil { + return err + } + return nil +} + +func writeVarBytes(w io.Writer, value []byte, numLenBytes int) error { + if err := writeUint(w, uint64(len(value)), numLenBytes); err != nil { + return err + } + if _, err := w.Write(value); err != nil { + return err + } + return nil +} + +func readUint(r io.Reader, numBytes int) (uint64, error) { + var l uint64 + for i := 0; i < numBytes; i++ { + l <<= 8 + var t uint8 + if err := binary.Read(r, binary.BigEndian, &t); err != nil { + return 0, err + } + l |= uint64(t) + } + return l, nil +} + +// Reads a variable length 
array of bytes from |r|. |numLenBytes| specifies the +// number of (BigEndian) prefix-bytes which contain the length of the actual +// array data bytes that follow. +// Allocates an array to hold the contents and returns a slice view into it if +// the read was successful, or an error otherwise. +func readVarBytes(r io.Reader, numLenBytes int) ([]byte, error) { + switch { + case numLenBytes > 8: + return nil, fmt.Errorf("numLenBytes too large (%d)", numLenBytes) + case numLenBytes == 0: + return nil, errors.New("numLenBytes should be > 0") + } + l, err := readUint(r, numLenBytes) + if err != nil { + return nil, err + } + data := make([]byte, l) + n, err := r.Read(data) + if err != nil { + return nil, err + } + if n != int(l) { + return nil, fmt.Errorf("short read: expected %d but got %d", l, n) + } + return data, nil +} + +// Reads a list of ASN1Cert types from |r| +func readASN1CertList(r io.Reader, totalLenBytes int, elementLenBytes int) ([]ASN1Cert, error) { + listBytes, err := readVarBytes(r, totalLenBytes) + if err != nil { + return []ASN1Cert{}, err + } + list := list.New() + listReader := bytes.NewReader(listBytes) + var entry []byte + for err == nil { + entry, err = readVarBytes(listReader, elementLenBytes) + if err != nil { + if err != io.EOF { + return []ASN1Cert{}, err + } + } else { + list.PushBack(entry) + } + } + ret := make([]ASN1Cert, list.Len()) + i := 0 + for e := list.Front(); e != nil; e = e.Next() { + ret[i] = e.Value.([]byte) + i++ + } + return ret, nil +} + +// ReadTimestampedEntryInto parses the byte-stream representation of a +// TimestampedEntry from |r| and populates the struct |t| with the data. See +// RFC section 3.4 for details on the format. +// Returns a non-nil error if there was a problem. +func ReadTimestampedEntryInto(r io.Reader, t *TimestampedEntry) error { + var err error + if err = binary.Read(r, binary.BigEndian, &t.Timestamp); err != nil { + return err + } + if err = binary.Read(r, binary.BigEndian, &t.EntryType); err != nil { + return err + } + switch t.EntryType { + case X509LogEntryType: + if t.X509Entry, err = readVarBytes(r, CertificateLengthBytes); err != nil { + return err + } + case PrecertLogEntryType: + if err := binary.Read(r, binary.BigEndian, &t.PrecertEntry.IssuerKeyHash); err != nil { + return err + } + if t.PrecertEntry.TBSCertificate, err = readVarBytes(r, PreCertificateLengthBytes); err != nil { + return err + } + default: + return fmt.Errorf("unknown EntryType: %d", t.EntryType) + } + t.Extensions, err = readVarBytes(r, ExtensionsLengthBytes) + return nil +} + +// ReadMerkleTreeLeaf parses the byte-stream representation of a MerkleTreeLeaf +// and returns a pointer to a new MerkleTreeLeaf structure containing the +// parsed data. +// See RFC section 3.4 for details on the format. 
+// Returns a pointer to a new MerkleTreeLeaf or non-nil error if there was a +// problem +func ReadMerkleTreeLeaf(r io.Reader) (*MerkleTreeLeaf, error) { + var m MerkleTreeLeaf + if err := binary.Read(r, binary.BigEndian, &m.Version); err != nil { + return nil, err + } + if m.Version != V1 { + return nil, fmt.Errorf("unknown Version %d", m.Version) + } + if err := binary.Read(r, binary.BigEndian, &m.LeafType); err != nil { + return nil, err + } + if m.LeafType != TimestampedEntryLeafType { + return nil, fmt.Errorf("unknown LeafType %d", m.LeafType) + } + if err := ReadTimestampedEntryInto(r, &m.TimestampedEntry); err != nil { + return nil, err + } + return &m, nil +} + +// UnmarshalX509ChainArray unmarshalls the contents of the "chain:" entry in a +// GetEntries response in the case where the entry refers to an X509 leaf. +func UnmarshalX509ChainArray(b []byte) ([]ASN1Cert, error) { + return readASN1CertList(bytes.NewReader(b), CertificateChainLengthBytes, CertificateLengthBytes) +} + +// UnmarshalPrecertChainArray unmarshalls the contents of the "chain:" entry in +// a GetEntries response in the case where the entry refers to a Precertificate +// leaf. +func UnmarshalPrecertChainArray(b []byte) ([]ASN1Cert, error) { + var chain []ASN1Cert + + reader := bytes.NewReader(b) + // read the pre-cert entry: + precert, err := readVarBytes(reader, CertificateLengthBytes) + if err != nil { + return chain, err + } + chain = append(chain, precert) + // and then read and return the chain up to the root: + remainingChain, err := readASN1CertList(reader, CertificateChainLengthBytes, CertificateLengthBytes) + if err != nil { + return chain, err + } + chain = append(chain, remainingChain...) + return chain, nil +} + +// UnmarshalDigitallySigned reconstructs a DigitallySigned structure from a Reader +func UnmarshalDigitallySigned(r io.Reader) (*DigitallySigned, error) { + var h byte + if err := binary.Read(r, binary.BigEndian, &h); err != nil { + return nil, fmt.Errorf("failed to read HashAlgorithm: %v", err) + } + + var s byte + if err := binary.Read(r, binary.BigEndian, &s); err != nil { + return nil, fmt.Errorf("failed to read SignatureAlgorithm: %v", err) + } + + sig, err := readVarBytes(r, SignatureLengthBytes) + if err != nil { + return nil, fmt.Errorf("failed to read Signature bytes: %v", err) + } + + return &DigitallySigned{ + HashAlgorithm: HashAlgorithm(h), + SignatureAlgorithm: SignatureAlgorithm(s), + Signature: sig, + }, nil +} + +// MarshalDigitallySigned marshalls a DigitallySigned structure into a byte array +func MarshalDigitallySigned(ds DigitallySigned) ([]byte, error) { + var b bytes.Buffer + if err := b.WriteByte(byte(ds.HashAlgorithm)); err != nil { + return nil, fmt.Errorf("failed to write HashAlgorithm: %v", err) + } + if err := b.WriteByte(byte(ds.SignatureAlgorithm)); err != nil { + return nil, fmt.Errorf("failed to write SignatureAlgorithm: %v", err) + } + if err := writeVarBytes(&b, ds.Signature, SignatureLengthBytes); err != nil { + return nil, fmt.Errorf("failed to write HashAlgorithm: %v", err) + } + return b.Bytes(), nil +} + +func checkCertificateFormat(cert ASN1Cert) error { + if len(cert) == 0 { + return errors.New("certificate is zero length") + } + if len(cert) > MaxCertificateLength { + return errors.New("certificate too large") + } + return nil +} + +func checkExtensionsFormat(ext CTExtensions) error { + if len(ext) > MaxExtensionsLength { + return errors.New("extensions too large") + } + return nil +} + +func serializeV1CertSCTSignatureInput(timestamp uint64, cert 
ASN1Cert, ext CTExtensions) ([]byte, error) { + if err := checkCertificateFormat(cert); err != nil { + return nil, err + } + if err := checkExtensionsFormat(ext); err != nil { + return nil, err + } + var buf bytes.Buffer + if err := binary.Write(&buf, binary.BigEndian, V1); err != nil { + return nil, err + } + if err := binary.Write(&buf, binary.BigEndian, CertificateTimestampSignatureType); err != nil { + return nil, err + } + if err := binary.Write(&buf, binary.BigEndian, timestamp); err != nil { + return nil, err + } + if err := binary.Write(&buf, binary.BigEndian, X509LogEntryType); err != nil { + return nil, err + } + if err := writeVarBytes(&buf, cert, CertificateLengthBytes); err != nil { + return nil, err + } + if err := writeVarBytes(&buf, ext, ExtensionsLengthBytes); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func serializeV1PrecertSCTSignatureInput(timestamp uint64, issuerKeyHash [issuerKeyHashLength]byte, tbs []byte, ext CTExtensions) ([]byte, error) { + if err := checkCertificateFormat(tbs); err != nil { + return nil, err + } + if err := checkExtensionsFormat(ext); err != nil { + return nil, err + } + var buf bytes.Buffer + if err := binary.Write(&buf, binary.BigEndian, V1); err != nil { + return nil, err + } + if err := binary.Write(&buf, binary.BigEndian, CertificateTimestampSignatureType); err != nil { + return nil, err + } + if err := binary.Write(&buf, binary.BigEndian, timestamp); err != nil { + return nil, err + } + if err := binary.Write(&buf, binary.BigEndian, PrecertLogEntryType); err != nil { + return nil, err + } + if _, err := buf.Write(issuerKeyHash[:]); err != nil { + return nil, err + } + if err := writeVarBytes(&buf, tbs, CertificateLengthBytes); err != nil { + return nil, err + } + if err := writeVarBytes(&buf, ext, ExtensionsLengthBytes); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func serializeV1SCTSignatureInput(sct SignedCertificateTimestamp, entry LogEntry) ([]byte, error) { + if sct.SCTVersion != V1 { + return nil, fmt.Errorf("unsupported SCT version, expected V1, but got %s", sct.SCTVersion) + } + if entry.Leaf.LeafType != TimestampedEntryLeafType { + return nil, fmt.Errorf("Unsupported leaf type %s", entry.Leaf.LeafType) + } + switch entry.Leaf.TimestampedEntry.EntryType { + case X509LogEntryType: + return serializeV1CertSCTSignatureInput(sct.Timestamp, entry.Leaf.TimestampedEntry.X509Entry, entry.Leaf.TimestampedEntry.Extensions) + case PrecertLogEntryType: + return serializeV1PrecertSCTSignatureInput(sct.Timestamp, entry.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash, + entry.Leaf.TimestampedEntry.PrecertEntry.TBSCertificate, + entry.Leaf.TimestampedEntry.Extensions) + default: + return nil, fmt.Errorf("unknown TimestampedEntryLeafType %s", entry.Leaf.TimestampedEntry.EntryType) + } +} + +// SerializeSCTSignatureInput serializes the passed in sct and log entry into +// the correct format for signing. 
+func SerializeSCTSignatureInput(sct SignedCertificateTimestamp, entry LogEntry) ([]byte, error) { + switch sct.SCTVersion { + case V1: + return serializeV1SCTSignatureInput(sct, entry) + default: + return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion) + } +} + +func serializeV1SCT(sct SignedCertificateTimestamp) ([]byte, error) { + if err := checkExtensionsFormat(sct.Extensions); err != nil { + return nil, err + } + var buf bytes.Buffer + if err := binary.Write(&buf, binary.BigEndian, V1); err != nil { + return nil, err + } + if err := binary.Write(&buf, binary.BigEndian, sct.LogID); err != nil { + return nil, err + } + if err := binary.Write(&buf, binary.BigEndian, sct.Timestamp); err != nil { + return nil, err + } + if err := writeVarBytes(&buf, sct.Extensions, ExtensionsLengthBytes); err != nil { + return nil, err + } + sig, err := MarshalDigitallySigned(sct.Signature) + if err != nil { + return nil, err + } + if err := binary.Write(&buf, binary.BigEndian, sig); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// SerializeSCT serializes the passed in sct into the format specified +// by RFC6962 section 3.2 +func SerializeSCT(sct SignedCertificateTimestamp) ([]byte, error) { + switch sct.SCTVersion { + case V1: + return serializeV1SCT(sct) + default: + return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion) + } +} + +func deserializeSCTV1(r io.Reader, sct *SignedCertificateTimestamp) error { + if err := binary.Read(r, binary.BigEndian, &sct.LogID); err != nil { + return err + } + if err := binary.Read(r, binary.BigEndian, &sct.Timestamp); err != nil { + return err + } + ext, err := readVarBytes(r, ExtensionsLengthBytes) + if err != nil { + return err + } + sct.Extensions = ext + ds, err := UnmarshalDigitallySigned(r) + if err != nil { + return err + } + sct.Signature = *ds + return nil +} + +func DeserializeSCT(r io.Reader) (*SignedCertificateTimestamp, error) { + var sct SignedCertificateTimestamp + if err := binary.Read(r, binary.BigEndian, &sct.SCTVersion); err != nil { + return nil, err + } + switch sct.SCTVersion { + case V1: + return &sct, deserializeSCTV1(r, &sct) + default: + return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion) + } +} + +func serializeV1STHSignatureInput(sth SignedTreeHead) ([]byte, error) { + if sth.Version != V1 { + return nil, fmt.Errorf("invalid STH version %d", sth.Version) + } + if sth.TreeSize < 0 { + return nil, fmt.Errorf("invalid tree size %d", sth.TreeSize) + } + if len(sth.SHA256RootHash) != crypto.SHA256.Size() { + return nil, fmt.Errorf("invalid TreeHash length, got %d expected %d", len(sth.SHA256RootHash), crypto.SHA256.Size()) + } + + var buf bytes.Buffer + if err := binary.Write(&buf, binary.BigEndian, V1); err != nil { + return nil, err + } + if err := binary.Write(&buf, binary.BigEndian, TreeHashSignatureType); err != nil { + return nil, err + } + if err := binary.Write(&buf, binary.BigEndian, sth.Timestamp); err != nil { + return nil, err + } + if err := binary.Write(&buf, binary.BigEndian, sth.TreeSize); err != nil { + return nil, err + } + if err := binary.Write(&buf, binary.BigEndian, sth.SHA256RootHash); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// SerializeSTHSignatureInput serializes the passed in sth into the correct +// format for signing. 
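// A minimal round-trip sketch for the DigitallySigned helpers above (an
// illustrative sketch, not part of the vendored upstream file); the algorithm
// choices and signature bytes are arbitrary placeholders.
func exampleDigitallySignedRoundTrip() error {
	in := DigitallySigned{
		HashAlgorithm:      SHA256,
		SignatureAlgorithm: ECDSA,
		Signature:          []byte{0x01, 0x02, 0x03},
	}
	b, err := MarshalDigitallySigned(in)
	if err != nil {
		return err
	}
	out, err := UnmarshalDigitallySigned(bytes.NewReader(b))
	if err != nil {
		return err
	}
	if out.HashAlgorithm != in.HashAlgorithm || !bytes.Equal(out.Signature, in.Signature) {
		return errors.New("DigitallySigned did not survive the round trip")
	}
	return nil
}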
+func SerializeSTHSignatureInput(sth SignedTreeHead) ([]byte, error) { + switch sth.Version { + case V1: + return serializeV1STHSignatureInput(sth) + default: + return nil, fmt.Errorf("unsupported STH version %d", sth.Version) + } +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/signatures.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/signatures.go new file mode 100644 index 000000000..600db2454 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/signatures.go @@ -0,0 +1,131 @@ +package ct + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/asn1" + "encoding/pem" + "errors" + "flag" + "fmt" + "log" + "math/big" +) + +var allowVerificationWithNonCompliantKeys = flag.Bool("allow_verification_with_non_compliant_keys", false, + "Allow a SignatureVerifier to use keys which are technically non-compliant with RFC6962.") + +// PublicKeyFromPEM parses a PEM formatted block and returns the public key contained within and any remaining unread bytes, or an error. +func PublicKeyFromPEM(b []byte) (crypto.PublicKey, SHA256Hash, []byte, error) { + p, rest := pem.Decode(b) + if p == nil { + return nil, [sha256.Size]byte{}, rest, fmt.Errorf("no PEM block found in %s", string(b)) + } + k, err := x509.ParsePKIXPublicKey(p.Bytes) + return k, sha256.Sum256(p.Bytes), rest, err +} + +// SignatureVerifier can verify signatures on SCTs and STHs +type SignatureVerifier struct { + pubKey crypto.PublicKey +} + +// NewSignatureVerifier creates a new SignatureVerifier using the passed in PublicKey. +func NewSignatureVerifier(pk crypto.PublicKey) (*SignatureVerifier, error) { + switch pkType := pk.(type) { + case *rsa.PublicKey: + if pkType.N.BitLen() < 2048 { + e := fmt.Errorf("public key is RSA with < 2048 bits (size:%d)", pkType.N.BitLen()) + if !(*allowVerificationWithNonCompliantKeys) { + return nil, e + } + log.Printf("WARNING: %v", e) + } + case *ecdsa.PublicKey: + params := *(pkType.Params()) + if params != *elliptic.P256().Params() { + e := fmt.Errorf("public is ECDSA, but not on the P256 curve") + if !(*allowVerificationWithNonCompliantKeys) { + return nil, e + } + log.Printf("WARNING: %v", e) + + } + default: + return nil, fmt.Errorf("Unsupported public key type %v", pkType) + } + + return &SignatureVerifier{ + pubKey: pk, + }, nil +} + +// verifySignature verifies that the passed in signature over data was created by our PublicKey. +// Currently, only SHA256 is supported as a HashAlgorithm, and only ECDSA and RSA signatures are supported. 
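// A minimal sketch tying together PublicKeyFromPEM, NewSignatureVerifier and
// VerifySTHSignature from this file (an illustrative sketch, not part of the
// vendored upstream file); logPEM stands in for a log's PEM-encoded public key
// and sth for an already-parsed get-sth response.
func exampleVerifySTH(logPEM []byte, sth SignedTreeHead) error {
	pk, _, _, err := PublicKeyFromPEM(logPEM)
	if err != nil {
		return err
	}
	v, err := NewSignatureVerifier(pk)
	if err != nil {
		return err
	}
	return v.VerifySTHSignature(sth)
}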
+func (s SignatureVerifier) verifySignature(data []byte, sig DigitallySigned) error { + if sig.HashAlgorithm != SHA256 { + return fmt.Errorf("unsupported HashAlgorithm in signature: %v", sig.HashAlgorithm) + } + + hasherType := crypto.SHA256 + hasher := hasherType.New() + if _, err := hasher.Write(data); err != nil { + return fmt.Errorf("failed to write to hasher: %v", err) + } + hash := hasher.Sum([]byte{}) + + switch sig.SignatureAlgorithm { + case RSA: + rsaKey, ok := s.pubKey.(*rsa.PublicKey) + if !ok { + return fmt.Errorf("cannot verify RSA signature with %T key", s.pubKey) + } + if err := rsa.VerifyPKCS1v15(rsaKey, hasherType, hash, sig.Signature); err != nil { + return fmt.Errorf("failed to verify rsa signature: %v", err) + } + case ECDSA: + ecdsaKey, ok := s.pubKey.(*ecdsa.PublicKey) + if !ok { + return fmt.Errorf("cannot verify ECDSA signature with %T key", s.pubKey) + } + var ecdsaSig struct { + R, S *big.Int + } + rest, err := asn1.Unmarshal(sig.Signature, &ecdsaSig) + if err != nil { + return fmt.Errorf("failed to unmarshal ECDSA signature: %v", err) + } + if len(rest) != 0 { + log.Printf("Garbage following signature %v", rest) + } + + if !ecdsa.Verify(ecdsaKey, hash, ecdsaSig.R, ecdsaSig.S) { + return errors.New("failed to verify ecdsa signature") + } + default: + return fmt.Errorf("unsupported signature type %v", sig.SignatureAlgorithm) + } + return nil +} + +// VerifySCTSignature verifies that the SCT's signature is valid for the given LogEntry +func (s SignatureVerifier) VerifySCTSignature(sct SignedCertificateTimestamp, entry LogEntry) error { + sctData, err := SerializeSCTSignatureInput(sct, entry) + if err != nil { + return err + } + return s.verifySignature(sctData, sct.Signature) +} + +// VerifySTHSignature verifies that the STH's signature is valid. +func (s SignatureVerifier) VerifySTHSignature(sth SignedTreeHead) error { + sthData, err := SerializeSTHSignatureInput(sth) + if err != nil { + return err + } + return s.verifySignature(sthData, sth.TreeHeadSignature) +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/types.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/types.go new file mode 100644 index 000000000..ecfcf4684 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/types.go @@ -0,0 +1,344 @@ +package ct + +import ( + "bytes" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509" +) + +const ( + issuerKeyHashLength = 32 +) + +/////////////////////////////////////////////////////////////////////////////// +// The following structures represent those outlined in the RFC6962 document: +/////////////////////////////////////////////////////////////////////////////// + +// LogEntryType represents the LogEntryType enum from section 3.1 of the RFC: +// enum { x509_entry(0), precert_entry(1), (65535) } LogEntryType; +type LogEntryType uint16 + +func (e LogEntryType) String() string { + switch e { + case X509LogEntryType: + return "X509LogEntryType" + case PrecertLogEntryType: + return "PrecertLogEntryType" + } + panic(fmt.Sprintf("No string defined for LogEntryType constant value %d", e)) +} + +// LogEntryType constants, see section 3.1 of RFC6962. 
+const ( + X509LogEntryType LogEntryType = 0 + PrecertLogEntryType LogEntryType = 1 +) + +// MerkleLeafType represents the MerkleLeafType enum from section 3.4 of the +// RFC: enum { timestamped_entry(0), (255) } MerkleLeafType; +type MerkleLeafType uint8 + +func (m MerkleLeafType) String() string { + switch m { + case TimestampedEntryLeafType: + return "TimestampedEntryLeafType" + default: + return fmt.Sprintf("UnknownLeafType(%d)", m) + } +} + +// MerkleLeafType constants, see section 3.4 of the RFC. +const ( + TimestampedEntryLeafType MerkleLeafType = 0 // Entry type for an SCT +) + +// Version represents the Version enum from section 3.2 of the RFC: +// enum { v1(0), (255) } Version; +type Version uint8 + +func (v Version) String() string { + switch v { + case V1: + return "V1" + default: + return fmt.Sprintf("UnknownVersion(%d)", v) + } +} + +// CT Version constants, see section 3.2 of the RFC. +const ( + V1 Version = 0 +) + +// SignatureType differentiates STH signatures from SCT signatures, see RFC +// section 3.2 +type SignatureType uint8 + +func (st SignatureType) String() string { + switch st { + case CertificateTimestampSignatureType: + return "CertificateTimestamp" + case TreeHashSignatureType: + return "TreeHash" + default: + return fmt.Sprintf("UnknownSignatureType(%d)", st) + } +} + +// SignatureType constants, see RFC section 3.2 +const ( + CertificateTimestampSignatureType SignatureType = 0 + TreeHashSignatureType SignatureType = 1 +) + +// ASN1Cert type for holding the raw DER bytes of an ASN.1 Certificate +// (section 3.1) +type ASN1Cert []byte + +// PreCert represents a Precertificate (section 3.2) +type PreCert struct { + IssuerKeyHash [issuerKeyHashLength]byte + TBSCertificate []byte +} + +// CTExtensions is a representation of the raw bytes of any CtExtension +// structure (see section 3.2) +type CTExtensions []byte + +// MerkleTreeNode represents an internal node in the CT tree +type MerkleTreeNode []byte + +// ConsistencyProof represents a CT consistency proof (see sections 2.1.2 and +// 4.4) +type ConsistencyProof []MerkleTreeNode + +// AuditPath represents a CT inclusion proof (see sections 2.1.1 and 4.5) +type AuditPath []MerkleTreeNode + +// LeafInput represents a serialized MerkleTreeLeaf structure +type LeafInput []byte + +// HashAlgorithm from the DigitallySigned struct +type HashAlgorithm byte + +// HashAlgorithm constants +const ( + None HashAlgorithm = 0 + MD5 HashAlgorithm = 1 + SHA1 HashAlgorithm = 2 + SHA224 HashAlgorithm = 3 + SHA256 HashAlgorithm = 4 + SHA384 HashAlgorithm = 5 + SHA512 HashAlgorithm = 6 +) + +func (h HashAlgorithm) String() string { + switch h { + case None: + return "None" + case MD5: + return "MD5" + case SHA1: + return "SHA1" + case SHA224: + return "SHA224" + case SHA256: + return "SHA256" + case SHA384: + return "SHA384" + case SHA512: + return "SHA512" + default: + return fmt.Sprintf("UNKNOWN(%d)", h) + } +} + +// SignatureAlgorithm from the the DigitallySigned struct +type SignatureAlgorithm byte + +// SignatureAlgorithm constants +const ( + Anonymous SignatureAlgorithm = 0 + RSA SignatureAlgorithm = 1 + DSA SignatureAlgorithm = 2 + ECDSA SignatureAlgorithm = 3 +) + +func (s SignatureAlgorithm) String() string { + switch s { + case Anonymous: + return "Anonymous" + case RSA: + return "RSA" + case DSA: + return "DSA" + case ECDSA: + return "ECDSA" + default: + return fmt.Sprintf("UNKNOWN(%d)", s) + } +} + +// DigitallySigned represents an RFC5246 DigitallySigned structure +type DigitallySigned struct { + HashAlgorithm 
HashAlgorithm + SignatureAlgorithm SignatureAlgorithm + Signature []byte +} + +// FromBase64String populates the DigitallySigned structure from the base64 data passed in. +// Returns an error if the base64 data is invalid. +func (d *DigitallySigned) FromBase64String(b64 string) error { + raw, err := base64.StdEncoding.DecodeString(b64) + if err != nil { + return fmt.Errorf("failed to unbase64 DigitallySigned: %v", err) + } + ds, err := UnmarshalDigitallySigned(bytes.NewReader(raw)) + if err != nil { + return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err) + } + *d = *ds + return nil +} + +// Base64String returns the base64 representation of the DigitallySigned struct. +func (d DigitallySigned) Base64String() (string, error) { + b, err := MarshalDigitallySigned(d) + if err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(b), nil +} + +// MarshalJSON implements the json.Marshaller interface. +func (d DigitallySigned) MarshalJSON() ([]byte, error) { + b64, err := d.Base64String() + if err != nil { + return []byte{}, err + } + return []byte(`"` + b64 + `"`), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (d *DigitallySigned) UnmarshalJSON(b []byte) error { + var content string + if err := json.Unmarshal(b, &content); err != nil { + return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err) + } + return d.FromBase64String(content) +} + +// LogEntry represents the contents of an entry in a CT log, see section 3.1. +type LogEntry struct { + Index int64 + Leaf MerkleTreeLeaf + X509Cert *x509.Certificate + Precert *Precertificate + Chain []ASN1Cert +} + +// SHA256Hash represents the output from the SHA256 hash function. +type SHA256Hash [sha256.Size]byte + +// FromBase64String populates the SHA256 struct with the contents of the base64 data passed in. +func (s *SHA256Hash) FromBase64String(b64 string) error { + bs, err := base64.StdEncoding.DecodeString(b64) + if err != nil { + return fmt.Errorf("failed to unbase64 LogID: %v", err) + } + if len(bs) != sha256.Size { + return fmt.Errorf("invalid SHA256 length, expected 32 but got %d", len(bs)) + } + copy(s[:], bs) + return nil +} + +// Base64String returns the base64 representation of this SHA256Hash. +func (s SHA256Hash) Base64String() string { + return base64.StdEncoding.EncodeToString(s[:]) +} + +// MarshalJSON implements the json.Marshaller interface for SHA256Hash. +func (s SHA256Hash) MarshalJSON() ([]byte, error) { + return []byte(`"` + s.Base64String() + `"`), nil +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (s *SHA256Hash) UnmarshalJSON(b []byte) error { + var content string + if err := json.Unmarshal(b, &content); err != nil { + return fmt.Errorf("failed to unmarshal SHA256Hash: %v", err) + } + return s.FromBase64String(content) +} + +// SignedTreeHead represents the structure returned by the get-sth CT method +// after base64 decoding. 
See sections 3.5 and 4.3 of the RFC.
+type SignedTreeHead struct {
+ Version Version `json:"sth_version"` // The version of the protocol to which the STH conforms
+ TreeSize uint64 `json:"tree_size"` // The number of entries in the new tree
+ Timestamp uint64 `json:"timestamp"` // The time at which the STH was created
+ SHA256RootHash SHA256Hash `json:"sha256_root_hash"` // The root hash of the log's Merkle tree
+ TreeHeadSignature DigitallySigned `json:"tree_head_signature"` // The Log's signature for this STH (see RFC section 3.5)
+ LogID SHA256Hash `json:"log_id"` // The SHA256 hash of the log's public key
+}
+
+// SignedCertificateTimestamp represents the structure returned by the
+// add-chain and add-pre-chain methods after base64 decoding. (see RFC sections
+// 3.2, 4.1 and 4.2)
+type SignedCertificateTimestamp struct {
+ SCTVersion Version // The version of the protocol to which the SCT conforms
+ LogID SHA256Hash // the SHA-256 hash of the log's public key, calculated over
+ // the DER encoding of the key represented as SubjectPublicKeyInfo.
+ Timestamp uint64 // Timestamp (in ms since the Unix epoch) at which the SCT was issued
+ Extensions CTExtensions // For future extensions to the protocol
+ Signature DigitallySigned // The Log's signature for this SCT
+}
+
+func (s SignedCertificateTimestamp) String() string {
+ return fmt.Sprintf("{Version:%d LogId:%s Timestamp:%d Extensions:'%s' Signature:%v}", s.SCTVersion,
+ base64.StdEncoding.EncodeToString(s.LogID[:]),
+ s.Timestamp,
+ s.Extensions,
+ s.Signature)
+}
+
+// TimestampedEntry is part of the MerkleTreeLeaf structure.
+// See RFC section 3.4
+type TimestampedEntry struct {
+ Timestamp uint64
+ EntryType LogEntryType
+ X509Entry ASN1Cert
+ PrecertEntry PreCert
+ Extensions CTExtensions
+}
+
+// MerkleTreeLeaf represents the deserialized structure of the hash input for the
+// leaves of a log's Merkle tree. See RFC section 3.4
+type MerkleTreeLeaf struct {
+ Version Version // the version of the protocol to which the MerkleTreeLeaf corresponds
+ LeafType MerkleLeafType // The type of the leaf input, currently only TimestampedEntry can exist
+ TimestampedEntry TimestampedEntry // The entry data itself
+}
+
+// Precertificate represents the parsed CT Precertificate structure.
+type Precertificate struct {
+ // Raw DER bytes of the precert
+ Raw []byte
+ // SHA256 hash of the issuing key
+ IssuerKeyHash [issuerKeyHashLength]byte
+ // Parsed TBSCertificate structure (held in an x509.Certificate for ease of
+ // access).
+ TBSCertificate x509.Certificate
+}
+
+// X509Certificate returns the X.509 Certificate contained within the
+// MerkleTreeLeaf.
+// Returns a pointer to an x509.Certificate or a non-nil error.
+func (m *MerkleTreeLeaf) X509Certificate() (*x509.Certificate, error) {
+ return x509.ParseCertificate(m.TimestampedEntry.X509Entry)
+}
diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/cert_pool.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/cert_pool.go
new file mode 100644
index 000000000..babe94d41
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/cert_pool.go
@@ -0,0 +1,116 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "encoding/pem"
+)
+
+// CertPool is a set of certificates.
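For illustration, a minimal sketch of decoding a get-sth style JSON body into the SignedTreeHead type defined above, assuming the vendored package is imported under the alias ct; the body is synthetic (an all-zero root hash) and omits the signature field, so only the plain fields and the SHA256Hash unmarshaller are exercised:

package main

import (
	"encoding/json"
	"fmt"
	"strings"

	ct "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go"
)

func main() {
	// Synthetic get-sth style body: base64 of 32 zero bytes as the root hash.
	rootHash := strings.Repeat("A", 43) + "="
	body := fmt.Sprintf(`{"sth_version":0,"tree_size":42,"timestamp":1441000000000,"sha256_root_hash":%q}`, rootHash)

	var sth ct.SignedTreeHead
	if err := json.Unmarshal([]byte(body), &sth); err != nil {
		panic(err)
	}
	fmt.Println(sth.Version, sth.TreeSize, sth.SHA256RootHash.Base64String())
}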
+type CertPool struct { + bySubjectKeyId map[string][]int + byName map[string][]int + certs []*Certificate +} + +// NewCertPool returns a new, empty CertPool. +func NewCertPool() *CertPool { + return &CertPool{ + make(map[string][]int), + make(map[string][]int), + nil, + } +} + +// findVerifiedParents attempts to find certificates in s which have signed the +// given certificate. If any candidates were rejected then errCert will be set +// to one of them, arbitrarily, and err will contain the reason that it was +// rejected. +func (s *CertPool) findVerifiedParents(cert *Certificate) (parents []int, errCert *Certificate, err error) { + if s == nil { + return + } + var candidates []int + + if len(cert.AuthorityKeyId) > 0 { + candidates = s.bySubjectKeyId[string(cert.AuthorityKeyId)] + } + if len(candidates) == 0 { + candidates = s.byName[string(cert.RawIssuer)] + } + + for _, c := range candidates { + if err = cert.CheckSignatureFrom(s.certs[c]); err == nil { + parents = append(parents, c) + } else { + errCert = s.certs[c] + } + } + + return +} + +// AddCert adds a certificate to a pool. +func (s *CertPool) AddCert(cert *Certificate) { + if cert == nil { + panic("adding nil Certificate to CertPool") + } + + // Check that the certificate isn't being added twice. + for _, c := range s.certs { + if c.Equal(cert) { + return + } + } + + n := len(s.certs) + s.certs = append(s.certs, cert) + + if len(cert.SubjectKeyId) > 0 { + keyId := string(cert.SubjectKeyId) + s.bySubjectKeyId[keyId] = append(s.bySubjectKeyId[keyId], n) + } + name := string(cert.RawSubject) + s.byName[name] = append(s.byName[name], n) +} + +// AppendCertsFromPEM attempts to parse a series of PEM encoded certificates. +// It appends any certificates found to s and returns true if any certificates +// were successfully parsed. +// +// On many Linux systems, /etc/ssl/cert.pem will contain the system wide set +// of root CAs in a format suitable for this function. +func (s *CertPool) AppendCertsFromPEM(pemCerts []byte) (ok bool) { + for len(pemCerts) > 0 { + var block *pem.Block + block, pemCerts = pem.Decode(pemCerts) + if block == nil { + break + } + if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { + continue + } + + cert, err := ParseCertificate(block.Bytes) + if err != nil { + continue + } + + s.AddCert(cert) + ok = true + } + + return +} + +// Subjects returns a list of the DER-encoded subjects of +// all of the certificates in the pool. +func (s *CertPool) Subjects() (res [][]byte) { + res = make([][]byte, len(s.certs)) + for i, c := range s.certs { + res[i] = c.RawSubject + } + return +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/pem_decrypt.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/pem_decrypt.go new file mode 100644 index 000000000..194c81bf6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/pem_decrypt.go @@ -0,0 +1,233 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +// RFC 1423 describes the encryption of PEM blocks. The algorithm used to +// generate a key from the password was derived by looking at the OpenSSL +// implementation. 
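A minimal sketch of loading a PEM bundle into this forked CertPool, assuming the vendored copy is imported as ctx509; the file path is only an example:

package main

import (
	"fmt"
	"io/ioutil"

	ctx509 "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509"
)

func main() {
	// Example path to a PEM bundle of CA certificates.
	pemBytes, err := ioutil.ReadFile("/etc/ssl/certs/ca-certificates.crt")
	if err != nil {
		panic(err)
	}

	pool := ctx509.NewCertPool()
	if !pool.AppendCertsFromPEM(pemBytes) {
		panic("no certificates parsed from PEM bundle")
	}
	// Subjects returns the DER-encoded subject of every certificate in the pool.
	fmt.Printf("loaded %d certificates\n", len(pool.Subjects()))
}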
+ +import ( + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/md5" + "encoding/hex" + "encoding/pem" + "errors" + "io" + "strings" +) + +type PEMCipher int + +// Possible values for the EncryptPEMBlock encryption algorithm. +const ( + _ PEMCipher = iota + PEMCipherDES + PEMCipher3DES + PEMCipherAES128 + PEMCipherAES192 + PEMCipherAES256 +) + +// rfc1423Algo holds a method for enciphering a PEM block. +type rfc1423Algo struct { + cipher PEMCipher + name string + cipherFunc func(key []byte) (cipher.Block, error) + keySize int + blockSize int +} + +// rfc1423Algos holds a slice of the possible ways to encrypt a PEM +// block. The ivSize numbers were taken from the OpenSSL source. +var rfc1423Algos = []rfc1423Algo{{ + cipher: PEMCipherDES, + name: "DES-CBC", + cipherFunc: des.NewCipher, + keySize: 8, + blockSize: des.BlockSize, +}, { + cipher: PEMCipher3DES, + name: "DES-EDE3-CBC", + cipherFunc: des.NewTripleDESCipher, + keySize: 24, + blockSize: des.BlockSize, +}, { + cipher: PEMCipherAES128, + name: "AES-128-CBC", + cipherFunc: aes.NewCipher, + keySize: 16, + blockSize: aes.BlockSize, +}, { + cipher: PEMCipherAES192, + name: "AES-192-CBC", + cipherFunc: aes.NewCipher, + keySize: 24, + blockSize: aes.BlockSize, +}, { + cipher: PEMCipherAES256, + name: "AES-256-CBC", + cipherFunc: aes.NewCipher, + keySize: 32, + blockSize: aes.BlockSize, +}, +} + +// deriveKey uses a key derivation function to stretch the password into a key +// with the number of bits our cipher requires. This algorithm was derived from +// the OpenSSL source. +func (c rfc1423Algo) deriveKey(password, salt []byte) []byte { + hash := md5.New() + out := make([]byte, c.keySize) + var digest []byte + + for i := 0; i < len(out); i += len(digest) { + hash.Reset() + hash.Write(digest) + hash.Write(password) + hash.Write(salt) + digest = hash.Sum(digest[:0]) + copy(out[i:], digest) + } + return out +} + +// IsEncryptedPEMBlock returns if the PEM block is password encrypted. +func IsEncryptedPEMBlock(b *pem.Block) bool { + _, ok := b.Headers["DEK-Info"] + return ok +} + +// IncorrectPasswordError is returned when an incorrect password is detected. +var IncorrectPasswordError = errors.New("x509: decryption password incorrect") + +// DecryptPEMBlock takes a password encrypted PEM block and the password used to +// encrypt it and returns a slice of decrypted DER encoded bytes. It inspects +// the DEK-Info header to determine the algorithm used for decryption. If no +// DEK-Info header is present, an error is returned. If an incorrect password +// is detected an IncorrectPasswordError is returned. +func DecryptPEMBlock(b *pem.Block, password []byte) ([]byte, error) { + dek, ok := b.Headers["DEK-Info"] + if !ok { + return nil, errors.New("x509: no DEK-Info header in block") + } + + idx := strings.Index(dek, ",") + if idx == -1 { + return nil, errors.New("x509: malformed DEK-Info header") + } + + mode, hexIV := dek[:idx], dek[idx+1:] + ciph := cipherByName(mode) + if ciph == nil { + return nil, errors.New("x509: unknown encryption mode") + } + iv, err := hex.DecodeString(hexIV) + if err != nil { + return nil, err + } + if len(iv) != ciph.blockSize { + return nil, errors.New("x509: incorrect IV size") + } + + // Based on the OpenSSL implementation. The salt is the first 8 bytes + // of the initialization vector. 
+ key := ciph.deriveKey(password, iv[:8]) + block, err := ciph.cipherFunc(key) + if err != nil { + return nil, err + } + + data := make([]byte, len(b.Bytes)) + dec := cipher.NewCBCDecrypter(block, iv) + dec.CryptBlocks(data, b.Bytes) + + // Blocks are padded using a scheme where the last n bytes of padding are all + // equal to n. It can pad from 1 to blocksize bytes inclusive. See RFC 1423. + // For example: + // [x y z 2 2] + // [x y 7 7 7 7 7 7 7] + // If we detect a bad padding, we assume it is an invalid password. + dlen := len(data) + if dlen == 0 || dlen%ciph.blockSize != 0 { + return nil, errors.New("x509: invalid padding") + } + last := int(data[dlen-1]) + if dlen < last { + return nil, IncorrectPasswordError + } + if last == 0 || last > ciph.blockSize { + return nil, IncorrectPasswordError + } + for _, val := range data[dlen-last:] { + if int(val) != last { + return nil, IncorrectPasswordError + } + } + return data[:dlen-last], nil +} + +// EncryptPEMBlock returns a PEM block of the specified type holding the +// given DER-encoded data encrypted with the specified algorithm and +// password. +func EncryptPEMBlock(rand io.Reader, blockType string, data, password []byte, alg PEMCipher) (*pem.Block, error) { + ciph := cipherByKey(alg) + if ciph == nil { + return nil, errors.New("x509: unknown encryption mode") + } + iv := make([]byte, ciph.blockSize) + if _, err := io.ReadFull(rand, iv); err != nil { + return nil, errors.New("x509: cannot generate IV: " + err.Error()) + } + // The salt is the first 8 bytes of the initialization vector, + // matching the key derivation in DecryptPEMBlock. + key := ciph.deriveKey(password, iv[:8]) + block, err := ciph.cipherFunc(key) + if err != nil { + return nil, err + } + enc := cipher.NewCBCEncrypter(block, iv) + pad := ciph.blockSize - len(data)%ciph.blockSize + encrypted := make([]byte, len(data), len(data)+pad) + // We could save this copy by encrypting all the whole blocks in + // the data separately, but it doesn't seem worth the additional + // code. + copy(encrypted, data) + // See RFC 1423, section 1.1 + for i := 0; i < pad; i++ { + encrypted = append(encrypted, byte(pad)) + } + enc.CryptBlocks(encrypted, encrypted) + + return &pem.Block{ + Type: blockType, + Headers: map[string]string{ + "Proc-Type": "4,ENCRYPTED", + "DEK-Info": ciph.name + "," + hex.EncodeToString(iv), + }, + Bytes: encrypted, + }, nil +} + +func cipherByName(name string) *rfc1423Algo { + for i := range rfc1423Algos { + alg := &rfc1423Algos[i] + if alg.name == name { + return alg + } + } + return nil +} + +func cipherByKey(key PEMCipher) *rfc1423Algo { + for i := range rfc1423Algos { + alg := &rfc1423Algos[i] + if alg.cipher == key { + return alg + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/pkcs1.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/pkcs1.go new file mode 100644 index 000000000..f78cf589e --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/pkcs1.go @@ -0,0 +1,124 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
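The RFC 1423 helpers above round-trip: EncryptPEMBlock produces a password-protected block that DecryptPEMBlock recovers. A short sketch, assuming the fork is imported as ctx509; the block type, payload and password are arbitrary:

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	ctx509 "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509"
)

func main() {
	secret := []byte("not really DER, just payload bytes")
	password := []byte("correct horse battery staple")

	// Encrypt into an RFC 1423 style PEM block using AES-256-CBC.
	block, err := ctx509.EncryptPEMBlock(rand.Reader, "RSA PRIVATE KEY", secret, password, ctx509.PEMCipherAES256)
	if err != nil {
		panic(err)
	}
	fmt.Println(ctx509.IsEncryptedPEMBlock(block)) // true

	// Decrypting with the same password recovers the original bytes.
	plain, err := ctx509.DecryptPEMBlock(block, password)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(plain, secret)) // true
}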
+ +package x509 + +import ( + "crypto/rsa" + // START CT CHANGES + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/asn1" + // END CT CHANGES + "errors" + "math/big" +) + +// pkcs1PrivateKey is a structure which mirrors the PKCS#1 ASN.1 for an RSA private key. +type pkcs1PrivateKey struct { + Version int + N *big.Int + E int + D *big.Int + P *big.Int + Q *big.Int + // We ignore these values, if present, because rsa will calculate them. + Dp *big.Int `asn1:"optional"` + Dq *big.Int `asn1:"optional"` + Qinv *big.Int `asn1:"optional"` + + AdditionalPrimes []pkcs1AdditionalRSAPrime `asn1:"optional,omitempty"` +} + +type pkcs1AdditionalRSAPrime struct { + Prime *big.Int + + // We ignore these values because rsa will calculate them. + Exp *big.Int + Coeff *big.Int +} + +// ParsePKCS1PrivateKey returns an RSA private key from its ASN.1 PKCS#1 DER encoded form. +func ParsePKCS1PrivateKey(der []byte) (key *rsa.PrivateKey, err error) { + var priv pkcs1PrivateKey + rest, err := asn1.Unmarshal(der, &priv) + if len(rest) > 0 { + err = asn1.SyntaxError{Msg: "trailing data"} + return + } + if err != nil { + return + } + + if priv.Version > 1 { + return nil, errors.New("x509: unsupported private key version") + } + + if priv.N.Sign() <= 0 || priv.D.Sign() <= 0 || priv.P.Sign() <= 0 || priv.Q.Sign() <= 0 { + return nil, errors.New("x509: private key contains zero or negative value") + } + + key = new(rsa.PrivateKey) + key.PublicKey = rsa.PublicKey{ + E: priv.E, + N: priv.N, + } + + key.D = priv.D + key.Primes = make([]*big.Int, 2+len(priv.AdditionalPrimes)) + key.Primes[0] = priv.P + key.Primes[1] = priv.Q + for i, a := range priv.AdditionalPrimes { + if a.Prime.Sign() <= 0 { + return nil, errors.New("x509: private key contains zero or negative prime") + } + key.Primes[i+2] = a.Prime + // We ignore the other two values because rsa will calculate + // them as needed. + } + + err = key.Validate() + if err != nil { + return nil, err + } + key.Precompute() + + return +} + +// MarshalPKCS1PrivateKey converts a private key to ASN.1 DER encoded form. +func MarshalPKCS1PrivateKey(key *rsa.PrivateKey) []byte { + key.Precompute() + + version := 0 + if len(key.Primes) > 2 { + version = 1 + } + + priv := pkcs1PrivateKey{ + Version: version, + N: key.N, + E: key.PublicKey.E, + D: key.D, + P: key.Primes[0], + Q: key.Primes[1], + Dp: key.Precomputed.Dp, + Dq: key.Precomputed.Dq, + Qinv: key.Precomputed.Qinv, + } + + priv.AdditionalPrimes = make([]pkcs1AdditionalRSAPrime, len(key.Precomputed.CRTValues)) + for i, values := range key.Precomputed.CRTValues { + priv.AdditionalPrimes[i].Prime = key.Primes[2+i] + priv.AdditionalPrimes[i].Exp = values.Exp + priv.AdditionalPrimes[i].Coeff = values.Coeff + } + + b, _ := asn1.Marshal(priv) + return b +} + +// rsaPublicKey reflects the ASN.1 structure of a PKCS#1 public key. +type rsaPublicKey struct { + N *big.Int + E int +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/pkcs8.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/pkcs8.go new file mode 100644 index 000000000..7d8325c5d --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/pkcs8.go @@ -0,0 +1,56 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
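MarshalPKCS1PrivateKey and ParsePKCS1PrivateKey above are inverses for well-formed keys; a quick sketch, again assuming the fork is imported as ctx509:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	ctx509 "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// Serialize to PKCS#1 DER and parse it back with the vendored helpers.
	der := ctx509.MarshalPKCS1PrivateKey(key)
	parsed, err := ctx509.ParsePKCS1PrivateKey(der)
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.N.Cmp(key.N) == 0 && parsed.E == key.E) // true
}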
+ +package x509 + +import ( + // START CT CHANGES + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/asn1" + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/pkix" + // END CT CHANGES + "errors" + "fmt" +) + +// pkcs8 reflects an ASN.1, PKCS#8 PrivateKey. See +// ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-8/pkcs-8v1_2.asn +// and RFC5208. +type pkcs8 struct { + Version int + Algo pkix.AlgorithmIdentifier + PrivateKey []byte + // optional attributes omitted. +} + +// ParsePKCS8PrivateKey parses an unencrypted, PKCS#8 private key. See +// http://www.rsa.com/rsalabs/node.asp?id=2130 and RFC5208. +func ParsePKCS8PrivateKey(der []byte) (key interface{}, err error) { + var privKey pkcs8 + if _, err := asn1.Unmarshal(der, &privKey); err != nil { + return nil, err + } + switch { + case privKey.Algo.Algorithm.Equal(oidPublicKeyRSA): + key, err = ParsePKCS1PrivateKey(privKey.PrivateKey) + if err != nil { + return nil, errors.New("x509: failed to parse RSA private key embedded in PKCS#8: " + err.Error()) + } + return key, nil + + case privKey.Algo.Algorithm.Equal(oidPublicKeyECDSA): + bytes := privKey.Algo.Parameters.FullBytes + namedCurveOID := new(asn1.ObjectIdentifier) + if _, err := asn1.Unmarshal(bytes, namedCurveOID); err != nil { + namedCurveOID = nil + } + key, err = parseECPrivateKey(namedCurveOID, privKey.PrivateKey) + if err != nil { + return nil, errors.New("x509: failed to parse EC private key embedded in PKCS#8: " + err.Error()) + } + return key, nil + + default: + return nil, fmt.Errorf("x509: PKCS#8 wrapping contained private key with unknown algorithm: %v", privKey.Algo.Algorithm) + } +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/pkix/pkix.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/pkix/pkix.go new file mode 100644 index 000000000..e1e219233 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/pkix/pkix.go @@ -0,0 +1,173 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkix contains shared, low level structures used for ASN.1 parsing +// and serialization of X.509 certificates, CRL and OCSP. +package pkix + +import ( + // START CT CHANGES + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/asn1" + // END CT CHANGES + "math/big" + "time" +) + +// AlgorithmIdentifier represents the ASN.1 structure of the same name. See RFC +// 5280, section 4.1.1.2. +type AlgorithmIdentifier struct { + Algorithm asn1.ObjectIdentifier + Parameters asn1.RawValue `asn1:"optional"` +} + +type RDNSequence []RelativeDistinguishedNameSET + +type RelativeDistinguishedNameSET []AttributeTypeAndValue + +// AttributeTypeAndValue mirrors the ASN.1 structure of the same name in +// http://tools.ietf.org/html/rfc5280#section-4.1.2.4 +type AttributeTypeAndValue struct { + Type asn1.ObjectIdentifier + Value interface{} +} + +// Extension represents the ASN.1 structure of the same name. See RFC +// 5280, section 4.2. +type Extension struct { + Id asn1.ObjectIdentifier + Critical bool `asn1:"optional"` + Value []byte +} + +// Name represents an X.509 distinguished name. This only includes the common +// elements of a DN. Additional elements in the name are ignored. 
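ParsePKCS8PrivateKey above accepts a standard PKCS#8 blob. A sketch that wraps an ECDSA key with the stock crypto/x509 (MarshalPKCS8PrivateKey needs Go 1.10 or newer) and hands the DER to the fork; this assumes the fork's relaxed parser accepts the stock encoding, which it is designed to do:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	stdx509 "crypto/x509"
	"fmt"

	ctx509 "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// Wrap the key in a standard PKCS#8 structure with the stock library...
	der, err := stdx509.MarshalPKCS8PrivateKey(key)
	if err != nil {
		panic(err)
	}

	// ...and parse it back with the vendored fork.
	parsed, err := ctx509.ParsePKCS8PrivateKey(der)
	if err != nil {
		panic(err)
	}
	_, ok := parsed.(*ecdsa.PrivateKey)
	fmt.Println(ok) // true
}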
+type Name struct { + Country, Organization, OrganizationalUnit []string + Locality, Province []string + StreetAddress, PostalCode []string + SerialNumber, CommonName string + + Names []AttributeTypeAndValue +} + +func (n *Name) FillFromRDNSequence(rdns *RDNSequence) { + for _, rdn := range *rdns { + if len(rdn) == 0 { + continue + } + atv := rdn[0] + n.Names = append(n.Names, atv) + value, ok := atv.Value.(string) + if !ok { + continue + } + + t := atv.Type + if len(t) == 4 && t[0] == 2 && t[1] == 5 && t[2] == 4 { + switch t[3] { + case 3: + n.CommonName = value + case 5: + n.SerialNumber = value + case 6: + n.Country = append(n.Country, value) + case 7: + n.Locality = append(n.Locality, value) + case 8: + n.Province = append(n.Province, value) + case 9: + n.StreetAddress = append(n.StreetAddress, value) + case 10: + n.Organization = append(n.Organization, value) + case 11: + n.OrganizationalUnit = append(n.OrganizationalUnit, value) + case 17: + n.PostalCode = append(n.PostalCode, value) + } + } + } +} + +var ( + oidCountry = []int{2, 5, 4, 6} + oidOrganization = []int{2, 5, 4, 10} + oidOrganizationalUnit = []int{2, 5, 4, 11} + oidCommonName = []int{2, 5, 4, 3} + oidSerialNumber = []int{2, 5, 4, 5} + oidLocality = []int{2, 5, 4, 7} + oidProvince = []int{2, 5, 4, 8} + oidStreetAddress = []int{2, 5, 4, 9} + oidPostalCode = []int{2, 5, 4, 17} +) + +// appendRDNs appends a relativeDistinguishedNameSET to the given RDNSequence +// and returns the new value. The relativeDistinguishedNameSET contains an +// attributeTypeAndValue for each of the given values. See RFC 5280, A.1, and +// search for AttributeTypeAndValue. +func appendRDNs(in RDNSequence, values []string, oid asn1.ObjectIdentifier) RDNSequence { + if len(values) == 0 { + return in + } + + s := make([]AttributeTypeAndValue, len(values)) + for i, value := range values { + s[i].Type = oid + s[i].Value = value + } + + return append(in, s) +} + +func (n Name) ToRDNSequence() (ret RDNSequence) { + ret = appendRDNs(ret, n.Country, oidCountry) + ret = appendRDNs(ret, n.Organization, oidOrganization) + ret = appendRDNs(ret, n.OrganizationalUnit, oidOrganizationalUnit) + ret = appendRDNs(ret, n.Locality, oidLocality) + ret = appendRDNs(ret, n.Province, oidProvince) + ret = appendRDNs(ret, n.StreetAddress, oidStreetAddress) + ret = appendRDNs(ret, n.PostalCode, oidPostalCode) + if len(n.CommonName) > 0 { + ret = appendRDNs(ret, []string{n.CommonName}, oidCommonName) + } + if len(n.SerialNumber) > 0 { + ret = appendRDNs(ret, []string{n.SerialNumber}, oidSerialNumber) + } + + return ret +} + +// CertificateList represents the ASN.1 structure of the same name. See RFC +// 5280, section 5.1. Use Certificate.CheckCRLSignature to verify the +// signature. +type CertificateList struct { + TBSCertList TBSCertificateList + SignatureAlgorithm AlgorithmIdentifier + SignatureValue asn1.BitString +} + +// HasExpired reports whether now is past the expiry time of certList. +func (certList *CertificateList) HasExpired(now time.Time) bool { + return now.After(certList.TBSCertList.NextUpdate) +} + +// TBSCertificateList represents the ASN.1 structure of the same name. See RFC +// 5280, section 5.1. 
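ToRDNSequence and FillFromRDNSequence above round-trip the convenience fields of Name; a small sketch using the vendored pkix package:

package main

import (
	"fmt"

	"github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/pkix"
)

func main() {
	name := pkix.Name{
		CommonName:   "example.com",
		Organization: []string{"Example Org"},
		Country:      []string{"US"},
	}

	// ToRDNSequence expands the convenience fields into RDN sets...
	rdns := name.ToRDNSequence()

	// ...and FillFromRDNSequence walks them back into a Name.
	var roundTripped pkix.Name
	roundTripped.FillFromRDNSequence(&rdns)
	fmt.Println(roundTripped.CommonName, roundTripped.Organization, roundTripped.Country)
}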
+type TBSCertificateList struct { + Raw asn1.RawContent + Version int `asn1:"optional,default:2"` + Signature AlgorithmIdentifier + Issuer RDNSequence + ThisUpdate time.Time + NextUpdate time.Time + RevokedCertificates []RevokedCertificate `asn1:"optional"` + Extensions []Extension `asn1:"tag:0,optional,explicit"` +} + +// RevokedCertificate represents the ASN.1 structure of the same name. See RFC +// 5280, section 5.1. +type RevokedCertificate struct { + SerialNumber *big.Int + RevocationTime time.Time + Extensions []Extension `asn1:"optional"` +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root.go new file mode 100644 index 000000000..8aae14e09 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root.go @@ -0,0 +1,17 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import "sync" + +var ( + once sync.Once + systemRoots *CertPool +) + +func systemRootsPool() *CertPool { + once.Do(initSystemRoots) + return systemRoots +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root_darwin.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root_darwin.go new file mode 100644 index 000000000..3d3fa4e4d --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root_darwin.go @@ -0,0 +1,83 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin,cgo + +package x509 + +/* +#cgo CFLAGS: -mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1060 +#cgo LDFLAGS: -framework CoreFoundation -framework Security + +#include +#include + +// FetchPEMRootsCTX509 fetches the system's list of trusted X.509 root certificates. +// +// On success it returns 0 and fills pemRoots with a CFDataRef that contains the extracted root +// certificates of the system. On failure, the function returns -1. +// +// Note: The CFDataRef returned in pemRoots must be released (using CFRelease) after +// we've consumed its content. +int FetchPEMRootsCTX509(CFDataRef *pemRoots) { + if (pemRoots == NULL) { + return -1; + } + + CFArrayRef certs = NULL; + OSStatus err = SecTrustCopyAnchorCertificates(&certs); + if (err != noErr) { + return -1; + } + + CFMutableDataRef combinedData = CFDataCreateMutable(kCFAllocatorDefault, 0); + int i, ncerts = CFArrayGetCount(certs); + for (i = 0; i < ncerts; i++) { + CFDataRef data = NULL; + SecCertificateRef cert = (SecCertificateRef)CFArrayGetValueAtIndex(certs, i); + if (cert == NULL) { + continue; + } + + // Note: SecKeychainItemExport is deprecated as of 10.7 in favor of SecItemExport. + // Once we support weak imports via cgo we should prefer that, and fall back to this + // for older systems. 
+ err = SecKeychainItemExport(cert, kSecFormatX509Cert, kSecItemPemArmour, NULL, &data); + if (err != noErr) { + continue; + } + + if (data != NULL) { + CFDataAppendBytes(combinedData, CFDataGetBytePtr(data), CFDataGetLength(data)); + CFRelease(data); + } + } + + CFRelease(certs); + + *pemRoots = combinedData; + return 0; +} +*/ +import "C" +import "unsafe" + +func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) { + return nil, nil +} + +func initSystemRoots() { + roots := NewCertPool() + + var data C.CFDataRef = nil + err := C.FetchPEMRootsCTX509(&data) + if err == -1 { + return + } + + defer C.CFRelease(C.CFTypeRef(data)) + buf := C.GoBytes(unsafe.Pointer(C.CFDataGetBytePtr(data)), C.int(C.CFDataGetLength(data))) + roots.AppendCertsFromPEM(buf) + systemRoots = roots +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root_plan9.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root_plan9.go new file mode 100644 index 000000000..9965caade --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root_plan9.go @@ -0,0 +1,33 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build plan9 + +package x509 + +import "io/ioutil" + +// Possible certificate files; stop after finding one. +var certFiles = []string{ + "/sys/lib/tls/ca.pem", +} + +func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) { + return nil, nil +} + +func initSystemRoots() { + roots := NewCertPool() + for _, file := range certFiles { + data, err := ioutil.ReadFile(file) + if err == nil { + roots.AppendCertsFromPEM(data) + systemRoots = roots + return + } + } + + // All of the files failed to load. systemRoots will be nil which will + // trigger a specific error at verification time. +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root_stub.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root_stub.go new file mode 100644 index 000000000..4c742ccc3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root_stub.go @@ -0,0 +1,14 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin,!cgo + +package x509 + +func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) { + return nil, nil +} + +func initSystemRoots() { +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root_unix.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root_unix.go new file mode 100644 index 000000000..324f855b1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root_unix.go @@ -0,0 +1,37 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build dragonfly freebsd linux openbsd netbsd + +package x509 + +import "io/ioutil" + +// Possible certificate files; stop after finding one. +var certFiles = []string{ + "/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc. 
+ "/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL + "/etc/ssl/ca-bundle.pem", // OpenSUSE + "/etc/ssl/cert.pem", // OpenBSD + "/usr/local/share/certs/ca-root-nss.crt", // FreeBSD/DragonFly +} + +func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) { + return nil, nil +} + +func initSystemRoots() { + roots := NewCertPool() + for _, file := range certFiles { + data, err := ioutil.ReadFile(file) + if err == nil { + roots.AppendCertsFromPEM(data) + systemRoots = roots + return + } + } + + // All of the files failed to load. systemRoots will be nil which will + // trigger a specific error at verification time. +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root_windows.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root_windows.go new file mode 100644 index 000000000..81018b78f --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/root_windows.go @@ -0,0 +1,229 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "errors" + "syscall" + "unsafe" +) + +// Creates a new *syscall.CertContext representing the leaf certificate in an in-memory +// certificate store containing itself and all of the intermediate certificates specified +// in the opts.Intermediates CertPool. +// +// A pointer to the in-memory store is available in the returned CertContext's Store field. +// The store is automatically freed when the CertContext is freed using +// syscall.CertFreeCertificateContext. +func createStoreContext(leaf *Certificate, opts *VerifyOptions) (*syscall.CertContext, error) { + var storeCtx *syscall.CertContext + + leafCtx, err := syscall.CertCreateCertificateContext(syscall.X509_ASN_ENCODING|syscall.PKCS_7_ASN_ENCODING, &leaf.Raw[0], uint32(len(leaf.Raw))) + if err != nil { + return nil, err + } + defer syscall.CertFreeCertificateContext(leafCtx) + + handle, err := syscall.CertOpenStore(syscall.CERT_STORE_PROV_MEMORY, 0, 0, syscall.CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG, 0) + if err != nil { + return nil, err + } + defer syscall.CertCloseStore(handle, 0) + + err = syscall.CertAddCertificateContextToStore(handle, leafCtx, syscall.CERT_STORE_ADD_ALWAYS, &storeCtx) + if err != nil { + return nil, err + } + + if opts.Intermediates != nil { + for _, intermediate := range opts.Intermediates.certs { + ctx, err := syscall.CertCreateCertificateContext(syscall.X509_ASN_ENCODING|syscall.PKCS_7_ASN_ENCODING, &intermediate.Raw[0], uint32(len(intermediate.Raw))) + if err != nil { + return nil, err + } + + err = syscall.CertAddCertificateContextToStore(handle, ctx, syscall.CERT_STORE_ADD_ALWAYS, nil) + syscall.CertFreeCertificateContext(ctx) + if err != nil { + return nil, err + } + } + } + + return storeCtx, nil +} + +// extractSimpleChain extracts the final certificate chain from a CertSimpleChain. 
+func extractSimpleChain(simpleChain **syscall.CertSimpleChain, count int) (chain []*Certificate, err error) { + if simpleChain == nil || count == 0 { + return nil, errors.New("x509: invalid simple chain") + } + + simpleChains := (*[1 << 20]*syscall.CertSimpleChain)(unsafe.Pointer(simpleChain))[:] + lastChain := simpleChains[count-1] + elements := (*[1 << 20]*syscall.CertChainElement)(unsafe.Pointer(lastChain.Elements))[:] + for i := 0; i < int(lastChain.NumElements); i++ { + // Copy the buf, since ParseCertificate does not create its own copy. + cert := elements[i].CertContext + encodedCert := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:] + buf := make([]byte, cert.Length) + copy(buf, encodedCert[:]) + parsedCert, err := ParseCertificate(buf) + if err != nil { + return nil, err + } + chain = append(chain, parsedCert) + } + + return chain, nil +} + +// checkChainTrustStatus checks the trust status of the certificate chain, translating +// any errors it finds into Go errors in the process. +func checkChainTrustStatus(c *Certificate, chainCtx *syscall.CertChainContext) error { + if chainCtx.TrustStatus.ErrorStatus != syscall.CERT_TRUST_NO_ERROR { + status := chainCtx.TrustStatus.ErrorStatus + switch status { + case syscall.CERT_TRUST_IS_NOT_TIME_VALID: + return CertificateInvalidError{c, Expired} + default: + return UnknownAuthorityError{c, nil, nil} + } + } + return nil +} + +// checkChainSSLServerPolicy checks that the certificate chain in chainCtx is valid for +// use as a certificate chain for a SSL/TLS server. +func checkChainSSLServerPolicy(c *Certificate, chainCtx *syscall.CertChainContext, opts *VerifyOptions) error { + servernamep, err := syscall.UTF16PtrFromString(opts.DNSName) + if err != nil { + return err + } + sslPara := &syscall.SSLExtraCertChainPolicyPara{ + AuthType: syscall.AUTHTYPE_SERVER, + ServerName: servernamep, + } + sslPara.Size = uint32(unsafe.Sizeof(*sslPara)) + + para := &syscall.CertChainPolicyPara{ + ExtraPolicyPara: uintptr(unsafe.Pointer(sslPara)), + } + para.Size = uint32(unsafe.Sizeof(*para)) + + status := syscall.CertChainPolicyStatus{} + err = syscall.CertVerifyCertificateChainPolicy(syscall.CERT_CHAIN_POLICY_SSL, chainCtx, para, &status) + if err != nil { + return err + } + + // TODO(mkrautz): use the lChainIndex and lElementIndex fields + // of the CertChainPolicyStatus to provide proper context, instead + // using c. + if status.Error != 0 { + switch status.Error { + case syscall.CERT_E_EXPIRED: + return CertificateInvalidError{c, Expired} + case syscall.CERT_E_CN_NO_MATCH: + return HostnameError{c, opts.DNSName} + case syscall.CERT_E_UNTRUSTEDROOT: + return UnknownAuthorityError{c, nil, nil} + default: + return UnknownAuthorityError{c, nil, nil} + } + } + + return nil +} + +// systemVerify is like Verify, except that it uses CryptoAPI calls +// to build certificate chains and verify them. +func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) { + hasDNSName := opts != nil && len(opts.DNSName) > 0 + + storeCtx, err := createStoreContext(c, opts) + if err != nil { + return nil, err + } + defer syscall.CertFreeCertificateContext(storeCtx) + + para := new(syscall.CertChainPara) + para.Size = uint32(unsafe.Sizeof(*para)) + + // If there's a DNSName set in opts, assume we're verifying + // a certificate from a TLS server. + if hasDNSName { + oids := []*byte{ + &syscall.OID_PKIX_KP_SERVER_AUTH[0], + // Both IE and Chrome allow certificates with + // Server Gated Crypto as well. 
Some certificates + // in the wild require them. + &syscall.OID_SERVER_GATED_CRYPTO[0], + &syscall.OID_SGC_NETSCAPE[0], + } + para.RequestedUsage.Type = syscall.USAGE_MATCH_TYPE_OR + para.RequestedUsage.Usage.Length = uint32(len(oids)) + para.RequestedUsage.Usage.UsageIdentifiers = &oids[0] + } else { + para.RequestedUsage.Type = syscall.USAGE_MATCH_TYPE_AND + para.RequestedUsage.Usage.Length = 0 + para.RequestedUsage.Usage.UsageIdentifiers = nil + } + + var verifyTime *syscall.Filetime + if opts != nil && !opts.CurrentTime.IsZero() { + ft := syscall.NsecToFiletime(opts.CurrentTime.UnixNano()) + verifyTime = &ft + } + + // CertGetCertificateChain will traverse Windows's root stores + // in an attempt to build a verified certificate chain. Once + // it has found a verified chain, it stops. MSDN docs on + // CERT_CHAIN_CONTEXT: + // + // When a CERT_CHAIN_CONTEXT is built, the first simple chain + // begins with an end certificate and ends with a self-signed + // certificate. If that self-signed certificate is not a root + // or otherwise trusted certificate, an attempt is made to + // build a new chain. CTLs are used to create the new chain + // beginning with the self-signed certificate from the original + // chain as the end certificate of the new chain. This process + // continues building additional simple chains until the first + // self-signed certificate is a trusted certificate or until + // an additional simple chain cannot be built. + // + // The result is that we'll only get a single trusted chain to + // return to our caller. + var chainCtx *syscall.CertChainContext + err = syscall.CertGetCertificateChain(syscall.Handle(0), storeCtx, verifyTime, storeCtx.Store, para, 0, 0, &chainCtx) + if err != nil { + return nil, err + } + defer syscall.CertFreeCertificateChain(chainCtx) + + err = checkChainTrustStatus(c, chainCtx) + if err != nil { + return nil, err + } + + if hasDNSName { + err = checkChainSSLServerPolicy(c, chainCtx, opts) + if err != nil { + return nil, err + } + } + + chain, err := extractSimpleChain(chainCtx.Chains, int(chainCtx.ChainCount)) + if err != nil { + return nil, err + } + + chains = append(chains, chain) + + return chains, nil +} + +func initSystemRoots() { +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/sec1.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/sec1.go new file mode 100644 index 000000000..7c1a4c25e --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/sec1.go @@ -0,0 +1,85 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "crypto/ecdsa" + "crypto/elliptic" + // START CT CHANGES + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/asn1" + // START CT CHANGES + "errors" + "fmt" + "math/big" +) + +const ecPrivKeyVersion = 1 + +// ecPrivateKey reflects an ASN.1 Elliptic Curve Private Key Structure. +// References: +// RFC5915 +// SEC1 - http://www.secg.org/download/aid-780/sec1-v2.pdf +// Per RFC5915 the NamedCurveOID is marked as ASN.1 OPTIONAL, however in +// most cases it is not. +type ecPrivateKey struct { + Version int + PrivateKey []byte + NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"` + PublicKey asn1.BitString `asn1:"optional,explicit,tag:1"` +} + +// ParseECPrivateKey parses an ASN.1 Elliptic Curve Private Key Structure. 
+func ParseECPrivateKey(der []byte) (key *ecdsa.PrivateKey, err error) { + return parseECPrivateKey(nil, der) +} + +// MarshalECPrivateKey marshals an EC private key into ASN.1, DER format. +func MarshalECPrivateKey(key *ecdsa.PrivateKey) ([]byte, error) { + oid, ok := oidFromNamedCurve(key.Curve) + if !ok { + return nil, errors.New("x509: unknown elliptic curve") + } + return asn1.Marshal(ecPrivateKey{ + Version: 1, + PrivateKey: key.D.Bytes(), + NamedCurveOID: oid, + PublicKey: asn1.BitString{Bytes: elliptic.Marshal(key.Curve, key.X, key.Y)}, + }) +} + +// parseECPrivateKey parses an ASN.1 Elliptic Curve Private Key Structure. +// The OID for the named curve may be provided from another source (such as +// the PKCS8 container) - if it is provided then use this instead of the OID +// that may exist in the EC private key structure. +func parseECPrivateKey(namedCurveOID *asn1.ObjectIdentifier, der []byte) (key *ecdsa.PrivateKey, err error) { + var privKey ecPrivateKey + if _, err := asn1.Unmarshal(der, &privKey); err != nil { + return nil, errors.New("x509: failed to parse EC private key: " + err.Error()) + } + if privKey.Version != ecPrivKeyVersion { + return nil, fmt.Errorf("x509: unknown EC private key version %d", privKey.Version) + } + + var curve elliptic.Curve + if namedCurveOID != nil { + curve = namedCurveFromOID(*namedCurveOID) + } else { + curve = namedCurveFromOID(privKey.NamedCurveOID) + } + if curve == nil { + return nil, errors.New("x509: unknown elliptic curve") + } + + k := new(big.Int).SetBytes(privKey.PrivateKey) + if k.Cmp(curve.Params().N) >= 0 { + return nil, errors.New("x509: invalid elliptic curve private key value") + } + priv := new(ecdsa.PrivateKey) + priv.Curve = curve + priv.D = k + priv.X, priv.Y = curve.ScalarBaseMult(privKey.PrivateKey) + + return priv, nil +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/verify.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/verify.go new file mode 100644 index 000000000..e82dbc12b --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/verify.go @@ -0,0 +1,476 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "fmt" + "net" + "runtime" + "strings" + "time" + "unicode/utf8" +) + +type InvalidReason int + +const ( + // NotAuthorizedToSign results when a certificate is signed by another + // which isn't marked as a CA certificate. + NotAuthorizedToSign InvalidReason = iota + // Expired results when a certificate has expired, based on the time + // given in the VerifyOptions. + Expired + // CANotAuthorizedForThisName results when an intermediate or root + // certificate has a name constraint which doesn't include the name + // being checked. + CANotAuthorizedForThisName + // TooManyIntermediates results when a path length constraint is + // violated. + TooManyIntermediates + // IncompatibleUsage results when the certificate's key usage indicates + // that it may only be used for a different purpose. + IncompatibleUsage +) + +// CertificateInvalidError results when an odd error occurs. Users of this +// library probably want to handle all these errors uniformly. 
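The SEC 1 helpers above (MarshalECPrivateKey and ParseECPrivateKey) round-trip a generated key; a sketch assuming the fork is imported as ctx509 and the curve is one the fork knows (P-256 here):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	ctx509 "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// SEC 1 / RFC 5915 DER encoding of the private key...
	der, err := ctx509.MarshalECPrivateKey(key)
	if err != nil {
		panic(err)
	}

	// ...and back again.
	parsed, err := ctx509.ParseECPrivateKey(der)
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.D.Cmp(key.D) == 0) // true
}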
+type CertificateInvalidError struct { + Cert *Certificate + Reason InvalidReason +} + +func (e CertificateInvalidError) Error() string { + switch e.Reason { + case NotAuthorizedToSign: + return "x509: certificate is not authorized to sign other certificates" + case Expired: + return "x509: certificate has expired or is not yet valid" + case CANotAuthorizedForThisName: + return "x509: a root or intermediate certificate is not authorized to sign in this domain" + case TooManyIntermediates: + return "x509: too many intermediates for path length constraint" + case IncompatibleUsage: + return "x509: certificate specifies an incompatible key usage" + } + return "x509: unknown error" +} + +// HostnameError results when the set of authorized names doesn't match the +// requested name. +type HostnameError struct { + Certificate *Certificate + Host string +} + +func (h HostnameError) Error() string { + c := h.Certificate + + var valid string + if ip := net.ParseIP(h.Host); ip != nil { + // Trying to validate an IP + if len(c.IPAddresses) == 0 { + return "x509: cannot validate certificate for " + h.Host + " because it doesn't contain any IP SANs" + } + for _, san := range c.IPAddresses { + if len(valid) > 0 { + valid += ", " + } + valid += san.String() + } + } else { + if len(c.DNSNames) > 0 { + valid = strings.Join(c.DNSNames, ", ") + } else { + valid = c.Subject.CommonName + } + } + return "x509: certificate is valid for " + valid + ", not " + h.Host +} + +// UnknownAuthorityError results when the certificate issuer is unknown +type UnknownAuthorityError struct { + cert *Certificate + // hintErr contains an error that may be helpful in determining why an + // authority wasn't found. + hintErr error + // hintCert contains a possible authority certificate that was rejected + // because of the error in hintErr. + hintCert *Certificate +} + +func (e UnknownAuthorityError) Error() string { + s := "x509: certificate signed by unknown authority" + if e.hintErr != nil { + certName := e.hintCert.Subject.CommonName + if len(certName) == 0 { + if len(e.hintCert.Subject.Organization) > 0 { + certName = e.hintCert.Subject.Organization[0] + } + certName = "serial:" + e.hintCert.SerialNumber.String() + } + s += fmt.Sprintf(" (possibly because of %q while trying to verify candidate authority certificate %q)", e.hintErr, certName) + } + return s +} + +// SystemRootsError results when we fail to load the system root certificates. +type SystemRootsError struct { +} + +func (e SystemRootsError) Error() string { + return "x509: failed to load system roots and no roots provided" +} + +// VerifyOptions contains parameters for Certificate.Verify. It's a structure +// because other PKIX verification APIs have ended up needing many options. +type VerifyOptions struct { + DNSName string + Intermediates *CertPool + Roots *CertPool // if nil, the system roots are used + CurrentTime time.Time // if zero, the current time is used + DisableTimeChecks bool + // KeyUsage specifies which Extended Key Usage values are acceptable. + // An empty list means ExtKeyUsageServerAuth. Key usage is considered a + // constraint down the chain which mirrors Windows CryptoAPI behaviour, + // but not the spec. To accept any key usage, include ExtKeyUsageAny. + KeyUsages []ExtKeyUsage +} + +const ( + leafCertificate = iota + intermediateCertificate + rootCertificate +) + +// isValid performs validity checks on the c. 
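VerifyOptions above feeds the fork's Verify, defined below. A sketch that builds a throwaway self-signed CA with the stock library, re-parses it with the fork, and verifies it against itself as the only root; this assumes the fork's parser accepts certificates produced by a current standard library:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	stdx509 "crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"time"

	ctx509 "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509"
)

func main() {
	// Create a minimal self-signed CA certificate with the stock library.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	tmpl := &stdx509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "test root"},
		DNSNames:              []string{"example.com"},
		NotBefore:             time.Now().Add(-time.Hour),
		NotAfter:              time.Now().Add(time.Hour),
		IsCA:                  true,
		BasicConstraintsValid: true,
		KeyUsage:              stdx509.KeyUsageCertSign | stdx509.KeyUsageDigitalSignature,
	}
	der, err := stdx509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}

	// Parse it with the fork and verify it against itself as the only root.
	cert, err := ctx509.ParseCertificate(der)
	if err != nil {
		panic(err)
	}
	roots := ctx509.NewCertPool()
	roots.AddCert(cert)

	chains, err := cert.Verify(ctx509.VerifyOptions{DNSName: "example.com", Roots: roots})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(chains)) // 1
}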
+func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *VerifyOptions) error { + if !opts.DisableTimeChecks { + now := opts.CurrentTime + if now.IsZero() { + now = time.Now() + } + if now.Before(c.NotBefore) || now.After(c.NotAfter) { + return CertificateInvalidError{c, Expired} + } + } + + if len(c.PermittedDNSDomains) > 0 { + ok := false + for _, domain := range c.PermittedDNSDomains { + if opts.DNSName == domain || + (strings.HasSuffix(opts.DNSName, domain) && + len(opts.DNSName) >= 1+len(domain) && + opts.DNSName[len(opts.DNSName)-len(domain)-1] == '.') { + ok = true + break + } + } + + if !ok { + return CertificateInvalidError{c, CANotAuthorizedForThisName} + } + } + + // KeyUsage status flags are ignored. From Engineering Security, Peter + // Gutmann: A European government CA marked its signing certificates as + // being valid for encryption only, but no-one noticed. Another + // European CA marked its signature keys as not being valid for + // signatures. A different CA marked its own trusted root certificate + // as being invalid for certificate signing. Another national CA + // distributed a certificate to be used to encrypt data for the + // country’s tax authority that was marked as only being usable for + // digital signatures but not for encryption. Yet another CA reversed + // the order of the bit flags in the keyUsage due to confusion over + // encoding endianness, essentially setting a random keyUsage in + // certificates that it issued. Another CA created a self-invalidating + // certificate by adding a certificate policy statement stipulating + // that the certificate had to be used strictly as specified in the + // keyUsage, and a keyUsage containing a flag indicating that the RSA + // encryption key could only be used for Diffie-Hellman key agreement. + + if certType == intermediateCertificate && (!c.BasicConstraintsValid || !c.IsCA) { + return CertificateInvalidError{c, NotAuthorizedToSign} + } + + if c.BasicConstraintsValid && c.MaxPathLen >= 0 { + numIntermediates := len(currentChain) - 1 + if numIntermediates > c.MaxPathLen { + return CertificateInvalidError{c, TooManyIntermediates} + } + } + + return nil +} + +// Verify attempts to verify c by building one or more chains from c to a +// certificate in opts.Roots, using certificates in opts.Intermediates if +// needed. If successful, it returns one or more chains where the first +// element of the chain is c and the last element is from opts.Roots. +// +// WARNING: this doesn't do any revocation checking. +func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err error) { + // Use Windows's own verification and chain building. + if opts.Roots == nil && runtime.GOOS == "windows" { + return c.systemVerify(&opts) + } + + if opts.Roots == nil { + opts.Roots = systemRootsPool() + if opts.Roots == nil { + return nil, SystemRootsError{} + } + } + + err = c.isValid(leafCertificate, nil, &opts) + if err != nil { + return + } + + if len(opts.DNSName) > 0 { + err = c.VerifyHostname(opts.DNSName) + if err != nil { + return + } + } + + candidateChains, err := c.buildChains(make(map[int][][]*Certificate), []*Certificate{c}, &opts) + if err != nil { + return + } + + keyUsages := opts.KeyUsages + if len(keyUsages) == 0 { + keyUsages = []ExtKeyUsage{ExtKeyUsageServerAuth} + } + + // If any key usage is acceptable then we're done. 
+ for _, usage := range keyUsages { + if usage == ExtKeyUsageAny { + chains = candidateChains + return + } + } + + for _, candidate := range candidateChains { + if checkChainForKeyUsage(candidate, keyUsages) { + chains = append(chains, candidate) + } + } + + if len(chains) == 0 { + err = CertificateInvalidError{c, IncompatibleUsage} + } + + return +} + +func appendToFreshChain(chain []*Certificate, cert *Certificate) []*Certificate { + n := make([]*Certificate, len(chain)+1) + copy(n, chain) + n[len(chain)] = cert + return n +} + +func (c *Certificate) buildChains(cache map[int][][]*Certificate, currentChain []*Certificate, opts *VerifyOptions) (chains [][]*Certificate, err error) { + possibleRoots, failedRoot, rootErr := opts.Roots.findVerifiedParents(c) + for _, rootNum := range possibleRoots { + root := opts.Roots.certs[rootNum] + err = root.isValid(rootCertificate, currentChain, opts) + if err != nil { + continue + } + chains = append(chains, appendToFreshChain(currentChain, root)) + } + + possibleIntermediates, failedIntermediate, intermediateErr := opts.Intermediates.findVerifiedParents(c) +nextIntermediate: + for _, intermediateNum := range possibleIntermediates { + intermediate := opts.Intermediates.certs[intermediateNum] + for _, cert := range currentChain { + if cert == intermediate { + continue nextIntermediate + } + } + err = intermediate.isValid(intermediateCertificate, currentChain, opts) + if err != nil { + continue + } + var childChains [][]*Certificate + childChains, ok := cache[intermediateNum] + if !ok { + childChains, err = intermediate.buildChains(cache, appendToFreshChain(currentChain, intermediate), opts) + cache[intermediateNum] = childChains + } + chains = append(chains, childChains...) + } + + if len(chains) > 0 { + err = nil + } + + if len(chains) == 0 && err == nil { + hintErr := rootErr + hintCert := failedRoot + if hintErr == nil { + hintErr = intermediateErr + hintCert = failedIntermediate + } + err = UnknownAuthorityError{c, hintErr, hintCert} + } + + return +} + +func matchHostnames(pattern, host string) bool { + if len(pattern) == 0 || len(host) == 0 { + return false + } + + patternParts := strings.Split(pattern, ".") + hostParts := strings.Split(host, ".") + + if len(patternParts) != len(hostParts) { + return false + } + + for i, patternPart := range patternParts { + if patternPart == "*" { + continue + } + if patternPart != hostParts[i] { + return false + } + } + + return true +} + +// toLowerCaseASCII returns a lower-case version of in. See RFC 6125 6.4.1. We use +// an explicitly ASCII function to avoid any sharp corners resulting from +// performing Unicode operations on DNS labels. +func toLowerCaseASCII(in string) string { + // If the string is already lower-case then there's nothing to do. + isAlreadyLowerCase := true + for _, c := range in { + if c == utf8.RuneError { + // If we get a UTF-8 error then there might be + // upper-case ASCII bytes in the invalid sequence. + isAlreadyLowerCase = false + break + } + if 'A' <= c && c <= 'Z' { + isAlreadyLowerCase = false + break + } + } + + if isAlreadyLowerCase { + return in + } + + out := []byte(in) + for i, c := range out { + if 'A' <= c && c <= 'Z' { + out[i] += 'a' - 'A' + } + } + return string(out) +} + +// VerifyHostname returns nil if c is a valid certificate for the named host. +// Otherwise it returns an error describing the mismatch. +func (c *Certificate) VerifyHostname(h string) error { + // IP addresses may be written in [ ]. 
+ candidateIP := h + if len(h) >= 3 && h[0] == '[' && h[len(h)-1] == ']' { + candidateIP = h[1 : len(h)-1] + } + if ip := net.ParseIP(candidateIP); ip != nil { + // We only match IP addresses against IP SANs. + // https://tools.ietf.org/html/rfc6125#appendix-B.2 + for _, candidate := range c.IPAddresses { + if ip.Equal(candidate) { + return nil + } + } + return HostnameError{c, candidateIP} + } + + lowered := toLowerCaseASCII(h) + + if len(c.DNSNames) > 0 { + for _, match := range c.DNSNames { + if matchHostnames(toLowerCaseASCII(match), lowered) { + return nil + } + } + // If Subject Alt Name is given, we ignore the common name. + } else if matchHostnames(toLowerCaseASCII(c.Subject.CommonName), lowered) { + return nil + } + + return HostnameError{c, h} +} + +func checkChainForKeyUsage(chain []*Certificate, keyUsages []ExtKeyUsage) bool { + usages := make([]ExtKeyUsage, len(keyUsages)) + copy(usages, keyUsages) + + if len(chain) == 0 { + return false + } + + usagesRemaining := len(usages) + + // We walk down the list and cross out any usages that aren't supported + // by each certificate. If we cross out all the usages, then the chain + // is unacceptable. + + for i := len(chain) - 1; i >= 0; i-- { + cert := chain[i] + if len(cert.ExtKeyUsage) == 0 && len(cert.UnknownExtKeyUsage) == 0 { + // The certificate doesn't have any extended key usage specified. + continue + } + + for _, usage := range cert.ExtKeyUsage { + if usage == ExtKeyUsageAny { + // The certificate is explicitly good for any usage. + continue + } + } + + const invalidUsage ExtKeyUsage = -1 + + NextRequestedUsage: + for i, requestedUsage := range usages { + if requestedUsage == invalidUsage { + continue + } + + for _, usage := range cert.ExtKeyUsage { + if requestedUsage == usage { + continue NextRequestedUsage + } else if requestedUsage == ExtKeyUsageServerAuth && + (usage == ExtKeyUsageNetscapeServerGatedCrypto || + usage == ExtKeyUsageMicrosoftServerGatedCrypto) { + // In order to support COMODO + // certificate chains, we have to + // accept Netscape or Microsoft SGC + // usages as equal to ServerAuth. + continue NextRequestedUsage + } + } + + usages[i] = invalidUsage + usagesRemaining-- + if usagesRemaining == 0 { + return false + } + } + } + + return true +} diff --git a/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/x509.go b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/x509.go new file mode 100644 index 000000000..df0823b9b --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/x509.go @@ -0,0 +1,1622 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package x509 parses X.509-encoded keys and certificates. +// +// START CT CHANGES +// This is a fork of the go library crypto/x509 package, it's more relaxed +// about certificates that it'll accept, and exports the TBSCertificate +// structure. 
+// END CT CHANGES +package x509 + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/sha1" + // START CT CHANGES + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/asn1" + "github.com/letsencrypt/boulder/Godeps/_workspace/src/github.com/google/certificate-transparency/go/x509/pkix" + // END CT CHANGES + "encoding/pem" + "errors" + // START CT CHANGES + "fmt" + // END CT CHANGES + "io" + "math/big" + "net" + "time" +) + +// pkixPublicKey reflects a PKIX public key structure. See SubjectPublicKeyInfo +// in RFC 3280. +type pkixPublicKey struct { + Algo pkix.AlgorithmIdentifier + BitString asn1.BitString +} + +// ParsePKIXPublicKey parses a DER encoded public key. These values are +// typically found in PEM blocks with "BEGIN PUBLIC KEY". +func ParsePKIXPublicKey(derBytes []byte) (pub interface{}, err error) { + var pki publicKeyInfo + if _, err = asn1.Unmarshal(derBytes, &pki); err != nil { + return + } + algo := getPublicKeyAlgorithmFromOID(pki.Algorithm.Algorithm) + if algo == UnknownPublicKeyAlgorithm { + return nil, errors.New("x509: unknown public key algorithm") + } + return parsePublicKey(algo, &pki) +} + +func marshalPublicKey(pub interface{}) (publicKeyBytes []byte, publicKeyAlgorithm pkix.AlgorithmIdentifier, err error) { + switch pub := pub.(type) { + case *rsa.PublicKey: + publicKeyBytes, err = asn1.Marshal(rsaPublicKey{ + N: pub.N, + E: pub.E, + }) + publicKeyAlgorithm.Algorithm = oidPublicKeyRSA + // This is a NULL parameters value which is technically + // superfluous, but most other code includes it and, by + // doing this, we match their public key hashes. + publicKeyAlgorithm.Parameters = asn1.RawValue{ + Tag: 5, + } + case *ecdsa.PublicKey: + publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) + oid, ok := oidFromNamedCurve(pub.Curve) + if !ok { + return nil, pkix.AlgorithmIdentifier{}, errors.New("x509: unsupported elliptic curve") + } + publicKeyAlgorithm.Algorithm = oidPublicKeyECDSA + var paramBytes []byte + paramBytes, err = asn1.Marshal(oid) + if err != nil { + return + } + publicKeyAlgorithm.Parameters.FullBytes = paramBytes + default: + return nil, pkix.AlgorithmIdentifier{}, errors.New("x509: only RSA and ECDSA public keys supported") + } + + return publicKeyBytes, publicKeyAlgorithm, nil +} + +// MarshalPKIXPublicKey serialises a public key to DER-encoded PKIX format. 
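+//
+// A minimal illustrative sketch, assuming pub is an *rsa.PublicKey or
+// *ecdsa.PublicKey obtained elsewhere; error handling is elided for brevity:
+//
+//	der, _ := MarshalPKIXPublicKey(pub)
+//	parsed, _ := ParsePKIXPublicKey(der)
+//	// parsed is an interface{} holding the same concrete key type as pub.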
+func MarshalPKIXPublicKey(pub interface{}) ([]byte, error) { + var publicKeyBytes []byte + var publicKeyAlgorithm pkix.AlgorithmIdentifier + var err error + + if publicKeyBytes, publicKeyAlgorithm, err = marshalPublicKey(pub); err != nil { + return nil, err + } + + pkix := pkixPublicKey{ + Algo: publicKeyAlgorithm, + BitString: asn1.BitString{ + Bytes: publicKeyBytes, + BitLength: 8 * len(publicKeyBytes), + }, + } + + ret, _ := asn1.Marshal(pkix) + return ret, nil +} + +// These structures reflect the ASN.1 structure of X.509 certificates.: + +type certificate struct { + Raw asn1.RawContent + TBSCertificate tbsCertificate + SignatureAlgorithm pkix.AlgorithmIdentifier + SignatureValue asn1.BitString +} + +type tbsCertificate struct { + Raw asn1.RawContent + Version int `asn1:"optional,explicit,default:1,tag:0"` + SerialNumber *big.Int + SignatureAlgorithm pkix.AlgorithmIdentifier + Issuer asn1.RawValue + Validity validity + Subject asn1.RawValue + PublicKey publicKeyInfo + UniqueId asn1.BitString `asn1:"optional,tag:1"` + SubjectUniqueId asn1.BitString `asn1:"optional,tag:2"` + Extensions []pkix.Extension `asn1:"optional,explicit,tag:3"` +} + +type dsaAlgorithmParameters struct { + P, Q, G *big.Int +} + +type dsaSignature struct { + R, S *big.Int +} + +type ecdsaSignature dsaSignature + +type validity struct { + NotBefore, NotAfter time.Time +} + +type publicKeyInfo struct { + Raw asn1.RawContent + Algorithm pkix.AlgorithmIdentifier + PublicKey asn1.BitString +} + +// RFC 5280, 4.2.1.1 +type authKeyId struct { + Id []byte `asn1:"optional,tag:0"` +} + +type SignatureAlgorithm int + +const ( + UnknownSignatureAlgorithm SignatureAlgorithm = iota + MD2WithRSA + MD5WithRSA + SHA1WithRSA + SHA256WithRSA + SHA384WithRSA + SHA512WithRSA + DSAWithSHA1 + DSAWithSHA256 + ECDSAWithSHA1 + ECDSAWithSHA256 + ECDSAWithSHA384 + ECDSAWithSHA512 +) + +type PublicKeyAlgorithm int + +const ( + UnknownPublicKeyAlgorithm PublicKeyAlgorithm = iota + RSA + DSA + ECDSA +) + +// OIDs for signature algorithms +// +// pkcs-1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 1 } +// +// +// RFC 3279 2.2.1 RSA Signature Algorithms +// +// md2WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 2 } +// +// md5WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 4 } +// +// sha-1WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 5 } +// +// dsaWithSha1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) x9-57(10040) x9cm(4) 3 } +// +// RFC 3279 2.2.3 ECDSA Signature Algorithm +// +// ecdsa-with-SHA1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) ansi-x962(10045) +// signatures(4) ecdsa-with-SHA1(1)} +// +// +// RFC 4055 5 PKCS #1 Version 1.5 +// +// sha256WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 11 } +// +// sha384WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 12 } +// +// sha512WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 13 } +// +// +// RFC 5758 3.1 DSA Signature Algorithms +// +// dsaWithSha256 OBJECT IDENTIFIER ::= { +// joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101) +// csor(3) algorithms(4) id-dsa-with-sha2(3) 2} +// +// RFC 5758 3.2 ECDSA Signature Algorithm +// +// ecdsa-with-SHA256 OBJECT IDENTIFIER ::= { iso(1) member-body(2) +// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 2 } +// +// ecdsa-with-SHA384 OBJECT IDENTIFIER ::= { iso(1) member-body(2) +// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 3 } +// +// ecdsa-with-SHA512 OBJECT IDENTIFIER ::= { iso(1) member-body(2) +// us(840) ansi-X9-62(10045) signatures(4) 
ecdsa-with-SHA2(3) 4 } + +var ( + oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2} + oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} + oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} + oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} + oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} + oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} + oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} + oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 4, 3, 2} + oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} + oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} + oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} + oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} +) + +func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) SignatureAlgorithm { + switch { + case oid.Equal(oidSignatureMD2WithRSA): + return MD2WithRSA + case oid.Equal(oidSignatureMD5WithRSA): + return MD5WithRSA + case oid.Equal(oidSignatureSHA1WithRSA): + return SHA1WithRSA + case oid.Equal(oidSignatureSHA256WithRSA): + return SHA256WithRSA + case oid.Equal(oidSignatureSHA384WithRSA): + return SHA384WithRSA + case oid.Equal(oidSignatureSHA512WithRSA): + return SHA512WithRSA + case oid.Equal(oidSignatureDSAWithSHA1): + return DSAWithSHA1 + case oid.Equal(oidSignatureDSAWithSHA256): + return DSAWithSHA256 + case oid.Equal(oidSignatureECDSAWithSHA1): + return ECDSAWithSHA1 + case oid.Equal(oidSignatureECDSAWithSHA256): + return ECDSAWithSHA256 + case oid.Equal(oidSignatureECDSAWithSHA384): + return ECDSAWithSHA384 + case oid.Equal(oidSignatureECDSAWithSHA512): + return ECDSAWithSHA512 + } + return UnknownSignatureAlgorithm +} + +// RFC 3279, 2.3 Public Key Algorithms +// +// pkcs-1 OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840) +// rsadsi(113549) pkcs(1) 1 } +// +// rsaEncryption OBJECT IDENTIFIER ::== { pkcs1-1 1 } +// +// id-dsa OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840) +// x9-57(10040) x9cm(4) 1 } +// +// RFC 5480, 2.1.1 Unrestricted Algorithm Identifier and Parameters +// +// id-ecPublicKey OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 } +var ( + oidPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} + oidPublicKeyDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1} + oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1} +) + +func getPublicKeyAlgorithmFromOID(oid asn1.ObjectIdentifier) PublicKeyAlgorithm { + switch { + case oid.Equal(oidPublicKeyRSA): + return RSA + case oid.Equal(oidPublicKeyDSA): + return DSA + case oid.Equal(oidPublicKeyECDSA): + return ECDSA + } + return UnknownPublicKeyAlgorithm +} + +// RFC 5480, 2.1.1.1. 
Named Curve +// +// secp224r1 OBJECT IDENTIFIER ::= { +// iso(1) identified-organization(3) certicom(132) curve(0) 33 } +// +// secp256r1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3) +// prime(1) 7 } +// +// secp384r1 OBJECT IDENTIFIER ::= { +// iso(1) identified-organization(3) certicom(132) curve(0) 34 } +// +// secp521r1 OBJECT IDENTIFIER ::= { +// iso(1) identified-organization(3) certicom(132) curve(0) 35 } +// +// NB: secp256r1 is equivalent to prime256v1 +var ( + oidNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33} + oidNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7} + oidNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34} + oidNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35} +) + +func namedCurveFromOID(oid asn1.ObjectIdentifier) elliptic.Curve { + switch { + case oid.Equal(oidNamedCurveP224): + return elliptic.P224() + case oid.Equal(oidNamedCurveP256): + return elliptic.P256() + case oid.Equal(oidNamedCurveP384): + return elliptic.P384() + case oid.Equal(oidNamedCurveP521): + return elliptic.P521() + } + return nil +} + +func oidFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) { + switch curve { + case elliptic.P224(): + return oidNamedCurveP224, true + case elliptic.P256(): + return oidNamedCurveP256, true + case elliptic.P384(): + return oidNamedCurveP384, true + case elliptic.P521(): + return oidNamedCurveP521, true + } + + return nil, false +} + +// KeyUsage represents the set of actions that are valid for a given key. It's +// a bitmap of the KeyUsage* constants. +type KeyUsage int + +const ( + KeyUsageDigitalSignature KeyUsage = 1 << iota + KeyUsageContentCommitment + KeyUsageKeyEncipherment + KeyUsageDataEncipherment + KeyUsageKeyAgreement + KeyUsageCertSign + KeyUsageCRLSign + KeyUsageEncipherOnly + KeyUsageDecipherOnly +) + +// RFC 5280, 4.2.1.12 Extended Key Usage +// +// anyExtendedKeyUsage OBJECT IDENTIFIER ::= { id-ce-extKeyUsage 0 } +// +// id-kp OBJECT IDENTIFIER ::= { id-pkix 3 } +// +// id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 } +// id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 } +// id-kp-codeSigning OBJECT IDENTIFIER ::= { id-kp 3 } +// id-kp-emailProtection OBJECT IDENTIFIER ::= { id-kp 4 } +// id-kp-timeStamping OBJECT IDENTIFIER ::= { id-kp 8 } +// id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 } +var ( + oidExtKeyUsageAny = asn1.ObjectIdentifier{2, 5, 29, 37, 0} + oidExtKeyUsageServerAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 1} + oidExtKeyUsageClientAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 2} + oidExtKeyUsageCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 3} + oidExtKeyUsageEmailProtection = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 4} + oidExtKeyUsageIPSECEndSystem = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 5} + oidExtKeyUsageIPSECTunnel = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 6} + oidExtKeyUsageIPSECUser = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 7} + oidExtKeyUsageTimeStamping = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 8} + oidExtKeyUsageOCSPSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 9} + oidExtKeyUsageMicrosoftServerGatedCrypto = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 3} + oidExtKeyUsageNetscapeServerGatedCrypto = asn1.ObjectIdentifier{2, 16, 840, 1, 113730, 4, 1} +) + +// ExtKeyUsage represents an extended set of actions that are valid for a given key. +// Each of the ExtKeyUsage* constants define a unique action. 
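+//
+// As an illustrative sketch, the unexported helpers defined below translate
+// between these constants and their OIDs:
+//
+//	oid, ok := oidFromExtKeyUsage(ExtKeyUsageServerAuth) // 1.3.6.1.5.5.7.3.1, ok == true
+//	eku, ok := extKeyUsageFromOID(oid)                   // ExtKeyUsageServerAuth, ok == true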
+type ExtKeyUsage int + +const ( + ExtKeyUsageAny ExtKeyUsage = iota + ExtKeyUsageServerAuth + ExtKeyUsageClientAuth + ExtKeyUsageCodeSigning + ExtKeyUsageEmailProtection + ExtKeyUsageIPSECEndSystem + ExtKeyUsageIPSECTunnel + ExtKeyUsageIPSECUser + ExtKeyUsageTimeStamping + ExtKeyUsageOCSPSigning + ExtKeyUsageMicrosoftServerGatedCrypto + ExtKeyUsageNetscapeServerGatedCrypto +) + +// extKeyUsageOIDs contains the mapping between an ExtKeyUsage and its OID. +var extKeyUsageOIDs = []struct { + extKeyUsage ExtKeyUsage + oid asn1.ObjectIdentifier +}{ + {ExtKeyUsageAny, oidExtKeyUsageAny}, + {ExtKeyUsageServerAuth, oidExtKeyUsageServerAuth}, + {ExtKeyUsageClientAuth, oidExtKeyUsageClientAuth}, + {ExtKeyUsageCodeSigning, oidExtKeyUsageCodeSigning}, + {ExtKeyUsageEmailProtection, oidExtKeyUsageEmailProtection}, + {ExtKeyUsageIPSECEndSystem, oidExtKeyUsageIPSECEndSystem}, + {ExtKeyUsageIPSECTunnel, oidExtKeyUsageIPSECTunnel}, + {ExtKeyUsageIPSECUser, oidExtKeyUsageIPSECUser}, + {ExtKeyUsageTimeStamping, oidExtKeyUsageTimeStamping}, + {ExtKeyUsageOCSPSigning, oidExtKeyUsageOCSPSigning}, + {ExtKeyUsageMicrosoftServerGatedCrypto, oidExtKeyUsageMicrosoftServerGatedCrypto}, + {ExtKeyUsageNetscapeServerGatedCrypto, oidExtKeyUsageNetscapeServerGatedCrypto}, +} + +func extKeyUsageFromOID(oid asn1.ObjectIdentifier) (eku ExtKeyUsage, ok bool) { + for _, pair := range extKeyUsageOIDs { + if oid.Equal(pair.oid) { + return pair.extKeyUsage, true + } + } + return +} + +func oidFromExtKeyUsage(eku ExtKeyUsage) (oid asn1.ObjectIdentifier, ok bool) { + for _, pair := range extKeyUsageOIDs { + if eku == pair.extKeyUsage { + return pair.oid, true + } + } + return +} + +// A Certificate represents an X.509 certificate. +type Certificate struct { + Raw []byte // Complete ASN.1 DER content (certificate, signature algorithm and signature). + RawTBSCertificate []byte // Certificate part of raw ASN.1 DER content. + RawSubjectPublicKeyInfo []byte // DER encoded SubjectPublicKeyInfo. + RawSubject []byte // DER encoded Subject + RawIssuer []byte // DER encoded Issuer + + Signature []byte + SignatureAlgorithm SignatureAlgorithm + + PublicKeyAlgorithm PublicKeyAlgorithm + PublicKey interface{} + + Version int + SerialNumber *big.Int + Issuer pkix.Name + Subject pkix.Name + NotBefore, NotAfter time.Time // Validity bounds. + KeyUsage KeyUsage + + // Extensions contains raw X.509 extensions. When parsing certificates, + // this can be used to extract non-critical extensions that are not + // parsed by this package. When marshaling certificates, the Extensions + // field is ignored, see ExtraExtensions. + Extensions []pkix.Extension + + // ExtraExtensions contains extensions to be copied, raw, into any + // marshaled certificates. Values override any extensions that would + // otherwise be produced based on the other fields. The ExtraExtensions + // field is not populated when parsing certificates, see Extensions. + ExtraExtensions []pkix.Extension + + ExtKeyUsage []ExtKeyUsage // Sequence of extended key usages. + UnknownExtKeyUsage []asn1.ObjectIdentifier // Encountered extended key usages unknown to this package. + + BasicConstraintsValid bool // if true then the next two fields are valid. 
+ IsCA bool + MaxPathLen int + + SubjectKeyId []byte + AuthorityKeyId []byte + + // RFC 5280, 4.2.2.1 (Authority Information Access) + OCSPServer []string + IssuingCertificateURL []string + + // Subject Alternate Name values + DNSNames []string + EmailAddresses []string + IPAddresses []net.IP + + // Name constraints + PermittedDNSDomainsCritical bool // if true then the name constraints are marked critical. + PermittedDNSDomains []string + + // CRL Distribution Points + CRLDistributionPoints []string + + PolicyIdentifiers []asn1.ObjectIdentifier +} + +// ErrUnsupportedAlgorithm results from attempting to perform an operation that +// involves algorithms that are not currently implemented. +var ErrUnsupportedAlgorithm = errors.New("x509: cannot verify signature: algorithm unimplemented") + +// ConstraintViolationError results when a requested usage is not permitted by +// a certificate. For example: checking a signature when the public key isn't a +// certificate signing key. +type ConstraintViolationError struct{} + +func (ConstraintViolationError) Error() string { + return "x509: invalid signature: parent certificate cannot sign this kind of certificate" +} + +func (c *Certificate) Equal(other *Certificate) bool { + return bytes.Equal(c.Raw, other.Raw) +} + +// Entrust have a broken root certificate (CN=Entrust.net Certification +// Authority (2048)) which isn't marked as a CA certificate and is thus invalid +// according to PKIX. +// We recognise this certificate by its SubjectPublicKeyInfo and exempt it +// from the Basic Constraints requirement. +// See http://www.entrust.net/knowledge-base/technote.cfm?tn=7869 +// +// TODO(agl): remove this hack once their reissued root is sufficiently +// widespread. +var entrustBrokenSPKI = []byte{ + 0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, + 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, + 0x00, 0x97, 0xa3, 0x2d, 0x3c, 0x9e, 0xde, 0x05, + 0xda, 0x13, 0xc2, 0x11, 0x8d, 0x9d, 0x8e, 0xe3, + 0x7f, 0xc7, 0x4b, 0x7e, 0x5a, 0x9f, 0xb3, 0xff, + 0x62, 0xab, 0x73, 0xc8, 0x28, 0x6b, 0xba, 0x10, + 0x64, 0x82, 0x87, 0x13, 0xcd, 0x57, 0x18, 0xff, + 0x28, 0xce, 0xc0, 0xe6, 0x0e, 0x06, 0x91, 0x50, + 0x29, 0x83, 0xd1, 0xf2, 0xc3, 0x2a, 0xdb, 0xd8, + 0xdb, 0x4e, 0x04, 0xcc, 0x00, 0xeb, 0x8b, 0xb6, + 0x96, 0xdc, 0xbc, 0xaa, 0xfa, 0x52, 0x77, 0x04, + 0xc1, 0xdb, 0x19, 0xe4, 0xae, 0x9c, 0xfd, 0x3c, + 0x8b, 0x03, 0xef, 0x4d, 0xbc, 0x1a, 0x03, 0x65, + 0xf9, 0xc1, 0xb1, 0x3f, 0x72, 0x86, 0xf2, 0x38, + 0xaa, 0x19, 0xae, 0x10, 0x88, 0x78, 0x28, 0xda, + 0x75, 0xc3, 0x3d, 0x02, 0x82, 0x02, 0x9c, 0xb9, + 0xc1, 0x65, 0x77, 0x76, 0x24, 0x4c, 0x98, 0xf7, + 0x6d, 0x31, 0x38, 0xfb, 0xdb, 0xfe, 0xdb, 0x37, + 0x02, 0x76, 0xa1, 0x18, 0x97, 0xa6, 0xcc, 0xde, + 0x20, 0x09, 0x49, 0x36, 0x24, 0x69, 0x42, 0xf6, + 0xe4, 0x37, 0x62, 0xf1, 0x59, 0x6d, 0xa9, 0x3c, + 0xed, 0x34, 0x9c, 0xa3, 0x8e, 0xdb, 0xdc, 0x3a, + 0xd7, 0xf7, 0x0a, 0x6f, 0xef, 0x2e, 0xd8, 0xd5, + 0x93, 0x5a, 0x7a, 0xed, 0x08, 0x49, 0x68, 0xe2, + 0x41, 0xe3, 0x5a, 0x90, 0xc1, 0x86, 0x55, 0xfc, + 0x51, 0x43, 0x9d, 0xe0, 0xb2, 0xc4, 0x67, 0xb4, + 0xcb, 0x32, 0x31, 0x25, 0xf0, 0x54, 0x9f, 0x4b, + 0xd1, 0x6f, 0xdb, 0xd4, 0xdd, 0xfc, 0xaf, 0x5e, + 0x6c, 0x78, 0x90, 0x95, 0xde, 0xca, 0x3a, 0x48, + 0xb9, 0x79, 0x3c, 0x9b, 0x19, 0xd6, 0x75, 0x05, + 0xa0, 0xf9, 0x88, 0xd7, 0xc1, 0xe8, 0xa5, 0x09, + 0xe4, 0x1a, 0x15, 0xdc, 0x87, 0x23, 0xaa, 0xb2, + 0x75, 0x8c, 0x63, 0x25, 0x87, 0xd8, 0xf8, 0x3d, + 0xa6, 0xc2, 0xcc, 0x66, 0xff, 
0xa5, 0x66, 0x68, + 0x55, 0x02, 0x03, 0x01, 0x00, 0x01, +} + +// CheckSignatureFrom verifies that the signature on c is a valid signature +// from parent. +func (c *Certificate) CheckSignatureFrom(parent *Certificate) (err error) { + // RFC 5280, 4.2.1.9: + // "If the basic constraints extension is not present in a version 3 + // certificate, or the extension is present but the cA boolean is not + // asserted, then the certified public key MUST NOT be used to verify + // certificate signatures." + // (except for Entrust, see comment above entrustBrokenSPKI) + if (parent.Version == 3 && !parent.BasicConstraintsValid || + parent.BasicConstraintsValid && !parent.IsCA) && + !bytes.Equal(c.RawSubjectPublicKeyInfo, entrustBrokenSPKI) { + return ConstraintViolationError{} + } + + if parent.KeyUsage != 0 && parent.KeyUsage&KeyUsageCertSign == 0 { + return ConstraintViolationError{} + } + + if parent.PublicKeyAlgorithm == UnknownPublicKeyAlgorithm { + return ErrUnsupportedAlgorithm + } + + // TODO(agl): don't ignore the path length constraint. + + return parent.CheckSignature(c.SignatureAlgorithm, c.RawTBSCertificate, c.Signature) +} + +// CheckSignature verifies that signature is a valid signature over signed from +// c's public key. +func (c *Certificate) CheckSignature(algo SignatureAlgorithm, signed, signature []byte) (err error) { + var hashType crypto.Hash + + switch algo { + case SHA1WithRSA, DSAWithSHA1, ECDSAWithSHA1: + hashType = crypto.SHA1 + case SHA256WithRSA, DSAWithSHA256, ECDSAWithSHA256: + hashType = crypto.SHA256 + case SHA384WithRSA, ECDSAWithSHA384: + hashType = crypto.SHA384 + case SHA512WithRSA, ECDSAWithSHA512: + hashType = crypto.SHA512 + default: + return ErrUnsupportedAlgorithm + } + + if !hashType.Available() { + return ErrUnsupportedAlgorithm + } + h := hashType.New() + + h.Write(signed) + digest := h.Sum(nil) + + switch pub := c.PublicKey.(type) { + case *rsa.PublicKey: + return rsa.VerifyPKCS1v15(pub, hashType, digest, signature) + case *dsa.PublicKey: + dsaSig := new(dsaSignature) + if _, err := asn1.Unmarshal(signature, dsaSig); err != nil { + return err + } + if dsaSig.R.Sign() <= 0 || dsaSig.S.Sign() <= 0 { + return errors.New("x509: DSA signature contained zero or negative values") + } + if !dsa.Verify(pub, digest, dsaSig.R, dsaSig.S) { + return errors.New("x509: DSA verification failure") + } + return + case *ecdsa.PublicKey: + ecdsaSig := new(ecdsaSignature) + if _, err := asn1.Unmarshal(signature, ecdsaSig); err != nil { + return err + } + if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 { + return errors.New("x509: ECDSA signature contained zero or negative values") + } + if !ecdsa.Verify(pub, digest, ecdsaSig.R, ecdsaSig.S) { + return errors.New("x509: ECDSA verification failure") + } + return + } + return ErrUnsupportedAlgorithm +} + +// CheckCRLSignature checks that the signature in crl is from c. 
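+//
+// A minimal illustrative sketch, assuming issuer is the *Certificate that
+// signed the CRL and crlBytes holds PEM- or DER-encoded CRL data fetched from
+// one of its distribution points:
+//
+//	crl, err := ParseCRL(crlBytes)
+//	if err == nil {
+//		err = issuer.CheckCRLSignature(crl)
+//	}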
+func (c *Certificate) CheckCRLSignature(crl *pkix.CertificateList) (err error) { + algo := getSignatureAlgorithmFromOID(crl.SignatureAlgorithm.Algorithm) + return c.CheckSignature(algo, crl.TBSCertList.Raw, crl.SignatureValue.RightAlign()) +} + +// START CT CHANGES +type UnhandledCriticalExtension struct { + ID asn1.ObjectIdentifier +} + +func (h UnhandledCriticalExtension) Error() string { + return fmt.Sprintf("x509: unhandled critical extension (%v)", h.ID) +} + +// END CT CHANGES + +type basicConstraints struct { + IsCA bool `asn1:"optional"` + MaxPathLen int `asn1:"optional,default:-1"` +} + +// RFC 5280 4.2.1.4 +type policyInformation struct { + Policy asn1.ObjectIdentifier + // policyQualifiers omitted +} + +// RFC 5280, 4.2.1.10 +type nameConstraints struct { + Permitted []generalSubtree `asn1:"optional,tag:0"` + Excluded []generalSubtree `asn1:"optional,tag:1"` +} + +type generalSubtree struct { + Name string `asn1:"tag:2,optional,ia5"` +} + +// RFC 5280, 4.2.2.1 +type authorityInfoAccess struct { + Method asn1.ObjectIdentifier + Location asn1.RawValue +} + +// RFC 5280, 4.2.1.14 +type distributionPoint struct { + DistributionPoint distributionPointName `asn1:"optional,tag:0"` + Reason asn1.BitString `asn1:"optional,tag:1"` + CRLIssuer asn1.RawValue `asn1:"optional,tag:2"` +} + +type distributionPointName struct { + FullName asn1.RawValue `asn1:"optional,tag:0"` + RelativeName pkix.RDNSequence `asn1:"optional,tag:1"` +} + +func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (interface{}, error) { + asn1Data := keyData.PublicKey.RightAlign() + switch algo { + case RSA: + p := new(rsaPublicKey) + _, err := asn1.Unmarshal(asn1Data, p) + if err != nil { + return nil, err + } + + if p.N.Sign() <= 0 { + return nil, errors.New("x509: RSA modulus is not a positive number") + } + if p.E <= 0 { + return nil, errors.New("x509: RSA public exponent is not a positive number") + } + + pub := &rsa.PublicKey{ + E: p.E, + N: p.N, + } + return pub, nil + case DSA: + var p *big.Int + _, err := asn1.Unmarshal(asn1Data, &p) + if err != nil { + return nil, err + } + paramsData := keyData.Algorithm.Parameters.FullBytes + params := new(dsaAlgorithmParameters) + _, err = asn1.Unmarshal(paramsData, params) + if err != nil { + return nil, err + } + if p.Sign() <= 0 || params.P.Sign() <= 0 || params.Q.Sign() <= 0 || params.G.Sign() <= 0 { + return nil, errors.New("x509: zero or negative DSA parameter") + } + pub := &dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: params.P, + Q: params.Q, + G: params.G, + }, + Y: p, + } + return pub, nil + case ECDSA: + paramsData := keyData.Algorithm.Parameters.FullBytes + namedCurveOID := new(asn1.ObjectIdentifier) + _, err := asn1.Unmarshal(paramsData, namedCurveOID) + if err != nil { + return nil, err + } + namedCurve := namedCurveFromOID(*namedCurveOID) + if namedCurve == nil { + return nil, errors.New("x509: unsupported elliptic curve") + } + x, y := elliptic.Unmarshal(namedCurve, asn1Data) + if x == nil { + return nil, errors.New("x509: failed to unmarshal elliptic curve point") + } + pub := &ecdsa.PublicKey{ + Curve: namedCurve, + X: x, + Y: y, + } + return pub, nil + default: + return nil, nil + } +} + +// START CT CHANGES + +// NonFatalErrors is an error type which can hold a number of other errors. +// It's used to collect a range of non-fatal errors which occur while parsing +// a certificate, that way we can still match on certs which technically are +// invalid. 
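+//
+// A minimal illustrative sketch: ParseCertificate can return both a usable
+// *Certificate and a NonFatalErrors value, so a caller that wants to tolerate
+// technically invalid certificates can single out that error type:
+//
+//	cert, err := ParseCertificate(der) // der is assumed to hold a DER certificate
+//	if nfe, ok := err.(NonFatalErrors); ok && cert != nil {
+//		// cert was parsed; nfe.Errors lists the problems encountered.
+//	}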
+type NonFatalErrors struct { + Errors []error +} + +// Adds an error to the list of errors contained by NonFatalErrors. +func (e *NonFatalErrors) AddError(err error) { + e.Errors = append(e.Errors, err) +} + +// Returns a string consisting of the values of Error() from all of the errors +// contained in |e| +func (e NonFatalErrors) Error() string { + r := "NonFatalErrors: " + for _, err := range e.Errors { + r += err.Error() + "; " + } + return r +} + +// Returns true if |e| contains at least one error +func (e *NonFatalErrors) HasError() bool { + return len(e.Errors) > 0 +} + +// END CT CHANGES + +func parseCertificate(in *certificate) (*Certificate, error) { + // START CT CHANGES + var nfe NonFatalErrors + // END CT CHANGES + + out := new(Certificate) + out.Raw = in.Raw + out.RawTBSCertificate = in.TBSCertificate.Raw + out.RawSubjectPublicKeyInfo = in.TBSCertificate.PublicKey.Raw + out.RawSubject = in.TBSCertificate.Subject.FullBytes + out.RawIssuer = in.TBSCertificate.Issuer.FullBytes + + out.Signature = in.SignatureValue.RightAlign() + out.SignatureAlgorithm = + getSignatureAlgorithmFromOID(in.TBSCertificate.SignatureAlgorithm.Algorithm) + + out.PublicKeyAlgorithm = + getPublicKeyAlgorithmFromOID(in.TBSCertificate.PublicKey.Algorithm.Algorithm) + var err error + out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCertificate.PublicKey) + if err != nil { + return nil, err + } + + if in.TBSCertificate.SerialNumber.Sign() < 0 { + // START CT CHANGES + nfe.AddError(errors.New("x509: negative serial number")) + // END CT CHANGES + } + + out.Version = in.TBSCertificate.Version + 1 + out.SerialNumber = in.TBSCertificate.SerialNumber + + var issuer, subject pkix.RDNSequence + if _, err := asn1.Unmarshal(in.TBSCertificate.Subject.FullBytes, &subject); err != nil { + return nil, err + } + if _, err := asn1.Unmarshal(in.TBSCertificate.Issuer.FullBytes, &issuer); err != nil { + return nil, err + } + + out.Issuer.FillFromRDNSequence(&issuer) + out.Subject.FillFromRDNSequence(&subject) + + out.NotBefore = in.TBSCertificate.Validity.NotBefore + out.NotAfter = in.TBSCertificate.Validity.NotAfter + + for _, e := range in.TBSCertificate.Extensions { + out.Extensions = append(out.Extensions, e) + + if len(e.Id) == 4 && e.Id[0] == 2 && e.Id[1] == 5 && e.Id[2] == 29 { + switch e.Id[3] { + case 15: + // RFC 5280, 4.2.1.3 + var usageBits asn1.BitString + _, err := asn1.Unmarshal(e.Value, &usageBits) + + if err == nil { + var usage int + for i := 0; i < 9; i++ { + if usageBits.At(i) != 0 { + usage |= 1 << uint(i) + } + } + out.KeyUsage = KeyUsage(usage) + continue + } + case 19: + // RFC 5280, 4.2.1.9 + var constraints basicConstraints + _, err := asn1.Unmarshal(e.Value, &constraints) + + if err == nil { + out.BasicConstraintsValid = true + out.IsCA = constraints.IsCA + out.MaxPathLen = constraints.MaxPathLen + continue + } + case 17: + // RFC 5280, 4.2.1.6 + + // SubjectAltName ::= GeneralNames + // + // GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName + // + // GeneralName ::= CHOICE { + // otherName [0] OtherName, + // rfc822Name [1] IA5String, + // dNSName [2] IA5String, + // x400Address [3] ORAddress, + // directoryName [4] Name, + // ediPartyName [5] EDIPartyName, + // uniformResourceIdentifier [6] IA5String, + // iPAddress [7] OCTET STRING, + // registeredID [8] OBJECT IDENTIFIER } + var seq asn1.RawValue + _, err := asn1.Unmarshal(e.Value, &seq) + if err != nil { + return nil, err + } + if !seq.IsCompound || seq.Tag != 16 || seq.Class != 0 { + return nil, 
asn1.StructuralError{Msg: "bad SAN sequence"} + } + + parsedName := false + + rest := seq.Bytes + for len(rest) > 0 { + var v asn1.RawValue + rest, err = asn1.Unmarshal(rest, &v) + if err != nil { + return nil, err + } + switch v.Tag { + case 1: + out.EmailAddresses = append(out.EmailAddresses, string(v.Bytes)) + parsedName = true + case 2: + out.DNSNames = append(out.DNSNames, string(v.Bytes)) + parsedName = true + case 7: + switch len(v.Bytes) { + case net.IPv4len, net.IPv6len: + out.IPAddresses = append(out.IPAddresses, v.Bytes) + default: + // START CT CHANGES + nfe.AddError(fmt.Errorf("x509: certificate contained IP address of length %d : %v", len(v.Bytes), v.Bytes)) + // END CT CHANGES + } + } + } + + if parsedName { + continue + } + // If we didn't parse any of the names then we + // fall through to the critical check below. + + case 30: + // RFC 5280, 4.2.1.10 + + // NameConstraints ::= SEQUENCE { + // permittedSubtrees [0] GeneralSubtrees OPTIONAL, + // excludedSubtrees [1] GeneralSubtrees OPTIONAL } + // + // GeneralSubtrees ::= SEQUENCE SIZE (1..MAX) OF GeneralSubtree + // + // GeneralSubtree ::= SEQUENCE { + // base GeneralName, + // minimum [0] BaseDistance DEFAULT 0, + // maximum [1] BaseDistance OPTIONAL } + // + // BaseDistance ::= INTEGER (0..MAX) + + var constraints nameConstraints + _, err := asn1.Unmarshal(e.Value, &constraints) + if err != nil { + return nil, err + } + + if len(constraints.Excluded) > 0 && e.Critical { + // START CT CHANGES + nfe.AddError(UnhandledCriticalExtension{e.Id}) + // END CT CHANGES + } + + for _, subtree := range constraints.Permitted { + if len(subtree.Name) == 0 { + if e.Critical { + // START CT CHANGES + nfe.AddError(UnhandledCriticalExtension{e.Id}) + // END CT CHANGES + } + continue + } + out.PermittedDNSDomains = append(out.PermittedDNSDomains, subtree.Name) + } + continue + + case 31: + // RFC 5280, 4.2.1.14 + + // CRLDistributionPoints ::= SEQUENCE SIZE (1..MAX) OF DistributionPoint + // + // DistributionPoint ::= SEQUENCE { + // distributionPoint [0] DistributionPointName OPTIONAL, + // reasons [1] ReasonFlags OPTIONAL, + // cRLIssuer [2] GeneralNames OPTIONAL } + // + // DistributionPointName ::= CHOICE { + // fullName [0] GeneralNames, + // nameRelativeToCRLIssuer [1] RelativeDistinguishedName } + + var cdp []distributionPoint + _, err := asn1.Unmarshal(e.Value, &cdp) + if err != nil { + return nil, err + } + + for _, dp := range cdp { + var n asn1.RawValue + _, err = asn1.Unmarshal(dp.DistributionPoint.FullName.Bytes, &n) + if err != nil { + return nil, err + } + + if n.Tag == 6 { + out.CRLDistributionPoints = append(out.CRLDistributionPoints, string(n.Bytes)) + } + } + continue + + case 35: + // RFC 5280, 4.2.1.1 + var a authKeyId + _, err = asn1.Unmarshal(e.Value, &a) + if err != nil { + return nil, err + } + out.AuthorityKeyId = a.Id + continue + + case 37: + // RFC 5280, 4.2.1.12. 
Extended Key Usage + + // id-ce-extKeyUsage OBJECT IDENTIFIER ::= { id-ce 37 } + // + // ExtKeyUsageSyntax ::= SEQUENCE SIZE (1..MAX) OF KeyPurposeId + // + // KeyPurposeId ::= OBJECT IDENTIFIER + + var keyUsage []asn1.ObjectIdentifier + _, err = asn1.Unmarshal(e.Value, &keyUsage) + if err != nil { + return nil, err + } + + for _, u := range keyUsage { + if extKeyUsage, ok := extKeyUsageFromOID(u); ok { + out.ExtKeyUsage = append(out.ExtKeyUsage, extKeyUsage) + } else { + out.UnknownExtKeyUsage = append(out.UnknownExtKeyUsage, u) + } + } + + continue + + case 14: + // RFC 5280, 4.2.1.2 + var keyid []byte + _, err = asn1.Unmarshal(e.Value, &keyid) + if err != nil { + return nil, err + } + out.SubjectKeyId = keyid + continue + + case 32: + // RFC 5280 4.2.1.4: Certificate Policies + var policies []policyInformation + if _, err = asn1.Unmarshal(e.Value, &policies); err != nil { + return nil, err + } + out.PolicyIdentifiers = make([]asn1.ObjectIdentifier, len(policies)) + for i, policy := range policies { + out.PolicyIdentifiers[i] = policy.Policy + } + } + } else if e.Id.Equal(oidExtensionAuthorityInfoAccess) { + // RFC 5280 4.2.2.1: Authority Information Access + var aia []authorityInfoAccess + if _, err = asn1.Unmarshal(e.Value, &aia); err != nil { + return nil, err + } + + for _, v := range aia { + // GeneralName: uniformResourceIdentifier [6] IA5String + if v.Location.Tag != 6 { + continue + } + if v.Method.Equal(oidAuthorityInfoAccessOcsp) { + out.OCSPServer = append(out.OCSPServer, string(v.Location.Bytes)) + } else if v.Method.Equal(oidAuthorityInfoAccessIssuers) { + out.IssuingCertificateURL = append(out.IssuingCertificateURL, string(v.Location.Bytes)) + } + } + } + + if e.Critical { + // START CT CHANGES + nfe.AddError(UnhandledCriticalExtension{e.Id}) + // END CT CHANGES + } + } + // START CT CHANGES + if nfe.HasError() { + return out, nfe + } + // END CT CHANGES + return out, nil +} + +// START CT CHANGES + +// ParseTBSCertificate parses a single TBSCertificate from the given ASN.1 DER data. +// The parsed data is returned in a Certificate struct for ease of access. +func ParseTBSCertificate(asn1Data []byte) (*Certificate, error) { + var tbsCert tbsCertificate + rest, err := asn1.Unmarshal(asn1Data, &tbsCert) + if err != nil { + return nil, err + } + if len(rest) > 0 { + return nil, asn1.SyntaxError{Msg: "trailing data"} + } + return parseCertificate(&certificate{ + Raw: tbsCert.Raw, + TBSCertificate: tbsCert}) +} + +// END CT CHANGES + +// ParseCertificate parses a single certificate from the given ASN.1 DER data. +func ParseCertificate(asn1Data []byte) (*Certificate, error) { + var cert certificate + rest, err := asn1.Unmarshal(asn1Data, &cert) + if err != nil { + return nil, err + } + if len(rest) > 0 { + return nil, asn1.SyntaxError{Msg: "trailing data"} + } + + return parseCertificate(&cert) +} + +// ParseCertificates parses one or more certificates from the given ASN.1 DER +// data. The certificates must be concatenated with no intermediate padding. 
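+//
+// A minimal illustrative sketch, assuming der1 and der2 hold DER-encoded
+// certificates (for example a leaf and its intermediate):
+//
+//	bundle := append(append([]byte{}, der1...), der2...)
+//	chain, err := ParseCertificates(bundle)
+//	// on success, chain[0] and chain[1] are the two parsed certificates.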
+func ParseCertificates(asn1Data []byte) ([]*Certificate, error) { + var v []*certificate + + for len(asn1Data) > 0 { + cert := new(certificate) + var err error + asn1Data, err = asn1.Unmarshal(asn1Data, cert) + if err != nil { + return nil, err + } + v = append(v, cert) + } + + ret := make([]*Certificate, len(v)) + for i, ci := range v { + cert, err := parseCertificate(ci) + if err != nil { + return nil, err + } + ret[i] = cert + } + + return ret, nil +} + +func reverseBitsInAByte(in byte) byte { + b1 := in>>4 | in<<4 + b2 := b1>>2&0x33 | b1<<2&0xcc + b3 := b2>>1&0x55 | b2<<1&0xaa + return b3 +} + +var ( + oidExtensionSubjectKeyId = []int{2, 5, 29, 14} + oidExtensionKeyUsage = []int{2, 5, 29, 15} + oidExtensionExtendedKeyUsage = []int{2, 5, 29, 37} + oidExtensionAuthorityKeyId = []int{2, 5, 29, 35} + oidExtensionBasicConstraints = []int{2, 5, 29, 19} + oidExtensionSubjectAltName = []int{2, 5, 29, 17} + oidExtensionCertificatePolicies = []int{2, 5, 29, 32} + oidExtensionNameConstraints = []int{2, 5, 29, 30} + oidExtensionCRLDistributionPoints = []int{2, 5, 29, 31} + oidExtensionAuthorityInfoAccess = []int{1, 3, 6, 1, 5, 5, 7, 1, 1} +) + +var ( + oidAuthorityInfoAccessOcsp = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1} + oidAuthorityInfoAccessIssuers = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 2} +) + +// oidNotInExtensions returns whether an extension with the given oid exists in +// extensions. +func oidInExtensions(oid asn1.ObjectIdentifier, extensions []pkix.Extension) bool { + for _, e := range extensions { + if e.Id.Equal(oid) { + return true + } + } + return false +} + +func buildExtensions(template *Certificate) (ret []pkix.Extension, err error) { + ret = make([]pkix.Extension, 10 /* maximum number of elements. */) + n := 0 + + if template.KeyUsage != 0 && + !oidInExtensions(oidExtensionKeyUsage, template.ExtraExtensions) { + ret[n].Id = oidExtensionKeyUsage + ret[n].Critical = true + + var a [2]byte + a[0] = reverseBitsInAByte(byte(template.KeyUsage)) + a[1] = reverseBitsInAByte(byte(template.KeyUsage >> 8)) + + l := 1 + if a[1] != 0 { + l = 2 + } + + ret[n].Value, err = asn1.Marshal(asn1.BitString{Bytes: a[0:l], BitLength: l * 8}) + if err != nil { + return + } + n++ + } + + if (len(template.ExtKeyUsage) > 0 || len(template.UnknownExtKeyUsage) > 0) && + !oidInExtensions(oidExtensionExtendedKeyUsage, template.ExtraExtensions) { + ret[n].Id = oidExtensionExtendedKeyUsage + + var oids []asn1.ObjectIdentifier + for _, u := range template.ExtKeyUsage { + if oid, ok := oidFromExtKeyUsage(u); ok { + oids = append(oids, oid) + } else { + panic("internal error") + } + } + + oids = append(oids, template.UnknownExtKeyUsage...) 
+ + ret[n].Value, err = asn1.Marshal(oids) + if err != nil { + return + } + n++ + } + + if template.BasicConstraintsValid && !oidInExtensions(oidExtensionBasicConstraints, template.ExtraExtensions) { + ret[n].Id = oidExtensionBasicConstraints + ret[n].Value, err = asn1.Marshal(basicConstraints{template.IsCA, template.MaxPathLen}) + ret[n].Critical = true + if err != nil { + return + } + n++ + } + + if len(template.SubjectKeyId) > 0 && !oidInExtensions(oidExtensionSubjectKeyId, template.ExtraExtensions) { + ret[n].Id = oidExtensionSubjectKeyId + ret[n].Value, err = asn1.Marshal(template.SubjectKeyId) + if err != nil { + return + } + n++ + } + + if len(template.AuthorityKeyId) > 0 && !oidInExtensions(oidExtensionAuthorityKeyId, template.ExtraExtensions) { + ret[n].Id = oidExtensionAuthorityKeyId + ret[n].Value, err = asn1.Marshal(authKeyId{template.AuthorityKeyId}) + if err != nil { + return + } + n++ + } + + if (len(template.OCSPServer) > 0 || len(template.IssuingCertificateURL) > 0) && + !oidInExtensions(oidExtensionAuthorityInfoAccess, template.ExtraExtensions) { + ret[n].Id = oidExtensionAuthorityInfoAccess + var aiaValues []authorityInfoAccess + for _, name := range template.OCSPServer { + aiaValues = append(aiaValues, authorityInfoAccess{ + Method: oidAuthorityInfoAccessOcsp, + Location: asn1.RawValue{Tag: 6, Class: 2, Bytes: []byte(name)}, + }) + } + for _, name := range template.IssuingCertificateURL { + aiaValues = append(aiaValues, authorityInfoAccess{ + Method: oidAuthorityInfoAccessIssuers, + Location: asn1.RawValue{Tag: 6, Class: 2, Bytes: []byte(name)}, + }) + } + ret[n].Value, err = asn1.Marshal(aiaValues) + if err != nil { + return + } + n++ + } + + if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0) && + !oidInExtensions(oidExtensionSubjectAltName, template.ExtraExtensions) { + ret[n].Id = oidExtensionSubjectAltName + var rawValues []asn1.RawValue + for _, name := range template.DNSNames { + rawValues = append(rawValues, asn1.RawValue{Tag: 2, Class: 2, Bytes: []byte(name)}) + } + for _, email := range template.EmailAddresses { + rawValues = append(rawValues, asn1.RawValue{Tag: 1, Class: 2, Bytes: []byte(email)}) + } + for _, rawIP := range template.IPAddresses { + // If possible, we always want to encode IPv4 addresses in 4 bytes. 
+ ip := rawIP.To4() + if ip == nil { + ip = rawIP + } + rawValues = append(rawValues, asn1.RawValue{Tag: 7, Class: 2, Bytes: ip}) + } + ret[n].Value, err = asn1.Marshal(rawValues) + if err != nil { + return + } + n++ + } + + if len(template.PolicyIdentifiers) > 0 && + !oidInExtensions(oidExtensionCertificatePolicies, template.ExtraExtensions) { + ret[n].Id = oidExtensionCertificatePolicies + policies := make([]policyInformation, len(template.PolicyIdentifiers)) + for i, policy := range template.PolicyIdentifiers { + policies[i].Policy = policy + } + ret[n].Value, err = asn1.Marshal(policies) + if err != nil { + return + } + n++ + } + + if len(template.PermittedDNSDomains) > 0 && + !oidInExtensions(oidExtensionNameConstraints, template.ExtraExtensions) { + ret[n].Id = oidExtensionNameConstraints + ret[n].Critical = template.PermittedDNSDomainsCritical + + var out nameConstraints + out.Permitted = make([]generalSubtree, len(template.PermittedDNSDomains)) + for i, permitted := range template.PermittedDNSDomains { + out.Permitted[i] = generalSubtree{Name: permitted} + } + ret[n].Value, err = asn1.Marshal(out) + if err != nil { + return + } + n++ + } + + if len(template.CRLDistributionPoints) > 0 && + !oidInExtensions(oidExtensionCRLDistributionPoints, template.ExtraExtensions) { + ret[n].Id = oidExtensionCRLDistributionPoints + + var crlDp []distributionPoint + for _, name := range template.CRLDistributionPoints { + rawFullName, _ := asn1.Marshal(asn1.RawValue{Tag: 6, Class: 2, Bytes: []byte(name)}) + + dp := distributionPoint{ + DistributionPoint: distributionPointName{ + FullName: asn1.RawValue{Tag: 0, Class: 2, Bytes: rawFullName}, + }, + } + crlDp = append(crlDp, dp) + } + + ret[n].Value, err = asn1.Marshal(crlDp) + if err != nil { + return + } + n++ + } + + // Adding another extension here? Remember to update the maximum number + // of elements in the make() at the top of the function. + + return append(ret[:n], template.ExtraExtensions...), nil +} + +func subjectBytes(cert *Certificate) ([]byte, error) { + if len(cert.RawSubject) > 0 { + return cert.RawSubject, nil + } + + return asn1.Marshal(cert.Subject.ToRDNSequence()) +} + +// CreateCertificate creates a new certificate based on a template. The +// following members of template are used: SerialNumber, Subject, NotBefore, +// NotAfter, KeyUsage, ExtKeyUsage, UnknownExtKeyUsage, BasicConstraintsValid, +// IsCA, MaxPathLen, SubjectKeyId, DNSNames, PermittedDNSDomainsCritical, +// PermittedDNSDomains. +// +// The certificate is signed by parent. If parent is equal to template then the +// certificate is self-signed. The parameter pub is the public key of the +// signee and priv is the private key of the signer. +// +// The returned slice is the certificate in DER encoding. +// +// The only supported key types are RSA and ECDSA (*rsa.PublicKey or +// *ecdsa.PublicKey for pub, *rsa.PrivateKey or *ecdsa.PublicKey for priv). 
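+//
+// A minimal illustrative self-signed sketch, assuming the caller imports
+// crypto/ecdsa, crypto/elliptic, crypto/rand, math/big, time and this
+// package's pkix, with error handling elided for brevity:
+//
+//	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+//	template := Certificate{
+//		SerialNumber:          big.NewInt(1),
+//		Subject:               pkix.Name{CommonName: "example.invalid"},
+//		NotBefore:             time.Now(),
+//		NotAfter:              time.Now().Add(24 * time.Hour),
+//		KeyUsage:              KeyUsageCertSign | KeyUsageDigitalSignature,
+//		BasicConstraintsValid: true,
+//		IsCA:                  true,
+//	}
+//	der, _ := CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)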
+func CreateCertificate(rand io.Reader, template, parent *Certificate, pub interface{}, priv interface{}) (cert []byte, err error) { + var publicKeyBytes []byte + var publicKeyAlgorithm pkix.AlgorithmIdentifier + + if publicKeyBytes, publicKeyAlgorithm, err = marshalPublicKey(pub); err != nil { + return nil, err + } + + var signatureAlgorithm pkix.AlgorithmIdentifier + var hashFunc crypto.Hash + + switch priv := priv.(type) { + case *rsa.PrivateKey: + signatureAlgorithm.Algorithm = oidSignatureSHA1WithRSA + hashFunc = crypto.SHA1 + case *ecdsa.PrivateKey: + switch priv.Curve { + case elliptic.P224(), elliptic.P256(): + hashFunc = crypto.SHA256 + signatureAlgorithm.Algorithm = oidSignatureECDSAWithSHA256 + case elliptic.P384(): + hashFunc = crypto.SHA384 + signatureAlgorithm.Algorithm = oidSignatureECDSAWithSHA384 + case elliptic.P521(): + hashFunc = crypto.SHA512 + signatureAlgorithm.Algorithm = oidSignatureECDSAWithSHA512 + default: + return nil, errors.New("x509: unknown elliptic curve") + } + default: + return nil, errors.New("x509: only RSA and ECDSA private keys supported") + } + + if err != nil { + return + } + + if len(parent.SubjectKeyId) > 0 { + template.AuthorityKeyId = parent.SubjectKeyId + } + + extensions, err := buildExtensions(template) + if err != nil { + return + } + + asn1Issuer, err := subjectBytes(parent) + if err != nil { + return + } + + asn1Subject, err := subjectBytes(template) + if err != nil { + return + } + + encodedPublicKey := asn1.BitString{BitLength: len(publicKeyBytes) * 8, Bytes: publicKeyBytes} + c := tbsCertificate{ + Version: 2, + SerialNumber: template.SerialNumber, + SignatureAlgorithm: signatureAlgorithm, + Issuer: asn1.RawValue{FullBytes: asn1Issuer}, + Validity: validity{template.NotBefore.UTC(), template.NotAfter.UTC()}, + Subject: asn1.RawValue{FullBytes: asn1Subject}, + PublicKey: publicKeyInfo{nil, publicKeyAlgorithm, encodedPublicKey}, + Extensions: extensions, + } + + tbsCertContents, err := asn1.Marshal(c) + if err != nil { + return + } + + c.Raw = tbsCertContents + + h := hashFunc.New() + h.Write(tbsCertContents) + digest := h.Sum(nil) + + var signature []byte + + switch priv := priv.(type) { + case *rsa.PrivateKey: + signature, err = rsa.SignPKCS1v15(rand, priv, hashFunc, digest) + case *ecdsa.PrivateKey: + var r, s *big.Int + if r, s, err = ecdsa.Sign(rand, priv, digest); err == nil { + signature, err = asn1.Marshal(ecdsaSignature{r, s}) + } + default: + panic("internal error") + } + + if err != nil { + return + } + + cert, err = asn1.Marshal(certificate{ + nil, + c, + signatureAlgorithm, + asn1.BitString{Bytes: signature, BitLength: len(signature) * 8}, + }) + return +} + +// pemCRLPrefix is the magic string that indicates that we have a PEM encoded +// CRL. +var pemCRLPrefix = []byte("-----BEGIN X509 CRL") + +// pemType is the type of a PEM encoded CRL. +var pemType = "X509 CRL" + +// ParseCRL parses a CRL from the given bytes. It's often the case that PEM +// encoded CRLs will appear where they should be DER encoded, so this function +// will transparently handle PEM encoding as long as there isn't any leading +// garbage. +func ParseCRL(crlBytes []byte) (certList *pkix.CertificateList, err error) { + if bytes.HasPrefix(crlBytes, pemCRLPrefix) { + block, _ := pem.Decode(crlBytes) + if block != nil && block.Type == pemType { + crlBytes = block.Bytes + } + } + return ParseDERCRL(crlBytes) +} + +// ParseDERCRL parses a DER encoded CRL from the given bytes. 
+func ParseDERCRL(derBytes []byte) (certList *pkix.CertificateList, err error) { + certList = new(pkix.CertificateList) + _, err = asn1.Unmarshal(derBytes, certList) + if err != nil { + certList = nil + } + return +} + +// CreateCRL returns a DER encoded CRL, signed by this Certificate, that +// contains the given list of revoked certificates. +// +// The only supported key type is RSA (*rsa.PrivateKey for priv). +func (c *Certificate) CreateCRL(rand io.Reader, priv interface{}, revokedCerts []pkix.RevokedCertificate, now, expiry time.Time) (crlBytes []byte, err error) { + rsaPriv, ok := priv.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("x509: non-RSA private keys not supported") + } + tbsCertList := pkix.TBSCertificateList{ + Version: 2, + Signature: pkix.AlgorithmIdentifier{ + Algorithm: oidSignatureSHA1WithRSA, + }, + Issuer: c.Subject.ToRDNSequence(), + ThisUpdate: now.UTC(), + NextUpdate: expiry.UTC(), + RevokedCertificates: revokedCerts, + } + + tbsCertListContents, err := asn1.Marshal(tbsCertList) + if err != nil { + return + } + + h := sha1.New() + h.Write(tbsCertListContents) + digest := h.Sum(nil) + + signature, err := rsa.SignPKCS1v15(rand, rsaPriv, crypto.SHA1, digest) + if err != nil { + return + } + + return asn1.Marshal(pkix.CertificateList{ + TBSCertList: tbsCertList, + SignatureAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: oidSignatureSHA1WithRSA, + }, + SignatureValue: asn1.BitString{Bytes: signature, BitLength: len(signature) * 8}, + }) +} diff --git a/Godeps/_workspace/src/github.com/mreiferson/go-httpclient/.gitignore b/Godeps/_workspace/src/github.com/mreiferson/go-httpclient/.gitignore new file mode 100644 index 000000000..dbec55fb6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mreiferson/go-httpclient/.gitignore @@ -0,0 +1 @@ +*.sw[op] diff --git a/Godeps/_workspace/src/github.com/mreiferson/go-httpclient/.travis.yml b/Godeps/_workspace/src/github.com/mreiferson/go-httpclient/.travis.yml new file mode 100644 index 000000000..ba1b6b7f9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mreiferson/go-httpclient/.travis.yml @@ -0,0 +1,11 @@ +language: go +go: + - 1.1 +install: + - go get github.com/bmizerany/assert +script: + - pushd $TRAVIS_BUILD_DIR + - go test + - popd +notifications: + email: false diff --git a/Godeps/_workspace/src/github.com/dgryski/go-rc2/LICENSE b/Godeps/_workspace/src/github.com/mreiferson/go-httpclient/LICENSE similarity index 85% rename from Godeps/_workspace/src/github.com/dgryski/go-rc2/LICENSE rename to Godeps/_workspace/src/github.com/mreiferson/go-httpclient/LICENSE index 039a2e4c0..89de35479 100644 --- a/Godeps/_workspace/src/github.com/dgryski/go-rc2/LICENSE +++ b/Godeps/_workspace/src/github.com/mreiferson/go-httpclient/LICENSE @@ -1,7 +1,3 @@ -The MIT License (MIT) - -Copyright (c) 2015 Damian Gryski - Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights @@ -9,13 +5,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/mreiferson/go-httpclient/README.md b/Godeps/_workspace/src/github.com/mreiferson/go-httpclient/README.md new file mode 100644 index 000000000..6d0dbff94 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mreiferson/go-httpclient/README.md @@ -0,0 +1,41 @@ +## go-httpclient + +**requires Go 1.1+** as of `v0.4.0` the API has been completely re-written for Go 1.1 (for a Go +1.0.x compatible release see [1adef50](https://github.com/mreiferson/go-httpclient/tree/1adef50)) + +[![Build +Status](https://secure.travis-ci.org/mreiferson/go-httpclient.png?branch=master)](http://travis-ci.org/mreiferson/go-httpclient) + +Provides an HTTP Transport that implements the `RoundTripper` interface and +can be used as a built in replacement for the standard library's, providing: + + * connection timeouts + * request timeouts + +This is a thin wrapper around `http.Transport` that sets dial timeouts and uses +Go's internal timer scheduler to call the Go 1.1+ `CancelRequest()` API. + +### Example + +```go +transport := &httpclient.Transport{ + ConnectTimeout: 1*time.Second, + RequestTimeout: 10*time.Second, + ResponseHeaderTimeout: 5*time.Second, +} +defer transport.Close() + +client := &http.Client{Transport: transport} +req, _ := http.NewRequest("GET", "http://127.0.0.1/test", nil) +resp, err := client.Do(req) +if err != nil { + return err +} +defer resp.Body.Close() +``` + +*Note:* you will want to re-use a single client object rather than creating one for each request, otherwise you will end up [leaking connections](https://code.google.com/p/go/issues/detail?id=4049#c3). + +### Reference Docs + +For API docs see [godoc](http://godoc.org/github.com/mreiferson/go-httpclient). diff --git a/Godeps/_workspace/src/github.com/mreiferson/go-httpclient/httpclient.go b/Godeps/_workspace/src/github.com/mreiferson/go-httpclient/httpclient.go new file mode 100644 index 000000000..89e018bff --- /dev/null +++ b/Godeps/_workspace/src/github.com/mreiferson/go-httpclient/httpclient.go @@ -0,0 +1,237 @@ +/* +Provides an HTTP Transport that implements the `RoundTripper` interface and +can be used as a built in replacement for the standard library's, providing: + + * connection timeouts + * request timeouts + +This is a thin wrapper around `http.Transport` that sets dial timeouts and uses +Go's internal timer scheduler to call the Go 1.1+ `CancelRequest()` API. +*/ +package httpclient + +import ( + "crypto/tls" + "errors" + "io" + "net" + "net/http" + "net/url" + "sync" + "time" +) + +// returns the current version of the package +func Version() string { + return "0.4.1" +} + +// Transport implements the RoundTripper interface and can be used as a replacement +// for Go's built in http.Transport implementing end-to-end request timeouts. 
+// +// transport := &httpclient.Transport{ +// ConnectTimeout: 1*time.Second, +// ResponseHeaderTimeout: 5*time.Second, +// RequestTimeout: 10*time.Second, +// } +// defer transport.Close() +// +// client := &http.Client{Transport: transport} +// req, _ := http.NewRequest("GET", "http://127.0.0.1/test", nil) +// resp, err := client.Do(req) +// if err != nil { +// return err +// } +// defer resp.Body.Close() +// +type Transport struct { + // Proxy specifies a function to return a proxy for a given + // *http.Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *url.URL, no proxy is used. + Proxy func(*http.Request) (*url.URL, error) + + // Dial specifies the dial function for creating TCP + // connections. This will override the Transport's ConnectTimeout and + // ReadWriteTimeout settings. + // If Dial is nil, a dialer is generated on demand matching the Transport's + // options. + Dial func(network, addr string) (net.Conn, error) + + // TLSClientConfig specifies the TLS configuration to use with + // tls.Client. If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // DisableKeepAlives, if true, prevents re-use of TCP connections + // between different HTTP requests. + DisableKeepAlives bool + + // DisableCompression, if true, prevents the Transport from + // requesting compression with an "Accept-Encoding: gzip" + // request header when the Request contains no existing + // Accept-Encoding value. If the Transport requests gzip on + // its own and gets a gzipped response, it's transparently + // decoded in the Response.Body. However, if the user + // explicitly requested gzip it is not automatically + // uncompressed. + DisableCompression bool + + // MaxIdleConnsPerHost, if non-zero, controls the maximum idle + // (keep-alive) to keep per-host. If zero, + // http.DefaultMaxIdleConnsPerHost is used. + MaxIdleConnsPerHost int + + // ConnectTimeout, if non-zero, is the maximum amount of time a dial will wait for + // a connect to complete. + ConnectTimeout time.Duration + + // ResponseHeaderTimeout, if non-zero, specifies the amount of + // time to wait for a server's response headers after fully + // writing the request (including its body, if any). This + // time does not include the time to read the response body. + ResponseHeaderTimeout time.Duration + + // RequestTimeout, if non-zero, specifies the amount of time for the entire + // request to complete (including all of the above timeouts + entire response body). + // This should never be less than the sum total of the above two timeouts. + RequestTimeout time.Duration + + // ReadWriteTimeout, if non-zero, will set a deadline for every Read and + // Write operation on the request connection. + ReadWriteTimeout time.Duration + + // TCPWriteBufferSize, the size of the operating system's write + // buffer associated with the connection. + TCPWriteBufferSize int + + // TCPReadBuffserSize, the size of the operating system's read + // buffer associated with the connection. 
+ TCPReadBufferSize int + + starter sync.Once + transport *http.Transport +} + +// Close cleans up the Transport, currently a no-op +func (t *Transport) Close() error { + return nil +} + +func (t *Transport) lazyStart() { + if t.Dial == nil { + t.Dial = func(netw, addr string) (net.Conn, error) { + c, err := net.DialTimeout(netw, addr, t.ConnectTimeout) + if err != nil { + return nil, err + } + + if t.TCPReadBufferSize != 0 || t.TCPWriteBufferSize != 0 { + if tcpCon, ok := c.(*net.TCPConn); ok { + if t.TCPWriteBufferSize != 0 { + if err = tcpCon.SetWriteBuffer(t.TCPWriteBufferSize); err != nil { + return nil, err + } + } + if t.TCPReadBufferSize != 0 { + if err = tcpCon.SetReadBuffer(t.TCPReadBufferSize); err != nil { + return nil, err + } + } + } else { + err = errors.New("Not Tcp Connection") + return nil, err + } + } + + if t.ReadWriteTimeout > 0 { + timeoutConn := &rwTimeoutConn{ + TCPConn: c.(*net.TCPConn), + rwTimeout: t.ReadWriteTimeout, + } + return timeoutConn, nil + } + return c, nil + } + } + + t.transport = &http.Transport{ + Dial: t.Dial, + Proxy: t.Proxy, + TLSClientConfig: t.TLSClientConfig, + DisableKeepAlives: t.DisableKeepAlives, + DisableCompression: t.DisableCompression, + MaxIdleConnsPerHost: t.MaxIdleConnsPerHost, + ResponseHeaderTimeout: t.ResponseHeaderTimeout, + } +} + +func (t *Transport) CancelRequest(req *http.Request) { + t.starter.Do(t.lazyStart) + + t.transport.CancelRequest(req) +} + +func (t *Transport) CloseIdleConnections() { + t.starter.Do(t.lazyStart) + + t.transport.CloseIdleConnections() +} + +func (t *Transport) RegisterProtocol(scheme string, rt http.RoundTripper) { + t.starter.Do(t.lazyStart) + + t.transport.RegisterProtocol(scheme, rt) +} + +func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + t.starter.Do(t.lazyStart) + + if t.RequestTimeout > 0 { + timer := time.AfterFunc(t.RequestTimeout, func() { + t.transport.CancelRequest(req) + }) + + resp, err = t.transport.RoundTrip(req) + if err != nil { + timer.Stop() + } else { + resp.Body = &bodyCloseInterceptor{ReadCloser: resp.Body, timer: timer} + } + } else { + resp, err = t.transport.RoundTrip(req) + } + + return +} + +type bodyCloseInterceptor struct { + io.ReadCloser + timer *time.Timer +} + +func (bci *bodyCloseInterceptor) Close() error { + bci.timer.Stop() + return bci.ReadCloser.Close() +} + +// A net.Conn that sets a deadline for every Read or Write operation +type rwTimeoutConn struct { + *net.TCPConn + rwTimeout time.Duration +} + +func (c *rwTimeoutConn) Read(b []byte) (int, error) { + err := c.TCPConn.SetDeadline(time.Now().Add(c.rwTimeout)) + if err != nil { + return 0, err + } + return c.TCPConn.Read(b) +} + +func (c *rwTimeoutConn) Write(b []byte) (int, error) { + err := c.TCPConn.SetDeadline(time.Now().Add(c.rwTimeout)) + if err != nil { + return 0, err + } + return c.TCPConn.Write(b) +} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ocsp/ocsp_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ocsp/ocsp_test.go deleted file mode 100644 index d55682ffe..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/ocsp/ocsp_test.go +++ /dev/null @@ -1,453 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ocsp - -import ( - "bytes" - "crypto" - "crypto/sha1" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/hex" - "math/big" - "reflect" - "testing" - "time" -) - -func TestOCSPDecode(t *testing.T) { - responseBytes, _ := hex.DecodeString(ocspResponseHex) - resp, err := ParseResponse(responseBytes, nil) - if err != nil { - t.Error(err) - } - - expected := Response{ - Status: Good, - SerialNumber: big.NewInt(0x1d0fa), - RevocationReason: Unspecified, - ThisUpdate: time.Date(2010, 7, 7, 15, 1, 5, 0, time.UTC), - NextUpdate: time.Date(2010, 7, 7, 18, 35, 17, 0, time.UTC), - } - - if !reflect.DeepEqual(resp.ThisUpdate, expected.ThisUpdate) { - t.Errorf("resp.ThisUpdate: got %d, want %d", resp.ThisUpdate, expected.ThisUpdate) - } - - if !reflect.DeepEqual(resp.NextUpdate, expected.NextUpdate) { - t.Errorf("resp.NextUpdate: got %d, want %d", resp.NextUpdate, expected.NextUpdate) - } - - if resp.Status != expected.Status { - t.Errorf("resp.Status: got %d, want %d", resp.Status, expected.Status) - } - - if resp.SerialNumber.Cmp(expected.SerialNumber) != 0 { - t.Errorf("resp.SerialNumber: got %x, want %x", resp.SerialNumber, expected.SerialNumber) - } - - if resp.RevocationReason != expected.RevocationReason { - t.Errorf("resp.RevocationReason: got %d, want %d", resp.RevocationReason, expected.RevocationReason) - } -} - -func TestOCSPDecodeWithoutCert(t *testing.T) { - responseBytes, _ := hex.DecodeString(ocspResponseWithoutCertHex) - _, err := ParseResponse(responseBytes, nil) - if err != nil { - t.Error(err) - } -} - -func TestOCSPSignature(t *testing.T) { - issuerCert, _ := hex.DecodeString(startComHex) - issuer, err := x509.ParseCertificate(issuerCert) - if err != nil { - t.Fatal(err) - } - - response, _ := hex.DecodeString(ocspResponseHex) - if _, err := ParseResponse(response, issuer); err != nil { - t.Error(err) - } -} - -func TestOCSPRequest(t *testing.T) { - leafCert, _ := hex.DecodeString(leafCertHex) - cert, err := x509.ParseCertificate(leafCert) - if err != nil { - t.Fatal(err) - } - - issuerCert, _ := hex.DecodeString(issuerCertHex) - issuer, err := x509.ParseCertificate(issuerCert) - if err != nil { - t.Fatal(err) - } - - request, err := CreateRequest(cert, issuer, nil) - if err != nil { - t.Fatal(err) - } - - expectedBytes, _ := hex.DecodeString(ocspRequestHex) - if !bytes.Equal(request, expectedBytes) { - t.Errorf("request: got %x, wanted %x", request, expectedBytes) - } - - decodedRequest, err := ParseRequest(expectedBytes) - if err != nil { - t.Fatal(err) - } - - if decodedRequest.HashAlgorithm != crypto.SHA1 { - t.Errorf("request.HashAlgorithm: got %v, want %v", decodedRequest.HashAlgorithm, crypto.SHA1) - } - - var publicKeyInfo struct { - Algorithm pkix.AlgorithmIdentifier - PublicKey asn1.BitString - } - _, err = asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo) - if err != nil { - t.Fatal(err) - } - - h := sha1.New() - h.Write(publicKeyInfo.PublicKey.RightAlign()) - issuerKeyHash := h.Sum(nil) - - h.Reset() - h.Write(issuer.RawSubject) - issuerNameHash := h.Sum(nil) - - if got := decodedRequest.IssuerKeyHash; !bytes.Equal(got, issuerKeyHash) { - t.Errorf("request.IssuerKeyHash: got %x, want %x", got, issuerKeyHash) - } - - if got := decodedRequest.IssuerNameHash; !bytes.Equal(got, issuerNameHash) { - t.Errorf("request.IssuerKeyHash: got %x, want %x", got, issuerNameHash) - } - - if got := decodedRequest.SerialNumber; got.Cmp(cert.SerialNumber) != 0 { - t.Errorf("request.SerialNumber: got %x, want %x", got, cert.SerialNumber) - } -} - 
-func TestOCSPResponse(t *testing.T) { - leafCert, _ := hex.DecodeString(leafCertHex) - leaf, err := x509.ParseCertificate(leafCert) - if err != nil { - t.Fatal(err) - } - - issuerCert, _ := hex.DecodeString(issuerCertHex) - issuer, err := x509.ParseCertificate(issuerCert) - if err != nil { - t.Fatal(err) - } - - responderCert, _ := hex.DecodeString(responderCertHex) - responder, err := x509.ParseCertificate(responderCert) - if err != nil { - t.Fatal(err) - } - - responderPrivateKeyDER, _ := hex.DecodeString(responderPrivateKeyHex) - responderPrivateKey, err := x509.ParsePKCS1PrivateKey(responderPrivateKeyDER) - if err != nil { - t.Fatal(err) - } - - producedAt := time.Now().Truncate(time.Minute) - thisUpdate := time.Date(2010, 7, 7, 15, 1, 5, 0, time.UTC) - nextUpdate := time.Date(2010, 7, 7, 18, 35, 17, 0, time.UTC) - template := Response{ - Status: Revoked, - SerialNumber: leaf.SerialNumber, - ThisUpdate: thisUpdate, - NextUpdate: nextUpdate, - RevokedAt: thisUpdate, - RevocationReason: KeyCompromise, - Certificate: responder, - } - - responseBytes, err := CreateResponse(issuer, responder, template, responderPrivateKey) - if err != nil { - t.Fatal(err) - } - - resp, err := ParseResponse(responseBytes, nil) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(resp.ThisUpdate, template.ThisUpdate) { - t.Errorf("resp.ThisUpdate: got %d, want %d", resp.ThisUpdate, template.ThisUpdate) - } - - if !reflect.DeepEqual(resp.NextUpdate, template.NextUpdate) { - t.Errorf("resp.NextUpdate: got %d, want %d", resp.NextUpdate, template.NextUpdate) - } - - if !reflect.DeepEqual(resp.RevokedAt, template.RevokedAt) { - t.Errorf("resp.RevokedAt: got %d, want %d", resp.RevokedAt, template.RevokedAt) - } - - if !resp.ProducedAt.Equal(producedAt) { - t.Errorf("resp.ProducedAt: got %d, want %d", resp.ProducedAt, producedAt) - } - - if resp.Status != template.Status { - t.Errorf("resp.Status: got %d, want %d", resp.Status, template.Status) - } - - if resp.SerialNumber.Cmp(template.SerialNumber) != 0 { - t.Errorf("resp.SerialNumber: got %x, want %x", resp.SerialNumber, template.SerialNumber) - } - - if resp.RevocationReason != template.RevocationReason { - t.Errorf("resp.RevocationReason: got %d, want %d", resp.RevocationReason, template.RevocationReason) - } -} - -// This OCSP response was taken from Thawte's public OCSP responder. 
-// To recreate: -// $ openssl s_client -tls1 -showcerts -servername www.google.com -connect www.google.com:443 -// Copy and paste the first certificate into /tmp/cert.crt and the second into -// /tmp/intermediate.crt -// $ openssl ocsp -issuer /tmp/intermediate.crt -cert /tmp/cert.crt -url http://ocsp.thawte.com -resp_text -respout /tmp/ocsp.der -// Then hex encode the result: -// $ python -c 'print file("/tmp/ocsp.der", "r").read().encode("hex")' - -const ocspResponseHex = "308206bc0a0100a08206b5308206b106092b0601050507300101048206a23082069e3081" + - "c9a14e304c310b300906035504061302494c31163014060355040a130d5374617274436f" + - "6d204c74642e312530230603550403131c5374617274436f6d20436c6173732031204f43" + - "5350205369676e6572180f32303130303730373137333531375a30663064303c30090605" + - "2b0e03021a050004146568874f40750f016a3475625e1f5c93e5a26d580414eb4234d098" + - "b0ab9ff41b6b08f7cc642eef0e2c45020301d0fa8000180f323031303037303731353031" + - "30355aa011180f32303130303730373138333531375a300d06092a864886f70d01010505" + - "000382010100ab557ff070d1d7cebbb5f0ec91a15c3fed22eb2e1b8244f1b84545f013a4" + - "fb46214c5e3fbfbebb8a56acc2b9db19f68fd3c3201046b3824d5ba689f99864328710cb" + - "467195eb37d84f539e49f859316b32964dc3e47e36814ce94d6c56dd02733b1d0802f7ff" + - "4eebdbbd2927dcf580f16cbc290f91e81b53cb365e7223f1d6e20a88ea064104875e0145" + - "672b20fc14829d51ca122f5f5d77d3ad6c83889c55c7dc43680ba2fe3cef8b05dbcabdc0" + - "d3e09aaf9725597f8c858c2fa38c0d6aed2e6318194420dd1a1137445d13e1c97ab47896" + - "17a4e08925f46f867b72e3a4dc1f08cb870b2b0717f7207faa0ac512e628a029aba7457a" + - "e63dcf3281e2162d9349a08204ba308204b6308204b23082039aa003020102020101300d" + - "06092a864886f70d010105050030818c310b300906035504061302494c31163014060355" + - "040a130d5374617274436f6d204c74642e312b3029060355040b13225365637572652044" + - "69676974616c204365727469666963617465205369676e696e6731383036060355040313" + - "2f5374617274436f6d20436c6173732031205072696d61727920496e7465726d65646961" + - "746520536572766572204341301e170d3037313032353030323330365a170d3132313032" + - "333030323330365a304c310b300906035504061302494c31163014060355040a130d5374" + - "617274436f6d204c74642e312530230603550403131c5374617274436f6d20436c617373" + - "2031204f435350205369676e657230820122300d06092a864886f70d0101010500038201" + - "0f003082010a0282010100b9561b4c45318717178084e96e178df2255e18ed8d8ecc7c2b" + - "7b51a6c1c2e6bf0aa3603066f132fe10ae97b50e99fa24b83fc53dd2777496387d14e1c3" + - "a9b6a4933e2ac12413d085570a95b8147414a0bc007c7bcf222446ef7f1a156d7ea1c577" + - "fc5f0facdfd42eb0f5974990cb2f5cefebceef4d1bdc7ae5c1075c5a99a93171f2b0845b" + - "4ff0864e973fcfe32f9d7511ff87a3e943410c90a4493a306b6944359340a9ca96f02b66" + - "ce67f028df2980a6aaee8d5d5d452b8b0eb93f923cc1e23fcccbdbe7ffcb114d08fa7a6a" + - "3c404f825d1a0e715935cf623a8c7b59670014ed0622f6089a9447a7a19010f7fe58f841" + - "29a2765ea367824d1c3bb2fda308530203010001a382015c30820158300c0603551d1301" + - "01ff04023000300b0603551d0f0404030203a8301e0603551d250417301506082b060105" + - "0507030906092b0601050507300105301d0603551d0e0416041445e0a36695414c5dd449" + - "bc00e33cdcdbd2343e173081a80603551d230481a030819d8014eb4234d098b0ab9ff41b" + - "6b08f7cc642eef0e2c45a18181a47f307d310b300906035504061302494c311630140603" + - "55040a130d5374617274436f6d204c74642e312b3029060355040b132253656375726520" + - "4469676974616c204365727469666963617465205369676e696e67312930270603550403" + - "13205374617274436f6d2043657274696669636174696f6e20417574686f726974798201" + - 
"0a30230603551d12041c301a8618687474703a2f2f7777772e737461727473736c2e636f" + - "6d2f302c06096086480186f842010d041f161d5374617274436f6d205265766f63617469" + - "6f6e20417574686f72697479300d06092a864886f70d01010505000382010100182d2215" + - "8f0fc0291324fa8574c49bb8ff2835085adcbf7b7fc4191c397ab6951328253fffe1e5ec" + - "2a7da0d50fca1a404e6968481366939e666c0a6209073eca57973e2fefa9ed1718e8176f" + - "1d85527ff522c08db702e3b2b180f1cbff05d98128252cf0f450f7dd2772f4188047f19d" + - "c85317366f94bc52d60f453a550af58e308aaab00ced33040b62bf37f5b1ab2a4f7f0f80" + - "f763bf4d707bc8841d7ad9385ee2a4244469260b6f2bf085977af9074796048ecc2f9d48" + - "a1d24ce16e41a9941568fec5b42771e118f16c106a54ccc339a4b02166445a167902e75e" + - "6d8620b0825dcd18a069b90fd851d10fa8effd409deec02860d26d8d833f304b10669b42" - -const startComHex = "308206343082041ca003020102020118300d06092a864886f70d0101050500307d310b30" + - "0906035504061302494c31163014060355040a130d5374617274436f6d204c74642e312b" + - "3029060355040b1322536563757265204469676974616c20436572746966696361746520" + - "5369676e696e6731293027060355040313205374617274436f6d20436572746966696361" + - "74696f6e20417574686f72697479301e170d3037313032343230353431375a170d313731" + - "3032343230353431375a30818c310b300906035504061302494c31163014060355040a13" + - "0d5374617274436f6d204c74642e312b3029060355040b13225365637572652044696769" + - "74616c204365727469666963617465205369676e696e67313830360603550403132f5374" + - "617274436f6d20436c6173732031205072696d61727920496e7465726d65646961746520" + - "53657276657220434130820122300d06092a864886f70d01010105000382010f00308201" + - "0a0282010100b689c6acef09527807ac9263d0f44418188480561f91aee187fa3250b4d3" + - "4706f0e6075f700e10f71dc0ce103634855a0f92ac83c6ac58523fba38e8fce7a724e240" + - "a60876c0926e9e2a6d4d3f6e61200adb59ded27d63b33e46fefa215118d7cd30a6ed076e" + - "3b7087b4f9faebee823c056f92f7a4dc0a301e9373fe07cad75f809d225852ae06da8b87" + - "2369b0e42ad8ea83d2bdf371db705a280faf5a387045123f304dcd3baf17e50fcba0a95d" + - "48aab16150cb34cd3c5cc30be810c08c9bf0030362feb26c3e720eee1c432ac9480e5739" + - "c43121c810c12c87fe5495521f523c31129b7fe7c0a0a559d5e28f3ef0d5a8e1d77031a9" + - "c4b3cfaf6d532f06f4a70203010001a38201ad308201a9300f0603551d130101ff040530" + - "030101ff300e0603551d0f0101ff040403020106301d0603551d0e04160414eb4234d098" + - "b0ab9ff41b6b08f7cc642eef0e2c45301f0603551d230418301680144e0bef1aa4405ba5" + - "17698730ca346843d041aef2306606082b06010505070101045a3058302706082b060105" + - "05073001861b687474703a2f2f6f6373702e737461727473736c2e636f6d2f6361302d06" + - "082b060105050730028621687474703a2f2f7777772e737461727473736c2e636f6d2f73" + - "667363612e637274305b0603551d1f045430523027a025a0238621687474703a2f2f7777" + - "772e737461727473736c2e636f6d2f73667363612e63726c3027a025a023862168747470" + - "3a2f2f63726c2e737461727473736c2e636f6d2f73667363612e63726c3081800603551d" + - "20047930773075060b2b0601040181b5370102013066302e06082b060105050702011622" + - "687474703a2f2f7777772e737461727473736c2e636f6d2f706f6c6963792e7064663034" + - "06082b060105050702011628687474703a2f2f7777772e737461727473736c2e636f6d2f" + - "696e7465726d6564696174652e706466300d06092a864886f70d01010505000382020100" + - "2109493ea5886ee00b8b48da314d8ff75657a2e1d36257e9b556f38545753be5501f048b" + - "e6a05a3ee700ae85d0fbff200364cbad02e1c69172f8a34dd6dee8cc3fa18aa2e37c37a7" + - "c64f8f35d6f4d66e067bdd21d9cf56ffcb302249fe8904f385e5aaf1e71fe875904dddf9" + - "46f74234f745580c110d84b0c6da5d3ef9019ee7e1da5595be741c7bfc4d144fac7e5547" + - 
"7d7bf4a50d491e95e8f712c1ccff76a62547d0f37535be97b75816ebaa5c786fec5330af" + - "ea044dcca902e3f0b60412f630b1113d904e5664d7dc3c435f7339ef4baf87ebf6fe6888" + - "4472ead207c669b0c1a18bef1749d761b145485f3b2021e95bb2ccf4d7e931f50b15613b" + - "7a94e3ebd9bc7f94ae6ae3626296a8647cb887f399327e92a252bebbf865cfc9f230fc8b" + - "c1c2a696d75f89e15c3480f58f47072fb491bfb1a27e5f4b5ad05b9f248605515a690365" + - "434971c5e06f94346bf61bd8a9b04c7e53eb8f48dfca33b548fa364a1a53a6330cd089cd" + - "4915cd89313c90c072d7654b52358a461144b93d8e2865a63e799e5c084429adb035112e" + - "214eb8d2e7103e5d8483b3c3c2e4d2c6fd094b7409ddf1b3d3193e800da20b19f038e7c5" + - "c2afe223db61e29d5c6e2089492e236ab262c145b49faf8ba7f1223bf87de290d07a19fb" + - "4a4ce3d27d5f4a8303ed27d6239e6b8db459a2d9ef6c8229dd75193c3f4c108defbb7527" + - "d2ae83a7a8ce5ba7" - -const ocspResponseWithoutCertHex = "308201d40a0100a08201cd308201c906092b0601050507300101048201ba3082" + - "01b630819fa2160414884451ff502a695e2d88f421bad90cf2cecbea7c180f3230313330" + - "3631383037323434335a30743072304a300906052b0e03021a0500041448b60d38238df8" + - "456e4ee5843ea394111802979f0414884451ff502a695e2d88f421bad90cf2cecbea7c02" + - "1100f78b13b946fc9635d8ab49de9d2148218000180f3230313330363138303732343433" + - "5aa011180f32303133303632323037323434335a300d06092a864886f70d010105050003" + - "82010100103e18b3d297a5e7a6c07a4fc52ac46a15c0eba96f3be17f0ffe84de5b8c8e05" + - "5a8f577586a849dc4abd6440eb6fedde4622451e2823c1cbf3558b4e8184959c9fe96eff" + - "8bc5f95866c58c6d087519faabfdae37e11d9874f1bc0db292208f645dd848185e4dd38b" + - "6a8547dfa7b74d514a8470015719064d35476b95bebb03d4d2845c5ca15202d2784878f2" + - "0f904c24f09736f044609e9c271381713400e563023d212db422236440c6f377bbf24b2b" + - "9e7dec8698e36a8df68b7592ad3489fb2937afb90eb85d2aa96b81c94c25057dbd4759d9" + - "20a1a65c7f0b6427a224b3c98edd96b9b61f706099951188b0289555ad30a216fb774651" + - "5a35fca2e054dfa8" - -const ocspRequestHex = "3051304f304d304b3049300906052b0e03021a05000414c0fe0278fc99188891b3f212e9" + - "c7e1b21ab7bfc004140dfc1df0a9e0f01ce7f2b213177e6f8d157cd4f60210017f77deb3" + - "bcbb235d44ccc7dba62e72" - -const leafCertHex = "308203c830820331a0030201020210017f77deb3bcbb235d44ccc7dba62e72300d06092a" + - "864886f70d01010505003081ba311f301d060355040a1316566572695369676e20547275" + - "7374204e6574776f726b31173015060355040b130e566572695369676e2c20496e632e31" + - "333031060355040b132a566572695369676e20496e7465726e6174696f6e616c20536572" + - "766572204341202d20436c617373203331493047060355040b13407777772e7665726973" + - "69676e2e636f6d2f43505320496e636f72702e6279205265662e204c494142494c495459" + - "204c54442e286329393720566572695369676e301e170d3132303632313030303030305a" + - "170d3133313233313233353935395a3068310b3009060355040613025553311330110603" + - "550408130a43616c69666f726e6961311230100603550407130950616c6f20416c746f31" + - "173015060355040a130e46616365626f6f6b2c20496e632e311730150603550403140e2a" + - "2e66616365626f6f6b2e636f6d30819f300d06092a864886f70d010101050003818d0030" + - "818902818100ae94b171e2deccc1693e051063240102e0689ae83c39b6b3e74b97d48d7b" + - "23689100b0b496ee62f0e6d356bcf4aa0f50643402f5d1766aa972835a7564723f39bbef" + - "5290ded9bcdbf9d3d55dfad23aa03dc604c54d29cf1d4b3bdbd1a809cfae47b44c7eae17" + - "c5109bee24a9cf4a8d911bb0fd0415ae4c3f430aa12a557e2ae10203010001a382011e30" + - "82011a30090603551d130402300030440603551d20043d303b3039060b6086480186f845" + - "01071703302a302806082b06010505070201161c68747470733a2f2f7777772e76657269" + - "7369676e2e636f6d2f727061303c0603551d1f043530333031a02fa02d862b687474703a" + - 
"2f2f535652496e746c2d63726c2e766572697369676e2e636f6d2f535652496e746c2e63" + - "726c301d0603551d250416301406082b0601050507030106082b06010505070302300b06" + - "03551d0f0404030205a0303406082b0601050507010104283026302406082b0601050507" + - "30018618687474703a2f2f6f6373702e766572697369676e2e636f6d30270603551d1104" + - "20301e820e2a2e66616365626f6f6b2e636f6d820c66616365626f6f6b2e636f6d300d06" + - "092a864886f70d0101050500038181005b6c2b75f8ed30aa51aad36aba595e555141951f" + - "81a53b447910ac1f76ff78fc2781616b58f3122afc1c87010425e9ed43df1a7ba6498060" + - "67e2688af03db58c7df4ee03309a6afc247ccb134dc33e54c6bc1d5133a532a73273b1d7" + - "9cadc08e7e1a83116d34523340b0305427a21742827c98916698ee7eaf8c3bdd71700817" - -const issuerCertHex = "30820383308202eca003020102021046fcebbab4d02f0f926098233f93078f300d06092a" + - "864886f70d0101050500305f310b300906035504061302555331173015060355040a130e" + - "566572695369676e2c20496e632e31373035060355040b132e436c617373203320507562" + - "6c6963205072696d6172792043657274696669636174696f6e20417574686f7269747930" + - "1e170d3937303431373030303030305a170d3136313032343233353935395a3081ba311f" + - "301d060355040a1316566572695369676e205472757374204e6574776f726b3117301506" + - "0355040b130e566572695369676e2c20496e632e31333031060355040b132a5665726953" + - "69676e20496e7465726e6174696f6e616c20536572766572204341202d20436c61737320" + - "3331493047060355040b13407777772e766572697369676e2e636f6d2f43505320496e63" + - "6f72702e6279205265662e204c494142494c495459204c54442e28632939372056657269" + - "5369676e30819f300d06092a864886f70d010101050003818d0030818902818100d88280" + - "e8d619027d1f85183925a2652be1bfd405d3bce6363baaf04c6c5bb6e7aa3c734555b2f1" + - "bdea9742ed9a340a15d4a95cf54025ddd907c132b2756cc4cabba3fe56277143aa63f530" + - "3e9328e5faf1093bf3b74d4e39f75c495ab8c11dd3b28afe70309542cbfe2b518b5a3c3a" + - "f9224f90b202a7539c4f34e7ab04b27b6f0203010001a381e33081e0300f0603551d1304" + - "0830060101ff02010030440603551d20043d303b3039060b6086480186f8450107010130" + - "2a302806082b06010505070201161c68747470733a2f2f7777772e766572697369676e2e" + - "636f6d2f43505330340603551d25042d302b06082b0601050507030106082b0601050507" + - "030206096086480186f8420401060a6086480186f845010801300b0603551d0f04040302" + - "0106301106096086480186f842010104040302010630310603551d1f042a30283026a024" + - "a0228620687474703a2f2f63726c2e766572697369676e2e636f6d2f706361332e63726c" + - "300d06092a864886f70d010105050003818100408e4997968a73dd8e4def3e61b7caa062" + - "adf40e0abb753de26ed82cc7bff4b98c369bcaa2d09c724639f6a682036511c4bcbf2da6" + - "f5d93b0ab598fab378b91ef22b4c62d5fdb27a1ddf33fd73f9a5d82d8c2aead1fcb028b6" + - "e94948134b838a1b487b24f738de6f4154b8ab576b06dfc7a2d4a9f6f136628088f28b75" + - "d68071" - -// Key and certificate for the OCSP responder were not taken from the Thawte -// responder, since CreateResponse requires that we have the private key. -// Instead, they were generated randomly. 
-const responderPrivateKeyHex = "308204a40201000282010100e8155f2d3e6f2e8d14c62a788bd462f9f844e7a6977c83ef" + - "1099f0f6616ec5265b56f356e62c5400f0b06a2e7945a82752c636df32a895152d6074df" + - "1701dc6ccfbcbec75a70bd2b55ae2be7e6cad3b5fd4cd5b7790ab401a436d3f5f346074f" + - "fde8a99d5b723350f0a112076614b12ef79c78991b119453445acf2416ab0046b540db14" + - "c9fc0f27b8989ad0f63aa4b8aefc91aa8a72160c36307c60fec78a93d3fddf4259902aa7" + - "7e7332971c7d285b6a04f648993c6922a3e9da9adf5f81508c3228791843e5d49f24db2f" + - "1290bafd97e655b1049a199f652cd603c4fafa330c390b0da78fbbc67e8fa021cbd74eb9" + - "6222b12ace31a77dcf920334dc94581b02030100010282010100bcf0b93d7238bda329a8" + - "72e7149f61bcb37c154330ccb3f42a85c9002c2e2bdea039d77d8581cd19bed94078794e" + - "56293d601547fc4bf6a2f9002fe5772b92b21b254403b403585e3130cc99ccf08f0ef81a" + - "575b38f597ba4660448b54f44bfbb97072b5a2bf043bfeca828cf7741d13698e3f38162b" + - "679faa646b82abd9a72c5c7d722c5fc577a76d2c2daac588accad18516d1bbad10b0dfa2" + - "05cfe246b59e28608a43942e1b71b0c80498075121de5b900d727c31c42c78cf1db5c0aa" + - "5b491e10ea4ed5c0962aaf2ae025dd81fa4ce490d9d6b4a4465411d8e542fc88617e5695" + - "1aa4fc8ea166f2b4d0eb89ef17f2b206bd5f1014bf8fe0e71fe62f2cccf102818100f2dc" + - "ddf878d553286daad68bac4070a82ffec3dc4666a2750f47879eec913f91836f1d976b60" + - "daf9356e078446dafab5bd2e489e5d64f8572ba24a4ba4f3729b5e106c4dd831cc2497a7" + - "e6c7507df05cb64aeb1bbc81c1e340d58b5964cf39cff84ea30c29ec5d3f005ee1362698" + - "07395037955955655292c3e85f6187fa1f9502818100f4a33c102630840705f8c778a47b" + - "87e8da31e68809af981ac5e5999cf1551685d761cdf0d6520361b99aebd5777a940fa64d" + - "327c09fa63746fbb3247ec73a86edf115f1fe5c83598db803881ade71c33c6e956118345" + - "497b98b5e07bb5be75971465ec78f2f9467e1b74956ca9d4c7c3e314e742a72d8b33889c" + - "6c093a466cef0281801d3df0d02124766dd0be98349b19eb36a508c4e679e793ba0a8bef" + - "4d786888c1e9947078b1ea28938716677b4ad8c5052af12eb73ac194915264a913709a0b" + - "7b9f98d4a18edd781a13d49899f91c20dbd8eb2e61d991ba19b5cdc08893f5cb9d39e5a6" + - "0629ea16d426244673b1b3ee72bd30e41fac8395acac40077403de5efd028180050731dd" + - "d71b1a2b96c8d538ba90bb6b62c8b1c74c03aae9a9f59d21a7a82b0d572ef06fa9c807bf" + - "c373d6b30d809c7871df96510c577421d9860c7383fda0919ece19996b3ca13562159193" + - "c0c246471e287f975e8e57034e5136aaf44254e2650def3d51292474c515b1588969112e" + - "0a85cc77073e9d64d2c2fc497844284b02818100d71d63eabf416cf677401ebf965f8314" + - "120b568a57dd3bd9116c629c40dc0c6948bab3a13cc544c31c7da40e76132ef5dd3f7534" + - "45a635930c74326ae3df0edd1bfb1523e3aa259873ac7cf1ac31151ec8f37b528c275622" + - "48f99b8bed59fd4da2576aa6ee20d93a684900bf907e80c66d6e2261ae15e55284b4ed9d" + - "6bdaa059" - -const responderCertHex = "308202e2308201caa003020102020101300d06092a864886f70d01010b05003019311730" + - "150603550403130e4f43535020526573706f6e646572301e170d31353031333031353530" + - "33335a170d3136303133303135353033335a3019311730150603550403130e4f43535020" + - "526573706f6e64657230820122300d06092a864886f70d01010105000382010f00308201" + - "0a0282010100e8155f2d3e6f2e8d14c62a788bd462f9f844e7a6977c83ef1099f0f6616e" + - "c5265b56f356e62c5400f0b06a2e7945a82752c636df32a895152d6074df1701dc6ccfbc" + - "bec75a70bd2b55ae2be7e6cad3b5fd4cd5b7790ab401a436d3f5f346074ffde8a99d5b72" + - "3350f0a112076614b12ef79c78991b119453445acf2416ab0046b540db14c9fc0f27b898" + - "9ad0f63aa4b8aefc91aa8a72160c36307c60fec78a93d3fddf4259902aa77e7332971c7d" + - "285b6a04f648993c6922a3e9da9adf5f81508c3228791843e5d49f24db2f1290bafd97e6" + - "55b1049a199f652cd603c4fafa330c390b0da78fbbc67e8fa021cbd74eb96222b12ace31" + - 
"a77dcf920334dc94581b0203010001a3353033300e0603551d0f0101ff04040302078030" + - "130603551d25040c300a06082b06010505070309300c0603551d130101ff04023000300d" + - "06092a864886f70d01010b05000382010100718012761b5063e18f0dc44644d8e6ab8612" + - "31c15fd5357805425d82aec1de85bf6d3e30fce205e3e3b8b795bbe52e40a439286d2288" + - "9064f4aeeb150359b9425f1da51b3a5c939018555d13ac42c565a0603786a919328f3267" + - "09dce52c22ad958ecb7873b9771d1148b1c4be2efe80ba868919fc9f68b6090c2f33c156" + - "d67156e42766a50b5d51e79637b7e58af74c2a951b1e642fa7741fec982cc937de37eff5" + - "9e2005d5939bfc031589ca143e6e8ab83f40ee08cc20a6b4a95a318352c28d18528dcaf9" + - "66705de17afa19d6e8ae91ddf33179d16ebb6ac2c69cae8373d408ebf8c55308be6c04d9" + - "3a25439a94299a65a709756c7a3e568be049d5c38839" diff --git a/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/bmp-string.go b/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/bmp-string.go new file mode 100644 index 000000000..284d2a68f --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/bmp-string.go @@ -0,0 +1,50 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "errors" + "unicode/utf16" +) + +// bmpString returns s encoded in UCS-2 with a zero terminator. +func bmpString(s string) ([]byte, error) { + // References: + // https://tools.ietf.org/html/rfc7292#appendix-B.1 + // http://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane + // - non-BMP characters are encoded in UTF 16 by using a surrogate pair of 16-bit codes + // EncodeRune returns 0xfffd if the rune does not need special encoding + // - the above RFC provides the info that BMPStrings are NULL terminated. + + ret := make([]byte, 0, 2*len(s)+2) + + for _, r := range s { + if t, _ := utf16.EncodeRune(r); t != 0xfffd { + return nil, errors.New("pkcs12: string contains characters that cannot be encoded in UCS-2") + } + ret = append(ret, byte(r/256), byte(r%256)) + } + + return append(ret, 0, 0), nil +} + +func decodeBMPString(bmpString []byte) (string, error) { + if len(bmpString)%2 != 0 { + return "", errors.New("pkcs12: odd-length BMP string") + } + + // strip terminator if present + if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 { + bmpString = bmpString[:l-2] + } + + s := make([]uint16, 0, len(bmpString)/2) + for len(bmpString) > 0 { + s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1])) + bmpString = bmpString[2:] + } + + return string(utf16.Decode(s)), nil +} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/crypto.go b/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/crypto.go new file mode 100644 index 000000000..88beb9f5d --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/crypto.go @@ -0,0 +1,131 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "bytes" + "crypto/cipher" + "crypto/des" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + + "github.com/letsencrypt/boulder/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2" +) + +var ( + oidPBEWithSHAAnd3KeyTripleDESCBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3}) + oidPBEWithSHAAnd40BitRC2CBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 6}) +) + +// pbeCipher is an abstraction of a PKCS#12 cipher. 
+type pbeCipher interface {
+	// create returns a cipher.Block given a key.
+	create(key []byte) (cipher.Block, error)
+	// deriveKey returns a key derived from the given password and salt.
+	deriveKey(salt, password []byte, iterations int) []byte
+	// deriveIV returns an IV derived from the given password and salt.
+	deriveIV(salt, password []byte, iterations int) []byte
+}
+
+type shaWithTripleDESCBC struct{}
+
+func (shaWithTripleDESCBC) create(key []byte) (cipher.Block, error) {
+	return des.NewTripleDESCipher(key)
+}
+
+func (shaWithTripleDESCBC) deriveKey(salt, password []byte, iterations int) []byte {
+	return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24)
+}
+
+func (shaWithTripleDESCBC) deriveIV(salt, password []byte, iterations int) []byte {
+	return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8)
+}
+
+type shaWith40BitRC2CBC struct{}
+
+func (shaWith40BitRC2CBC) create(key []byte) (cipher.Block, error) {
+	return rc2.New(key, len(key)*8)
+}
+
+func (shaWith40BitRC2CBC) deriveKey(salt, password []byte, iterations int) []byte {
+	return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 5)
+}
+
+func (shaWith40BitRC2CBC) deriveIV(salt, password []byte, iterations int) []byte {
+	return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8)
+}
+
+type pbeParams struct {
+	Salt       []byte
+	Iterations int
+}
+
+func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher.BlockMode, int, error) {
+	var cipherType pbeCipher
+
+	switch {
+	case algorithm.Algorithm.Equal(oidPBEWithSHAAnd3KeyTripleDESCBC):
+		cipherType = shaWithTripleDESCBC{}
+	case algorithm.Algorithm.Equal(oidPBEWithSHAAnd40BitRC2CBC):
+		cipherType = shaWith40BitRC2CBC{}
+	default:
+		return nil, 0, NotImplementedError("algorithm " + algorithm.Algorithm.String() + " is not supported")
+	}
+
+	var params pbeParams
+	if err := unmarshal(algorithm.Parameters.FullBytes, &params); err != nil {
+		return nil, 0, err
+	}
+
+	key := cipherType.deriveKey(params.Salt, password, params.Iterations)
+	iv := cipherType.deriveIV(params.Salt, password, params.Iterations)
+
+	block, err := cipherType.create(key)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	return cipher.NewCBCDecrypter(block, iv), block.BlockSize(), nil
+}
+
+func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) {
+	cbc, blockSize, err := pbDecrypterFor(info.Algorithm(), password)
+	if err != nil {
+		return nil, err
+	}
+
+	encrypted := info.Data()
+	if len(encrypted) == 0 {
+		return nil, errors.New("pkcs12: empty encrypted data")
+	}
+	if len(encrypted)%blockSize != 0 {
+		return nil, errors.New("pkcs12: input is not a multiple of the block size")
+	}
+	decrypted = make([]byte, len(encrypted))
+	cbc.CryptBlocks(decrypted, encrypted)
+
+	psLen := int(decrypted[len(decrypted)-1])
+	if psLen == 0 || psLen > blockSize {
+		return nil, ErrDecryption
+	}
+
+	if len(decrypted) < psLen {
+		return nil, ErrDecryption
+	}
+	ps := decrypted[len(decrypted)-psLen:]
+	decrypted = decrypted[:len(decrypted)-psLen]
+	if bytes.Compare(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) != 0 {
+		return nil, ErrDecryption
+	}
+
+	return
+}
+
+// decryptable abstracts an object that contains ciphertext.
+type decryptable interface { + Algorithm() pkix.AlgorithmIdentifier + Data() []byte +} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/errors.go b/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/errors.go new file mode 100644 index 000000000..7377ce6fb --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/errors.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import "errors" + +var ( + // ErrDecryption represents a failure to decrypt the input. + ErrDecryption = errors.New("pkcs12: decryption error, incorrect padding") + + // ErrIncorrectPassword is returned when an incorrect password is detected. + // Usually, P12/PFX data is signed to be able to verify the password. + ErrIncorrectPassword = errors.New("pkcs12: decryption password incorrect") +) + +// NotImplementedError indicates that the input is not currently supported. +type NotImplementedError string + +func (e NotImplementedError) Error() string { + return "pkcs12: " + string(e) +} diff --git a/Godeps/_workspace/src/github.com/dgryski/go-rc2/rc2.go b/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go similarity index 96% rename from Godeps/_workspace/src/github.com/dgryski/go-rc2/rc2.go rename to Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go index d33a49888..8c7090258 100644 --- a/Godeps/_workspace/src/github.com/dgryski/go-rc2/rc2.go +++ b/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go @@ -1,3 +1,7 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + // Package rc2 implements the RC2 cipher /* https://www.ietf.org/rfc/rfc2268.txt @@ -27,7 +31,7 @@ func New(key []byte, t1 int) (cipher.Block, error) { }, nil } -func (_ *rc2Cipher) BlockSize() int { return BlockSize } +func (*rc2Cipher) BlockSize() int { return BlockSize } var piTable = [256]byte{ 0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79, 0x4a, 0xa0, 0xd8, 0x9d, diff --git a/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/mac.go b/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/mac.go new file mode 100644 index 000000000..5f38aa7de --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/mac.go @@ -0,0 +1,45 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package pkcs12 + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/x509/pkix" + "encoding/asn1" +) + +type macData struct { + Mac digestInfo + MacSalt []byte + Iterations int `asn1:"optional,default:1"` +} + +// from PKCS#7: +type digestInfo struct { + Algorithm pkix.AlgorithmIdentifier + Digest []byte +} + +var ( + oidSHA1 = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}) +) + +func verifyMac(macData *macData, message, password []byte) error { + if !macData.Mac.Algorithm.Algorithm.Equal(oidSHA1) { + return NotImplementedError("unknown digest algorithm: " + macData.Mac.Algorithm.Algorithm.String()) + } + + key := pbkdf(sha1Sum, 20, 64, macData.MacSalt, password, macData.Iterations, 3, 20) + + mac := hmac.New(sha1.New, key) + mac.Write(message) + expectedMAC := mac.Sum(nil) + + if !hmac.Equal(macData.Mac.Digest, expectedMAC) { + return ErrIncorrectPassword + } + return nil +} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pbkdf.go b/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pbkdf.go new file mode 100644 index 000000000..5c419d41e --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pbkdf.go @@ -0,0 +1,170 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "bytes" + "crypto/sha1" + "math/big" +) + +var ( + one = big.NewInt(1) +) + +// sha1Sum returns the SHA-1 hash of in. +func sha1Sum(in []byte) []byte { + sum := sha1.Sum(in) + return sum[:] +} + +// fillWithRepeats returns v*ceiling(len(pattern) / v) bytes consisting of +// repeats of pattern. +func fillWithRepeats(pattern []byte, v int) []byte { + if len(pattern) == 0 { + return nil + } + outputLen := v * ((len(pattern) + v - 1) / v) + return bytes.Repeat(pattern, (outputLen+len(pattern)-1)/len(pattern))[:outputLen] +} + +func pbkdf(hash func([]byte) []byte, u, v int, salt, password []byte, r int, ID byte, size int) (key []byte) { + // implementation of https://tools.ietf.org/html/rfc7292#appendix-B.2 , RFC text verbatim in comments + + // Let H be a hash function built around a compression function f: + + // Z_2^u x Z_2^v -> Z_2^u + + // (that is, H has a chaining variable and output of length u bits, and + // the message input to the compression function of H is v bits). The + // values for u and v are as follows: + + // HASH FUNCTION VALUE u VALUE v + // MD2, MD5 128 512 + // SHA-1 160 512 + // SHA-224 224 512 + // SHA-256 256 512 + // SHA-384 384 1024 + // SHA-512 512 1024 + // SHA-512/224 224 1024 + // SHA-512/256 256 1024 + + // Furthermore, let r be the iteration count. + + // We assume here that u and v are both multiples of 8, as are the + // lengths of the password and salt strings (which we denote by p and s, + // respectively) and the number n of pseudorandom bits required. In + // addition, u and v are of course non-zero. + + // For information on security considerations for MD5 [19], see [25] and + // [1], and on those for MD2, see [18]. + + // The following procedure can be used to produce pseudorandom bits for + // a particular "purpose" that is identified by a byte called "ID". + // This standard specifies 3 different values for the ID byte: + + // 1. If ID=1, then the pseudorandom bits being produced are to be used + // as key material for performing encryption or decryption. + + // 2. If ID=2, then the pseudorandom bits being produced are to be used + // as an IV (Initial Value) for encryption or decryption. + + // 3. 
If ID=3, then the pseudorandom bits being produced are to be used + // as an integrity key for MACing. + + // 1. Construct a string, D (the "diversifier"), by concatenating v/8 + // copies of ID. + var D []byte + for i := 0; i < v; i++ { + D = append(D, ID) + } + + // 2. Concatenate copies of the salt together to create a string S of + // length v(ceiling(s/v)) bits (the final copy of the salt may be + // truncated to create S). Note that if the salt is the empty + // string, then so is S. + + S := fillWithRepeats(salt, v) + + // 3. Concatenate copies of the password together to create a string P + // of length v(ceiling(p/v)) bits (the final copy of the password + // may be truncated to create P). Note that if the password is the + // empty string, then so is P. + + P := fillWithRepeats(password, v) + + // 4. Set I=S||P to be the concatenation of S and P. + I := append(S, P...) + + // 5. Set c=ceiling(n/u). + c := (size + u - 1) / u + + // 6. For i=1, 2, ..., c, do the following: + A := make([]byte, c*20) + var IjBuf []byte + for i := 0; i < c; i++ { + // A. Set A2=H^r(D||I). (i.e., the r-th hash of D||1, + // H(H(H(... H(D||I)))) + Ai := hash(append(D, I...)) + for j := 1; j < r; j++ { + Ai = hash(Ai) + } + copy(A[i*20:], Ai[:]) + + if i < c-1 { // skip on last iteration + // B. Concatenate copies of Ai to create a string B of length v + // bits (the final copy of Ai may be truncated to create B). + var B []byte + for len(B) < v { + B = append(B, Ai[:]...) + } + B = B[:v] + + // C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit + // blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by + // setting I_j=(I_j+B+1) mod 2^v for each j. + { + Bbi := new(big.Int).SetBytes(B) + Ij := new(big.Int) + + for j := 0; j < len(I)/v; j++ { + Ij.SetBytes(I[j*v : (j+1)*v]) + Ij.Add(Ij, Bbi) + Ij.Add(Ij, one) + Ijb := Ij.Bytes() + // We expect Ijb to be exactly v bytes, + // if it is longer or shorter we must + // adjust it accordingly. + if len(Ijb) > v { + Ijb = Ijb[len(Ijb)-v:] + } + if len(Ijb) < v { + if IjBuf == nil { + IjBuf = make([]byte, v) + } + bytesShort := v - len(Ijb) + for i := 0; i < bytesShort; i++ { + IjBuf[i] = 0 + } + copy(IjBuf[bytesShort:], Ijb) + Ijb = IjBuf + } + copy(I[j*v:(j+1)*v], Ijb) + } + } + } + } + // 7. Concatenate A_1, A_2, ..., A_c together to form a pseudorandom + // bit string, A. + + // 8. Use the first n bits of A as the output of this entire process. + return A[:size] + + // If the above process is being used to generate a DES key, the process + // should be used to create 64 random bits, and the key's parity bits + // should be set after the 64 bits have been produced. Similar concerns + // hold for 2-key and 3-key triple-DES keys, for CDMF keys, and for any + // similar keys with parity bits "built into them". +} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pkcs12.go b/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pkcs12.go new file mode 100644 index 000000000..e8e179988 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pkcs12.go @@ -0,0 +1,342 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkcs12 implements some of PKCS#12. +// +// This implementation is distilled from https://tools.ietf.org/html/rfc7292 +// and referenced documents. It is intended for decoding P12/PFX-stored +// certificates and keys for use with the crypto/tls package. 
+package pkcs12
+
+import (
+	"crypto/ecdsa"
+	"crypto/rsa"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"encoding/hex"
+	"encoding/pem"
+	"errors"
+)
+
+var (
+	oidDataContentType          = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 1})
+	oidEncryptedDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 6})
+
+	oidFriendlyName     = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 20})
+	oidLocalKeyID       = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 21})
+	oidMicrosoftCSPName = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 4, 1, 311, 17, 1})
+)
+
+type pfxPdu struct {
+	Version  int
+	AuthSafe contentInfo
+	MacData  macData `asn1:"optional"`
+}
+
+type contentInfo struct {
+	ContentType asn1.ObjectIdentifier
+	Content     asn1.RawValue `asn1:"tag:0,explicit,optional"`
+}
+
+type encryptedData struct {
+	Version              int
+	EncryptedContentInfo encryptedContentInfo
+}
+
+type encryptedContentInfo struct {
+	ContentType                asn1.ObjectIdentifier
+	ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
+	EncryptedContent           []byte `asn1:"tag:0,optional"`
+}
+
+func (i encryptedContentInfo) Algorithm() pkix.AlgorithmIdentifier {
+	return i.ContentEncryptionAlgorithm
+}
+
+func (i encryptedContentInfo) Data() []byte { return i.EncryptedContent }
+
+type safeBag struct {
+	Id         asn1.ObjectIdentifier
+	Value      asn1.RawValue     `asn1:"tag:0,explicit"`
+	Attributes []pkcs12Attribute `asn1:"set,optional"`
+}
+
+type pkcs12Attribute struct {
+	Id    asn1.ObjectIdentifier
+	Value asn1.RawValue `asn1:"set"`
+}
+
+type encryptedPrivateKeyInfo struct {
+	AlgorithmIdentifier pkix.AlgorithmIdentifier
+	EncryptedData       []byte
+}
+
+func (i encryptedPrivateKeyInfo) Algorithm() pkix.AlgorithmIdentifier {
+	return i.AlgorithmIdentifier
+}
+
+func (i encryptedPrivateKeyInfo) Data() []byte {
+	return i.EncryptedData
+}
+
+// PEM block types
+const (
+	certificateType = "CERTIFICATE"
+	privateKeyType  = "PRIVATE KEY"
+)
+
+// unmarshal calls asn1.Unmarshal, but also returns an error if there is any
+// trailing data after unmarshaling.
+func unmarshal(in []byte, out interface{}) error {
+	trailing, err := asn1.Unmarshal(in, out)
+	if err != nil {
+		return err
+	}
+	if len(trailing) != 0 {
+		return errors.New("pkcs12: trailing data found")
+	}
+	return nil
+}
+
+// ToPEM converts all "safe bags" contained in pfxData to PEM blocks.
+func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) {
+	encodedPassword, err := bmpString(password)
+	if err != nil {
+		return nil, ErrIncorrectPassword
+	}
+
+	bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword)
+	if err != nil {
+		return nil, err
+	}
+
+	blocks := make([]*pem.Block, 0, len(bags))
+	for _, bag := range bags {
+		block, err := convertBag(&bag, encodedPassword)
+		if err != nil {
+			return nil, err
+		}
+		blocks = append(blocks, block)
+	}
+
+	return blocks, nil
+}
+
+func convertBag(bag *safeBag, password []byte) (*pem.Block, error) {
+	block := &pem.Block{
+		Headers: make(map[string]string),
+	}
+
+	for _, attribute := range bag.Attributes {
+		k, v, err := convertAttribute(&attribute)
+		if err != nil {
+			return nil, err
+		}
+		block.Headers[k] = v
+	}
+
+	switch {
+	case bag.Id.Equal(oidCertBag):
+		block.Type = certificateType
+		certsData, err := decodeCertBag(bag.Value.Bytes)
+		if err != nil {
+			return nil, err
+		}
+		block.Bytes = certsData
+	case bag.Id.Equal(oidPKCS8ShroundedKeyBag):
+		block.Type = privateKeyType
+
+		key, err := decodePkcs8ShroudedKeyBag(bag.Value.Bytes, password)
+		if err != nil {
+			return nil, err
+		}
+
+		switch key := key.(type) {
+		case *rsa.PrivateKey:
+			block.Bytes = x509.MarshalPKCS1PrivateKey(key)
+		case *ecdsa.PrivateKey:
+			block.Bytes, err = x509.MarshalECPrivateKey(key)
+			if err != nil {
+				return nil, err
+			}
+		default:
+			return nil, errors.New("found unknown private key type in PKCS#8 wrapping")
+		}
+	default:
+		return nil, errors.New("don't know how to convert a safe bag of type " + bag.Id.String())
+	}
+	return block, nil
+}
+
+func convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) {
+	isString := false
+
+	switch {
+	case attribute.Id.Equal(oidFriendlyName):
+		key = "friendlyName"
+		isString = true
+	case attribute.Id.Equal(oidLocalKeyID):
+		key = "localKeyId"
+	case attribute.Id.Equal(oidMicrosoftCSPName):
+		// This key is chosen to match OpenSSL.
+		key = "Microsoft CSP Name"
+		isString = true
+	default:
+		return "", "", errors.New("pkcs12: unknown attribute with OID " + attribute.Id.String())
+	}
+
+	if isString {
+		if err := unmarshal(attribute.Value.Bytes, &attribute.Value); err != nil {
+			return "", "", err
+		}
+		if value, err = decodeBMPString(attribute.Value.Bytes); err != nil {
+			return "", "", err
+		}
+	} else {
+		var id []byte
+		if err := unmarshal(attribute.Value.Bytes, &id); err != nil {
+			return "", "", err
+		}
+		value = hex.EncodeToString(id)
+	}
+
+	return key, value, nil
+}
+
+// Decode extracts a certificate and private key from pfxData. This function
+// assumes that there is only one certificate and only one private key in the
+// pfxData.
+func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) { + encodedPassword, err := bmpString(password) + if err != nil { + return nil, nil, err + } + + bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) + if err != nil { + return nil, nil, err + } + + if len(bags) != 2 { + err = errors.New("pkcs12: expected exactly two safe bags in the PFX PDU") + return + } + + for _, bag := range bags { + switch { + case bag.Id.Equal(oidCertBag): + if certificate != nil { + err = errors.New("pkcs12: expected exactly one certificate bag") + } + + certsData, err := decodeCertBag(bag.Value.Bytes) + if err != nil { + return nil, nil, err + } + certs, err := x509.ParseCertificates(certsData) + if err != nil { + return nil, nil, err + } + if len(certs) != 1 { + err = errors.New("pkcs12: expected exactly one certificate in the certBag") + return nil, nil, err + } + certificate = certs[0] + + case bag.Id.Equal(oidPKCS8ShroundedKeyBag): + if privateKey != nil { + err = errors.New("pkcs12: expected exactly one key bag") + } + + if privateKey, err = decodePkcs8ShroudedKeyBag(bag.Value.Bytes, encodedPassword); err != nil { + return nil, nil, err + } + } + } + + if certificate == nil { + return nil, nil, errors.New("pkcs12: certificate missing") + } + if privateKey == nil { + return nil, nil, errors.New("pkcs12: private key missing") + } + + return +} + +func getSafeContents(p12Data, password []byte) (bags []safeBag, updatedPassword []byte, err error) { + pfx := new(pfxPdu) + if err := unmarshal(p12Data, pfx); err != nil { + return nil, nil, errors.New("pkcs12: error reading P12 data: " + err.Error()) + } + + if pfx.Version != 3 { + return nil, nil, NotImplementedError("can only decode v3 PFX PDU's") + } + + if !pfx.AuthSafe.ContentType.Equal(oidDataContentType) { + return nil, nil, NotImplementedError("only password-protected PFX is implemented") + } + + // unmarshal the explicit bytes in the content for type 'data' + if err := unmarshal(pfx.AuthSafe.Content.Bytes, &pfx.AuthSafe.Content); err != nil { + return nil, nil, err + } + + if len(pfx.MacData.Mac.Algorithm.Algorithm) == 0 { + return nil, nil, errors.New("pkcs12: no MAC in data") + } + + if err := verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password); err != nil { + if err == ErrIncorrectPassword && len(password) == 2 && password[0] == 0 && password[1] == 0 { + // some implementations use an empty byte array + // for the empty string password try one more + // time with empty-empty password + password = nil + err = verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password) + } + if err != nil { + return nil, nil, err + } + } + + var authenticatedSafe []contentInfo + if err := unmarshal(pfx.AuthSafe.Content.Bytes, &authenticatedSafe); err != nil { + return nil, nil, err + } + + if len(authenticatedSafe) != 2 { + return nil, nil, NotImplementedError("expected exactly two items in the authenticated safe") + } + + for _, ci := range authenticatedSafe { + var data []byte + + switch { + case ci.ContentType.Equal(oidDataContentType): + if err := unmarshal(ci.Content.Bytes, &data); err != nil { + return nil, nil, err + } + case ci.ContentType.Equal(oidEncryptedDataContentType): + var encryptedData encryptedData + if err := unmarshal(ci.Content.Bytes, &encryptedData); err != nil { + return nil, nil, err + } + if encryptedData.Version != 0 { + return nil, nil, NotImplementedError("only version 0 of EncryptedData is supported") + } + if data, err = 
pbDecrypt(encryptedData.EncryptedContentInfo, password); err != nil { + return nil, nil, err + } + default: + return nil, nil, NotImplementedError("only data and encryptedData content types are supported in authenticated safe") + } + + var safeContents []safeBag + if err := unmarshal(data, &safeContents); err != nil { + return nil, nil, err + } + bags = append(bags, safeContents...) + } + + return bags, password, nil +} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/safebags.go b/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/safebags.go new file mode 100644 index 000000000..def1f7b98 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/safebags.go @@ -0,0 +1,57 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "crypto/x509" + "encoding/asn1" + "errors" +) + +var ( + // see https://tools.ietf.org/html/rfc7292#appendix-D + oidCertTypeX509Certificate = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 22, 1}) + oidPKCS8ShroundedKeyBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 2}) + oidCertBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 3}) +) + +type certBag struct { + Id asn1.ObjectIdentifier + Data []byte `asn1:"tag:0,explicit"` +} + +func decodePkcs8ShroudedKeyBag(asn1Data, password []byte) (privateKey interface{}, err error) { + pkinfo := new(encryptedPrivateKeyInfo) + if err = unmarshal(asn1Data, pkinfo); err != nil { + return nil, errors.New("pkcs12: error decoding PKCS#8 shrouded key bag: " + err.Error()) + } + + pkData, err := pbDecrypt(pkinfo, password) + if err != nil { + return nil, errors.New("pkcs12: error decrypting PKCS#8 shrouded key bag: " + err.Error()) + } + + ret := new(asn1.RawValue) + if err = unmarshal(pkData, ret); err != nil { + return nil, errors.New("pkcs12: error unmarshaling decrypted private key: " + err.Error()) + } + + if privateKey, err = x509.ParsePKCS8PrivateKey(pkData); err != nil { + return nil, errors.New("pkcs12: error parsing PKCS#8 private key: " + err.Error()) + } + + return privateKey, nil +} + +func decodeCertBag(asn1Data []byte) (x509Certificates []byte, err error) { + bag := new(certBag) + if err := unmarshal(asn1Data, bag); err != nil { + return nil, errors.New("pkcs12: error decoding cert bag: " + err.Error()) + } + if !bag.Id.Equal(oidCertTypeX509Certificate) { + return nil, NotImplementedError("only X509 certificates are supported") + } + return bag.Data, nil +} diff --git a/cmd/ocsp-responder/main.go b/cmd/ocsp-responder/main.go index 93cbd74fa..fb87d8783 100644 --- a/cmd/ocsp-responder/main.go +++ b/cmd/ocsp-responder/main.go @@ -29,16 +29,6 @@ import ( "github.com/letsencrypt/boulder/sa" ) -type cacheCtrlHandler struct { - http.Handler - MaxAge time.Duration -} - -func (c *cacheCtrlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%d", c.MaxAge/time.Second)) - c.Handler.ServeHTTP(w, r) -} - /* DBSource maps a given Database schema to a CA Key Hash, so we can pick from among them when presented with OCSP requests for different certs. 
@@ -181,8 +171,7 @@ func main() { killTimeout, err := time.ParseDuration(c.OCSPResponder.ShutdownKillTimeout) cmd.FailOnError(err, "Couldn't parse shutdown kill timeout") - m := http.StripPrefix(c.OCSPResponder.Path, - handler(source, c.OCSPResponder.MaxAge.Duration)) + m := http.StripPrefix(c.OCSPResponder.Path, cfocsp.NewResponder(source)) httpMonitor := metrics.NewHTTPMonitor(stats, m, "OCSP") srv := &http.Server{ @@ -201,10 +190,3 @@ func main() { app.Run() } - -func handler(src cfocsp.Source, maxAge time.Duration) http.Handler { - return &cacheCtrlHandler{ - Handler: cfocsp.Responder{Source: src}, - MaxAge: maxAge, - } -} diff --git a/cmd/ocsp-responder/main_test.go b/cmd/ocsp-responder/main_test.go index eeceb3c38..e82adb5f9 100644 --- a/cmd/ocsp-responder/main_test.go +++ b/cmd/ocsp-responder/main_test.go @@ -21,22 +21,6 @@ import ( "github.com/letsencrypt/boulder/test/vars" ) -func TestCacheControl(t *testing.T) { - src := make(cfocsp.InMemorySource) - h := handler(src, 10*time.Second) - w := httptest.NewRecorder() - r, err := http.NewRequest("GET", "/", nil) - if err != nil { - t.Fatal(err) - } - h.ServeHTTP(w, r) - expected := "max-age=10" - actual := w.Header().Get("Cache-Control") - if actual != expected { - t.Errorf("Cache-Control value: want %#v, got %#v", expected, actual) - } -} - var ( req = mustRead("./testdata/ocsp.req") resp = mustRead("./testdata/ocsp.resp") @@ -50,7 +34,7 @@ func TestHandler(t *testing.T) { src := make(cfocsp.InMemorySource) src[ocspReq.SerialNumber.String()] = resp - h := handler(src, 10*time.Second) + h := cfocsp.NewResponder(src) w := httptest.NewRecorder() r, err := http.NewRequest("POST", "/", bytes.NewReader(req)) if err != nil { @@ -93,7 +77,7 @@ func TestDBHandler(t *testing.T) { t.Fatalf("unable to insert response: %s", err) } - h := handler(src, 10*time.Second) + h := cfocsp.NewResponder(src) w := httptest.NewRecorder() r, err := http.NewRequest("POST", "/", bytes.NewReader(req)) if err != nil {
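For reference, the timeout knobs on the newly vendored go-httpclient Transport compose as its doc comment above describes: ConnectTimeout bounds the dial, ResponseHeaderTimeout bounds the wait for the status line and headers, and RequestTimeout bounds the whole exchange including reading the body. A minimal usage sketch follows; the import path is the upstream one rather than Boulder's Godeps rewrite, and the target URL is purely illustrative:

package main

import (
	"io/ioutil"
	"log"
	"net/http"
	"time"

	httpclient "github.com/mreiferson/go-httpclient"
)

func main() {
	transport := &httpclient.Transport{
		ConnectTimeout:        1 * time.Second,
		ResponseHeaderTimeout: 5 * time.Second,
		RequestTimeout:        10 * time.Second, // must cover the two timeouts above plus the body read
	}
	defer transport.Close()

	client := &http.Client{Transport: transport}
	resp, err := client.Get("http://127.0.0.1/test") // illustrative URL
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Reading the body must also finish before RequestTimeout fires,
	// since the timer is only stopped when resp.Body is closed.
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read %d bytes", len(body))
}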
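The pkcs12 package added above is what cfssl's helpers now import for parsing PFX/P12 bundles. A hedged sketch of how Decode might feed crypto/tls, assuming a bundle that holds exactly one certificate and one key; the file name and password are placeholders, and the import path is shortened to the upstream one for readability:

package main

import (
	"crypto/tls"
	"io/ioutil"
	"log"

	"golang.org/x/crypto/pkcs12" // vendored under Godeps/_workspace in this change
)

func main() {
	pfxData, err := ioutil.ReadFile("bundle.p12") // placeholder file
	if err != nil {
		log.Fatal(err)
	}

	// Decode expects exactly one certificate bag and one key bag in the PDU.
	key, cert, err := pkcs12.Decode(pfxData, "password") // placeholder password
	if err != nil {
		log.Fatal(err)
	}

	tlsCert := tls.Certificate{
		Certificate: [][]byte{cert.Raw},
		PrivateKey:  key,
		Leaf:        cert,
	}
	_ = tlsCert // hand to tls.Config{Certificates: ...} or similar
}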
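ToPEM, by contrast, handles bundles with more than one safe bag: each bag becomes a CERTIFICATE or PRIVATE KEY PEM block whose headers carry the bag attributes (friendlyName, localKeyId). Another small sketch under the same placeholder assumptions:

package main

import (
	"bytes"
	"encoding/pem"
	"io/ioutil"
	"log"

	"golang.org/x/crypto/pkcs12"
)

func main() {
	pfxData, err := ioutil.ReadFile("bundle.p12") // placeholder file
	if err != nil {
		log.Fatal(err)
	}

	blocks, err := pkcs12.ToPEM(pfxData, "password") // placeholder password
	if err != nil {
		log.Fatal(err)
	}

	// Re-encode every bag as PEM so it can be fed to tools that expect PEM input.
	var buf bytes.Buffer
	for _, b := range blocks {
		if err := pem.Encode(&buf, b); err != nil {
			log.Fatal(err)
		}
	}
	log.Printf("%s", buf.Bytes())
}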
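Finally, the ocsp-responder change drops the local Cache-Control wrapper and mounts cfssl's responder directly: NewResponder takes a Source and returns an http.Handler, as the updated tests exercise. A rough sketch of that wiring with an in-memory source; the listen address and path prefix are illustrative and not taken from Boulder's configuration:

package main

import (
	"log"
	"net/http"

	cfocsp "github.com/cloudflare/cfssl/ocsp" // imported via Godeps in Boulder
)

func main() {
	// InMemorySource maps serial numbers (as decimal strings) to
	// pre-signed DER OCSP responses.
	src := make(cfocsp.InMemorySource)
	// src[serialNumber.String()] = derResponse  // populate before serving (illustrative)

	h := cfocsp.NewResponder(src)
	mux := http.StripPrefix("/ocsp/", h) // illustrative path prefix

	log.Fatal(http.ListenAndServe(":8080", mux)) // illustrative address
}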