Godeps: Update google/certificate-transparency-go to c25855a. (#3948)

This updates Boulder's vendored dependency for `github.com/google/certificate-transparency-go` to c25855a, the tip of master at the time of writing.

Unit tests are confirmed to pass:

```
$ git log --pretty=format:'%h' -n 1
c25855a

$ go test ./...
ok    github.com/google/certificate-transparency-go (cached)
ok    github.com/google/certificate-transparency-go/asn1  (cached)
ok    github.com/google/certificate-transparency-go/client  22.985s
?     github.com/google/certificate-transparency-go/client/configpb [no test files]
?     github.com/google/certificate-transparency-go/client/ctclient [no test files]
ok    github.com/google/certificate-transparency-go/ctpolicy  (cached)
ok    github.com/google/certificate-transparency-go/ctutil  (cached)
?     github.com/google/certificate-transparency-go/ctutil/sctcheck [no test files]
?     github.com/google/certificate-transparency-go/ctutil/sctscan  [no test files]
ok    github.com/google/certificate-transparency-go/dnsclient (cached)
ok    github.com/google/certificate-transparency-go/fixchain  0.091s
?     github.com/google/certificate-transparency-go/fixchain/chainfix [no test files]
ok    github.com/google/certificate-transparency-go/fixchain/ratelimiter  (cached)
ok    github.com/google/certificate-transparency-go/gossip  (cached)
?     github.com/google/certificate-transparency-go/gossip/gossip_server  [no test files]
ok    github.com/google/certificate-transparency-go/gossip/minimal  0.028s
?     github.com/google/certificate-transparency-go/gossip/minimal/configpb [no test files]
?     github.com/google/certificate-transparency-go/gossip/minimal/goshawk  [no test files]
?     github.com/google/certificate-transparency-go/gossip/minimal/gosmin [no test files]
ok    github.com/google/certificate-transparency-go/gossip/minimal/x509ext  (cached)
ok    github.com/google/certificate-transparency-go/ingestor/ranges (cached)
ok    github.com/google/certificate-transparency-go/jsonclient  0.007s
ok    github.com/google/certificate-transparency-go/logid (cached)
ok    github.com/google/certificate-transparency-go/loglist (cached)
?     github.com/google/certificate-transparency-go/loglist/findlog [no test files]
ok    github.com/google/certificate-transparency-go/loglist2  (cached)
?     github.com/google/certificate-transparency-go/preload [no test files]
?     github.com/google/certificate-transparency-go/preload/dumpscts  [no test files]
?     github.com/google/certificate-transparency-go/preload/preloader [no test files]
ok    github.com/google/certificate-transparency-go/scanner 0.009s
?     github.com/google/certificate-transparency-go/scanner/scanlog [no test files]
ok    github.com/google/certificate-transparency-go/tls (cached)
ok    github.com/google/certificate-transparency-go/trillian/ctfe (cached)
?     github.com/google/certificate-transparency-go/trillian/ctfe/configpb  [no test files]
?     github.com/google/certificate-transparency-go/trillian/ctfe/ct_server [no test files]
?     github.com/google/certificate-transparency-go/trillian/ctfe/testonly  [no test files]
ok    github.com/google/certificate-transparency-go/trillian/integration  0.023s
?     github.com/google/certificate-transparency-go/trillian/integration/ct_hammer  [no test files]
?     github.com/google/certificate-transparency-go/trillian/migrillian [no test files]
?     github.com/google/certificate-transparency-go/trillian/migrillian/configpb  [no test files]
ok    github.com/google/certificate-transparency-go/trillian/migrillian/core  (cached)
?     github.com/google/certificate-transparency-go/trillian/mockclient [no test files]
ok    github.com/google/certificate-transparency-go/trillian/util (cached)
ok    github.com/google/certificate-transparency-go/x509  (cached)
?     github.com/google/certificate-transparency-go/x509/pkix [no test files]
?     github.com/google/certificate-transparency-go/x509util  [no test files]
?     github.com/google/certificate-transparency-go/x509util/certcheck  [no test files]
?     github.com/google/certificate-transparency-go/x509util/crlcheck [no test files]
```
Authored by Daniel McCarney on 2018-11-28 19:11:57 -05:00; committed by Jacob Hoffman-Andrews.
Parent commit: 8f5de538c1; this commit: c06503319c.
43 changed files with 1883 additions and 668 deletions.

Godeps/Godeps.json (generated):

@@ -151,43 +151,43 @@
 	},
 	{
 		"ImportPath": "github.com/google/certificate-transparency-go",
-		"Comment": "v1.0.9-13-g5ab67e5",
-		"Rev": "5ab67e519c93568ac3ee50fd6772a5bcf8aa460d"
+		"Comment": "v1.0.21-106-gc25855a",
+		"Rev": "c25855a82c757ba69d84b68c1a6706df6dbb5cee"
 	},
 	{
 		"ImportPath": "github.com/google/certificate-transparency-go/asn1",
-		"Comment": "v1.0.9-13-g5ab67e5",
-		"Rev": "5ab67e519c93568ac3ee50fd6772a5bcf8aa460d"
+		"Comment": "v1.0.21-106-gc25855a",
+		"Rev": "c25855a82c757ba69d84b68c1a6706df6dbb5cee"
 	},
 	{
 		"ImportPath": "github.com/google/certificate-transparency-go/client",
-		"Comment": "v1.0.9-13-g5ab67e5",
-		"Rev": "5ab67e519c93568ac3ee50fd6772a5bcf8aa460d"
+		"Comment": "v1.0.21-106-gc25855a",
+		"Rev": "c25855a82c757ba69d84b68c1a6706df6dbb5cee"
 	},
 	{
 		"ImportPath": "github.com/google/certificate-transparency-go/client/configpb",
-		"Comment": "v1.0.9-13-g5ab67e5",
-		"Rev": "5ab67e519c93568ac3ee50fd6772a5bcf8aa460d"
+		"Comment": "v1.0.21-106-gc25855a",
+		"Rev": "c25855a82c757ba69d84b68c1a6706df6dbb5cee"
 	},
 	{
 		"ImportPath": "github.com/google/certificate-transparency-go/jsonclient",
-		"Comment": "v1.0.9-13-g5ab67e5",
-		"Rev": "5ab67e519c93568ac3ee50fd6772a5bcf8aa460d"
+		"Comment": "v1.0.21-106-gc25855a",
+		"Rev": "c25855a82c757ba69d84b68c1a6706df6dbb5cee"
 	},
 	{
 		"ImportPath": "github.com/google/certificate-transparency-go/tls",
-		"Comment": "v1.0.9-13-g5ab67e5",
-		"Rev": "5ab67e519c93568ac3ee50fd6772a5bcf8aa460d"
+		"Comment": "v1.0.21-106-gc25855a",
+		"Rev": "c25855a82c757ba69d84b68c1a6706df6dbb5cee"
 	},
 	{
 		"ImportPath": "github.com/google/certificate-transparency-go/x509",
-		"Comment": "v1.0.9-13-g5ab67e5",
-		"Rev": "5ab67e519c93568ac3ee50fd6772a5bcf8aa460d"
+		"Comment": "v1.0.21-106-gc25855a",
+		"Rev": "c25855a82c757ba69d84b68c1a6706df6dbb5cee"
 	},
 	{
 		"ImportPath": "github.com/google/certificate-transparency-go/x509/pkix",
-		"Comment": "v1.0.9-13-g5ab67e5",
-		"Rev": "5ab67e519c93568ac3ee50fd6772a5bcf8aa460d"
+		"Comment": "v1.0.21-106-gc25855a",
+		"Rev": "c25855a82c757ba69d84b68c1a6706df6dbb5cee"
 	},
 	{
 		"ImportPath": "github.com/google/safebrowsing",


@@ -242,7 +242,7 @@ func (pub *Impl) SubmitToSingleCTWithResult(ctx context.Context, req *pubpb.Requ
 		return nil, err
 	}
 	var body string
-	if respErr, ok := err.(ctClient.RspError); ok && respErr.StatusCode < 500 {
+	if respErr, ok := err.(jsonclient.RspError); ok && respErr.StatusCode < 500 {
 		body = string(respErr.Body)
 	}
 	pub.log.AuditErrf("Failed to submit certificate to CT log at %s: %s Body=%q",
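The Boulder-side change is mechanical: `RspError` now lives in the `jsonclient` package (the client package keeps only a type alias, as the logclient.go hunk further down shows), so the publisher's type assertion moves with it. A minimal sketch of the resulting pattern, with a stand-in `submit` function and a plain `log` call in place of Boulder's publisher and audit logger:

```go
package main

import (
	"errors"
	"log"

	"github.com/google/certificate-transparency-go/jsonclient"
)

// submit is a stand-in for a CT submission call whose error we inspect.
func submit() error {
	return jsonclient.RspError{
		Err:        errors.New("bad request"),
		StatusCode: 400,
		Body:       []byte(`{"error":"malformed chain"}`),
	}
}

func main() {
	err := submit()
	// Only attach the response body for client-side (4xx) failures, mirroring
	// the StatusCode < 500 check in the hunk above.
	var body string
	if respErr, ok := err.(jsonclient.RspError); ok && respErr.StatusCode < 500 {
		body = string(respErr.Body)
	}
	log.Printf("Failed to submit certificate to CT log: %s Body=%q", err, body)
}
```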


@@ -16,9 +16,14 @@
 /data
 /dumpscts
 /etcdiscover
+/findlog
+/goshawk
+/gosmin
 /gossip_server
 /preloader
 /scanlog
+/sctcheck
+/sctscan
 /trillian_log_server
 /trillian_log_signer
 /trillian.json


@@ -1,29 +1,42 @@
-sudo: false
+sudo: true # required for CI push into Kubernetes.
 language: go
 os: linux
-go: 1.9
+go: "1.10"
+go_import_path: github.com/google/certificate-transparency-go
 env:
   - GOFLAGS=
   - GOFLAGS=-race
-  - GOFLAGS= WITH_ETCD=true
+  - GOFLAGS= WITH_ETCD=true WITH_COVERAGE=true
   - GOFLAGS=-race WITH_ETCD=true
 matrix:
   fast_finish: true
+addons:
+  apt:
+    sources:
+      - mysql-5.7-trusty
+    packages:
+      - mysql-server
+      - mysql-client
+services:
+  - docker
+before_install:
+  - sudo mysql -e "use mysql; update user set authentication_string=PASSWORD('') where User='root'; update user set plugin='mysql_native_password';FLUSH PRIVILEGES;"
+  - sudo mysql_upgrade
+  - sudo service mysql restart
 install:
+  - |
+    if [ ! -d $HOME/gopath/src/github.com/google ]; then
+      mkdir -p $HOME/gopath/src/github.com/google
+      ln -s $TRAVIS_BUILD_DIR $HOME/gopath/src/github.com/google/certificate-transparency-go
+    fi
   - mkdir ../protoc
   - |
     (
      cd ../protoc
-     wget https://github.com/google/protobuf/releases/download/v3.2.0/protoc-3.2.0-${TRAVIS_OS_NAME}-x86_64.zip
-     unzip protoc-3.2.0-${TRAVIS_OS_NAME}-x86_64.zip
+     wget https://github.com/google/protobuf/releases/download/v3.5.1/protoc-3.5.1-${TRAVIS_OS_NAME}-x86_64.zip
+     unzip protoc-3.5.1-${TRAVIS_OS_NAME}-x86_64.zip
     )
   - export PATH=$(pwd)/../protoc/bin:$PATH
   - go get -d -t ./...
@@ -41,9 +54,8 @@ install:
 script:
   - set -e
-  - export TRILLIAN_SQL_DRIVER=mysql
   - cd $HOME/gopath/src/github.com/google/certificate-transparency-go
-  - ./scripts/presubmit.sh ${PRESUBMIT_OPTS}
+  - ./scripts/presubmit.sh ${PRESUBMIT_OPTS} ${WITH_COVERAGE:+--coverage}
   - |
     # Check re-generation didn't change anything
     status=$(git status --porcelain | grep -v coverage) || :
@@ -64,3 +76,4 @@ script:
 after_success:
   - cp /tmp/coverage.txt .
   - bash <(curl -s https://codecov.io/bash)


@@ -0,0 +1,232 @@
# CERTIFICATE-TRANSPARENCY-GO Changelog
## v1.0.21 - CTFE Logging / Path Options. Mirroring. RPKI. Non Fatal X.509 error improvements
Published 2018-08-20 10:11:04 +0000 UTC
### CTFE
`CTFE` no longer prints certificate chains as long byte strings in messages when handler errors occur. This was obscuring the reason for the failure and wasn't particularly useful.
`CTFE` now has a global log URL path prefix flag and a configuration proto for a log specific path. The latter should help for various migration strategies if existing C++ server logs are going to be converted to run on the new code.
### Mirroring
More progress has been made on log mirroring. We believe that it's now at the point where testing can begin.
### Utilities / Libraries
The `certcheck` and `ct_hammer` utilities have received more enhancements.
`x509` and `x509util` now support Subject Information Access and additional extensions for [RPKI / RFC 3779](https://www.ietf.org/rfc/rfc3779.txt).
`scanner` / `fixchain` and some other command line utilities now have better handling of non-fatal errors.
Commit [3629d6846518309d22c16fee15d1007262a459d2](https://api.github.com/repos/google/certificate-transparency-go/commits/3629d6846518309d22c16fee15d1007262a459d2) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.21)
## v1.0.20 - Minimal Gossip / Go 1.11 Fix / Utility Improvements
Published 2018-07-05 09:21:34 +0000 UTC
Enhancements have been made to various utilities including `scanner`, `sctcheck`, `loglist` and `x509util`.
The `allow_verification_with_non_compliant_keys` flag has been removed from `signatures.go`.
An implementation of Gossip has been added. See the `gossip/minimal` package for more information.
An X.509 compatibility issue for Go 1.11 has been fixed. This should be backwards compatible with 1.10.
Commit [37a384cd035e722ea46e55029093e26687138edf](https://api.github.com/repos/google/certificate-transparency-go/commits/37a384cd035e722ea46e55029093e26687138edf) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.20)
## v1.0.19 - CTFE User Quota
Published 2018-06-01 13:51:52 +0000 UTC
CTFE now supports Trillian Log's explicit quota API; quota can be requested based on the remote user's IP, as well as per-issuing certificate in submitted chains.
Commit [8736a411b4ff214ea20687e46c2b67d66ebd83fc](https://api.github.com/repos/google/certificate-transparency-go/commits/8736a411b4ff214ea20687e46c2b67d66ebd83fc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.19)
## v1.0.18 - Adding Migration Tool / Client Additions / K8 Config
Published 2018-06-01 14:28:20 +0000 UTC
Work on a log migration tool (Migrillian) is in progress. This is not yet ready for production use but will provide features for mirroring and migrating logs.
The `RequestLog` API allows for logging of SCTs when they are issued by CTFE.
The CT Go client now supports `GetEntryAndProof`. Utilities have been switched over to use the `glog` package.
Commit [77abf2dac5410a62c04ac1c662c6d0fa54afc2dc](https://api.github.com/repos/google/certificate-transparency-go/commits/77abf2dac5410a62c04ac1c662c6d0fa54afc2dc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.18)
## v1.0.17 - Merkle verification / Tracing / Demo script / CORS
Published 2018-06-01 14:25:16 +0000 UTC
Now uses Merkle Tree verification from Trillian.
The CT server now supports CORS.
Request tracing added using OpenCensus. For GCE / K8 it just requires the flag to be enabled to export traces to Stackdriver. Other environments may differ.
A demo script was added that goes through setting up a simple deployment suitable for development / demo purposes. This may be useful for those new to the project.
Commit [3c3d22ce946447d047a03228ebb4a41e3e4eb15b](https://api.github.com/repos/google/certificate-transparency-go/commits/3c3d22ce946447d047a03228ebb4a41e3e4eb15b) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.17)
## v1.0.16 - Lifecycle test / Go 1.10.1
Published 2018-06-01 14:22:23 +0000 UTC
An integration test was added that goes through a create / drain queue / freeze lifecycle for a log.
Changes to `x509` were merged from Go 1.10.1.
Commit [a72423d09b410b80673fd1135ba1022d04bac6cd](https://api.github.com/repos/google/certificate-transparency-go/commits/a72423d09b410b80673fd1135ba1022d04bac6cd) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.16)
## v1.0.15 - More control of verification, grpclb, stackdriver metrics
Published 2018-06-01 14:20:32 +0000 UTC
Facilities were added to the `x509` package to control whether verification checks are applied.
Log server requests are now balanced using `gRPClb`.
For Kubernetes, metrics can be published to Stackdriver monitoring.
Commit [684d6eee6092774e54d301ccad0ed61bc8d010c1](https://api.github.com/repos/google/certificate-transparency-go/commits/684d6eee6092774e54d301ccad0ed61bc8d010c1) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.15)
## v1.0.14 - SQLite Removed, LeafHashForLeaf
Published 2018-06-01 14:15:37 +0000 UTC
Support for SQLlite was removed. This motivation was ongoing test flakiness caused by multi-user access. This database may work for an embedded scenario but is not suitable for use in a server environment.
A `LeafHashForLeaf` client API was added and is now used by the CT client and integration tests.
Commit [698cd6a661196db4b2e71437422178ffe8705006](https://api.github.com/repos/google/certificate-transparency-go/commits/698cd6a661196db4b2e71437422178ffe8705006) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.14)
## v1.0.13 - Crypto changes, util updates, sync with trillian repo, loglist verification
Published 2018-06-01 14:15:21 +0000 UTC
Some of our custom crypto package that were wrapping calls to the standard package have been removed and the base features used directly.
Updates were made to GCE ingress and health checks.
The log list utility can verify signatures.
Commit [480c3654a70c5383b9543ec784203030aedbd3a5](https://api.github.com/repos/google/certificate-transparency-go/commits/480c3654a70c5383b9543ec784203030aedbd3a5) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.13)
## v1.0.12 - Client / util updates & CTFE fixes
Published 2018-06-01 14:13:42 +0000 UTC
The CT client can now use a JSON loglist to find logs.
CTFE had a fix applied for preissued precerts.
A DNS client was added and CT client was extended to support DNS retrieval.
Commit [74c06c95e0b304a050a1c33764c8a01d653a16e3](https://api.github.com/repos/google/certificate-transparency-go/commits/74c06c95e0b304a050a1c33764c8a01d653a16e3) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.12)
## v1.0.11 - Kubernetes CI / Integration fixes
Published 2018-06-01 14:12:18 +0000 UTC
Updates to Kubernetes configs, mostly related to running a CI instance.
Commit [0856acca7e0ab7f082ae83a1fbb5d21160962efc](https://api.github.com/repos/google/certificate-transparency-go/commits/0856acca7e0ab7f082ae83a1fbb5d21160962efc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.11)
## v1.0.10 - More scanner, x509, utility and client fixes. CTFE updates
Published 2018-06-01 14:09:47 +0000 UTC
The CT client was using the wrong protobuffer library package. To guard against this in future a check has been added to our lint config.
The `x509` and `asn1` packages have had upstream fixes applied from Go 1.10rc1.
Commit [1bec4527572c443752ad4f2830bef88be0533236](https://api.github.com/repos/google/certificate-transparency-go/commits/1bec4527572c443752ad4f2830bef88be0533236) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.10)
## v1.0.9 - Scanner, x509, utility and client fixes
Published 2018-06-01 14:11:13 +0000 UTC
The `scanner` utility now displays throughput stats.
Build instructions and README files were updated.
The `certcheck` utility can be told to ignore unknown critical X.509 extensions.
Commit [c06833528d04a94eed0c775104d1107bab9ae17c](https://api.github.com/repos/google/certificate-transparency-go/commits/c06833528d04a94eed0c775104d1107bab9ae17c) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.9)
## v1.0.8 - Client fixes, align with trillian repo
Published 2018-06-01 14:06:44 +0000 UTC
Commit [e8b02c60f294b503dbb67de0868143f5d4935e56](https://api.github.com/repos/google/certificate-transparency-go/commits/e8b02c60f294b503dbb67de0868143f5d4935e56) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.8)
## v1.0.7 - CTFE fixes
Published 2018-06-01 14:06:13 +0000 UTC
An issue was fixed with CTFE signature caching. In an unlikely set of circumstances this could lead to log mis-operation. While the chances of this are small, we recommend that versions prior to this one are not deployed.
Commit [52c0590bd3b4b80c5497005b0f47e10557425eeb](https://api.github.com/repos/google/certificate-transparency-go/commits/52c0590bd3b4b80c5497005b0f47e10557425eeb) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.7)
## v1.0.6 - crlcheck improvements / other fixes
Published 2018-06-01 14:04:22 +0000 UTC
The `crlcheck` utility has had several fixes and enhancements. Additionally the `hammer` now supports temporal logs.
Commit [3955e4a00c42e83ff17ce25003976159c5d0f0f9](https://api.github.com/repos/google/certificate-transparency-go/commits/3955e4a00c42e83ff17ce25003976159c5d0f0f9) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.6)
## v1.0.5 - X509 and asn1 fixes
Published 2018-06-01 14:02:58 +0000 UTC
This release is mostly fixes to the `x509` and `asn1` packages. Some command line utilties were also updated.
Commit [ae40d07cce12f1227c6e658e61c9dddb7646f97b](https://api.github.com/repos/google/certificate-transparency-go/commits/ae40d07cce12f1227c6e658e61c9dddb7646f97b) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.5)
## v1.0.4 - Multi log backend configs
Published 2018-06-01 14:02:07 +0000 UTC
Support was added to allow CTFE to use multiple backends, each serving a distinct set of logs. It allows for e.g. regional backend deployment with common frontend servers.
Commit [62023ed90b41fa40854957b5dec7d9d73594723f](https://api.github.com/repos/google/certificate-transparency-go/commits/62023ed90b41fa40854957b5dec7d9d73594723f) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.4)
## v1.0.3 - Hammer updates, use standard context
Published 2018-06-01 14:01:11 +0000 UTC
After the Go 1.9 migration references to anything other than the standard `context` package have been removed. This is the only one that should be used from now on.
Commit [b28beed8b9aceacc705e0ff4a11d435a310e3d97](https://api.github.com/repos/google/certificate-transparency-go/commits/b28beed8b9aceacc705e0ff4a11d435a310e3d97) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.3)
## v1.0.2 - Go 1.9
Published 2018-06-01 14:00:00 +0000 UTC
Go 1.9 is now required to build the code.
Commit [3aed33d672ee43f04b1e8a00b25ca3e2e2e74309](https://api.github.com/repos/google/certificate-transparency-go/commits/3aed33d672ee43f04b1e8a00b25ca3e2e2e74309) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.2)
## v1.0.1 - Hammer and client improvements
Published 2018-06-01 13:59:29 +0000 UTC
Commit [c28796cc21776667fb05d6300e32d9517be96515](https://api.github.com/repos/google/certificate-transparency-go/commits/c28796cc21776667fb05d6300e32d9517be96515) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.1)
## v1.0 - First Trillian CT Release
Published 2018-06-01 13:59:00 +0000 UTC
This is the point that corresponds to the 1.0 release in the trillian repo.
Commit [abb79e468b6f3bbd48d1ab0c9e68febf80d52c4d](https://api.github.com/repos/google/certificate-transparency-go/commits/abb79e468b6f3bbd48d1ab0c9e68febf80d52c4d) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0)


@@ -47,6 +47,7 @@ Oliver Weidner <Oliver.Weidner@gmail.com>
 Pascal Leroy <phl@google.com>
 Paul Hadfield <hadfieldp@google.com> <paul@phad.org.uk>
 Paul Lietar <lietar@google.com>
+Pavel Kalinnikov <pkalinnikov@google.com> <pavelkalinnikov@gmail.com>
 Pierre Phaneuf <pphaneuf@google.com>
 Rob Percival <robpercival@google.com>
 Rob Stradling <rob@comodo.com>


@@ -11,9 +11,9 @@ repository requires Go version 1.9.
 - [Repository Structure](#repository-structure)
 - [Trillian CT Personality](#trillian-ct-personality)
 - [Working on the Code](#working-on-the-code)
+  - [Running Codebase Checks](#running-codebase-checks)
   - [Rebuilding Generated Code](#rebuilding-generated-code)
   - [Updating Vendor Code](#updating-vendor-code)
-  - [Running Codebase Checks](#running-codebase-checks)
 
 ## Repository Structure
@@ -29,57 +29,44 @@ The main parts of the repository are:
     [pre-certificates defined in RFC 6962](https://tools.ietf.org/html/rfc6962#section-3.1).
   - `tls` holds a library for processing TLS-encoded data as described in
     [RFC 5246](https://tools.ietf.org/html/rfc5246).
-  - `x509util` provides additional utilities for dealing with
+  - `x509util/` provides additional utilities for dealing with
     `x509.Certificate`s.
 - CT client libraries:
   - The top-level `ct` package (in `.`) holds types and utilities for working
     with CT data structures defined in
     [RFC 6962](https://tools.ietf.org/html/rfc6962).
   - `client/` and `jsonclient/` hold libraries that allow access to CT Logs
-    via entrypoints described in
+    via HTTP entrypoints described in
     [section 4 of RFC 6962](https://tools.ietf.org/html/rfc6962#section-4).
+  - `dnsclient/` has a library that allows access to CT Logs over
+    [DNS](https://github.com/google/certificate-transparency-rfcs/blob/master/dns/draft-ct-over-dns.md).
   - `scanner/` holds a library for scanning the entire contents of an existing
     CT Log.
+- CT Personality for [Trillian](https://github.com/google/trillian):
+  - `trillian/` holds code that allows a Certificate Transparency Log to be
+    run using a Trillian Log as its back-end -- see
+    [below](#trillian-ct-personality).
 - Command line tools:
-  - `./client/ctclient` allows interaction with a CT Log
+  - `./client/ctclient` allows interaction with a CT Log.
+  - `./ctutil/sctcheck` allows SCTs (signed certificate timestamps) from a CT
+    Log to be verified.
   - `./scanner/scanlog` allows an existing CT Log to be scanned for certificates
     of interest; please be polite when running this tool against a Log.
   - `./x509util/certcheck` allows display and verification of certificates
   - `./x509util/crlcheck` allows display and verification of certificate
     revocation lists (CRLs).
-- CT Personality for [Trillian](https://github.com/google/trillian):
-  - `trillian/` holds code that allows a Certificate Transparency Log to be
-    run using a Trillian Log as its back-end -- see
-    [below](#trillian-ct-personality).
+- Other libraries related to CT:
+  - `ctutil/` holds utility functions for validating and verifying CT data
+    structures.
+  - `loglist/` has a library for reading
+    [JSON lists of CT Logs](https://www.certificate-transparency.org/known-logs).
 
 ## Trillian CT Personality
 
 The `trillian/` subdirectory holds code and scripts for running a CT Log based
-on the [Trillian](https://github.com/google/trillian) general transparency Log.
-
-The main code for the CT personality is held in `trillian/ctfe`; this code
-responds to HTTP requests on the
-[CT API paths](https://tools.ietf.org/html/rfc6962#section-4) and translates
-them to the equivalent gRPC API requests to the Trillian Log.
-
-This obviously relies on the gRPC API definitions at
-`github.com/google/trillian`; the code also uses common libraries from the
-Trillian project for:
-
-- exposing monitoring and statistics via an `interface` and corresponding
-  Prometheus implementation (`github.com/google/trillian/monitoring/...`)
-- dealing with cryptographic keys (`github.com/google/trillian/crypto/...`).
-
-The `trillian/integration/` directory holds scripts and tests for running the whole
-system locally. In particular:
-
-- `trillian/integration/ct_integration_test.sh` brings up local processes
-  running a Trillian Log server, signer and a CT personality, and exercises the
-  complete set of RFC 6962 API entrypoints.
-- `trillian/integration/ct_hammer_test.sh` brings up a complete system and runs
-  a continuous randomized test of the CT entrypoints.
-
-These scripts require a local database instance to be configured as described
-in the [Trillian instructions](https://github.com/google/trillian#mysql-setup).
+on the [Trillian](https://github.com/google/trillian) general transparency Log,
+and is [documented separately](trillian/README.md).
 
 ## Working on the Code
@@ -90,6 +77,27 @@ dependencies and tools, described in the following sections. The
 for the required tools and scripts, as it may be more up-to-date than this
 document.
 
+### Running Codebase Checks
+
+The [`scripts/presubmit.sh`](scripts/presubmit.sh) script runs various tools
+and tests over the codebase; please ensure this script passes before sending
+pull requests for review.
+
+```bash
+# Install gometalinter and all linters
+go get -u github.com/alecthomas/gometalinter
+gometalinter --install
+
+# Run code generation, build, test and linters
+./scripts/presubmit.sh
+
+# Run build, test and linters but skip code generation
+./scripts/presubmit.sh --no-generate
+
+# Or just run the linters alone:
+gometalinter --config=gometalinter.json ./...
+```
+
 ### Rebuilding Generated Code
 
 Some of the CT Go code is autogenerated from other files:
@@ -121,24 +129,3 @@ upstream repository does not guarantee back-compatibility between the tip
 `master` branch and the current stable release). See
 [instructions in the Trillian repo](https://github.com/google/trillian#updating-vendor-code)
 for how to update vendored subtrees.
-
-### Running Codebase Checks
-
-The [`scripts/presubmit.sh`](scripts/presubmit.sh) script runs various tools
-and tests over the codebase.
-
-```bash
-# Install gometalinter and all linters
-go get -u github.com/alecthomas/gometalinter
-gometalinter --install
-
-# Run code generation, build, test and linters
-./scripts/presubmit.sh
-
-# Run build, test and linters but skip code generation
-./scripts/presubmit.sh --no-generate
-
-# Or just run the linters alone:
-gometalinter --config=gometalinter.json ./...
-```
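The `client/` and `jsonclient/` packages called out in the README are the ones Boulder's publisher builds on. A minimal, self-contained sketch of creating a log client and fetching a signed tree head; it is illustrative only (the Pilot log URL comes from the package's own doc comment, the empty `jsonclient.Options` skips signature verification, and the timeouts are arbitrary):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/google/certificate-transparency-go/client"
	"github.com/google/certificate-transparency-go/jsonclient"
)

func main() {
	hc := &http.Client{Timeout: 30 * time.Second}

	// An empty Options means no log public key is configured, so returned
	// STH/SCT signatures are not verified in this sketch.
	lc, err := client.New("https://ct.googleapis.com/pilot", hc, jsonclient.Options{})
	if err != nil {
		panic(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	sth, err := lc.GetSTH(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Printf("tree size %d, timestamp %d\n", sth.TreeSize, sth.Timestamp)
}
```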


@@ -9,9 +9,20 @@
 // http://luca.ntop.org/Teaching/Appunti/asn1.html.
 //
 // This is a fork of the Go standard library ASN.1 implementation
-// (encoding/asn1). The main difference is that this version tries to correct
-// for errors (e.g. use of tagPrintableString when the string data is really
-// ISO8859-1 - a common error present in many x509 certificates in the wild.)
+// (encoding/asn1), with the aim of relaxing checks for various things
+// that are common errors present in many X.509 certificates in the
+// wild.
+//
+// Main differences:
+//   - Extra "lax" tag that recursively applies and relaxes some strict
+//     checks:
+//     - parsePrintableString() copes with invalid PrintableString contents,
+//       e.g. use of tagPrintableString when the string data is really
+//       ISO8859-1.
+//     - checkInteger() allows integers that are not minimally encoded (and
+//       so are not correct DER).
+//     - parseObjectIdentifier() allows zero-length OIDs.
+//   - Better diagnostics on which particular field causes errors.
 package asn1
@@ -31,7 +42,6 @@ import (
 	"math/big"
 	"reflect"
 	"strconv"
-	"strings"
 	"time"
 	"unicode/utf8"
 )
@@ -94,13 +104,16 @@ func parseBool(bytes []byte, fieldName string) (ret bool, err error) {
 // checkInteger returns nil if the given bytes are a valid DER-encoded
 // INTEGER and an error otherwise.
-func checkInteger(bytes []byte, fieldName string) error {
+func checkInteger(bytes []byte, lax bool, fieldName string) error {
 	if len(bytes) == 0 {
 		return StructuralError{"empty integer", fieldName}
 	}
 	if len(bytes) == 1 {
 		return nil
 	}
+	if lax {
+		return nil
+	}
 	if (bytes[0] == 0 && bytes[1]&0x80 == 0) || (bytes[0] == 0xff && bytes[1]&0x80 == 0x80) {
 		return StructuralError{"integer not minimally-encoded", fieldName}
 	}
@@ -109,8 +122,8 @@ func checkInteger(bytes []byte, fieldName string) error {
 // parseInt64 treats the given bytes as a big-endian, signed integer and
 // returns the result.
-func parseInt64(bytes []byte, fieldName string) (ret int64, err error) {
-	err = checkInteger(bytes, fieldName)
+func parseInt64(bytes []byte, lax bool, fieldName string) (ret int64, err error) {
+	err = checkInteger(bytes, lax, fieldName)
 	if err != nil {
 		return
 	}
@@ -132,11 +145,11 @@ func parseInt64(bytes []byte, fieldName string) (ret int64, err error) {
 // parseInt treats the given bytes as a big-endian, signed integer and returns
 // the result.
-func parseInt32(bytes []byte, fieldName string) (int32, error) {
-	if err := checkInteger(bytes, fieldName); err != nil {
+func parseInt32(bytes []byte, lax bool, fieldName string) (int32, error) {
+	if err := checkInteger(bytes, lax, fieldName); err != nil {
 		return 0, err
 	}
-	ret64, err := parseInt64(bytes, fieldName)
+	ret64, err := parseInt64(bytes, lax, fieldName)
 	if err != nil {
 		return 0, err
 	}
@@ -150,8 +163,8 @@ var bigOne = big.NewInt(1)
 // parseBigInt treats the given bytes as a big-endian, signed integer and returns
 // the result.
-func parseBigInt(bytes []byte, fieldName string) (*big.Int, error) {
-	if err := checkInteger(bytes, fieldName); err != nil {
+func parseBigInt(bytes []byte, lax bool, fieldName string) (*big.Int, error) {
+	if err := checkInteger(bytes, lax, fieldName); err != nil {
 		return nil, err
 	}
 	ret := new(big.Int)
@@ -270,8 +283,11 @@ func (oi ObjectIdentifier) String() string {
 // parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
 // returns it. An object identifier is a sequence of variable length integers
 // that are assigned in a hierarchy.
-func parseObjectIdentifier(bytes []byte, fieldName string) (s []int, err error) {
+func parseObjectIdentifier(bytes []byte, lax bool, fieldName string) (s ObjectIdentifier, err error) {
 	if len(bytes) == 0 {
+		if lax {
+			return ObjectIdentifier{}, nil
+		}
 		err = SyntaxError{"zero length OBJECT IDENTIFIER", fieldName}
 		return
 	}
@@ -415,10 +431,25 @@ func isNumeric(b byte) bool {
 // parsePrintableString parses an ASN.1 PrintableString from the given byte
 // array and returns it.
-func parsePrintableString(bytes []byte, fieldName string) (ret string, err error) {
+func parsePrintableString(bytes []byte, lax bool, fieldName string) (ret string, err error) {
 	for _, b := range bytes {
 		if !isPrintable(b, allowAsterisk, allowAmpersand) {
-			err = SyntaxError{"PrintableString contains invalid character", fieldName}
+			if !lax {
+				err = SyntaxError{"PrintableString contains invalid character", fieldName}
+			} else {
+				// Might be an ISO8859-1 string stuffed in, check if it
+				// would be valid and assume that's what's happened if so,
+				// otherwise try T.61, failing that give up and just assign
+				// the bytes
+				switch {
+				case couldBeISO8859_1(bytes):
+					ret, err = iso8859_1ToUTF8(bytes), nil
+				case couldBeT61(bytes):
+					ret, err = parseT61String(bytes)
+				default:
+					err = SyntaxError{"PrintableString contains invalid character, couldn't determine correct String type", fieldName}
+				}
+			}
 			return
 		}
 	}
@@ -592,7 +623,7 @@ func parseTagAndLength(bytes []byte, initOffset int, fieldName string) (ret tagA
 // parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse
 // a number of ASN.1 values from the given byte slice and returns them as a
 // slice of Go values of the given type.
-func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type, fieldName string) (ret reflect.Value, err error) {
+func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type, lax bool, fieldName string) (ret reflect.Value, err error) {
 	matchAny, expectedTag, compoundType, ok := getUniversalType(elemType)
 	if !ok {
 		err = StructuralError{"unknown Go type for slice", fieldName}
@@ -631,7 +662,7 @@ func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type
 		numElements++
 	}
 	ret = reflect.MakeSlice(sliceType, numElements, numElements)
-	params := fieldParameters{}
+	params := fieldParameters{lax: lax}
 	offset := 0
 	for i := 0; i < numElements; i++ {
 		offset, err = parseField(ret.Index(i), bytes, offset, params)
@@ -735,22 +766,7 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
 		innerBytes := bytes[offset : offset+t.length]
 		switch t.tag {
 		case TagPrintableString:
-			result, err = parsePrintableString(innerBytes, params.name)
-			if err != nil && strings.Contains(err.Error(), "PrintableString contains invalid character") {
-				// Probably an ISO8859-1 string stuffed in, check if it
-				// would be valid and assume that's what's happened if so,
-				// otherwise try T.61, failing that give up and just assign
-				// the bytes
-				switch {
-				case couldBeISO8859_1(innerBytes):
-					result, err = iso8859_1ToUTF8(innerBytes), nil
-				case couldBeT61(innerBytes):
-					result, err = parseT61String(innerBytes)
-				default:
-					result = nil
-					err = errors.New("PrintableString contains invalid character, but couldn't determine correct String type.")
-				}
-			}
+			result, err = parsePrintableString(innerBytes, params.lax, params.name)
 		case TagNumericString:
 			result, err = parseNumericString(innerBytes, params.name)
 		case TagIA5String:
@@ -760,11 +776,11 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
 		case TagUTF8String:
 			result, err = parseUTF8String(innerBytes)
 		case TagInteger:
-			result, err = parseInt64(innerBytes, params.name)
+			result, err = parseInt64(innerBytes, params.lax, params.name)
 		case TagBitString:
			result, err = parseBitString(innerBytes, params.name)
 		case TagOID:
-			result, err = parseObjectIdentifier(innerBytes, params.name)
+			result, err = parseObjectIdentifier(innerBytes, params.lax, params.name)
 		case TagUTCTime:
 			result, err = parseUTCTime(innerBytes)
 		case TagGeneralizedTime:
@@ -873,6 +889,12 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
 		matchAnyClassAndTag = false
 	}
 
+	if !params.explicit && params.private && params.tag != nil {
+		expectedClass = ClassPrivate
+		expectedTag = *params.tag
+		matchAnyClassAndTag = false
+	}
+
 	// We have unwrapped any explicit tagging at this point.
 	if !matchAnyClassAndTag && (t.class != expectedClass || t.tag != expectedTag) ||
 		(!matchAny && t.isCompound != compoundType) {
@@ -899,7 +921,7 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
 		v.Set(reflect.ValueOf(result))
 		return
 	case objectIdentifierType:
-		newSlice, err1 := parseObjectIdentifier(innerBytes, params.name)
+		newSlice, err1 := parseObjectIdentifier(innerBytes, params.lax, params.name)
 		v.Set(reflect.MakeSlice(v.Type(), len(newSlice), len(newSlice)))
 		if err1 == nil {
 			reflect.Copy(v, reflect.ValueOf(newSlice))
@@ -927,7 +949,7 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
 		err = err1
 		return
 	case enumeratedType:
-		parsedInt, err1 := parseInt32(innerBytes, params.name)
+		parsedInt, err1 := parseInt32(innerBytes, params.lax, params.name)
 		if err1 == nil {
 			v.SetInt(int64(parsedInt))
 		}
@@ -937,7 +959,7 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
 		v.SetBool(true)
 		return
 	case bigIntType:
-		parsedInt, err1 := parseBigInt(innerBytes, params.name)
+		parsedInt, err1 := parseBigInt(innerBytes, params.lax, params.name)
 		if err1 == nil {
 			v.Set(reflect.ValueOf(parsedInt))
 		}
@@ -954,13 +976,13 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
 		return
 	case reflect.Int, reflect.Int32, reflect.Int64:
 		if val.Type().Size() == 4 {
-			parsedInt, err1 := parseInt32(innerBytes, params.name)
+			parsedInt, err1 := parseInt32(innerBytes, params.lax, params.name)
 			if err1 == nil {
 				val.SetInt(int64(parsedInt))
 			}
 			err = err1
 		} else {
-			parsedInt, err1 := parseInt64(innerBytes, params.name)
+			parsedInt, err1 := parseInt64(innerBytes, params.lax, params.name)
 			if err1 == nil {
 				val.SetInt(parsedInt)
 			}
@@ -992,6 +1014,7 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
 			}
 			innerParams := parseFieldParameters(field.Tag.Get("asn1"))
 			innerParams.name = field.Name
+			innerParams.lax = params.lax
 			innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, innerParams)
 			if err != nil {
 				return
@@ -1008,7 +1031,7 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
 			reflect.Copy(val, reflect.ValueOf(innerBytes))
 			return
 		}
-		newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem(), params.name)
+		newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem(), params.lax, params.name)
 		if err1 == nil {
 			val.Set(newSlice)
 		}
@@ -1018,7 +1041,7 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
 		var v string
 		switch universalTag {
 		case TagPrintableString:
-			v, err = parsePrintableString(innerBytes, params.name)
+			v, err = parsePrintableString(innerBytes, params.lax, params.name)
 		case TagNumericString:
 			v, err = parseNumericString(innerBytes, params.name)
 		case TagIA5String:
@@ -1110,11 +1133,13 @@ func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) {
 // The following tags on struct fields have special meaning to Unmarshal:
 //
 //	application  specifies that an APPLICATION tag is used
+//	private      specifies that a PRIVATE tag is used
 //	default:x    sets the default value for optional integer fields (only used if optional is also present)
 //	explicit     specifies that an additional, explicit tag wraps the implicit one
 //	optional     marks the field as ASN.1 OPTIONAL
 //	set          causes a SET, rather than a SEQUENCE type to be expected
 //	tag:x        specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC
+//	lax          relax strict encoding checks for this field, and for any fields within it
 //
 // If the type of the first field of a structure is RawContent then the raw
 // ASN1 contents of the struct will be stored in it.
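To make the effect of the new `lax` mode concrete, here is a small sketch. It assumes the fork keeps `encoding/asn1`'s `UnmarshalWithParams` entry point (the fork tracks the standard API), which lets the top-level parameters carry `lax` and propagate it downward via the `innerParams.lax = params.lax` assignment above; the struct and the hand-built DER are purely illustrative.

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/google/certificate-transparency-go/asn1"
)

// message is an illustrative SEQUENCE with a single INTEGER field.
type message struct {
	Serial *big.Int
}

func main() {
	// SEQUENCE { INTEGER 1 } where the INTEGER carries a redundant leading
	// zero octet, i.e. it is BER but not minimal DER.
	der := []byte{0x30, 0x04, 0x02, 0x02, 0x00, 0x01}

	var strict message
	if _, err := asn1.Unmarshal(der, &strict); err != nil {
		fmt.Println("strict:", err) // rejected: integer not minimally-encoded
	}

	var relaxed message
	if _, err := asn1.UnmarshalWithParams(der, &relaxed, "lax"); err != nil {
		fmt.Println("lax:", err)
	} else {
		fmt.Println("lax:", relaxed.Serial) // 1
	}
}
```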


@@ -75,12 +75,14 @@ type fieldParameters struct {
 	optional     bool   // true iff the field is OPTIONAL
 	explicit     bool   // true iff an EXPLICIT tag is in use.
 	application  bool   // true iff an APPLICATION tag is in use.
+	private      bool   // true iff a PRIVATE tag is in use.
 	defaultValue *int64 // a default value for INTEGER typed fields (maybe nil).
 	tag          *int   // the EXPLICIT or IMPLICIT tag (maybe nil).
 	stringType   int    // the string tag to use when marshaling.
 	timeType     int    // the time tag to use when marshaling.
 	set          bool   // true iff this should be encoded as a SET
 	omitEmpty    bool   // true iff this should be omitted if empty when marshaling.
+	lax          bool   // true iff unmarshalling should skip some error checks
 	name         string // name of field for better diagnostics
 
 	// Invariants:
@@ -131,8 +133,15 @@ func parseFieldParameters(str string) (ret fieldParameters) {
 			if ret.tag == nil {
 				ret.tag = new(int)
 			}
+		case part == "private":
+			ret.private = true
+			if ret.tag == nil {
+				ret.tag = new(int)
+			}
 		case part == "omitempty":
 			ret.omitEmpty = true
+		case part == "lax":
+			ret.lax = true
 		}
 	}
 	return


@@ -631,6 +631,8 @@ func makeField(v reflect.Value, params fieldParameters) (e encoder, err error) {
 	if params.tag != nil {
 		if params.application {
 			class = ClassApplication
+		} else if params.private {
+			class = ClassPrivate
 		} else {
 			class = ClassContextSpecific
 		}
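Taken together with the `ClassPrivate` handling added to `parseField` in the asn1 hunks above, these two changes make `private` usable the same way as the existing `application` tag. A small round-trip sketch; the struct, field, and tag number are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/google/certificate-transparency-go/asn1"
)

// record is a hypothetical type whose field is implicitly tagged with
// PRIVATE class, tag number 2, rather than the default context-specific class.
type record struct {
	Count int `asn1:"private,tag:2"`
}

func main() {
	der, err := asn1.Marshal(record{Count: 7})
	if err != nil {
		panic(err)
	}
	// The inner field's identifier octet should carry the PRIVATE class bits
	// (0xc0) rather than the context-specific bits (0x80).
	fmt.Printf("% x\n", der)

	var out record
	if _, err := asn1.Unmarshal(der, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Count) // 7
}
```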


@@ -1,22 +1,14 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // source: multilog.proto
 
-/*
-Package configpb is a generated protocol buffer package.
-
-It is generated from these files:
-	multilog.proto
-
-It has these top-level messages:
-	TemporalLogConfig
-	LogShardConfig
-*/
 package configpb
 
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import google_protobuf "github.com/golang/protobuf/ptypes/timestamp"
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	timestamp "github.com/golang/protobuf/ptypes/timestamp"
+	math "math"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -32,13 +24,36 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
 // TemporalLogConfig is a set of LogShardConfig messages, whose
 // time limits should be contiguous.
 type TemporalLogConfig struct {
-	Shard []*LogShardConfig `protobuf:"bytes,1,rep,name=shard" json:"shard,omitempty"`
+	Shard                []*LogShardConfig `protobuf:"bytes,1,rep,name=shard,proto3" json:"shard,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
 }
 
 func (m *TemporalLogConfig) Reset()         { *m = TemporalLogConfig{} }
 func (m *TemporalLogConfig) String() string { return proto.CompactTextString(m) }
 func (*TemporalLogConfig) ProtoMessage()    {}
-func (*TemporalLogConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*TemporalLogConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_33e545c6d900a512, []int{0}
+}
+func (m *TemporalLogConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TemporalLogConfig.Unmarshal(m, b)
+}
+func (m *TemporalLogConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TemporalLogConfig.Marshal(b, m, deterministic)
+}
+func (m *TemporalLogConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TemporalLogConfig.Merge(m, src)
+}
+func (m *TemporalLogConfig) XXX_Size() int {
+	return xxx_messageInfo_TemporalLogConfig.Size(m)
+}
+func (m *TemporalLogConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_TemporalLogConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TemporalLogConfig proto.InternalMessageInfo
 
 func (m *TemporalLogConfig) GetShard() []*LogShardConfig {
 	if m != nil {
@@ -50,23 +65,46 @@ func (m *TemporalLogConfig) GetShard() []*LogShardConfig {
 // LogShardConfig describes the acceptable date range for a single shard of a temporal
 // log.
 type LogShardConfig struct {
-	Uri string `protobuf:"bytes,1,opt,name=uri" json:"uri,omitempty"`
+	Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"`
 	// The log's public key in DER-encoded PKIX form.
 	PublicKeyDer []byte `protobuf:"bytes,2,opt,name=public_key_der,json=publicKeyDer,proto3" json:"public_key_der,omitempty"`
 	// not_after_start defines the start of the range of acceptable NotAfter
 	// values, inclusive.
 	// Leaving this unset implies no lower bound to the range.
-	NotAfterStart *google_protobuf.Timestamp `protobuf:"bytes,3,opt,name=not_after_start,json=notAfterStart" json:"not_after_start,omitempty"`
+	NotAfterStart *timestamp.Timestamp `protobuf:"bytes,3,opt,name=not_after_start,json=notAfterStart,proto3" json:"not_after_start,omitempty"`
 	// not_after_limit defines the end of the range of acceptable NotAfter values,
 	// exclusive.
 	// Leaving this unset implies no upper bound to the range.
-	NotAfterLimit *google_protobuf.Timestamp `protobuf:"bytes,4,opt,name=not_after_limit,json=notAfterLimit" json:"not_after_limit,omitempty"`
+	NotAfterLimit        *timestamp.Timestamp `protobuf:"bytes,4,opt,name=not_after_limit,json=notAfterLimit,proto3" json:"not_after_limit,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
 }
 
 func (m *LogShardConfig) Reset()         { *m = LogShardConfig{} }
 func (m *LogShardConfig) String() string { return proto.CompactTextString(m) }
 func (*LogShardConfig) ProtoMessage()    {}
-func (*LogShardConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (*LogShardConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_33e545c6d900a512, []int{1}
+}
+func (m *LogShardConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LogShardConfig.Unmarshal(m, b)
+}
+func (m *LogShardConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LogShardConfig.Marshal(b, m, deterministic)
+}
+func (m *LogShardConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LogShardConfig.Merge(m, src)
+}
+func (m *LogShardConfig) XXX_Size() int {
+	return xxx_messageInfo_LogShardConfig.Size(m)
+}
+func (m *LogShardConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_LogShardConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogShardConfig proto.InternalMessageInfo
 
 func (m *LogShardConfig) GetUri() string {
 	if m != nil {
@@ -82,14 +120,14 @@ func (m *LogShardConfig) GetPublicKeyDer() []byte {
 	return nil
 }
 
-func (m *LogShardConfig) GetNotAfterStart() *google_protobuf.Timestamp {
+func (m *LogShardConfig) GetNotAfterStart() *timestamp.Timestamp {
 	if m != nil {
 		return m.NotAfterStart
 	}
 	return nil
 }
 
-func (m *LogShardConfig) GetNotAfterLimit() *google_protobuf.Timestamp {
+func (m *LogShardConfig) GetNotAfterLimit() *timestamp.Timestamp {
 	if m != nil {
 		return m.NotAfterLimit
 	}
@@ -101,9 +139,9 @@ func init() {
 	proto.RegisterType((*LogShardConfig)(nil), "configpb.LogShardConfig")
 }
 
-func init() { proto.RegisterFile("multilog.proto", fileDescriptor0) }
+func init() { proto.RegisterFile("multilog.proto", fileDescriptor_33e545c6d900a512) }
 
-var fileDescriptor0 = []byte{
+var fileDescriptor_33e545c6d900a512 = []byte{
 	// 241 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x8f, 0xb1, 0x4e, 0xc3, 0x30,
 	0x14, 0x45, 0x65, 0x02, 0x08, 0xdc, 0x12, 0xc0, 0x93, 0xd5, 0x85, 0xa8, 0x62, 0xc8, 0xe4, 0x4a,
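The regenerated bindings keep the same message shape; only the internal descriptor plumbing changed with the newer protoc-gen-go. A sketch of constructing a temporal log configuration with one shard (the URL, dates, and empty key are placeholders):

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
	"github.com/google/certificate-transparency-go/client/configpb"
)

func main() {
	start, err := ptypes.TimestampProto(time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC))
	if err != nil {
		panic(err)
	}
	limit, err := ptypes.TimestampProto(time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC))
	if err != nil {
		panic(err)
	}

	cfg := &configpb.TemporalLogConfig{
		Shard: []*configpb.LogShardConfig{{
			Uri:           "https://ct.example.net/2019", // placeholder shard URL
			PublicKeyDer:  nil,                           // DER-encoded PKIX public key would go here
			NotAfterStart: start,
			NotAfterLimit: limit,
		}},
	}
	fmt.Printf("%d shard(s); first covers NotAfter values in [%v, %v)\n",
		len(cfg.GetShard()), cfg.Shard[0].NotAfterStart, cfg.Shard[0].NotAfterLimit)
}
```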


@@ -41,11 +41,7 @@ func (c *LogClient) GetRawEntries(ctx context.Context, start, end int64) (*ct.Ge
 	}
 
 	var resp ct.GetEntriesResponse
-	httpRsp, body, err := c.GetAndParse(ctx, ct.GetEntriesPath, params, &resp)
-	if err != nil {
-		if httpRsp != nil {
-			return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
-		}
+	if _, _, err := c.GetAndParse(ctx, ct.GetEntriesPath, params, &resp); err != nil {
 		return nil, err
 	}
@@ -66,7 +62,7 @@ func (c *LogClient) GetEntries(ctx context.Context, start, end int64) ([]ct.LogE
 	for i, entry := range resp.Entries {
 		index := start + int64(i)
 		logEntry, err := ct.LogEntryFromLeaf(index, &entry)
-		if _, ok := err.(x509.NonFatalErrors); !ok && err != nil {
+		if x509.IsFatal(err) {
 			return nil, err
 		}
 		entries[i] = *logEntry

View File

@ -19,7 +19,6 @@ package client
import ( import (
"context" "context"
"crypto/sha256"
"encoding/base64" "encoding/base64"
"fmt" "fmt"
"net/http" "net/http"
@ -35,11 +34,19 @@ type LogClient struct {
jsonclient.JSONClient jsonclient.JSONClient
} }
// CheckLogClient is an interface that allows (just) checking of various log contents.
type CheckLogClient interface {
BaseURI() string
GetSTH(context.Context) (*ct.SignedTreeHead, error)
GetSTHConsistency(ctx context.Context, first, second uint64) ([][]byte, error)
GetProofByHash(ctx context.Context, hash []byte, treeSize uint64) (*ct.GetProofByHashResponse, error)
}
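
The new CheckLogClient interface lets code depend only on read/check access to a log rather than on the concrete *LogClient type. Below is a minimal sketch (not part of the diff; the helper names and URI are illustrative) showing how it might be consumed, assuming *LogClient satisfies the interface via its promoted JSONClient.BaseURI method:

```go
package ctexample

import (
	"context"
	"fmt"
	"net/http"

	"github.com/google/certificate-transparency-go/client"
	"github.com/google/certificate-transparency-go/jsonclient"
)

// checkSTH accepts anything satisfying CheckLogClient, so tests can pass a
// fake log while production code passes a *client.LogClient.
func checkSTH(ctx context.Context, lc client.CheckLogClient) error {
	sth, err := lc.GetSTH(ctx)
	if err != nil {
		return fmt.Errorf("%s: get-sth failed: %v", lc.BaseURI(), err)
	}
	fmt.Printf("%s: tree size %d\n", lc.BaseURI(), sth.TreeSize)
	return nil
}

// newPilotClient shows that *LogClient can be used where a CheckLogClient is
// expected; the URI is illustrative only.
func newPilotClient() (client.CheckLogClient, error) {
	return client.New("https://ct.googleapis.com/pilot", &http.Client{}, jsonclient.Options{})
}
```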
// New constructs a new LogClient instance.
// |uri| is the base URI of the CT log instance to interact with, e.g.
// https://ct.googleapis.com/pilot
// |hc| is the underlying client to be used for HTTP requests to the CT log.
// |opts| can be used to provide a custom logger interface and a public key
// for signature verification.
func New(uri string, hc *http.Client, opts jsonclient.Options) (*LogClient, error) { func New(uri string, hc *http.Client, opts jsonclient.Options) (*LogClient, error) {
logClient, err := jsonclient.New(uri, hc, opts) logClient, err := jsonclient.New(uri, hc, opts)
@ -49,18 +56,8 @@ func New(uri string, hc *http.Client, opts jsonclient.Options) (*LogClient, erro
return &LogClient{*logClient}, err return &LogClient{*logClient}, err
} }
// RspError represents a server error including HTTP information. The previous
// standalone RspError struct and its Error() method have moved to the
// jsonclient package; the name is kept here as a type alias.
type RspError = jsonclient.RspError
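
Because RspError is now an alias for jsonclient.RspError, callers that type-assert on client.RspError to recover HTTP details keep working unchanged. A small illustrative sketch (the function name is made up, and lc is assumed to be a configured *client.LogClient):

```go
package ctexample

import (
	"context"
	"log"

	"github.com/google/certificate-transparency-go/client"
)

// logSTH fetches an STH and, on failure, reports HTTP details when available.
// client.RspError and jsonclient.RspError name the same type, so a type
// assertion on either works.
func logSTH(ctx context.Context, lc *client.LogClient) {
	sth, err := lc.GetSTH(ctx)
	if err != nil {
		if rspErr, ok := err.(client.RspError); ok {
			log.Printf("log returned HTTP %d: %v (%d body bytes)", rspErr.StatusCode, rspErr.Err, len(rspErr.Body))
		} else {
			log.Printf("request failed before any HTTP response: %v", err)
		}
		return
	}
	log.Printf("STH: tree size %d, timestamp %d", sth.TreeSize, sth.Timestamp)
}
```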
// Attempts to add |chain| to the log, using the api end-point specified by // Attempts to add |chain| to the log, using the api end-point specified by
// |path|. If provided context expires before submission is complete an // |path|. If provided context expires before submission is complete an
@ -74,9 +71,6 @@ func (c *LogClient) addChainWithRetry(ctx context.Context, ctype ct.LogEntryType
httpRsp, body, err := c.PostAndParseWithRetry(ctx, path, &req, &resp) httpRsp, body, err := c.PostAndParseWithRetry(ctx, path, &req, &resp)
if err != nil { if err != nil {
if httpRsp != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return nil, err return nil, err
} }
@ -131,9 +125,6 @@ func (c *LogClient) AddJSON(ctx context.Context, data interface{}) (*ct.SignedCe
var resp ct.AddChainResponse var resp ct.AddChainResponse
httpRsp, body, err := c.PostAndParse(ctx, ct.AddJSONPath, &req, &resp) httpRsp, body, err := c.PostAndParse(ctx, ct.AddJSONPath, &req, &resp)
if err != nil { if err != nil {
if httpRsp != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return nil, err return nil, err
} }
var ds ct.DigitallySigned var ds ct.DigitallySigned
@ -164,40 +155,18 @@ func (c *LogClient) GetSTH(ctx context.Context) (*ct.SignedTreeHead, error) {
var resp ct.GetSTHResponse var resp ct.GetSTHResponse
httpRsp, body, err := c.GetAndParse(ctx, ct.GetSTHPath, nil, &resp) httpRsp, body, err := c.GetAndParse(ctx, ct.GetSTHPath, nil, &resp)
if err != nil { if err != nil {
if httpRsp != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return nil, err return nil, err
} }
sth := ct.SignedTreeHead{
TreeSize: resp.TreeSize,
Timestamp: resp.Timestamp,
}
if len(resp.SHA256RootHash) != sha256.Size { sth, err := resp.ToSignedTreeHead()
return nil, RspError{ if err != nil {
Err: fmt.Errorf("sha256_root_hash is invalid length, expected %d got %d", sha256.Size, len(resp.SHA256RootHash)),
StatusCode: httpRsp.StatusCode,
Body: body,
}
}
copy(sth.SHA256RootHash[:], resp.SHA256RootHash)
var ds ct.DigitallySigned
if rest, err := tls.Unmarshal(resp.TreeHeadSignature, &ds); err != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
} else if len(rest) > 0 {
return nil, RspError{
Err: fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)),
StatusCode: httpRsp.StatusCode,
Body: body,
}
}
sth.TreeHeadSignature = ds
if err := c.VerifySTHSignature(sth); err != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
} }
return &sth, nil
if err := c.VerifySTHSignature(*sth); err != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return sth, nil
} }
// VerifySTHSignature checks the signature in sth, returning any error encountered or nil if verification is // VerifySTHSignature checks the signature in sth, returning any error encountered or nil if verification is
@ -232,11 +201,7 @@ func (c *LogClient) GetSTHConsistency(ctx context.Context, first, second uint64)
"second": strconv.FormatUint(second, base10), "second": strconv.FormatUint(second, base10),
} }
var resp ct.GetSTHConsistencyResponse var resp ct.GetSTHConsistencyResponse
httpRsp, body, err := c.GetAndParse(ctx, ct.GetSTHConsistencyPath, params, &resp) if _, _, err := c.GetAndParse(ctx, ct.GetSTHConsistencyPath, params, &resp); err != nil {
if err != nil {
if httpRsp != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return nil, err return nil, err
} }
return resp.Consistency, nil return resp.Consistency, nil
@ -251,11 +216,7 @@ func (c *LogClient) GetProofByHash(ctx context.Context, hash []byte, treeSize ui
"hash": b64Hash, "hash": b64Hash,
} }
var resp ct.GetProofByHashResponse var resp ct.GetProofByHashResponse
httpRsp, body, err := c.GetAndParse(ctx, ct.GetProofByHashPath, params, &resp) if _, _, err := c.GetAndParse(ctx, ct.GetProofByHashPath, params, &resp); err != nil {
if err != nil {
if httpRsp != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return nil, err return nil, err
} }
return &resp, nil return &resp, nil
@ -266,9 +227,6 @@ func (c *LogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error)
var resp ct.GetRootsResponse var resp ct.GetRootsResponse
httpRsp, body, err := c.GetAndParse(ctx, ct.GetRootsPath, nil, &resp) httpRsp, body, err := c.GetAndParse(ctx, ct.GetRootsPath, nil, &resp)
if err != nil { if err != nil {
if httpRsp != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return nil, err return nil, err
} }
var roots []ct.ASN1Cert var roots []ct.ASN1Cert
@ -281,3 +239,17 @@ func (c *LogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error)
} }
return roots, nil return roots, nil
} }
// GetEntryAndProof returns a log entry and audit path for the index of a leaf.
func (c *LogClient) GetEntryAndProof(ctx context.Context, index, treeSize uint64) (*ct.GetEntryAndProofResponse, error) {
base10 := 10
params := map[string]string{
"leaf_index": strconv.FormatUint(index, base10),
"tree_size": strconv.FormatUint(treeSize, base10),
}
var resp ct.GetEntryAndProofResponse
if _, _, err := c.GetAndParse(ctx, ct.GetEntryAndProofPath, params, &resp); err != nil {
return nil, err
}
return &resp, nil
}
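
As a usage illustration for the new GetEntryAndProof method (not part of the diff; the helper name is made up and the response field names are assumed to match the ct package's GetEntryAndProofResponse):

```go
package ctexample

import (
	"context"
	"fmt"

	"github.com/google/certificate-transparency-go/client"
)

// showEntryProof fetches the entry at index together with its audit path for a
// tree of size treeSize, and reports the size of what came back.
func showEntryProof(ctx context.Context, lc *client.LogClient, index, treeSize uint64) error {
	rsp, err := lc.GetEntryAndProof(ctx, index, treeSize)
	if err != nil {
		return fmt.Errorf("get-entry-and-proof failed: %v", err)
	}
	fmt.Printf("leaf %d: %d bytes of leaf input, audit path of %d hashes\n",
		index, len(rsp.LeafInput), len(rsp.AuditPath))
	return nil
}
```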

View File

@ -23,7 +23,7 @@ import (
"net/http" "net/http"
"time" "time"
"github.com/gogo/protobuf/proto" "github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes" "github.com/golang/protobuf/ptypes"
ct "github.com/google/certificate-transparency-go" ct "github.com/google/certificate-transparency-go"
"github.com/google/certificate-transparency-go/client/configpb" "github.com/google/certificate-transparency-go/client/configpb"
@ -106,7 +106,7 @@ func NewTemporalLogClient(cfg configpb.TemporalLogConfig, hc *http.Client) (*Tem
} }
clients := make([]*LogClient, 0, len(cfg.Shard)) clients := make([]*LogClient, 0, len(cfg.Shard))
for i, shard := range cfg.Shard { for i, shard := range cfg.Shard {
opts := jsonclient.Options{} opts := jsonclient.Options{UserAgent: "ct-go-multilog/1.0"}
opts.PublicKeyDER = shard.GetPublicKeyDer() opts.PublicKeyDER = shard.GetPublicKeyDer()
c, err := New(shard.Uri, hc, opts) c, err := New(shard.Uri, hc, opts)
if err != nil { if err != nil {

View File

@ -0,0 +1,63 @@
substitutions:
_CLUSTER_NAME: trillian-opensource-ci
_MASTER_ZONE: us-central1-a
steps:
- id: build_ctfe
name: gcr.io/cloud-builders/docker
args:
- build
- --file=trillian/examples/deployment/docker/ctfe/Dockerfile
- --tag=gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA}
- .
waitFor: ["-"]
- id: push_ctfe
name: gcr.io/cloud-builders/docker
args:
- push
- gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA}
waitFor:
- build_ctfe
- id: tag_latest_ctfe
name: gcr.io/cloud-builders/gcloud
args:
- container
- images
- add-tag
- gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA}
- gcr.io/${PROJECT_ID}/ctfe:latest
waitFor:
- push_ctfe
- id: build_envsubst
name: gcr.io/cloud-builders/docker
args:
- build
- trillian/examples/deployment/docker/envsubst
- -t
- envsubst
waitFor: ["-"]
- id: envsubst_kubernetes_configs
name: envsubst
args:
- trillian/examples/deployment/kubernetes/ctfe-deployment.yaml
- trillian/examples/deployment/kubernetes/ctfe-service.yaml
- trillian/examples/deployment/kubernetes/ctfe-ingress.yaml
env:
- PROJECT_ID=${PROJECT_ID}
- IMAGE_TAG=${COMMIT_SHA}
waitFor:
- build_envsubst
- id: update_kubernetes_configs
name: gcr.io/cloud-builders/kubectl
args:
- apply
- -f=trillian/examples/deployment/kubernetes/ctfe-deployment.yaml
- -f=trillian/examples/deployment/kubernetes/ctfe-service.yaml
- -f=trillian/examples/deployment/kubernetes/ctfe-ingress.yaml
env:
- CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE}
- CLOUDSDK_CONTAINER_CLUSTER=${_CLUSTER_NAME}
waitFor:
- envsubst_kubernetes_configs
- push_ctfe
images:
- gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA}

View File

@ -0,0 +1,10 @@
steps:
- id: build_ctfe
name: gcr.io/cloud-builders/docker
args:
- build
- --file=trillian/examples/deployment/docker/ctfe/Dockerfile
- --tag=gcr.io/${PROJECT_ID}/ctfe:${TAG_NAME}
- .
images:
- gcr.io/${PROJECT_ID}/ctfe:${TAG_NAME}

View File

@ -1,7 +1,9 @@
{ {
"Deadline": "60s",
"Linters": { "Linters": {
"license": "./scripts/check_license.sh:PATH:LINE:MESSAGE", "license": "./scripts/check_license.sh:PATH:LINE:MESSAGE",
"forked": "./scripts/check_forked.sh:PATH:LINE:MESSAGE" "forked": "./scripts/check_forked.sh:PATH:LINE:MESSAGE",
"unforked": "./scripts/check_unforked.sh:PATH:LINE:MESSAGE"
}, },
"Enable": [ "Enable": [
"forked", "forked",
@ -11,6 +13,7 @@
"golint", "golint",
"license", "license",
"misspell", "misspell",
"unforked",
"vet" "vet"
], ],
"Exclude": [ "Exclude": [

View File

@ -53,11 +53,12 @@ type backoffer interface {
// JSONClient provides common functionality for interacting with a JSON server // JSONClient provides common functionality for interacting with a JSON server
// that uses cryptographic signatures. // that uses cryptographic signatures.
type JSONClient struct { type JSONClient struct {
uri string // the base URI of the server. e.g. http://ct.googleapis/pilot uri string // the base URI of the server. e.g. https://ct.googleapis/pilot
httpClient *http.Client // used to interact with the server via HTTP httpClient *http.Client // used to interact with the server via HTTP
Verifier *ct.SignatureVerifier // nil for no verification (e.g. no public key available) Verifier *ct.SignatureVerifier // nil for no verification (e.g. no public key available)
logger Logger // interface to use for logging warnings and errors logger Logger // interface to use for logging warnings and errors
backoff backoffer // object used to store and calculate backoff information backoff backoffer // object used to store and calculate backoff information
userAgent string // If set, this is sent as the UserAgent header.
} }
// Logger is a simple logging interface used to log internal errors and warnings // Logger is a simple logging interface used to log internal errors and warnings
@ -75,6 +76,8 @@ type Options struct {
PublicKey string PublicKey string
// DER format public key to use for signature verification. // DER format public key to use for signature verification.
PublicKeyDER []byte PublicKeyDER []byte
// UserAgent, if set, will be sent as the User-Agent header with each request.
UserAgent string
} }
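
For illustration, the new UserAgent option combines with the existing public-key options as follows; the agent string and helper name are made up, and PublicKey is assumed to hold a PEM-encoded key as before:

```go
package ctexample

import (
	"net/http"

	"github.com/google/certificate-transparency-go/client"
	"github.com/google/certificate-transparency-go/jsonclient"
)

// newClientWithUA builds a LogClient that sends a User-Agent header with every
// GET and POST it issues, and verifies signatures with the supplied PEM key.
func newClientWithUA(uri, pemPubKey string) (*client.LogClient, error) {
	opts := jsonclient.Options{
		UserAgent: "my-ct-tool/0.1", // omitted from requests if left empty
		PublicKey: pemPubKey,
	}
	return client.New(uri, &http.Client{}, opts)
}
```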
// ParsePublicKey parses and returns the public key contained in opts. // ParsePublicKey parses and returns the public key contained in opts.
@ -105,6 +108,19 @@ func (bl *basicLogger) Printf(msg string, args ...interface{}) {
log.Printf(msg, args...) log.Printf(msg, args...)
} }
// RspError represents an error that occurred when processing a response from a server,
// and also includes key details from the http.Response that triggered the error.
type RspError struct {
Err error
StatusCode int
Body []byte
}
// Error formats the RspError instance, focusing on the error.
func (e RspError) Error() string {
return e.Err.Error()
}
// New constructs a new JSONClient instance, for the given base URI, using the // New constructs a new JSONClient instance, for the given base URI, using the
// given http.Client object (if provided) and the Options object. // given http.Client object (if provided) and the Options object.
// If opts does not specify a public key, signatures will not be verified. // If opts does not specify a public key, signatures will not be verified.
@ -136,14 +152,19 @@ func New(uri string, hc *http.Client, opts Options) (*JSONClient, error) {
Verifier: verifier, Verifier: verifier,
logger: logger, logger: logger,
backoff: &backoff{}, backoff: &backoff{},
userAgent: opts.UserAgent,
}, nil }, nil
} }
// BaseURI returns the base URI that the JSONClient makes queries to.
func (c *JSONClient) BaseURI() string {
return c.uri
}
// GetAndParse makes a HTTP GET call to the given path, and attempts to parse
// the response as a JSON representation of the rsp structure. Returns the
// http.Response, the body of the response, and an error (which may be of
// type RspError if the HTTP response was available).
func (c *JSONClient) GetAndParse(ctx context.Context, path string, params map[string]string, rsp interface{}) (*http.Response, []byte, error) { func (c *JSONClient) GetAndParse(ctx context.Context, path string, params map[string]string, rsp interface{}) (*http.Response, []byte, error) {
if ctx == nil { if ctx == nil {
return nil, nil, errors.New("context.Context required") return nil, nil, errors.New("context.Context required")
@ -158,6 +179,9 @@ func (c *JSONClient) GetAndParse(ctx context.Context, path string, params map[st
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
if len(c.userAgent) != 0 {
httpReq.Header.Set("User-Agent", c.userAgent)
}
httpRsp, err := ctxhttp.Do(ctx, c.httpClient, httpReq) httpRsp, err := ctxhttp.Do(ctx, c.httpClient, httpReq)
if err != nil { if err != nil {
@ -168,15 +192,15 @@ func (c *JSONClient) GetAndParse(ctx context.Context, path string, params map[st
body, err := ioutil.ReadAll(httpRsp.Body) body, err := ioutil.ReadAll(httpRsp.Body)
httpRsp.Body.Close() httpRsp.Body.Close()
if err != nil { if err != nil {
return httpRsp, body, fmt.Errorf("failed to read response body: %v", err) return nil, nil, RspError{Err: fmt.Errorf("failed to read response body: %v", err), StatusCode: httpRsp.StatusCode, Body: body}
} }
if httpRsp.StatusCode != http.StatusOK { if httpRsp.StatusCode != http.StatusOK {
return httpRsp, body, fmt.Errorf("got HTTP Status %q", httpRsp.Status) return nil, nil, RspError{Err: fmt.Errorf("got HTTP Status %q", httpRsp.Status), StatusCode: httpRsp.StatusCode, Body: body}
} }
if err := json.NewDecoder(bytes.NewReader(body)).Decode(rsp); err != nil { if err := json.NewDecoder(bytes.NewReader(body)).Decode(rsp); err != nil {
return httpRsp, body, err return nil, nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
} }
return httpRsp, body, nil return httpRsp, body, nil
@ -185,9 +209,7 @@ func (c *JSONClient) GetAndParse(ctx context.Context, path string, params map[st
// PostAndParse makes a HTTP POST call to the given path, including the request
// parameters, and attempts to parse the response as a JSON representation of
// the rsp structure. Returns the http.Response, the body of the response, and
// an error (which may be of type RspError if the HTTP response was available).
func (c *JSONClient) PostAndParse(ctx context.Context, path string, req, rsp interface{}) (*http.Response, []byte, error) { func (c *JSONClient) PostAndParse(ctx context.Context, path string, req, rsp interface{}) (*http.Response, []byte, error) {
if ctx == nil { if ctx == nil {
return nil, nil, errors.New("context.Context required") return nil, nil, errors.New("context.Context required")
@ -202,6 +224,9 @@ func (c *JSONClient) PostAndParse(ctx context.Context, path string, req, rsp int
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
if len(c.userAgent) != 0 {
httpReq.Header.Set("User-Agent", c.userAgent)
}
httpReq.Header.Set("Content-Type", "application/json") httpReq.Header.Set("Content-Type", "application/json")
httpRsp, err := ctxhttp.Do(ctx, c.httpClient, httpReq) httpRsp, err := ctxhttp.Do(ctx, c.httpClient, httpReq)
@ -213,12 +238,15 @@ func (c *JSONClient) PostAndParse(ctx context.Context, path string, req, rsp int
httpRsp.Body.Close() httpRsp.Body.Close()
} }
if err != nil { if err != nil {
return httpRsp, body, err if httpRsp != nil {
return nil, nil, RspError{StatusCode: httpRsp.StatusCode, Body: body, Err: err}
}
return nil, nil, err
} }
if httpRsp.StatusCode == http.StatusOK { if httpRsp.StatusCode == http.StatusOK {
if err = json.Unmarshal(body, &rsp); err != nil { if err = json.Unmarshal(body, &rsp); err != nil {
return httpRsp, body, err return nil, nil, RspError{StatusCode: httpRsp.StatusCode, Body: body, Err: err}
} }
} }
return httpRsp, body, nil return httpRsp, body, nil
@ -255,7 +283,7 @@ func (c *JSONClient) PostAndParseWithRetry(ctx context.Context, path string, req
return nil, nil, err return nil, nil, err
} }
wait := c.backoff.set(nil) wait := c.backoff.set(nil)
c.logger.Printf("Request failed, backing-off for %s: %s", wait, err) c.logger.Printf("Request failed, backing-off on %s for %s: %s", c.uri, wait, err)
} else { } else {
switch { switch {
case httpRsp.StatusCode == http.StatusOK: case httpRsp.StatusCode == http.StatusOK:
@ -279,7 +307,10 @@ func (c *JSONClient) PostAndParseWithRetry(ctx context.Context, path string, req
wait := c.backoff.set(backoff) wait := c.backoff.set(backoff)
c.logger.Printf("Request failed, backing-off for %s: got HTTP status %s", wait, httpRsp.Status) c.logger.Printf("Request failed, backing-off for %s: got HTTP status %s", wait, httpRsp.Status)
default: default:
return httpRsp, body, fmt.Errorf("got HTTP Status %q", httpRsp.Status) return nil, nil, RspError{
StatusCode: httpRsp.StatusCode,
Body: body,
Err: fmt.Errorf("got HTTP status %q", httpRsp.Status)}
} }
} }
if err := c.waitForBackoff(ctx); err != nil { if err := c.waitForBackoff(ctx); err != nil {

View File

@ -20,6 +20,7 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"strings" "strings"
"time"
"github.com/google/certificate-transparency-go/tls" "github.com/google/certificate-transparency-go/tls"
"github.com/google/certificate-transparency-go/x509" "github.com/google/certificate-transparency-go/x509"
@ -127,7 +128,7 @@ func MerkleTreeLeafFromRawChain(rawChain []ASN1Cert, etype LogEntryType, timesta
chain := make([]*x509.Certificate, count) chain := make([]*x509.Certificate, count)
for i := range chain { for i := range chain {
cert, err := x509.ParseCertificate(rawChain[i].Data) cert, err := x509.ParseCertificate(rawChain[i].Data)
if err != nil { if x509.IsFatal(err) {
return nil, fmt.Errorf("failed to parse chain[%d] cert: %v", i, err) return nil, fmt.Errorf("failed to parse chain[%d] cert: %v", i, err)
} }
chain[i] = cert chain[i] = cert
@ -189,6 +190,53 @@ func MerkleTreeLeafFromChain(chain []*x509.Certificate, etype LogEntryType, time
return &leaf, nil return &leaf, nil
} }
// MerkleTreeLeafForEmbeddedSCT generates a MerkleTreeLeaf from a chain and an
// SCT timestamp, where the leaf certificate at chain[0] is a certificate that
// contains embedded SCTs. It is assumed that the timestamp provided is from
// one of the SCTs embedded within the leaf certificate.
func MerkleTreeLeafForEmbeddedSCT(chain []*x509.Certificate, timestamp uint64) (*MerkleTreeLeaf, error) {
// For building the leaf for a certificate and SCT where the SCT is embedded
// in the certificate, we need to build the original precertificate TBS
// data. First, parse the leaf cert and its issuer.
if len(chain) < 2 {
return nil, fmt.Errorf("no issuer cert available for precert leaf building")
}
issuer := chain[1]
cert := chain[0]
// Next, post-process the DER-encoded TBSCertificate, to remove the SCTList
// extension.
tbs, err := x509.RemoveSCTList(cert.RawTBSCertificate)
if err != nil {
return nil, fmt.Errorf("failed to remove SCT List extension: %v", err)
}
return &MerkleTreeLeaf{
Version: V1,
LeafType: TimestampedEntryLeafType,
TimestampedEntry: &TimestampedEntry{
EntryType: PrecertLogEntryType,
Timestamp: timestamp,
PrecertEntry: &PreCert{
IssuerKeyHash: sha256.Sum256(issuer.RawSubjectPublicKeyInfo),
TBSCertificate: tbs,
},
},
}, nil
}
// LeafHashForLeaf returns the leaf hash for a Merkle tree leaf.
func LeafHashForLeaf(leaf *MerkleTreeLeaf) ([sha256.Size]byte, error) {
leafData, err := tls.Marshal(*leaf)
if err != nil {
return [sha256.Size]byte{}, fmt.Errorf("failed to tls-encode MerkleTreeLeaf: %s", err)
}
data := append([]byte{TreeLeafPrefix}, leafData...)
leafHash := sha256.Sum256(data)
return leafHash, nil
}
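
To illustrate how the two new helpers combine (the wrapper below is made up): rebuild the precert leaf for a certificate carrying embedded SCTs, then compute the leaf hash the log would have incorporated.

```go
package ctexample

import (
	"encoding/base64"
	"fmt"

	ct "github.com/google/certificate-transparency-go"
	"github.com/google/certificate-transparency-go/x509"
)

// embeddedLeafHash rebuilds the precert leaf for chain[0] (which must carry
// embedded SCTs) using one embedded SCT's timestamp, and returns the
// corresponding Merkle leaf hash, base64-encoded.
func embeddedLeafHash(chain []*x509.Certificate, sctTimestamp uint64) (string, error) {
	leaf, err := ct.MerkleTreeLeafForEmbeddedSCT(chain, sctTimestamp)
	if err != nil {
		return "", fmt.Errorf("failed to build leaf: %v", err)
	}
	hash, err := ct.LeafHashForLeaf(leaf)
	if err != nil {
		return "", fmt.Errorf("failed to hash leaf: %v", err)
	}
	return base64.StdEncoding.EncodeToString(hash[:]), nil
}
```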
// IsPreIssuer indicates whether a certificate is a pre-cert issuer with the specific // IsPreIssuer indicates whether a certificate is a pre-cert issuer with the specific
// certificate transparency extended key usage. // certificate transparency extended key usage.
func IsPreIssuer(issuer *x509.Certificate) bool { func IsPreIssuer(issuer *x509.Certificate) bool {
@ -200,56 +248,100 @@ func IsPreIssuer(issuer *x509.Certificate) bool {
return false return false
} }
// LogEntryFromLeaf converts a LeafEntry object (which has the raw leaf data after JSON parsing) // RawLogEntryFromLeaf converts a LeafEntry object (which has the raw leaf data
// into a LogEntry object (which includes x509.Certificate objects, after TLS and ASN.1 parsing). // after JSON parsing) into a RawLogEntry object (i.e. a TLS-parsed structure).
// Note that this function may return a valid LogEntry object and a non-nil error value, when func RawLogEntryFromLeaf(index int64, entry *LeafEntry) (*RawLogEntry, error) {
// the error indicates a non-fatal parsing error (of type x509.NonFatalErrors). ret := RawLogEntry{Index: index}
func LogEntryFromLeaf(index int64, leafEntry *LeafEntry) (*LogEntry, error) { if rest, err := tls.Unmarshal(entry.LeafInput, &ret.Leaf); err != nil {
var leaf MerkleTreeLeaf return nil, fmt.Errorf("failed to unmarshal MerkleTreeLeaf: %v", err)
if rest, err := tls.Unmarshal(leafEntry.LeafInput, &leaf); err != nil {
return nil, fmt.Errorf("failed to unmarshal MerkleTreeLeaf for index %d: %v", index, err)
} else if len(rest) > 0 { } else if len(rest) > 0 {
return nil, fmt.Errorf("trailing data (%d bytes) after MerkleTreeLeaf for index %d", len(rest), index) return nil, fmt.Errorf("MerkleTreeLeaf: trailing data %d bytes", len(rest))
} }
var err error switch eType := ret.Leaf.TimestampedEntry.EntryType; eType {
entry := LogEntry{Index: index, Leaf: leaf}
switch leaf.TimestampedEntry.EntryType {
case X509LogEntryType: case X509LogEntryType:
var certChain CertificateChain var certChain CertificateChain
if rest, err := tls.Unmarshal(leafEntry.ExtraData, &certChain); err != nil { if rest, err := tls.Unmarshal(entry.ExtraData, &certChain); err != nil {
return nil, fmt.Errorf("failed to unmarshal ExtraData for index %d: %v", index, err) return nil, fmt.Errorf("failed to unmarshal CertificateChain: %v", err)
} else if len(rest) > 0 { } else if len(rest) > 0 {
return nil, fmt.Errorf("trailing data (%d bytes) after CertificateChain for index %d", len(rest), index) return nil, fmt.Errorf("CertificateChain: trailing data %d bytes", len(rest))
}
entry.Chain = certChain.Entries
entry.X509Cert, err = leaf.X509Certificate()
if _, ok := err.(x509.NonFatalErrors); !ok && err != nil {
return nil, fmt.Errorf("failed to parse certificate in MerkleTreeLeaf for index %d: %v", index, err)
} }
ret.Cert = *ret.Leaf.TimestampedEntry.X509Entry
ret.Chain = certChain.Entries
case PrecertLogEntryType: case PrecertLogEntryType:
var precertChain PrecertChainEntry var precertChain PrecertChainEntry
if rest, err := tls.Unmarshal(leafEntry.ExtraData, &precertChain); err != nil { if rest, err := tls.Unmarshal(entry.ExtraData, &precertChain); err != nil {
return nil, fmt.Errorf("failed to unmarshal PrecertChainEntry for index %d: %v", index, err) return nil, fmt.Errorf("failed to unmarshal PrecertChainEntry: %v", err)
} else if len(rest) > 0 { } else if len(rest) > 0 {
return nil, fmt.Errorf("trailing data (%d bytes) after PrecertChainEntry for index %d", len(rest), index) return nil, fmt.Errorf("PrecertChainEntry: trailing data %d bytes", len(rest))
} }
entry.Chain = precertChain.CertificateChain ret.Cert = precertChain.PreCertificate
ret.Chain = precertChain.CertificateChain
default:
// TODO(pavelkalinnikov): Section 4.6 of RFC6962 implies that unknown types
// are not errors. We should revisit how we process this case.
return nil, fmt.Errorf("unknown entry type: %v", eType)
}
return &ret, nil
}
// ToLogEntry converts RawLogEntry to a LogEntry, which includes an x509-parsed
// (pre-)certificate.
//
// Note that this function may return a valid LogEntry object and a non-nil
// error value, when the error indicates a non-fatal parsing error.
func (rle *RawLogEntry) ToLogEntry() (*LogEntry, error) {
var err error
entry := LogEntry{Index: rle.Index, Leaf: rle.Leaf, Chain: rle.Chain}
switch eType := rle.Leaf.TimestampedEntry.EntryType; eType {
case X509LogEntryType:
entry.X509Cert, err = rle.Leaf.X509Certificate()
if x509.IsFatal(err) {
return nil, fmt.Errorf("failed to parse certificate: %v", err)
}
case PrecertLogEntryType:
var tbsCert *x509.Certificate var tbsCert *x509.Certificate
tbsCert, err = leaf.Precertificate() tbsCert, err = rle.Leaf.Precertificate()
if _, ok := err.(x509.NonFatalErrors); !ok && err != nil { if x509.IsFatal(err) {
return nil, fmt.Errorf("failed to parse precertificate in MerkleTreeLeaf for index %d: %v", index, err) return nil, fmt.Errorf("failed to parse precertificate: %v", err)
} }
entry.Precert = &Precertificate{ entry.Precert = &Precertificate{
Submitted: precertChain.PreCertificate, Submitted: rle.Cert,
IssuerKeyHash: leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash, IssuerKeyHash: rle.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash,
TBSCertificate: tbsCert, TBSCertificate: tbsCert,
} }
default: default:
return nil, fmt.Errorf("saw unknown entry type at index %d: %v", index, leaf.TimestampedEntry.EntryType) return nil, fmt.Errorf("unknown entry type: %v", eType)
} }
// err may hold a x509.NonFatalErrors object.
// err may be non-nil for a non-fatal error.
return &entry, err return &entry, err
} }
// LogEntryFromLeaf converts a LeafEntry object (which has the raw leaf data
// after JSON parsing) into a LogEntry object (which includes x509.Certificate
// objects, after TLS and ASN.1 parsing).
//
// Note that this function may return a valid LogEntry object and a non-nil
// error value, when the error indicates a non-fatal parsing error.
func LogEntryFromLeaf(index int64, leaf *LeafEntry) (*LogEntry, error) {
rle, err := RawLogEntryFromLeaf(index, leaf)
if err != nil {
return nil, err
}
return rle.ToLogEntry()
}
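
A sketch of the intended two-stage flow (helper and output are made up): RawLogEntryFromLeaf does the TLS-level parsing, ToLogEntry then attempts the x509 parsing, and x509.IsFatal distinguishes hard failures from warnings.

```go
package ctexample

import (
	"fmt"

	ct "github.com/google/certificate-transparency-go"
	"github.com/google/certificate-transparency-go/x509"
)

// describeLeaf TLS-parses a raw leaf and then attempts the x509 parse,
// treating non-fatal certificate parse errors as warnings rather than failures.
func describeLeaf(index int64, leaf *ct.LeafEntry) error {
	rle, err := ct.RawLogEntryFromLeaf(index, leaf)
	if err != nil {
		return fmt.Errorf("entry %d: TLS parsing failed: %v", index, err)
	}
	entry, err := rle.ToLogEntry()
	if x509.IsFatal(err) {
		return fmt.Errorf("entry %d: certificate parsing failed: %v", index, err)
	}
	if err != nil {
		fmt.Printf("entry %d: non-fatal parse issue: %v\n", index, err)
	}
	switch {
	case entry.X509Cert != nil:
		fmt.Printf("entry %d: certificate for %q\n", index, entry.X509Cert.Subject.CommonName)
	case entry.Precert != nil:
		fmt.Printf("entry %d: precertificate for %q\n", index, entry.Precert.TBSCertificate.Subject.CommonName)
	}
	return nil
}
```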
// TimestampToTime converts a timestamp in the style of RFC 6962 (milliseconds
// since UNIX epoch) to a Go Time.
func TimestampToTime(ts uint64) time.Time {
secs := int64(ts / 1000)
msecs := int64(ts % 1000)
return time.Unix(secs, msecs*1000000)
}
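
A tiny worked example (timestamp value chosen arbitrarily): 1512556025588 ms after the UNIX epoch is 2017-12-06 10:27:05.588 UTC.

```go
package ctexample

import (
	"fmt"

	ct "github.com/google/certificate-transparency-go"
)

func printSCTTime() {
	// 1512556025588 ms => 2017-12-06 10:27:05.588 +0000 UTC
	fmt.Println(ct.TimestampToTime(1512556025588).UTC())
}
```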

View File

@ -20,8 +20,8 @@ import (
"crypto/elliptic" "crypto/elliptic"
"crypto/rsa" "crypto/rsa"
"crypto/sha256" "crypto/sha256"
"encoding/base64"
"encoding/pem" "encoding/pem"
"flag"
"fmt" "fmt"
"log" "log"
@ -29,8 +29,10 @@ import (
"github.com/google/certificate-transparency-go/x509" "github.com/google/certificate-transparency-go/x509"
) )
var allowVerificationWithNonCompliantKeys = flag.Bool("allow_verification_with_non_compliant_keys", false, // AllowVerificationWithNonCompliantKeys may be set to true in order to allow
"Allow a SignatureVerifier to use keys which are technically non-compliant with RFC6962.") // SignatureVerifier to use keys which are technically non-compliant with
// RFC6962.
var AllowVerificationWithNonCompliantKeys = false
// PublicKeyFromPEM parses a PEM formatted block and returns the public key contained within and any remaining unread bytes, or an error. // PublicKeyFromPEM parses a PEM formatted block and returns the public key contained within and any remaining unread bytes, or an error.
func PublicKeyFromPEM(b []byte) (crypto.PublicKey, SHA256Hash, []byte, error) { func PublicKeyFromPEM(b []byte) (crypto.PublicKey, SHA256Hash, []byte, error) {
@ -42,9 +44,18 @@ func PublicKeyFromPEM(b []byte) (crypto.PublicKey, SHA256Hash, []byte, error) {
return k, sha256.Sum256(p.Bytes), rest, err return k, sha256.Sum256(p.Bytes), rest, err
} }
// PublicKeyFromB64 parses a base64-encoded public key.
func PublicKeyFromB64(b64PubKey string) (crypto.PublicKey, error) {
der, err := base64.StdEncoding.DecodeString(b64PubKey)
if err != nil {
return nil, fmt.Errorf("error decoding public key: %s", err)
}
return x509.ParsePKIXPublicKey(der)
}
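
A sketch combining the new helper with the existing verifier constructor (the wrapper is made up; the base64 key is expected in the DER-then-base64 form used by CT log list metadata). Note that the old --allow_verification_with_non_compliant_keys flag is replaced by the package variable set below.

```go
package ctexample

import (
	"fmt"

	ct "github.com/google/certificate-transparency-go"
)

// verifierFromB64 builds a SignatureVerifier from a base64 DER public key.
func verifierFromB64(b64PubKey string, allowWeakKeys bool) (*ct.SignatureVerifier, error) {
	pk, err := ct.PublicKeyFromB64(b64PubKey)
	if err != nil {
		return nil, fmt.Errorf("failed to parse key: %v", err)
	}
	if allowWeakKeys {
		// Replaces the old --allow_verification_with_non_compliant_keys flag.
		ct.AllowVerificationWithNonCompliantKeys = true
	}
	return ct.NewSignatureVerifier(pk)
}
```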
// SignatureVerifier can verify signatures on SCTs and STHs // SignatureVerifier can verify signatures on SCTs and STHs
type SignatureVerifier struct { type SignatureVerifier struct {
pubKey crypto.PublicKey PubKey crypto.PublicKey
} }
// NewSignatureVerifier creates a new SignatureVerifier using the passed in PublicKey. // NewSignatureVerifier creates a new SignatureVerifier using the passed in PublicKey.
@ -53,7 +64,7 @@ func NewSignatureVerifier(pk crypto.PublicKey) (*SignatureVerifier, error) {
case *rsa.PublicKey: case *rsa.PublicKey:
if pkType.N.BitLen() < 2048 { if pkType.N.BitLen() < 2048 {
e := fmt.Errorf("public key is RSA with < 2048 bits (size:%d)", pkType.N.BitLen()) e := fmt.Errorf("public key is RSA with < 2048 bits (size:%d)", pkType.N.BitLen())
if !(*allowVerificationWithNonCompliantKeys) { if !AllowVerificationWithNonCompliantKeys {
return nil, e return nil, e
} }
log.Printf("WARNING: %v", e) log.Printf("WARNING: %v", e)
@ -62,7 +73,7 @@ func NewSignatureVerifier(pk crypto.PublicKey) (*SignatureVerifier, error) {
params := *(pkType.Params()) params := *(pkType.Params())
if params != *elliptic.P256().Params() { if params != *elliptic.P256().Params() {
e := fmt.Errorf("public is ECDSA, but not on the P256 curve") e := fmt.Errorf("public is ECDSA, but not on the P256 curve")
if !(*allowVerificationWithNonCompliantKeys) { if !AllowVerificationWithNonCompliantKeys {
return nil, e return nil, e
} }
log.Printf("WARNING: %v", e) log.Printf("WARNING: %v", e)
@ -72,14 +83,12 @@ func NewSignatureVerifier(pk crypto.PublicKey) (*SignatureVerifier, error) {
return nil, fmt.Errorf("Unsupported public key type %v", pkType) return nil, fmt.Errorf("Unsupported public key type %v", pkType)
} }
return &SignatureVerifier{ return &SignatureVerifier{PubKey: pk}, nil
pubKey: pk,
}, nil
} }
// VerifySignature verifies the given signature sig matches the data. // VerifySignature verifies the given signature sig matches the data.
func (s SignatureVerifier) VerifySignature(data []byte, sig tls.DigitallySigned) error { func (s SignatureVerifier) VerifySignature(data []byte, sig tls.DigitallySigned) error {
return tls.VerifySignature(s.pubKey, data, sig) return tls.VerifySignature(s.PubKey, data, sig)
} }
// VerifySCTSignature verifies that the SCT's signature is valid for the given LogEntry. // VerifySCTSignature verifies that the SCT's signature is valid for the given LogEntry.

View File

@ -14,7 +14,13 @@
package tls package tls
import "fmt" import (
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/rsa"
"fmt"
)
// DigitallySigned gives information about a signature, including the algorithm used // DigitallySigned gives information about a signature, including the algorithm used
// and the signature value. Defined in RFC 5246 s4.7. // and the signature value. Defined in RFC 5246 s4.7.
@ -94,3 +100,18 @@ func (s SignatureAlgorithm) String() string {
return fmt.Sprintf("UNKNOWN(%d)", s) return fmt.Sprintf("UNKNOWN(%d)", s)
} }
} }
// SignatureAlgorithmFromPubKey returns the algorithm used for this public key.
// ECDSA, RSA, and DSA keys are supported. Other key types will return Anonymous.
func SignatureAlgorithmFromPubKey(k crypto.PublicKey) SignatureAlgorithm {
switch k.(type) {
case *ecdsa.PublicKey:
return ECDSA
case *rsa.PublicKey:
return RSA
case *dsa.PublicKey:
return DSA
default:
return Anonymous
}
}
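
A small illustration (the helper is made up): generate a P-256 key and ask the tls package which signature algorithm it maps to.

```go
package ctexample

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	"github.com/google/certificate-transparency-go/tls"
)

// algorithmForFreshKey generates a P-256 key and reports the signature
// algorithm the tls package associates with it (ECDSA in this case).
func algorithmForFreshKey() (tls.SignatureAlgorithm, error) {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return tls.Anonymous, err
	}
	algo := tls.SignatureAlgorithmFromPubKey(&key.PublicKey)
	fmt.Println(algo) // ECDSA
	return algo, nil
}
```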

View File

@ -54,6 +54,12 @@ func (e LogEntryType) String() string {
} }
} }
// RFC6962 section 2.1 requires a prefix byte on hash inputs for second preimage resistance.
const (
TreeLeafPrefix = byte(0x00)
TreeNodePrefix = byte(0x01)
)
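
For illustration, the RFC 6962 domain separation these prefixes provide looks like the sketch below (helper names made up); the LeafHashForLeaf helper shown earlier applies the same leaf-side construction.

```go
package ctexample

import (
	"crypto/sha256"

	ct "github.com/google/certificate-transparency-go"
)

// rfc6962LeafHash hashes a leaf as SHA-256(0x00 || data).
func rfc6962LeafHash(leafData []byte) [sha256.Size]byte {
	return sha256.Sum256(append([]byte{ct.TreeLeafPrefix}, leafData...))
}

// rfc6962NodeHash hashes an interior node as SHA-256(0x01 || left || right).
func rfc6962NodeHash(left, right [sha256.Size]byte) [sha256.Size]byte {
	data := append([]byte{ct.TreeNodePrefix}, left[:]...)
	data = append(data, right[:]...)
	return sha256.Sum256(data)
}
```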
// MerkleLeafType represents the MerkleLeafType enum from section 3.4: // MerkleLeafType represents the MerkleLeafType enum from section 3.4:
// enum { timestamped_entry(0), (255) } MerkleLeafType; // enum { timestamped_entry(0), (255) } MerkleLeafType;
type MerkleLeafType tls.Enum // tls:"maxval:255" type MerkleLeafType tls.Enum // tls:"maxval:255"
@ -193,6 +199,25 @@ func (d *DigitallySigned) UnmarshalJSON(b []byte) error {
return d.FromBase64String(content) return d.FromBase64String(content)
} }
// RawLogEntry represents the (TLS-parsed) contents of an entry in a CT log.
type RawLogEntry struct {
// Index is a position of the entry in the log.
Index int64
// Leaf is a parsed Merkle leaf hash input.
Leaf MerkleTreeLeaf
// Cert is:
// - A certificate if Leaf.TimestampedEntry.EntryType is X509LogEntryType.
// - A precertificate if Leaf.TimestampedEntry.EntryType is
// PrecertLogEntryType, in the form of a DER-encoded Certificate as
// originally added (which includes the poison extension and a signature
// generated over the pre-cert by the pre-cert issuer).
// - Empty otherwise.
Cert ASN1Cert
// Chain is the issuing certificate chain starting with the issuer of Cert,
// or an empty slice if Cert is empty.
Chain []ASN1Cert
}
// LogEntry represents the (parsed) contents of an entry in a CT log. This is described // LogEntry represents the (parsed) contents of an entry in a CT log. This is described
// in section 3.1, but note that this structure does *not* match the TLS structure // in section 3.1, but note that this structure does *not* match the TLS structure
// defined there (the TLS structure is never used directly in RFC6962). // defined there (the TLS structure is never used directly in RFC6962).
@ -273,6 +298,23 @@ type SignedTreeHead struct {
LogID SHA256Hash `json:"log_id"` // The SHA256 hash of the log's public key LogID SHA256Hash `json:"log_id"` // The SHA256 hash of the log's public key
} }
func (s SignedTreeHead) String() string {
sigStr, err := s.TreeHeadSignature.Base64String()
if err != nil {
sigStr = tls.DigitallySigned(s.TreeHeadSignature).String()
}
// If the LogID field in the SignedTreeHead is empty, don't include it in
// the string.
var logIDStr string
if id, empty := s.LogID, (SHA256Hash{}); id != empty {
logIDStr = fmt.Sprintf("LogID:%s, ", id.Base64String())
}
return fmt.Sprintf("{%sTreeSize:%d, Timestamp:%d, SHA256RootHash:%q, TreeHeadSignature:%q}",
logIDStr, s.TreeSize, s.Timestamp, s.SHA256RootHash.Base64String(), sigStr)
}
// TreeHeadSignature holds the data over which the signature in an STH is // TreeHeadSignature holds the data over which the signature in an STH is
// generated; see section 3.5 // generated; see section 3.5
type TreeHeadSignature struct { type TreeHeadSignature struct {
@ -368,7 +410,27 @@ func (m *MerkleTreeLeaf) Precertificate() (*x509.Certificate, error) {
return x509.ParseTBSCertificate(m.TimestampedEntry.PrecertEntry.TBSCertificate) return x509.ParseTBSCertificate(m.TimestampedEntry.PrecertEntry.TBSCertificate)
} }
// APIEndpoint is a string that represents one of the Certificate Transparency
// Log API endpoints.
type APIEndpoint string
// Certificate Transparency Log API endpoints; see section 4.
// WARNING: Should match the URI paths without the "/ct/v1/" prefix. If
// changing these constants, may need to change those too.
const (
AddChainStr APIEndpoint = "add-chain"
AddPreChainStr APIEndpoint = "add-pre-chain"
GetSTHStr APIEndpoint = "get-sth"
GetEntriesStr APIEndpoint = "get-entries"
GetProofByHashStr APIEndpoint = "get-proof-by-hash"
GetSTHConsistencyStr APIEndpoint = "get-sth-consistency"
GetRootsStr APIEndpoint = "get-roots"
GetEntryAndProofStr APIEndpoint = "get-entry-and-proof"
)
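
As a quick illustration of the relationship the warnings describe (helper names made up): prefixing an APIEndpoint with "/ct/v1/" yields the corresponding path constant.

```go
package ctexample

import (
	"fmt"

	ct "github.com/google/certificate-transparency-go"
)

// endpointPath maps an APIEndpoint to its URI path by adding the common prefix.
func endpointPath(ep ct.APIEndpoint) string {
	return "/ct/v1/" + string(ep)
}

func demoEndpointPath() {
	fmt.Println(endpointPath(ct.GetSTHStr) == ct.GetSTHPath) // true
}
```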
// URI paths for Log requests; see section 4. // URI paths for Log requests; see section 4.
// WARNING: Should match the API endpoints, with the "/ct/v1/" prefix. If
// changing these constants, may need to change those too.
const ( const (
AddChainPath = "/ct/v1/add-chain" AddChainPath = "/ct/v1/add-chain"
AddPreChainPath = "/ct/v1/add-pre-chain" AddPreChainPath = "/ct/v1/add-pre-chain"
@ -415,6 +477,29 @@ type GetSTHResponse struct {
TreeHeadSignature []byte `json:"tree_head_signature"` // Log signature for this STH TreeHeadSignature []byte `json:"tree_head_signature"` // Log signature for this STH
} }
// ToSignedTreeHead creates a SignedTreeHead from the GetSTHResponse.
func (r *GetSTHResponse) ToSignedTreeHead() (*SignedTreeHead, error) {
sth := SignedTreeHead{
TreeSize: r.TreeSize,
Timestamp: r.Timestamp,
}
if len(r.SHA256RootHash) != sha256.Size {
return nil, fmt.Errorf("sha256_root_hash is invalid length, expected %d got %d", sha256.Size, len(r.SHA256RootHash))
}
copy(sth.SHA256RootHash[:], r.SHA256RootHash)
var ds DigitallySigned
if rest, err := tls.Unmarshal(r.TreeHeadSignature, &ds); err != nil {
return nil, fmt.Errorf("tls.Unmarshal(): %s", err)
} else if len(rest) > 0 {
return nil, fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest))
}
sth.TreeHeadSignature = ds
return &sth, nil
}
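
A sketch of how a caller outside LogClient might use the new conversion together with signature verification (helper made up; sv is assumed to be a ct.SignatureVerifier built for the log's public key), mirroring what LogClient.GetSTH now does internally.

```go
package ctexample

import (
	"fmt"

	ct "github.com/google/certificate-transparency-go"
)

// verifiedSTH converts a raw get-sth response into a SignedTreeHead and checks
// its signature with the given verifier.
func verifiedSTH(resp *ct.GetSTHResponse, sv *ct.SignatureVerifier) (*ct.SignedTreeHead, error) {
	sth, err := resp.ToSignedTreeHead()
	if err != nil {
		return nil, fmt.Errorf("invalid get-sth response: %v", err)
	}
	if err := sv.VerifySTHSignature(*sth); err != nil {
		return nil, fmt.Errorf("STH signature check failed: %v", err)
	}
	return sth, nil
}
```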
// GetSTHConsistencyResponse represents the JSON response to the get-sth-consistency // GetSTHConsistencyResponse represents the JSON response to the get-sth-consistency
// GET method from section 4.4. (The corresponding GET request has parameters 'first' and // GET method from section 4.4. (The corresponding GET request has parameters 'first' and
// 'second'.) // 'second'.)

View File

@ -25,16 +25,43 @@ func NewCertPool() *CertPool {
} }
} }
func (s *CertPool) copy() *CertPool {
p := &CertPool{
bySubjectKeyId: make(map[string][]int, len(s.bySubjectKeyId)),
byName: make(map[string][]int, len(s.byName)),
certs: make([]*Certificate, len(s.certs)),
}
for k, v := range s.bySubjectKeyId {
indexes := make([]int, len(v))
copy(indexes, v)
p.bySubjectKeyId[k] = indexes
}
for k, v := range s.byName {
indexes := make([]int, len(v))
copy(indexes, v)
p.byName[k] = indexes
}
copy(p.certs, s.certs)
return p
}
// SystemCertPool returns a copy of the system cert pool. // SystemCertPool returns a copy of the system cert pool.
// //
// Any mutations to the returned pool are not written to disk and do // Any mutations to the returned pool are not written to disk and do
// not affect any other pool. // not affect any other pool.
//
// New changes in the system cert pool might not be reflected
// in subsequent calls.
func SystemCertPool() (*CertPool, error) { func SystemCertPool() (*CertPool, error) {
if runtime.GOOS == "windows" { if runtime.GOOS == "windows" {
// Issue 16736, 18609: // Issue 16736, 18609:
return nil, errors.New("crypto/x509: system root pool is not available on Windows") return nil, errors.New("crypto/x509: system root pool is not available on Windows")
} }
if sysRoots := systemRootsPool(); sysRoots != nil {
return sysRoots.copy(), nil
}
return loadSystemRoots() return loadSystemRoots()
} }
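
To illustrate the copy-on-return behaviour (helper made up; extraPEM is any PEM certificate bundle): mutating one returned pool leaves a subsequently fetched pool untouched.

```go
package ctexample

import (
	"fmt"

	"github.com/google/certificate-transparency-go/x509"
)

// poolsAreIndependent demonstrates that SystemCertPool hands back a copy:
// appending to one returned pool does not affect a second call.
func poolsAreIndependent(extraPEM []byte) error {
	a, err := x509.SystemCertPool()
	if err != nil {
		return err
	}
	a.AppendCertsFromPEM(extraPEM)

	b, err := x509.SystemCertPool()
	if err != nil {
		return err
	}
	fmt.Printf("pool a has %d subjects, pool b has %d\n", len(a.Subjects()), len(b.Subjects()))
	return nil
}
```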
@ -121,7 +148,7 @@ func (s *CertPool) AppendCertsFromPEM(pemCerts []byte) (ok bool) {
} }
cert, err := ParseCertificate(block.Bytes) cert, err := ParseCertificate(block.Bytes)
if err != nil { if IsFatal(err) {
continue continue
} }

View File

@ -0,0 +1,37 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
import (
"crypto/elliptic"
"math/big"
"sync"
)
// This file holds ECC curves that are not supported by the main Go crypto/elliptic
// library, but which have been observed in certificates in the wild.
var initonce sync.Once
var p192r1 *elliptic.CurveParams
func initAllCurves() {
initSECP192R1()
}
func initSECP192R1() {
// See SEC-2, section 2.2.2
p192r1 = &elliptic.CurveParams{Name: "P-192"}
p192r1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF", 16)
p192r1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFF99DEF836146BC9B1B4D22831", 16)
p192r1.B, _ = new(big.Int).SetString("64210519E59C80E70FA7E9AB72243049FEB8DEECC146B9B1", 16)
p192r1.Gx, _ = new(big.Int).SetString("188DA80EB03090F67CBF20EB43A18800F4FF0AFD82FF1012", 16)
p192r1.Gy, _ = new(big.Int).SetString("07192B95FFC8DA78631011ED6B24CDD573F977A11E794811", 16)
p192r1.BitSize = 192
}
func secp192r1() elliptic.Curve {
initonce.Do(initAllCurves)
return p192r1
}

View File

@ -7,7 +7,7 @@
package x509 package x509
/* /*
#cgo CFLAGS: -mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1080 #cgo CFLAGS: -mmacosx-version-min=10.6
#cgo LDFLAGS: -framework CoreFoundation -framework Security #cgo LDFLAGS: -framework CoreFoundation -framework Security
#include <CoreFoundation/CoreFoundation.h> #include <CoreFoundation/CoreFoundation.h>

View File

@ -7,7 +7,7 @@
package x509 package x509
/* /*
#cgo CFLAGS: -mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1080 #cgo CFLAGS: -mmacosx-version-min=10.6
#cgo LDFLAGS: -framework CoreFoundation -framework Security #cgo LDFLAGS: -framework CoreFoundation -framework Security
#include <CoreFoundation/CoreFoundation.h> #include <CoreFoundation/CoreFoundation.h>

View File

@ -57,7 +57,7 @@ func ParsePKCS8PrivateKey(der []byte) (key interface{}, err error) {
} }
// MarshalPKCS8PrivateKey converts a private key to PKCS#8 encoded form. // MarshalPKCS8PrivateKey converts a private key to PKCS#8 encoded form.
// The following key types are supported: *rsa.PrivateKey, *ecdsa.PublicKey. // The following key types are supported: *rsa.PrivateKey, *ecdsa.PrivateKey.
// Unsupported key types result in an error. // Unsupported key types result in an error.
// //
// See RFC 5208. // See RFC 5208.

View File

@ -7,14 +7,12 @@
package pkix package pkix
import ( import (
// START CT CHANGES
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"github.com/google/certificate-transparency-go/asn1"
// END CT CHANGES
"math/big" "math/big"
"time" "time"
"github.com/google/certificate-transparency-go/asn1"
) )
// AlgorithmIdentifier represents the ASN.1 structure of the same name. See RFC // AlgorithmIdentifier represents the ASN.1 structure of the same name. See RFC
@ -98,7 +96,7 @@ func (r RDNSequence) String() string {
type RelativeDistinguishedNameSET []AttributeTypeAndValue type RelativeDistinguishedNameSET []AttributeTypeAndValue
// AttributeTypeAndValue mirrors the ASN.1 structure of the same name in // AttributeTypeAndValue mirrors the ASN.1 structure of the same name in
// http://tools.ietf.org/html/rfc5280#section-4.1.2.4 // https://tools.ietf.org/html/rfc5280#section-4.1.2.4
type AttributeTypeAndValue struct { type AttributeTypeAndValue struct {
Type asn1.ObjectIdentifier Type asn1.ObjectIdentifier
Value interface{} Value interface{}

View File

@ -0,0 +1,20 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.11
package x509
import (
"syscall"
"unsafe"
)
// For Go versions >= 1.11, the ExtraPolicyPara field in
// syscall.CertChainPolicyPara is of type syscall.Pointer. See:
// https://github.com/golang/go/commit/4869ec00e87ef
func convertToPolicyParaType(p unsafe.Pointer) syscall.Pointer {
return (syscall.Pointer)(p)
}

View File

@ -0,0 +1,17 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.11
package x509
import "unsafe"
// For Go versions before 1.11, the ExtraPolicyPara field in
// syscall.CertChainPolicyPara was of type uintptr. See:
// https://github.com/golang/go/commit/4869ec00e87ef
func convertToPolicyParaType(p unsafe.Pointer) uintptr {
return uintptr(p)
}

View File

@ -14,12 +14,15 @@ import (
"github.com/google/certificate-transparency-go/x509/pkix" "github.com/google/certificate-transparency-go/x509/pkix"
) )
// OID values for CRL extensions (TBSCertList.Extensions), RFC 5280 s5.2.
var ( var (
// OID values for CRL extensions (TBSCertList.Extensions), RFC 5280 s5.2.
OIDExtensionCRLNumber = asn1.ObjectIdentifier{2, 5, 29, 20} OIDExtensionCRLNumber = asn1.ObjectIdentifier{2, 5, 29, 20}
OIDExtensionDeltaCRLIndicator = asn1.ObjectIdentifier{2, 5, 29, 27} OIDExtensionDeltaCRLIndicator = asn1.ObjectIdentifier{2, 5, 29, 27}
OIDExtensionIssuingDistributionPoint = asn1.ObjectIdentifier{2, 5, 29, 28} OIDExtensionIssuingDistributionPoint = asn1.ObjectIdentifier{2, 5, 29, 28}
// OID values for CRL entry extensions (RevokedCertificate.Extensions), RFC 5280 s5.3 )
// OID values for CRL entry extensions (RevokedCertificate.Extensions), RFC 5280 s5.3
var (
OIDExtensionCRLReasons = asn1.ObjectIdentifier{2, 5, 29, 21} OIDExtensionCRLReasons = asn1.ObjectIdentifier{2, 5, 29, 21}
OIDExtensionInvalidityDate = asn1.ObjectIdentifier{2, 5, 29, 24} OIDExtensionInvalidityDate = asn1.ObjectIdentifier{2, 5, 29, 24}
OIDExtensionCertificateIssuer = asn1.ObjectIdentifier{2, 5, 29, 29} OIDExtensionCertificateIssuer = asn1.ObjectIdentifier{2, 5, 29, 29}
@ -238,7 +241,7 @@ func ParseCertificateListDER(derBytes []byte) (*CertificateList, error) {
} }
case e.Id.Equal(OIDExtensionAuthorityInfoAccess): case e.Id.Equal(OIDExtensionAuthorityInfoAccess):
// RFC 5280 s5.2.7 // RFC 5280 s5.2.7
var aia []authorityInfoAccess var aia []accessDescription
if rest, err := asn1.Unmarshal(e.Value, &aia); err != nil { if rest, err := asn1.Unmarshal(e.Value, &aia); err != nil {
errs.AddID(ErrInvalidCertListAuthInfoAccess, err) errs.AddID(ErrInvalidCertListAuthInfoAccess, err)
} else if len(rest) != 0 { } else if len(rest) != 0 {

View File

@ -19,4 +19,7 @@ func systemRootsPool() *CertPool {
func initSystemRoots() { func initSystemRoots() {
systemRoots, systemRootsErr = loadSystemRoots() systemRoots, systemRootsErr = loadSystemRoots()
if systemRootsErr != nil {
systemRoots = nil
}
} }

View File

@ -7,7 +7,7 @@
package x509 package x509
/* /*
#cgo CFLAGS: -mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1080 #cgo CFLAGS: -mmacosx-version-min=10.10 -D__MAC_OS_X_VERSION_MAX_ALLOWED=101300
#cgo LDFLAGS: -framework CoreFoundation -framework Security #cgo LDFLAGS: -framework CoreFoundation -framework Security
#include <errno.h> #include <errno.h>
@ -16,58 +16,6 @@ package x509
#include <CoreFoundation/CoreFoundation.h> #include <CoreFoundation/CoreFoundation.h>
#include <Security/Security.h> #include <Security/Security.h>
// FetchPEMRootsCTX509_MountainLion is the version of FetchPEMRoots from Go 1.6
// which still works on OS X 10.8 (Mountain Lion).
// It lacks support for admin & user cert domains.
// See golang.org/issue/16473
int FetchPEMRootsCTX509_MountainLion(CFDataRef *pemRoots) {
if (pemRoots == NULL) {
return -1;
}
CFArrayRef certs = NULL;
OSStatus err = SecTrustCopyAnchorCertificates(&certs);
if (err != noErr) {
return -1;
}
CFMutableDataRef combinedData = CFDataCreateMutable(kCFAllocatorDefault, 0);
int i, ncerts = CFArrayGetCount(certs);
for (i = 0; i < ncerts; i++) {
CFDataRef data = NULL;
SecCertificateRef cert = (SecCertificateRef)CFArrayGetValueAtIndex(certs, i);
if (cert == NULL) {
continue;
}
// Note: SecKeychainItemExport is deprecated as of 10.7 in favor of SecItemExport.
// Once we support weak imports via cgo we should prefer that, and fall back to this
// for older systems.
err = SecKeychainItemExport(cert, kSecFormatX509Cert, kSecItemPemArmour, NULL, &data);
if (err != noErr) {
continue;
}
if (data != NULL) {
CFDataAppendBytes(combinedData, CFDataGetBytePtr(data), CFDataGetLength(data));
CFRelease(data);
}
}
CFRelease(certs);
*pemRoots = combinedData;
return 0;
}
// useOldCodeCTX509 reports whether the running machine is OS X 10.8 Mountain Lion
// or older. We only support Mountain Lion and higher, but we'll at least try our
// best on older machines and continue to use the old code path.
//
// See golang.org/issue/16473
int useOldCodeCTX509() {
char str[256];
size_t size = sizeof(str);
memset(str, 0, size);
sysctlbyname("kern.osrelease", str, &size, NULL, 0);
// OS X 10.8 is osrelease "12.*", 10.7 is 11.*, 10.6 is 10.*.
// We never supported things before that.
return memcmp(str, "12.", 3) == 0 || memcmp(str, "11.", 3) == 0 || memcmp(str, "10.", 3) == 0;
}
// FetchPEMRootsCTX509 fetches the system's list of trusted X.509 root certificates. // FetchPEMRootsCTX509 fetches the system's list of trusted X.509 root certificates.
// //
@ -78,9 +26,7 @@ int useOldCodeCTX509() {
// Note: The CFDataRef returned in pemRoots and untrustedPemRoots must // Note: The CFDataRef returned in pemRoots and untrustedPemRoots must
// be released (using CFRelease) after we've consumed its content. // be released (using CFRelease) after we've consumed its content.
int FetchPEMRootsCTX509(CFDataRef *pemRoots, CFDataRef *untrustedPemRoots) { int FetchPEMRootsCTX509(CFDataRef *pemRoots, CFDataRef *untrustedPemRoots) {
if (useOldCodeCTX509()) { int i;
return FetchPEMRootsCTX509_MountainLion(pemRoots);
}
// Get certificates from all domains, not just System, this lets // Get certificates from all domains, not just System, this lets
// the user add CAs to their "login" keychain, and Admins to add // the user add CAs to their "login" keychain, and Admins to add
@ -101,7 +47,8 @@ int FetchPEMRootsCTX509(CFDataRef *pemRoots, CFDataRef *untrustedPemRoots) {
CFMutableDataRef combinedData = CFDataCreateMutable(kCFAllocatorDefault, 0); CFMutableDataRef combinedData = CFDataCreateMutable(kCFAllocatorDefault, 0);
CFMutableDataRef combinedUntrustedData = CFDataCreateMutable(kCFAllocatorDefault, 0); CFMutableDataRef combinedUntrustedData = CFDataCreateMutable(kCFAllocatorDefault, 0);
for (int i = 0; i < numDomains; i++) { for (i = 0; i < numDomains; i++) {
int j;
CFArrayRef certs = NULL; CFArrayRef certs = NULL;
OSStatus err = SecTrustSettingsCopyCertificates(domains[i], &certs); OSStatus err = SecTrustSettingsCopyCertificates(domains[i], &certs);
if (err != noErr) { if (err != noErr) {
@ -109,7 +56,7 @@ int FetchPEMRootsCTX509(CFDataRef *pemRoots, CFDataRef *untrustedPemRoots) {
} }
CFIndex numCerts = CFArrayGetCount(certs); CFIndex numCerts = CFArrayGetCount(certs);
for (int j = 0; j < numCerts; j++) { for (j = 0; j < numCerts; j++) {
CFDataRef data = NULL; CFDataRef data = NULL;
CFErrorRef errRef = NULL; CFErrorRef errRef = NULL;
CFArrayRef trustSettings = NULL; CFArrayRef trustSettings = NULL;
@ -124,6 +71,9 @@ int FetchPEMRootsCTX509(CFDataRef *pemRoots, CFDataRef *untrustedPemRoots) {
if (i == 0) { if (i == 0) {
trustAsRoot = 1; trustAsRoot = 1;
} else { } else {
int k;
CFIndex m;
// Certs found in the system domain are always trusted. If the user // Certs found in the system domain are always trusted. If the user
// configures "Never Trust" on such a cert, it will also be found in the // configures "Never Trust" on such a cert, it will also be found in the
// admin or user domain, causing it to be added to untrustedPemRoots. The // admin or user domain, causing it to be added to untrustedPemRoots. The
@ -133,7 +83,7 @@ int FetchPEMRootsCTX509(CFDataRef *pemRoots, CFDataRef *untrustedPemRoots) {
// SecTrustServer.c, "user trust settings overrule admin trust settings", // SecTrustServer.c, "user trust settings overrule admin trust settings",
// so take the last trust settings array we find. // so take the last trust settings array we find.
// Skip the system domain since it is always trusted. // Skip the system domain since it is always trusted.
for (int k = i; k < numDomains; k++) { for (k = i; k < numDomains; k++) {
CFArrayRef domainTrustSettings = NULL; CFArrayRef domainTrustSettings = NULL;
err = SecTrustSettingsCopyTrustSettings(cert, domains[k], &domainTrustSettings); err = SecTrustSettingsCopyTrustSettings(cert, domains[k], &domainTrustSettings);
if (err == errSecSuccess && domainTrustSettings != NULL) { if (err == errSecSuccess && domainTrustSettings != NULL) {
@ -147,9 +97,9 @@ int FetchPEMRootsCTX509(CFDataRef *pemRoots, CFDataRef *untrustedPemRoots) {
// "this certificate must be verified to a known trusted certificate"; aka not a root. // "this certificate must be verified to a known trusted certificate"; aka not a root.
continue; continue;
} }
for (CFIndex k = 0; k < CFArrayGetCount(trustSettings); k++) { for (m = 0; m < CFArrayGetCount(trustSettings); m++) {
CFNumberRef cfNum; CFNumberRef cfNum;
CFDictionaryRef tSetting = (CFDictionaryRef)CFArrayGetValueAtIndex(trustSettings, k); CFDictionaryRef tSetting = (CFDictionaryRef)CFArrayGetValueAtIndex(trustSettings, m);
if (CFDictionaryGetValueIfPresent(tSetting, policy, (const void**)&cfNum)){ if (CFDictionaryGetValueIfPresent(tSetting, policy, (const void**)&cfNum)){
SInt32 result = 0; SInt32 result = 0;
CFNumberGetValue(cfNum, kCFNumberSInt32Type, &result); CFNumberGetValue(cfNum, kCFNumberSInt32Type, &result);
@ -187,10 +137,7 @@ int FetchPEMRootsCTX509(CFDataRef *pemRoots, CFDataRef *untrustedPemRoots) {
} }
} }
// Note: SecKeychainItemExport is deprecated as of 10.7 in favor of SecItemExport. err = SecItemExport(cert, kSecFormatX509Cert, kSecItemPemArmour, NULL, &data);
// Once we support weak imports via cgo we should prefer that, and fall back to this
// for older systems.
err = SecKeychainItemExport(cert, kSecFormatX509Cert, kSecItemPemArmour, NULL, &data);
if (err != noErr) { if (err != noErr) {
continue; continue;
} }

View File

@ -181,12 +181,12 @@ func verifyCertWithSystem(block *pem.Block, cert *Certificate) bool {
} }
if err := cmd.Run(); err != nil { if err := cmd.Run(); err != nil {
if debugExecDarwinRoots { if debugExecDarwinRoots {
println(fmt.Sprintf("crypto/x509: verify-cert rejected %s: %q", cert.Subject.CommonName, bytes.TrimSpace(stderr.Bytes()))) println(fmt.Sprintf("crypto/x509: verify-cert rejected %s: %q", cert.Subject, bytes.TrimSpace(stderr.Bytes())))
} }
return false return false
} }
if debugExecDarwinRoots { if debugExecDarwinRoots {
println(fmt.Sprintf("crypto/x509: verify-cert approved %s", cert.Subject.CommonName)) println(fmt.Sprintf("crypto/x509: verify-cert approved %s", cert.Subject))
} }
return true return true
} }


@ -0,0 +1,10 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build js,wasm
package x509
// Possible certificate files; stop after finding one.
var certFiles = []string{}


@ -33,5 +33,8 @@ func loadSystemRoots() (*CertPool, error) {
bestErr = err bestErr = err
} }
} }
if bestErr == nil {
return roots, nil
}
return nil, bestErr return nil, bestErr
} }


@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build dragonfly freebsd linux nacl netbsd openbsd solaris // +build dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris
package x509 package x509
@ -80,7 +80,7 @@ func loadSystemRoots() (*CertPool, error) {
} }
} }
if len(roots.certs) > 0 { if len(roots.certs) > 0 || firstErr == nil {
return roots, nil return roots, nil
} }


@ -109,7 +109,7 @@ func checkChainSSLServerPolicy(c *Certificate, chainCtx *syscall.CertChainContex
sslPara.Size = uint32(unsafe.Sizeof(*sslPara)) sslPara.Size = uint32(unsafe.Sizeof(*sslPara))
para := &syscall.CertChainPolicyPara{ para := &syscall.CertChainPolicyPara{
ExtraPolicyPara: uintptr(unsafe.Pointer(sslPara)), ExtraPolicyPara: convertToPolicyParaType(unsafe.Pointer(sslPara)),
} }
para.Size = uint32(unsafe.Sizeof(*para)) para.Size = uint32(unsafe.Sizeof(*para))


@ -0,0 +1,242 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"github.com/google/certificate-transparency-go/asn1"
)
// IPAddressPrefix describes an IP address prefix as an ASN.1 bit string,
// where the BitLength field holds the prefix length.
type IPAddressPrefix asn1.BitString
// IPAddressRange describes an (inclusive) IP address range.
type IPAddressRange struct {
Min IPAddressPrefix
Max IPAddressPrefix
}
// Most relevant values for AFI from:
// http://www.iana.org/assignments/address-family-numbers.
const (
IPv4AddressFamilyIndicator = uint16(1)
IPv6AddressFamilyIndicator = uint16(2)
)
// IPAddressFamilyBlocks describes a set of ranges of IP addresses.
type IPAddressFamilyBlocks struct {
// AFI holds an address family indicator from
// http://www.iana.org/assignments/address-family-numbers.
AFI uint16
// SAFI holds a subsequent address family indicator from
// http://www.iana.org/assignments/safi-namespace.
SAFI byte
// InheritFromIssuer indicates that the set of addresses should
// be taken from the issuer's certificate.
InheritFromIssuer bool
// AddressPrefixes holds prefixes if InheritFromIssuer is false.
AddressPrefixes []IPAddressPrefix
// AddressRanges holds ranges if InheritFromIssuer is false.
AddressRanges []IPAddressRange
}
// Internal types for asn1 unmarshalling.
type ipAddressFamily struct {
AddressFamily []byte // 2-byte AFI plus optional 1 byte SAFI
Choice asn1.RawValue
}
// Internally, use raw asn1.BitString rather than the IPAddressPrefix
// type alias (so that asn1.Unmarshal() decodes properly).
type ipAddressRange struct {
Min asn1.BitString
Max asn1.BitString
}
func parseRPKIAddrBlocks(data []byte, nfe *NonFatalErrors) []*IPAddressFamilyBlocks {
// RFC 3779 2.2.3
// IPAddrBlocks ::= SEQUENCE OF IPAddressFamily
//
// IPAddressFamily ::= SEQUENCE { -- AFI & optional SAFI --
// addressFamily OCTET STRING (SIZE (2..3)),
// ipAddressChoice IPAddressChoice }
//
// IPAddressChoice ::= CHOICE {
// inherit NULL, -- inherit from issuer --
// addressesOrRanges SEQUENCE OF IPAddressOrRange }
//
// IPAddressOrRange ::= CHOICE {
// addressPrefix IPAddress,
// addressRange IPAddressRange }
//
// IPAddressRange ::= SEQUENCE {
// min IPAddress,
// max IPAddress }
//
// IPAddress ::= BIT STRING
var addrBlocks []ipAddressFamily
if rest, err := asn1.Unmarshal(data, &addrBlocks); err != nil {
nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks extension: %v", err))
return nil
} else if len(rest) != 0 {
nfe.AddError(errors.New("trailing data after ipAddrBlocks extension"))
return nil
}
var results []*IPAddressFamilyBlocks
for i, block := range addrBlocks {
var fam IPAddressFamilyBlocks
if l := len(block.AddressFamily); l < 2 || l > 3 {
nfe.AddError(fmt.Errorf("invalid address family length (%d) for ipAddrBlock.addressFamily", l))
continue
}
fam.AFI = binary.BigEndian.Uint16(block.AddressFamily[0:2])
if len(block.AddressFamily) > 2 {
fam.SAFI = block.AddressFamily[2]
}
// IPAddressChoice is an ASN.1 CHOICE where the chosen alternative is indicated by (implicit)
// tagging of the alternatives -- here, either NULL or SEQUENCE OF.
if bytes.Equal(block.Choice.FullBytes, asn1.NullBytes) {
fam.InheritFromIssuer = true
results = append(results, &fam)
continue
}
var addrRanges []asn1.RawValue
if _, err := asn1.Unmarshal(block.Choice.FullBytes, &addrRanges); err != nil {
nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges: %v", i, err))
continue
}
for j, ar := range addrRanges {
// Each IPAddressOrRange is a CHOICE where the alternatives have distinct (implicit)
// tags -- here, either BIT STRING or SEQUENCE.
switch ar.Tag {
case asn1.TagBitString:
// BIT STRING for single prefix IPAddress
var val asn1.BitString
if _, err := asn1.Unmarshal(ar.FullBytes, &val); err != nil {
nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d].addressPrefix: %v", i, j, err))
continue
}
fam.AddressPrefixes = append(fam.AddressPrefixes, IPAddressPrefix(val))
case asn1.TagSequence:
var val ipAddressRange
if _, err := asn1.Unmarshal(ar.FullBytes, &val); err != nil {
nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d].addressRange: %v", i, j, err))
continue
}
fam.AddressRanges = append(fam.AddressRanges, IPAddressRange{Min: IPAddressPrefix(val.Min), Max: IPAddressPrefix(val.Max)})
default:
nfe.AddError(fmt.Errorf("unexpected ASN.1 type in ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d]: %+v", i, j, ar))
}
}
results = append(results, &fam)
}
return results
}
// ASIDRange describes an inclusive range of AS Identifiers (AS numbers or routing
// domain identifiers).
type ASIDRange struct {
Min int
Max int
}
// ASIdentifiers describes a collection of AS Identifiers (AS numbers or routing
// domain identifiers).
type ASIdentifiers struct {
// InheritFromIssuer indicates that the set of AS identifiers should
// be taken from the issuer's certificate.
InheritFromIssuer bool
// ASIDs holds AS identifiers if InheritFromIssuer is false.
ASIDs []int
// ASIDs holds AS identifier ranges (inclusive) if InheritFromIssuer is false.
ASIDRanges []ASIDRange
}
type asIdentifiers struct {
ASNum asn1.RawValue `asn1:"optional,tag:0"`
RDI asn1.RawValue `asn1:"optional,tag:1"`
}
func parseASIDChoice(val asn1.RawValue, nfe *NonFatalErrors) *ASIdentifiers {
// RFC 3779 2.3.2
// ASIdentifierChoice ::= CHOICE {
// inherit NULL, -- inherit from issuer --
// asIdsOrRanges SEQUENCE OF ASIdOrRange }
// ASIdOrRange ::= CHOICE {
// id ASId,
// range ASRange }
// ASRange ::= SEQUENCE {
// min ASId,
// max ASId }
// ASId ::= INTEGER
if len(val.FullBytes) == 0 { // OPTIONAL
return nil
}
// ASIdentifierChoice is an ASN.1 CHOICE where the chosen alternative is indicated by (implicit)
// tagging of the alternatives -- here, either NULL or SEQUENCE OF.
if bytes.Equal(val.Bytes, asn1.NullBytes) {
return &ASIdentifiers{InheritFromIssuer: true}
}
var ids []asn1.RawValue
if rest, err := asn1.Unmarshal(val.Bytes, &ids); err != nil {
nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges: %v", err))
return nil
} else if len(rest) != 0 {
nfe.AddError(errors.New("trailing data after ASIdentifiers.asIdsOrRanges"))
return nil
}
var asID ASIdentifiers
for i, id := range ids {
// Each ASIdOrRange is a CHOICE where the alternatives have distinct (implicit)
// tags -- here, either INTEGER or SEQUENCE.
switch id.Tag {
case asn1.TagInteger:
var val int
if _, err := asn1.Unmarshal(id.FullBytes, &val); err != nil {
nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges[%d].id: %v", i, err))
continue
}
asID.ASIDs = append(asID.ASIDs, val)
case asn1.TagSequence:
var val ASIDRange
if _, err := asn1.Unmarshal(id.FullBytes, &val); err != nil {
nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges[%d].range: %v", i, err))
continue
}
asID.ASIDRanges = append(asID.ASIDRanges, val)
default:
nfe.AddError(fmt.Errorf("unexpected value in ASIdentifiers.asIdsOrRanges[%d]: %+v", i, id))
}
}
return &asID
}
func parseRPKIASIdentifiers(data []byte, nfe *NonFatalErrors) (*ASIdentifiers, *ASIdentifiers) {
// RFC 3779 2.3.2
// ASIdentifiers ::= SEQUENCE {
// asnum [0] EXPLICIT ASIdentifierChoice OPTIONAL,
// rdi [1] EXPLICIT ASIdentifierChoice OPTIONAL}
var asIDs asIdentifiers
if rest, err := asn1.Unmarshal(data, &asIDs); err != nil {
nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers extension: %v", err))
return nil, nil
} else if len(rest) != 0 {
nfe.AddError(errors.New("trailing data after ASIdentifiers extension"))
return nil, nil
}
return parseASIDChoice(asIDs.ASNum, nfe), parseASIDChoice(asIDs.RDI, nfe)
}
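
The RFC 3779 data parsed by this new file surfaces on the `Certificate` type via the `RPKIAddressRanges`, `RPKIASNumbers` and `RPKIRoutingDomainIDs` fields added further down in x509.go. A minimal sketch of reading it back out, assuming a DER-encoded certificate in a hypothetical `der.crt` file:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/google/certificate-transparency-go/x509"
)

func main() {
	der, err := ioutil.ReadFile("der.crt") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	// ParseCertificate may return both a certificate and a NonFatalErrors
	// value; only bail out on fatal errors.
	cert, err := x509.ParseCertificate(der)
	if x509.IsFatal(err) {
		log.Fatalf("failed to parse certificate: %v", err)
	}
	for _, blocks := range cert.RPKIAddressRanges {
		fmt.Printf("AFI=%d SAFI=%d inherit=%v\n", blocks.AFI, blocks.SAFI, blocks.InheritFromIssuer)
		for _, p := range blocks.AddressPrefixes {
			// IPAddressPrefix is an asn1.BitString; BitLength is the prefix length.
			fmt.Printf("  prefix %x/%d\n", p.Bytes, p.BitLength)
		}
		for _, r := range blocks.AddressRanges {
			fmt.Printf("  range %x..%x\n", r.Min.Bytes, r.Max.Bytes)
		}
	}
	if as := cert.RPKIASNumbers; as != nil {
		fmt.Printf("AS numbers: inherit=%v ids=%v ranges=%+v\n", as.InheritFromIssuer, as.ASIDs, as.ASIDRanges)
	}
}
```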


@ -72,11 +72,12 @@ func parseECPrivateKey(namedCurveOID *asn1.ObjectIdentifier, der []byte) (key *e
return nil, fmt.Errorf("x509: unknown EC private key version %d", privKey.Version) return nil, fmt.Errorf("x509: unknown EC private key version %d", privKey.Version)
} }
var nfe NonFatalErrors
var curve elliptic.Curve var curve elliptic.Curve
if namedCurveOID != nil { if namedCurveOID != nil {
curve = namedCurveFromOID(*namedCurveOID) curve = namedCurveFromOID(*namedCurveOID, &nfe)
} else { } else {
curve = namedCurveFromOID(privKey.NamedCurveOID) curve = namedCurveFromOID(privKey.NamedCurveOID, &nfe)
} }
if curve == nil { if curve == nil {
return nil, errors.New("x509: unknown elliptic curve") return nil, errors.New("x509: unknown elliptic curve")


@ -10,6 +10,7 @@ import (
"fmt" "fmt"
"net" "net"
"net/url" "net/url"
"os"
"reflect" "reflect"
"runtime" "runtime"
"strings" "strings"
@ -17,6 +18,9 @@ import (
"unicode/utf8" "unicode/utf8"
) )
// ignoreCN disables interpreting Common Name as a hostname. See issue 24151.
var ignoreCN = strings.Contains(os.Getenv("GODEBUG"), "x509ignoreCN=1")
type InvalidReason int type InvalidReason int
const ( const (
@ -41,21 +45,25 @@ const (
NameMismatch NameMismatch
// NameConstraintsWithoutSANs results when a leaf certificate doesn't // NameConstraintsWithoutSANs results when a leaf certificate doesn't
// contain a Subject Alternative Name extension, but a CA certificate // contain a Subject Alternative Name extension, but a CA certificate
// contains name constraints. // contains name constraints, and the Common Name can be interpreted as
// a hostname.
//
// You can avoid this error by setting the experimental GODEBUG environment
// variable to "x509ignoreCN=1", disabling Common Name matching entirely.
// This behavior might become the default in the future.
NameConstraintsWithoutSANs NameConstraintsWithoutSANs
// UnconstrainedName results when a CA certificate contains permitted // UnconstrainedName results when a CA certificate contains permitted
// name constraints, but leaf certificate contains a name of an // name constraints, but leaf certificate contains a name of an
// unsupported or unconstrained type. // unsupported or unconstrained type.
UnconstrainedName UnconstrainedName
// TooManyConstraints results when the number of comparision operations // TooManyConstraints results when the number of comparison operations
// needed to check a certificate exceeds the limit set by // needed to check a certificate exceeds the limit set by
// VerifyOptions.MaxConstraintComparisions. This limit exists to // VerifyOptions.MaxConstraintComparisions. This limit exists to
// prevent pathological certificates can consuming excessive amounts of // prevent pathological certificates can consuming excessive amounts of
// CPU time to verify. // CPU time to verify.
TooManyConstraints TooManyConstraints
// CANotAuthorizedForExtKeyUsage results when an intermediate or root // CANotAuthorizedForExtKeyUsage results when an intermediate or root
// certificate does not permit an extended key usage that is claimed by // certificate does not permit a requested extended key usage.
// the leaf certificate.
CANotAuthorizedForExtKeyUsage CANotAuthorizedForExtKeyUsage
) )
@ -80,7 +88,7 @@ func (e CertificateInvalidError) Error() string {
case TooManyIntermediates: case TooManyIntermediates:
return "x509: too many intermediates for path length constraint" return "x509: too many intermediates for path length constraint"
case IncompatibleUsage: case IncompatibleUsage:
return "x509: certificate specifies an incompatible key usage: " + e.Detail return "x509: certificate specifies an incompatible key usage"
case NameMismatch: case NameMismatch:
return "x509: issuer name does not match subject from issuing certificate" return "x509: issuer name does not match subject from issuing certificate"
case NameConstraintsWithoutSANs: case NameConstraintsWithoutSANs:
@ -101,6 +109,12 @@ type HostnameError struct {
func (h HostnameError) Error() string { func (h HostnameError) Error() string {
c := h.Certificate c := h.Certificate
if !c.hasSANExtension() && !validHostname(c.Subject.CommonName) &&
matchHostnames(toLowerCaseASCII(c.Subject.CommonName), toLowerCaseASCII(h.Host)) {
// This would have validated, if it weren't for the validHostname check on Common Name.
return "x509: Common Name is not a valid hostname: " + c.Subject.CommonName
}
var valid string var valid string
if ip := net.ParseIP(h.Host); ip != nil { if ip := net.ParseIP(h.Host); ip != nil {
// Trying to validate an IP // Trying to validate an IP
@ -114,10 +128,10 @@ func (h HostnameError) Error() string {
valid += san.String() valid += san.String()
} }
} else { } else {
if c.hasSANExtension() { if c.commonNameAsHostname() {
valid = strings.Join(c.DNSNames, ", ")
} else {
valid = c.Subject.CommonName valid = c.Subject.CommonName
} else {
valid = strings.Join(c.DNSNames, ", ")
} }
} }
@ -174,19 +188,28 @@ var errNotParsed = errors.New("x509: missing ASN.1 contents; use ParseCertificat
// VerifyOptions contains parameters for Certificate.Verify. It's a structure // VerifyOptions contains parameters for Certificate.Verify. It's a structure
// because other PKIX verification APIs have ended up needing many options. // because other PKIX verification APIs have ended up needing many options.
type VerifyOptions struct { type VerifyOptions struct {
DNSName string DNSName string
Intermediates *CertPool Intermediates *CertPool
Roots *CertPool // if nil, the system roots are used Roots *CertPool // if nil, the system roots are used
CurrentTime time.Time // if zero, the current time is used CurrentTime time.Time // if zero, the current time is used
DisableTimeChecks bool // Options to disable various verification checks.
// KeyUsage specifies which Extended Key Usage values are acceptable. DisableTimeChecks bool
// An empty list means ExtKeyUsageServerAuth. Key usage is considered a DisableCriticalExtensionChecks bool
// constraint down the chain which mirrors Windows CryptoAPI behavior, DisableNameChecks bool
// but not the spec. To accept any key usage, include ExtKeyUsageAny. DisableEKUChecks bool
DisablePathLenChecks bool
DisableNameConstraintChecks bool
// KeyUsage specifies which Extended Key Usage values are acceptable. A leaf
// certificate is accepted if it contains any of the listed values. An empty
// list means ExtKeyUsageServerAuth. To accept any key usage, include
// ExtKeyUsageAny.
//
// Certificate chains are required to nest these extended key usage values.
// (This matches the Windows CryptoAPI behavior, but not the spec.)
KeyUsages []ExtKeyUsage KeyUsages []ExtKeyUsage
// MaxConstraintComparisions is the maximum number of comparisons to // MaxConstraintComparisions is the maximum number of comparisons to
// perform when checking a given certificate's name constraints. If // perform when checking a given certificate's name constraints. If
// zero, a sensible default is used. This limit prevents pathalogical // zero, a sensible default is used. This limit prevents pathological
// certificates from consuming excessive amounts of CPU time when // certificates from consuming excessive amounts of CPU time when
// validating. // validating.
MaxConstraintComparisions int MaxConstraintComparisions int
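
The new `Disable*` fields make it possible to relax individual checks without forking `Verify` itself. A minimal sketch of how a caller might use them; `leaf`, `roots` and `intermediates` are assumed to come from elsewhere, and the hostname is illustrative:

```go
package example

import (
	"github.com/google/certificate-transparency-go/x509"
)

// verifyRelaxed builds chains for leaf while skipping expiry and EKU checks.
// A sketch only, not Boulder or library code.
func verifyRelaxed(leaf *x509.Certificate, roots, intermediates *x509.CertPool) ([][]*x509.Certificate, error) {
	opts := x509.VerifyOptions{
		DNSName:           "example.com", // leaf hostname to check, if any
		Roots:             roots,
		Intermediates:     intermediates,
		DisableTimeChecks: true, // skip NotBefore/NotAfter validation
		DisableEKUChecks:  true, // accept chains regardless of EKU nesting
		KeyUsages:         []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	}
	return leaf.Verify(opts)
}
```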
@ -544,49 +567,14 @@ func (c *Certificate) checkNameConstraints(count *int,
return nil return nil
} }
// ekuPermittedBy returns true iff the given extended key usage is permitted by
// the given EKU from a certificate. Normally, this would be a simple
// comparison plus a special case for the “any” EKU. But, in order to support
// existing certificates, some exceptions are made.
func ekuPermittedBy(eku, certEKU ExtKeyUsage) bool {
if certEKU == ExtKeyUsageAny || eku == certEKU {
return true
}
// Some exceptions are made to support existing certificates. Firstly,
// the ServerAuth and SGC EKUs are treated as a group.
mapServerAuthEKUs := func(eku ExtKeyUsage) ExtKeyUsage {
if eku == ExtKeyUsageNetscapeServerGatedCrypto || eku == ExtKeyUsageMicrosoftServerGatedCrypto {
return ExtKeyUsageServerAuth
}
return eku
}
eku = mapServerAuthEKUs(eku)
certEKU = mapServerAuthEKUs(certEKU)
if eku == certEKU ||
// ServerAuth in a CA permits ClientAuth in the leaf.
(eku == ExtKeyUsageClientAuth && certEKU == ExtKeyUsageServerAuth) ||
// Any CA may issue an OCSP responder certificate.
eku == ExtKeyUsageOCSPSigning ||
// Code-signing CAs can use Microsoft's commercial and
// kernel-mode EKUs.
((eku == ExtKeyUsageMicrosoftCommercialCodeSigning || eku == ExtKeyUsageMicrosoftKernelCodeSigning) && certEKU == ExtKeyUsageCodeSigning) {
return true
}
return false
}
// isValid performs validity checks on c given that it is a candidate to append // isValid performs validity checks on c given that it is a candidate to append
// to the chain in currentChain. // to the chain in currentChain.
func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *VerifyOptions) error { func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *VerifyOptions) error {
if len(c.UnhandledCriticalExtensions) > 0 { if !opts.DisableCriticalExtensionChecks && len(c.UnhandledCriticalExtensions) > 0 {
return UnhandledCriticalExtension{ID: c.UnhandledCriticalExtensions[0]} return UnhandledCriticalExtension{ID: c.UnhandledCriticalExtensions[0]}
} }
if len(currentChain) > 0 { if !opts.DisableNameChecks && len(currentChain) > 0 {
child := currentChain[len(currentChain)-1] child := currentChain[len(currentChain)-1]
if !bytes.Equal(child.RawIssuer, c.RawSubject) { if !bytes.Equal(child.RawIssuer, c.RawSubject) {
return CertificateInvalidError{c, NameMismatch, ""} return CertificateInvalidError{c, NameMismatch, ""}
@ -617,24 +605,22 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
leaf = currentChain[0] leaf = currentChain[0]
} }
if (certType == intermediateCertificate || certType == rootCertificate) && c.hasNameConstraints() { checkNameConstraints := !opts.DisableNameConstraintChecks && (certType == intermediateCertificate || certType == rootCertificate) && c.hasNameConstraints()
sanExtension, ok := leaf.getSANExtension() if checkNameConstraints && leaf.commonNameAsHostname() {
if !ok { // This is the deprecated, legacy case of depending on the commonName as
// This is the deprecated, legacy case of depending on // a hostname. We don't enforce name constraints against the CN, but
// the CN as a hostname. Chains modern enough to be // VerifyHostname will look for hostnames in there if there are no SANs.
// using name constraints should not be depending on // In order to ensure VerifyHostname will not accept an unchecked name,
// CNs. // return an error here.
return CertificateInvalidError{c, NameConstraintsWithoutSANs, ""} return CertificateInvalidError{c, NameConstraintsWithoutSANs, ""}
} } else if checkNameConstraints && leaf.hasSANExtension() {
err := forEachSAN(leaf.getSANExtension(), func(tag int, data []byte) error {
err := forEachSAN(sanExtension, func(tag int, data []byte) error {
switch tag { switch tag {
case nameTypeEmail: case nameTypeEmail:
name := string(data) name := string(data)
mailbox, ok := parseRFC2821Mailbox(name) mailbox, ok := parseRFC2821Mailbox(name)
if !ok { if !ok {
// This certificate should not have parsed. return fmt.Errorf("x509: cannot parse rfc822Name %q", mailbox)
return errors.New("x509: internal error: rfc822Name SAN failed to parse")
} }
if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "email address", name, mailbox, if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "email address", name, mailbox,
@ -646,6 +632,10 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
case nameTypeDNS: case nameTypeDNS:
name := string(data) name := string(data)
if _, ok := domainToReverseLabels(name); !ok {
return fmt.Errorf("x509: cannot parse dnsName %q", name)
}
if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "DNS name", name, name, if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "DNS name", name, name,
func(parsedName, constraint interface{}) (bool, error) { func(parsedName, constraint interface{}) (bool, error) {
return matchDomainConstraint(parsedName.(string), constraint.(string)) return matchDomainConstraint(parsedName.(string), constraint.(string))
@ -692,59 +682,6 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
} }
} }
checkEKUs := certType == intermediateCertificate
// If no extended key usages are specified, then all are acceptable.
if checkEKUs && (len(c.ExtKeyUsage) == 0 && len(c.UnknownExtKeyUsage) == 0) {
checkEKUs = false
}
// If the “any” key usage is permitted, then no more checks are needed.
if checkEKUs {
for _, caEKU := range c.ExtKeyUsage {
comparisonCount++
if caEKU == ExtKeyUsageAny {
checkEKUs = false
break
}
}
}
if checkEKUs {
NextEKU:
for _, eku := range leaf.ExtKeyUsage {
if comparisonCount > maxConstraintComparisons {
return CertificateInvalidError{c, TooManyConstraints, ""}
}
for _, caEKU := range c.ExtKeyUsage {
comparisonCount++
if ekuPermittedBy(eku, caEKU) {
continue NextEKU
}
}
oid, _ := oidFromExtKeyUsage(eku)
return CertificateInvalidError{c, CANotAuthorizedForExtKeyUsage, fmt.Sprintf("EKU not permitted: %#v", oid)}
}
NextUnknownEKU:
for _, eku := range leaf.UnknownExtKeyUsage {
if comparisonCount > maxConstraintComparisons {
return CertificateInvalidError{c, TooManyConstraints, ""}
}
for _, caEKU := range c.UnknownExtKeyUsage {
comparisonCount++
if caEKU.Equal(eku) {
continue NextUnknownEKU
}
}
return CertificateInvalidError{c, CANotAuthorizedForExtKeyUsage, fmt.Sprintf("EKU not permitted: %#v", eku)}
}
}
// KeyUsage status flags are ignored. From Engineering Security, Peter // KeyUsage status flags are ignored. From Engineering Security, Peter
// Gutmann: A European government CA marked its signing certificates as // Gutmann: A European government CA marked its signing certificates as
// being valid for encryption only, but no-one noticed. Another // being valid for encryption only, but no-one noticed. Another
@ -766,7 +703,7 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
return CertificateInvalidError{c, NotAuthorizedToSign, ""} return CertificateInvalidError{c, NotAuthorizedToSign, ""}
} }
if c.BasicConstraintsValid && c.MaxPathLen >= 0 { if !opts.DisablePathLenChecks && c.BasicConstraintsValid && c.MaxPathLen >= 0 {
numIntermediates := len(currentChain) - 1 numIntermediates := len(currentChain) - 1
if numIntermediates > c.MaxPathLen { if numIntermediates > c.MaxPathLen {
return CertificateInvalidError{c, TooManyIntermediates, ""} return CertificateInvalidError{c, TooManyIntermediates, ""}
@ -833,36 +770,6 @@ func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err e
} }
} }
requestedKeyUsages := make([]ExtKeyUsage, len(opts.KeyUsages))
copy(requestedKeyUsages, opts.KeyUsages)
if len(requestedKeyUsages) == 0 {
requestedKeyUsages = append(requestedKeyUsages, ExtKeyUsageServerAuth)
}
// If no key usages are specified, then any are acceptable.
checkEKU := len(c.ExtKeyUsage) > 0
for _, eku := range requestedKeyUsages {
if eku == ExtKeyUsageAny {
checkEKU = false
break
}
}
if checkEKU {
NextUsage:
for _, eku := range requestedKeyUsages {
for _, leafEKU := range c.ExtKeyUsage {
if ekuPermittedBy(eku, leafEKU) {
continue NextUsage
}
}
oid, _ := oidFromExtKeyUsage(eku)
return nil, CertificateInvalidError{c, IncompatibleUsage, fmt.Sprintf("%#v", oid)}
}
}
var candidateChains [][]*Certificate var candidateChains [][]*Certificate
if opts.Roots.contains(c) { if opts.Roots.contains(c) {
candidateChains = append(candidateChains, []*Certificate{c}) candidateChains = append(candidateChains, []*Certificate{c})
@ -872,7 +779,29 @@ func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err e
} }
} }
return candidateChains, nil keyUsages := opts.KeyUsages
if len(keyUsages) == 0 {
keyUsages = []ExtKeyUsage{ExtKeyUsageServerAuth}
}
// If any key usage is acceptable then we're done.
for _, usage := range keyUsages {
if usage == ExtKeyUsageAny {
return candidateChains, nil
}
}
for _, candidate := range candidateChains {
if opts.DisableEKUChecks || checkChainForKeyUsage(candidate, keyUsages) {
chains = append(chains, candidate)
}
}
if len(chains) == 0 {
return nil, CertificateInvalidError{c, IncompatibleUsage, ""}
}
return chains, nil
} }
func appendToFreshChain(chain []*Certificate, cert *Certificate) []*Certificate { func appendToFreshChain(chain []*Certificate, cert *Certificate) []*Certificate {
@ -940,6 +869,64 @@ nextIntermediate:
return return
} }
// validHostname returns whether host is a valid hostname that can be matched or
// matched against according to RFC 6125 2.2, with some leniency to accomodate
// legacy values.
func validHostname(host string) bool {
host = strings.TrimSuffix(host, ".")
if len(host) == 0 {
return false
}
for i, part := range strings.Split(host, ".") {
if part == "" {
// Empty label.
return false
}
if i == 0 && part == "*" {
// Only allow full left-most wildcards, as those are the only ones
// we match, and matching literal '*' characters is probably never
// the expected behavior.
continue
}
for j, c := range part {
if 'a' <= c && c <= 'z' {
continue
}
if '0' <= c && c <= '9' {
continue
}
if 'A' <= c && c <= 'Z' {
continue
}
if c == '-' && j != 0 {
continue
}
if c == '_' || c == ':' {
// Not valid characters in hostnames, but commonly
// found in deployments outside the WebPKI.
continue
}
return false
}
}
return true
}
// commonNameAsHostname reports whether the Common Name field should be
// considered the hostname that the certificate is valid for. This is a legacy
// behavior, disabled if the Subject Alt Name extension is present.
//
// It applies the strict validHostname check to the Common Name field, so that
// certificates without SANs can still be validated against CAs with name
// constraints if there is no risk the CN would be matched as a hostname.
// See NameConstraintsWithoutSANs and issue 24151.
func (c *Certificate) commonNameAsHostname() bool {
return !ignoreCN && !c.hasSANExtension() && validHostname(c.Subject.CommonName)
}
func matchHostnames(pattern, host string) bool { func matchHostnames(pattern, host string) bool {
host = strings.TrimSuffix(host, ".") host = strings.TrimSuffix(host, ".")
pattern = strings.TrimSuffix(pattern, ".") pattern = strings.TrimSuffix(pattern, ".")
@ -1020,16 +1007,79 @@ func (c *Certificate) VerifyHostname(h string) error {
lowered := toLowerCaseASCII(h) lowered := toLowerCaseASCII(h)
if c.hasSANExtension() { if c.commonNameAsHostname() {
if matchHostnames(toLowerCaseASCII(c.Subject.CommonName), lowered) {
return nil
}
} else {
for _, match := range c.DNSNames { for _, match := range c.DNSNames {
if matchHostnames(toLowerCaseASCII(match), lowered) { if matchHostnames(toLowerCaseASCII(match), lowered) {
return nil return nil
} }
} }
// If Subject Alt Name is given, we ignore the common name.
} else if matchHostnames(toLowerCaseASCII(c.Subject.CommonName), lowered) {
return nil
} }
return HostnameError{c, h} return HostnameError{c, h}
} }
func checkChainForKeyUsage(chain []*Certificate, keyUsages []ExtKeyUsage) bool {
usages := make([]ExtKeyUsage, len(keyUsages))
copy(usages, keyUsages)
if len(chain) == 0 {
return false
}
usagesRemaining := len(usages)
// We walk down the list and cross out any usages that aren't supported
// by each certificate. If we cross out all the usages, then the chain
// is unacceptable.
NextCert:
for i := len(chain) - 1; i >= 0; i-- {
cert := chain[i]
if len(cert.ExtKeyUsage) == 0 && len(cert.UnknownExtKeyUsage) == 0 {
// The certificate doesn't have any extended key usage specified.
continue
}
for _, usage := range cert.ExtKeyUsage {
if usage == ExtKeyUsageAny {
// The certificate is explicitly good for any usage.
continue NextCert
}
}
const invalidUsage ExtKeyUsage = -1
NextRequestedUsage:
for i, requestedUsage := range usages {
if requestedUsage == invalidUsage {
continue
}
for _, usage := range cert.ExtKeyUsage {
if requestedUsage == usage {
continue NextRequestedUsage
} else if requestedUsage == ExtKeyUsageServerAuth &&
(usage == ExtKeyUsageNetscapeServerGatedCrypto ||
usage == ExtKeyUsageMicrosoftServerGatedCrypto) {
// In order to support COMODO
// certificate chains, we have to
// accept Netscape or Microsoft SGC
// usages as equal to ServerAuth.
continue NextRequestedUsage
}
}
usages[i] = invalidUsage
usagesRemaining--
if usagesRemaining == 0 {
return false
}
}
}
return true
}
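
Tying the pieces above together: `commonNameAsHostname` only applies when there is no SAN extension, the Common Name passes the strict `validHostname` check, and `GODEBUG=x509ignoreCN=1` is not set; otherwise `VerifyHostname` consults `DNSNames` only. A small illustration with hypothetical certificates:

```go
package example

import (
	"fmt"

	"github.com/google/certificate-transparency-go/x509"
)

// checkNames shows which name source VerifyHostname consults; the two
// certificates are hypothetical inputs.
func checkNames(cnOnly, withSAN *x509.Certificate) {
	// cnOnly has no SAN extension and Subject.CommonName "internal.example.com":
	// it matches via the legacy CN path, unless GODEBUG contains x509ignoreCN=1
	// or the CN fails the strict validHostname check.
	fmt.Println(cnOnly.VerifyHostname("internal.example.com"))

	// withSAN carries a SAN extension listing "www.example.com": the CN is
	// ignored and only DNSNames (and other SAN values) are consulted.
	fmt.Println(withSAN.VerifyHostname("www.example.com"))
}
```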


@ -8,9 +8,39 @@
// can be used to override the system default locations for the SSL certificate // can be used to override the system default locations for the SSL certificate
// file and SSL certificate files directory, respectively. // file and SSL certificate files directory, respectively.
// //
// This is a fork of the go library crypto/x509 package, it's more relaxed // This is a fork of the Go library crypto/x509 package, primarily adapted for
// about certificates that it'll accept, and exports the TBSCertificate // use with Certificate Transparency. Main areas of difference are:
// structure. //
// - Life as a fork:
// - Rename OS-specific cgo code so it doesn't clash with main Go library.
// - Use local library imports (asn1, pkix) throughout.
// - Add version-specific wrappers for Go version-incompatible code (in
// nilref_*_darwin.go, ptr_*_windows.go).
// - Laxer certificate parsing:
// - Add options to disable various validation checks (times, EKUs etc).
// - Use NonFatalErrors type for some errors and continue parsing; this
// can be checked with IsFatal(err).
// - Support for short bitlength ECDSA curves (in curves.go).
// - Certificate Transparency specific function:
// - Parsing and marshaling of SCTList extension.
// - RemoveSCTList() function for rebuilding CT leaf entry.
// - Pre-certificate processing (RemoveCTPoison(), BuildPrecertTBS(),
// ParseTBSCertificate(), IsPrecertificate()).
// - Revocation list processing:
// - Detailed CRL parsing (in revoked.go)
// - Detailed error recording mechanism (in error.go, errors.go)
// - Factor out parseDistributionPoints() for reuse.
// - Factor out and generalize GeneralNames parsing (in names.go)
// - Fix CRL commenting.
// - RPKI support:
// - Support for SubjectInfoAccess extension
// - Support for RFC3779 extensions (in rpki.go)
// - General improvements:
// - Export and use OID values throughout.
// - Export OIDFromNamedCurve().
// - Export SignatureAlgorithmFromAI().
// - Add OID value to UnhandledCriticalExtension error.
// - Minor typo/lint fixes.
package x509 package x509
import ( import (
@ -69,7 +99,16 @@ func ParsePKIXPublicKey(derBytes []byte) (pub interface{}, err error) {
if algo == UnknownPublicKeyAlgorithm { if algo == UnknownPublicKeyAlgorithm {
return nil, errors.New("x509: unknown public key algorithm") return nil, errors.New("x509: unknown public key algorithm")
} }
return parsePublicKey(algo, &pki) var nfe NonFatalErrors
pub, err = parsePublicKey(algo, &pki, &nfe)
if err != nil {
return pub, err
}
// Treat non-fatal errors as fatal for this entrypoint.
if len(nfe.Errors) > 0 {
return nil, nfe.Errors[0]
}
return pub, nil
} }
func marshalPublicKey(pub interface{}) (publicKeyBytes []byte, publicKeyAlgorithm pkix.AlgorithmIdentifier, err error) { func marshalPublicKey(pub interface{}) (publicKeyBytes []byte, publicKeyAlgorithm pkix.AlgorithmIdentifier, err error) {
@ -432,10 +471,10 @@ func SignatureAlgorithmFromAI(ai pkix.AlgorithmIdentifier) SignatureAlgorithm {
// https://tools.ietf.org/html/rfc3447#section-8.1), that the // https://tools.ietf.org/html/rfc3447#section-8.1), that the
// salt length matches the hash length, and that the trailer // salt length matches the hash length, and that the trailer
// field has the default value. // field has the default value.
if !bytes.Equal(params.Hash.Parameters.FullBytes, asn1.NullBytes) || if (len(params.Hash.Parameters.FullBytes) != 0 && !bytes.Equal(params.Hash.Parameters.FullBytes, asn1.NullBytes)) ||
!params.MGF.Algorithm.Equal(oidMGF1) || !params.MGF.Algorithm.Equal(oidMGF1) ||
!mgf1HashFunc.Algorithm.Equal(params.Hash.Algorithm) || !mgf1HashFunc.Algorithm.Equal(params.Hash.Algorithm) ||
!bytes.Equal(mgf1HashFunc.Parameters.FullBytes, asn1.NullBytes) || (len(mgf1HashFunc.Parameters.FullBytes) != 0 && !bytes.Equal(mgf1HashFunc.Parameters.FullBytes, asn1.NullBytes)) ||
params.TrailerField != 1 { params.TrailerField != 1 {
return UnknownSignatureAlgorithm return UnknownSignatureAlgorithm
} }
@ -500,15 +539,21 @@ func getPublicKeyAlgorithmFromOID(oid asn1.ObjectIdentifier) PublicKeyAlgorithm
// secp521r1 OBJECT IDENTIFIER ::= { // secp521r1 OBJECT IDENTIFIER ::= {
// iso(1) identified-organization(3) certicom(132) curve(0) 35 } // iso(1) identified-organization(3) certicom(132) curve(0) 35 }
// //
// NB: secp256r1 is equivalent to prime256v1 // secp192r1 OBJECT IDENTIFIER ::= {
// iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3)
// prime(1) 1 }
//
// NB: secp256r1 is equivalent to prime256v1,
// secp192r1 is equivalent to ansix9p192r and prime192v1
var ( var (
OIDNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33} OIDNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33}
OIDNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7} OIDNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7}
OIDNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34} OIDNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34}
OIDNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35} OIDNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35}
OIDNamedCurveP192 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 1}
) )
func namedCurveFromOID(oid asn1.ObjectIdentifier) elliptic.Curve { func namedCurveFromOID(oid asn1.ObjectIdentifier, nfe *NonFatalErrors) elliptic.Curve {
switch { switch {
case oid.Equal(OIDNamedCurveP224): case oid.Equal(OIDNamedCurveP224):
return elliptic.P224() return elliptic.P224()
@ -518,6 +563,9 @@ func namedCurveFromOID(oid asn1.ObjectIdentifier) elliptic.Curve {
return elliptic.P384() return elliptic.P384()
case oid.Equal(OIDNamedCurveP521): case oid.Equal(OIDNamedCurveP521):
return elliptic.P521() return elliptic.P521()
case oid.Equal(OIDNamedCurveP192):
nfe.AddError(errors.New("insecure curve (secp192r1) specified"))
return secp192r1()
} }
return nil return nil
} }
@ -534,6 +582,8 @@ func OIDFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) {
return OIDNamedCurveP384, true return OIDNamedCurveP384, true
case elliptic.P521(): case elliptic.P521():
return OIDNamedCurveP521, true return OIDNamedCurveP521, true
case secp192r1():
return OIDNamedCurveP192, true
} }
return nil, false return nil, false
@ -737,7 +787,13 @@ type Certificate struct {
OCSPServer []string OCSPServer []string
IssuingCertificateURL []string IssuingCertificateURL []string
// Subject Alternate Name values // Subject Information Access
SubjectTimestamps []string
SubjectCARepositories []string
// Subject Alternate Name values. (Note that these values may not be valid
// if invalid values were contained within a parsed certificate. For
// example, an element of DNSNames may not be a valid DNS domain name.)
DNSNames []string DNSNames []string
EmailAddresses []string EmailAddresses []string
IPAddresses []net.IP IPAddresses []net.IP
@ -759,6 +815,9 @@ type Certificate struct {
PolicyIdentifiers []asn1.ObjectIdentifier PolicyIdentifiers []asn1.ObjectIdentifier
RPKIAddressRanges []*IPAddressFamilyBlocks
RPKIASNumbers, RPKIRoutingDomainIDs *ASIdentifiers
// Certificate Transparency SCT extension contents; this is a TLS-encoded // Certificate Transparency SCT extension contents; this is a TLS-encoded
// SignedCertificateTimestampList (RFC 6962 s3.3). // SignedCertificateTimestampList (RFC 6962 s3.3).
RawSCT []byte RawSCT []byte
@ -792,6 +851,20 @@ func (c *Certificate) Equal(other *Certificate) bool {
return bytes.Equal(c.Raw, other.Raw) return bytes.Equal(c.Raw, other.Raw)
} }
// IsPrecertificate checks whether the certificate is a precertificate, by
// checking for the presence of the CT Poison extension.
func (c *Certificate) IsPrecertificate() bool {
if c == nil {
return false
}
for _, ext := range c.Extensions {
if ext.Id.Equal(OIDExtensionCTPoison) {
return true
}
}
return false
}
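
`IsPrecertificate` pairs naturally with `BuildPrecertTBS` further down: the poison extension marks a precert, and `BuildPrecertTBS` strips it (optionally fixing up the issuer) to recover the TBS corresponding to the CT leaf. A sketch, assuming the `RawTBSCertificate` field carried over from upstream crypto/x509:

```go
package example

import (
	"errors"

	"github.com/google/certificate-transparency-go/x509"
)

// precertTBS returns cert's TBSCertificate with the CT poison extension
// removed. preIssuer may be nil when no pre-certificate signing cert was used.
func precertTBS(cert, preIssuer *x509.Certificate) ([]byte, error) {
	if !cert.IsPrecertificate() {
		return nil, errors.New("not a precertificate")
	}
	return x509.BuildPrecertTBS(cert.RawTBSCertificate, preIssuer)
}
```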
func (c *Certificate) hasSANExtension() bool { func (c *Certificate) hasSANExtension() bool {
return oidInExtensions(OIDExtensionSubjectAltName, c.Extensions) return oidInExtensions(OIDExtensionSubjectAltName, c.Extensions)
} }
@ -880,23 +953,17 @@ func (c *Certificate) CheckSignature(algo SignatureAlgorithm, signed, signature
} }
func (c *Certificate) hasNameConstraints() bool { func (c *Certificate) hasNameConstraints() bool {
for _, e := range c.Extensions { return oidInExtensions(OIDExtensionNameConstraints, c.Extensions)
if len(e.Id) == 4 && e.Id[0] == OIDExtensionNameConstraints[0] && e.Id[1] == OIDExtensionNameConstraints[1] && e.Id[2] == OIDExtensionNameConstraints[2] && e.Id[3] == OIDExtensionNameConstraints[3] {
return true
}
}
return false
} }
func (c *Certificate) getSANExtension() ([]byte, bool) { func (c *Certificate) getSANExtension() []byte {
for _, e := range c.Extensions { for _, e := range c.Extensions {
if len(e.Id) == 4 && e.Id[0] == OIDExtensionSubjectAltName[0] && e.Id[1] == OIDExtensionSubjectAltName[1] && e.Id[2] == OIDExtensionSubjectAltName[2] && e.Id[3] == OIDExtensionSubjectAltName[3] { if e.Id.Equal(OIDExtensionSubjectAltName) {
return e.Value, true return e.Value
} }
} }
return nil, false return nil
} }
func signaturePublicKeyAlgoMismatchError(expectedPubKeyAlgo PublicKeyAlgorithm, pubKey interface{}) error { func signaturePublicKeyAlgoMismatchError(expectedPubKeyAlgo PublicKeyAlgorithm, pubKey interface{}) error {
@ -995,6 +1062,50 @@ func (h UnhandledCriticalExtension) Error() string {
return fmt.Sprintf("x509: unhandled critical extension (%v)", h.ID) return fmt.Sprintf("x509: unhandled critical extension (%v)", h.ID)
} }
// removeExtension takes a DER-encoded TBSCertificate, removes the extension
// specified by oid (preserving the order of other extensions), and returns the
// result still as a DER-encoded TBSCertificate. This function will fail if
// there is not exactly 1 extension of the type specified by the oid present.
func removeExtension(tbsData []byte, oid asn1.ObjectIdentifier) ([]byte, error) {
var tbs tbsCertificate
rest, err := asn1.Unmarshal(tbsData, &tbs)
if err != nil {
return nil, fmt.Errorf("failed to parse TBSCertificate: %v", err)
} else if rLen := len(rest); rLen > 0 {
return nil, fmt.Errorf("trailing data (%d bytes) after TBSCertificate", rLen)
}
extAt := -1
for i, ext := range tbs.Extensions {
if ext.Id.Equal(oid) {
if extAt != -1 {
return nil, errors.New("multiple extensions of specified type present")
}
extAt = i
}
}
if extAt == -1 {
return nil, errors.New("no extension of specified type present")
}
tbs.Extensions = append(tbs.Extensions[:extAt], tbs.Extensions[extAt+1:]...)
// Clear out the asn1.RawContent so the re-marshal operation sees the
// updated structure (rather than just copying the out-of-date DER data).
tbs.Raw = nil
data, err := asn1.Marshal(tbs)
if err != nil {
return nil, fmt.Errorf("failed to re-marshal TBSCertificate: %v", err)
}
return data, nil
}
// RemoveSCTList takes a DER-encoded TBSCertificate and removes the CT SCT
// extension that contains the SCT list (preserving the order of other
// extensions), and returns the result still as a DER-encoded TBSCertificate.
// This function will fail if there is not exactly 1 CT SCT extension present.
func RemoveSCTList(tbsData []byte) ([]byte, error) {
return removeExtension(tbsData, OIDExtensionCTSCT)
}
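
`RemoveSCTList` is the counterpart for final certificates that already embed an SCT list, used when rebuilding the CT leaf entry as described in the package comment. A sketch, again assuming `RawTBSCertificate` from upstream crypto/x509:

```go
package example

import (
	"fmt"

	"github.com/google/certificate-transparency-go/x509"
)

// leafTBS strips the embedded SCT-list extension from cert's TBSCertificate,
// as needed when rebuilding the CT leaf entry for an embedded-SCT certificate.
func leafTBS(cert *x509.Certificate) ([]byte, error) {
	tbs, err := x509.RemoveSCTList(cert.RawTBSCertificate)
	if err != nil {
		return nil, fmt.Errorf("failed to strip SCT list: %v", err)
	}
	return tbs, nil
}
```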
// RemoveCTPoison takes a DER-encoded TBSCertificate and removes the CT poison // RemoveCTPoison takes a DER-encoded TBSCertificate and removes the CT poison
// extension (preserving the order of other extensions), and returns the result // extension (preserving the order of other extensions), and returns the result
// still as a DER-encoded TBSCertificate. This function will fail if there is // still as a DER-encoded TBSCertificate. This function will fail if there is
@ -1019,27 +1130,18 @@ func RemoveCTPoison(tbsData []byte) ([]byte, error) {
// - The precert's AuthorityKeyId is changed to the AuthorityKeyId of the // - The precert's AuthorityKeyId is changed to the AuthorityKeyId of the
// intermediate. // intermediate.
func BuildPrecertTBS(tbsData []byte, preIssuer *Certificate) ([]byte, error) { func BuildPrecertTBS(tbsData []byte, preIssuer *Certificate) ([]byte, error) {
data, err := removeExtension(tbsData, OIDExtensionCTPoison)
if err != nil {
return nil, err
}
var tbs tbsCertificate var tbs tbsCertificate
rest, err := asn1.Unmarshal(tbsData, &tbs) rest, err := asn1.Unmarshal(data, &tbs)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse TBSCertificate: %v", err) return nil, fmt.Errorf("failed to parse TBSCertificate: %v", err)
} else if rLen := len(rest); rLen > 0 { } else if rLen := len(rest); rLen > 0 {
return nil, fmt.Errorf("trailing data (%d bytes) after TBSCertificate", rLen) return nil, fmt.Errorf("trailing data (%d bytes) after TBSCertificate", rLen)
} }
poisonAt := -1
for i, ext := range tbs.Extensions {
if ext.Id.Equal(OIDExtensionCTPoison) {
if poisonAt != -1 {
return nil, errors.New("multiple CT poison extensions present")
}
poisonAt = i
}
}
if poisonAt == -1 {
return nil, errors.New("no CT poison extension present")
}
tbs.Extensions = append(tbs.Extensions[:poisonAt], tbs.Extensions[poisonAt+1:]...)
tbs.Raw = nil
if preIssuer != nil { if preIssuer != nil {
// Update the precert's Issuer field. Use the RawIssuer rather than the // Update the precert's Issuer field. Use the RawIssuer rather than the
@ -1092,9 +1194,13 @@ func BuildPrecertTBS(tbsData []byte, preIssuer *Certificate) ([]byte, error) {
} }
tbs.Extensions = append(tbs.Extensions, authKeyIDExt) tbs.Extensions = append(tbs.Extensions, authKeyIDExt)
} }
// Clear out the asn1.RawContent so the re-marshal operation sees the
// updated structure (rather than just copying the out-of-date DER data).
tbs.Raw = nil
} }
data, err := asn1.Marshal(tbs) data, err = asn1.Marshal(tbs)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to re-marshal TBSCertificate: %v", err) return nil, fmt.Errorf("failed to re-marshal TBSCertificate: %v", err)
} }
@ -1120,7 +1226,7 @@ const (
) )
// RFC 5280, 4.2.2.1 // RFC 5280, 4.2.2.1
type authorityInfoAccess struct { type accessDescription struct {
Method asn1.ObjectIdentifier Method asn1.ObjectIdentifier
Location asn1.RawValue Location asn1.RawValue
} }
@ -1137,27 +1243,32 @@ type distributionPointName struct {
RelativeName pkix.RDNSequence `asn1:"optional,tag:1"` RelativeName pkix.RDNSequence `asn1:"optional,tag:1"`
} }
func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (interface{}, error) { func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo, nfe *NonFatalErrors) (interface{}, error) {
asn1Data := keyData.PublicKey.RightAlign() asn1Data := keyData.PublicKey.RightAlign()
switch algo { switch algo {
case RSA: case RSA:
// RSA public keys must have a NULL in the parameters // RSA public keys must have a NULL in the parameters
// (https://tools.ietf.org/html/rfc3279#section-2.3.1). // (https://tools.ietf.org/html/rfc3279#section-2.3.1).
if !bytes.Equal(keyData.Algorithm.Parameters.FullBytes, asn1.NullBytes) { if !bytes.Equal(keyData.Algorithm.Parameters.FullBytes, asn1.NullBytes) {
return nil, errors.New("x509: RSA key missing NULL parameters") nfe.AddError(errors.New("x509: RSA key missing NULL parameters"))
} }
p := new(pkcs1PublicKey) p := new(pkcs1PublicKey)
rest, err := asn1.Unmarshal(asn1Data, p) rest, err := asn1.Unmarshal(asn1Data, p)
if err != nil { if err != nil {
return nil, err var laxErr error
rest, laxErr = asn1.UnmarshalWithParams(asn1Data, p, "lax")
if laxErr != nil {
return nil, laxErr
}
nfe.AddError(err)
} }
if len(rest) != 0 { if len(rest) != 0 {
return nil, errors.New("x509: trailing data after RSA public key") return nil, errors.New("x509: trailing data after RSA public key")
} }
if p.N.Sign() <= 0 { if p.N.Sign() <= 0 {
return nil, errors.New("x509: RSA modulus is not a positive number") nfe.AddError(errors.New("x509: RSA modulus is not a positive number"))
} }
if p.E <= 0 { if p.E <= 0 {
return nil, errors.New("x509: RSA public exponent is not a positive number") return nil, errors.New("x509: RSA public exponent is not a positive number")
@ -1172,7 +1283,12 @@ func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (interface{
var p *big.Int var p *big.Int
rest, err := asn1.Unmarshal(asn1Data, &p) rest, err := asn1.Unmarshal(asn1Data, &p)
if err != nil { if err != nil {
return nil, err var laxErr error
rest, laxErr = asn1.UnmarshalWithParams(asn1Data, &p, "lax")
if laxErr != nil {
return nil, laxErr
}
nfe.AddError(err)
} }
if len(rest) != 0 { if len(rest) != 0 {
return nil, errors.New("x509: trailing data after DSA public key") return nil, errors.New("x509: trailing data after DSA public key")
@ -1203,14 +1319,14 @@ func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (interface{
namedCurveOID := new(asn1.ObjectIdentifier) namedCurveOID := new(asn1.ObjectIdentifier)
rest, err := asn1.Unmarshal(paramsData, namedCurveOID) rest, err := asn1.Unmarshal(paramsData, namedCurveOID)
if err != nil { if err != nil {
return nil, err return nil, errors.New("x509: failed to parse ECDSA parameters as named curve")
} }
if len(rest) != 0 { if len(rest) != 0 {
return nil, errors.New("x509: trailing data after ECDSA parameters") return nil, errors.New("x509: trailing data after ECDSA parameters")
} }
namedCurve := namedCurveFromOID(*namedCurveOID) namedCurve := namedCurveFromOID(*namedCurveOID, nfe)
if namedCurve == nil { if namedCurve == nil {
return nil, errors.New("x509: unsupported elliptic curve") return nil, fmt.Errorf("x509: unsupported elliptic curve %v", namedCurveOID)
} }
x, y := elliptic.Unmarshal(namedCurve, asn1Data) x, y := elliptic.Unmarshal(namedCurve, asn1Data)
if x == nil { if x == nil {
@ -1235,7 +1351,7 @@ type NonFatalErrors struct {
Errors []error Errors []error
} }
// Adds an error to the list of errors contained by NonFatalErrors. // AddError adds an error to the list of errors contained by NonFatalErrors.
func (e *NonFatalErrors) AddError(err error) { func (e *NonFatalErrors) AddError(err error) {
e.Errors = append(e.Errors, err) e.Errors = append(e.Errors, err)
} }
@ -1250,11 +1366,25 @@ func (e NonFatalErrors) Error() string {
return r return r
} }
// Returns true if |e| contains at least one error // HasError returns true if |e| contains at least one error
func (e *NonFatalErrors) HasError() bool { func (e *NonFatalErrors) HasError() bool {
return len(e.Errors) > 0 return len(e.Errors) > 0
} }
// IsFatal indicates whether an error is fatal.
func IsFatal(err error) bool {
if err == nil {
return false
}
if _, ok := err.(NonFatalErrors); ok {
return false
}
if errs, ok := err.(*Errors); ok {
return errs.Fatal()
}
return true
}
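
The `NonFatalErrors`/`IsFatal` pair gives callers a consistent pattern for the laxer parsing described in the package comment: keep the certificate, but surface the recorded problems. A minimal sketch of that calling pattern:

```go
package example

import (
	"log"

	"github.com/google/certificate-transparency-go/x509"
)

// parseLeniently keeps certificates that parse with only non-fatal problems.
func parseLeniently(der []byte) (*x509.Certificate, error) {
	cert, err := x509.ParseCertificate(der)
	if x509.IsFatal(err) {
		return nil, err // no usable certificate could be produced
	}
	if err != nil {
		// Non-fatal: cert is populated and err (a NonFatalErrors value)
		// records the problems found while parsing.
		log.Printf("certificate parsed with warnings: %v", err)
	}
	return cert, nil
}
```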
func parseDistributionPoints(data []byte, crldp *[]string) error { func parseDistributionPoints(data []byte, crldp *[]string) error {
// CRLDistributionPoints ::= SEQUENCE SIZE (1..MAX) OF DistributionPoint // CRLDistributionPoints ::= SEQUENCE SIZE (1..MAX) OF DistributionPoint
// //
@ -1337,17 +1467,9 @@ func parseSANExtension(value []byte, nfe *NonFatalErrors) (dnsNames, emailAddres
err = forEachSAN(value, func(tag int, data []byte) error { err = forEachSAN(value, func(tag int, data []byte) error {
switch tag { switch tag {
case nameTypeEmail: case nameTypeEmail:
mailbox := string(data) emailAddresses = append(emailAddresses, string(data))
if _, ok := parseRFC2821Mailbox(mailbox); !ok {
return fmt.Errorf("x509: cannot parse rfc822Name %q", mailbox)
}
emailAddresses = append(emailAddresses, mailbox)
case nameTypeDNS: case nameTypeDNS:
domain := string(data) dnsNames = append(dnsNames, string(data))
if _, ok := domainToReverseLabels(domain); !ok {
return fmt.Errorf("x509: cannot parse dnsName %q", string(data))
}
dnsNames = append(dnsNames, domain)
case nameTypeURI: case nameTypeURI:
uri, err := url.Parse(string(data)) uri, err := url.Parse(string(data))
if err != nil { if err != nil {
@ -1364,7 +1486,7 @@ func parseSANExtension(value []byte, nfe *NonFatalErrors) (dnsNames, emailAddres
case net.IPv4len, net.IPv6len: case net.IPv4len, net.IPv6len:
ipAddresses = append(ipAddresses, data) ipAddresses = append(ipAddresses, data)
default: default:
nfe.AddError(fmt.Errorf("x509: certificate contained IP address of length %d : %v", len(data), data)) nfe.AddError(errors.New("x509: cannot parse IP address of length " + strconv.Itoa(len(data))))
} }
} }
@ -1399,7 +1521,7 @@ func isValidIPMask(mask []byte) bool {
return true return true
} }
func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandled bool, err error) { func parseNameConstraintsExtension(out *Certificate, e pkix.Extension, nfe *NonFatalErrors) (unhandled bool, err error) {
// RFC 5280, 4.2.1.10 // RFC 5280, 4.2.1.10
// NameConstraints ::= SEQUENCE { // NameConstraints ::= SEQUENCE {
@ -1466,7 +1588,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle
trimmedDomain = trimmedDomain[1:] trimmedDomain = trimmedDomain[1:]
} }
if _, ok := domainToReverseLabels(trimmedDomain); !ok { if _, ok := domainToReverseLabels(trimmedDomain); !ok {
return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse dnsName constraint %q", domain) nfe.AddError(fmt.Errorf("x509: failed to parse dnsName constraint %q", domain))
} }
dnsNames = append(dnsNames, domain) dnsNames = append(dnsNames, domain)
@ -1503,7 +1625,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle
// it specifies an exact mailbox name. // it specifies an exact mailbox name.
if strings.Contains(constraint, "@") { if strings.Contains(constraint, "@") {
if _, ok := parseRFC2821Mailbox(constraint); !ok { if _, ok := parseRFC2821Mailbox(constraint); !ok {
return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint) nfe.AddError(fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint))
} }
} else { } else {
// Otherwise it's a domain name. // Otherwise it's a domain name.
@ -1512,7 +1634,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle
domain = domain[1:] domain = domain[1:]
} }
if _, ok := domainToReverseLabels(domain); !ok { if _, ok := domainToReverseLabels(domain); !ok {
return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint) nfe.AddError(fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint))
} }
} }
emails = append(emails, constraint) emails = append(emails, constraint)
@ -1536,7 +1658,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle
trimmedDomain = trimmedDomain[1:] trimmedDomain = trimmedDomain[1:]
} }
if _, ok := domainToReverseLabels(trimmedDomain); !ok { if _, ok := domainToReverseLabels(trimmedDomain); !ok {
return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q", domain) nfe.AddError(fmt.Errorf("x509: failed to parse URI constraint %q", domain))
} }
uriDomains = append(uriDomains, domain) uriDomains = append(uriDomains, domain)
@ -1575,7 +1697,7 @@ func parseCertificate(in *certificate) (*Certificate, error) {
out.PublicKeyAlgorithm = out.PublicKeyAlgorithm =
getPublicKeyAlgorithmFromOID(in.TBSCertificate.PublicKey.Algorithm.Algorithm) getPublicKeyAlgorithmFromOID(in.TBSCertificate.PublicKey.Algorithm.Algorithm)
var err error var err error
- out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCertificate.PublicKey)
+ out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCertificate.PublicKey, &nfe)
if err != nil {
return nil, err
}
@@ -1585,12 +1707,22 @@ func parseCertificate(in *certificate) (*Certificate, error) {
var issuer, subject pkix.RDNSequence
if rest, err := asn1.Unmarshal(in.TBSCertificate.Subject.FullBytes, &subject); err != nil {
- return nil, err
+ var laxErr error
+ rest, laxErr = asn1.UnmarshalWithParams(in.TBSCertificate.Subject.FullBytes, &subject, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
} else if len(rest) != 0 {
return nil, errors.New("x509: trailing data after X.509 subject")
}
if rest, err := asn1.Unmarshal(in.TBSCertificate.Issuer.FullBytes, &issuer); err != nil {
- return nil, err
+ var laxErr error
+ rest, laxErr = asn1.UnmarshalWithParams(in.TBSCertificate.Issuer.FullBytes, &issuer, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
} else if len(rest) != 0 {
return nil, errors.New("x509: trailing data after X.509 subject")
}
@@ -1651,7 +1783,7 @@ func parseCertificate(in *certificate) (*Certificate, error) {
}
case OIDExtensionNameConstraints[3]:
- unhandled, err = parseNameConstraintsExtension(out, e)
+ unhandled, err = parseNameConstraintsExtension(out, e, &nfe)
if err != nil {
return nil, err
}
@@ -1682,10 +1814,21 @@ func parseCertificate(in *certificate) (*Certificate, error) {
// KeyPurposeId ::= OBJECT IDENTIFIER
var keyUsage []asn1.ObjectIdentifier
- if rest, err := asn1.Unmarshal(e.Value, &keyUsage); err != nil {
- return nil, err
- } else if len(rest) != 0 {
- return nil, errors.New("x509: trailing data after X.509 ExtendedKeyUsage")
+ if len(e.Value) == 0 {
+ nfe.AddError(errors.New("x509: empty ExtendedKeyUsage"))
+ } else {
+ rest, err := asn1.Unmarshal(e.Value, &keyUsage)
+ if err != nil {
+ var laxErr error
+ rest, laxErr = asn1.UnmarshalWithParams(e.Value, &keyUsage, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
+ }
+ if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after X.509 ExtendedKeyUsage")
+ }
}
for _, u := range keyUsage {
@@ -1725,12 +1868,15 @@ func parseCertificate(in *certificate) (*Certificate, error) {
}
} else if e.Id.Equal(OIDExtensionAuthorityInfoAccess) {
// RFC 5280 4.2.2.1: Authority Information Access
- var aia []authorityInfoAccess
+ var aia []accessDescription
if rest, err := asn1.Unmarshal(e.Value, &aia); err != nil {
return nil, err
} else if len(rest) != 0 {
return nil, errors.New("x509: trailing data after X.509 authority information")
}
+ if len(aia) == 0 {
+ nfe.AddError(errors.New("x509: empty AuthorityInfoAccess extension"))
+ }
for _, v := range aia {
// GeneralName: uniformResourceIdentifier [6] IA5String
@@ -1743,6 +1889,34 @@ func parseCertificate(in *certificate) (*Certificate, error) {
out.IssuingCertificateURL = append(out.IssuingCertificateURL, string(v.Location.Bytes))
}
}
+ } else if e.Id.Equal(OIDExtensionSubjectInfoAccess) {
+ // RFC 5280 4.2.2.2: Subject Information Access
+ var sia []accessDescription
+ if rest, err := asn1.Unmarshal(e.Value, &sia); err != nil {
+ return nil, err
+ } else if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after X.509 subject information")
+ }
+ if len(sia) == 0 {
+ nfe.AddError(errors.New("x509: empty SubjectInfoAccess extension"))
+ }
+ for _, v := range sia {
+ // TODO(drysdale): cope with non-URI types of GeneralName
+ // GeneralName: uniformResourceIdentifier [6] IA5String
+ if v.Location.Tag != 6 {
+ continue
+ }
+ if v.Method.Equal(OIDSubjectInfoAccessTimestamp) {
+ out.SubjectTimestamps = append(out.SubjectTimestamps, string(v.Location.Bytes))
+ } else if v.Method.Equal(OIDSubjectInfoAccessCARepo) {
+ out.SubjectCARepositories = append(out.SubjectCARepositories, string(v.Location.Bytes))
+ }
+ }
+ } else if e.Id.Equal(OIDExtensionIPPrefixList) {
+ out.RPKIAddressRanges = parseRPKIAddrBlocks(e.Value, &nfe)
+ } else if e.Id.Equal(OIDExtensionASList) {
+ out.RPKIASNumbers, out.RPKIRoutingDomainIDs = parseRPKIASIdentifiers(e.Value, &nfe)
} else if e.Id.Equal(OIDExtensionCTSCT) {
if rest, err := asn1.Unmarshal(e.Value, &out.RawSCT); err != nil {
nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal SCT list extension: %v", err))
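
For context, a minimal sketch of how a caller could read the new `Certificate` fields that the block above populates. The helper name and the input path are made up for illustration; the error-handling policy shown is one option, not part of this change:

```
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/google/certificate-transparency-go/x509"
)

// dumpAccessInfo is a hypothetical helper that prints the fields filled in by
// the SubjectInfoAccess and RFC 3779 parsing shown above.
func dumpAccessInfo(cert *x509.Certificate) {
	for _, ts := range cert.SubjectTimestamps {
		fmt.Println("SIA timestamp URL:", ts)
	}
	for _, repo := range cert.SubjectCARepositories {
		fmt.Println("SIA CA repository URL:", repo)
	}
	if cert.RPKIAddressRanges != nil {
		fmt.Println("has RFC 3779 IP address blocks")
	}
	if cert.RPKIASNumbers != nil {
		fmt.Println("has RFC 3779 AS identifiers")
	}
}

func main() {
	der, err := ioutil.ReadFile("cert.der") // placeholder input path
	if err != nil {
		log.Fatal(err)
	}
	cert, err := x509.ParseCertificate(der)
	if err != nil {
		if _, ok := err.(x509.NonFatalErrors); !ok {
			log.Fatal(err) // genuinely unparseable
		}
		// Non-fatal errors: cert is still usable.
	}
	dumpAccessInfo(cert)
}
```
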
@@ -1787,6 +1961,8 @@ func ParseTBSCertificate(asn1Data []byte) (*Certificate, error) {
}
// ParseCertificate parses a single certificate from the given ASN.1 DER data.
+ // This function can return both a Certificate and an error (in which case the
+ // error will be of type NonFatalErrors).
func ParseCertificate(asn1Data []byte) (*Certificate, error) {
var cert certificate
rest, err := asn1.Unmarshal(asn1Data, &cert)
@@ -1802,6 +1978,8 @@ func ParseCertificate(asn1Data []byte) (*Certificate, error) {
// ParseCertificates parses one or more certificates from the given ASN.1 DER
// data. The certificates must be concatenated with no intermediate padding.
+ // This function can return both a slice of Certificate and an error (in which
+ // case the error will be of type NonFatalErrors).
func ParseCertificates(asn1Data []byte) ([]*Certificate, error) {
var v []*certificate
@@ -1815,15 +1993,23 @@ func ParseCertificates(asn1Data []byte) ([]*Certificate, error) {
v = append(v, cert)
}
+ var nfe NonFatalErrors
ret := make([]*Certificate, len(v))
for i, ci := range v {
cert, err := parseCertificate(ci)
if err != nil {
- return nil, err
+ if errs, ok := err.(NonFatalErrors); !ok {
+ return nil, err
+ } else {
+ nfe.Errors = append(nfe.Errors, errs.Errors...)
+ }
}
ret[i] = cert
}
+ if nfe.HasError() {
+ return ret, nfe
+ }
return ret, nil
}
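
Because ParseCertificates can now hand back both a populated slice and a NonFatalErrors value, callers that want to keep the certificates need to check the error's type. A hedged sketch, with the file name as a placeholder:

```
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/google/certificate-transparency-go/x509"
)

func main() {
	// Placeholder input: one or more concatenated DER certificates.
	der, err := ioutil.ReadFile("chain.der")
	if err != nil {
		log.Fatal(err)
	}
	certs, err := x509.ParseCertificates(der)
	if err != nil {
		if nfe, ok := err.(x509.NonFatalErrors); ok {
			// Certificates were parsed, but with recoverable problems.
			log.Printf("parsed with %d non-fatal error(s): %v", len(nfe.Errors), nfe)
		} else {
			log.Fatalf("unparseable certificates: %v", err)
		}
	}
	for _, c := range certs {
		fmt.Println(c.Subject.CommonName)
	}
}
```
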
@@ -1875,15 +2061,23 @@ var (
OIDExtensionAuthorityInfoAccess = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 1}
OIDExtensionSubjectInfoAccess = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 11}
// OIDExtensionCTPoison is defined in RFC 6962 s3.1.
OIDExtensionCTPoison = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3}
// OIDExtensionCTSCT is defined in RFC 6962 s3.3.
OIDExtensionCTSCT = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2}
+ // OIDExtensionIPPrefixList is defined in RFC 3779 s2.
+ OIDExtensionIPPrefixList = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 7}
+ // OIDExtensionASList is defined in RFC 3779 s3.
+ OIDExtensionASList = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 8}
)
var (
OIDAuthorityInfoAccessOCSP = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1}
OIDAuthorityInfoAccessIssuers = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 2}
+ OIDSubjectInfoAccessTimestamp = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 3}
+ OIDSubjectInfoAccessCARepo = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 5}
+ OIDAnyPolicy = asn1.ObjectIdentifier{2, 5, 29, 32, 0}
)
// oidInExtensions returns whether an extension with the given oid exists in
@@ -1932,7 +2126,7 @@ func isIA5String(s string) error {
}
func buildExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId []byte) (ret []pkix.Extension, err error) {
- ret = make([]pkix.Extension, 11 /* maximum number of elements. */)
+ ret = make([]pkix.Extension, 12 /* maximum number of elements. */)
n := 0
if template.KeyUsage != 0 &&
@@ -2017,15 +2211,15 @@ func buildExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId
if (len(template.OCSPServer) > 0 || len(template.IssuingCertificateURL) > 0) &&
!oidInExtensions(OIDExtensionAuthorityInfoAccess, template.ExtraExtensions) {
ret[n].Id = OIDExtensionAuthorityInfoAccess
- var aiaValues []authorityInfoAccess
+ var aiaValues []accessDescription
for _, name := range template.OCSPServer {
- aiaValues = append(aiaValues, authorityInfoAccess{
+ aiaValues = append(aiaValues, accessDescription{
Method: OIDAuthorityInfoAccessOCSP,
Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(name)},
})
}
for _, name := range template.IssuingCertificateURL {
- aiaValues = append(aiaValues, authorityInfoAccess{
+ aiaValues = append(aiaValues, accessDescription{
Method: OIDAuthorityInfoAccessIssuers,
Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(name)},
})
@@ -2037,6 +2231,29 @@ func buildExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId
n++
}
+ if len(template.SubjectTimestamps) > 0 || len(template.SubjectCARepositories) > 0 &&
+ !oidInExtensions(OIDExtensionSubjectInfoAccess, template.ExtraExtensions) {
+ ret[n].Id = OIDExtensionSubjectInfoAccess
+ var siaValues []accessDescription
+ for _, ts := range template.SubjectTimestamps {
+ siaValues = append(siaValues, accessDescription{
+ Method: OIDSubjectInfoAccessTimestamp,
+ Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(ts)},
+ })
+ }
+ for _, repo := range template.SubjectCARepositories {
+ siaValues = append(siaValues, accessDescription{
+ Method: OIDSubjectInfoAccessCARepo,
+ Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(repo)},
+ })
+ }
+ ret[n].Value, err = asn1.Marshal(siaValues)
+ if err != nil {
+ return
+ }
+ n++
+ }
if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0 || len(template.URIs) > 0) &&
!oidInExtensions(OIDExtensionSubjectAltName, template.ExtraExtensions) {
ret[n].Id = OIDExtensionSubjectAltName
@@ -2203,7 +2420,8 @@ func buildExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId
}
// Adding another extension here? Remember to update the maximum number
- // of elements in the make() at the top of the function.
+ // of elements in the make() at the top of the function and the list of
+ // template fields used in CreateCertificate documentation.
return append(ret[:n], template.ExtraExtensions...), nil
}
@@ -2290,12 +2508,26 @@ func signingParamsForPublicKey(pub interface{}, requestedSigAlgo SignatureAlgori
var emptyASN1Subject = []byte{0x30, 0}
// CreateCertificate creates a new X.509v3 certificate based on a template.
- // The following members of template are used: AuthorityKeyId,
- // BasicConstraintsValid, DNSNames, ExcludedDNSDomains, ExtKeyUsage,
- // IsCA, KeyUsage, MaxPathLen, MaxPathLenZero, NotAfter, NotBefore,
- // PermittedDNSDomains, PermittedDNSDomainsCritical, SerialNumber,
- // SignatureAlgorithm, Subject, SubjectKeyId, UnknownExtKeyUsage,
- // and RawSCT.
+ // The following members of template are used:
+ // - SerialNumber
+ // - Subject
+ // - NotBefore, NotAfter
+ // - SignatureAlgorithm
+ // - For extensions:
+ //   - KeyUsage
+ //   - ExtKeyUsage, UnknownExtKeyUsage
+ //   - BasicConstraintsValid, IsCA, MaxPathLen, MaxPathLenZero
+ //   - SubjectKeyId
+ //   - AuthorityKeyId
+ //   - OCSPServer, IssuingCertificateURL
+ //   - SubjectTimestamps, SubjectCARepositories
+ //   - DNSNames, EmailAddresses, IPAddresses, URIs
+ //   - PolicyIdentifiers
+ //   - ExcludedDNSDomains, ExcludedIPRanges, ExcludedEmailAddresses, ExcludedURIDomains, PermittedDNSDomainsCritical,
+ //     PermittedDNSDomains, PermittedIPRanges, PermittedEmailAddresses, PermittedURIDomains
+ //   - CRLDistributionPoints
+ //   - RawSCT, SCTList
+ //   - ExtraExtensions
//
// The certificate is signed by parent. If parent is equal to template then the
// certificate is self-signed. The parameter pub is the public key of the
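
As a sanity check on the list above, here is a minimal self-signed sketch exercising a few of those template members, including the new SubjectTimestamps/SubjectCARepositories fields. All field values are illustrative:

```
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"encoding/pem"
	"log"
	"math/big"
	"os"
	"time"

	"github.com/google/certificate-transparency-go/x509"
	"github.com/google/certificate-transparency-go/x509/pkix"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	template := x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "example.com"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(24 * time.Hour),
		KeyUsage:              x509.KeyUsageDigitalSignature,
		BasicConstraintsValid: true,
		DNSNames:              []string{"example.com"},
		OCSPServer:            []string{"http://ocsp.example.com"},
		// New SubjectInfoAccess fields emitted by buildExtensions above.
		SubjectTimestamps:     []string{"http://ts.example.com"},
		SubjectCARepositories: []string{"http://repo.example.com"},
	}
	// Self-signed: parent is the template itself.
	der, err := x509.CreateCertificate(rand.Reader, &template, &template, key.Public(), key)
	if err != nil {
		log.Fatal(err)
	}
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der})
}
```
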
@@ -2569,7 +2801,7 @@ func newRawAttributes(attributes []pkix.AttributeTypeAndValueSET) ([]asn1.RawVal
return rawAttributes, nil
}
- // parseRawAttributes Unmarshals RawAttributes intos AttributeTypeAndValueSETs.
+ // parseRawAttributes Unmarshals RawAttributes into AttributeTypeAndValueSETs.
func parseRawAttributes(rawAttributes []asn1.RawValue) []pkix.AttributeTypeAndValueSET {
var attributes []pkix.AttributeTypeAndValueSET
for _, rawAttr := range rawAttributes {
@@ -2617,9 +2849,18 @@ func parseCSRExtensions(rawAttributes []asn1.RawValue) ([]pkix.Extension, error)
}
// CreateCertificateRequest creates a new certificate request based on a
- // template. The following members of template are used: Attributes, DNSNames,
- // EmailAddresses, ExtraExtensions, IPAddresses, URIs, SignatureAlgorithm, and
- // Subject. The private key is the private key of the signer.
+ // template. The following members of template are used:
+ //
+ //  - Attributes
+ //  - DNSNames
+ //  - EmailAddresses
+ //  - ExtraExtensions
+ //  - IPAddresses
+ //  - URIs
+ //  - SignatureAlgorithm
+ //  - Subject
+ //
+ // The private key is the private key of the signer.
//
// The returned slice is the certificate request in DER encoding.
//
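
And the CSR counterpart: a hedged sketch that builds a request from Subject and DNSNames (two of the members listed above) and round-trips it through ParseCertificateRequest. Values are illustrative:

```
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"log"

	"github.com/google/certificate-transparency-go/x509"
	"github.com/google/certificate-transparency-go/x509/pkix"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	template := x509.CertificateRequest{
		Subject:  pkix.Name{CommonName: "example.com"},
		DNSNames: []string{"example.com", "www.example.com"},
	}
	csrDER, err := x509.CreateCertificateRequest(rand.Reader, &template, key)
	if err != nil {
		log.Fatal(err)
	}
	// Round-trip: the SAN extension built from DNSNames comes back on parse.
	csr, err := x509.ParseCertificateRequest(csrDER)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(csr.Subject.CommonName, csr.DNSNames)
}
```
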
@@ -2662,70 +2903,57 @@ func CreateCertificateRequest(rand io.Reader, template *CertificateRequest, priv
extensions = append(extensions, template.ExtraExtensions...)
- var attributes []pkix.AttributeTypeAndValueSET
- attributes = append(attributes, template.Attributes...)
+ // Make a copy of template.Attributes because we may alter it below.
+ attributes := make([]pkix.AttributeTypeAndValueSET, 0, len(template.Attributes))
+ for _, attr := range template.Attributes {
+ values := make([][]pkix.AttributeTypeAndValue, len(attr.Value))
+ copy(values, attr.Value)
+ attributes = append(attributes, pkix.AttributeTypeAndValueSET{
+ Type: attr.Type,
+ Value: values,
+ })
+ }
+ extensionsAppended := false
if len(extensions) > 0 {
- // specifiedExtensions contains all the extensions that we
- // found specified via template.Attributes.
- specifiedExtensions := make(map[string]bool)
- for _, atvSet := range template.Attributes {
- if !atvSet.Type.Equal(oidExtensionRequest) {
+ // Append the extensions to an existing attribute if possible.
+ for _, atvSet := range attributes {
+ if !atvSet.Type.Equal(oidExtensionRequest) || len(atvSet.Value) == 0 {
continue
}
+ // specifiedExtensions contains all the extensions that we
+ // found specified via template.Attributes.
+ specifiedExtensions := make(map[string]bool)
for _, atvs := range atvSet.Value {
for _, atv := range atvs {
specifiedExtensions[atv.Type.String()] = true
}
}
- }
- atvs := make([]pkix.AttributeTypeAndValue, 0, len(extensions))
- for _, e := range extensions {
- if specifiedExtensions[e.Id.String()] {
- // Attributes already contained a value for
- // this extension and it takes priority.
- continue
- }
- atvs = append(atvs, pkix.AttributeTypeAndValue{
- // There is no place for the critical flag in a CSR.
- Type: e.Id,
- Value: e.Value,
- })
- }
- // Append the extensions to an existing attribute if possible.
- appended := false
- for _, atvSet := range attributes {
- if !atvSet.Type.Equal(oidExtensionRequest) || len(atvSet.Value) == 0 {
- continue
- }
- atvSet.Value[0] = append(atvSet.Value[0], atvs...)
- appended = true
- break
- }
- // Otherwise, add a new attribute for the extensions.
- if !appended {
- attributes = append(attributes, pkix.AttributeTypeAndValueSET{
- Type: oidExtensionRequest,
- Value: [][]pkix.AttributeTypeAndValue{
- atvs,
- },
- })
- }
+ newValue := make([]pkix.AttributeTypeAndValue, 0, len(atvSet.Value[0])+len(extensions))
+ newValue = append(newValue, atvSet.Value[0]...)
+ for _, e := range extensions {
+ if specifiedExtensions[e.Id.String()] {
+ // Attributes already contained a value for
+ // this extension and it takes priority.
+ continue
+ }
+ newValue = append(newValue, pkix.AttributeTypeAndValue{
+ // There is no place for the critical
+ // flag in an AttributeTypeAndValue.
+ Type: e.Id,
+ Value: e.Value,
+ })
+ }
+ atvSet.Value[0] = newValue
+ extensionsAppended = true
+ break
+ }
}
- asn1Subject := template.RawSubject
- if len(asn1Subject) == 0 {
- asn1Subject, err = asn1.Marshal(template.Subject.ToRDNSequence())
- if err != nil {
- return
- }
- }
rawAttributes, err := newRawAttributes(attributes)
@@ -2733,6 +2961,38 @@ func CreateCertificateRequest(rand io.Reader, template *CertificateRequest, priv
return
}
+ // If not included in attributes, add a new attribute for the
+ // extensions.
+ if len(extensions) > 0 && !extensionsAppended {
+ attr := struct {
+ Type asn1.ObjectIdentifier
+ Value [][]pkix.Extension `asn1:"set"`
+ }{
+ Type: oidExtensionRequest,
+ Value: [][]pkix.Extension{extensions},
+ }
+ b, err := asn1.Marshal(attr)
+ if err != nil {
+ return nil, errors.New("x509: failed to serialise extensions attribute: " + err.Error())
+ }
+ var rawValue asn1.RawValue
+ if _, err := asn1.Unmarshal(b, &rawValue); err != nil {
+ return nil, err
+ }
+ rawAttributes = append(rawAttributes, rawValue)
+ }
+ asn1Subject := template.RawSubject
+ if len(asn1Subject) == 0 {
+ asn1Subject, err = asn1.Marshal(template.Subject.ToRDNSequence())
+ if err != nil {
+ return nil, err
+ }
+ }
tbsCSR := tbsCertificateRequest{
Version: 0, // PKCS #10, RFC 2986
Subject: asn1.RawValue{FullBytes: asn1Subject},
@@ -2789,7 +3049,7 @@ func ParseCertificateRequest(asn1Data []byte) (*CertificateRequest, error) {
func parseCertificateRequest(in *certificateRequest) (*CertificateRequest, error) {
out := &CertificateRequest{
Raw: in.Raw,
RawTBSCertificateRequest: in.TBSCSR.Raw,
RawSubjectPublicKeyInfo: in.TBSCSR.PublicKey.Raw,
RawSubject: in.TBSCSR.Subject.FullBytes,
@@ -2804,10 +3064,15 @@ func parseCertificateRequest(in *certificateRequest) (*CertificateRequest, error
}
var err error
- out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCSR.PublicKey)
+ var nfe NonFatalErrors
+ out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCSR.PublicKey, &nfe)
if err != nil {
return nil, err
}
+ // Treat non-fatal errors as fatal here.
+ if len(nfe.Errors) > 0 {
+ return nil, nfe.Errors[0]
+ }
var subject pkix.RDNSequence
if rest, err := asn1.Unmarshal(in.TBSCSR.Subject.FullBytes, &subject); err != nil {
@@ -2822,7 +3087,6 @@ func parseCertificateRequest(in *certificateRequest) (*CertificateRequest, error
return nil, err
}
- var nfe NonFatalErrors
for _, extension := range out.Extensions {
if extension.Id.Equal(OIDExtensionSubjectAltName) {
out.DNSNames, out.EmailAddresses, out.IPAddresses, out.URIs, err = parseSANExtension(extension.Value, &nfe)
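
The "treat non-fatal errors as fatal here" behaviour above is visible to callers of ParseCertificateRequest: a public key that only parses with recorded non-fatal errors still results in an error for a CSR. A small sketch, with the input path as a placeholder:

```
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/google/certificate-transparency-go/x509"
)

func main() {
	der, err := ioutil.ReadFile("request.der") // placeholder input path
	if err != nil {
		log.Fatal(err)
	}
	csr, err := x509.ParseCertificateRequest(der)
	if err != nil {
		// Per the change above, problems found while parsing the CSR's
		// public key are not downgraded to warnings.
		log.Fatalf("could not parse CSR: %v", err)
	}
	fmt.Println("CSR subject:", csr.Subject.CommonName, "SANs:", csr.DNSNames)
}
```
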


@@ -11,13 +11,12 @@ package main
import (
"crypto/rand"
- // START CT CHANGES
- "github.com/google/certificate-transparency-go/x509"
- "github.com/google/certificate-transparency-go/x509/pkix"
- // END CT CHANGES
"encoding/pem"
"math/big"
"time"
+ "github.com/google/certificate-transparency-go/x509"
+ "github.com/google/certificate-transparency-go/x509/pkix"
)
func main() {