Godeps: Update google/certificate-transparency-go to c25855a. (#3948)
This updates Boulder's vendored dependency for `github.com/google/certificate-transparency-go` to c25855a, the tip of master at the time of writing. Unit tests are confirmed to pass:

```
$ git log --pretty=format:'%h' -n 1 c25855a
$ go test ./...
ok    github.com/google/certificate-transparency-go  (cached)
ok    github.com/google/certificate-transparency-go/asn1  (cached)
ok    github.com/google/certificate-transparency-go/client  22.985s
?     github.com/google/certificate-transparency-go/client/configpb  [no test files]
?     github.com/google/certificate-transparency-go/client/ctclient  [no test files]
ok    github.com/google/certificate-transparency-go/ctpolicy  (cached)
ok    github.com/google/certificate-transparency-go/ctutil  (cached)
?     github.com/google/certificate-transparency-go/ctutil/sctcheck  [no test files]
?     github.com/google/certificate-transparency-go/ctutil/sctscan  [no test files]
ok    github.com/google/certificate-transparency-go/dnsclient  (cached)
ok    github.com/google/certificate-transparency-go/fixchain  0.091s
?     github.com/google/certificate-transparency-go/fixchain/chainfix  [no test files]
ok    github.com/google/certificate-transparency-go/fixchain/ratelimiter  (cached)
ok    github.com/google/certificate-transparency-go/gossip  (cached)
?     github.com/google/certificate-transparency-go/gossip/gossip_server  [no test files]
ok    github.com/google/certificate-transparency-go/gossip/minimal  0.028s
?     github.com/google/certificate-transparency-go/gossip/minimal/configpb  [no test files]
?     github.com/google/certificate-transparency-go/gossip/minimal/goshawk  [no test files]
?     github.com/google/certificate-transparency-go/gossip/minimal/gosmin  [no test files]
ok    github.com/google/certificate-transparency-go/gossip/minimal/x509ext  (cached)
ok    github.com/google/certificate-transparency-go/ingestor/ranges  (cached)
ok    github.com/google/certificate-transparency-go/jsonclient  0.007s
ok    github.com/google/certificate-transparency-go/logid  (cached)
ok    github.com/google/certificate-transparency-go/loglist  (cached)
?     github.com/google/certificate-transparency-go/loglist/findlog  [no test files]
ok    github.com/google/certificate-transparency-go/loglist2  (cached)
?     github.com/google/certificate-transparency-go/preload  [no test files]
?     github.com/google/certificate-transparency-go/preload/dumpscts  [no test files]
?     github.com/google/certificate-transparency-go/preload/preloader  [no test files]
ok    github.com/google/certificate-transparency-go/scanner  0.009s
?     github.com/google/certificate-transparency-go/scanner/scanlog  [no test files]
ok    github.com/google/certificate-transparency-go/tls  (cached)
ok    github.com/google/certificate-transparency-go/trillian/ctfe  (cached)
?     github.com/google/certificate-transparency-go/trillian/ctfe/configpb  [no test files]
?     github.com/google/certificate-transparency-go/trillian/ctfe/ct_server  [no test files]
?     github.com/google/certificate-transparency-go/trillian/ctfe/testonly  [no test files]
ok    github.com/google/certificate-transparency-go/trillian/integration  0.023s
?     github.com/google/certificate-transparency-go/trillian/integration/ct_hammer  [no test files]
?     github.com/google/certificate-transparency-go/trillian/migrillian  [no test files]
?     github.com/google/certificate-transparency-go/trillian/migrillian/configpb  [no test files]
ok    github.com/google/certificate-transparency-go/trillian/migrillian/core  (cached)
?     github.com/google/certificate-transparency-go/trillian/mockclient  [no test files]
ok    github.com/google/certificate-transparency-go/trillian/util  (cached)
ok    github.com/google/certificate-transparency-go/x509  (cached)
?     github.com/google/certificate-transparency-go/x509/pkix  [no test files]
?     github.com/google/certificate-transparency-go/x509util  [no test files]
?     github.com/google/certificate-transparency-go/x509util/certcheck  [no test files]
?     github.com/google/certificate-transparency-go/x509util/crlcheck  [no test files]
```
parent 8f5de538c1
commit c06503319c
@@ -151,43 +151,43 @@
 		},
 		{
 			"ImportPath": "github.com/google/certificate-transparency-go",
-			"Comment": "v1.0.9-13-g5ab67e5",
-			"Rev": "5ab67e519c93568ac3ee50fd6772a5bcf8aa460d"
+			"Comment": "v1.0.21-106-gc25855a",
+			"Rev": "c25855a82c757ba69d84b68c1a6706df6dbb5cee"
 		},
 		{
 			"ImportPath": "github.com/google/certificate-transparency-go/asn1",
-			"Comment": "v1.0.9-13-g5ab67e5",
-			"Rev": "5ab67e519c93568ac3ee50fd6772a5bcf8aa460d"
+			"Comment": "v1.0.21-106-gc25855a",
+			"Rev": "c25855a82c757ba69d84b68c1a6706df6dbb5cee"
 		},
 		{
 			"ImportPath": "github.com/google/certificate-transparency-go/client",
-			"Comment": "v1.0.9-13-g5ab67e5",
-			"Rev": "5ab67e519c93568ac3ee50fd6772a5bcf8aa460d"
+			"Comment": "v1.0.21-106-gc25855a",
+			"Rev": "c25855a82c757ba69d84b68c1a6706df6dbb5cee"
 		},
 		{
 			"ImportPath": "github.com/google/certificate-transparency-go/client/configpb",
-			"Comment": "v1.0.9-13-g5ab67e5",
-			"Rev": "5ab67e519c93568ac3ee50fd6772a5bcf8aa460d"
+			"Comment": "v1.0.21-106-gc25855a",
+			"Rev": "c25855a82c757ba69d84b68c1a6706df6dbb5cee"
 		},
 		{
 			"ImportPath": "github.com/google/certificate-transparency-go/jsonclient",
-			"Comment": "v1.0.9-13-g5ab67e5",
-			"Rev": "5ab67e519c93568ac3ee50fd6772a5bcf8aa460d"
+			"Comment": "v1.0.21-106-gc25855a",
+			"Rev": "c25855a82c757ba69d84b68c1a6706df6dbb5cee"
 		},
 		{
 			"ImportPath": "github.com/google/certificate-transparency-go/tls",
-			"Comment": "v1.0.9-13-g5ab67e5",
-			"Rev": "5ab67e519c93568ac3ee50fd6772a5bcf8aa460d"
+			"Comment": "v1.0.21-106-gc25855a",
+			"Rev": "c25855a82c757ba69d84b68c1a6706df6dbb5cee"
 		},
 		{
 			"ImportPath": "github.com/google/certificate-transparency-go/x509",
-			"Comment": "v1.0.9-13-g5ab67e5",
-			"Rev": "5ab67e519c93568ac3ee50fd6772a5bcf8aa460d"
+			"Comment": "v1.0.21-106-gc25855a",
+			"Rev": "c25855a82c757ba69d84b68c1a6706df6dbb5cee"
 		},
 		{
 			"ImportPath": "github.com/google/certificate-transparency-go/x509/pkix",
-			"Comment": "v1.0.9-13-g5ab67e5",
-			"Rev": "5ab67e519c93568ac3ee50fd6772a5bcf8aa460d"
+			"Comment": "v1.0.21-106-gc25855a",
+			"Rev": "c25855a82c757ba69d84b68c1a6706df6dbb5cee"
 		},
 		{
 			"ImportPath": "github.com/google/safebrowsing",
@@ -242,7 +242,7 @@ func (pub *Impl) SubmitToSingleCTWithResult(ctx context.Context, req *pubpb.Requ
 		return nil, err
 	}
 	var body string
-	if respErr, ok := err.(ctClient.RspError); ok && respErr.StatusCode < 500 {
+	if respErr, ok := err.(jsonclient.RspError); ok && respErr.StatusCode < 500 {
 		body = string(respErr.Body)
 	}
 	pub.log.AuditErrf("Failed to submit certificate to CT log at %s: %s Body=%q",
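Boulder's publisher (diffed above) now type-asserts the submission error against `jsonclient.RspError` (which `client.RspError` now aliases) to pull the HTTP status code and body into the audit log. A minimal sketch of the same pattern, with a hypothetical `submit` function standing in for the real CT call:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/google/certificate-transparency-go/jsonclient"
)

// submit is a hypothetical stand-in for a CT submission call that can fail
// with a jsonclient.RspError carrying HTTP details.
func submit() error {
	return jsonclient.RspError{
		Err:        errors.New("bad request"),
		StatusCode: 400,
		Body:       []byte(`{"error":"invalid chain"}`),
	}
}

func main() {
	err := submit()
	// For 4xx responses, include the response body in the log line; for 5xx
	// responses (or non-RspError failures) only the error itself is logged.
	var body string
	if respErr, ok := err.(jsonclient.RspError); ok && respErr.StatusCode < 500 {
		body = string(respErr.Body)
	}
	fmt.Printf("Failed to submit certificate to CT log: %s Body=%q\n", err, body)
}
```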
@@ -16,9 +16,14 @@
/data
/dumpscts
/etcdiscover
/findlog
/goshawk
/gosmin
/gossip_server
/preloader
/scanlog
/sctcheck
/sctscan
/trillian_log_server
/trillian_log_signer
/trillian.json
@ -1,29 +1,42 @@
|
|||
sudo: false
|
||||
sudo: true # required for CI push into Kubernetes.
|
||||
language: go
|
||||
os: linux
|
||||
go: 1.9
|
||||
go: "1.10"
|
||||
|
||||
go_import_path: github.com/google/certificate-transparency-go
|
||||
|
||||
env:
|
||||
- GOFLAGS=
|
||||
- GOFLAGS=-race
|
||||
- GOFLAGS= WITH_ETCD=true
|
||||
- GOFLAGS= WITH_ETCD=true WITH_COVERAGE=true
|
||||
- GOFLAGS=-race WITH_ETCD=true
|
||||
|
||||
matrix:
|
||||
fast_finish: true
|
||||
|
||||
addons:
|
||||
apt:
|
||||
sources:
|
||||
- mysql-5.7-trusty
|
||||
packages:
|
||||
- mysql-server
|
||||
- mysql-client
|
||||
|
||||
services:
|
||||
- docker
|
||||
|
||||
before_install:
|
||||
- sudo mysql -e "use mysql; update user set authentication_string=PASSWORD('') where User='root'; update user set plugin='mysql_native_password';FLUSH PRIVILEGES;"
|
||||
- sudo mysql_upgrade
|
||||
- sudo service mysql restart
|
||||
|
||||
install:
|
||||
- |
|
||||
if [ ! -d $HOME/gopath/src/github.com/google ]; then
|
||||
mkdir -p $HOME/gopath/src/github.com/google
|
||||
ln -s $TRAVIS_BUILD_DIR $HOME/gopath/src/github.com/google/certificate-transparency-go
|
||||
fi
|
||||
- mkdir ../protoc
|
||||
- |
|
||||
(
|
||||
cd ../protoc
|
||||
wget https://github.com/google/protobuf/releases/download/v3.2.0/protoc-3.2.0-${TRAVIS_OS_NAME}-x86_64.zip
|
||||
unzip protoc-3.2.0-${TRAVIS_OS_NAME}-x86_64.zip
|
||||
wget https://github.com/google/protobuf/releases/download/v3.5.1/protoc-3.5.1-${TRAVIS_OS_NAME}-x86_64.zip
|
||||
unzip protoc-3.5.1-${TRAVIS_OS_NAME}-x86_64.zip
|
||||
)
|
||||
- export PATH=$(pwd)/../protoc/bin:$PATH
|
||||
- go get -d -t ./...
|
||||
|
|
@ -41,9 +54,8 @@ install:
|
|||
|
||||
script:
|
||||
- set -e
|
||||
- export TRILLIAN_SQL_DRIVER=mysql
|
||||
- cd $HOME/gopath/src/github.com/google/certificate-transparency-go
|
||||
- ./scripts/presubmit.sh ${PRESUBMIT_OPTS}
|
||||
- ./scripts/presubmit.sh ${PRESUBMIT_OPTS} ${WITH_COVERAGE:+--coverage}
|
||||
- |
|
||||
# Check re-generation didn't change anything
|
||||
status=$(git status --porcelain | grep -v coverage) || :
|
||||
|
|
@ -64,3 +76,4 @@ script:
|
|||
after_success:
|
||||
- cp /tmp/coverage.txt .
|
||||
- bash <(curl -s https://codecov.io/bash)
|
||||
|
||||
|
|
|
|||
232 vendor/github.com/google/certificate-transparency-go/CHANGELOG.md (generated, vendored, new file)
@@ -0,0 +1,232 @@
|
|||
# CERTIFICATE-TRANSPARENCY-GO Changelog
|
||||
|
||||
## v1.0.21 - CTFE Logging / Path Options. Mirroring. RPKI. Non Fatal X.509 error improvements
|
||||
|
||||
Published 2018-08-20 10:11:04 +0000 UTC
|
||||
|
||||
### CTFE
|
||||
|
||||
`CTFE` no longer prints certificate chains as long byte strings in messages when handler errors occur. This was obscuring the reason for the failure and wasn't particularly useful.
|
||||
|
||||
`CTFE` now has a global log URL path prefix flag and a configuration proto for a log specific path. The latter should help for various migration strategies if existing C++ server logs are going to be converted to run on the new code.
|
||||
|
||||
### Mirroring
|
||||
|
||||
More progress has been made on log mirroring. We believe that it's now at the point where testing can begin.
|
||||
|
||||
### Utilities / Libraries
|
||||
|
||||
The `certcheck` and `ct_hammer` utilities have received more enhancements.
|
||||
|
||||
`x509` and `x509util` now support Subject Information Access and additional extensions for [RPKI / RFC 3779](https://www.ietf.org/rfc/rfc3779.txt).
|
||||
|
||||
`scanner` / `fixchain` and some other command line utilities now have better handling of non-fatal errors.
|
||||
|
||||
Commit [3629d6846518309d22c16fee15d1007262a459d2](https://api.github.com/repos/google/certificate-transparency-go/commits/3629d6846518309d22c16fee15d1007262a459d2) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.21)
|
||||
|
||||
## v1.0.20 - Minimal Gossip / Go 1.11 Fix / Utility Improvements
|
||||
|
||||
Published 2018-07-05 09:21:34 +0000 UTC
|
||||
|
||||
Enhancements have been made to various utilities including `scanner`, `sctcheck`, `loglist` and `x509util`.
|
||||
|
||||
The `allow_verification_with_non_compliant_keys` flag has been removed from `signatures.go`.
|
||||
|
||||
An implementation of Gossip has been added. See the `gossip/minimal` package for more information.
|
||||
|
||||
An X.509 compatibility issue for Go 1.11 has been fixed. This should be backwards compatible with 1.10.
|
||||
|
||||
Commit [37a384cd035e722ea46e55029093e26687138edf](https://api.github.com/repos/google/certificate-transparency-go/commits/37a384cd035e722ea46e55029093e26687138edf) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.20)
|
||||
|
||||
## v1.0.19 - CTFE User Quota
|
||||
|
||||
Published 2018-06-01 13:51:52 +0000 UTC
|
||||
|
||||
CTFE now supports Trillian Log's explicit quota API; quota can be requested based on the remote user's IP, as well as per-issuing certificate in submitted chains.
|
||||
|
||||
Commit [8736a411b4ff214ea20687e46c2b67d66ebd83fc](https://api.github.com/repos/google/certificate-transparency-go/commits/8736a411b4ff214ea20687e46c2b67d66ebd83fc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.19)
|
||||
|
||||
## v1.0.18 - Adding Migration Tool / Client Additions / K8 Config
|
||||
|
||||
Published 2018-06-01 14:28:20 +0000 UTC
|
||||
|
||||
Work on a log migration tool (Migrillian) is in progress. This is not yet ready for production use but will provide features for mirroring and migrating logs.
|
||||
|
||||
The `RequestLog` API allows for logging of SCTs when they are issued by CTFE.
|
||||
|
||||
The CT Go client now supports `GetEntryAndProof`. Utilities have been switched over to use the `glog` package.
|
||||
|
||||
Commit [77abf2dac5410a62c04ac1c662c6d0fa54afc2dc](https://api.github.com/repos/google/certificate-transparency-go/commits/77abf2dac5410a62c04ac1c662c6d0fa54afc2dc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.18)
|
||||
|
||||
## v1.0.17 - Merkle verification / Tracing / Demo script / CORS
|
||||
|
||||
Published 2018-06-01 14:25:16 +0000 UTC
|
||||
|
||||
Now uses Merkle Tree verification from Trillian.
|
||||
|
||||
The CT server now supports CORS.
|
||||
|
||||
Request tracing added using OpenCensus. For GCE / K8 it just requires the flag to be enabled to export traces to Stackdriver. Other environments may differ.
|
||||
|
||||
A demo script was added that goes through setting up a simple deployment suitable for development / demo purposes. This may be useful for those new to the project.
|
||||
|
||||
Commit [3c3d22ce946447d047a03228ebb4a41e3e4eb15b](https://api.github.com/repos/google/certificate-transparency-go/commits/3c3d22ce946447d047a03228ebb4a41e3e4eb15b) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.17)
|
||||
|
||||
## v1.0.16 - Lifecycle test / Go 1.10.1
|
||||
|
||||
Published 2018-06-01 14:22:23 +0000 UTC
|
||||
|
||||
An integration test was added that goes through a create / drain queue / freeze lifecycle for a log.
|
||||
|
||||
Changes to `x509` were merged from Go 1.10.1.
|
||||
|
||||
Commit [a72423d09b410b80673fd1135ba1022d04bac6cd](https://api.github.com/repos/google/certificate-transparency-go/commits/a72423d09b410b80673fd1135ba1022d04bac6cd) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.16)
|
||||
|
||||
## v1.0.15 - More control of verification, grpclb, stackdriver metrics
|
||||
|
||||
Published 2018-06-01 14:20:32 +0000 UTC
|
||||
|
||||
Facilities were added to the `x509` package to control whether verification checks are applied.
|
||||
|
||||
Log server requests are now balanced using `gRPClb`.
|
||||
|
||||
For Kubernetes, metrics can be published to Stackdriver monitoring.
|
||||
|
||||
Commit [684d6eee6092774e54d301ccad0ed61bc8d010c1](https://api.github.com/repos/google/certificate-transparency-go/commits/684d6eee6092774e54d301ccad0ed61bc8d010c1) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.15)
|
||||
|
||||
## v1.0.14 - SQLite Removed, LeafHashForLeaf
|
||||
|
||||
Published 2018-06-01 14:15:37 +0000 UTC
|
||||
|
||||
Support for SQLite was removed. The motivation was ongoing test flakiness caused by multi-user access. This database may work for an embedded scenario but is not suitable for use in a server environment.
|
||||
|
||||
A `LeafHashForLeaf` client API was added and is now used by the CT client and integration tests.
|
||||
|
||||
Commit [698cd6a661196db4b2e71437422178ffe8705006](https://api.github.com/repos/google/certificate-transparency-go/commits/698cd6a661196db4b2e71437422178ffe8705006) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.14)
|
||||
|
||||
## v1.0.13 - Crypto changes, util updates, sync with trillian repo, loglist verification
|
||||
|
||||
Published 2018-06-01 14:15:21 +0000 UTC
|
||||
|
||||
Some of our custom crypto packages that were wrapping calls to the standard package have been removed, and the base features are now used directly.
|
||||
|
||||
Updates were made to GCE ingress and health checks.
|
||||
|
||||
The log list utility can verify signatures.
|
||||
|
||||
Commit [480c3654a70c5383b9543ec784203030aedbd3a5](https://api.github.com/repos/google/certificate-transparency-go/commits/480c3654a70c5383b9543ec784203030aedbd3a5) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.13)
|
||||
|
||||
## v1.0.12 - Client / util updates & CTFE fixes
|
||||
|
||||
Published 2018-06-01 14:13:42 +0000 UTC
|
||||
|
||||
The CT client can now use a JSON loglist to find logs.
|
||||
|
||||
CTFE had a fix applied for preissued precerts.
|
||||
|
||||
A DNS client was added and CT client was extended to support DNS retrieval.
|
||||
|
||||
Commit [74c06c95e0b304a050a1c33764c8a01d653a16e3](https://api.github.com/repos/google/certificate-transparency-go/commits/74c06c95e0b304a050a1c33764c8a01d653a16e3) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.12)
|
||||
|
||||
## v1.0.11 - Kubernetes CI / Integration fixes
|
||||
|
||||
Published 2018-06-01 14:12:18 +0000 UTC
|
||||
|
||||
Updates to Kubernetes configs, mostly related to running a CI instance.
|
||||
|
||||
Commit [0856acca7e0ab7f082ae83a1fbb5d21160962efc](https://api.github.com/repos/google/certificate-transparency-go/commits/0856acca7e0ab7f082ae83a1fbb5d21160962efc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.11)
|
||||
|
||||
## v1.0.10 - More scanner, x509, utility and client fixes. CTFE updates
|
||||
|
||||
Published 2018-06-01 14:09:47 +0000 UTC
|
||||
|
||||
The CT client was using the wrong protocol buffer library package. To guard against this in the future, a check has been added to our lint config.
|
||||
|
||||
The `x509` and `asn1` packages have had upstream fixes applied from Go 1.10rc1.
|
||||
|
||||
Commit [1bec4527572c443752ad4f2830bef88be0533236](https://api.github.com/repos/google/certificate-transparency-go/commits/1bec4527572c443752ad4f2830bef88be0533236) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.10)
|
||||
|
||||
## v1.0.9 - Scanner, x509, utility and client fixes
|
||||
|
||||
Published 2018-06-01 14:11:13 +0000 UTC
|
||||
|
||||
The `scanner` utility now displays throughput stats.
|
||||
|
||||
Build instructions and README files were updated.
|
||||
|
||||
The `certcheck` utility can be told to ignore unknown critical X.509 extensions.
|
||||
|
||||
Commit [c06833528d04a94eed0c775104d1107bab9ae17c](https://api.github.com/repos/google/certificate-transparency-go/commits/c06833528d04a94eed0c775104d1107bab9ae17c) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.9)
|
||||
|
||||
## v1.0.8 - Client fixes, align with trillian repo
|
||||
|
||||
Published 2018-06-01 14:06:44 +0000 UTC
|
||||
|
||||
|
||||
|
||||
Commit [e8b02c60f294b503dbb67de0868143f5d4935e56](https://api.github.com/repos/google/certificate-transparency-go/commits/e8b02c60f294b503dbb67de0868143f5d4935e56) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.8)
|
||||
|
||||
## v1.0.7 - CTFE fixes
|
||||
|
||||
Published 2018-06-01 14:06:13 +0000 UTC
|
||||
|
||||
An issue was fixed with CTFE signature caching. In an unlikely set of circumstances this could lead to log mis-operation. While the chances of this are small, we recommend that versions prior to this one are not deployed.
|
||||
|
||||
Commit [52c0590bd3b4b80c5497005b0f47e10557425eeb](https://api.github.com/repos/google/certificate-transparency-go/commits/52c0590bd3b4b80c5497005b0f47e10557425eeb) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.7)
|
||||
|
||||
## v1.0.6 - crlcheck improvements / other fixes
|
||||
|
||||
Published 2018-06-01 14:04:22 +0000 UTC
|
||||
|
||||
The `crlcheck` utility has had several fixes and enhancements. Additionally the `hammer` now supports temporal logs.
|
||||
|
||||
Commit [3955e4a00c42e83ff17ce25003976159c5d0f0f9](https://api.github.com/repos/google/certificate-transparency-go/commits/3955e4a00c42e83ff17ce25003976159c5d0f0f9) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.6)
|
||||
|
||||
## v1.0.5 - X509 and asn1 fixes
|
||||
|
||||
Published 2018-06-01 14:02:58 +0000 UTC
|
||||
|
||||
This release is mostly fixes to the `x509` and `asn1` packages. Some command line utilities were also updated.
|
||||
|
||||
Commit [ae40d07cce12f1227c6e658e61c9dddb7646f97b](https://api.github.com/repos/google/certificate-transparency-go/commits/ae40d07cce12f1227c6e658e61c9dddb7646f97b) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.5)
|
||||
|
||||
## v1.0.4 - Multi log backend configs
|
||||
|
||||
Published 2018-06-01 14:02:07 +0000 UTC
|
||||
|
||||
Support was added to allow CTFE to use multiple backends, each serving a distinct set of logs. It allows for e.g. regional backend deployment with common frontend servers.
|
||||
|
||||
Commit [62023ed90b41fa40854957b5dec7d9d73594723f](https://api.github.com/repos/google/certificate-transparency-go/commits/62023ed90b41fa40854957b5dec7d9d73594723f) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.4)
|
||||
|
||||
## v1.0.3 - Hammer updates, use standard context
|
||||
|
||||
Published 2018-06-01 14:01:11 +0000 UTC
|
||||
|
||||
After the Go 1.9 migration, references to anything other than the standard `context` package have been removed. This is the only package that should be used from now on.
|
||||
|
||||
Commit [b28beed8b9aceacc705e0ff4a11d435a310e3d97](https://api.github.com/repos/google/certificate-transparency-go/commits/b28beed8b9aceacc705e0ff4a11d435a310e3d97) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.3)
|
||||
|
||||
## v1.0.2 - Go 1.9
|
||||
|
||||
Published 2018-06-01 14:00:00 +0000 UTC
|
||||
|
||||
Go 1.9 is now required to build the code.
|
||||
|
||||
Commit [3aed33d672ee43f04b1e8a00b25ca3e2e2e74309](https://api.github.com/repos/google/certificate-transparency-go/commits/3aed33d672ee43f04b1e8a00b25ca3e2e2e74309) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.2)
|
||||
|
||||
## v1.0.1 - Hammer and client improvements
|
||||
|
||||
Published 2018-06-01 13:59:29 +0000 UTC
|
||||
|
||||
|
||||
|
||||
Commit [c28796cc21776667fb05d6300e32d9517be96515](https://api.github.com/repos/google/certificate-transparency-go/commits/c28796cc21776667fb05d6300e32d9517be96515) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.1)
|
||||
|
||||
## v1.0 - First Trillian CT Release
|
||||
|
||||
Published 2018-06-01 13:59:00 +0000 UTC
|
||||
|
||||
This is the point that corresponds to the 1.0 release in the trillian repo.
|
||||
|
||||
Commit [abb79e468b6f3bbd48d1ab0c9e68febf80d52c4d](https://api.github.com/repos/google/certificate-transparency-go/commits/abb79e468b6f3bbd48d1ab0c9e68febf80d52c4d) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0)
|
||||
|
||||
|
|
@ -47,6 +47,7 @@ Oliver Weidner <Oliver.Weidner@gmail.com>
|
|||
Pascal Leroy <phl@google.com>
|
||||
Paul Hadfield <hadfieldp@google.com> <paul@phad.org.uk>
|
||||
Paul Lietar <lietar@google.com>
|
||||
Pavel Kalinnikov <pkalinnikov@google.com> <pavelkalinnikov@gmail.com>
|
||||
Pierre Phaneuf <pphaneuf@google.com>
|
||||
Rob Percival <robpercival@google.com>
|
||||
Rob Stradling <rob@comodo.com>
|
||||
|
|
|
|||
|
|
@ -11,9 +11,9 @@ repository requires Go version 1.9.
|
|||
- [Repository Structure](#repository-structure)
|
||||
- [Trillian CT Personality](#trillian-ct-personality)
|
||||
- [Working on the Code](#working-on-the-code)
|
||||
- [Running Codebase Checks](#running-codebase-checks)
|
||||
- [Rebuilding Generated Code](#rebuilding-generated-code)
|
||||
- [Updating Vendor Code](#updating-vendor-code)
|
||||
- [Running Codebase Checks](#running-codebase-checks)
|
||||
|
||||
## Repository Structure
|
||||
|
||||
|
|
@ -29,57 +29,44 @@ The main parts of the repository are:
|
|||
[pre-certificates defined in RFC 6962](https://tools.ietf.org/html/rfc6962#section-3.1).
|
||||
- `tls` holds a library for processing TLS-encoded data as described in
|
||||
[RFC 5246](https://tools.ietf.org/html/rfc5246).
|
||||
- `x509util` provides additional utilities for dealing with
|
||||
- `x509util/` provides additional utilities for dealing with
|
||||
`x509.Certificate`s.
|
||||
- CT client libraries:
|
||||
- The top-level `ct` package (in `.`) holds types and utilities for working
|
||||
with CT data structures defined in
|
||||
[RFC 6962](https://tools.ietf.org/html/rfc6962).
|
||||
- `client/` and `jsonclient/` hold libraries that allow access to CT Logs
|
||||
via entrypoints described in
|
||||
via HTTP entrypoints described in
|
||||
[section 4 of RFC 6962](https://tools.ietf.org/html/rfc6962#section-4).
|
||||
- `dnsclient/` has a library that allows access to CT Logs over
|
||||
[DNS](https://github.com/google/certificate-transparency-rfcs/blob/master/dns/draft-ct-over-dns.md).
|
||||
- `scanner/` holds a library for scanning the entire contents of an existing
|
||||
CT Log.
|
||||
- CT Personality for [Trillian](https://github.com/google/trillian):
|
||||
- `trillian/` holds code that allows a Certificate Transparency Log to be
|
||||
run using a Trillian Log as its back-end -- see
|
||||
[below](#trillian-ct-personality).
|
||||
- Command line tools:
|
||||
- `./client/ctclient` allows interaction with a CT Log
|
||||
- `./client/ctclient` allows interaction with a CT Log.
|
||||
- `./ctutil/sctcheck` allows SCTs (signed certificate timestamps) from a CT
|
||||
Log to be verified.
|
||||
- `./scanner/scanlog` allows an existing CT Log to be scanned for certificates
|
||||
of interest; please be polite when running this tool against a Log.
|
||||
- `./x509util/certcheck` allows display and verification of certificates
|
||||
- `./x509util/crlcheck` allows display and verification of certificate
|
||||
revocation lists (CRLs).
|
||||
- CT Personality for [Trillian](https://github.com/google/trillian):
|
||||
- `trillian/` holds code that allows a Certificate Transparency Log to be
|
||||
run using a Trillian Log as its back-end -- see
|
||||
[below](#trillian-ct-personality).
|
||||
- Other libraries related to CT:
|
||||
- `ctutil/` holds utility functions for validating and verifying CT data
|
||||
structures.
|
||||
- `loglist/` has a library for reading
|
||||
[JSON lists of CT Logs](https://www.certificate-transparency.org/known-logs).
|
||||
|
||||
|
||||
## Trillian CT Personality
|
||||
|
||||
The `trillian/` subdirectory holds code and scripts for running a CT Log based
|
||||
on the [Trillian](https://github.com/google/trillian) general transparency Log.
|
||||
|
||||
The main code for the CT personality is held in `trillian/ctfe`; this code
|
||||
responds to HTTP requests on the
|
||||
[CT API paths](https://tools.ietf.org/html/rfc6962#section-4) and translates
|
||||
them to the equivalent gRPC API requests to the Trillian Log.
|
||||
|
||||
This obviously relies on the gRPC API definitions at
|
||||
`github.com/google/trillian`; the code also uses common libraries from the
|
||||
Trillian project for:
|
||||
- exposing monitoring and statistics via an `interface` and corresponding
|
||||
Prometheus implementation (`github.com/google/trillian/monitoring/...`)
|
||||
- dealing with cryptographic keys (`github.com/google/trillian/crypto/...`).
|
||||
|
||||
The `trillian/integration/` directory holds scripts and tests for running the whole
|
||||
system locally. In particular:
|
||||
- `trillian/integration/ct_integration_test.sh` brings up local processes
|
||||
running a Trillian Log server, signer and a CT personality, and exercises the
|
||||
complete set of RFC 6962 API entrypoints.
|
||||
- `trillian/integration/ct_hammer_test.sh` brings up a complete system and runs
|
||||
a continuous randomized test of the CT entrypoints.
|
||||
|
||||
These scripts require a local database instance to be configured as described
|
||||
in the [Trillian instructions](https://github.com/google/trillian#mysql-setup).
|
||||
on the [Trillian](https://github.com/google/trillian) general transparency Log,
|
||||
and is [documented separately](trillian/README.md).
|
||||
|
||||
|
||||
## Working on the Code
|
||||
|
|
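The `client/` and `jsonclient/` libraries described in the repository overview above are the ones Boulder's publisher builds on. A hedged sketch of fetching a log's signed tree head with them; the log URL is illustrative, and a real caller should supply the log's public key in `jsonclient.Options` so the STH signature can be verified:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/google/certificate-transparency-go/client"
	"github.com/google/certificate-transparency-go/jsonclient"
)

func main() {
	// With an empty Options the client has no log public key and cannot
	// verify the STH signature; production callers should set one.
	lc, err := client.New("https://ct.googleapis.com/pilot",
		&http.Client{Timeout: 30 * time.Second}, jsonclient.Options{})
	if err != nil {
		log.Fatalf("creating log client: %s", err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	sth, err := lc.GetSTH(ctx)
	if err != nil {
		log.Fatalf("get-sth failed: %s", err)
	}
	fmt.Printf("tree size %d at timestamp %d\n", sth.TreeSize, sth.Timestamp)
}
```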
@ -90,6 +77,27 @@ dependencies and tools, described in the following sections. The
|
|||
for the required tools and scripts, as it may be more up-to-date than this
|
||||
document.
|
||||
|
||||
### Running Codebase Checks
|
||||
|
||||
The [`scripts/presubmit.sh`](scripts/presubmit.sh) script runs various tools
|
||||
and tests over the codebase; please ensure this script passes before sending
|
||||
pull requests for review.
|
||||
|
||||
```bash
|
||||
# Install gometalinter and all linters
|
||||
go get -u github.com/alecthomas/gometalinter
|
||||
gometalinter --install
|
||||
|
||||
# Run code generation, build, test and linters
|
||||
./scripts/presubmit.sh
|
||||
|
||||
# Run build, test and linters but skip code generation
|
||||
./scripts/presubmit.sh --no-generate
|
||||
|
||||
# Or just run the linters alone:
|
||||
gometalinter --config=gometalinter.json ./...
|
||||
```
|
||||
|
||||
### Rebuilding Generated Code
|
||||
|
||||
Some of the CT Go code is autogenerated from other files:
|
||||
|
|
@ -121,24 +129,3 @@ upstream repository does not guarantee back-compatibility between the tip
|
|||
`master` branch and the current stable release). See
|
||||
[instructions in the Trillian repo](https://github.com/google/trillian#updating-vendor-code)
|
||||
for how to update vendored subtrees.
|
||||
|
||||
|
||||
### Running Codebase Checks
|
||||
|
||||
The [`scripts/presubmit.sh`](scripts/presubmit.sh) script runs various tools
|
||||
and tests over the codebase.
|
||||
|
||||
```bash
|
||||
# Install gometalinter and all linters
|
||||
go get -u github.com/alecthomas/gometalinter
|
||||
gometalinter --install
|
||||
|
||||
# Run code generation, build, test and linters
|
||||
./scripts/presubmit.sh
|
||||
|
||||
# Run build, test and linters but skip code generation
|
||||
./scripts/presubmit.sh --no-generate
|
||||
|
||||
# Or just run the linters alone:
|
||||
gometalinter --config=gometalinter.json ./...
|
||||
```
|
||||
|
|
|
|||
|
|
@@ -9,9 +9,20 @@
 // http://luca.ntop.org/Teaching/Appunti/asn1.html.
 //
 // This is a fork of the Go standard library ASN.1 implementation
-// (encoding/asn1). The main difference is that this version tries to correct
-// for errors (e.g. use of tagPrintableString when the string data is really
-// ISO8859-1 - a common error present in many x509 certificates in the wild.)
+// (encoding/asn1), with the aim of relaxing checks for various things
+// that are common errors present in many X.509 certificates in the
+// wild.
+//
+// Main differences:
+//   - Extra "lax" tag that recursively applies and relaxes some strict
+//     checks:
+//     - parsePrintableString() copes with invalid PrintableString contents,
+//       e.g. use of tagPrintableString when the string data is really
+//       ISO8859-1.
+//     - checkInteger() allows integers that are not minimally encoded (and
+//       so are not correct DER).
+//     - parseObjectIdentifier() allows zero-length OIDs.
+//   - Better diagnostics on which particular field causes errors.
 package asn1
 
 // ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER etc
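The new `lax` behaviour is reached through the ordinary struct-tag interface of the forked package. A minimal sketch, assuming `lax` applies to the tagged field as the comment above describes; the sample DER encodes `SEQUENCE { INTEGER 1 }` with a deliberately non-minimal INTEGER:

```go
package main

import (
	"fmt"

	"github.com/google/certificate-transparency-go/asn1"
)

type message struct {
	// The lax tag relaxes strict DER checks for this field (and any
	// fields nested within it).
	Value int64 `asn1:"lax"`
}

func main() {
	// SEQUENCE { INTEGER 1 }, but the INTEGER body is 00 01, which is
	// not minimal DER.
	der := []byte{0x30, 0x04, 0x02, 0x02, 0x00, 0x01}

	var strict struct{ Value int64 }
	if _, err := asn1.Unmarshal(der, &strict); err != nil {
		fmt.Println("strict parse rejects it:", err) // integer not minimally-encoded
	}

	var relaxed message
	if _, err := asn1.Unmarshal(der, &relaxed); err != nil {
		fmt.Println("lax parse failed:", err)
		return
	}
	fmt.Println("lax parse accepts it:", relaxed.Value) // 1
}
```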
|
@ -31,7 +42,6 @@ import (
|
|||
"math/big"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
|
@@ -94,13 +104,16 @@ func parseBool(bytes []byte, fieldName string) (ret bool, err error) {
 
 // checkInteger returns nil if the given bytes are a valid DER-encoded
 // INTEGER and an error otherwise.
-func checkInteger(bytes []byte, fieldName string) error {
+func checkInteger(bytes []byte, lax bool, fieldName string) error {
 	if len(bytes) == 0 {
 		return StructuralError{"empty integer", fieldName}
 	}
 	if len(bytes) == 1 {
 		return nil
 	}
+	if lax {
+		return nil
+	}
 	if (bytes[0] == 0 && bytes[1]&0x80 == 0) || (bytes[0] == 0xff && bytes[1]&0x80 == 0x80) {
 		return StructuralError{"integer not minimally-encoded", fieldName}
 	}
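For reference, the minimal-encoding rule that the strict (non-`lax`) path continues to enforce is that a multi-byte INTEGER must not start with nine identical sign bits. An illustrative stand-alone check, not the vendored code itself:

```go
package main

import "fmt"

// minimallyEncoded reports whether b is a minimally-encoded two's-complement
// INTEGER body, mirroring the check that the strict (non-lax) path applies.
func minimallyEncoded(b []byte) bool {
	if len(b) <= 1 {
		return len(b) == 1 // empty integers are rejected outright
	}
	// A leading 0x00 is only needed when the next byte has its high bit set;
	// a leading 0xff is only needed when the next byte has its high bit clear.
	if (b[0] == 0x00 && b[1]&0x80 == 0) || (b[0] == 0xff && b[1]&0x80 == 0x80) {
		return false
	}
	return true
}

func main() {
	fmt.Println(minimallyEncoded([]byte{0x01}))       // true: 1
	fmt.Println(minimallyEncoded([]byte{0x00, 0x01})) // false: redundant leading zero
	fmt.Println(minimallyEncoded([]byte{0x00, 0x80})) // true: zero byte needed for +128
}
```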
|
@ -109,8 +122,8 @@ func checkInteger(bytes []byte, fieldName string) error {
|
|||
|
||||
// parseInt64 treats the given bytes as a big-endian, signed integer and
|
||||
// returns the result.
|
||||
func parseInt64(bytes []byte, fieldName string) (ret int64, err error) {
|
||||
err = checkInteger(bytes, fieldName)
|
||||
func parseInt64(bytes []byte, lax bool, fieldName string) (ret int64, err error) {
|
||||
err = checkInteger(bytes, lax, fieldName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
|
@ -132,11 +145,11 @@ func parseInt64(bytes []byte, fieldName string) (ret int64, err error) {
|
|||
|
||||
// parseInt treats the given bytes as a big-endian, signed integer and returns
|
||||
// the result.
|
||||
func parseInt32(bytes []byte, fieldName string) (int32, error) {
|
||||
if err := checkInteger(bytes, fieldName); err != nil {
|
||||
func parseInt32(bytes []byte, lax bool, fieldName string) (int32, error) {
|
||||
if err := checkInteger(bytes, lax, fieldName); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
ret64, err := parseInt64(bytes, fieldName)
|
||||
ret64, err := parseInt64(bytes, lax, fieldName)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
|
@ -150,8 +163,8 @@ var bigOne = big.NewInt(1)
|
|||
|
||||
// parseBigInt treats the given bytes as a big-endian, signed integer and returns
|
||||
// the result.
|
||||
func parseBigInt(bytes []byte, fieldName string) (*big.Int, error) {
|
||||
if err := checkInteger(bytes, fieldName); err != nil {
|
||||
func parseBigInt(bytes []byte, lax bool, fieldName string) (*big.Int, error) {
|
||||
if err := checkInteger(bytes, lax, fieldName); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ret := new(big.Int)
|
||||
|
|
@ -270,8 +283,11 @@ func (oi ObjectIdentifier) String() string {
|
|||
// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
|
||||
// returns it. An object identifier is a sequence of variable length integers
|
||||
// that are assigned in a hierarchy.
|
||||
func parseObjectIdentifier(bytes []byte, fieldName string) (s []int, err error) {
|
||||
func parseObjectIdentifier(bytes []byte, lax bool, fieldName string) (s ObjectIdentifier, err error) {
|
||||
if len(bytes) == 0 {
|
||||
if lax {
|
||||
return ObjectIdentifier{}, nil
|
||||
}
|
||||
err = SyntaxError{"zero length OBJECT IDENTIFIER", fieldName}
|
||||
return
|
||||
}
|
||||
|
|
@@ -415,10 +431,25 @@ func isNumeric(b byte) bool {
 
 // parsePrintableString parses an ASN.1 PrintableString from the given byte
 // array and returns it.
-func parsePrintableString(bytes []byte, fieldName string) (ret string, err error) {
+func parsePrintableString(bytes []byte, lax bool, fieldName string) (ret string, err error) {
 	for _, b := range bytes {
 		if !isPrintable(b, allowAsterisk, allowAmpersand) {
-			err = SyntaxError{"PrintableString contains invalid character", fieldName}
+			if !lax {
+				err = SyntaxError{"PrintableString contains invalid character", fieldName}
+			} else {
+				// Might be an ISO8859-1 string stuffed in, check if it
+				// would be valid and assume that's what's happened if so,
+				// otherwise try T.61, failing that give up and just assign
+				// the bytes
+				switch {
+				case couldBeISO8859_1(bytes):
+					ret, err = iso8859_1ToUTF8(bytes), nil
+				case couldBeT61(bytes):
+					ret, err = parseT61String(bytes)
+				default:
+					err = SyntaxError{"PrintableString contains invalid character, couldn't determine correct String type", fieldName}
+				}
+			}
 			return
 		}
 	}
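Under `lax`, an invalid PrintableString is reinterpreted as ISO 8859-1 (or T.61) instead of being rejected. The ISO 8859-1 fallback is essentially a byte-by-byte widening to UTF-8; the helper below is an illustrative equivalent, since the vendored `couldBeISO8859_1`/`iso8859_1ToUTF8` implementations are not part of this diff:

```go
package main

import "fmt"

// iso8859_1ToUTF8 widens each ISO 8859-1 byte to the Unicode code point with
// the same value. This is an illustrative equivalent of the fallback the lax
// path takes, not the vendored implementation.
func iso8859_1ToUTF8(in []byte) string {
	runes := make([]rune, len(in))
	for i, b := range in {
		runes[i] = rune(b)
	}
	return string(runes)
}

func main() {
	// "Münster" in ISO 8859-1: 0xFC is not a valid PrintableString character,
	// so a strict parse would fail, while the lax path recovers the text.
	raw := []byte{'M', 0xFC, 'n', 's', 't', 'e', 'r'}
	fmt.Println(iso8859_1ToUTF8(raw)) // Münster
}
```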
|
@ -592,7 +623,7 @@ func parseTagAndLength(bytes []byte, initOffset int, fieldName string) (ret tagA
|
|||
// parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse
|
||||
// a number of ASN.1 values from the given byte slice and returns them as a
|
||||
// slice of Go values of the given type.
|
||||
func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type, fieldName string) (ret reflect.Value, err error) {
|
||||
func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type, lax bool, fieldName string) (ret reflect.Value, err error) {
|
||||
matchAny, expectedTag, compoundType, ok := getUniversalType(elemType)
|
||||
if !ok {
|
||||
err = StructuralError{"unknown Go type for slice", fieldName}
|
||||
|
|
@ -631,7 +662,7 @@ func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type
|
|||
numElements++
|
||||
}
|
||||
ret = reflect.MakeSlice(sliceType, numElements, numElements)
|
||||
params := fieldParameters{}
|
||||
params := fieldParameters{lax: lax}
|
||||
offset := 0
|
||||
for i := 0; i < numElements; i++ {
|
||||
offset, err = parseField(ret.Index(i), bytes, offset, params)
|
||||
|
|
@ -735,22 +766,7 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
|
|||
innerBytes := bytes[offset : offset+t.length]
|
||||
switch t.tag {
|
||||
case TagPrintableString:
|
||||
result, err = parsePrintableString(innerBytes, params.name)
|
||||
if err != nil && strings.Contains(err.Error(), "PrintableString contains invalid character") {
|
||||
// Probably an ISO8859-1 string stuffed in, check if it
|
||||
// would be valid and assume that's what's happened if so,
|
||||
// otherwise try T.61, failing that give up and just assign
|
||||
// the bytes
|
||||
switch {
|
||||
case couldBeISO8859_1(innerBytes):
|
||||
result, err = iso8859_1ToUTF8(innerBytes), nil
|
||||
case couldBeT61(innerBytes):
|
||||
result, err = parseT61String(innerBytes)
|
||||
default:
|
||||
result = nil
|
||||
err = errors.New("PrintableString contains invalid character, but couldn't determine correct String type.")
|
||||
}
|
||||
}
|
||||
result, err = parsePrintableString(innerBytes, params.lax, params.name)
|
||||
case TagNumericString:
|
||||
result, err = parseNumericString(innerBytes, params.name)
|
||||
case TagIA5String:
|
||||
|
|
@ -760,11 +776,11 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
|
|||
case TagUTF8String:
|
||||
result, err = parseUTF8String(innerBytes)
|
||||
case TagInteger:
|
||||
result, err = parseInt64(innerBytes, params.name)
|
||||
result, err = parseInt64(innerBytes, params.lax, params.name)
|
||||
case TagBitString:
|
||||
result, err = parseBitString(innerBytes, params.name)
|
||||
case TagOID:
|
||||
result, err = parseObjectIdentifier(innerBytes, params.name)
|
||||
result, err = parseObjectIdentifier(innerBytes, params.lax, params.name)
|
||||
case TagUTCTime:
|
||||
result, err = parseUTCTime(innerBytes)
|
||||
case TagGeneralizedTime:
|
||||
|
|
@ -873,6 +889,12 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
|
|||
matchAnyClassAndTag = false
|
||||
}
|
||||
|
||||
if !params.explicit && params.private && params.tag != nil {
|
||||
expectedClass = ClassPrivate
|
||||
expectedTag = *params.tag
|
||||
matchAnyClassAndTag = false
|
||||
}
|
||||
|
||||
// We have unwrapped any explicit tagging at this point.
|
||||
if !matchAnyClassAndTag && (t.class != expectedClass || t.tag != expectedTag) ||
|
||||
(!matchAny && t.isCompound != compoundType) {
|
||||
|
|
@ -899,7 +921,7 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
|
|||
v.Set(reflect.ValueOf(result))
|
||||
return
|
||||
case objectIdentifierType:
|
||||
newSlice, err1 := parseObjectIdentifier(innerBytes, params.name)
|
||||
newSlice, err1 := parseObjectIdentifier(innerBytes, params.lax, params.name)
|
||||
v.Set(reflect.MakeSlice(v.Type(), len(newSlice), len(newSlice)))
|
||||
if err1 == nil {
|
||||
reflect.Copy(v, reflect.ValueOf(newSlice))
|
||||
|
|
@ -927,7 +949,7 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
|
|||
err = err1
|
||||
return
|
||||
case enumeratedType:
|
||||
parsedInt, err1 := parseInt32(innerBytes, params.name)
|
||||
parsedInt, err1 := parseInt32(innerBytes, params.lax, params.name)
|
||||
if err1 == nil {
|
||||
v.SetInt(int64(parsedInt))
|
||||
}
|
||||
|
|
@ -937,7 +959,7 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
|
|||
v.SetBool(true)
|
||||
return
|
||||
case bigIntType:
|
||||
parsedInt, err1 := parseBigInt(innerBytes, params.name)
|
||||
parsedInt, err1 := parseBigInt(innerBytes, params.lax, params.name)
|
||||
if err1 == nil {
|
||||
v.Set(reflect.ValueOf(parsedInt))
|
||||
}
|
||||
|
|
@ -954,13 +976,13 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
|
|||
return
|
||||
case reflect.Int, reflect.Int32, reflect.Int64:
|
||||
if val.Type().Size() == 4 {
|
||||
parsedInt, err1 := parseInt32(innerBytes, params.name)
|
||||
parsedInt, err1 := parseInt32(innerBytes, params.lax, params.name)
|
||||
if err1 == nil {
|
||||
val.SetInt(int64(parsedInt))
|
||||
}
|
||||
err = err1
|
||||
} else {
|
||||
parsedInt, err1 := parseInt64(innerBytes, params.name)
|
||||
parsedInt, err1 := parseInt64(innerBytes, params.lax, params.name)
|
||||
if err1 == nil {
|
||||
val.SetInt(parsedInt)
|
||||
}
|
||||
|
|
@ -992,6 +1014,7 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
|
|||
}
|
||||
innerParams := parseFieldParameters(field.Tag.Get("asn1"))
|
||||
innerParams.name = field.Name
|
||||
innerParams.lax = params.lax
|
||||
innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, innerParams)
|
||||
if err != nil {
|
||||
return
|
||||
|
|
@ -1008,7 +1031,7 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
|
|||
reflect.Copy(val, reflect.ValueOf(innerBytes))
|
||||
return
|
||||
}
|
||||
newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem(), params.name)
|
||||
newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem(), params.lax, params.name)
|
||||
if err1 == nil {
|
||||
val.Set(newSlice)
|
||||
}
|
||||
|
|
@ -1018,7 +1041,7 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
|
|||
var v string
|
||||
switch universalTag {
|
||||
case TagPrintableString:
|
||||
v, err = parsePrintableString(innerBytes, params.name)
|
||||
v, err = parsePrintableString(innerBytes, params.lax, params.name)
|
||||
case TagNumericString:
|
||||
v, err = parseNumericString(innerBytes, params.name)
|
||||
case TagIA5String:
|
||||
|
|
@@ -1110,11 +1133,13 @@ func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) {
 // The following tags on struct fields have special meaning to Unmarshal:
 //
 //	application	specifies that an APPLICATION tag is used
+//	private		specifies that a PRIVATE tag is used
 //	default:x	sets the default value for optional integer fields (only used if optional is also present)
 //	explicit	specifies that an additional, explicit tag wraps the implicit one
 //	optional	marks the field as ASN.1 OPTIONAL
 //	set		causes a SET, rather than a SEQUENCE type to be expected
 //	tag:x		specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC
+//	lax		relax strict encoding checks for this field, and for any fields within it
 //
 // If the type of the first field of a structure is RawContent then the raw
 // ASN1 contents of the struct will be stored in it.
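A short sketch of the two new tags in use, assuming they behave as documented above; the struct and values are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/google/certificate-transparency-go/asn1"
)

type wrapper struct {
	// Encoded with a PRIVATE-class tag number 2 instead of the usual
	// context-specific class.
	Serial int64 `asn1:"private,tag:2"`
	// Parsed with relaxed checks (e.g. non-minimal INTEGER encodings are
	// tolerated when unmarshalling data from the wild).
	Version int64 `asn1:"lax"`
}

func main() {
	in := wrapper{Serial: 42, Version: 3}

	der, err := asn1.Marshal(in)
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}

	var out wrapper
	if _, err := asn1.Unmarshal(der, &out); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	fmt.Printf("round-tripped: %+v\n", out)
}
```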
|
@ -75,12 +75,14 @@ type fieldParameters struct {
|
|||
optional bool // true iff the field is OPTIONAL
|
||||
explicit bool // true iff an EXPLICIT tag is in use.
|
||||
application bool // true iff an APPLICATION tag is in use.
|
||||
private bool // true iff a PRIVATE tag is in use.
|
||||
defaultValue *int64 // a default value for INTEGER typed fields (maybe nil).
|
||||
tag *int // the EXPLICIT or IMPLICIT tag (maybe nil).
|
||||
stringType int // the string tag to use when marshaling.
|
||||
timeType int // the time tag to use when marshaling.
|
||||
set bool // true iff this should be encoded as a SET
|
||||
omitEmpty bool // true iff this should be omitted if empty when marshaling.
|
||||
lax bool // true iff unmarshalling should skip some error checks
|
||||
name string // name of field for better diagnostics
|
||||
|
||||
// Invariants:
|
||||
|
|
@ -131,8 +133,15 @@ func parseFieldParameters(str string) (ret fieldParameters) {
|
|||
if ret.tag == nil {
|
||||
ret.tag = new(int)
|
||||
}
|
||||
case part == "private":
|
||||
ret.private = true
|
||||
if ret.tag == nil {
|
||||
ret.tag = new(int)
|
||||
}
|
||||
case part == "omitempty":
|
||||
ret.omitEmpty = true
|
||||
case part == "lax":
|
||||
ret.lax = true
|
||||
}
|
||||
}
|
||||
return
|
||||
|
|
|
|||
|
|
@ -631,6 +631,8 @@ func makeField(v reflect.Value, params fieldParameters) (e encoder, err error) {
|
|||
if params.tag != nil {
|
||||
if params.application {
|
||||
class = ClassApplication
|
||||
} else if params.private {
|
||||
class = ClassPrivate
|
||||
} else {
|
||||
class = ClassContextSpecific
|
||||
}
|
||||
|
|
|
|||
98 vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go (generated, vendored)
@ -1,22 +1,14 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: multilog.proto
|
||||
|
||||
/*
|
||||
Package configpb is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
multilog.proto
|
||||
|
||||
It has these top-level messages:
|
||||
TemporalLogConfig
|
||||
LogShardConfig
|
||||
*/
|
||||
package configpb
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import google_protobuf "github.com/golang/protobuf/ptypes/timestamp"
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
timestamp "github.com/golang/protobuf/ptypes/timestamp"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
|
|
@ -32,13 +24,36 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
|||
// TemporalLogConfig is a set of LogShardConfig messages, whose
|
||||
// time limits should be contiguous.
|
||||
type TemporalLogConfig struct {
|
||||
Shard []*LogShardConfig `protobuf:"bytes,1,rep,name=shard" json:"shard,omitempty"`
|
||||
Shard []*LogShardConfig `protobuf:"bytes,1,rep,name=shard,proto3" json:"shard,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *TemporalLogConfig) Reset() { *m = TemporalLogConfig{} }
|
||||
func (m *TemporalLogConfig) String() string { return proto.CompactTextString(m) }
|
||||
func (*TemporalLogConfig) ProtoMessage() {}
|
||||
func (*TemporalLogConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
func (m *TemporalLogConfig) Reset() { *m = TemporalLogConfig{} }
|
||||
func (m *TemporalLogConfig) String() string { return proto.CompactTextString(m) }
|
||||
func (*TemporalLogConfig) ProtoMessage() {}
|
||||
func (*TemporalLogConfig) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_33e545c6d900a512, []int{0}
|
||||
}
|
||||
|
||||
func (m *TemporalLogConfig) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_TemporalLogConfig.Unmarshal(m, b)
|
||||
}
|
||||
func (m *TemporalLogConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_TemporalLogConfig.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *TemporalLogConfig) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_TemporalLogConfig.Merge(m, src)
|
||||
}
|
||||
func (m *TemporalLogConfig) XXX_Size() int {
|
||||
return xxx_messageInfo_TemporalLogConfig.Size(m)
|
||||
}
|
||||
func (m *TemporalLogConfig) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_TemporalLogConfig.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_TemporalLogConfig proto.InternalMessageInfo
|
||||
|
||||
func (m *TemporalLogConfig) GetShard() []*LogShardConfig {
|
||||
if m != nil {
|
||||
|
|
@ -50,23 +65,46 @@ func (m *TemporalLogConfig) GetShard() []*LogShardConfig {
|
|||
// LogShardConfig describes the acceptable date range for a single shard of a temporal
|
||||
// log.
|
||||
type LogShardConfig struct {
|
||||
Uri string `protobuf:"bytes,1,opt,name=uri" json:"uri,omitempty"`
|
||||
Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"`
|
||||
// The log's public key in DER-encoded PKIX form.
|
||||
PublicKeyDer []byte `protobuf:"bytes,2,opt,name=public_key_der,json=publicKeyDer,proto3" json:"public_key_der,omitempty"`
|
||||
// not_after_start defines the start of the range of acceptable NotAfter
|
||||
// values, inclusive.
|
||||
// Leaving this unset implies no lower bound to the range.
|
||||
NotAfterStart *google_protobuf.Timestamp `protobuf:"bytes,3,opt,name=not_after_start,json=notAfterStart" json:"not_after_start,omitempty"`
|
||||
NotAfterStart *timestamp.Timestamp `protobuf:"bytes,3,opt,name=not_after_start,json=notAfterStart,proto3" json:"not_after_start,omitempty"`
|
||||
// not_after_limit defines the end of the range of acceptable NotAfter values,
|
||||
// exclusive.
|
||||
// Leaving this unset implies no upper bound to the range.
|
||||
NotAfterLimit *google_protobuf.Timestamp `protobuf:"bytes,4,opt,name=not_after_limit,json=notAfterLimit" json:"not_after_limit,omitempty"`
|
||||
NotAfterLimit *timestamp.Timestamp `protobuf:"bytes,4,opt,name=not_after_limit,json=notAfterLimit,proto3" json:"not_after_limit,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *LogShardConfig) Reset() { *m = LogShardConfig{} }
|
||||
func (m *LogShardConfig) String() string { return proto.CompactTextString(m) }
|
||||
func (*LogShardConfig) ProtoMessage() {}
|
||||
func (*LogShardConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
||||
func (m *LogShardConfig) Reset() { *m = LogShardConfig{} }
|
||||
func (m *LogShardConfig) String() string { return proto.CompactTextString(m) }
|
||||
func (*LogShardConfig) ProtoMessage() {}
|
||||
func (*LogShardConfig) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_33e545c6d900a512, []int{1}
|
||||
}
|
||||
|
||||
func (m *LogShardConfig) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_LogShardConfig.Unmarshal(m, b)
|
||||
}
|
||||
func (m *LogShardConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_LogShardConfig.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *LogShardConfig) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_LogShardConfig.Merge(m, src)
|
||||
}
|
||||
func (m *LogShardConfig) XXX_Size() int {
|
||||
return xxx_messageInfo_LogShardConfig.Size(m)
|
||||
}
|
||||
func (m *LogShardConfig) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_LogShardConfig.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_LogShardConfig proto.InternalMessageInfo
|
||||
|
||||
func (m *LogShardConfig) GetUri() string {
|
||||
if m != nil {
|
||||
|
|
@ -82,14 +120,14 @@ func (m *LogShardConfig) GetPublicKeyDer() []byte {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (m *LogShardConfig) GetNotAfterStart() *google_protobuf.Timestamp {
|
||||
func (m *LogShardConfig) GetNotAfterStart() *timestamp.Timestamp {
|
||||
if m != nil {
|
||||
return m.NotAfterStart
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *LogShardConfig) GetNotAfterLimit() *google_protobuf.Timestamp {
|
||||
func (m *LogShardConfig) GetNotAfterLimit() *timestamp.Timestamp {
|
||||
if m != nil {
|
||||
return m.NotAfterLimit
|
||||
}
|
||||
|
|
@ -101,9 +139,9 @@ func init() {
|
|||
proto.RegisterType((*LogShardConfig)(nil), "configpb.LogShardConfig")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("multilog.proto", fileDescriptor0) }
|
||||
func init() { proto.RegisterFile("multilog.proto", fileDescriptor_33e545c6d900a512) }
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
var fileDescriptor_33e545c6d900a512 = []byte{
|
||||
// 241 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x8f, 0xb1, 0x4e, 0xc3, 0x30,
|
||||
0x14, 0x45, 0x65, 0x02, 0x08, 0xdc, 0x12, 0xc0, 0x93, 0xd5, 0x85, 0xa8, 0x62, 0xc8, 0xe4, 0x4a,
|
||||
|
|
|
|||
|
|
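The `configpb` messages above describe a temporal (sharded) log: each shard accepts only certificates whose `NotAfter` falls within its window (inclusive start, exclusive limit; an unset bound means unbounded). A hedged sketch of building such a config; the URLs and dates are illustrative, and the shard public keys are omitted:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"

	"github.com/google/certificate-transparency-go/client/configpb"
)

func main() {
	boundary, err := ptypes.TimestampProto(time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC))
	if err != nil {
		log.Fatal(err)
	}

	cfg := &configpb.TemporalLogConfig{
		Shard: []*configpb.LogShardConfig{
			{
				Uri: "https://ct.example.com/2018", // hypothetical shard URL
				// NotAfterStart unset: no lower bound for the first shard.
				NotAfterLimit: boundary, // exclusive upper bound
			},
			{
				Uri:           "https://ct.example.com/2019", // hypothetical shard URL
				NotAfterStart: boundary,                      // inclusive lower bound
				// NotAfterLimit unset: no upper bound for the last shard.
			},
		},
	}
	fmt.Println(cfg.String())
}
```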
@ -41,11 +41,7 @@ func (c *LogClient) GetRawEntries(ctx context.Context, start, end int64) (*ct.Ge
|
|||
}
|
||||
|
||||
var resp ct.GetEntriesResponse
|
||||
httpRsp, body, err := c.GetAndParse(ctx, ct.GetEntriesPath, params, &resp)
|
||||
if err != nil {
|
||||
if httpRsp != nil {
|
||||
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
|
||||
}
|
||||
if _, _, err := c.GetAndParse(ctx, ct.GetEntriesPath, params, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
|
@@ -66,7 +62,7 @@ func (c *LogClient) GetEntries(ctx context.Context, start, end int64) ([]ct.LogE
 	for i, entry := range resp.Entries {
 		index := start + int64(i)
 		logEntry, err := ct.LogEntryFromLeaf(index, &entry)
-		if _, ok := err.(x509.NonFatalErrors); !ok && err != nil {
+		if x509.IsFatal(err) {
 			return nil, err
 		}
 		entries[i] = *logEntry
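`GetEntries` now asks `x509.IsFatal` whether a parse error can be tolerated instead of type-asserting on `x509.NonFatalErrors` directly. A sketch of the same pattern applied to downloaded leaves; the helper name and logging are illustrative:

```go
package main

import (
	"fmt"

	ct "github.com/google/certificate-transparency-go"
	"github.com/google/certificate-transparency-go/x509"
)

// handleLeaves converts raw Merkle tree leaves into LogEntry values,
// tolerating non-fatal X.509 parse problems (e.g. unhandled non-critical
// oddities) the same way the updated client does.
func handleLeaves(start int64, leaves []ct.LeafEntry) ([]ct.LogEntry, error) {
	entries := make([]ct.LogEntry, 0, len(leaves))
	for i := range leaves {
		index := start + int64(i)
		logEntry, err := ct.LogEntryFromLeaf(index, &leaves[i])
		if x509.IsFatal(err) {
			return nil, fmt.Errorf("entry %d: %v", index, err)
		}
		if err != nil {
			fmt.Printf("entry %d: non-fatal parse issue: %v\n", index, err)
		}
		entries = append(entries, *logEntry)
	}
	return entries, nil
}

func main() {
	entries, err := handleLeaves(0, nil)
	fmt.Println(len(entries), err)
}
```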
|
|
@ -19,7 +19,6 @@ package client
|
|||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
|
@ -35,11 +34,19 @@ type LogClient struct {
|
|||
jsonclient.JSONClient
|
||||
}
|
||||
|
||||
// CheckLogClient is an interface that allows (just) checking of various log contents.
|
||||
type CheckLogClient interface {
|
||||
BaseURI() string
|
||||
GetSTH(context.Context) (*ct.SignedTreeHead, error)
|
||||
GetSTHConsistency(ctx context.Context, first, second uint64) ([][]byte, error)
|
||||
GetProofByHash(ctx context.Context, hash []byte, treeSize uint64) (*ct.GetProofByHashResponse, error)
|
||||
}
|
||||
|
||||
// New constructs a new LogClient instance.
|
||||
// |uri| is the base URI of the CT log instance to interact with, e.g.
|
||||
// http://ct.googleapis.com/pilot
|
||||
// https://ct.googleapis.com/pilot
|
||||
// |hc| is the underlying client to be used for HTTP requests to the CT log.
|
||||
// |opts| can be used to provide a customer logger interface and a public key
|
||||
// |opts| can be used to provide a custom logger interface and a public key
|
||||
// for signature verification.
|
||||
func New(uri string, hc *http.Client, opts jsonclient.Options) (*LogClient, error) {
|
||||
logClient, err := jsonclient.New(uri, hc, opts)
|
||||
|
|
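The new `CheckLogClient` interface captures just the read-only checking entrypoints, so auditing code can be written against it and accept a full `client.LogClient` or any lighter implementation. A minimal sketch under that assumption; the helper and log URL are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	"github.com/google/certificate-transparency-go/client"
	"github.com/google/certificate-transparency-go/jsonclient"
)

// treeSize reports the current tree size of whatever log the checker points
// at. It only needs the read-only CheckLogClient surface.
func treeSize(ctx context.Context, lc client.CheckLogClient) (uint64, error) {
	sth, err := lc.GetSTH(ctx)
	if err != nil {
		return 0, fmt.Errorf("%s: get-sth: %v", lc.BaseURI(), err)
	}
	return sth.TreeSize, nil
}

func main() {
	// *client.LogClient satisfies CheckLogClient.
	lc, err := client.New("https://ct.googleapis.com/pilot", http.DefaultClient, jsonclient.Options{})
	if err != nil {
		log.Fatal(err)
	}
	size, err := treeSize(context.Background(), lc)
	fmt.Println(size, err)
}
```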
@ -49,18 +56,8 @@ func New(uri string, hc *http.Client, opts jsonclient.Options) (*LogClient, erro
|
|||
return &LogClient{*logClient}, err
|
||||
}
|
||||
|
||||
// RspError represents an error that occurred when processing a response from a server,
|
||||
// and also includes key details from the http.Response that triggered the error.
|
||||
type RspError struct {
|
||||
Err error
|
||||
StatusCode int
|
||||
Body []byte
|
||||
}
|
||||
|
||||
// Error formats the RspError instance, focusing on the error.
|
||||
func (e RspError) Error() string {
|
||||
return e.Err.Error()
|
||||
}
|
||||
// RspError represents a server error including HTTP information.
|
||||
type RspError = jsonclient.RspError
|
||||
|
||||
// Attempts to add |chain| to the log, using the api end-point specified by
|
||||
// |path|. If provided context expires before submission is complete an
|
||||
|
|
@ -74,9 +71,6 @@ func (c *LogClient) addChainWithRetry(ctx context.Context, ctype ct.LogEntryType
|
|||
|
||||
httpRsp, body, err := c.PostAndParseWithRetry(ctx, path, &req, &resp)
|
||||
if err != nil {
|
||||
if httpRsp != nil {
|
||||
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
|
@ -131,9 +125,6 @@ func (c *LogClient) AddJSON(ctx context.Context, data interface{}) (*ct.SignedCe
|
|||
var resp ct.AddChainResponse
|
||||
httpRsp, body, err := c.PostAndParse(ctx, ct.AddJSONPath, &req, &resp)
|
||||
if err != nil {
|
||||
if httpRsp != nil {
|
||||
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
var ds ct.DigitallySigned
|
||||
|
|
@ -164,40 +155,18 @@ func (c *LogClient) GetSTH(ctx context.Context) (*ct.SignedTreeHead, error) {
|
|||
var resp ct.GetSTHResponse
|
||||
httpRsp, body, err := c.GetAndParse(ctx, ct.GetSTHPath, nil, &resp)
|
||||
if err != nil {
|
||||
if httpRsp != nil {
|
||||
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
sth := ct.SignedTreeHead{
|
||||
TreeSize: resp.TreeSize,
|
||||
Timestamp: resp.Timestamp,
|
||||
}
|
||||
|
||||
if len(resp.SHA256RootHash) != sha256.Size {
|
||||
return nil, RspError{
|
||||
Err: fmt.Errorf("sha256_root_hash is invalid length, expected %d got %d", sha256.Size, len(resp.SHA256RootHash)),
|
||||
StatusCode: httpRsp.StatusCode,
|
||||
Body: body,
|
||||
}
|
||||
}
|
||||
copy(sth.SHA256RootHash[:], resp.SHA256RootHash)
|
||||
|
||||
var ds ct.DigitallySigned
|
||||
if rest, err := tls.Unmarshal(resp.TreeHeadSignature, &ds); err != nil {
|
||||
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
|
||||
} else if len(rest) > 0 {
|
||||
return nil, RspError{
|
||||
Err: fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)),
|
||||
StatusCode: httpRsp.StatusCode,
|
||||
Body: body,
|
||||
}
|
||||
}
|
||||
sth.TreeHeadSignature = ds
|
||||
if err := c.VerifySTHSignature(sth); err != nil {
|
||||
sth, err := resp.ToSignedTreeHead()
|
||||
if err != nil {
|
||||
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
|
||||
}
|
||||
return &sth, nil
|
||||
|
||||
if err := c.VerifySTHSignature(*sth); err != nil {
|
||||
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
|
||||
}
|
||||
return sth, nil
|
||||
}
|
||||
|
||||
// VerifySTHSignature checks the signature in sth, returning any error encountered or nil if verification is
|
||||
|
|
@ -232,11 +201,7 @@ func (c *LogClient) GetSTHConsistency(ctx context.Context, first, second uint64)
|
|||
"second": strconv.FormatUint(second, base10),
|
||||
}
|
||||
var resp ct.GetSTHConsistencyResponse
|
||||
httpRsp, body, err := c.GetAndParse(ctx, ct.GetSTHConsistencyPath, params, &resp)
|
||||
if err != nil {
|
||||
if httpRsp != nil {
|
||||
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
|
||||
}
|
||||
if _, _, err := c.GetAndParse(ctx, ct.GetSTHConsistencyPath, params, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.Consistency, nil
|
||||
|
|
@ -251,11 +216,7 @@ func (c *LogClient) GetProofByHash(ctx context.Context, hash []byte, treeSize ui
|
|||
"hash": b64Hash,
|
||||
}
|
||||
var resp ct.GetProofByHashResponse
|
||||
httpRsp, body, err := c.GetAndParse(ctx, ct.GetProofByHashPath, params, &resp)
|
||||
if err != nil {
|
||||
if httpRsp != nil {
|
||||
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
|
||||
}
|
||||
if _, _, err := c.GetAndParse(ctx, ct.GetProofByHashPath, params, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
|
|
@ -266,9 +227,6 @@ func (c *LogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error)
|
|||
var resp ct.GetRootsResponse
|
||||
httpRsp, body, err := c.GetAndParse(ctx, ct.GetRootsPath, nil, &resp)
|
||||
if err != nil {
|
||||
if httpRsp != nil {
|
||||
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
var roots []ct.ASN1Cert
|
||||
|
|
@ -281,3 +239,17 @@ func (c *LogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error)
|
|||
}
|
||||
return roots, nil
|
||||
}
|
||||
|
||||
// GetEntryAndProof returns a log entry and audit path for the index of a leaf.
|
||||
func (c *LogClient) GetEntryAndProof(ctx context.Context, index, treeSize uint64) (*ct.GetEntryAndProofResponse, error) {
|
||||
base10 := 10
|
||||
params := map[string]string{
|
||||
"leaf_index": strconv.FormatUint(index, base10),
|
||||
"tree_size": strconv.FormatUint(treeSize, base10),
|
||||
}
|
||||
var resp ct.GetEntryAndProofResponse
|
||||
if _, _, err := c.GetAndParse(ctx, ct.GetEntryAndProofPath, params, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
|
|
|||
|
|
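For a caller, the visible effect of these client changes is that HTTP failure detail now arrives as a `jsonclient.RspError` (which `client.RspError` aliases) and that get-entry-and-proof is reachable directly. A minimal sketch under that reading; the log URL and User-Agent string are placeholders, not values Boulder uses:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/google/certificate-transparency-go/client"
	"github.com/google/certificate-transparency-go/jsonclient"
)

func main() {
	// UserAgent is a new jsonclient.Options field at this commit.
	lc, err := client.New("https://ct.example.net", &http.Client{},
		jsonclient.Options{UserAgent: "example-ct-client/1.0"})
	if err != nil {
		panic(err)
	}

	ctx := context.Background()
	sth, err := lc.GetSTH(ctx)
	if err != nil {
		// client.RspError is now an alias for jsonclient.RspError, so one type
		// assertion recovers the HTTP status and body whichever layer failed.
		if rspErr, ok := err.(client.RspError); ok {
			fmt.Printf("get-sth failed: HTTP %d: %v\n", rspErr.StatusCode, rspErr.Err)
		} else {
			fmt.Println("get-sth failed:", err)
		}
		return
	}
	fmt.Printf("tree size %d at timestamp %d\n", sth.TreeSize, sth.Timestamp)

	// New at this commit: the get-entry-and-proof endpoint.
	if _, err := lc.GetEntryAndProof(ctx, 0, sth.TreeSize); err != nil {
		fmt.Println("get-entry-and-proof failed:", err)
	}
}
```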
In the temporal/multilog client, the `proto` import switches from `github.com/gogo/protobuf/proto` to `github.com/golang/protobuf/proto`, and `NewTemporalLogClient` now builds each shard's client with `jsonclient.Options{UserAgent: "ct-go-multilog/1.0"}` (the public key DER still comes from the shard config).
`vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml` (new, 63 lines) adds a Google Cloud Build pipeline: substitutions `_CLUSTER_NAME: trillian-opensource-ci` and `_MASTER_ZONE: us-central1-a`, then steps that build and push a CTFE Docker image tagged `gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA}` (also tagged `latest`), build an `envsubst` helper image, run it over the Kubernetes CTFE deployment/service/ingress configs, and `kubectl apply` the results against the target cluster.
`vendor/github.com/google/certificate-transparency-go/cloudbuild_tag.yaml` (new, 10 lines) is the tag-triggered variant: it only builds the CTFE image from `trillian/examples/deployment/docker/ctfe/Dockerfile` and publishes it as `gcr.io/${PROJECT_ID}/ctfe:${TAG_NAME}`.
The upstream linter configuration gains an `unforked` linter (`./scripts/check_unforked.sh:PATH:LINE:MESSAGE`) alongside the existing `forked` check, and adds `unforked` to the enabled linter list.
The `jsonclient` package picks up the error-handling and User-Agent work. `JSONClient` gains a `userAgent` field (and the `uri` field's doc comment now shows an https example); `Options` gains a `UserAgent` field that, when set, is sent as the `User-Agent` header on both `GetAndParse` and `PostAndParse` requests. The `RspError` type (Err, StatusCode, Body, with an `Error()` that returns the wrapped error's message) moves into this package, and a `BaseURI()` accessor is added. `GetAndParse` and `PostAndParse` now return a `RspError` (and a nil `*http.Response`) when reading the body fails, when the HTTP status is not 200, or when JSON decoding fails; `PostAndParseWithRetry` does the same for non-retriable statuses and includes the target URI in its backoff log message.
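At the `jsonclient` layer itself, the observable contract changes: on any body-read failure, non-200 status, or JSON decode failure the returned `*http.Response` is nil and the error is an `RspError` carrying the status code and raw body, and `Options.UserAgent` is attached to every request. A sketch of direct use; the endpoint and the response struct here are illustrative stand-ins (a real get-sth caller would decode into `ct.GetSTHResponse`):

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/google/certificate-transparency-go/jsonclient"
)

func main() {
	opts := jsonclient.Options{UserAgent: "example-agent/0.1"} // new field at this commit
	jc, err := jsonclient.New("https://ct.example.net", &http.Client{}, opts)
	if err != nil {
		panic(err)
	}
	fmt.Println("talking to", jc.BaseURI()) // BaseURI() is new at this commit

	// Illustrative response shape; only tree_size is decoded here.
	var resp struct {
		TreeSize uint64 `json:"tree_size"`
	}
	if _, _, err := jc.GetAndParse(context.Background(), "/ct/v1/get-sth", nil, &resp); err != nil {
		if rspErr, ok := err.(jsonclient.RspError); ok {
			fmt.Printf("HTTP %d (%d body bytes): %v\n", rspErr.StatusCode, len(rspErr.Body), rspErr.Err)
		} else {
			fmt.Println("transport error:", err)
		}
		return
	}
	fmt.Println("tree size:", resp.TreeSize)
}
```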
In the top-level `ct` package, leaf/entry handling is reworked. `MerkleTreeLeafFromRawChain` uses `x509.IsFatal(err)` instead of a raw error check when parsing chain certificates. A new `MerkleTreeLeafForEmbeddedSCT(chain, timestamp)` builds a precert `MerkleTreeLeaf` from a certificate with embedded SCTs (requiring the issuer at `chain[1]`), stripping the SCTList extension via `x509.RemoveSCTList` and hashing the issuer key. `LogEntryFromLeaf` is split in two: `RawLogEntryFromLeaf(index, entry)` does the TLS-level parsing into the new `RawLogEntry` type (unmarshalling the `MerkleTreeLeaf` and the `CertificateChain` or `PrecertChainEntry`, and rejecting unknown entry types), and `(*RawLogEntry).ToLogEntry()` does the x509 parsing, possibly returning a non-fatal error alongside a valid `LogEntry`; `LogEntryFromLeaf` now just chains the two. A `TimestampToTime` helper converts RFC 6962 millisecond timestamps to `time.Time`.
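The split into `RawLogEntryFromLeaf` and `ToLogEntry` lets a consumer do the cheap TLS-level parse first and defer X.509 parsing, with `x509.IsFatal` separating fatal from non-fatal certificate problems. A sketch under that reading; `leafEntry` would come from a get-entries response:

```go
package example

import (
	"fmt"

	ct "github.com/google/certificate-transparency-go"
	"github.com/google/certificate-transparency-go/x509"
)

// parseEntry is illustrative only; leafEntry would be one element of a
// ct.GetEntriesResponse.Entries slice.
func parseEntry(index int64, leafEntry *ct.LeafEntry) (*ct.LogEntry, error) {
	// TLS-level parsing only (new at this commit).
	raw, err := ct.RawLogEntryFromLeaf(index, leafEntry)
	if err != nil {
		return nil, fmt.Errorf("entry %d: %v", index, err)
	}

	// X.509 parsing on demand; a non-nil entry may come back together with a
	// non-fatal error (x509.NonFatalErrors), which callers can choose to tolerate.
	entry, err := raw.ToLogEntry()
	if x509.IsFatal(err) {
		return nil, fmt.Errorf("entry %d: %v", index, err)
	}
	return entry, err
}
```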
In the signature helpers, the flag-backed `allowVerificationWithNonCompliantKeys` becomes an exported package variable, `AllowVerificationWithNonCompliantKeys` (default false), consulted for undersized RSA keys and non-P256 ECDSA keys. A `PublicKeyFromB64` helper decodes a base64 DER key via `x509.ParsePKIXPublicKey`, and `SignatureVerifier`'s key field is exported as `PubKey` (with `VerifySignature` updated to match).
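Two of these changes matter to embedders: the compliance escape hatch is now an ordinary package variable rather than a registered flag (so vendoring this package no longer adds a flag to the host binary), and `PublicKeyFromB64` plus the exported `PubKey` field shorten verifier construction. A sketch; the base64 key is a placeholder argument:

```go
package example

import (
	"fmt"

	ct "github.com/google/certificate-transparency-go"
)

// newVerifier is illustrative; b64Key would be a log's base64-encoded DER public key.
func newVerifier(b64Key string) (*ct.SignatureVerifier, error) {
	// Previously the -allow_verification_with_non_compliant_keys flag; now a variable.
	ct.AllowVerificationWithNonCompliantKeys = false

	pk, err := ct.PublicKeyFromB64(b64Key) // new helper at this commit
	if err != nil {
		return nil, fmt.Errorf("bad public key: %v", err)
	}
	v, err := ct.NewSignatureVerifier(pk)
	if err != nil {
		return nil, err
	}
	fmt.Printf("verifying with a %T key\n", v.PubKey) // field is now exported
	return v, nil
}
```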
The `tls` package adds `SignatureAlgorithmFromPubKey`, which maps `*ecdsa.PublicKey`, `*rsa.PublicKey` and `*dsa.PublicKey` to the `ECDSA`, `RSA` and `DSA` enum values (anything else maps to `Anonymous`), with the import block widened to pull in the relevant crypto packages.
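`SignatureAlgorithmFromPubKey` is a small convenience for code that assembles `DigitallySigned` values and needs the RFC 5246 algorithm enum for a given key. A sketch:

```go
package example

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	"github.com/google/certificate-transparency-go/tls"
)

func algForFreshKey() (tls.SignatureAlgorithm, error) {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return tls.Anonymous, err
	}
	// ECDSA, RSA and DSA public keys map to their enum values; anything
	// else (including non-pointer key types) maps to Anonymous.
	alg := tls.SignatureAlgorithmFromPubKey(key.Public())
	fmt.Println("signature algorithm:", alg)
	return alg, nil
}
```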
The `ct` type definitions grow several pieces: `TreeLeafPrefix`/`TreeNodePrefix` constants (the RFC 6962 s2.1 domain-separation prefix bytes), the `RawLogEntry` struct (Index, Leaf, Cert, Chain, where Cert holds either the certificate or the original precert DER), a `String()` method on `SignedTreeHead` that omits an empty LogID, an `APIEndpoint` string type with constants for the section-4 endpoints (add-chain, add-pre-chain, get-sth, get-entries, get-proof-by-hash, get-sth-consistency, get-roots, get-entry-and-proof) kept in sync with the existing URI-path constants, and `(*GetSTHResponse).ToSignedTreeHead()`, which validates the root-hash length and TLS-decodes the tree head signature.
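`ToSignedTreeHead` centralises the STH assembly that the client's `GetSTH` previously did inline (root-hash length check, TLS-decoding `tree_head_signature`), and the new `String` method gives a compact, LogID-aware dump. A sketch, assuming a `GetSTHResponse` already decoded from JSON:

```go
package example

import (
	"fmt"

	ct "github.com/google/certificate-transparency-go"
)

// describeSTH is illustrative; resp would normally come from a get-sth call.
func describeSTH(resp *ct.GetSTHResponse) error {
	sth, err := resp.ToSignedTreeHead() // new at this commit
	if err != nil {
		return fmt.Errorf("malformed get-sth response: %v", err)
	}
	// SignedTreeHead.String() omits the LogID when it is empty.
	fmt.Println(sth)
	return nil
}
```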
In the vendored `x509` package, `CertPool` gains an unexported `copy()` and a `SystemCertPool()` that returns a copy of the cached system roots (and an error on Windows, where no system pool is available), and `AppendCertsFromPEM` now skips a certificate only when `IsFatal(err)` reports a fatal parse error.
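`SystemCertPool` brings the vendored pool in line with the standard library: callers get a defensive copy of the cached system roots, so mutations never leak back, and Windows reports an error since no system pool is available there. A sketch:

```go
package example

import (
	"github.com/google/certificate-transparency-go/x509"
)

// rootsOrEmpty is illustrative: fall back to an empty pool where no
// system pool is available (e.g. on Windows).
func rootsOrEmpty() *x509.CertPool {
	pool, err := x509.SystemCertPool() // new at this commit; returns a copy
	if err != nil {
		return x509.NewCertPool()
	}
	return pool
}
```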
`vendor/github.com/google/certificate-transparency-go/x509/curves.go` (new, 37 lines) defines the SECP192R1 / P-192 curve parameters (SEC-2 section 2.2.2), initialised once via `sync.Once` and exposed through an internal `secp192r1()` accessor, for certificates observed in the wild that use curves the standard `crypto/elliptic` package does not provide.
Two darwin cgo files, one of them `x509/nilref_zero_darwin.go`, drop the `-D__MAC_OS_X_VERSION_MAX_ALLOWED=1080` define from their `#cgo CFLAGS`, keeping only `-mmacosx-version-min=10.6`.
The `MarshalPKCS8PrivateKey` doc comment is corrected to list `*ecdsa.PrivateKey` (not `*ecdsa.PublicKey`) as a supported key type.
In `pkix`, the import block is tidied (the `// START CT CHANGES` / `// END CT CHANGES` markers around the `asn1` import are removed and the import regrouped), and the `AttributeTypeAndValue` doc link is switched to https.
Two new Windows helpers, `x509/ptr_sysptr_windows.go` (20 lines, build-tagged `go1.11`) and `x509/ptr_uint_windows.go` (17 lines, `!go1.11`), provide `convertToPolicyParaType(unsafe.Pointer)` returning `syscall.Pointer` or `uintptr` respectively, matching the type change of `syscall.CertChainPolicyPara.ExtraPolicyPara` in Go 1.11.
In the CRL code, the OID variables are split into two `var` blocks (CRL extensions per RFC 5280 s5.2, and CRL entry extensions per s5.3), and `ParseCertificateListDER` decodes the AuthorityInfoAccess extension into `[]accessDescription` instead of the old `[]authorityInfoAccess` type.
`initSystemRoots` now sets `systemRoots` to nil when `loadSystemRoots()` returns an error.
The cgo-based darwin root fetcher is modernised: its `#cgo CFLAGS` move to `-mmacosx-version-min=10.10 -D__MAC_OS_X_VERSION_MAX_ALLOWED=101300`; the Mountain Lion fallback (`FetchPEMRootsCTX509_MountainLion` and the `useOldCodeCTX509` osrelease sniff) is deleted; loop index variables are hoisted to C89-style declarations; and certificate export switches from the deprecated `SecKeychainItemExport` to `SecItemExport`.
The exec-based darwin verifier's debug output (`debugExecDarwinRoots`) now prints the full `cert.Subject` rather than just the CommonName when `verify-cert` accepts or rejects a certificate.
`vendor/github.com/google/certificate-transparency-go/x509/root_js.go` (new, 10 lines, build-tagged `js,wasm`) declares an empty `certFiles` list for the WebAssembly target.
One `loadSystemRoots` implementation now returns the (possibly empty) pool when no read error was recorded (`if bestErr == nil { return roots, nil }`) instead of failing outright.
The Unix root-loading file adds `js,wasm` to its build tag line (`dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris`), and its `loadSystemRoots` now also succeeds when no certificates were found but no error occurred (`len(roots.certs) > 0 || firstErr == nil`).
On Windows, `checkChainSSLServerPolicy` builds `syscall.CertChainPolicyPara` with `ExtraPolicyPara: convertToPolicyParaType(unsafe.Pointer(sslPara))` instead of a raw `uintptr` cast, using the new version-specific helpers above.
`vendor/github.com/google/certificate-transparency-go/x509/rpki.go` (new, 242 lines) adds RFC 3779 RPKI extension parsing: `IPAddressPrefix`, `IPAddressRange` and `IPAddressFamilyBlocks` types plus `parseRPKIAddrBlocks`, which unmarshals the `IPAddrBlocks` structure (AFI/SAFI, inherit-from-issuer, address prefixes and ranges) while collecting problems into a `NonFatalErrors`, followed by the AS-identifier types (`ASIDRange`, `ASIdentifiers`).
|
||||
ASIDRanges []ASIDRange
|
||||
}
|
||||
|
||||
type asIdentifiers struct {
|
||||
ASNum asn1.RawValue `asn1:"optional,tag:0"`
|
||||
RDI asn1.RawValue `asn1:"optional,tag:1"`
|
||||
}
|
||||
|
||||
func parseASIDChoice(val asn1.RawValue, nfe *NonFatalErrors) *ASIdentifiers {
|
||||
// RFC 3779 2.3.2
|
||||
// ASIdentifierChoice ::= CHOICE {
|
||||
// inherit NULL, -- inherit from issuer --
|
||||
// asIdsOrRanges SEQUENCE OF ASIdOrRange }
|
||||
// ASIdOrRange ::= CHOICE {
|
||||
// id ASId,
|
||||
// range ASRange }
|
||||
// ASRange ::= SEQUENCE {
|
||||
// min ASId,
|
||||
// max ASId }
|
||||
// ASId ::= INTEGER
|
||||
if len(val.FullBytes) == 0 { // OPTIONAL
|
||||
return nil
|
||||
}
|
||||
// ASIdentifierChoice is an ASN.1 CHOICE where the chosen alternative is indicated by (implicit)
|
||||
// tagging of the alternatives -- here, either NULL or SEQUENCE OF.
|
||||
if bytes.Equal(val.Bytes, asn1.NullBytes) {
|
||||
return &ASIdentifiers{InheritFromIssuer: true}
|
||||
}
|
||||
var ids []asn1.RawValue
|
||||
if rest, err := asn1.Unmarshal(val.Bytes, &ids); err != nil {
|
||||
nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges: %v", err))
|
||||
return nil
|
||||
} else if len(rest) != 0 {
|
||||
nfe.AddError(errors.New("trailing data after ASIdentifiers.asIdsOrRanges"))
|
||||
return nil
|
||||
}
|
||||
var asID ASIdentifiers
|
||||
for i, id := range ids {
|
||||
// Each ASIdOrRange is a CHOICE where the alternatives have distinct (implicit)
|
||||
// tags -- here, either INTEGER or SEQUENCE.
|
||||
switch id.Tag {
|
||||
case asn1.TagInteger:
|
||||
var val int
|
||||
if _, err := asn1.Unmarshal(id.FullBytes, &val); err != nil {
|
||||
nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges[%d].id: %v", i, err))
|
||||
continue
|
||||
}
|
||||
asID.ASIDs = append(asID.ASIDs, val)
|
||||
|
||||
case asn1.TagSequence:
|
||||
var val ASIDRange
|
||||
if _, err := asn1.Unmarshal(id.FullBytes, &val); err != nil {
|
||||
nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges[%d].range: %v", i, err))
|
||||
continue
|
||||
}
|
||||
asID.ASIDRanges = append(asID.ASIDRanges, val)
|
||||
|
||||
default:
|
||||
nfe.AddError(fmt.Errorf("unexpected value in ASIdentifiers.asIdsOrRanges[%d]: %+v", i, id))
|
||||
}
|
||||
}
|
||||
return &asID
|
||||
}
|
||||
|
||||
func parseRPKIASIdentifiers(data []byte, nfe *NonFatalErrors) (*ASIdentifiers, *ASIdentifiers) {
|
||||
// RFC 3779 2.3.2
|
||||
// ASIdentifiers ::= SEQUENCE {
|
||||
// asnum [0] EXPLICIT ASIdentifierChoice OPTIONAL,
|
||||
// rdi [1] EXPLICIT ASIdentifierChoice OPTIONAL}
|
||||
var asIDs asIdentifiers
|
||||
if rest, err := asn1.Unmarshal(data, &asIDs); err != nil {
|
||||
nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers extension: %v", err))
|
||||
return nil, nil
|
||||
} else if len(rest) != 0 {
|
||||
nfe.AddError(errors.New("trailing data after ASIdentifiers extension"))
|
||||
return nil, nil
|
||||
}
|
||||
return parseASIDChoice(asIDs.ASNum, nfe), parseASIDChoice(asIDs.RDI, nfe)
|
||||
}
|
||||
|
|
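The IPAddressPrefix type defined in rpki.go above is just an ASN.1 BIT STRING whose BitLength field carries the prefix length, so consuming the parsed address blocks mostly amounts to padding the bytes out and applying a mask. A minimal sketch for IPv4 (AFI = 1) blocks only, using the RPKIAddressRanges field this update adds to the Certificate struct; the helper names are illustrative and not part of the vendored package:

```
package rpkiexample

import (
	"fmt"
	"net"

	"github.com/google/certificate-transparency-go/x509"
)

// ipv4PrefixToIPNet converts an IPAddressPrefix taken from an AFI=1 block into
// a net.IPNet: the BIT STRING bytes are the high-order address bytes and
// BitLength is the prefix length.
func ipv4PrefixToIPNet(p x509.IPAddressPrefix) (*net.IPNet, error) {
	if p.BitLength > 32 || len(p.Bytes) > net.IPv4len {
		return nil, fmt.Errorf("not an IPv4 prefix: %d bits", p.BitLength)
	}
	addr := make(net.IP, net.IPv4len) // zero-pad to a full 4-byte address
	copy(addr, p.Bytes)
	return &net.IPNet{IP: addr, Mask: net.CIDRMask(p.BitLength, 8*net.IPv4len)}, nil
}

// dumpIPv4Prefixes walks the RPKIAddressRanges field on a parsed certificate
// and prints the IPv4 prefixes it finds, skipping inherit-from-issuer blocks.
func dumpIPv4Prefixes(cert *x509.Certificate) {
	for _, block := range cert.RPKIAddressRanges {
		if block.AFI != x509.IPv4AddressFamilyIndicator || block.InheritFromIssuer {
			continue
		}
		for _, p := range block.AddressPrefixes {
			if ipnet, err := ipv4PrefixToIPNet(p); err == nil {
				fmt.Println(ipnet)
			}
		}
	}
}
```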
@@ -72,11 +72,12 @@ func parseECPrivateKey(namedCurveOID *asn1.ObjectIdentifier, der []byte) (key *e
		return nil, fmt.Errorf("x509: unknown EC private key version %d", privKey.Version)
	}

+	var nfe NonFatalErrors
	var curve elliptic.Curve
	if namedCurveOID != nil {
-		curve = namedCurveFromOID(*namedCurveOID)
+		curve = namedCurveFromOID(*namedCurveOID, &nfe)
	} else {
-		curve = namedCurveFromOID(privKey.NamedCurveOID)
+		curve = namedCurveFromOID(privKey.NamedCurveOID, &nfe)
	}
	if curve == nil {
		return nil, errors.New("x509: unknown elliptic curve")
@@ -10,6 +10,7 @@ import (
	"fmt"
	"net"
	"net/url"
+	"os"
	"reflect"
	"runtime"
	"strings"
@@ -17,6 +18,9 @@
	"unicode/utf8"
)

+// ignoreCN disables interpreting Common Name as a hostname. See issue 24151.
+var ignoreCN = strings.Contains(os.Getenv("GODEBUG"), "x509ignoreCN=1")
+
type InvalidReason int

const (
@ -41,21 +45,25 @@ const (
|
|||
NameMismatch
|
||||
// NameConstraintsWithoutSANs results when a leaf certificate doesn't
|
||||
// contain a Subject Alternative Name extension, but a CA certificate
|
||||
// contains name constraints.
|
||||
// contains name constraints, and the Common Name can be interpreted as
|
||||
// a hostname.
|
||||
//
|
||||
// You can avoid this error by setting the experimental GODEBUG environment
|
||||
// variable to "x509ignoreCN=1", disabling Common Name matching entirely.
|
||||
// This behavior might become the default in the future.
|
||||
NameConstraintsWithoutSANs
|
||||
// UnconstrainedName results when a CA certificate contains permitted
|
||||
// name constraints, but leaf certificate contains a name of an
|
||||
// unsupported or unconstrained type.
|
||||
UnconstrainedName
|
||||
// TooManyConstraints results when the number of comparision operations
|
||||
// TooManyConstraints results when the number of comparison operations
|
||||
// needed to check a certificate exceeds the limit set by
|
||||
// VerifyOptions.MaxConstraintComparisions. This limit exists to
|
||||
// prevent pathological certificates can consuming excessive amounts of
|
||||
// CPU time to verify.
|
||||
TooManyConstraints
|
||||
// CANotAuthorizedForExtKeyUsage results when an intermediate or root
|
||||
// certificate does not permit an extended key usage that is claimed by
|
||||
// the leaf certificate.
|
||||
// certificate does not permit a requested extended key usage.
|
||||
CANotAuthorizedForExtKeyUsage
|
||||
)
|
||||
|
||||
|
|
@ -80,7 +88,7 @@ func (e CertificateInvalidError) Error() string {
|
|||
case TooManyIntermediates:
|
||||
return "x509: too many intermediates for path length constraint"
|
||||
case IncompatibleUsage:
|
||||
return "x509: certificate specifies an incompatible key usage: " + e.Detail
|
||||
return "x509: certificate specifies an incompatible key usage"
|
||||
case NameMismatch:
|
||||
return "x509: issuer name does not match subject from issuing certificate"
|
||||
case NameConstraintsWithoutSANs:
|
||||
|
|
@ -101,6 +109,12 @@ type HostnameError struct {
|
|||
func (h HostnameError) Error() string {
|
||||
c := h.Certificate
|
||||
|
||||
if !c.hasSANExtension() && !validHostname(c.Subject.CommonName) &&
|
||||
matchHostnames(toLowerCaseASCII(c.Subject.CommonName), toLowerCaseASCII(h.Host)) {
|
||||
// This would have validated, if it weren't for the validHostname check on Common Name.
|
||||
return "x509: Common Name is not a valid hostname: " + c.Subject.CommonName
|
||||
}
|
||||
|
||||
var valid string
|
||||
if ip := net.ParseIP(h.Host); ip != nil {
|
||||
// Trying to validate an IP
|
||||
|
|
@ -114,10 +128,10 @@ func (h HostnameError) Error() string {
|
|||
valid += san.String()
|
||||
}
|
||||
} else {
|
||||
if c.hasSANExtension() {
|
||||
valid = strings.Join(c.DNSNames, ", ")
|
||||
} else {
|
||||
if c.commonNameAsHostname() {
|
||||
valid = c.Subject.CommonName
|
||||
} else {
|
||||
valid = strings.Join(c.DNSNames, ", ")
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@@ -174,19 +188,28 @@ var errNotParsed = errors.New("x509: missing ASN.1 contents; use ParseCertificat
// VerifyOptions contains parameters for Certificate.Verify. It's a structure
// because other PKIX verification APIs have ended up needing many options.
type VerifyOptions struct {
-	DNSName string
-	Intermediates *CertPool
-	Roots *CertPool // if nil, the system roots are used
-	CurrentTime time.Time // if zero, the current time is used
-	DisableTimeChecks bool
-	// KeyUsage specifies which Extended Key Usage values are acceptable.
-	// An empty list means ExtKeyUsageServerAuth. Key usage is considered a
-	// constraint down the chain which mirrors Windows CryptoAPI behavior,
-	// but not the spec. To accept any key usage, include ExtKeyUsageAny.
+	DNSName string
+	Intermediates *CertPool
+	Roots *CertPool // if nil, the system roots are used
+	CurrentTime time.Time // if zero, the current time is used
+	// Options to disable various verification checks.
+	DisableTimeChecks bool
+	DisableCriticalExtensionChecks bool
+	DisableNameChecks bool
+	DisableEKUChecks bool
+	DisablePathLenChecks bool
+	DisableNameConstraintChecks bool
+	// KeyUsage specifies which Extended Key Usage values are acceptable. A leaf
+	// certificate is accepted if it contains any of the listed values. An empty
+	// list means ExtKeyUsageServerAuth. To accept any key usage, include
+	// ExtKeyUsageAny.
+	//
+	// Certificate chains are required to nest these extended key usage values.
+	// (This matches the Windows CryptoAPI behavior, but not the spec.)
	KeyUsages []ExtKeyUsage
	// MaxConstraintComparisions is the maximum number of comparisons to
	// perform when checking a given certificate's name constraints. If
-	// zero, a sensible default is used. This limit prevents pathalogical
+	// zero, a sensible default is used. This limit prevents pathological
	// certificates from consuming excessive amounts of CPU time when
	// validating.
	MaxConstraintComparisions int
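For context on the new Disable* fields: they let a caller turn off individual checks during Verify rather than patching the library, which is roughly what a CT front end needs when accepting expired or oddly-scoped certificates. A minimal sketch, assuming the roots pool comes from elsewhere; the function name is illustrative only:

```
package verifyexample

import (
	"github.com/google/certificate-transparency-go/x509"
)

// relaxedVerify builds chains while skipping the expiry and EKU-nesting
// checks, using the new VerifyOptions fields added in this hunk. Everything
// else (signatures, name constraints, path lengths) is still enforced.
func relaxedVerify(cert *x509.Certificate, roots *x509.CertPool) ([][]*x509.Certificate, error) {
	opts := x509.VerifyOptions{
		Roots:             roots,
		DisableTimeChecks: true, // accept expired or not-yet-valid certificates
		DisableEKUChecks:  true, // don't require nested extended key usages
	}
	return cert.Verify(opts)
}
```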
@ -544,49 +567,14 @@ func (c *Certificate) checkNameConstraints(count *int,
|
|||
return nil
|
||||
}
|
||||
|
||||
// ekuPermittedBy returns true iff the given extended key usage is permitted by
|
||||
// the given EKU from a certificate. Normally, this would be a simple
|
||||
// comparison plus a special case for the “any” EKU. But, in order to support
|
||||
// existing certificates, some exceptions are made.
|
||||
func ekuPermittedBy(eku, certEKU ExtKeyUsage) bool {
|
||||
if certEKU == ExtKeyUsageAny || eku == certEKU {
|
||||
return true
|
||||
}
|
||||
|
||||
// Some exceptions are made to support existing certificates. Firstly,
|
||||
// the ServerAuth and SGC EKUs are treated as a group.
|
||||
mapServerAuthEKUs := func(eku ExtKeyUsage) ExtKeyUsage {
|
||||
if eku == ExtKeyUsageNetscapeServerGatedCrypto || eku == ExtKeyUsageMicrosoftServerGatedCrypto {
|
||||
return ExtKeyUsageServerAuth
|
||||
}
|
||||
return eku
|
||||
}
|
||||
|
||||
eku = mapServerAuthEKUs(eku)
|
||||
certEKU = mapServerAuthEKUs(certEKU)
|
||||
|
||||
if eku == certEKU ||
|
||||
// ServerAuth in a CA permits ClientAuth in the leaf.
|
||||
(eku == ExtKeyUsageClientAuth && certEKU == ExtKeyUsageServerAuth) ||
|
||||
// Any CA may issue an OCSP responder certificate.
|
||||
eku == ExtKeyUsageOCSPSigning ||
|
||||
// Code-signing CAs can use Microsoft's commercial and
|
||||
// kernel-mode EKUs.
|
||||
((eku == ExtKeyUsageMicrosoftCommercialCodeSigning || eku == ExtKeyUsageMicrosoftKernelCodeSigning) && certEKU == ExtKeyUsageCodeSigning) {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// isValid performs validity checks on c given that it is a candidate to append
|
||||
// to the chain in currentChain.
|
||||
func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *VerifyOptions) error {
|
||||
if len(c.UnhandledCriticalExtensions) > 0 {
|
||||
if !opts.DisableCriticalExtensionChecks && len(c.UnhandledCriticalExtensions) > 0 {
|
||||
return UnhandledCriticalExtension{ID: c.UnhandledCriticalExtensions[0]}
|
||||
}
|
||||
|
||||
if len(currentChain) > 0 {
|
||||
if !opts.DisableNameChecks && len(currentChain) > 0 {
|
||||
child := currentChain[len(currentChain)-1]
|
||||
if !bytes.Equal(child.RawIssuer, c.RawSubject) {
|
||||
return CertificateInvalidError{c, NameMismatch, ""}
|
||||
|
|
@ -617,24 +605,22 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
|
|||
leaf = currentChain[0]
|
||||
}
|
||||
|
||||
if (certType == intermediateCertificate || certType == rootCertificate) && c.hasNameConstraints() {
|
||||
sanExtension, ok := leaf.getSANExtension()
|
||||
if !ok {
|
||||
// This is the deprecated, legacy case of depending on
|
||||
// the CN as a hostname. Chains modern enough to be
|
||||
// using name constraints should not be depending on
|
||||
// CNs.
|
||||
return CertificateInvalidError{c, NameConstraintsWithoutSANs, ""}
|
||||
}
|
||||
|
||||
err := forEachSAN(sanExtension, func(tag int, data []byte) error {
|
||||
checkNameConstraints := !opts.DisableNameConstraintChecks && (certType == intermediateCertificate || certType == rootCertificate) && c.hasNameConstraints()
|
||||
if checkNameConstraints && leaf.commonNameAsHostname() {
|
||||
// This is the deprecated, legacy case of depending on the commonName as
|
||||
// a hostname. We don't enforce name constraints against the CN, but
|
||||
// VerifyHostname will look for hostnames in there if there are no SANs.
|
||||
// In order to ensure VerifyHostname will not accept an unchecked name,
|
||||
// return an error here.
|
||||
return CertificateInvalidError{c, NameConstraintsWithoutSANs, ""}
|
||||
} else if checkNameConstraints && leaf.hasSANExtension() {
|
||||
err := forEachSAN(leaf.getSANExtension(), func(tag int, data []byte) error {
|
||||
switch tag {
|
||||
case nameTypeEmail:
|
||||
name := string(data)
|
||||
mailbox, ok := parseRFC2821Mailbox(name)
|
||||
if !ok {
|
||||
// This certificate should not have parsed.
|
||||
return errors.New("x509: internal error: rfc822Name SAN failed to parse")
|
||||
return fmt.Errorf("x509: cannot parse rfc822Name %q", mailbox)
|
||||
}
|
||||
|
||||
if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "email address", name, mailbox,
|
||||
|
|
@ -646,6 +632,10 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
|
|||
|
||||
case nameTypeDNS:
|
||||
name := string(data)
|
||||
if _, ok := domainToReverseLabels(name); !ok {
|
||||
return fmt.Errorf("x509: cannot parse dnsName %q", name)
|
||||
}
|
||||
|
||||
if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "DNS name", name, name,
|
||||
func(parsedName, constraint interface{}) (bool, error) {
|
||||
return matchDomainConstraint(parsedName.(string), constraint.(string))
|
||||
|
|
@ -692,59 +682,6 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
|
|||
}
|
||||
}
|
||||
|
||||
checkEKUs := certType == intermediateCertificate
|
||||
|
||||
// If no extended key usages are specified, then all are acceptable.
|
||||
if checkEKUs && (len(c.ExtKeyUsage) == 0 && len(c.UnknownExtKeyUsage) == 0) {
|
||||
checkEKUs = false
|
||||
}
|
||||
|
||||
// If the “any” key usage is permitted, then no more checks are needed.
|
||||
if checkEKUs {
|
||||
for _, caEKU := range c.ExtKeyUsage {
|
||||
comparisonCount++
|
||||
if caEKU == ExtKeyUsageAny {
|
||||
checkEKUs = false
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if checkEKUs {
|
||||
NextEKU:
|
||||
for _, eku := range leaf.ExtKeyUsage {
|
||||
if comparisonCount > maxConstraintComparisons {
|
||||
return CertificateInvalidError{c, TooManyConstraints, ""}
|
||||
}
|
||||
|
||||
for _, caEKU := range c.ExtKeyUsage {
|
||||
comparisonCount++
|
||||
if ekuPermittedBy(eku, caEKU) {
|
||||
continue NextEKU
|
||||
}
|
||||
}
|
||||
|
||||
oid, _ := oidFromExtKeyUsage(eku)
|
||||
return CertificateInvalidError{c, CANotAuthorizedForExtKeyUsage, fmt.Sprintf("EKU not permitted: %#v", oid)}
|
||||
}
|
||||
|
||||
NextUnknownEKU:
|
||||
for _, eku := range leaf.UnknownExtKeyUsage {
|
||||
if comparisonCount > maxConstraintComparisons {
|
||||
return CertificateInvalidError{c, TooManyConstraints, ""}
|
||||
}
|
||||
|
||||
for _, caEKU := range c.UnknownExtKeyUsage {
|
||||
comparisonCount++
|
||||
if caEKU.Equal(eku) {
|
||||
continue NextUnknownEKU
|
||||
}
|
||||
}
|
||||
|
||||
return CertificateInvalidError{c, CANotAuthorizedForExtKeyUsage, fmt.Sprintf("EKU not permitted: %#v", eku)}
|
||||
}
|
||||
}
|
||||
|
||||
// KeyUsage status flags are ignored. From Engineering Security, Peter
|
||||
// Gutmann: A European government CA marked its signing certificates as
|
||||
// being valid for encryption only, but no-one noticed. Another
|
||||
|
|
@ -766,7 +703,7 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
|
|||
return CertificateInvalidError{c, NotAuthorizedToSign, ""}
|
||||
}
|
||||
|
||||
if c.BasicConstraintsValid && c.MaxPathLen >= 0 {
|
||||
if !opts.DisablePathLenChecks && c.BasicConstraintsValid && c.MaxPathLen >= 0 {
|
||||
numIntermediates := len(currentChain) - 1
|
||||
if numIntermediates > c.MaxPathLen {
|
||||
return CertificateInvalidError{c, TooManyIntermediates, ""}
|
||||
|
|
@ -833,36 +770,6 @@ func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err e
|
|||
}
|
||||
}
|
||||
|
||||
requestedKeyUsages := make([]ExtKeyUsage, len(opts.KeyUsages))
|
||||
copy(requestedKeyUsages, opts.KeyUsages)
|
||||
if len(requestedKeyUsages) == 0 {
|
||||
requestedKeyUsages = append(requestedKeyUsages, ExtKeyUsageServerAuth)
|
||||
}
|
||||
|
||||
// If no key usages are specified, then any are acceptable.
|
||||
checkEKU := len(c.ExtKeyUsage) > 0
|
||||
|
||||
for _, eku := range requestedKeyUsages {
|
||||
if eku == ExtKeyUsageAny {
|
||||
checkEKU = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if checkEKU {
|
||||
NextUsage:
|
||||
for _, eku := range requestedKeyUsages {
|
||||
for _, leafEKU := range c.ExtKeyUsage {
|
||||
if ekuPermittedBy(eku, leafEKU) {
|
||||
continue NextUsage
|
||||
}
|
||||
}
|
||||
|
||||
oid, _ := oidFromExtKeyUsage(eku)
|
||||
return nil, CertificateInvalidError{c, IncompatibleUsage, fmt.Sprintf("%#v", oid)}
|
||||
}
|
||||
}
|
||||
|
||||
var candidateChains [][]*Certificate
|
||||
if opts.Roots.contains(c) {
|
||||
candidateChains = append(candidateChains, []*Certificate{c})
|
||||
|
|
@ -872,7 +779,29 @@ func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err e
|
|||
}
|
||||
}
|
||||
|
||||
return candidateChains, nil
|
||||
keyUsages := opts.KeyUsages
|
||||
if len(keyUsages) == 0 {
|
||||
keyUsages = []ExtKeyUsage{ExtKeyUsageServerAuth}
|
||||
}
|
||||
|
||||
// If any key usage is acceptable then we're done.
|
||||
for _, usage := range keyUsages {
|
||||
if usage == ExtKeyUsageAny {
|
||||
return candidateChains, nil
|
||||
}
|
||||
}
|
||||
|
||||
for _, candidate := range candidateChains {
|
||||
if opts.DisableEKUChecks || checkChainForKeyUsage(candidate, keyUsages) {
|
||||
chains = append(chains, candidate)
|
||||
}
|
||||
}
|
||||
|
||||
if len(chains) == 0 {
|
||||
return nil, CertificateInvalidError{c, IncompatibleUsage, ""}
|
||||
}
|
||||
|
||||
return chains, nil
|
||||
}
|
||||
|
||||
func appendToFreshChain(chain []*Certificate, cert *Certificate) []*Certificate {
|
||||
|
|
@ -940,6 +869,64 @@ nextIntermediate:
|
|||
return
|
||||
}
|
||||
|
||||
// validHostname returns whether host is a valid hostname that can be matched or
|
||||
// matched against according to RFC 6125 2.2, with some leniency to accomodate
|
||||
// legacy values.
|
||||
func validHostname(host string) bool {
|
||||
host = strings.TrimSuffix(host, ".")
|
||||
|
||||
if len(host) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
for i, part := range strings.Split(host, ".") {
|
||||
if part == "" {
|
||||
// Empty label.
|
||||
return false
|
||||
}
|
||||
if i == 0 && part == "*" {
|
||||
// Only allow full left-most wildcards, as those are the only ones
|
||||
// we match, and matching literal '*' characters is probably never
|
||||
// the expected behavior.
|
||||
continue
|
||||
}
|
||||
for j, c := range part {
|
||||
if 'a' <= c && c <= 'z' {
|
||||
continue
|
||||
}
|
||||
if '0' <= c && c <= '9' {
|
||||
continue
|
||||
}
|
||||
if 'A' <= c && c <= 'Z' {
|
||||
continue
|
||||
}
|
||||
if c == '-' && j != 0 {
|
||||
continue
|
||||
}
|
||||
if c == '_' || c == ':' {
|
||||
// Not valid characters in hostnames, but commonly
|
||||
// found in deployments outside the WebPKI.
|
||||
continue
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// commonNameAsHostname reports whether the Common Name field should be
|
||||
// considered the hostname that the certificate is valid for. This is a legacy
|
||||
// behavior, disabled if the Subject Alt Name extension is present.
|
||||
//
|
||||
// It applies the strict validHostname check to the Common Name field, so that
|
||||
// certificates without SANs can still be validated against CAs with name
|
||||
// constraints if there is no risk the CN would be matched as a hostname.
|
||||
// See NameConstraintsWithoutSANs and issue 24151.
|
||||
func (c *Certificate) commonNameAsHostname() bool {
|
||||
return !ignoreCN && !c.hasSANExtension() && validHostname(c.Subject.CommonName)
|
||||
}
|
||||
|
||||
func matchHostnames(pattern, host string) bool {
|
||||
host = strings.TrimSuffix(host, ".")
|
||||
pattern = strings.TrimSuffix(pattern, ".")
|
||||
|
|
@ -1020,16 +1007,79 @@ func (c *Certificate) VerifyHostname(h string) error {
|
|||
|
||||
lowered := toLowerCaseASCII(h)
|
||||
|
||||
if c.hasSANExtension() {
|
||||
if c.commonNameAsHostname() {
|
||||
if matchHostnames(toLowerCaseASCII(c.Subject.CommonName), lowered) {
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
for _, match := range c.DNSNames {
|
||||
if matchHostnames(toLowerCaseASCII(match), lowered) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
// If Subject Alt Name is given, we ignore the common name.
|
||||
} else if matchHostnames(toLowerCaseASCII(c.Subject.CommonName), lowered) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return HostnameError{c, h}
|
||||
}
|
||||
|
||||
func checkChainForKeyUsage(chain []*Certificate, keyUsages []ExtKeyUsage) bool {
|
||||
usages := make([]ExtKeyUsage, len(keyUsages))
|
||||
copy(usages, keyUsages)
|
||||
|
||||
if len(chain) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
usagesRemaining := len(usages)
|
||||
|
||||
// We walk down the list and cross out any usages that aren't supported
|
||||
// by each certificate. If we cross out all the usages, then the chain
|
||||
// is unacceptable.
|
||||
|
||||
NextCert:
|
||||
for i := len(chain) - 1; i >= 0; i-- {
|
||||
cert := chain[i]
|
||||
if len(cert.ExtKeyUsage) == 0 && len(cert.UnknownExtKeyUsage) == 0 {
|
||||
// The certificate doesn't have any extended key usage specified.
|
||||
continue
|
||||
}
|
||||
|
||||
for _, usage := range cert.ExtKeyUsage {
|
||||
if usage == ExtKeyUsageAny {
|
||||
// The certificate is explicitly good for any usage.
|
||||
continue NextCert
|
||||
}
|
||||
}
|
||||
|
||||
const invalidUsage ExtKeyUsage = -1
|
||||
|
||||
NextRequestedUsage:
|
||||
for i, requestedUsage := range usages {
|
||||
if requestedUsage == invalidUsage {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, usage := range cert.ExtKeyUsage {
|
||||
if requestedUsage == usage {
|
||||
continue NextRequestedUsage
|
||||
} else if requestedUsage == ExtKeyUsageServerAuth &&
|
||||
(usage == ExtKeyUsageNetscapeServerGatedCrypto ||
|
||||
usage == ExtKeyUsageMicrosoftServerGatedCrypto) {
|
||||
// In order to support COMODO
|
||||
// certificate chains, we have to
|
||||
// accept Netscape or Microsoft SGC
|
||||
// usages as equal to ServerAuth.
|
||||
continue NextRequestedUsage
|
||||
}
|
||||
}
|
||||
|
||||
usages[i] = invalidUsage
|
||||
usagesRemaining--
|
||||
if usagesRemaining == 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -8,9 +8,39 @@
// can be used to override the system default locations for the SSL certificate
// file and SSL certificate files directory, respectively.
//
-// This is a fork of the go library crypto/x509 package, it's more relaxed
-// about certificates that it'll accept, and exports the TBSCertificate
-// structure.
+// This is a fork of the Go library crypto/x509 package, primarily adapted for
+// use with Certificate Transparency. Main areas of difference are:
+//
+//  - Life as a fork:
+//     - Rename OS-specific cgo code so it doesn't clash with main Go library.
+//     - Use local library imports (asn1, pkix) throughout.
+//     - Add version-specific wrappers for Go version-incompatible code (in
+//       nilref_*_darwin.go, ptr_*_windows.go).
+//  - Laxer certificate parsing:
+//     - Add options to disable various validation checks (times, EKUs etc).
+//     - Use NonFatalErrors type for some errors and continue parsing; this
+//       can be checked with IsFatal(err).
+//     - Support for short bitlength ECDSA curves (in curves.go).
+//  - Certificate Transparency specific function:
+//     - Parsing and marshaling of SCTList extension.
+//     - RemoveSCTList() function for rebuilding CT leaf entry.
+//     - Pre-certificate processing (RemoveCTPoison(), BuildPrecertTBS(),
+//       ParseTBSCertificate(), IsPrecertificate()).
+//  - Revocation list processing:
+//     - Detailed CRL parsing (in revoked.go)
+//     - Detailed error recording mechanism (in error.go, errors.go)
+//     - Factor out parseDistributionPoints() for reuse.
+//     - Factor out and generalize GeneralNames parsing (in names.go)
+//     - Fix CRL commenting.
+//  - RPKI support:
+//     - Support for SubjectInfoAccess extension
+//     - Support for RFC3779 extensions (in rpki.go)
+//  - General improvements:
+//     - Export and use OID values throughout.
+//     - Export OIDFromNamedCurve().
+//     - Export SignatureAlgorithmFromAI().
+//     - Add OID value to UnhandledCriticalExtension error.
+//     - Minor typo/lint fixes.
package x509

import (
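The NonFatalErrors/IsFatal contract described in the comment block above means a parse error does not necessarily make the certificate unusable. A minimal sketch of the intended calling pattern; names are illustrative and the DER bytes are assumed to come from elsewhere:

```
package parseexample

import (
	"log"

	"github.com/google/certificate-transparency-go/x509"
)

// parseLeniently shows the lax-parsing contract: on a non-fatal error the
// returned *Certificate is still populated, and IsFatal distinguishes the two
// cases (it also treats a nil error as non-fatal).
func parseLeniently(der []byte) *x509.Certificate {
	cert, err := x509.ParseCertificate(der)
	if x509.IsFatal(err) {
		log.Printf("unparseable certificate: %v", err)
		return nil
	}
	if err != nil {
		log.Printf("certificate parsed with non-fatal errors: %v", err)
	}
	return cert
}
```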
@ -69,7 +99,16 @@ func ParsePKIXPublicKey(derBytes []byte) (pub interface{}, err error) {
|
|||
if algo == UnknownPublicKeyAlgorithm {
|
||||
return nil, errors.New("x509: unknown public key algorithm")
|
||||
}
|
||||
return parsePublicKey(algo, &pki)
|
||||
var nfe NonFatalErrors
|
||||
pub, err = parsePublicKey(algo, &pki, &nfe)
|
||||
if err != nil {
|
||||
return pub, err
|
||||
}
|
||||
// Treat non-fatal errors as fatal for this entrypoint.
|
||||
if len(nfe.Errors) > 0 {
|
||||
return nil, nfe.Errors[0]
|
||||
}
|
||||
return pub, nil
|
||||
}
|
||||
|
||||
func marshalPublicKey(pub interface{}) (publicKeyBytes []byte, publicKeyAlgorithm pkix.AlgorithmIdentifier, err error) {
|
||||
|
|
@ -432,10 +471,10 @@ func SignatureAlgorithmFromAI(ai pkix.AlgorithmIdentifier) SignatureAlgorithm {
|
|||
// https://tools.ietf.org/html/rfc3447#section-8.1), that the
|
||||
// salt length matches the hash length, and that the trailer
|
||||
// field has the default value.
|
||||
if !bytes.Equal(params.Hash.Parameters.FullBytes, asn1.NullBytes) ||
|
||||
if (len(params.Hash.Parameters.FullBytes) != 0 && !bytes.Equal(params.Hash.Parameters.FullBytes, asn1.NullBytes)) ||
|
||||
!params.MGF.Algorithm.Equal(oidMGF1) ||
|
||||
!mgf1HashFunc.Algorithm.Equal(params.Hash.Algorithm) ||
|
||||
!bytes.Equal(mgf1HashFunc.Parameters.FullBytes, asn1.NullBytes) ||
|
||||
(len(mgf1HashFunc.Parameters.FullBytes) != 0 && !bytes.Equal(mgf1HashFunc.Parameters.FullBytes, asn1.NullBytes)) ||
|
||||
params.TrailerField != 1 {
|
||||
return UnknownSignatureAlgorithm
|
||||
}
|
||||
|
|
@ -500,15 +539,21 @@ func getPublicKeyAlgorithmFromOID(oid asn1.ObjectIdentifier) PublicKeyAlgorithm
|
|||
// secp521r1 OBJECT IDENTIFIER ::= {
|
||||
// iso(1) identified-organization(3) certicom(132) curve(0) 35 }
|
||||
//
|
||||
// NB: secp256r1 is equivalent to prime256v1
|
||||
// secp192r1 OBJECT IDENTIFIER ::= {
|
||||
// iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3)
|
||||
// prime(1) 1 }
|
||||
//
|
||||
// NB: secp256r1 is equivalent to prime256v1,
|
||||
// secp192r1 is equivalent to ansix9p192r and prime192v1
|
||||
var (
|
||||
OIDNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33}
|
||||
OIDNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7}
|
||||
OIDNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34}
|
||||
OIDNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35}
|
||||
OIDNamedCurveP192 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 1}
|
||||
)
|
||||
|
||||
func namedCurveFromOID(oid asn1.ObjectIdentifier) elliptic.Curve {
|
||||
func namedCurveFromOID(oid asn1.ObjectIdentifier, nfe *NonFatalErrors) elliptic.Curve {
|
||||
switch {
|
||||
case oid.Equal(OIDNamedCurveP224):
|
||||
return elliptic.P224()
|
||||
|
|
@ -518,6 +563,9 @@ func namedCurveFromOID(oid asn1.ObjectIdentifier) elliptic.Curve {
|
|||
return elliptic.P384()
|
||||
case oid.Equal(OIDNamedCurveP521):
|
||||
return elliptic.P521()
|
||||
case oid.Equal(OIDNamedCurveP192):
|
||||
nfe.AddError(errors.New("insecure curve (secp192r1) specified"))
|
||||
return secp192r1()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -534,6 +582,8 @@ func OIDFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) {
|
|||
return OIDNamedCurveP384, true
|
||||
case elliptic.P521():
|
||||
return OIDNamedCurveP521, true
|
||||
case secp192r1():
|
||||
return OIDNamedCurveP192, true
|
||||
}
|
||||
|
||||
return nil, false
|
||||
|
|
@ -737,7 +787,13 @@ type Certificate struct {
|
|||
OCSPServer []string
|
||||
IssuingCertificateURL []string
|
||||
|
||||
// Subject Alternate Name values
|
||||
// Subject Information Access
|
||||
SubjectTimestamps []string
|
||||
SubjectCARepositories []string
|
||||
|
||||
// Subject Alternate Name values. (Note that these values may not be valid
|
||||
// if invalid values were contained within a parsed certificate. For
|
||||
// example, an element of DNSNames may not be a valid DNS domain name.)
|
||||
DNSNames []string
|
||||
EmailAddresses []string
|
||||
IPAddresses []net.IP
|
||||
|
|
@ -759,6 +815,9 @@ type Certificate struct {
|
|||
|
||||
PolicyIdentifiers []asn1.ObjectIdentifier
|
||||
|
||||
RPKIAddressRanges []*IPAddressFamilyBlocks
|
||||
RPKIASNumbers, RPKIRoutingDomainIDs *ASIdentifiers
|
||||
|
||||
// Certificate Transparency SCT extension contents; this is a TLS-encoded
|
||||
// SignedCertificateTimestampList (RFC 6962 s3.3).
|
||||
RawSCT []byte
|
||||
|
|
@ -792,6 +851,20 @@ func (c *Certificate) Equal(other *Certificate) bool {
|
|||
return bytes.Equal(c.Raw, other.Raw)
|
||||
}
|
||||
|
||||
// IsPrecertificate checks whether the certificate is a precertificate, by
|
||||
// checking for the presence of the CT Poison extension.
|
||||
func (c *Certificate) IsPrecertificate() bool {
|
||||
if c == nil {
|
||||
return false
|
||||
}
|
||||
for _, ext := range c.Extensions {
|
||||
if ext.Id.Equal(OIDExtensionCTPoison) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *Certificate) hasSANExtension() bool {
|
||||
return oidInExtensions(OIDExtensionSubjectAltName, c.Extensions)
|
||||
}
|
||||
|
|
@ -880,23 +953,17 @@ func (c *Certificate) CheckSignature(algo SignatureAlgorithm, signed, signature
|
|||
}
|
||||
|
||||
func (c *Certificate) hasNameConstraints() bool {
|
||||
for _, e := range c.Extensions {
|
||||
if len(e.Id) == 4 && e.Id[0] == OIDExtensionNameConstraints[0] && e.Id[1] == OIDExtensionNameConstraints[1] && e.Id[2] == OIDExtensionNameConstraints[2] && e.Id[3] == OIDExtensionNameConstraints[3] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
return oidInExtensions(OIDExtensionNameConstraints, c.Extensions)
|
||||
}
|
||||
|
||||
func (c *Certificate) getSANExtension() ([]byte, bool) {
|
||||
func (c *Certificate) getSANExtension() []byte {
|
||||
for _, e := range c.Extensions {
|
||||
if len(e.Id) == 4 && e.Id[0] == OIDExtensionSubjectAltName[0] && e.Id[1] == OIDExtensionSubjectAltName[1] && e.Id[2] == OIDExtensionSubjectAltName[2] && e.Id[3] == OIDExtensionSubjectAltName[3] {
|
||||
return e.Value, true
|
||||
if e.Id.Equal(OIDExtensionSubjectAltName) {
|
||||
return e.Value
|
||||
}
|
||||
}
|
||||
|
||||
return nil, false
|
||||
return nil
|
||||
}
|
||||
|
||||
func signaturePublicKeyAlgoMismatchError(expectedPubKeyAlgo PublicKeyAlgorithm, pubKey interface{}) error {
|
||||
|
|
@ -995,6 +1062,50 @@ func (h UnhandledCriticalExtension) Error() string {
|
|||
return fmt.Sprintf("x509: unhandled critical extension (%v)", h.ID)
|
||||
}
|
||||
|
||||
// removeExtension takes a DER-encoded TBSCertificate, removes the extension
|
||||
// specified by oid (preserving the order of other extensions), and returns the
|
||||
// result still as a DER-encoded TBSCertificate. This function will fail if
|
||||
// there is not exactly 1 extension of the type specified by the oid present.
|
||||
func removeExtension(tbsData []byte, oid asn1.ObjectIdentifier) ([]byte, error) {
|
||||
var tbs tbsCertificate
|
||||
rest, err := asn1.Unmarshal(tbsData, &tbs)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse TBSCertificate: %v", err)
|
||||
} else if rLen := len(rest); rLen > 0 {
|
||||
return nil, fmt.Errorf("trailing data (%d bytes) after TBSCertificate", rLen)
|
||||
}
|
||||
extAt := -1
|
||||
for i, ext := range tbs.Extensions {
|
||||
if ext.Id.Equal(oid) {
|
||||
if extAt != -1 {
|
||||
return nil, errors.New("multiple extensions of specified type present")
|
||||
}
|
||||
extAt = i
|
||||
}
|
||||
}
|
||||
if extAt == -1 {
|
||||
return nil, errors.New("no extension of specified type present")
|
||||
}
|
||||
tbs.Extensions = append(tbs.Extensions[:extAt], tbs.Extensions[extAt+1:]...)
|
||||
// Clear out the asn1.RawContent so the re-marshal operation sees the
|
||||
// updated structure (rather than just copying the out-of-date DER data).
|
||||
tbs.Raw = nil
|
||||
|
||||
data, err := asn1.Marshal(tbs)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to re-marshal TBSCertificate: %v", err)
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// RemoveSCTList takes a DER-encoded TBSCertificate and removes the CT SCT
|
||||
// extension that contains the SCT list (preserving the order of other
|
||||
// extensions), and returns the result still as a DER-encoded TBSCertificate.
|
||||
// This function will fail if there is not exactly 1 CT SCT extension present.
|
||||
func RemoveSCTList(tbsData []byte) ([]byte, error) {
|
||||
return removeExtension(tbsData, OIDExtensionCTSCT)
|
||||
}
|
||||
|
||||
// RemoveCTPoison takes a DER-encoded TBSCertificate and removes the CT poison
|
||||
// extension (preserving the order of other extensions), and returns the result
|
||||
// still as a DER-encoded TBSCertificate. This function will fail if there is
|
||||
|
|
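As a usage sketch for these helpers (not part of the diff): rebuilding the TBSCertificate that corresponds to a precertificate means checking for the poison extension and then stripping it. The function name is illustrative, and the raw TBS bytes are assumed to be available to the caller:

```
package precertexample

import (
	"errors"

	"github.com/google/certificate-transparency-go/x509"
)

// precertTBS strips the CT poison extension from a precertificate's raw
// TBSCertificate, which is the form a CT log works with. Passing a non-nil
// preIssuer to BuildPrecertTBS would additionally rewrite the issuer; nil
// leaves it untouched.
func precertTBS(cert *x509.Certificate, rawTBS []byte) ([]byte, error) {
	if !cert.IsPrecertificate() {
		return nil, errors.New("certificate carries no CT poison extension")
	}
	return x509.BuildPrecertTBS(rawTBS, nil)
}
```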
@ -1019,27 +1130,18 @@ func RemoveCTPoison(tbsData []byte) ([]byte, error) {
|
|||
// - The precert's AuthorityKeyId is changed to the AuthorityKeyId of the
|
||||
// intermediate.
|
||||
func BuildPrecertTBS(tbsData []byte, preIssuer *Certificate) ([]byte, error) {
|
||||
data, err := removeExtension(tbsData, OIDExtensionCTPoison)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var tbs tbsCertificate
|
||||
rest, err := asn1.Unmarshal(tbsData, &tbs)
|
||||
rest, err := asn1.Unmarshal(data, &tbs)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse TBSCertificate: %v", err)
|
||||
} else if rLen := len(rest); rLen > 0 {
|
||||
return nil, fmt.Errorf("trailing data (%d bytes) after TBSCertificate", rLen)
|
||||
}
|
||||
poisonAt := -1
|
||||
for i, ext := range tbs.Extensions {
|
||||
if ext.Id.Equal(OIDExtensionCTPoison) {
|
||||
if poisonAt != -1 {
|
||||
return nil, errors.New("multiple CT poison extensions present")
|
||||
}
|
||||
poisonAt = i
|
||||
}
|
||||
}
|
||||
if poisonAt == -1 {
|
||||
return nil, errors.New("no CT poison extension present")
|
||||
}
|
||||
tbs.Extensions = append(tbs.Extensions[:poisonAt], tbs.Extensions[poisonAt+1:]...)
|
||||
tbs.Raw = nil
|
||||
|
||||
if preIssuer != nil {
|
||||
// Update the precert's Issuer field. Use the RawIssuer rather than the
|
||||
|
|
@ -1092,9 +1194,13 @@ func BuildPrecertTBS(tbsData []byte, preIssuer *Certificate) ([]byte, error) {
|
|||
}
|
||||
tbs.Extensions = append(tbs.Extensions, authKeyIDExt)
|
||||
}
|
||||
|
||||
// Clear out the asn1.RawContent so the re-marshal operation sees the
|
||||
// updated structure (rather than just copying the out-of-date DER data).
|
||||
tbs.Raw = nil
|
||||
}
|
||||
|
||||
data, err := asn1.Marshal(tbs)
|
||||
data, err = asn1.Marshal(tbs)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to re-marshal TBSCertificate: %v", err)
|
||||
}
|
||||
|
|
@ -1120,7 +1226,7 @@ const (
|
|||
)
|
||||
|
||||
// RFC 5280, 4.2.2.1
|
||||
type authorityInfoAccess struct {
|
||||
type accessDescription struct {
|
||||
Method asn1.ObjectIdentifier
|
||||
Location asn1.RawValue
|
||||
}
|
||||
|
|
@ -1137,27 +1243,32 @@ type distributionPointName struct {
|
|||
RelativeName pkix.RDNSequence `asn1:"optional,tag:1"`
|
||||
}
|
||||
|
||||
func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (interface{}, error) {
|
||||
func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo, nfe *NonFatalErrors) (interface{}, error) {
|
||||
asn1Data := keyData.PublicKey.RightAlign()
|
||||
switch algo {
|
||||
case RSA:
|
||||
// RSA public keys must have a NULL in the parameters
|
||||
// (https://tools.ietf.org/html/rfc3279#section-2.3.1).
|
||||
if !bytes.Equal(keyData.Algorithm.Parameters.FullBytes, asn1.NullBytes) {
|
||||
return nil, errors.New("x509: RSA key missing NULL parameters")
|
||||
nfe.AddError(errors.New("x509: RSA key missing NULL parameters"))
|
||||
}
|
||||
|
||||
p := new(pkcs1PublicKey)
|
||||
rest, err := asn1.Unmarshal(asn1Data, p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
var laxErr error
|
||||
rest, laxErr = asn1.UnmarshalWithParams(asn1Data, p, "lax")
|
||||
if laxErr != nil {
|
||||
return nil, laxErr
|
||||
}
|
||||
nfe.AddError(err)
|
||||
}
|
||||
if len(rest) != 0 {
|
||||
return nil, errors.New("x509: trailing data after RSA public key")
|
||||
}
|
||||
|
||||
if p.N.Sign() <= 0 {
|
||||
return nil, errors.New("x509: RSA modulus is not a positive number")
|
||||
nfe.AddError(errors.New("x509: RSA modulus is not a positive number"))
|
||||
}
|
||||
if p.E <= 0 {
|
||||
return nil, errors.New("x509: RSA public exponent is not a positive number")
|
||||
|
|
@ -1172,7 +1283,12 @@ func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (interface{
|
|||
var p *big.Int
|
||||
rest, err := asn1.Unmarshal(asn1Data, &p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
var laxErr error
|
||||
rest, laxErr = asn1.UnmarshalWithParams(asn1Data, &p, "lax")
|
||||
if laxErr != nil {
|
||||
return nil, laxErr
|
||||
}
|
||||
nfe.AddError(err)
|
||||
}
|
||||
if len(rest) != 0 {
|
||||
return nil, errors.New("x509: trailing data after DSA public key")
|
||||
|
|
@ -1203,14 +1319,14 @@ func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (interface{
|
|||
namedCurveOID := new(asn1.ObjectIdentifier)
|
||||
rest, err := asn1.Unmarshal(paramsData, namedCurveOID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errors.New("x509: failed to parse ECDSA parameters as named curve")
|
||||
}
|
||||
if len(rest) != 0 {
|
||||
return nil, errors.New("x509: trailing data after ECDSA parameters")
|
||||
}
|
||||
namedCurve := namedCurveFromOID(*namedCurveOID)
|
||||
namedCurve := namedCurveFromOID(*namedCurveOID, nfe)
|
||||
if namedCurve == nil {
|
||||
return nil, errors.New("x509: unsupported elliptic curve")
|
||||
return nil, fmt.Errorf("x509: unsupported elliptic curve %v", namedCurveOID)
|
||||
}
|
||||
x, y := elliptic.Unmarshal(namedCurve, asn1Data)
|
||||
if x == nil {
|
||||
|
|
@ -1235,7 +1351,7 @@ type NonFatalErrors struct {
|
|||
Errors []error
|
||||
}
|
||||
|
||||
// Adds an error to the list of errors contained by NonFatalErrors.
|
||||
// AddError adds an error to the list of errors contained by NonFatalErrors.
|
||||
func (e *NonFatalErrors) AddError(err error) {
|
||||
e.Errors = append(e.Errors, err)
|
||||
}
|
||||
|
|
@ -1250,11 +1366,25 @@ func (e NonFatalErrors) Error() string {
|
|||
return r
|
||||
}
|
||||
|
||||
// Returns true if |e| contains at least one error
|
||||
// HasError returns true if |e| contains at least one error
|
||||
func (e *NonFatalErrors) HasError() bool {
|
||||
return len(e.Errors) > 0
|
||||
}
|
||||
|
||||
// IsFatal indicates whether an error is fatal.
|
||||
func IsFatal(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
if _, ok := err.(NonFatalErrors); ok {
|
||||
return false
|
||||
}
|
||||
if errs, ok := err.(*Errors); ok {
|
||||
return errs.Fatal()
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func parseDistributionPoints(data []byte, crldp *[]string) error {
|
||||
// CRLDistributionPoints ::= SEQUENCE SIZE (1..MAX) OF DistributionPoint
|
||||
//
|
||||
|
|
@ -1337,17 +1467,9 @@ func parseSANExtension(value []byte, nfe *NonFatalErrors) (dnsNames, emailAddres
|
|||
err = forEachSAN(value, func(tag int, data []byte) error {
|
||||
switch tag {
|
||||
case nameTypeEmail:
|
||||
mailbox := string(data)
|
||||
if _, ok := parseRFC2821Mailbox(mailbox); !ok {
|
||||
return fmt.Errorf("x509: cannot parse rfc822Name %q", mailbox)
|
||||
}
|
||||
emailAddresses = append(emailAddresses, mailbox)
|
||||
emailAddresses = append(emailAddresses, string(data))
|
||||
case nameTypeDNS:
|
||||
domain := string(data)
|
||||
if _, ok := domainToReverseLabels(domain); !ok {
|
||||
return fmt.Errorf("x509: cannot parse dnsName %q", string(data))
|
||||
}
|
||||
dnsNames = append(dnsNames, domain)
|
||||
dnsNames = append(dnsNames, string(data))
|
||||
case nameTypeURI:
|
||||
uri, err := url.Parse(string(data))
|
||||
if err != nil {
|
||||
|
|
@ -1364,7 +1486,7 @@ func parseSANExtension(value []byte, nfe *NonFatalErrors) (dnsNames, emailAddres
|
|||
case net.IPv4len, net.IPv6len:
|
||||
ipAddresses = append(ipAddresses, data)
|
||||
default:
|
||||
nfe.AddError(fmt.Errorf("x509: certificate contained IP address of length %d : %v", len(data), data))
|
||||
nfe.AddError(errors.New("x509: cannot parse IP address of length " + strconv.Itoa(len(data))))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1399,7 +1521,7 @@ func isValidIPMask(mask []byte) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandled bool, err error) {
|
||||
func parseNameConstraintsExtension(out *Certificate, e pkix.Extension, nfe *NonFatalErrors) (unhandled bool, err error) {
|
||||
// RFC 5280, 4.2.1.10
|
||||
|
||||
// NameConstraints ::= SEQUENCE {
|
||||
|
|
@ -1466,7 +1588,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle
|
|||
trimmedDomain = trimmedDomain[1:]
|
||||
}
|
||||
if _, ok := domainToReverseLabels(trimmedDomain); !ok {
|
||||
return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse dnsName constraint %q", domain)
|
||||
nfe.AddError(fmt.Errorf("x509: failed to parse dnsName constraint %q", domain))
|
||||
}
|
||||
dnsNames = append(dnsNames, domain)
|
||||
|
||||
|
|
@ -1503,7 +1625,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle
|
|||
// it specifies an exact mailbox name.
|
||||
if strings.Contains(constraint, "@") {
|
||||
if _, ok := parseRFC2821Mailbox(constraint); !ok {
|
||||
return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint)
|
||||
nfe.AddError(fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint))
|
||||
}
|
||||
} else {
|
||||
// Otherwise it's a domain name.
|
||||
|
|
@ -1512,7 +1634,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle
|
|||
domain = domain[1:]
|
||||
}
|
||||
if _, ok := domainToReverseLabels(domain); !ok {
|
||||
return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint)
|
||||
nfe.AddError(fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint))
|
||||
}
|
||||
}
|
||||
emails = append(emails, constraint)
|
||||
|
|
@ -1536,7 +1658,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle
|
|||
trimmedDomain = trimmedDomain[1:]
|
||||
}
|
||||
if _, ok := domainToReverseLabels(trimmedDomain); !ok {
|
||||
return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q", domain)
|
||||
nfe.AddError(fmt.Errorf("x509: failed to parse URI constraint %q", domain))
|
||||
}
|
||||
uriDomains = append(uriDomains, domain)
|
||||
|
||||
|
|
@ -1575,7 +1697,7 @@ func parseCertificate(in *certificate) (*Certificate, error) {
|
|||
out.PublicKeyAlgorithm =
|
||||
getPublicKeyAlgorithmFromOID(in.TBSCertificate.PublicKey.Algorithm.Algorithm)
|
||||
var err error
|
||||
out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCertificate.PublicKey)
|
||||
out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCertificate.PublicKey, &nfe)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -1585,12 +1707,22 @@ func parseCertificate(in *certificate) (*Certificate, error) {
|
|||
|
||||
var issuer, subject pkix.RDNSequence
|
||||
if rest, err := asn1.Unmarshal(in.TBSCertificate.Subject.FullBytes, &subject); err != nil {
|
||||
return nil, err
|
||||
var laxErr error
|
||||
rest, laxErr = asn1.UnmarshalWithParams(in.TBSCertificate.Subject.FullBytes, &subject, "lax")
|
||||
if laxErr != nil {
|
||||
return nil, laxErr
|
||||
}
|
||||
nfe.AddError(err)
|
||||
} else if len(rest) != 0 {
|
||||
return nil, errors.New("x509: trailing data after X.509 subject")
|
||||
}
|
||||
if rest, err := asn1.Unmarshal(in.TBSCertificate.Issuer.FullBytes, &issuer); err != nil {
|
||||
return nil, err
|
||||
var laxErr error
|
||||
rest, laxErr = asn1.UnmarshalWithParams(in.TBSCertificate.Issuer.FullBytes, &issuer, "lax")
|
||||
if laxErr != nil {
|
||||
return nil, laxErr
|
||||
}
|
||||
nfe.AddError(err)
|
||||
} else if len(rest) != 0 {
|
||||
return nil, errors.New("x509: trailing data after X.509 subject")
|
||||
}
|
||||
|
|
@ -1651,7 +1783,7 @@ func parseCertificate(in *certificate) (*Certificate, error) {
|
|||
}
|
||||
|
||||
case OIDExtensionNameConstraints[3]:
|
||||
unhandled, err = parseNameConstraintsExtension(out, e)
|
||||
unhandled, err = parseNameConstraintsExtension(out, e, &nfe)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -1682,10 +1814,21 @@ func parseCertificate(in *certificate) (*Certificate, error) {
|
|||
// KeyPurposeId ::= OBJECT IDENTIFIER
|
||||
|
||||
var keyUsage []asn1.ObjectIdentifier
|
||||
if rest, err := asn1.Unmarshal(e.Value, &keyUsage); err != nil {
|
||||
return nil, err
|
||||
} else if len(rest) != 0 {
|
||||
return nil, errors.New("x509: trailing data after X.509 ExtendedKeyUsage")
|
||||
if len(e.Value) == 0 {
|
||||
nfe.AddError(errors.New("x509: empty ExtendedKeyUsage"))
|
||||
} else {
|
||||
rest, err := asn1.Unmarshal(e.Value, &keyUsage)
|
||||
if err != nil {
|
||||
var laxErr error
|
||||
rest, laxErr = asn1.UnmarshalWithParams(e.Value, &keyUsage, "lax")
|
||||
if laxErr != nil {
|
||||
return nil, laxErr
|
||||
}
|
||||
nfe.AddError(err)
|
||||
}
|
||||
if len(rest) != 0 {
|
||||
return nil, errors.New("x509: trailing data after X.509 ExtendedKeyUsage")
|
||||
}
|
||||
}
|
||||
|
||||
for _, u := range keyUsage {
|
||||
|
|
@ -1725,12 +1868,15 @@ func parseCertificate(in *certificate) (*Certificate, error) {
|
|||
}
|
||||
} else if e.Id.Equal(OIDExtensionAuthorityInfoAccess) {
|
||||
// RFC 5280 4.2.2.1: Authority Information Access
|
||||
var aia []authorityInfoAccess
|
||||
var aia []accessDescription
|
||||
if rest, err := asn1.Unmarshal(e.Value, &aia); err != nil {
|
||||
return nil, err
|
||||
} else if len(rest) != 0 {
|
||||
return nil, errors.New("x509: trailing data after X.509 authority information")
|
||||
}
|
||||
if len(aia) == 0 {
|
||||
nfe.AddError(errors.New("x509: empty AuthorityInfoAccess extension"))
|
||||
}
|
||||
|
||||
for _, v := range aia {
|
||||
// GeneralName: uniformResourceIdentifier [6] IA5String
|
||||
|
|
@ -1743,6 +1889,34 @@ func parseCertificate(in *certificate) (*Certificate, error) {
|
|||
out.IssuingCertificateURL = append(out.IssuingCertificateURL, string(v.Location.Bytes))
|
||||
}
|
||||
}
|
||||
} else if e.Id.Equal(OIDExtensionSubjectInfoAccess) {
|
||||
// RFC 5280 4.2.2.2: Subject Information Access
|
||||
var sia []accessDescription
|
||||
if rest, err := asn1.Unmarshal(e.Value, &sia); err != nil {
|
||||
return nil, err
|
||||
} else if len(rest) != 0 {
|
||||
return nil, errors.New("x509: trailing data after X.509 subject information")
|
||||
}
|
||||
if len(sia) == 0 {
|
||||
nfe.AddError(errors.New("x509: empty SubjectInfoAccess extension"))
|
||||
}
|
||||
|
||||
for _, v := range sia {
|
||||
// TODO(drysdale): cope with non-URI types of GeneralName
|
||||
// GeneralName: uniformResourceIdentifier [6] IA5String
|
||||
if v.Location.Tag != 6 {
|
||||
continue
|
||||
}
|
||||
if v.Method.Equal(OIDSubjectInfoAccessTimestamp) {
|
||||
out.SubjectTimestamps = append(out.SubjectTimestamps, string(v.Location.Bytes))
|
||||
} else if v.Method.Equal(OIDSubjectInfoAccessCARepo) {
|
||||
out.SubjectCARepositories = append(out.SubjectCARepositories, string(v.Location.Bytes))
|
||||
}
|
||||
}
|
||||
} else if e.Id.Equal(OIDExtensionIPPrefixList) {
|
||||
out.RPKIAddressRanges = parseRPKIAddrBlocks(e.Value, &nfe)
|
||||
} else if e.Id.Equal(OIDExtensionASList) {
|
||||
out.RPKIASNumbers, out.RPKIRoutingDomainIDs = parseRPKIASIdentifiers(e.Value, &nfe)
|
||||
} else if e.Id.Equal(OIDExtensionCTSCT) {
|
||||
if rest, err := asn1.Unmarshal(e.Value, &out.RawSCT); err != nil {
|
||||
nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal SCT list extension: %v", err))
|
||||
|
|
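The two Subject Information Access methods handled in the hunk above surface directly as new fields on the parsed certificate, so reading them back is trivial. A short sketch; the function name is illustrative only:

```
package siaexample

import (
	"fmt"

	"github.com/google/certificate-transparency-go/x509"
)

// printSIA dumps the SubjectInfoAccess URIs that this update starts
// extracting into SubjectTimestamps and SubjectCARepositories.
func printSIA(cert *x509.Certificate) {
	for _, ts := range cert.SubjectTimestamps {
		fmt.Println("timestamp service:", ts)
	}
	for _, repo := range cert.SubjectCARepositories {
		fmt.Println("CA repository:", repo)
	}
}
```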
@ -1787,6 +1961,8 @@ func ParseTBSCertificate(asn1Data []byte) (*Certificate, error) {
|
|||
}
|
||||
|
||||
// ParseCertificate parses a single certificate from the given ASN.1 DER data.
|
||||
// This function can return both a Certificate and an error (in which case the
|
||||
// error will be of type NonFatalErrors).
|
||||
func ParseCertificate(asn1Data []byte) (*Certificate, error) {
|
||||
var cert certificate
|
||||
rest, err := asn1.Unmarshal(asn1Data, &cert)
|
||||
|
|
@ -1802,6 +1978,8 @@ func ParseCertificate(asn1Data []byte) (*Certificate, error) {
|
|||
|
||||
// ParseCertificates parses one or more certificates from the given ASN.1 DER
|
||||
// data. The certificates must be concatenated with no intermediate padding.
|
||||
// This function can return both a slice of Certificate and an error (in which
|
||||
// case the error will be of type NonFatalErrors).
|
||||
func ParseCertificates(asn1Data []byte) ([]*Certificate, error) {
|
||||
var v []*certificate
|
||||
|
||||
|
|
@ -1815,15 +1993,23 @@ func ParseCertificates(asn1Data []byte) ([]*Certificate, error) {
|
|||
v = append(v, cert)
|
||||
}
|
||||
|
||||
var nfe NonFatalErrors
|
||||
ret := make([]*Certificate, len(v))
|
||||
for i, ci := range v {
|
||||
cert, err := parseCertificate(ci)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if errs, ok := err.(NonFatalErrors); !ok {
|
||||
return nil, err
|
||||
} else {
|
||||
nfe.Errors = append(nfe.Errors, errs.Errors...)
|
||||
}
|
||||
}
|
||||
ret[i] = cert
|
||||
}
|
||||
|
||||
if nfe.HasError() {
|
||||
return ret, nfe
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
|
|
@ -1875,15 +2061,23 @@ var (
|
|||
|
||||
OIDExtensionAuthorityInfoAccess = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 1}
|
||||
OIDExtensionSubjectInfoAccess = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 11}
|
||||
|
||||
// OIDExtensionCTPoison is defined in RFC 6962 s3.1.
|
||||
OIDExtensionCTPoison = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3}
|
||||
// OIDExtensionCTSCT is defined in RFC 6962 s3.3.
|
||||
OIDExtensionCTSCT = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2}
|
||||
// OIDExtensionIPPrefixList is defined in RFC 3779 s2.
|
||||
OIDExtensionIPPrefixList = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 7}
|
||||
// OIDExtensionASList is defined in RFC 3779 s3.
|
||||
OIDExtensionASList = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 8}
|
||||
)
|
||||
|
||||
var (
|
||||
OIDAuthorityInfoAccessOCSP = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1}
|
||||
OIDAuthorityInfoAccessIssuers = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 2}
|
||||
OIDSubjectInfoAccessTimestamp = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 3}
|
||||
OIDSubjectInfoAccessCARepo = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 5}
|
||||
OIDAnyPolicy = asn1.ObjectIdentifier{2, 5, 29, 32, 0}
|
||||
)
|
||||
|
||||
// oidInExtensions returns whether an extension with the given oid exists in
|
||||
|
|
@@ -1932,7 +2126,7 @@ func isIA5String(s string) error {
}

func buildExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId []byte) (ret []pkix.Extension, err error) {
ret = make([]pkix.Extension, 11 /* maximum number of elements. */)
ret = make([]pkix.Extension, 12 /* maximum number of elements. */)
n := 0

if template.KeyUsage != 0 &&
@@ -2017,15 +2211,15 @@ func buildExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId
if (len(template.OCSPServer) > 0 || len(template.IssuingCertificateURL) > 0) &&
!oidInExtensions(OIDExtensionAuthorityInfoAccess, template.ExtraExtensions) {
ret[n].Id = OIDExtensionAuthorityInfoAccess
var aiaValues []authorityInfoAccess
var aiaValues []accessDescription
for _, name := range template.OCSPServer {
aiaValues = append(aiaValues, authorityInfoAccess{
aiaValues = append(aiaValues, accessDescription{
Method: OIDAuthorityInfoAccessOCSP,
Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(name)},
})
}
for _, name := range template.IssuingCertificateURL {
aiaValues = append(aiaValues, authorityInfoAccess{
aiaValues = append(aiaValues, accessDescription{
Method: OIDAuthorityInfoAccessIssuers,
Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(name)},
})
@@ -2037,6 +2231,29 @@ func buildExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId
n++
}

if len(template.SubjectTimestamps) > 0 || len(template.SubjectCARepositories) > 0 &&
!oidInExtensions(OIDExtensionSubjectInfoAccess, template.ExtraExtensions) {
ret[n].Id = OIDExtensionSubjectInfoAccess
var siaValues []accessDescription
for _, ts := range template.SubjectTimestamps {
siaValues = append(siaValues, accessDescription{
Method: OIDSubjectInfoAccessTimestamp,
Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(ts)},
})
}
for _, repo := range template.SubjectCARepositories {
siaValues = append(siaValues, accessDescription{
Method: OIDSubjectInfoAccessCARepo,
Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(repo)},
})
}
ret[n].Value, err = asn1.Marshal(siaValues)
if err != nil {
return
}
n++
}

if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0 || len(template.URIs) > 0) &&
!oidInExtensions(OIDExtensionSubjectAltName, template.ExtraExtensions) {
ret[n].Id = OIDExtensionSubjectAltName

@@ -2203,7 +2420,8 @@ func buildExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId
}

// Adding another extension here? Remember to update the maximum number
// of elements in the make() at the top of the function.
// of elements in the make() at the top of the function and the list of
// template fields used in CreateCertificate documentation.

return append(ret[:n], template.ExtraExtensions...), nil
}
@@ -2290,12 +2508,26 @@ func signingParamsForPublicKey(pub interface{}, requestedSigAlgo SignatureAlgori
var emptyASN1Subject = []byte{0x30, 0}

// CreateCertificate creates a new X.509v3 certificate based on a template.
// The following members of template are used: AuthorityKeyId,
// BasicConstraintsValid, DNSNames, ExcludedDNSDomains, ExtKeyUsage,
// IsCA, KeyUsage, MaxPathLen, MaxPathLenZero, NotAfter, NotBefore,
// PermittedDNSDomains, PermittedDNSDomainsCritical, SerialNumber,
// SignatureAlgorithm, Subject, SubjectKeyId, UnknownExtKeyUsage,
// and RawSCT.
// The following members of template are used:
// - SerialNumber
// - Subject
// - NotBefore, NotAfter
// - SignatureAlgorithm
// - For extensions:
//   - KeyUsage
//   - ExtKeyUsage, UnknownExtKeyUsage
//   - BasicConstraintsValid, IsCA, MaxPathLen, MaxPathLenZero
//   - SubjectKeyId
//   - AuthorityKeyId
//   - OCSPServer, IssuingCertificateURL
//   - SubjectTimestamps, SubjectCARepositories
//   - DNSNames, EmailAddresses, IPAddresses, URIs
//   - PolicyIdentifiers
//   - ExcludedDNSDomains, ExcludedIPRanges, ExcludedEmailAddresses, ExcludedURIDomains, PermittedDNSDomainsCritical,
//     PermittedDNSDomains, PermittedIPRanges, PermittedEmailAddresses, PermittedURIDomains
//   - CRLDistributionPoints
//   - RawSCT, SCTList
//   - ExtraExtensions
//
// The certificate is signed by parent. If parent is equal to template then the
// certificate is self-signed. The parameter pub is the public key of the
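For context on the rewritten field list, here is a minimal sketch of a self-signed template that touches a few of the documented fields, including the new SubjectTimestamps plumbing; it assumes the vendored x509 and x509/pkix packages, and the names and URL are placeholders.

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"log"
	"math/big"
	"time"

	"github.com/google/certificate-transparency-go/x509"
	"github.com/google/certificate-transparency-go/x509/pkix"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	// Only a subset of the template fields listed above is shown.
	tmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "example.com"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(24 * time.Hour),
		KeyUsage:              x509.KeyUsageDigitalSignature,
		BasicConstraintsValid: true,
		DNSNames:              []string{"example.com"},
		// Feeds the SubjectInfoAccess extension added in this update.
		SubjectTimestamps: []string{"http://timestamp.example.com"},
	}

	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("issued %d DER bytes", len(der))
}
```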
@@ -2569,7 +2801,7 @@ func newRawAttributes(attributes []pkix.AttributeTypeAndValueSET) ([]asn1.RawVal
return rawAttributes, nil
}

// parseRawAttributes Unmarshals RawAttributes intos AttributeTypeAndValueSETs.
// parseRawAttributes Unmarshals RawAttributes into AttributeTypeAndValueSETs.
func parseRawAttributes(rawAttributes []asn1.RawValue) []pkix.AttributeTypeAndValueSET {
var attributes []pkix.AttributeTypeAndValueSET
for _, rawAttr := range rawAttributes {

@@ -2617,9 +2849,18 @@ func parseCSRExtensions(rawAttributes []asn1.RawValue) ([]pkix.Extension, error)
}

// CreateCertificateRequest creates a new certificate request based on a
// template. The following members of template are used: Attributes, DNSNames,
// EmailAddresses, ExtraExtensions, IPAddresses, URIs, SignatureAlgorithm, and
// Subject. The private key is the private key of the signer.
// template. The following members of template are used:
//
// - Attributes
// - DNSNames
// - EmailAddresses
// - ExtraExtensions
// - IPAddresses
// - URIs
// - SignatureAlgorithm
// - Subject
//
// The private key is the private key of the signer.
//
// The returned slice is the certificate request in DER encoding.
//
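Likewise for the updated CreateCertificateRequest comment, a minimal sketch using the listed fields, assuming the vendored packages; the hostnames are placeholders.

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"encoding/pem"
	"log"
	"os"

	"github.com/google/certificate-transparency-go/x509"
	"github.com/google/certificate-transparency-go/x509/pkix"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	csr := &x509.CertificateRequest{
		Subject:            pkix.Name{CommonName: "example.com"},
		DNSNames:           []string{"example.com", "www.example.com"},
		SignatureAlgorithm: x509.ECDSAWithSHA256,
	}

	der, err := x509.CreateCertificateRequest(rand.Reader, csr, key)
	if err != nil {
		log.Fatal(err)
	}
	// The returned slice is the CSR in DER encoding, as the comment says.
	if err := pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE REQUEST", Bytes: der}); err != nil {
		log.Fatal(err)
	}
}
```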
@@ -2662,70 +2903,57 @@ func CreateCertificateRequest(rand io.Reader, template *CertificateRequest, priv

extensions = append(extensions, template.ExtraExtensions...)

var attributes []pkix.AttributeTypeAndValueSET
attributes = append(attributes, template.Attributes...)
// Make a copy of template.Attributes because we may alter it below.
attributes := make([]pkix.AttributeTypeAndValueSET, 0, len(template.Attributes))
for _, attr := range template.Attributes {
values := make([][]pkix.AttributeTypeAndValue, len(attr.Value))
copy(values, attr.Value)
attributes = append(attributes, pkix.AttributeTypeAndValueSET{
Type: attr.Type,
Value: values,
})
}

extensionsAppended := false
if len(extensions) > 0 {
// specifiedExtensions contains all the extensions that we
// found specified via template.Attributes.
specifiedExtensions := make(map[string]bool)

for _, atvSet := range template.Attributes {
if !atvSet.Type.Equal(oidExtensionRequest) {
// Append the extensions to an existing attribute if possible.
for _, atvSet := range attributes {
if !atvSet.Type.Equal(oidExtensionRequest) || len(atvSet.Value) == 0 {
continue
}

// specifiedExtensions contains all the extensions that we
// found specified via template.Attributes.
specifiedExtensions := make(map[string]bool)

for _, atvs := range atvSet.Value {
for _, atv := range atvs {
specifiedExtensions[atv.Type.String()] = true
}
}
}

atvs := make([]pkix.AttributeTypeAndValue, 0, len(extensions))
for _, e := range extensions {
if specifiedExtensions[e.Id.String()] {
// Attributes already contained a value for
// this extension and it takes priority.
continue
newValue := make([]pkix.AttributeTypeAndValue, 0, len(atvSet.Value[0])+len(extensions))
newValue = append(newValue, atvSet.Value[0]...)

for _, e := range extensions {
if specifiedExtensions[e.Id.String()] {
// Attributes already contained a value for
// this extension and it takes priority.
continue
}

newValue = append(newValue, pkix.AttributeTypeAndValue{
// There is no place for the critical
// flag in an AttributeTypeAndValue.
Type: e.Id,
Value: e.Value,
})
}

atvs = append(atvs, pkix.AttributeTypeAndValue{
// There is no place for the critical flag in a CSR.
Type: e.Id,
Value: e.Value,
})
}

// Append the extensions to an existing attribute if possible.
appended := false
for _, atvSet := range attributes {
if !atvSet.Type.Equal(oidExtensionRequest) || len(atvSet.Value) == 0 {
continue
}

atvSet.Value[0] = append(atvSet.Value[0], atvs...)
appended = true
atvSet.Value[0] = newValue
extensionsAppended = true
break
}

// Otherwise, add a new attribute for the extensions.
if !appended {
attributes = append(attributes, pkix.AttributeTypeAndValueSET{
Type: oidExtensionRequest,
Value: [][]pkix.AttributeTypeAndValue{
atvs,
},
})
}
}

asn1Subject := template.RawSubject
if len(asn1Subject) == 0 {
asn1Subject, err = asn1.Marshal(template.Subject.ToRDNSequence())
if err != nil {
return
}
}

rawAttributes, err := newRawAttributes(attributes)
@@ -2733,6 +2961,38 @@ func CreateCertificateRequest(rand io.Reader, template *CertificateRequest, priv
return
}

// If not included in attributes, add a new attribute for the
// extensions.
if len(extensions) > 0 && !extensionsAppended {
attr := struct {
Type asn1.ObjectIdentifier
Value [][]pkix.Extension `asn1:"set"`
}{
Type: oidExtensionRequest,
Value: [][]pkix.Extension{extensions},
}

b, err := asn1.Marshal(attr)
if err != nil {
return nil, errors.New("x509: failed to serialise extensions attribute: " + err.Error())
}

var rawValue asn1.RawValue
if _, err := asn1.Unmarshal(b, &rawValue); err != nil {
return nil, err
}

rawAttributes = append(rawAttributes, rawValue)
}

asn1Subject := template.RawSubject
if len(asn1Subject) == 0 {
asn1Subject, err = asn1.Marshal(template.Subject.ToRDNSequence())
if err != nil {
return nil, err
}
}

tbsCSR := tbsCertificateRequest{
Version: 0, // PKCS #10, RFC 2986
Subject: asn1.RawValue{FullBytes: asn1Subject},
@@ -2789,7 +3049,7 @@ func ParseCertificateRequest(asn1Data []byte) (*CertificateRequest, error) {

func parseCertificateRequest(in *certificateRequest) (*CertificateRequest, error) {
out := &CertificateRequest{
Raw: in.Raw,
Raw: in.Raw,
RawTBSCertificateRequest: in.TBSCSR.Raw,
RawSubjectPublicKeyInfo: in.TBSCSR.PublicKey.Raw,
RawSubject: in.TBSCSR.Subject.FullBytes,

@@ -2804,10 +3064,15 @@ func parseCertificateRequest(in *certificateRequest) (*CertificateRequest, error
}

var err error
out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCSR.PublicKey)
var nfe NonFatalErrors
out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCSR.PublicKey, &nfe)
if err != nil {
return nil, err
}
// Treat non-fatal errors as fatal here.
if len(nfe.Errors) > 0 {
return nil, nfe.Errors[0]
}

var subject pkix.RDNSequence
if rest, err := asn1.Unmarshal(in.TBSCSR.Subject.FullBytes, &subject); err != nil {

@@ -2822,7 +3087,6 @@ func parseCertificateRequest(in *certificateRequest) (*CertificateRequest, error
return nil, err
}

var nfe NonFatalErrors
for _, extension := range out.Extensions {
if extension.Id.Equal(OIDExtensionSubjectAltName) {
out.DNSNames, out.EmailAddresses, out.IPAddresses, out.URIs, err = parseSANExtension(extension.Value, &nfe)
@@ -11,13 +11,12 @@ package main

import (
"crypto/rand"
// START CT CHANGES
"github.com/google/certificate-transparency-go/x509"
"github.com/google/certificate-transparency-go/x509/pkix"
// END CT CHANGES
"encoding/pem"
"math/big"
"time"

"github.com/google/certificate-transparency-go/x509"
"github.com/google/certificate-transparency-go/x509/pkix"
)

func main() {