mirror of https://github.com/docker/docs.git
Update godeps

Godeps now has the collection of dependencies from the different cmds.

Signed-off-by: Derek McGowan <derek@mcgstyle.net> (github: dmcgowan)

This commit is contained in:
parent 2aba401607
commit a7164b638b
@ -2,7 +2,8 @@
	"ImportPath": "github.com/docker/vetinari",
	"GoVersion": "go1.4.2",
	"Packages": [
		"./..."
		"github.com/docker/vetinari/cmd/notary",
		"github.com/docker/vetinari/cmd/vetinari-server"
	],
	"Deps": [
		{
@ -14,6 +15,15 @@
			"ImportPath": "github.com/Sirupsen/logrus",
			"Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d"
		},
		{
			"ImportPath": "github.com/BurntSushi/toml",
			"Rev": "bd2bdf7f18f849530ef7a1c29a4290217cab32a1"
		},
		{
			"ImportPath": "github.com/Sirupsen/logrus",
			"Comment": "v0.7.3",
			"Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d"
		},
		{
			"ImportPath": "github.com/agl/ed25519",
			"Rev": "d2b94fd789ea21d12fac1a4443dd3a3f79cda72c"
@ -53,6 +63,48 @@
			"ImportPath": "github.com/endophage/gotuf/errors",
			"Rev": "930a4e1cc71f866a412aea60c960ee4345f0c76a"
		},
		{
			"ImportPath": "github.com/inconshreveable/mousetrap",
			"Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
		},
		{
			"ImportPath": "github.com/kr/pretty",
			"Comment": "go.weekly.2011-12-22-18-gbc9499c",
			"Rev": "bc9499caa0f45ee5edb2f0209fbd61fbf3d9018f"
		},
		{
			"ImportPath": "github.com/kr/text",
			"Rev": "6807e777504f54ad073ecef66747de158294b639"
		},
		{
			"ImportPath": "github.com/magiconair/properties",
			"Comment": "v1.5.3",
			"Rev": "624009598839a9432bd97bb75552389422357723"
		},
		{
			"ImportPath": "github.com/mitchellh/mapstructure",
			"Rev": "2caf8efc93669b6c43e0441cdc6aed17546c96f3"
		},
		{
			"ImportPath": "github.com/spf13/cast",
			"Rev": "4d07383ffe94b5e5a6fa3af9211374a4507a0184"
		},
		{
			"ImportPath": "github.com/spf13/cobra",
			"Rev": "312092086bed4968099259622145a0c9ae280064"
		},
		{
			"ImportPath": "github.com/spf13/jwalterweatherman",
			"Rev": "3d60171a64319ef63c78bd45bd60e6eab1e75f8b"
		},
		{
			"ImportPath": "github.com/spf13/pflag",
			"Rev": "5644820622454e71517561946e3d94b9f9db6842"
		},
		{
			"ImportPath": "github.com/spf13/viper",
			"Rev": "be5ff3e4840cf692388bde7a057595a474ef379e"
		},
		{
			"ImportPath": "github.com/endophage/gotuf/keys",
			"Rev": "930a4e1cc71f866a412aea60c960ee4345f0c76a"
@ -101,6 +153,10 @@
		{
			"ImportPath": "google.golang.org/grpc",
			"Rev": "97f42dd262e97f4632986eddbc74c19fa022ea08"
		},
		{
			"ImportPath": "gopkg.in/yaml.v2",
			"Rev": "bef53efd0c76e49e6de55ead051f886bea7e9420"
		}
	]
}
@ -0,0 +1,5 @@
TAGS
tags
.*.swp
tomlcheck/tomlcheck
toml.test
@ -0,0 +1,12 @@
language: go
go:
  - 1.1
  - 1.2
  - tip
install:
  - go install ./...
  - go get github.com/BurntSushi/toml-test
script:
  - export PATH="$PATH:$HOME/gopath/bin"
  - make test
@ -0,0 +1,3 @@
Compatible with TOML version
[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
@ -0,0 +1,14 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004

Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>

Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.

DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

0. You just DO WHAT THE FUCK YOU WANT TO.
@ -0,0 +1,19 @@
install:
	go install ./...

test: install
	go test -v
	toml-test toml-test-decoder
	toml-test -encoder toml-test-encoder

fmt:
	gofmt -w *.go */*.go
	colcheck *.go */*.go

tags:
	find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS

push:
	git push origin master
	git push github master
@ -0,0 +1,220 @@
|
|||
## TOML parser and encoder for Go with reflection
|
||||
|
||||
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
|
||||
reflection interface similar to Go's standard library `json` and `xml`
|
||||
packages. This package also supports the `encoding.TextUnmarshaler` and
|
||||
`encoding.TextMarshaler` interfaces so that you can define custom data
|
||||
representations. (There is an example of this below.)
|
||||
|
||||
Spec: https://github.com/mojombo/toml
|
||||
|
||||
Compatible with TOML version
|
||||
[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
|
||||
|
||||
Documentation: http://godoc.org/github.com/BurntSushi/toml
|
||||
|
||||
Installation:
|
||||
|
||||
```bash
|
||||
go get github.com/BurntSushi/toml
|
||||
```
|
||||
|
||||
Try the toml validator:
|
||||
|
||||
```bash
|
||||
go get github.com/BurntSushi/toml/cmd/tomlv
|
||||
tomlv some-toml-file.toml
|
||||
```
|
||||
|
||||
[Build Status](https://travis-ci.org/BurntSushi/toml)
|
||||
|
||||
|
||||
### Testing
|
||||
|
||||
This package passes all tests in
|
||||
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
|
||||
and the encoder.
|
||||
|
||||
### Examples
|
||||
|
||||
This package works similarly to how the Go standard library handles `XML`
|
||||
and `JSON`. Namely, data is loaded into Go values via reflection.
|
||||
|
||||
For the simplest example, consider some TOML file as just a list of keys
|
||||
and values:
|
||||
|
||||
```toml
|
||||
Age = 25
|
||||
Cats = [ "Cauchy", "Plato" ]
|
||||
Pi = 3.14
|
||||
Perfection = [ 6, 28, 496, 8128 ]
|
||||
DOB = 1987-07-05T05:45:00Z
|
||||
```
|
||||
|
||||
Which could be defined in Go as:
|
||||
|
||||
```go
|
||||
type Config struct {
|
||||
Age int
|
||||
Cats []string
|
||||
Pi float64
|
||||
Perfection []int
|
||||
DOB time.Time // requires `import time`
|
||||
}
|
||||
```
|
||||
|
||||
And then decoded with:
|
||||
|
||||
```go
|
||||
var conf Config
|
||||
if _, err := toml.Decode(tomlData, &conf); err != nil {
|
||||
// handle error
|
||||
}
|
||||
```
|
||||
|
||||
You can also use struct tags if your struct field name doesn't map to a TOML
|
||||
key value directly:
|
||||
|
||||
```toml
|
||||
some_key_NAME = "wat"
|
||||
```
|
||||
|
||||
```go
|
||||
type TOML struct {
|
||||
ObscureKey string `toml:"some_key_NAME"`
|
||||
}
|
||||
```
|
||||
|
||||
### Using the `encoding.TextUnmarshaler` interface
|
||||
|
||||
Here's an example that automatically parses duration strings into
|
||||
`time.Duration` values:
|
||||
|
||||
```toml
|
||||
[[song]]
|
||||
name = "Thunder Road"
|
||||
duration = "4m49s"
|
||||
|
||||
[[song]]
|
||||
name = "Stairway to Heaven"
|
||||
duration = "8m03s"
|
||||
```
|
||||
|
||||
Which can be decoded with:
|
||||
|
||||
```go
|
||||
type song struct {
|
||||
Name string
|
||||
Duration duration
|
||||
}
|
||||
type songs struct {
|
||||
Song []song
|
||||
}
|
||||
var favorites songs
|
||||
if _, err := Decode(blob, &favorites); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, s := range favorites.Song {
|
||||
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
|
||||
}
|
||||
```
|
||||
|
||||
And you'll also need a `duration` type that satisfies the
|
||||
`encoding.TextUnmarshaler` interface:
|
||||
|
||||
```go
|
||||
type duration struct {
|
||||
time.Duration
|
||||
}
|
||||
|
||||
func (d *duration) UnmarshalText(text []byte) error {
|
||||
var err error
|
||||
d.Duration, err = time.ParseDuration(string(text))
|
||||
return err
|
||||
}
|
||||
```
|
||||
|
||||
### More complex usage
|
||||
|
||||
Here's an example of how to load the example from the official spec page:
|
||||
|
||||
```toml
|
||||
# This is a TOML document. Boom.
|
||||
|
||||
title = "TOML Example"
|
||||
|
||||
[owner]
|
||||
name = "Tom Preston-Werner"
|
||||
organization = "GitHub"
|
||||
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
|
||||
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
|
||||
|
||||
[database]
|
||||
server = "192.168.1.1"
|
||||
ports = [ 8001, 8001, 8002 ]
|
||||
connection_max = 5000
|
||||
enabled = true
|
||||
|
||||
[servers]
|
||||
|
||||
# You can indent as you please. Tabs or spaces. TOML don't care.
|
||||
[servers.alpha]
|
||||
ip = "10.0.0.1"
|
||||
dc = "eqdc10"
|
||||
|
||||
[servers.beta]
|
||||
ip = "10.0.0.2"
|
||||
dc = "eqdc10"
|
||||
|
||||
[clients]
|
||||
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
|
||||
|
||||
# Line breaks are OK when inside arrays
|
||||
hosts = [
|
||||
"alpha",
|
||||
"omega"
|
||||
]
|
||||
```
|
||||
|
||||
And the corresponding Go types are:
|
||||
|
||||
```go
|
||||
type tomlConfig struct {
|
||||
Title string
|
||||
Owner ownerInfo
|
||||
DB database `toml:"database"`
|
||||
Servers map[string]server
|
||||
Clients clients
|
||||
}
|
||||
|
||||
type ownerInfo struct {
|
||||
Name string
|
||||
Org string `toml:"organization"`
|
||||
Bio string
|
||||
DOB time.Time
|
||||
}
|
||||
|
||||
type database struct {
|
||||
Server string
|
||||
Ports []int
|
||||
ConnMax int `toml:"connection_max"`
|
||||
Enabled bool
|
||||
}
|
||||
|
||||
type server struct {
|
||||
IP string
|
||||
DC string
|
||||
}
|
||||
|
||||
type clients struct {
|
||||
Data [][]interface{}
|
||||
Hosts []string
|
||||
}
|
||||
```
|
||||
|
||||
Note that a case insensitive match will be tried if an exact match can't be
|
||||
found.
|
||||
|
||||
A working example of the above can be found in `_examples/example.{go,toml}`.
|
||||
|
14 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING generated vendored Normal file
|
@ -0,0 +1,14 @@
|
|||
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
|
||||
Version 2, December 2004
|
||||
|
||||
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim or modified
|
||||
copies of this license document, and changing it is allowed as long
|
||||
as the name is changed.
|
||||
|
||||
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
|
||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
||||
|
||||
0. You just DO WHAT THE FUCK YOU WANT TO.
|
||||
|
14 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md generated vendored Normal file
|
@ -0,0 +1,14 @@
|
|||
# Implements the TOML test suite interface
|
||||
|
||||
This is an implementation of the interface expected by
|
||||
[toml-test](https://github.com/BurntSushi/toml-test) for my
|
||||
[toml parser written in Go](https://github.com/BurntSushi/toml).
|
||||
In particular, it maps TOML data on `stdin` to a JSON format on `stdout`.
|
||||
|
||||
|
||||
Compatible with TOML version
|
||||
[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
|
||||
|
||||
Compatible with `toml-test` version
|
||||
[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0)
|
||||
|
90 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go generated vendored Normal file
|
@ -0,0 +1,90 @@
|
|||
// Command toml-test-decoder satisfies the toml-test interface for testing
|
||||
// TOML decoders. Namely, it accepts TOML on stdin and outputs JSON on stdout.
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
)
|
||||
|
||||
func init() {
|
||||
log.SetFlags(0)
|
||||
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
}
|
||||
|
||||
func usage() {
|
||||
log.Printf("Usage: %s < toml-file\n", path.Base(os.Args[0]))
|
||||
flag.PrintDefaults()
|
||||
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func main() {
|
||||
if flag.NArg() != 0 {
|
||||
flag.Usage()
|
||||
}
|
||||
|
||||
var tmp interface{}
|
||||
if _, err := toml.DecodeReader(os.Stdin, &tmp); err != nil {
|
||||
log.Fatalf("Error decoding TOML: %s", err)
|
||||
}
|
||||
|
||||
typedTmp := translate(tmp)
|
||||
if err := json.NewEncoder(os.Stdout).Encode(typedTmp); err != nil {
|
||||
log.Fatalf("Error encoding JSON: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func translate(tomlData interface{}) interface{} {
|
||||
switch orig := tomlData.(type) {
|
||||
case map[string]interface{}:
|
||||
typed := make(map[string]interface{}, len(orig))
|
||||
for k, v := range orig {
|
||||
typed[k] = translate(v)
|
||||
}
|
||||
return typed
|
||||
case []map[string]interface{}:
|
||||
typed := make([]map[string]interface{}, len(orig))
|
||||
for i, v := range orig {
|
||||
typed[i] = translate(v).(map[string]interface{})
|
||||
}
|
||||
return typed
|
||||
case []interface{}:
|
||||
typed := make([]interface{}, len(orig))
|
||||
for i, v := range orig {
|
||||
typed[i] = translate(v)
|
||||
}
|
||||
|
||||
// We don't really need to tag arrays, but let's be future proof.
|
||||
// (If TOML ever supports tuples, we'll need this.)
|
||||
return tag("array", typed)
|
||||
case time.Time:
|
||||
return tag("datetime", orig.Format("2006-01-02T15:04:05Z"))
|
||||
case bool:
|
||||
return tag("bool", fmt.Sprintf("%v", orig))
|
||||
case int64:
|
||||
return tag("integer", fmt.Sprintf("%d", orig))
|
||||
case float64:
|
||||
return tag("float", fmt.Sprintf("%v", orig))
|
||||
case string:
|
||||
return tag("string", orig)
|
||||
}
|
||||
|
||||
panic(fmt.Sprintf("Unknown type: %T", tomlData))
|
||||
}
|
||||
|
||||
func tag(typeName string, data interface{}) map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"type": typeName,
|
||||
"value": data,
|
||||
}
|
||||
}
|
14 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING generated vendored Normal file
|
@ -0,0 +1,14 @@
|
|||
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
|
||||
Version 2, December 2004
|
||||
|
||||
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim or modified
|
||||
copies of this license document, and changing it is allowed as long
|
||||
as the name is changed.
|
||||
|
||||
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
|
||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
||||
|
||||
0. You just DO WHAT THE FUCK YOU WANT TO.
|
||||
|
14 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md generated vendored Normal file
|
@ -0,0 +1,14 @@
|
|||
# Implements the TOML test suite interface for TOML encoders
|
||||
|
||||
This is an implementation of the interface expected by
|
||||
[toml-test](https://github.com/BurntSushi/toml-test) for the
|
||||
[TOML encoder](https://github.com/BurntSushi/toml).
|
||||
In particular, it maps JSON data on `stdin` to a TOML format on `stdout`.
|
||||
|
||||
|
||||
Compatible with TOML version
|
||||
[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
|
||||
|
||||
Compatible with `toml-test` version
|
||||
[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0)
|
||||
|
131 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go generated vendored Normal file
|
@ -0,0 +1,131 @@
|
|||
// Command toml-test-encoder satisfies the toml-test interface for testing
|
||||
// TOML encoders. Namely, it accepts JSON on stdin and outputs TOML on stdout.
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
)
|
||||
|
||||
func init() {
|
||||
log.SetFlags(0)
|
||||
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
}
|
||||
|
||||
func usage() {
|
||||
log.Printf("Usage: %s < json-file\n", path.Base(os.Args[0]))
|
||||
flag.PrintDefaults()
|
||||
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func main() {
|
||||
if flag.NArg() != 0 {
|
||||
flag.Usage()
|
||||
}
|
||||
|
||||
var tmp interface{}
|
||||
if err := json.NewDecoder(os.Stdin).Decode(&tmp); err != nil {
|
||||
log.Fatalf("Error decoding JSON: %s", err)
|
||||
}
|
||||
|
||||
tomlData := translate(tmp)
|
||||
if err := toml.NewEncoder(os.Stdout).Encode(tomlData); err != nil {
|
||||
log.Fatalf("Error encoding TOML: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func translate(typedJson interface{}) interface{} {
|
||||
switch v := typedJson.(type) {
|
||||
case map[string]interface{}:
|
||||
if len(v) == 2 && in("type", v) && in("value", v) {
|
||||
return untag(v)
|
||||
}
|
||||
m := make(map[string]interface{}, len(v))
|
||||
for k, v2 := range v {
|
||||
m[k] = translate(v2)
|
||||
}
|
||||
return m
|
||||
case []interface{}:
|
||||
tabArray := make([]map[string]interface{}, len(v))
|
||||
for i := range v {
|
||||
if m, ok := translate(v[i]).(map[string]interface{}); ok {
|
||||
tabArray[i] = m
|
||||
} else {
|
||||
log.Fatalf("JSON arrays may only contain objects. This " +
|
||||
"corresponds to only tables being allowed in " +
|
||||
"TOML table arrays.")
|
||||
}
|
||||
}
|
||||
return tabArray
|
||||
}
|
||||
log.Fatalf("Unrecognized JSON format '%T'.", typedJson)
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func untag(typed map[string]interface{}) interface{} {
|
||||
t := typed["type"].(string)
|
||||
v := typed["value"]
|
||||
switch t {
|
||||
case "string":
|
||||
return v.(string)
|
||||
case "integer":
|
||||
v := v.(string)
|
||||
n, err := strconv.Atoi(v)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not parse '%s' as integer: %s", v, err)
|
||||
}
|
||||
return n
|
||||
case "float":
|
||||
v := v.(string)
|
||||
f, err := strconv.ParseFloat(v, 64)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not parse '%s' as float64: %s", v, err)
|
||||
}
|
||||
return f
|
||||
case "datetime":
|
||||
v := v.(string)
|
||||
t, err := time.Parse("2006-01-02T15:04:05Z", v)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not parse '%s' as a datetime: %s", v, err)
|
||||
}
|
||||
return t
|
||||
case "bool":
|
||||
v := v.(string)
|
||||
switch v {
|
||||
case "true":
|
||||
return true
|
||||
case "false":
|
||||
return false
|
||||
}
|
||||
log.Fatalf("Could not parse '%s' as a boolean.", v)
|
||||
case "array":
|
||||
v := v.([]interface{})
|
||||
array := make([]interface{}, len(v))
|
||||
for i := range v {
|
||||
if m, ok := v[i].(map[string]interface{}); ok {
|
||||
array[i] = untag(m)
|
||||
} else {
|
||||
log.Fatalf("Arrays may only contain other arrays or "+
|
||||
"primitive values, but found a '%T'.", m)
|
||||
}
|
||||
}
|
||||
return array
|
||||
}
|
||||
log.Fatalf("Unrecognized tag type '%s'.", t)
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func in(key string, m map[string]interface{}) bool {
|
||||
_, ok := m[key]
|
||||
return ok
|
||||
}
|
14 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING generated vendored Normal file
|
@ -0,0 +1,14 @@
|
|||
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
|
||||
Version 2, December 2004
|
||||
|
||||
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim or modified
|
||||
copies of this license document, and changing it is allowed as long
|
||||
as the name is changed.
|
||||
|
||||
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
|
||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
||||
|
||||
0. You just DO WHAT THE FUCK YOU WANT TO.
|
||||
|
22 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/README.md generated vendored Normal file
|
@ -0,0 +1,22 @@
|
|||
# TOML Validator
|
||||
|
||||
If Go is installed, it's simple to try it out:
|
||||
|
||||
```bash
|
||||
go get github.com/BurntSushi/toml/cmd/tomlv
|
||||
tomlv some-toml-file.toml
|
||||
```
|
||||
|
||||
You can see the types of every key in a TOML file with:
|
||||
|
||||
```bash
|
||||
tomlv -types some-toml-file.toml
|
||||
```
|
||||
|
||||
At the moment, only one error message is reported at a time. Error messages
|
||||
include line numbers. No output means that the files given are valid TOML, or
|
||||
there is a bug in `tomlv`.
|
||||
|
||||
Compatible with TOML version
|
||||
[v0.1.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.1.0.md)
|
||||
|
61 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/main.go generated vendored Normal file
|
@ -0,0 +1,61 @@
|
|||
// Command tomlv validates TOML documents and prints each key's type.
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
)
|
||||
|
||||
var (
|
||||
flagTypes = false
|
||||
)
|
||||
|
||||
func init() {
|
||||
log.SetFlags(0)
|
||||
|
||||
flag.BoolVar(&flagTypes, "types", flagTypes,
|
||||
"When set, the types of every defined key will be shown.")
|
||||
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
}
|
||||
|
||||
func usage() {
|
||||
log.Printf("Usage: %s toml-file [ toml-file ... ]\n",
|
||||
path.Base(os.Args[0]))
|
||||
flag.PrintDefaults()
|
||||
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func main() {
|
||||
if flag.NArg() < 1 {
|
||||
flag.Usage()
|
||||
}
|
||||
for _, f := range flag.Args() {
|
||||
var tmp interface{}
|
||||
md, err := toml.DecodeFile(f, &tmp)
|
||||
if err != nil {
|
||||
log.Fatalf("Error in '%s': %s", f, err)
|
||||
}
|
||||
if flagTypes {
|
||||
printTypes(md)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func printTypes(md toml.MetaData) {
|
||||
tabw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
|
||||
for _, key := range md.Keys() {
|
||||
fmt.Fprintf(tabw, "%s%s\t%s\n",
|
||||
strings.Repeat(" ", len(key)-1), key, md.Type(key...))
|
||||
}
|
||||
tabw.Flush()
|
||||
}
|
|
@ -0,0 +1,472 @@
|
|||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var e = fmt.Errorf
|
||||
|
||||
// Primitive is a TOML value that hasn't been decoded into a Go value.
|
||||
// When using the various `Decode*` functions, the type `Primitive` may
|
||||
// be given to any value, and its decoding will be delayed.
|
||||
//
|
||||
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
|
||||
//
|
||||
// The underlying representation of a `Primitive` value is subject to change.
|
||||
// Do not rely on it.
|
||||
//
|
||||
// N.B. Primitive values are still parsed, so using them will only avoid
|
||||
// the overhead of reflection. They can be useful when you don't know the
|
||||
// exact type of TOML data until run time.
|
||||
type Primitive struct {
|
||||
undecoded interface{}
|
||||
context Key
|
||||
}
|
||||
|
||||
// DEPRECATED!
|
||||
//
|
||||
// Use MetaData.PrimitiveDecode instead.
|
||||
func PrimitiveDecode(primValue Primitive, v interface{}) error {
|
||||
md := MetaData{decoded: make(map[string]bool)}
|
||||
return md.unify(primValue.undecoded, rvalue(v))
|
||||
}
|
||||
|
||||
// PrimitiveDecode is just like the other `Decode*` functions, except it
|
||||
// decodes a TOML value that has already been parsed. Valid primitive values
|
||||
// can *only* be obtained from values filled by the decoder functions,
|
||||
// including this method. (i.e., `v` may contain more `Primitive`
|
||||
// values.)
|
||||
//
|
||||
// Meta data for primitive values is included in the meta data returned by
|
||||
// the `Decode*` functions with one exception: keys returned by the Undecoded
|
||||
// method will only reflect keys that were decoded. Namely, any keys hidden
|
||||
// behind a Primitive will be considered undecoded. Executing this method will
|
||||
// update the undecoded keys in the meta data. (See the example.)
|
||||
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
|
||||
md.context = primValue.context
|
||||
defer func() { md.context = nil }()
|
||||
return md.unify(primValue.undecoded, rvalue(v))
|
||||
}
|
||||
|
||||
// Decode will decode the contents of `data` in TOML format into a pointer
|
||||
// `v`.
|
||||
//
|
||||
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
|
||||
// used interchangeably.)
|
||||
//
|
||||
// TOML arrays of tables correspond to either a slice of structs or a slice
|
||||
// of maps.
|
||||
//
|
||||
// TOML datetimes correspond to Go `time.Time` values.
|
||||
//
|
||||
// All other TOML types (float, string, int, bool and array) correspond
|
||||
// to the obvious Go types.
|
||||
//
|
||||
// An exception to the above rules is if a type implements the
|
||||
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
|
||||
// (floats, strings, integers, booleans and datetimes) will be converted to
|
||||
// a byte string and given to the value's UnmarshalText method. See the
|
||||
// Unmarshaler example for a demonstration with time duration strings.
|
||||
//
|
||||
// Key mapping
|
||||
//
|
||||
// TOML keys can map to either keys in a Go map or field names in a Go
|
||||
// struct. The special `toml` struct tag may be used to map TOML keys to
|
||||
// struct fields that don't match the key name exactly. (See the example.)
|
||||
// A case insensitive match to struct names will be tried if an exact match
|
||||
// can't be found.
|
||||
//
|
||||
// The mapping between TOML values and Go values is loose. That is, there
|
||||
// may exist TOML values that cannot be placed into your representation, and
|
||||
// there may be parts of your representation that do not correspond to
|
||||
// TOML values. This loose mapping can be made stricter by using the IsDefined
|
||||
// and/or Undecoded methods on the MetaData returned.
|
||||
//
|
||||
// This decoder will not handle cyclic types. If a cyclic type is passed,
|
||||
// `Decode` will not terminate.
|
||||
func Decode(data string, v interface{}) (MetaData, error) {
|
||||
p, err := parse(data)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
md := MetaData{
|
||||
p.mapping, p.types, p.ordered,
|
||||
make(map[string]bool, len(p.ordered)), nil,
|
||||
}
|
||||
return md, md.unify(p.mapping, rvalue(v))
|
||||
}
|
||||
|
||||
// DecodeFile is just like Decode, except it will automatically read the
|
||||
// contents of the file at `fpath` and decode it for you.
|
||||
func DecodeFile(fpath string, v interface{}) (MetaData, error) {
|
||||
bs, err := ioutil.ReadFile(fpath)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
return Decode(string(bs), v)
|
||||
}
|
||||
|
||||
// DecodeReader is just like Decode, except it will consume all bytes
|
||||
// from the reader and decode it for you.
|
||||
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
|
||||
bs, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
return Decode(string(bs), v)
|
||||
}
|
||||
|
||||
// unify performs a sort of type unification based on the structure of `rv`,
|
||||
// which is the client representation.
|
||||
//
|
||||
// Any type mismatch produces an error. Finding a type that we don't know
|
||||
// how to handle produces an unsupported type error.
|
||||
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
||||
// Special case. Look for a `Primitive` value.
|
||||
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
|
||||
// Save the undecoded data and the key context into the primitive
|
||||
// value.
|
||||
context := make(Key, len(md.context))
|
||||
copy(context, md.context)
|
||||
rv.Set(reflect.ValueOf(Primitive{
|
||||
undecoded: data,
|
||||
context: context,
|
||||
}))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Special case. Handle time.Time values specifically.
|
||||
// TODO: Remove this code when we decide to drop support for Go 1.1.
|
||||
// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
|
||||
// interfaces.
|
||||
if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
|
||||
return md.unifyDatetime(data, rv)
|
||||
}
|
||||
|
||||
// Special case. Look for a value satisfying the TextUnmarshaler interface.
|
||||
if v, ok := rv.Interface().(TextUnmarshaler); ok {
|
||||
return md.unifyText(data, v)
|
||||
}
|
||||
// BUG(burntsushi)
|
||||
// The behavior here is incorrect whenever a Go type satisfies the
|
||||
// encoding.TextUnmarshaler interface but also corresponds to a TOML
|
||||
// hash or array. In particular, the unmarshaler should only be applied
|
||||
// to primitive TOML values. But at this point, it will be applied to
|
||||
// all kinds of values and produce an incorrect error whenever those values
|
||||
// are hashes or arrays (including arrays of tables).
|
||||
|
||||
k := rv.Kind()
|
||||
|
||||
// laziness
|
||||
if k >= reflect.Int && k <= reflect.Uint64 {
|
||||
return md.unifyInt(data, rv)
|
||||
}
|
||||
switch k {
|
||||
case reflect.Ptr:
|
||||
elem := reflect.New(rv.Type().Elem())
|
||||
err := md.unify(data, reflect.Indirect(elem))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rv.Set(elem)
|
||||
return nil
|
||||
case reflect.Struct:
|
||||
return md.unifyStruct(data, rv)
|
||||
case reflect.Map:
|
||||
return md.unifyMap(data, rv)
|
||||
case reflect.Array:
|
||||
return md.unifyArray(data, rv)
|
||||
case reflect.Slice:
|
||||
return md.unifySlice(data, rv)
|
||||
case reflect.String:
|
||||
return md.unifyString(data, rv)
|
||||
case reflect.Bool:
|
||||
return md.unifyBool(data, rv)
|
||||
case reflect.Interface:
|
||||
// we only support empty interfaces.
|
||||
if rv.NumMethod() > 0 {
|
||||
return e("Unsupported type '%s'.", rv.Kind())
|
||||
}
|
||||
return md.unifyAnything(data, rv)
|
||||
case reflect.Float32:
|
||||
fallthrough
|
||||
case reflect.Float64:
|
||||
return md.unifyFloat64(data, rv)
|
||||
}
|
||||
return e("Unsupported type '%s'.", rv.Kind())
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
|
||||
tmap, ok := mapping.(map[string]interface{})
|
||||
if !ok {
|
||||
return mismatch(rv, "map", mapping)
|
||||
}
|
||||
|
||||
for key, datum := range tmap {
|
||||
var f *field
|
||||
fields := cachedTypeFields(rv.Type())
|
||||
for i := range fields {
|
||||
ff := &fields[i]
|
||||
if ff.name == key {
|
||||
f = ff
|
||||
break
|
||||
}
|
||||
if f == nil && strings.EqualFold(ff.name, key) {
|
||||
f = ff
|
||||
}
|
||||
}
|
||||
if f != nil {
|
||||
subv := rv
|
||||
for _, i := range f.index {
|
||||
subv = indirect(subv.Field(i))
|
||||
}
|
||||
if isUnifiable(subv) {
|
||||
md.decoded[md.context.add(key).String()] = true
|
||||
md.context = append(md.context, key)
|
||||
if err := md.unify(datum, subv); err != nil {
|
||||
return e("Type mismatch for '%s.%s': %s",
|
||||
rv.Type().String(), f.name, err)
|
||||
}
|
||||
md.context = md.context[0 : len(md.context)-1]
|
||||
} else if f.name != "" {
|
||||
// Bad user! No soup for you!
|
||||
return e("Field '%s.%s' is unexported, and therefore cannot "+
|
||||
"be loaded with reflection.", rv.Type().String(), f.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
|
||||
tmap, ok := mapping.(map[string]interface{})
|
||||
if !ok {
|
||||
return badtype("map", mapping)
|
||||
}
|
||||
if rv.IsNil() {
|
||||
rv.Set(reflect.MakeMap(rv.Type()))
|
||||
}
|
||||
for k, v := range tmap {
|
||||
md.decoded[md.context.add(k).String()] = true
|
||||
md.context = append(md.context, k)
|
||||
|
||||
rvkey := indirect(reflect.New(rv.Type().Key()))
|
||||
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
|
||||
if err := md.unify(v, rvval); err != nil {
|
||||
return err
|
||||
}
|
||||
md.context = md.context[0 : len(md.context)-1]
|
||||
|
||||
rvkey.SetString(k)
|
||||
rv.SetMapIndex(rvkey, rvval)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
|
||||
datav := reflect.ValueOf(data)
|
||||
if datav.Kind() != reflect.Slice {
|
||||
return badtype("slice", data)
|
||||
}
|
||||
sliceLen := datav.Len()
|
||||
if sliceLen != rv.Len() {
|
||||
return e("expected array length %d; got TOML array of length %d",
|
||||
rv.Len(), sliceLen)
|
||||
}
|
||||
return md.unifySliceArray(datav, rv)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
|
||||
datav := reflect.ValueOf(data)
|
||||
if datav.Kind() != reflect.Slice {
|
||||
return badtype("slice", data)
|
||||
}
|
||||
sliceLen := datav.Len()
|
||||
if rv.IsNil() {
|
||||
rv.Set(reflect.MakeSlice(rv.Type(), sliceLen, sliceLen))
|
||||
}
|
||||
return md.unifySliceArray(datav, rv)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
|
||||
sliceLen := data.Len()
|
||||
for i := 0; i < sliceLen; i++ {
|
||||
v := data.Index(i).Interface()
|
||||
sliceval := indirect(rv.Index(i))
|
||||
if err := md.unify(v, sliceval); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
|
||||
if _, ok := data.(time.Time); ok {
|
||||
rv.Set(reflect.ValueOf(data))
|
||||
return nil
|
||||
}
|
||||
return badtype("time.Time", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
|
||||
if s, ok := data.(string); ok {
|
||||
rv.SetString(s)
|
||||
return nil
|
||||
}
|
||||
return badtype("string", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
|
||||
if num, ok := data.(float64); ok {
|
||||
switch rv.Kind() {
|
||||
case reflect.Float32:
|
||||
fallthrough
|
||||
case reflect.Float64:
|
||||
rv.SetFloat(num)
|
||||
default:
|
||||
panic("bug")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return badtype("float", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
|
||||
if num, ok := data.(int64); ok {
|
||||
if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
|
||||
switch rv.Kind() {
|
||||
case reflect.Int, reflect.Int64:
|
||||
// No bounds checking necessary.
|
||||
case reflect.Int8:
|
||||
if num < math.MinInt8 || num > math.MaxInt8 {
|
||||
return e("Value '%d' is out of range for int8.", num)
|
||||
}
|
||||
case reflect.Int16:
|
||||
if num < math.MinInt16 || num > math.MaxInt16 {
|
||||
return e("Value '%d' is out of range for int16.", num)
|
||||
}
|
||||
case reflect.Int32:
|
||||
if num < math.MinInt32 || num > math.MaxInt32 {
|
||||
return e("Value '%d' is out of range for int32.", num)
|
||||
}
|
||||
}
|
||||
rv.SetInt(num)
|
||||
} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
|
||||
unum := uint64(num)
|
||||
switch rv.Kind() {
|
||||
case reflect.Uint, reflect.Uint64:
|
||||
// No bounds checking necessary.
|
||||
case reflect.Uint8:
|
||||
if num < 0 || unum > math.MaxUint8 {
|
||||
return e("Value '%d' is out of range for uint8.", num)
|
||||
}
|
||||
case reflect.Uint16:
|
||||
if num < 0 || unum > math.MaxUint16 {
|
||||
return e("Value '%d' is out of range for uint16.", num)
|
||||
}
|
||||
case reflect.Uint32:
|
||||
if num < 0 || unum > math.MaxUint32 {
|
||||
return e("Value '%d' is out of range for uint32.", num)
|
||||
}
|
||||
}
|
||||
rv.SetUint(unum)
|
||||
} else {
|
||||
panic("unreachable")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return badtype("integer", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
|
||||
if b, ok := data.(bool); ok {
|
||||
rv.SetBool(b)
|
||||
return nil
|
||||
}
|
||||
return badtype("boolean", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
|
||||
rv.Set(reflect.ValueOf(data))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
|
||||
var s string
|
||||
switch sdata := data.(type) {
|
||||
case TextMarshaler:
|
||||
text, err := sdata.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s = string(text)
|
||||
case fmt.Stringer:
|
||||
s = sdata.String()
|
||||
case string:
|
||||
s = sdata
|
||||
case bool:
|
||||
s = fmt.Sprintf("%v", sdata)
|
||||
case int64:
|
||||
s = fmt.Sprintf("%d", sdata)
|
||||
case float64:
|
||||
s = fmt.Sprintf("%f", sdata)
|
||||
default:
|
||||
return badtype("primitive (string-like)", data)
|
||||
}
|
||||
if err := v.UnmarshalText([]byte(s)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
|
||||
func rvalue(v interface{}) reflect.Value {
|
||||
return indirect(reflect.ValueOf(v))
|
||||
}
|
||||
|
||||
// indirect returns the value pointed to by a pointer.
|
||||
// Pointers are followed until the value is not a pointer.
|
||||
// New values are allocated for each nil pointer.
|
||||
//
|
||||
// An exception to this rule is if the value satisfies an interface of
|
||||
// interest to us (like encoding.TextUnmarshaler).
|
||||
func indirect(v reflect.Value) reflect.Value {
|
||||
if v.Kind() != reflect.Ptr {
|
||||
if v.CanAddr() {
|
||||
pv := v.Addr()
|
||||
if _, ok := pv.Interface().(TextUnmarshaler); ok {
|
||||
return pv
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
if v.IsNil() {
|
||||
v.Set(reflect.New(v.Type().Elem()))
|
||||
}
|
||||
return indirect(reflect.Indirect(v))
|
||||
}
|
||||
|
||||
func isUnifiable(rv reflect.Value) bool {
|
||||
if rv.CanSet() {
|
||||
return true
|
||||
}
|
||||
if _, ok := rv.Interface().(TextUnmarshaler); ok {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func badtype(expected string, data interface{}) error {
|
||||
return e("Expected %s but found '%T'.", expected, data)
|
||||
}
|
||||
|
||||
func mismatch(user reflect.Value, expected string, data interface{}) error {
|
||||
return e("Type mismatch for %s. Expected %s but found '%T'.",
|
||||
user.Type().String(), expected, data)
|
||||
}
|
|
@ -0,0 +1,99 @@
|
|||
package toml
|
||||
|
||||
import "strings"
|
||||
|
||||
// MetaData allows access to meta information about TOML data that may not
|
||||
// be inferrable via reflection. In particular, whether a key has been defined
|
||||
// and the TOML type of a key.
|
||||
type MetaData struct {
|
||||
mapping map[string]interface{}
|
||||
types map[string]tomlType
|
||||
keys []Key
|
||||
decoded map[string]bool
|
||||
context Key // Used only during decoding.
|
||||
}
|
||||
|
||||
// IsDefined returns true if the key given exists in the TOML data. The key
|
||||
// should be specified hierarchically. e.g.,
|
||||
//
|
||||
// // access the TOML key 'a.b.c'
|
||||
// IsDefined("a", "b", "c")
|
||||
//
|
||||
// IsDefined will return false if an empty key given. Keys are case sensitive.
|
||||
func (md *MetaData) IsDefined(key ...string) bool {
|
||||
if len(key) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
var hash map[string]interface{}
|
||||
var ok bool
|
||||
var hashOrVal interface{} = md.mapping
|
||||
for _, k := range key {
|
||||
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
|
||||
return false
|
||||
}
|
||||
if hashOrVal, ok = hash[k]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Type returns a string representation of the type of the key specified.
|
||||
//
|
||||
// Type will return the empty string if given an empty key or a key that
|
||||
// does not exist. Keys are case sensitive.
|
||||
func (md *MetaData) Type(key ...string) string {
|
||||
fullkey := strings.Join(key, ".")
|
||||
if typ, ok := md.types[fullkey]; ok {
|
||||
return typ.typeString()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
|
||||
// to get values of this type.
|
||||
type Key []string
|
||||
|
||||
func (k Key) String() string {
|
||||
return strings.Join(k, ".")
|
||||
}
|
||||
|
||||
func (k Key) add(piece string) Key {
|
||||
newKey := make(Key, len(k)+1)
|
||||
copy(newKey, k)
|
||||
newKey[len(k)] = piece
|
||||
return newKey
|
||||
}
|
||||
|
||||
// Keys returns a slice of every key in the TOML data, including key groups.
|
||||
// Each key is itself a slice, where the first element is the top of the
|
||||
// hierarchy and the last is the most specific.
|
||||
//
|
||||
// The list will have the same order as the keys appeared in the TOML data.
|
||||
//
|
||||
// All keys returned are non-empty.
|
||||
func (md *MetaData) Keys() []Key {
|
||||
return md.keys
|
||||
}
|
||||
|
||||
// Undecoded returns all keys that have not been decoded in the order in which
|
||||
// they appear in the original TOML document.
|
||||
//
|
||||
// This includes keys that haven't been decoded because of a Primitive value.
|
||||
// Once the Primitive value is decoded, the keys will be considered decoded.
|
||||
//
|
||||
// Also note that decoding into an empty interface will result in no decoding,
|
||||
// and so no keys will be considered decoded.
|
||||
//
|
||||
// In this sense, the Undecoded keys correspond to keys in the TOML document
|
||||
// that do not have a concrete type in your representation.
|
||||
func (md *MetaData) Undecoded() []Key {
|
||||
undecoded := make([]Key, 0, len(md.keys))
|
||||
for _, key := range md.keys {
|
||||
if !md.decoded[key.String()] {
|
||||
undecoded = append(undecoded, key)
|
||||
}
|
||||
}
|
||||
return undecoded
|
||||
}
|
|
@ -0,0 +1,540 @@
|
|||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func init() {
|
||||
log.SetFlags(0)
|
||||
}
|
||||
|
||||
func TestDecodeSimple(t *testing.T) {
|
||||
var testSimple = `
|
||||
age = 250
|
||||
andrew = "gallant"
|
||||
kait = "brady"
|
||||
now = 1987-07-05T05:45:00Z
|
||||
yesOrNo = true
|
||||
pi = 3.14
|
||||
colors = [
|
||||
["red", "green", "blue"],
|
||||
["cyan", "magenta", "yellow", "black"],
|
||||
]
|
||||
|
||||
[My.Cats]
|
||||
plato = "cat 1"
|
||||
cauchy = "cat 2"
|
||||
`
|
||||
|
||||
type cats struct {
|
||||
Plato string
|
||||
Cauchy string
|
||||
}
|
||||
type simple struct {
|
||||
Age int
|
||||
Colors [][]string
|
||||
Pi float64
|
||||
YesOrNo bool
|
||||
Now time.Time
|
||||
Andrew string
|
||||
Kait string
|
||||
My map[string]cats
|
||||
}
|
||||
|
||||
var val simple
|
||||
_, err := Decode(testSimple, &val)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
now, err := time.Parse("2006-01-02T15:04:05", "1987-07-05T05:45:00")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
var answer = simple{
|
||||
Age: 250,
|
||||
Andrew: "gallant",
|
||||
Kait: "brady",
|
||||
Now: now,
|
||||
YesOrNo: true,
|
||||
Pi: 3.14,
|
||||
Colors: [][]string{
|
||||
{"red", "green", "blue"},
|
||||
{"cyan", "magenta", "yellow", "black"},
|
||||
},
|
||||
My: map[string]cats{
|
||||
"Cats": cats{Plato: "cat 1", Cauchy: "cat 2"},
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(val, answer) {
|
||||
t.Fatalf("Expected\n-----\n%#v\n-----\nbut got\n-----\n%#v\n",
|
||||
answer, val)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeEmbedded(t *testing.T) {
|
||||
type Dog struct{ Name string }
|
||||
type Age int
|
||||
|
||||
tests := map[string]struct {
|
||||
input string
|
||||
decodeInto interface{}
|
||||
wantDecoded interface{}
|
||||
}{
|
||||
"embedded struct": {
|
||||
input: `Name = "milton"`,
|
||||
decodeInto: &struct{ Dog }{},
|
||||
wantDecoded: &struct{ Dog }{Dog{"milton"}},
|
||||
},
|
||||
"embedded non-nil pointer to struct": {
|
||||
input: `Name = "milton"`,
|
||||
decodeInto: &struct{ *Dog }{},
|
||||
wantDecoded: &struct{ *Dog }{&Dog{"milton"}},
|
||||
},
|
||||
"embedded nil pointer to struct": {
|
||||
input: ``,
|
||||
decodeInto: &struct{ *Dog }{},
|
||||
wantDecoded: &struct{ *Dog }{nil},
|
||||
},
|
||||
"embedded int": {
|
||||
input: `Age = -5`,
|
||||
decodeInto: &struct{ Age }{},
|
||||
wantDecoded: &struct{ Age }{-5},
|
||||
},
|
||||
}
|
||||
|
||||
for label, test := range tests {
|
||||
_, err := Decode(test.input, test.decodeInto)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(test.wantDecoded, test.decodeInto) {
|
||||
t.Errorf("%s: want decoded == %+v, got %+v",
|
||||
label, test.wantDecoded, test.decodeInto)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTableArrays(t *testing.T) {
|
||||
var tomlTableArrays = `
|
||||
[[albums]]
|
||||
name = "Born to Run"
|
||||
|
||||
[[albums.songs]]
|
||||
name = "Jungleland"
|
||||
|
||||
[[albums.songs]]
|
||||
name = "Meeting Across the River"
|
||||
|
||||
[[albums]]
|
||||
name = "Born in the USA"
|
||||
|
||||
[[albums.songs]]
|
||||
name = "Glory Days"
|
||||
|
||||
[[albums.songs]]
|
||||
name = "Dancing in the Dark"
|
||||
`
|
||||
|
||||
type Song struct {
|
||||
Name string
|
||||
}
|
||||
|
||||
type Album struct {
|
||||
Name string
|
||||
Songs []Song
|
||||
}
|
||||
|
||||
type Music struct {
|
||||
Albums []Album
|
||||
}
|
||||
|
||||
expected := Music{[]Album{
|
||||
{"Born to Run", []Song{{"Jungleland"}, {"Meeting Across the River"}}},
|
||||
{"Born in the USA", []Song{{"Glory Days"}, {"Dancing in the Dark"}}},
|
||||
}}
|
||||
var got Music
|
||||
if _, err := Decode(tomlTableArrays, &got); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(expected, got) {
|
||||
t.Fatalf("\n%#v\n!=\n%#v\n", expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
// Case insensitive matching tests.
|
||||
// A bit more comprehensive than needed given the current implementation,
|
||||
// but implementations change.
|
||||
// Probably still missing demonstrations of some ugly corner cases regarding
|
||||
// case insensitive matching and multiple fields.
|
||||
func TestCase(t *testing.T) {
|
||||
var caseToml = `
|
||||
tOpString = "string"
|
||||
tOpInt = 1
|
||||
tOpFloat = 1.1
|
||||
tOpBool = true
|
||||
tOpdate = 2006-01-02T15:04:05Z
|
||||
tOparray = [ "array" ]
|
||||
Match = "i should be in Match only"
|
||||
MatcH = "i should be in MatcH only"
|
||||
once = "just once"
|
||||
[nEst.eD]
|
||||
nEstedString = "another string"
|
||||
`
|
||||
|
||||
type InsensitiveEd struct {
|
||||
NestedString string
|
||||
}
|
||||
|
||||
type InsensitiveNest struct {
|
||||
Ed InsensitiveEd
|
||||
}
|
||||
|
||||
type Insensitive struct {
|
||||
TopString string
|
||||
TopInt int
|
||||
TopFloat float64
|
||||
TopBool bool
|
||||
TopDate time.Time
|
||||
TopArray []string
|
||||
Match string
|
||||
MatcH string
|
||||
Once string
|
||||
OncE string
|
||||
Nest InsensitiveNest
|
||||
}
|
||||
|
||||
tme, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
expected := Insensitive{
|
||||
TopString: "string",
|
||||
TopInt: 1,
|
||||
TopFloat: 1.1,
|
||||
TopBool: true,
|
||||
TopDate: tme,
|
||||
TopArray: []string{"array"},
|
||||
MatcH: "i should be in MatcH only",
|
||||
Match: "i should be in Match only",
|
||||
Once: "just once",
|
||||
OncE: "",
|
||||
Nest: InsensitiveNest{
|
||||
Ed: InsensitiveEd{NestedString: "another string"},
|
||||
},
|
||||
}
|
||||
var got Insensitive
|
||||
if _, err := Decode(caseToml, &got); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(expected, got) {
|
||||
t.Fatalf("\n%#v\n!=\n%#v\n", expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPointers(t *testing.T) {
|
||||
type Object struct {
|
||||
Type string
|
||||
Description string
|
||||
}
|
||||
|
||||
type Dict struct {
|
||||
NamedObject map[string]*Object
|
||||
BaseObject *Object
|
||||
Strptr *string
|
||||
Strptrs []*string
|
||||
}
|
||||
s1, s2, s3 := "blah", "abc", "def"
|
||||
expected := &Dict{
|
||||
Strptr: &s1,
|
||||
Strptrs: []*string{&s2, &s3},
|
||||
NamedObject: map[string]*Object{
|
||||
"foo": {"FOO", "fooooo!!!"},
|
||||
"bar": {"BAR", "ba-ba-ba-ba-barrrr!!!"},
|
||||
},
|
||||
BaseObject: &Object{"BASE", "da base"},
|
||||
}
|
||||
|
||||
ex1 := `
|
||||
Strptr = "blah"
|
||||
Strptrs = ["abc", "def"]
|
||||
|
||||
[NamedObject.foo]
|
||||
Type = "FOO"
|
||||
Description = "fooooo!!!"
|
||||
|
||||
[NamedObject.bar]
|
||||
Type = "BAR"
|
||||
Description = "ba-ba-ba-ba-barrrr!!!"
|
||||
|
||||
[BaseObject]
|
||||
Type = "BASE"
|
||||
Description = "da base"
|
||||
`
|
||||
dict := new(Dict)
|
||||
_, err := Decode(ex1, dict)
|
||||
if err != nil {
|
||||
t.Errorf("Decode error: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(expected, dict) {
|
||||
t.Fatalf("\n%#v\n!=\n%#v\n", expected, dict)
|
||||
}
|
||||
}
|
||||
|
||||
type sphere struct {
|
||||
Center [3]float64
|
||||
Radius float64
|
||||
}
|
||||
|
||||
func TestDecodeSimpleArray(t *testing.T) {
|
||||
var s1 sphere
|
||||
if _, err := Decode(`center = [0.0, 1.5, 0.0]`, &s1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeArrayWrongSize(t *testing.T) {
|
||||
var s1 sphere
|
||||
if _, err := Decode(`center = [0.1, 2.3]`, &s1); err == nil {
|
||||
t.Fatal("Expected array type mismatch error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeLargeIntoSmallInt(t *testing.T) {
|
||||
type table struct {
|
||||
Value int8
|
||||
}
|
||||
var tab table
|
||||
if _, err := Decode(`value = 500`, &tab); err == nil {
|
||||
t.Fatal("Expected integer out-of-bounds error.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeSizedInts(t *testing.T) {
|
||||
type table struct {
|
||||
U8 uint8
|
||||
U16 uint16
|
||||
U32 uint32
|
||||
U64 uint64
|
||||
U uint
|
||||
I8 int8
|
||||
I16 int16
|
||||
I32 int32
|
||||
I64 int64
|
||||
I int
|
||||
}
|
||||
answer := table{1, 1, 1, 1, 1, -1, -1, -1, -1, -1}
|
||||
toml := `
|
||||
u8 = 1
|
||||
u16 = 1
|
||||
u32 = 1
|
||||
u64 = 1
|
||||
u = 1
|
||||
i8 = -1
|
||||
i16 = -1
|
||||
i32 = -1
|
||||
i64 = -1
|
||||
i = -1
|
||||
`
|
||||
var tab table
|
||||
if _, err := Decode(toml, &tab); err != nil {
|
||||
t.Fatal(err.Error())
|
||||
}
|
||||
if answer != tab {
|
||||
t.Fatalf("Expected %#v but got %#v", answer, tab)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleMetaData_PrimitiveDecode() {
|
||||
var md MetaData
|
||||
var err error
|
||||
|
||||
var tomlBlob = `
|
||||
ranking = ["Springsteen", "J Geils"]
|
||||
|
||||
[bands.Springsteen]
|
||||
started = 1973
|
||||
albums = ["Greetings", "WIESS", "Born to Run", "Darkness"]
|
||||
|
||||
[bands.J Geils]
|
||||
started = 1970
|
||||
albums = ["The J. Geils Band", "Full House", "Blow Your Face Out"]
|
||||
`
|
||||
|
||||
type band struct {
|
||||
Started int
|
||||
Albums []string
|
||||
}
|
||||
type classics struct {
|
||||
Ranking []string
|
||||
Bands map[string]Primitive
|
||||
}
|
||||
|
||||
// Do the initial decode. Reflection is delayed on Primitive values.
|
||||
var music classics
|
||||
if md, err = Decode(tomlBlob, &music); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// MetaData still includes information on Primitive values.
|
||||
fmt.Printf("Is `bands.Springsteen` defined? %v\n",
|
||||
md.IsDefined("bands", "Springsteen"))
|
||||
|
||||
// Decode primitive data into Go values.
|
||||
for _, artist := range music.Ranking {
|
||||
// A band is a primitive value, so we need to decode it to get a
|
||||
// real `band` value.
|
||||
primValue := music.Bands[artist]
|
||||
|
||||
var aBand band
|
||||
if err = md.PrimitiveDecode(primValue, &aBand); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Printf("%s started in %d.\n", artist, aBand.Started)
|
||||
}
|
||||
// Check to see if there were any fields left undecoded.
|
||||
// Note that this won't be empty before decoding the Primitive value!
|
||||
fmt.Printf("Undecoded: %q\n", md.Undecoded())
|
||||
|
||||
// Output:
|
||||
// Is `bands.Springsteen` defined? true
|
||||
// Springsteen started in 1973.
|
||||
// J Geils started in 1970.
|
||||
// Undecoded: []
|
||||
}
|
||||
|
||||
func ExampleDecode() {
|
||||
var tomlBlob = `
|
||||
# Some comments.
|
||||
[alpha]
|
||||
ip = "10.0.0.1"
|
||||
|
||||
[alpha.config]
|
||||
Ports = [ 8001, 8002 ]
|
||||
Location = "Toronto"
|
||||
Created = 1987-07-05T05:45:00Z
|
||||
|
||||
[beta]
|
||||
ip = "10.0.0.2"
|
||||
|
||||
[beta.config]
|
||||
Ports = [ 9001, 9002 ]
|
||||
Location = "New Jersey"
|
||||
Created = 1887-01-05T05:55:00Z
|
||||
`
|
||||
|
||||
type serverConfig struct {
|
||||
Ports []int
|
||||
Location string
|
||||
Created time.Time
|
||||
}
|
||||
|
||||
type server struct {
|
||||
IP string `toml:"ip"`
|
||||
Config serverConfig `toml:"config"`
|
||||
}
|
||||
|
||||
type servers map[string]server
|
||||
|
||||
var config servers
|
||||
if _, err := Decode(tomlBlob, &config); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, name := range []string{"alpha", "beta"} {
|
||||
s := config[name]
|
||||
fmt.Printf("Server: %s (ip: %s) in %s created on %s\n",
|
||||
name, s.IP, s.Config.Location,
|
||||
s.Config.Created.Format("2006-01-02"))
|
||||
fmt.Printf("Ports: %v\n", s.Config.Ports)
|
||||
}
|
||||
|
||||
// Output:
|
||||
// Server: alpha (ip: 10.0.0.1) in Toronto created on 1987-07-05
|
||||
// Ports: [8001 8002]
|
||||
// Server: beta (ip: 10.0.0.2) in New Jersey created on 1887-01-05
|
||||
// Ports: [9001 9002]
|
||||
}
|
||||
|
||||
type duration struct {
|
||||
time.Duration
|
||||
}
|
||||
|
||||
func (d *duration) UnmarshalText(text []byte) error {
|
||||
var err error
|
||||
d.Duration, err = time.ParseDuration(string(text))
|
||||
return err
|
||||
}
|
||||
|
||||
// Example Unmarshaler shows how to decode TOML strings into your own
|
||||
// custom data type.
|
||||
func Example_unmarshaler() {
|
||||
blob := `
|
||||
[[song]]
|
||||
name = "Thunder Road"
|
||||
duration = "4m49s"
|
||||
|
||||
[[song]]
|
||||
name = "Stairway to Heaven"
|
||||
duration = "8m03s"
|
||||
`
|
||||
type song struct {
|
||||
Name string
|
||||
Duration duration
|
||||
}
|
||||
type songs struct {
|
||||
Song []song
|
||||
}
|
||||
var favorites songs
|
||||
if _, err := Decode(blob, &favorites); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Code to implement the TextUnmarshaler interface for `duration`:
|
||||
//
|
||||
// type duration struct {
|
||||
// time.Duration
|
||||
// }
|
||||
//
|
||||
// func (d *duration) UnmarshalText(text []byte) error {
|
||||
// var err error
|
||||
// d.Duration, err = time.ParseDuration(string(text))
|
||||
// return err
|
||||
// }
|
||||
|
||||
for _, s := range favorites.Song {
|
||||
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
|
||||
}
|
||||
// Output:
|
||||
// Thunder Road (4m49s)
|
||||
// Stairway to Heaven (8m3s)
|
||||
}
|
||||
|
||||
// Example StrictDecoding shows how to detect whether there are keys in the
|
||||
// TOML document that weren't decoded into the value given. This is useful
|
||||
// for returning an error to the user if they've included extraneous fields
|
||||
// in their configuration.
|
||||
func Example_strictDecoding() {
|
||||
var blob = `
|
||||
key1 = "value1"
|
||||
key2 = "value2"
|
||||
key3 = "value3"
|
||||
`
|
||||
type config struct {
|
||||
Key1 string
|
||||
Key3 string
|
||||
}
|
||||
|
||||
var conf config
|
||||
md, err := Decode(blob, &conf)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Printf("Undecoded keys: %q\n", md.Undecoded())
|
||||
// Output:
|
||||
// Undecoded keys: ["key2"]
|
||||
}
|
|
@ -0,0 +1,27 @@
|
|||
/*
|
||||
Package toml provides facilities for decoding and encoding TOML configuration
|
||||
files via reflection. There is also support for delaying decoding with
|
||||
the Primitive type, and querying the set of keys in a TOML document with the
|
||||
MetaData type.
|
||||
|
||||
The specification implemented: https://github.com/mojombo/toml
|
||||
|
||||
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
|
||||
whether a file is a valid TOML document. It can also be used to print the
|
||||
type of each key in a TOML document.
|
||||
|
||||
Testing
|
||||
|
||||
There are two important types of tests used for this package. The first is
|
||||
contained inside '*_test.go' files and uses the standard Go unit testing
|
||||
framework. These tests are primarily devoted to holistically testing the
|
||||
decoder and encoder.
|
||||
|
||||
The second type of testing is used to verify the implementation's adherence
|
||||
to the TOML specification. These tests have been factored into their own
|
||||
project: https://github.com/BurntSushi/toml-test
|
||||
|
||||
The reason the tests are in a separate project is so that they can be used by
|
||||
any implementation of TOML. Namely, it is language agnostic.
|
||||
*/
|
||||
package toml
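// Editor's sketch, not part of the upstream file: a minimal, self-contained
// illustration of the Decode function and the MetaData type mentioned above,
// written in the commented style used by Example_unmarshaler. The
// MetaData.IsDefined method is assumed here; it is not shown in this diff.
//
//	package main
//
//	import (
//		"fmt"
//		"log"
//
//		"github.com/BurntSushi/toml"
//	)
//
//	func main() {
//		var doc map[string]interface{}
//		md, err := toml.Decode("[alpha]\nip = \"10.0.0.1\"\n", &doc)
//		if err != nil {
//			log.Fatal(err)
//		}
//		// Query which keys appeared in the document.
//		fmt.Println(md.IsDefined("alpha", "ip")) // true
//		fmt.Println(doc["alpha"])                // map[ip:10.0.0.1]
//	}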
|
|
@@ -0,0 +1,521 @@
|
|||
package toml
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
type tomlEncodeError struct{ error }
|
||||
|
||||
var (
|
||||
errArrayMixedElementTypes = errors.New(
|
||||
"can't encode array with mixed element types")
|
||||
errArrayNilElement = errors.New(
|
||||
"can't encode array with nil element")
|
||||
errNonString = errors.New(
|
||||
"can't encode a map with non-string key type")
|
||||
errAnonNonStruct = errors.New(
|
||||
"can't encode an anonymous field that is not a struct")
|
||||
errArrayNoTable = errors.New(
|
||||
"TOML array element can't contain a table")
|
||||
errNoKey = errors.New(
|
||||
"top-level values must be a Go map or struct")
|
||||
errAnything = errors.New("") // used in testing
|
||||
)
|
||||
|
||||
var quotedReplacer = strings.NewReplacer(
|
||||
"\t", "\\t",
|
||||
"\n", "\\n",
|
||||
"\r", "\\r",
|
||||
"\"", "\\\"",
|
||||
"\\", "\\\\",
|
||||
)
|
||||
|
||||
// Encoder controls the encoding of Go values into a TOML document written to
// some io.Writer.
|
||||
//
|
||||
// The indentation level can be controlled with the Indent field.
|
||||
type Encoder struct {
|
||||
// A single indentation level. By default it is two spaces.
|
||||
Indent string
|
||||
|
||||
// hasWritten is whether we have written any output to w yet.
|
||||
hasWritten bool
|
||||
w *bufio.Writer
|
||||
}
|
||||
|
||||
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
|
||||
// given. By default, a single indentation level is 2 spaces.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{
|
||||
w: bufio.NewWriter(w),
|
||||
Indent: " ",
|
||||
}
|
||||
}
|
||||
|
||||
// Encode writes a TOML representation of the Go value to the underlying
|
||||
// io.Writer. If the value given cannot be encoded to a valid TOML document,
|
||||
// then an error is returned.
|
||||
//
|
||||
// The mapping between Go values and TOML values should be precisely the same
|
||||
// as for the Decode* functions. Similarly, the TextMarshaler interface is
|
||||
// supported by encoding the resulting bytes as strings. (If you want to write
|
||||
// arbitrary binary data then you will need to use something like base64 since
|
||||
// TOML does not have any binary types.)
|
||||
//
|
||||
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
|
||||
// sub-hashes are encoded first.
|
||||
//
|
||||
// If a Go map is encoded, then its keys are sorted alphabetically for
|
||||
// deterministic output. More control over this behavior may be provided if
|
||||
// there is demand for it.
|
||||
//
|
||||
// Encoding Go values without a corresponding TOML representation---like map
|
||||
// types with non-string keys---will cause an error to be returned. Similarly
|
||||
// for mixed arrays/slices, arrays/slices with nil elements, embedded
|
||||
// non-struct types and nested slices containing maps or structs.
|
||||
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
|
||||
// and so is []map[string][]string.)
|
||||
func (enc *Encoder) Encode(v interface{}) error {
|
||||
rv := eindirect(reflect.ValueOf(v))
|
||||
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
|
||||
return err
|
||||
}
|
||||
return enc.w.Flush()
|
||||
}
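// Editor's sketch, not part of the upstream file: an illustration of the
// TextMarshaler behaviour described in the comment above. The logLevel type
// and exampleEncodeTextMarshaler helper are hypothetical; only NewEncoder,
// Encode and the imported io package are taken from this file.
type logLevel int

func (l logLevel) MarshalText() ([]byte, error) {
	if l == 0 {
		return []byte("debug"), nil
	}
	return []byte("info"), nil
}

func exampleEncodeTextMarshaler(w io.Writer) error {
	cfg := struct {
		Name  string
		Level logLevel
	}{Name: "example", Level: 0}
	// Because logLevel implements MarshalText, Level is written as the quoted
	// TOML string "debug" rather than as the integer 0:
	//
	//   Name = "example"
	//   Level = "debug"
	return NewEncoder(w).Encode(cfg)
}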
|
||||
|
||||
func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
if terr, ok := r.(tomlEncodeError); ok {
|
||||
err = terr.error
|
||||
return
|
||||
}
|
||||
panic(r)
|
||||
}
|
||||
}()
|
||||
enc.encode(key, rv)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (enc *Encoder) encode(key Key, rv reflect.Value) {
|
||||
// Special case. Time needs to be in ISO8601 format.
|
||||
// Special case. If we can marshal the type to text, then we use that.
// Basically, this prevents the encoder from handling these types as
|
||||
// generic structs (or whatever the underlying type of a TextMarshaler is).
|
||||
switch rv.Interface().(type) {
|
||||
case time.Time, TextMarshaler:
|
||||
enc.keyEqElement(key, rv)
|
||||
return
|
||||
}
|
||||
|
||||
k := rv.Kind()
|
||||
switch k {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
|
||||
reflect.Uint64,
|
||||
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
|
||||
enc.keyEqElement(key, rv)
|
||||
case reflect.Array, reflect.Slice:
|
||||
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
|
||||
enc.eArrayOfTables(key, rv)
|
||||
} else {
|
||||
enc.keyEqElement(key, rv)
|
||||
}
|
||||
case reflect.Interface:
|
||||
if rv.IsNil() {
|
||||
return
|
||||
}
|
||||
enc.encode(key, rv.Elem())
|
||||
case reflect.Map:
|
||||
if rv.IsNil() {
|
||||
return
|
||||
}
|
||||
enc.eTable(key, rv)
|
||||
case reflect.Ptr:
|
||||
if rv.IsNil() {
|
||||
return
|
||||
}
|
||||
enc.encode(key, rv.Elem())
|
||||
case reflect.Struct:
|
||||
enc.eTable(key, rv)
|
||||
default:
|
||||
panic(e("Unsupported type for key '%s': %s", key, k))
|
||||
}
|
||||
}
|
||||
|
||||
// eElement encodes any value that can be an array element (primitives and
|
||||
// arrays).
|
||||
func (enc *Encoder) eElement(rv reflect.Value) {
|
||||
switch v := rv.Interface().(type) {
|
||||
case time.Time:
|
||||
// Special case time.Time as a primitive. Has to come before
|
||||
// TextMarshaler below because time.Time implements
|
||||
// encoding.TextMarshaler, but we need to always use UTC.
|
||||
enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z"))
|
||||
return
|
||||
case TextMarshaler:
|
||||
// Special case. Use text marshaler if it's available for this value.
|
||||
if s, err := v.MarshalText(); err != nil {
|
||||
encPanic(err)
|
||||
} else {
|
||||
enc.writeQuoted(string(s))
|
||||
}
|
||||
return
|
||||
}
|
||||
switch rv.Kind() {
|
||||
case reflect.Bool:
|
||||
enc.wf(strconv.FormatBool(rv.Bool()))
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
enc.wf(strconv.FormatInt(rv.Int(), 10))
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16,
|
||||
reflect.Uint32, reflect.Uint64:
|
||||
enc.wf(strconv.FormatUint(rv.Uint(), 10))
|
||||
case reflect.Float32:
|
||||
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
|
||||
case reflect.Float64:
|
||||
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
|
||||
case reflect.Array, reflect.Slice:
|
||||
enc.eArrayOrSliceElement(rv)
|
||||
case reflect.Interface:
|
||||
enc.eElement(rv.Elem())
|
||||
case reflect.String:
|
||||
enc.writeQuoted(rv.String())
|
||||
default:
|
||||
panic(e("Unexpected primitive type: %s", rv.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
// By the TOML spec, all floats must have a decimal with at least one
|
||||
// number on either side.
|
||||
func floatAddDecimal(fstr string) string {
|
||||
if !strings.Contains(fstr, ".") {
|
||||
return fstr + ".0"
|
||||
}
|
||||
return fstr
|
||||
}
|
||||
|
||||
func (enc *Encoder) writeQuoted(s string) {
|
||||
enc.wf("\"%s\"", quotedReplacer.Replace(s))
|
||||
}
|
||||
|
||||
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
|
||||
length := rv.Len()
|
||||
enc.wf("[")
|
||||
for i := 0; i < length; i++ {
|
||||
elem := rv.Index(i)
|
||||
enc.eElement(elem)
|
||||
if i != length-1 {
|
||||
enc.wf(", ")
|
||||
}
|
||||
}
|
||||
enc.wf("]")
|
||||
}
|
||||
|
||||
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
|
||||
if len(key) == 0 {
|
||||
encPanic(errNoKey)
|
||||
}
|
||||
panicIfInvalidKey(key, true)
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
trv := rv.Index(i)
|
||||
if isNil(trv) {
|
||||
continue
|
||||
}
|
||||
enc.newline()
|
||||
enc.wf("%s[[%s]]", enc.indentStr(key), key.String())
|
||||
enc.newline()
|
||||
enc.eMapOrStruct(key, trv)
|
||||
}
|
||||
}
|
||||
|
||||
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
|
||||
if len(key) == 1 {
|
||||
// Output an extra new line between top-level tables.
|
||||
// (The newline isn't written if nothing else has been written though.)
|
||||
enc.newline()
|
||||
}
|
||||
if len(key) > 0 {
|
||||
panicIfInvalidKey(key, true)
|
||||
enc.wf("%s[%s]", enc.indentStr(key), key.String())
|
||||
enc.newline()
|
||||
}
|
||||
enc.eMapOrStruct(key, rv)
|
||||
}
|
||||
|
||||
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
|
||||
switch rv := eindirect(rv); rv.Kind() {
|
||||
case reflect.Map:
|
||||
enc.eMap(key, rv)
|
||||
case reflect.Struct:
|
||||
enc.eStruct(key, rv)
|
||||
default:
|
||||
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
|
||||
}
|
||||
}
|
||||
|
||||
func (enc *Encoder) eMap(key Key, rv reflect.Value) {
|
||||
rt := rv.Type()
|
||||
if rt.Key().Kind() != reflect.String {
|
||||
encPanic(errNonString)
|
||||
}
|
||||
|
||||
// Sort keys so that we have deterministic output. And write keys directly
|
||||
// underneath this key first, before writing sub-structs or sub-maps.
|
||||
var mapKeysDirect, mapKeysSub []string
|
||||
for _, mapKey := range rv.MapKeys() {
|
||||
k := mapKey.String()
|
||||
if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
|
||||
mapKeysSub = append(mapKeysSub, k)
|
||||
} else {
|
||||
mapKeysDirect = append(mapKeysDirect, k)
|
||||
}
|
||||
}
|
||||
|
||||
var writeMapKeys = func(mapKeys []string) {
|
||||
sort.Strings(mapKeys)
|
||||
for _, mapKey := range mapKeys {
|
||||
mrv := rv.MapIndex(reflect.ValueOf(mapKey))
|
||||
if isNil(mrv) {
|
||||
// Don't write anything for nil fields.
|
||||
continue
|
||||
}
|
||||
enc.encode(key.add(mapKey), mrv)
|
||||
}
|
||||
}
|
||||
writeMapKeys(mapKeysDirect)
|
||||
writeMapKeys(mapKeysSub)
|
||||
}
|
||||
|
||||
func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
|
||||
// Write keys for fields directly under this key first, because if we write
|
||||
// a field that creates a new table, then all keys under it will be in that
|
||||
// table (not the one we're writing here).
|
||||
rt := rv.Type()
|
||||
var fieldsDirect, fieldsSub [][]int
|
||||
var addFields func(rt reflect.Type, rv reflect.Value, start []int)
|
||||
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
|
||||
for i := 0; i < rt.NumField(); i++ {
|
||||
f := rt.Field(i)
|
||||
// skip unexported fields
|
||||
if f.PkgPath != "" {
|
||||
continue
|
||||
}
|
||||
frv := rv.Field(i)
|
||||
if f.Anonymous {
|
||||
frv := eindirect(frv)
|
||||
t := frv.Type()
|
||||
if t.Kind() != reflect.Struct {
|
||||
encPanic(errAnonNonStruct)
|
||||
}
|
||||
addFields(t, frv, f.Index)
|
||||
} else if typeIsHash(tomlTypeOfGo(frv)) {
|
||||
fieldsSub = append(fieldsSub, append(start, f.Index...))
|
||||
} else {
|
||||
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
|
||||
}
|
||||
}
|
||||
}
|
||||
addFields(rt, rv, nil)
|
||||
|
||||
var writeFields = func(fields [][]int) {
|
||||
for _, fieldIndex := range fields {
|
||||
sft := rt.FieldByIndex(fieldIndex)
|
||||
sf := rv.FieldByIndex(fieldIndex)
|
||||
if isNil(sf) {
|
||||
// Don't write anything for nil fields.
|
||||
continue
|
||||
}
|
||||
|
||||
keyName := sft.Tag.Get("toml")
|
||||
if keyName == "-" {
|
||||
continue
|
||||
}
|
||||
if keyName == "" {
|
||||
keyName = sft.Name
|
||||
}
|
||||
enc.encode(key.add(keyName), sf)
|
||||
}
|
||||
}
|
||||
writeFields(fieldsDirect)
|
||||
writeFields(fieldsSub)
|
||||
}
|
||||
|
||||
// tomlTypeOfGo returns the TOML type of a Go value. The type may be `nil`,
// which means no concrete TOML type could be found. It is used, among other
// things, to determine whether the types of array elements are mixed (which
// is forbidden).
|
||||
func tomlTypeOfGo(rv reflect.Value) tomlType {
|
||||
if isNil(rv) || !rv.IsValid() {
|
||||
return nil
|
||||
}
|
||||
switch rv.Kind() {
|
||||
case reflect.Bool:
|
||||
return tomlBool
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
|
||||
reflect.Uint64:
|
||||
return tomlInteger
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return tomlFloat
|
||||
case reflect.Array, reflect.Slice:
|
||||
if typeEqual(tomlHash, tomlArrayType(rv)) {
|
||||
return tomlArrayHash
|
||||
} else {
|
||||
return tomlArray
|
||||
}
|
||||
case reflect.Ptr, reflect.Interface:
|
||||
return tomlTypeOfGo(rv.Elem())
|
||||
case reflect.String:
|
||||
return tomlString
|
||||
case reflect.Map:
|
||||
return tomlHash
|
||||
case reflect.Struct:
|
||||
switch rv.Interface().(type) {
|
||||
case time.Time:
|
||||
return tomlDatetime
|
||||
case TextMarshaler:
|
||||
return tomlString
|
||||
default:
|
||||
return tomlHash
|
||||
}
|
||||
default:
|
||||
panic("unexpected reflect.Kind: " + rv.Kind().String())
|
||||
}
|
||||
}
|
||||
|
||||
// tomlArrayType returns the element type of a TOML array. The type returned
|
||||
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
|
||||
// slice). This function may also panic if it finds a type that cannot be
|
||||
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
|
||||
// nested arrays of tables).
|
||||
func tomlArrayType(rv reflect.Value) tomlType {
|
||||
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
firstType := tomlTypeOfGo(rv.Index(0))
|
||||
if firstType == nil {
|
||||
encPanic(errArrayNilElement)
|
||||
}
|
||||
|
||||
rvlen := rv.Len()
|
||||
for i := 1; i < rvlen; i++ {
|
||||
elem := rv.Index(i)
|
||||
switch elemType := tomlTypeOfGo(elem); {
|
||||
case elemType == nil:
|
||||
encPanic(errArrayNilElement)
|
||||
case !typeEqual(firstType, elemType):
|
||||
encPanic(errArrayMixedElementTypes)
|
||||
}
|
||||
}
|
||||
// If we have a nested array, then we must make sure that the nested
|
||||
// array contains ONLY primitives.
|
||||
// This checks arbitrarily nested arrays.
|
||||
if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
|
||||
nest := tomlArrayType(eindirect(rv.Index(0)))
|
||||
if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
|
||||
encPanic(errArrayNoTable)
|
||||
}
|
||||
}
|
||||
return firstType
|
||||
}
|
||||
|
||||
func (enc *Encoder) newline() {
|
||||
if enc.hasWritten {
|
||||
enc.wf("\n")
|
||||
}
|
||||
}
|
||||
|
||||
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
|
||||
if len(key) == 0 {
|
||||
encPanic(errNoKey)
|
||||
}
|
||||
panicIfInvalidKey(key, false)
|
||||
enc.wf("%s%s = ", enc.indentStr(key), key[len(key)-1])
|
||||
enc.eElement(val)
|
||||
enc.newline()
|
||||
}
|
||||
|
||||
func (enc *Encoder) wf(format string, v ...interface{}) {
|
||||
if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
|
||||
encPanic(err)
|
||||
}
|
||||
enc.hasWritten = true
|
||||
}
|
||||
|
||||
func (enc *Encoder) indentStr(key Key) string {
|
||||
return strings.Repeat(enc.Indent, len(key)-1)
|
||||
}
|
||||
|
||||
func encPanic(err error) {
|
||||
panic(tomlEncodeError{err})
|
||||
}
|
||||
|
||||
func eindirect(v reflect.Value) reflect.Value {
|
||||
switch v.Kind() {
|
||||
case reflect.Ptr, reflect.Interface:
|
||||
return eindirect(v.Elem())
|
||||
default:
|
||||
return v
|
||||
}
|
||||
}
|
||||
|
||||
func isNil(rv reflect.Value) bool {
|
||||
switch rv.Kind() {
|
||||
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return rv.IsNil()
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func panicIfInvalidKey(key Key, hash bool) {
|
||||
if hash {
|
||||
for _, k := range key {
|
||||
if !isValidTableName(k) {
|
||||
encPanic(e("Key '%s' is not a valid table name. Table names "+
|
||||
"cannot contain '[', ']' or '.'.", key.String()))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if !isValidKeyName(key[len(key)-1]) {
|
||||
encPanic(e("Key '%s' is not a valid key name. Key names "+
|
||||
"cannot contain whitespace.", key.String()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func isValidTableName(s string) bool {
|
||||
if len(s) == 0 {
|
||||
return false
|
||||
}
|
||||
for _, r := range s {
|
||||
if r == '[' || r == ']' || r == '.' {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func isValidKeyName(s string) bool {
|
||||
if len(s) == 0 {
|
||||
return false
|
||||
}
|
||||
for _, r := range s {
|
||||
if unicode.IsSpace(r) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
|
@@ -0,0 +1,506 @@
|
|||
package toml
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestEncodeRoundTrip(t *testing.T) {
|
||||
type Config struct {
|
||||
Age int
|
||||
Cats []string
|
||||
Pi float64
|
||||
Perfection []int
|
||||
DOB time.Time
|
||||
Ipaddress net.IP
|
||||
}
|
||||
|
||||
var inputs = Config{
|
||||
13,
|
||||
[]string{"one", "two", "three"},
|
||||
3.145,
|
||||
[]int{11, 2, 3, 4},
|
||||
time.Now(),
|
||||
net.ParseIP("192.168.59.254"),
|
||||
}
|
||||
|
||||
var firstBuffer bytes.Buffer
|
||||
e := NewEncoder(&firstBuffer)
|
||||
err := e.Encode(inputs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var outputs Config
|
||||
if _, err := Decode(firstBuffer.String(), &outputs); err != nil {
|
||||
log.Printf("Could not decode:\n-----\n%s\n-----\n",
|
||||
firstBuffer.String())
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// could test each value individually, but I'm lazy
|
||||
var secondBuffer bytes.Buffer
|
||||
e2 := NewEncoder(&secondBuffer)
|
||||
err = e2.Encode(outputs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if firstBuffer.String() != secondBuffer.String() {
|
||||
t.Error(
|
||||
firstBuffer.String(),
|
||||
"\n\n is not identical to\n\n",
|
||||
secondBuffer.String())
|
||||
}
|
||||
}
|
||||
|
||||
// XXX(burntsushi)
|
||||
// I think these tests probably should be removed. They are good, but they
|
||||
// ought to be obsolete by toml-test.
|
||||
func TestEncode(t *testing.T) {
|
||||
type Embedded struct {
|
||||
Int int `toml:"_int"`
|
||||
}
|
||||
type NonStruct int
|
||||
|
||||
date := time.Date(2014, 5, 11, 20, 30, 40, 0, time.FixedZone("IST", 3600))
|
||||
dateStr := "2014-05-11T19:30:40Z"
|
||||
|
||||
tests := map[string]struct {
|
||||
input interface{}
|
||||
wantOutput string
|
||||
wantError error
|
||||
}{
|
||||
"bool field": {
|
||||
input: struct {
|
||||
BoolTrue bool
|
||||
BoolFalse bool
|
||||
}{true, false},
|
||||
wantOutput: "BoolTrue = true\nBoolFalse = false\n",
|
||||
},
|
||||
"int fields": {
|
||||
input: struct {
|
||||
Int int
|
||||
Int8 int8
|
||||
Int16 int16
|
||||
Int32 int32
|
||||
Int64 int64
|
||||
}{1, 2, 3, 4, 5},
|
||||
wantOutput: "Int = 1\nInt8 = 2\nInt16 = 3\nInt32 = 4\nInt64 = 5\n",
|
||||
},
|
||||
"uint fields": {
|
||||
input: struct {
|
||||
Uint uint
|
||||
Uint8 uint8
|
||||
Uint16 uint16
|
||||
Uint32 uint32
|
||||
Uint64 uint64
|
||||
}{1, 2, 3, 4, 5},
|
||||
wantOutput: "Uint = 1\nUint8 = 2\nUint16 = 3\nUint32 = 4" +
|
||||
"\nUint64 = 5\n",
|
||||
},
|
||||
"float fields": {
|
||||
input: struct {
|
||||
Float32 float32
|
||||
Float64 float64
|
||||
}{1.5, 2.5},
|
||||
wantOutput: "Float32 = 1.5\nFloat64 = 2.5\n",
|
||||
},
|
||||
"string field": {
|
||||
input: struct{ String string }{"foo"},
|
||||
wantOutput: "String = \"foo\"\n",
|
||||
},
|
||||
"string field and unexported field": {
|
||||
input: struct {
|
||||
String string
|
||||
unexported int
|
||||
}{"foo", 0},
|
||||
wantOutput: "String = \"foo\"\n",
|
||||
},
|
||||
"datetime field in UTC": {
|
||||
input: struct{ Date time.Time }{date},
|
||||
wantOutput: fmt.Sprintf("Date = %s\n", dateStr),
|
||||
},
|
||||
"datetime field as primitive": {
|
||||
// Using a map here to fail if isStructOrMap() returns true for
|
||||
// time.Time.
|
||||
input: map[string]interface{}{
|
||||
"Date": date,
|
||||
"Int": 1,
|
||||
},
|
||||
wantOutput: fmt.Sprintf("Date = %s\nInt = 1\n", dateStr),
|
||||
},
|
||||
"array fields": {
|
||||
input: struct {
|
||||
IntArray0 [0]int
|
||||
IntArray3 [3]int
|
||||
}{[0]int{}, [3]int{1, 2, 3}},
|
||||
wantOutput: "IntArray0 = []\nIntArray3 = [1, 2, 3]\n",
|
||||
},
|
||||
"slice fields": {
|
||||
input: struct{ IntSliceNil, IntSlice0, IntSlice3 []int }{
|
||||
nil, []int{}, []int{1, 2, 3},
|
||||
},
|
||||
wantOutput: "IntSlice0 = []\nIntSlice3 = [1, 2, 3]\n",
|
||||
},
|
||||
"datetime slices": {
|
||||
input: struct{ DatetimeSlice []time.Time }{
|
||||
[]time.Time{date, date},
|
||||
},
|
||||
wantOutput: fmt.Sprintf("DatetimeSlice = [%s, %s]\n",
|
||||
dateStr, dateStr),
|
||||
},
|
||||
"nested arrays and slices": {
|
||||
input: struct {
|
||||
SliceOfArrays [][2]int
|
||||
ArrayOfSlices [2][]int
|
||||
SliceOfArraysOfSlices [][2][]int
|
||||
ArrayOfSlicesOfArrays [2][][2]int
|
||||
SliceOfMixedArrays [][2]interface{}
|
||||
ArrayOfMixedSlices [2][]interface{}
|
||||
}{
|
||||
[][2]int{{1, 2}, {3, 4}},
|
||||
[2][]int{{1, 2}, {3, 4}},
|
||||
[][2][]int{
|
||||
{
|
||||
{1, 2}, {3, 4},
|
||||
},
|
||||
{
|
||||
{5, 6}, {7, 8},
|
||||
},
|
||||
},
|
||||
[2][][2]int{
|
||||
{
|
||||
{1, 2}, {3, 4},
|
||||
},
|
||||
{
|
||||
{5, 6}, {7, 8},
|
||||
},
|
||||
},
|
||||
[][2]interface{}{
|
||||
{1, 2}, {"a", "b"},
|
||||
},
|
||||
[2][]interface{}{
|
||||
{1, 2}, {"a", "b"},
|
||||
},
|
||||
},
|
||||
wantOutput: `SliceOfArrays = [[1, 2], [3, 4]]
|
||||
ArrayOfSlices = [[1, 2], [3, 4]]
|
||||
SliceOfArraysOfSlices = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
|
||||
ArrayOfSlicesOfArrays = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
|
||||
SliceOfMixedArrays = [[1, 2], ["a", "b"]]
|
||||
ArrayOfMixedSlices = [[1, 2], ["a", "b"]]
|
||||
`,
|
||||
},
|
||||
"empty slice": {
|
||||
input: struct{ Empty []interface{} }{[]interface{}{}},
|
||||
wantOutput: "Empty = []\n",
|
||||
},
|
||||
"(error) slice with element type mismatch (string and integer)": {
|
||||
input: struct{ Mixed []interface{} }{[]interface{}{1, "a"}},
|
||||
wantError: errArrayMixedElementTypes,
|
||||
},
|
||||
"(error) slice with element type mismatch (integer and float)": {
|
||||
input: struct{ Mixed []interface{} }{[]interface{}{1, 2.5}},
|
||||
wantError: errArrayMixedElementTypes,
|
||||
},
|
||||
"slice with elems of differing Go types, same TOML types": {
|
||||
input: struct {
|
||||
MixedInts []interface{}
|
||||
MixedFloats []interface{}
|
||||
}{
|
||||
[]interface{}{
|
||||
int(1), int8(2), int16(3), int32(4), int64(5),
|
||||
uint(1), uint8(2), uint16(3), uint32(4), uint64(5),
|
||||
},
|
||||
[]interface{}{float32(1.5), float64(2.5)},
|
||||
},
|
||||
wantOutput: "MixedInts = [1, 2, 3, 4, 5, 1, 2, 3, 4, 5]\n" +
|
||||
"MixedFloats = [1.5, 2.5]\n",
|
||||
},
|
||||
"(error) slice w/ element type mismatch (one is nested array)": {
|
||||
input: struct{ Mixed []interface{} }{
|
||||
[]interface{}{1, []interface{}{2}},
|
||||
},
|
||||
wantError: errArrayMixedElementTypes,
|
||||
},
|
||||
"(error) slice with 1 nil element": {
|
||||
input: struct{ NilElement1 []interface{} }{[]interface{}{nil}},
|
||||
wantError: errArrayNilElement,
|
||||
},
|
||||
"(error) slice with 1 nil element (and other non-nil elements)": {
|
||||
input: struct{ NilElement []interface{} }{
|
||||
[]interface{}{1, nil},
|
||||
},
|
||||
wantError: errArrayNilElement,
|
||||
},
|
||||
"simple map": {
|
||||
input: map[string]int{"a": 1, "b": 2},
|
||||
wantOutput: "a = 1\nb = 2\n",
|
||||
},
|
||||
"map with interface{} value type": {
|
||||
input: map[string]interface{}{"a": 1, "b": "c"},
|
||||
wantOutput: "a = 1\nb = \"c\"\n",
|
||||
},
|
||||
"map with interface{} value type, some of which are structs": {
|
||||
input: map[string]interface{}{
|
||||
"a": struct{ Int int }{2},
|
||||
"b": 1,
|
||||
},
|
||||
wantOutput: "b = 1\n\n[a]\n Int = 2\n",
|
||||
},
|
||||
"nested map": {
|
||||
input: map[string]map[string]int{
|
||||
"a": {"b": 1},
|
||||
"c": {"d": 2},
|
||||
},
|
||||
wantOutput: "[a]\n b = 1\n\n[c]\n d = 2\n",
|
||||
},
|
||||
"nested struct": {
|
||||
input: struct{ Struct struct{ Int int } }{
|
||||
struct{ Int int }{1},
|
||||
},
|
||||
wantOutput: "[Struct]\n Int = 1\n",
|
||||
},
|
||||
"nested struct and non-struct field": {
|
||||
input: struct {
|
||||
Struct struct{ Int int }
|
||||
Bool bool
|
||||
}{struct{ Int int }{1}, true},
|
||||
wantOutput: "Bool = true\n\n[Struct]\n Int = 1\n",
|
||||
},
|
||||
"2 nested structs": {
|
||||
input: struct{ Struct1, Struct2 struct{ Int int } }{
|
||||
struct{ Int int }{1}, struct{ Int int }{2},
|
||||
},
|
||||
wantOutput: "[Struct1]\n Int = 1\n\n[Struct2]\n Int = 2\n",
|
||||
},
|
||||
"deeply nested structs": {
|
||||
input: struct {
|
||||
Struct1, Struct2 struct{ Struct3 *struct{ Int int } }
|
||||
}{
|
||||
struct{ Struct3 *struct{ Int int } }{&struct{ Int int }{1}},
|
||||
struct{ Struct3 *struct{ Int int } }{nil},
|
||||
},
|
||||
wantOutput: "[Struct1]\n [Struct1.Struct3]\n Int = 1" +
|
||||
"\n\n[Struct2]\n",
|
||||
},
|
||||
"nested struct with nil struct elem": {
|
||||
input: struct {
|
||||
Struct struct{ Inner *struct{ Int int } }
|
||||
}{
|
||||
struct{ Inner *struct{ Int int } }{nil},
|
||||
},
|
||||
wantOutput: "[Struct]\n",
|
||||
},
|
||||
"nested struct with no fields": {
|
||||
input: struct {
|
||||
Struct struct{ Inner struct{} }
|
||||
}{
|
||||
struct{ Inner struct{} }{struct{}{}},
|
||||
},
|
||||
wantOutput: "[Struct]\n [Struct.Inner]\n",
|
||||
},
|
||||
"struct with tags": {
|
||||
input: struct {
|
||||
Struct struct {
|
||||
Int int `toml:"_int"`
|
||||
} `toml:"_struct"`
|
||||
Bool bool `toml:"_bool"`
|
||||
}{
|
||||
struct {
|
||||
Int int `toml:"_int"`
|
||||
}{1}, true,
|
||||
},
|
||||
wantOutput: "_bool = true\n\n[_struct]\n _int = 1\n",
|
||||
},
|
||||
"embedded struct": {
|
||||
input: struct{ Embedded }{Embedded{1}},
|
||||
wantOutput: "_int = 1\n",
|
||||
},
|
||||
"embedded *struct": {
|
||||
input: struct{ *Embedded }{&Embedded{1}},
|
||||
wantOutput: "_int = 1\n",
|
||||
},
|
||||
"nested embedded struct": {
|
||||
input: struct {
|
||||
Struct struct{ Embedded } `toml:"_struct"`
|
||||
}{struct{ Embedded }{Embedded{1}}},
|
||||
wantOutput: "[_struct]\n _int = 1\n",
|
||||
},
|
||||
"nested embedded *struct": {
|
||||
input: struct {
|
||||
Struct struct{ *Embedded } `toml:"_struct"`
|
||||
}{struct{ *Embedded }{&Embedded{1}}},
|
||||
wantOutput: "[_struct]\n _int = 1\n",
|
||||
},
|
||||
"array of tables": {
|
||||
input: struct {
|
||||
Structs []*struct{ Int int } `toml:"struct"`
|
||||
}{
|
||||
[]*struct{ Int int }{{1}, {3}},
|
||||
},
|
||||
wantOutput: "[[struct]]\n Int = 1\n\n[[struct]]\n Int = 3\n",
|
||||
},
|
||||
"array of tables order": {
|
||||
input: map[string]interface{}{
|
||||
"map": map[string]interface{}{
|
||||
"zero": 5,
|
||||
"arr": []map[string]int{
|
||||
map[string]int{
|
||||
"friend": 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantOutput: "[map]\n zero = 5\n\n [[map.arr]]\n friend = 5\n",
|
||||
},
|
||||
"(error) top-level slice": {
|
||||
input: []struct{ Int int }{{1}, {2}, {3}},
|
||||
wantError: errNoKey,
|
||||
},
|
||||
"(error) slice of slice": {
|
||||
input: struct {
|
||||
Slices [][]struct{ Int int }
|
||||
}{
|
||||
[][]struct{ Int int }{{{1}}, {{2}}, {{3}}},
|
||||
},
|
||||
wantError: errArrayNoTable,
|
||||
},
|
||||
"(error) map no string key": {
|
||||
input: map[int]string{1: ""},
|
||||
wantError: errNonString,
|
||||
},
|
||||
"(error) anonymous non-struct": {
|
||||
input: struct{ NonStruct }{5},
|
||||
wantError: errAnonNonStruct,
|
||||
},
|
||||
"(error) empty key name": {
|
||||
input: map[string]int{"": 1},
|
||||
wantError: errAnything,
|
||||
},
|
||||
"(error) empty map name": {
|
||||
input: map[string]interface{}{
|
||||
"": map[string]int{"v": 1},
|
||||
},
|
||||
wantError: errAnything,
|
||||
},
|
||||
}
|
||||
for label, test := range tests {
|
||||
encodeExpected(t, label, test.input, test.wantOutput, test.wantError)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeNestedTableArrays(t *testing.T) {
|
||||
type song struct {
|
||||
Name string `toml:"name"`
|
||||
}
|
||||
type album struct {
|
||||
Name string `toml:"name"`
|
||||
Songs []song `toml:"songs"`
|
||||
}
|
||||
type springsteen struct {
|
||||
Albums []album `toml:"albums"`
|
||||
}
|
||||
value := springsteen{
|
||||
[]album{
|
||||
{"Born to Run",
|
||||
[]song{{"Jungleland"}, {"Meeting Across the River"}}},
|
||||
{"Born in the USA",
|
||||
[]song{{"Glory Days"}, {"Dancing in the Dark"}}},
|
||||
},
|
||||
}
|
||||
expected := `[[albums]]
|
||||
name = "Born to Run"
|
||||
|
||||
[[albums.songs]]
|
||||
name = "Jungleland"
|
||||
|
||||
[[albums.songs]]
|
||||
name = "Meeting Across the River"
|
||||
|
||||
[[albums]]
|
||||
name = "Born in the USA"
|
||||
|
||||
[[albums.songs]]
|
||||
name = "Glory Days"
|
||||
|
||||
[[albums.songs]]
|
||||
name = "Dancing in the Dark"
|
||||
`
|
||||
encodeExpected(t, "nested table arrays", value, expected, nil)
|
||||
}
|
||||
|
||||
func TestEncodeArrayHashWithNormalHashOrder(t *testing.T) {
|
||||
type Alpha struct {
|
||||
V int
|
||||
}
|
||||
type Beta struct {
|
||||
V int
|
||||
}
|
||||
type Conf struct {
|
||||
V int
|
||||
A Alpha
|
||||
B []Beta
|
||||
}
|
||||
|
||||
val := Conf{
|
||||
V: 1,
|
||||
A: Alpha{2},
|
||||
B: []Beta{{3}},
|
||||
}
|
||||
expected := "V = 1\n\n[A]\n V = 2\n\n[[B]]\n V = 3\n"
|
||||
encodeExpected(t, "array hash with normal hash order", val, expected, nil)
|
||||
}
|
||||
|
||||
func encodeExpected(
|
||||
t *testing.T, label string, val interface{}, wantStr string, wantErr error,
|
||||
) {
|
||||
var buf bytes.Buffer
|
||||
enc := NewEncoder(&buf)
|
||||
err := enc.Encode(val)
|
||||
if err != wantErr {
|
||||
if wantErr != nil {
|
||||
if wantErr == errAnything && err != nil {
|
||||
return
|
||||
}
|
||||
t.Errorf("%s: want Encode error %v, got %v", label, wantErr, err)
|
||||
} else {
|
||||
t.Errorf("%s: Encode failed: %s", label, err)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if got := buf.String(); wantStr != got {
|
||||
t.Errorf("%s: want\n-----\n%q\n-----\nbut got\n-----\n%q\n-----\n",
|
||||
label, wantStr, got)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleEncoder_Encode() {
|
||||
date, _ := time.Parse(time.RFC822, "14 Mar 10 18:00 UTC")
|
||||
var config = map[string]interface{}{
|
||||
"date": date,
|
||||
"counts": []int{1, 1, 2, 3, 5, 8},
|
||||
"hash": map[string]string{
|
||||
"key1": "val1",
|
||||
"key2": "val2",
|
||||
},
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
if err := NewEncoder(buf).Encode(config); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Println(buf.String())
|
||||
|
||||
// Output:
|
||||
// counts = [1, 1, 2, 3, 5, 8]
|
||||
// date = 2010-03-14T18:00:00Z
|
||||
//
|
||||
// [hash]
|
||||
// key1 = "val1"
|
||||
// key2 = "val2"
|
||||
}
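// Editor's sketch, not part of the upstream file: a small example of the
// Indent field documented on Encoder, which defaults to two spaces. Only
// NewEncoder, Encode and Indent from encode.go are assumed.
func ExampleEncoder_Encode_indent() {
	config := map[string]map[string]int{
		"server": {"port": 8080},
	}
	buf := new(bytes.Buffer)
	enc := NewEncoder(buf)
	enc.Indent = "    " // four spaces per nesting level instead of two
	if err := enc.Encode(config); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String())
	// Output:
	// [server]
	//     port = 8080
}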
|
Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types.go (generated, vendored, new file, 19 lines)
|
@@ -0,0 +1,19 @@
|
|||
// +build go1.2
|
||||
|
||||
package toml
|
||||
|
||||
// In order to support Go 1.1, we define our own TextMarshaler and
|
||||
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
|
||||
// standard library interfaces.
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
)
|
||||
|
||||
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
|
||||
// so that Go 1.1 can be supported.
|
||||
type TextMarshaler encoding.TextMarshaler
|
||||
|
||||
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined here
|
||||
// so that Go 1.1 can be supported.
|
||||
type TextUnmarshaler encoding.TextUnmarshaler
|
Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types_1.1.go (generated, vendored, new file, 18 lines)
|
@@ -0,0 +1,18 @@
|
|||
// +build !go1.2
|
||||
|
||||
package toml
|
||||
|
||||
// These interfaces were introduced in Go 1.2, so we add them manually when
|
||||
// compiling for Go 1.1.
|
||||
|
||||
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
|
||||
// so that Go 1.1 can be supported.
|
||||
type TextMarshaler interface {
|
||||
MarshalText() (text []byte, err error)
|
||||
}
|
||||
|
||||
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined here
|
||||
// so that Go 1.1 can be supported.
|
||||
type TextUnmarshaler interface {
|
||||
UnmarshalText(text []byte) error
|
||||
}
|
|
@@ -0,0 +1,725 @@
|
|||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type itemType int
|
||||
|
||||
const (
|
||||
itemError itemType = iota
|
||||
itemNIL // used in the parser to indicate no type
|
||||
itemEOF
|
||||
itemText
|
||||
itemString
|
||||
itemBool
|
||||
itemInteger
|
||||
itemFloat
|
||||
itemDatetime
|
||||
itemArray // the start of an array
|
||||
itemArrayEnd
|
||||
itemTableStart
|
||||
itemTableEnd
|
||||
itemArrayTableStart
|
||||
itemArrayTableEnd
|
||||
itemKeyStart
|
||||
itemCommentStart
|
||||
)
|
||||
|
||||
const (
|
||||
eof = 0
|
||||
tableStart = '['
|
||||
tableEnd = ']'
|
||||
arrayTableStart = '['
|
||||
arrayTableEnd = ']'
|
||||
tableSep = '.'
|
||||
keySep = '='
|
||||
arrayStart = '['
|
||||
arrayEnd = ']'
|
||||
arrayValTerm = ','
|
||||
commentStart = '#'
|
||||
stringStart = '"'
|
||||
stringEnd = '"'
|
||||
)
|
||||
|
||||
type stateFn func(lx *lexer) stateFn
|
||||
|
||||
type lexer struct {
|
||||
input string
|
||||
start int
|
||||
pos int
|
||||
width int
|
||||
line int
|
||||
state stateFn
|
||||
items chan item
|
||||
|
||||
// A stack of state functions used to maintain context.
|
||||
// The idea is to reuse parts of the state machine in various places.
|
||||
// For example, values can appear at the top level or within arbitrarily
|
||||
// nested arrays. The last state on the stack is used after a value has
|
||||
// been lexed. Similarly for comments.
|
||||
stack []stateFn
|
||||
}
|
||||
|
||||
type item struct {
|
||||
typ itemType
|
||||
val string
|
||||
line int
|
||||
}
|
||||
|
||||
func (lx *lexer) nextItem() item {
|
||||
for {
|
||||
select {
|
||||
case item := <-lx.items:
|
||||
return item
|
||||
default:
|
||||
lx.state = lx.state(lx)
|
||||
}
|
||||
}
|
||||
}
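// Editor's sketch, not part of the upstream file: how a caller such as the
// parser drives this lexer, using only lex, nextItem, item, itemEOF and
// itemError defined in this file.
func exampleDriveLexer(input string) []item {
	var items []item
	lx := lex(input)
	// Pull items until the lexer reports the end of input or an error.
	for it := lx.nextItem(); it.typ != itemEOF && it.typ != itemError; it = lx.nextItem() {
		// For the input `answer = 42`, this collects roughly:
		// (KeyStart, ), (Text, answer), (Integer, 42)
		items = append(items, it)
	}
	return items
}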
|
||||
|
||||
func lex(input string) *lexer {
|
||||
lx := &lexer{
|
||||
input: input + "\n",
|
||||
state: lexTop,
|
||||
line: 1,
|
||||
items: make(chan item, 10),
|
||||
stack: make([]stateFn, 0, 10),
|
||||
}
|
||||
return lx
|
||||
}
|
||||
|
||||
func (lx *lexer) push(state stateFn) {
|
||||
lx.stack = append(lx.stack, state)
|
||||
}
|
||||
|
||||
func (lx *lexer) pop() stateFn {
|
||||
if len(lx.stack) == 0 {
|
||||
return lx.errorf("BUG in lexer: no states to pop.")
|
||||
}
|
||||
last := lx.stack[len(lx.stack)-1]
|
||||
lx.stack = lx.stack[0 : len(lx.stack)-1]
|
||||
return last
|
||||
}
|
||||
|
||||
func (lx *lexer) current() string {
|
||||
return lx.input[lx.start:lx.pos]
|
||||
}
|
||||
|
||||
func (lx *lexer) emit(typ itemType) {
|
||||
lx.items <- item{typ, lx.current(), lx.line}
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
func (lx *lexer) next() (r rune) {
|
||||
if lx.pos >= len(lx.input) {
|
||||
lx.width = 0
|
||||
return eof
|
||||
}
|
||||
|
||||
if lx.input[lx.pos] == '\n' {
|
||||
lx.line++
|
||||
}
|
||||
r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:])
|
||||
lx.pos += lx.width
|
||||
return r
|
||||
}
|
||||
|
||||
// ignore skips over the pending input before this point.
|
||||
func (lx *lexer) ignore() {
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
// backup steps back one rune. Can be called only once per call of next.
|
||||
func (lx *lexer) backup() {
|
||||
lx.pos -= lx.width
|
||||
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
|
||||
lx.line--
|
||||
}
|
||||
}
|
||||
|
||||
// accept consumes the next rune if it's equal to `valid`.
|
||||
func (lx *lexer) accept(valid rune) bool {
|
||||
if lx.next() == valid {
|
||||
return true
|
||||
}
|
||||
lx.backup()
|
||||
return false
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next rune in the input.
|
||||
func (lx *lexer) peek() rune {
|
||||
r := lx.next()
|
||||
lx.backup()
|
||||
return r
|
||||
}
|
||||
|
||||
// errorf stops all lexing by emitting an error and returning `nil`.
|
||||
// Note that any value that is a character is escaped if it's a special
|
||||
// character (new lines, tabs, etc.).
|
||||
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
|
||||
lx.items <- item{
|
||||
itemError,
|
||||
fmt.Sprintf(format, values...),
|
||||
lx.line,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// lexTop consumes elements at the top level of TOML data.
|
||||
func lexTop(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isWhitespace(r) || isNL(r) {
|
||||
return lexSkip(lx, lexTop)
|
||||
}
|
||||
|
||||
switch r {
|
||||
case commentStart:
|
||||
lx.push(lexTop)
|
||||
return lexCommentStart
|
||||
case tableStart:
|
||||
return lexTableStart
|
||||
case eof:
|
||||
if lx.pos > lx.start {
|
||||
return lx.errorf("Unexpected EOF.")
|
||||
}
|
||||
lx.emit(itemEOF)
|
||||
return nil
|
||||
}
|
||||
|
||||
// At this point, the only valid item can be a key, so we back up
|
||||
// and let the key lexer do the rest.
|
||||
lx.backup()
|
||||
lx.push(lexTopEnd)
|
||||
return lexKeyStart
|
||||
}
|
||||
|
||||
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
|
||||
// or a table.) It must see only whitespace, and will turn back to lexTop
|
||||
// upon a new line. If it sees EOF, it will quit the lexer successfully.
|
||||
func lexTopEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == commentStart:
|
||||
// a comment will read to a new line for us.
|
||||
lx.push(lexTop)
|
||||
return lexCommentStart
|
||||
case isWhitespace(r):
|
||||
return lexTopEnd
|
||||
case isNL(r):
|
||||
lx.ignore()
|
||||
return lexTop
|
||||
case r == eof:
|
||||
lx.ignore()
|
||||
return lexTop
|
||||
}
|
||||
return lx.errorf("Expected a top-level item to end with a new line, "+
|
||||
"comment or EOF, but got %q instead.", r)
|
||||
}
|
||||
|
||||
// lexTableStart lexes the beginning of a table. Namely, it makes sure that
|
||||
// it starts with a character other than '.' and ']'.
|
||||
// It assumes that '[' has already been consumed.
|
||||
// It also handles the case that this is an item in an array of tables.
|
||||
// e.g., '[[name]]'.
|
||||
func lexTableStart(lx *lexer) stateFn {
|
||||
if lx.peek() == arrayTableStart {
|
||||
lx.next()
|
||||
lx.emit(itemArrayTableStart)
|
||||
lx.push(lexArrayTableEnd)
|
||||
} else {
|
||||
lx.emit(itemTableStart)
|
||||
lx.push(lexTableEnd)
|
||||
}
|
||||
return lexTableNameStart
|
||||
}
|
||||
|
||||
func lexTableEnd(lx *lexer) stateFn {
|
||||
lx.emit(itemTableEnd)
|
||||
return lexTopEnd
|
||||
}
|
||||
|
||||
func lexArrayTableEnd(lx *lexer) stateFn {
|
||||
if r := lx.next(); r != arrayTableEnd {
|
||||
return lx.errorf("Expected end of table array name delimiter %q, "+
|
||||
"but got %q instead.", arrayTableEnd, r)
|
||||
}
|
||||
lx.emit(itemArrayTableEnd)
|
||||
return lexTopEnd
|
||||
}
|
||||
|
||||
func lexTableNameStart(lx *lexer) stateFn {
|
||||
switch lx.next() {
|
||||
case tableEnd:
|
||||
return lx.errorf("Unexpected end of table. (Tables cannot " +
|
||||
"be empty.)")
|
||||
case tableSep:
|
||||
return lx.errorf("Unexpected table separator. (Tables cannot " +
|
||||
"be empty.)")
|
||||
}
|
||||
return lexTableName
|
||||
}
|
||||
|
||||
// lexTableName lexes the name of a table. It assumes that at least one
|
||||
// valid character for the table has already been read.
|
||||
func lexTableName(lx *lexer) stateFn {
|
||||
switch lx.peek() {
|
||||
case tableStart:
|
||||
return lx.errorf("Table names cannot contain %q or %q.",
|
||||
tableStart, tableEnd)
|
||||
case tableEnd:
|
||||
lx.emit(itemText)
|
||||
lx.next()
|
||||
return lx.pop()
|
||||
case tableSep:
|
||||
lx.emit(itemText)
|
||||
lx.next()
|
||||
lx.ignore()
|
||||
return lexTableNameStart
|
||||
}
|
||||
lx.next()
|
||||
return lexTableName
|
||||
}
|
||||
|
||||
// lexKeyStart consumes any whitespace before a key name, emits itemKeyStart
// once the first character of the key has been seen, and hands off to lexKey.
|
||||
func lexKeyStart(lx *lexer) stateFn {
|
||||
r := lx.peek()
|
||||
switch {
|
||||
case r == keySep:
|
||||
return lx.errorf("Unexpected key separator %q.", keySep)
|
||||
case isWhitespace(r) || isNL(r):
|
||||
lx.next()
|
||||
return lexSkip(lx, lexKeyStart)
|
||||
}
|
||||
|
||||
lx.ignore()
|
||||
lx.emit(itemKeyStart)
|
||||
lx.next()
|
||||
return lexKey
|
||||
}
|
||||
|
||||
// lexKey consumes the text of a key. Assumes that the first character (which
|
||||
// is not whitespace) has already been consumed.
|
||||
func lexKey(lx *lexer) stateFn {
|
||||
r := lx.peek()
|
||||
|
||||
// XXX: Possible divergence from spec?
|
||||
// "Keys start with the first non-whitespace character and end with the
|
||||
// last non-whitespace character before the equals sign."
|
||||
// Note here that whitespace is either a tab or a space.
|
||||
// But we'll call it quits if we see a new line too.
|
||||
if isWhitespace(r) || isNL(r) {
|
||||
lx.emit(itemText)
|
||||
return lexKeyEnd
|
||||
}
|
||||
|
||||
// Let's also call it quits if we see an equals sign.
|
||||
if r == keySep {
|
||||
lx.emit(itemText)
|
||||
return lexKeyEnd
|
||||
}
|
||||
|
||||
lx.next()
|
||||
return lexKey
|
||||
}
|
||||
|
||||
// lexKeyEnd consumes the end of a key (up to the key separator).
|
||||
// Assumes that the first whitespace character after a key (or the '='
|
||||
// separator) has NOT been consumed.
|
||||
func lexKeyEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r) || isNL(r):
|
||||
return lexSkip(lx, lexKeyEnd)
|
||||
case r == keySep:
|
||||
return lexSkip(lx, lexValue)
|
||||
}
|
||||
return lx.errorf("Expected key separator %q, but got %q instead.",
|
||||
keySep, r)
|
||||
}
|
||||
|
||||
// lexValue starts the consumption of a value anywhere a value is expected.
|
||||
// lexValue will ignore whitespace.
|
||||
// After a value is lexed, the last state on the stack is popped and returned.
|
||||
func lexValue(lx *lexer) stateFn {
|
||||
// We allow whitespace to precede a value, but NOT new lines.
|
||||
// In array syntax, the array states are responsible for ignoring new lines.
|
||||
r := lx.next()
|
||||
if isWhitespace(r) {
|
||||
return lexSkip(lx, lexValue)
|
||||
}
|
||||
|
||||
switch {
|
||||
case r == arrayStart:
|
||||
lx.ignore()
|
||||
lx.emit(itemArray)
|
||||
return lexArrayValue
|
||||
case r == stringStart:
|
||||
lx.ignore() // ignore the '"'
|
||||
return lexString
|
||||
case r == 't':
|
||||
return lexTrue
|
||||
case r == 'f':
|
||||
return lexFalse
|
||||
case r == '-':
|
||||
return lexNumberStart
|
||||
case isDigit(r):
|
||||
lx.backup() // avoid an extra state and use the same as above
|
||||
return lexNumberOrDateStart
|
||||
case r == '.': // special error case, be kind to users
|
||||
return lx.errorf("Floats must start with a digit, not '.'.")
|
||||
}
|
||||
return lx.errorf("Expected value but found %q instead.", r)
|
||||
}
|
||||
|
||||
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
|
||||
// have already been consumed. All whitespace and new lines are ignored.
|
||||
func lexArrayValue(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r) || isNL(r):
|
||||
return lexSkip(lx, lexArrayValue)
|
||||
case r == commentStart:
|
||||
lx.push(lexArrayValue)
|
||||
return lexCommentStart
|
||||
case r == arrayValTerm:
|
||||
return lx.errorf("Unexpected array value terminator %q.",
|
||||
arrayValTerm)
|
||||
case r == arrayEnd:
|
||||
return lexArrayEnd
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.push(lexArrayValueEnd)
|
||||
return lexValue
|
||||
}
|
||||
|
||||
// lexArrayValueEnd consumes the cruft between values of an array. Namely,
|
||||
// it ignores whitespace and expects either a ',' or a ']'.
|
||||
func lexArrayValueEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r) || isNL(r):
|
||||
return lexSkip(lx, lexArrayValueEnd)
|
||||
case r == commentStart:
|
||||
lx.push(lexArrayValueEnd)
|
||||
return lexCommentStart
|
||||
case r == arrayValTerm:
|
||||
lx.ignore()
|
||||
return lexArrayValue // move on to the next value
|
||||
case r == arrayEnd:
|
||||
return lexArrayEnd
|
||||
}
|
||||
return lx.errorf("Expected an array value terminator %q or an array "+
|
||||
"terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r)
|
||||
}
|
||||
|
||||
// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has
|
||||
// just been consumed.
|
||||
func lexArrayEnd(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
lx.emit(itemArrayEnd)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexString consumes the inner contents of a string. It assumes that the
|
||||
// beginning '"' has already been consumed and ignored.
|
||||
func lexString(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isNL(r):
|
||||
return lx.errorf("Strings cannot contain new lines.")
|
||||
case r == '\\':
|
||||
return lexStringEscape
|
||||
case r == stringEnd:
|
||||
lx.backup()
|
||||
lx.emit(itemString)
|
||||
lx.next()
|
||||
lx.ignore()
|
||||
return lx.pop()
|
||||
}
|
||||
return lexString
|
||||
}
|
||||
|
||||
// lexStringEscape consumes an escaped character. It assumes that the preceding
|
||||
// '\\' has already been consumed.
|
||||
func lexStringEscape(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch r {
|
||||
case 'b':
|
||||
fallthrough
|
||||
case 't':
|
||||
fallthrough
|
||||
case 'n':
|
||||
fallthrough
|
||||
case 'f':
|
||||
fallthrough
|
||||
case 'r':
|
||||
fallthrough
|
||||
case '"':
|
||||
fallthrough
|
||||
case '/':
|
||||
fallthrough
|
||||
case '\\':
|
||||
return lexString
|
||||
case 'u':
|
||||
return lexStringUnicode
|
||||
}
|
||||
return lx.errorf("Invalid escape character %q. Only the following "+
|
||||
"escape characters are allowed: "+
|
||||
"\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, and \\uXXXX.", r)
|
||||
}
|
||||
|
||||
// lexStringUnicode consumes four hexadecimal digits following '\u'. It assumes
// that the '\u' has already been consumed.
|
||||
func lexStringUnicode(lx *lexer) stateFn {
|
||||
var r rune
|
||||
|
||||
for i := 0; i < 4; i++ {
|
||||
r = lx.next()
|
||||
if !isHexadecimal(r) {
|
||||
return lx.errorf("Expected four hexadecimal digits after '\\u', "+
|
||||
"but got '%s' instead.", lx.current())
|
||||
}
|
||||
}
|
||||
return lexString
|
||||
}
|
||||
|
||||
// lexNumberOrDateStart consumes either a (positive) integer, float or datetime.
|
||||
// It assumes that NO negative sign has been consumed.
|
||||
func lexNumberOrDateStart(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if !isDigit(r) {
|
||||
if r == '.' {
|
||||
return lx.errorf("Floats must start with a digit, not '.'.")
|
||||
} else {
|
||||
return lx.errorf("Expected a digit but got %q.", r)
|
||||
}
|
||||
}
|
||||
return lexNumberOrDate
|
||||
}
|
||||
|
||||
// lexNumberOrDate consumes either a (positive) integer, float or datetime.
|
||||
func lexNumberOrDate(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == '-':
|
||||
if lx.pos-lx.start != 5 {
|
||||
return lx.errorf("All ISO8601 dates must be in full Zulu form.")
|
||||
}
|
||||
return lexDateAfterYear
|
||||
case isDigit(r):
|
||||
return lexNumberOrDate
|
||||
case r == '.':
|
||||
return lexFloatStart
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.emit(itemInteger)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format.
|
||||
// It assumes that "YYYY-" has already been consumed.
|
||||
func lexDateAfterYear(lx *lexer) stateFn {
|
||||
formats := []rune{
|
||||
// digits are '0'.
|
||||
// everything else is direct equality.
|
||||
'0', '0', '-', '0', '0',
|
||||
'T',
|
||||
'0', '0', ':', '0', '0', ':', '0', '0',
|
||||
'Z',
|
||||
}
|
||||
for _, f := range formats {
|
||||
r := lx.next()
|
||||
if f == '0' {
|
||||
if !isDigit(r) {
|
||||
return lx.errorf("Expected digit in ISO8601 datetime, "+
|
||||
"but found %q instead.", r)
|
||||
}
|
||||
} else if f != r {
|
||||
return lx.errorf("Expected %q in ISO8601 datetime, "+
|
||||
"but found %q instead.", f, r)
|
||||
}
|
||||
}
|
||||
lx.emit(itemDatetime)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexNumberStart consumes either an integer or a float. It assumes that a
|
||||
// negative sign has already been read, but that *no* digits have been consumed.
|
||||
// lexNumberStart will move to the appropriate integer or float states.
|
||||
func lexNumberStart(lx *lexer) stateFn {
|
||||
// we MUST see a digit. Even floats have to start with a digit.
|
||||
r := lx.next()
|
||||
if !isDigit(r) {
|
||||
if r == '.' {
|
||||
return lx.errorf("Floats must start with a digit, not '.'.")
|
||||
} else {
|
||||
return lx.errorf("Expected a digit but got %q.", r)
|
||||
}
|
||||
}
|
||||
return lexNumber
|
||||
}
|
||||
|
||||
// lexNumber consumes an integer or a float after seeing the first digit.
|
||||
func lexNumber(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isDigit(r):
|
||||
return lexNumber
|
||||
case r == '.':
|
||||
return lexFloatStart
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.emit(itemInteger)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexFloatStart starts the consumption of digits of a float after a '.'.
|
||||
// Namely, at least one digit is required.
|
||||
func lexFloatStart(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if !isDigit(r) {
|
||||
return lx.errorf("Floats must have a digit after the '.', but got "+
|
||||
"%q instead.", r)
|
||||
}
|
||||
return lexFloat
|
||||
}
|
||||
|
||||
// lexFloat consumes the digits of a float after a '.'.
|
||||
// Assumes that one digit has been consumed after a '.' already.
|
||||
func lexFloat(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isDigit(r) {
|
||||
return lexFloat
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.emit(itemFloat)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexConst consumes the remainder of s (that is, s[1:]), assuming that s[0]
// has already been consumed.
|
||||
func lexConst(lx *lexer, s string) stateFn {
|
||||
for i := range s[1:] {
|
||||
if r := lx.next(); r != rune(s[i+1]) {
|
||||
return lx.errorf("Expected %q, but found %q instead.", s[:i+1],
|
||||
s[:i]+string(r))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// lexTrue consumes the "rue" in "true". It assumes that 't' has already
|
||||
// been consumed.
|
||||
func lexTrue(lx *lexer) stateFn {
|
||||
if fn := lexConst(lx, "true"); fn != nil {
|
||||
return fn
|
||||
}
|
||||
lx.emit(itemBool)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexFalse consumes the "alse" in "false". It assumes that 'f' has already
|
||||
// been consumed.
|
||||
func lexFalse(lx *lexer) stateFn {
|
||||
if fn := lexConst(lx, "false"); fn != nil {
|
||||
return fn
|
||||
}
|
||||
lx.emit(itemBool)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexCommentStart begins the lexing of a comment. It will emit
|
||||
// itemCommentStart and consume no characters, passing control to lexComment.
|
||||
func lexCommentStart(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
lx.emit(itemCommentStart)
|
||||
return lexComment
|
||||
}
|
||||
|
||||
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
|
||||
// It will consume *up to* the first new line character, and pass control
|
||||
// back to the last state on the stack.
|
||||
func lexComment(lx *lexer) stateFn {
|
||||
r := lx.peek()
|
||||
if isNL(r) || r == eof {
|
||||
lx.emit(itemText)
|
||||
return lx.pop()
|
||||
}
|
||||
lx.next()
|
||||
return lexComment
|
||||
}
|
||||
|
||||
// lexSkip ignores all slurped input and moves on to the next state.
|
||||
func lexSkip(lx *lexer, nextState stateFn) stateFn {
|
||||
return func(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
return nextState
|
||||
}
|
||||
}
|
||||
|
||||
// isWhitespace returns true if `r` is a whitespace character according
|
||||
// to the spec.
|
||||
func isWhitespace(r rune) bool {
|
||||
return r == '\t' || r == ' '
|
||||
}
|
||||
|
||||
func isNL(r rune) bool {
|
||||
return r == '\n' || r == '\r'
|
||||
}
|
||||
|
||||
func isDigit(r rune) bool {
|
||||
return r >= '0' && r <= '9'
|
||||
}
|
||||
|
||||
func isHexadecimal(r rune) bool {
|
||||
return (r >= '0' && r <= '9') ||
|
||||
(r >= 'a' && r <= 'f') ||
|
||||
(r >= 'A' && r <= 'F')
|
||||
}
|
||||
|
||||
func (itype itemType) String() string {
|
||||
switch itype {
|
||||
case itemError:
|
||||
return "Error"
|
||||
case itemNIL:
|
||||
return "NIL"
|
||||
case itemEOF:
|
||||
return "EOF"
|
||||
case itemText:
|
||||
return "Text"
|
||||
case itemString:
|
||||
return "String"
|
||||
case itemBool:
|
||||
return "Bool"
|
||||
case itemInteger:
|
||||
return "Integer"
|
||||
case itemFloat:
|
||||
return "Float"
|
||||
case itemDatetime:
|
||||
return "DateTime"
|
||||
case itemTableStart:
|
||||
return "TableStart"
|
||||
case itemTableEnd:
|
||||
return "TableEnd"
|
||||
case itemKeyStart:
|
||||
return "KeyStart"
|
||||
case itemArray:
|
||||
return "Array"
|
||||
case itemArrayEnd:
|
||||
return "ArrayEnd"
|
||||
case itemCommentStart:
|
||||
return "CommentStart"
|
||||
}
|
||||
panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
|
||||
}
|
||||
|
||||
func (item item) String() string {
|
||||
return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
|
||||
}
|
|
@@ -0,0 +1,417 @@
|
|||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type parser struct {
|
||||
mapping map[string]interface{}
|
||||
types map[string]tomlType
|
||||
lx *lexer
|
||||
|
||||
// A list of keys in the order that they appear in the TOML data.
|
||||
ordered []Key
|
||||
|
||||
// the full key for the current hash in scope
|
||||
context Key
|
||||
|
||||
// the base key name for everything except hashes
|
||||
currentKey string
|
||||
|
||||
// rough approximation of line number
|
||||
approxLine int
|
||||
|
||||
// A map of 'key.group.names' to whether they were created implicitly.
|
||||
implicits map[string]bool
|
||||
}
|
||||
|
||||
type parseError string
|
||||
|
||||
func (pe parseError) Error() string {
|
||||
return string(pe)
|
||||
}
|
||||
|
||||
func parse(data string) (p *parser, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
var ok bool
|
||||
if err, ok = r.(parseError); ok {
|
||||
return
|
||||
}
|
||||
panic(r)
|
||||
}
|
||||
}()
|
||||
|
||||
p = &parser{
|
||||
mapping: make(map[string]interface{}),
|
||||
types: make(map[string]tomlType),
|
||||
lx: lex(data),
|
||||
ordered: make([]Key, 0),
|
||||
implicits: make(map[string]bool),
|
||||
}
|
||||
for {
|
||||
item := p.next()
|
||||
if item.typ == itemEOF {
|
||||
break
|
||||
}
|
||||
p.topLevel(item)
|
||||
}
|
||||
|
||||
return p, nil
|
||||
}
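// Editor's sketch, not part of the upstream file: how the exported decoding
// path is assumed to use parse. The mapping field is filled in by the
// topLevel/setValue calls later in this file; the exact shape of the stored
// values is an assumption here.
func exampleParseInternal() {
	p, err := parse("ip = \"10.0.0.1\"\n")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(p.mapping["ip"]) // 10.0.0.1
}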
|
||||
|
||||
func (p *parser) panicf(format string, v ...interface{}) {
|
||||
msg := fmt.Sprintf("Near line %d, key '%s': %s",
|
||||
p.approxLine, p.current(), fmt.Sprintf(format, v...))
|
||||
panic(parseError(msg))
|
||||
}
|
||||
|
||||
func (p *parser) next() item {
|
||||
it := p.lx.nextItem()
|
||||
if it.typ == itemError {
|
||||
p.panicf("Near line %d: %s", it.line, it.val)
|
||||
}
|
||||
return it
|
||||
}
|
||||
|
||||
func (p *parser) bug(format string, v ...interface{}) {
|
||||
log.Fatalf("BUG: %s\n\n", fmt.Sprintf(format, v...))
|
||||
}
|
||||
|
||||
func (p *parser) expect(typ itemType) item {
|
||||
it := p.next()
|
||||
p.assertEqual(typ, it.typ)
|
||||
return it
|
||||
}
|
||||
|
||||
func (p *parser) assertEqual(expected, got itemType) {
|
||||
if expected != got {
|
||||
p.bug("Expected '%s' but got '%s'.", expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *parser) topLevel(item item) {
|
||||
switch item.typ {
|
||||
case itemCommentStart:
|
||||
p.approxLine = item.line
|
||||
p.expect(itemText)
|
||||
case itemTableStart:
|
||||
kg := p.expect(itemText)
|
||||
p.approxLine = kg.line
|
||||
|
||||
key := make(Key, 0)
|
||||
for ; kg.typ == itemText; kg = p.next() {
|
||||
key = append(key, kg.val)
|
||||
}
|
||||
p.assertEqual(itemTableEnd, kg.typ)
|
||||
|
||||
p.establishContext(key, false)
|
||||
p.setType("", tomlHash)
|
||||
p.ordered = append(p.ordered, key)
|
||||
case itemArrayTableStart:
|
||||
kg := p.expect(itemText)
|
||||
p.approxLine = kg.line
|
||||
|
||||
key := make(Key, 0)
|
||||
for ; kg.typ == itemText; kg = p.next() {
|
||||
key = append(key, kg.val)
|
||||
}
|
||||
p.assertEqual(itemArrayTableEnd, kg.typ)
|
||||
|
||||
p.establishContext(key, true)
|
||||
p.setType("", tomlArrayHash)
|
||||
p.ordered = append(p.ordered, key)
|
||||
case itemKeyStart:
|
||||
kname := p.expect(itemText)
|
||||
p.currentKey = kname.val
|
||||
p.approxLine = kname.line
|
||||
|
||||
val, typ := p.value(p.next())
|
||||
p.setValue(p.currentKey, val)
|
||||
p.setType(p.currentKey, typ)
|
||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
||||
|
||||
p.currentKey = ""
|
||||
default:
|
||||
p.bug("Unexpected type at top level: %s", item.typ)
|
||||
}
|
||||
}
|
||||
|
||||
// value translates an expected value from the lexer into a Go value wrapped
|
||||
// as an empty interface.
|
||||
func (p *parser) value(it item) (interface{}, tomlType) {
|
||||
switch it.typ {
|
||||
case itemString:
|
||||
return p.replaceUnicode(replaceEscapes(it.val)), p.typeOfPrimitive(it)
|
||||
case itemBool:
|
||||
switch it.val {
|
||||
case "true":
|
||||
return true, p.typeOfPrimitive(it)
|
||||
case "false":
|
||||
return false, p.typeOfPrimitive(it)
|
||||
}
|
||||
p.bug("Expected boolean value, but got '%s'.", it.val)
|
||||
case itemInteger:
|
||||
num, err := strconv.ParseInt(it.val, 10, 64)
|
||||
if err != nil {
|
||||
// See comment below for floats describing why we make a
|
||||
// distinction between a bug and a user error.
|
||||
if e, ok := err.(*strconv.NumError); ok &&
|
||||
e.Err == strconv.ErrRange {
|
||||
|
||||
p.panicf("Integer '%s' is out of the range of 64-bit "+
|
||||
"signed integers.", it.val)
|
||||
} else {
|
||||
p.bug("Expected integer value, but got '%s'.", it.val)
|
||||
}
|
||||
}
|
||||
return num, p.typeOfPrimitive(it)
|
||||
case itemFloat:
|
||||
num, err := strconv.ParseFloat(it.val, 64)
|
||||
if err != nil {
|
||||
// Distinguish float values. Normally, it'd be a bug if the lexer
|
||||
// provides an invalid float, but it's possible that the float is
|
||||
// out of range of valid values (which the lexer cannot determine).
|
||||
// So mark the former as a bug but the latter as a legitimate user
|
||||
// error.
|
||||
//
|
||||
// This is also true for integers.
|
||||
if e, ok := err.(*strconv.NumError); ok &&
|
||||
e.Err == strconv.ErrRange {
|
||||
|
||||
p.panicf("Float '%s' is out of the range of 64-bit "+
|
||||
"IEEE-754 floating-point numbers.", it.val)
|
||||
} else {
|
||||
p.bug("Expected float value, but got '%s'.", it.val)
|
||||
}
|
||||
}
|
||||
return num, p.typeOfPrimitive(it)
|
||||
case itemDatetime:
|
||||
t, err := time.Parse("2006-01-02T15:04:05Z", it.val)
|
||||
if err != nil {
|
||||
p.bug("Expected Zulu formatted DateTime, but got '%s'.", it.val)
|
||||
}
|
||||
return t, p.typeOfPrimitive(it)
|
||||
case itemArray:
|
||||
array := make([]interface{}, 0)
|
||||
types := make([]tomlType, 0)
|
||||
|
||||
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
|
||||
if it.typ == itemCommentStart {
|
||||
p.expect(itemText)
|
||||
continue
|
||||
}
|
||||
|
||||
val, typ := p.value(it)
|
||||
array = append(array, val)
|
||||
types = append(types, typ)
|
||||
}
|
||||
return array, p.typeOfArray(types)
|
||||
}
|
||||
p.bug("Unexpected value type: %s", it.typ)
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// establishContext sets the current context of the parser,
|
||||
// where the context is either a hash or an array of hashes. Which one is
|
||||
// set depends on the value of the `array` parameter.
|
||||
//
|
||||
// Establishing the context also makes sure that the key isn't a duplicate, and
|
||||
// will create implicit hashes automatically.
|
||||
func (p *parser) establishContext(key Key, array bool) {
|
||||
var ok bool
|
||||
|
||||
// Always start at the top level and drill down for our context.
|
||||
hashContext := p.mapping
|
||||
keyContext := make(Key, 0)
|
||||
|
||||
// We only need implicit hashes for key[0:-1]
|
||||
for _, k := range key[0 : len(key)-1] {
|
||||
_, ok = hashContext[k]
|
||||
keyContext = append(keyContext, k)
|
||||
|
||||
// No key? Make an implicit hash and move on.
|
||||
if !ok {
|
||||
p.addImplicit(keyContext)
|
||||
hashContext[k] = make(map[string]interface{})
|
||||
}
|
||||
|
||||
// If the hash context is actually an array of tables, then set
|
||||
// the hash context to the last element in that array.
|
||||
//
|
||||
// Otherwise, it better be a table, since this MUST be a key group (by
|
||||
// virtue of it not being the last element in a key).
|
||||
switch t := hashContext[k].(type) {
|
||||
case []map[string]interface{}:
|
||||
hashContext = t[len(t)-1]
|
||||
case map[string]interface{}:
|
||||
hashContext = t
|
||||
default:
|
||||
p.panicf("Key '%s' was already created as a hash.", keyContext)
|
||||
}
|
||||
}
|
||||
|
||||
p.context = keyContext
|
||||
if array {
|
||||
// If this is the first element for this array, then allocate a new
|
||||
// list of tables for it.
|
||||
k := key[len(key)-1]
|
||||
if _, ok := hashContext[k]; !ok {
|
||||
hashContext[k] = make([]map[string]interface{}, 0, 5)
|
||||
}
|
||||
|
||||
// Add a new table. But make sure the key hasn't already been used
|
||||
// for something else.
|
||||
if hash, ok := hashContext[k].([]map[string]interface{}); ok {
|
||||
hashContext[k] = append(hash, make(map[string]interface{}))
|
||||
} else {
|
||||
p.panicf("Key '%s' was already created and cannot be used as "+
|
||||
"an array.", keyContext)
|
||||
}
|
||||
} else {
|
||||
p.setValue(key[len(key)-1], make(map[string]interface{}))
|
||||
}
|
||||
p.context = append(p.context, key[len(key)-1])
|
||||
}
|
||||
|
||||
// setValue sets the given key to the given value in the current context.
|
||||
// It will make sure that the key hasn't already been defined, account for
|
||||
// implicit key groups.
|
||||
func (p *parser) setValue(key string, value interface{}) {
|
||||
var tmpHash interface{}
|
||||
var ok bool
|
||||
|
||||
hash := p.mapping
|
||||
keyContext := make(Key, 0)
|
||||
for _, k := range p.context {
|
||||
keyContext = append(keyContext, k)
|
||||
if tmpHash, ok = hash[k]; !ok {
|
||||
p.bug("Context for key '%s' has not been established.", keyContext)
|
||||
}
|
||||
switch t := tmpHash.(type) {
|
||||
case []map[string]interface{}:
|
||||
// The context is a table of hashes. Pick the most recent table
|
||||
// defined as the current hash.
|
||||
hash = t[len(t)-1]
|
||||
case map[string]interface{}:
|
||||
hash = t
|
||||
default:
|
||||
p.bug("Expected hash to have type 'map[string]interface{}', but "+
|
||||
"it has '%T' instead.", tmpHash)
|
||||
}
|
||||
}
|
||||
keyContext = append(keyContext, key)
|
||||
|
||||
if _, ok := hash[key]; ok {
|
||||
// Typically, if the given key has already been set, then we have
|
||||
// to raise an error since duplicate keys are disallowed. However,
|
||||
// it's possible that a key was previously defined implicitly. In this
|
||||
// case, it is allowed to be redefined concretely. (See the
|
||||
// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
|
||||
//
|
||||
// But we have to make sure to stop marking it as an implicit. (So that
|
||||
// another redefinition provokes an error.)
|
||||
//
|
||||
// Note that since it has already been defined (as a hash), we don't
|
||||
// want to overwrite it. So our business is done.
|
||||
if p.isImplicit(keyContext) {
|
||||
p.removeImplicit(keyContext)
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise, we have a concrete key trying to override a previous
|
||||
// key, which is *always* wrong.
|
||||
p.panicf("Key '%s' has already been defined.", keyContext)
|
||||
}
|
||||
hash[key] = value
|
||||
}
|
||||
|
||||
// setType sets the type of a particular value at a given key.
|
||||
// It should be called immediately AFTER setValue.
|
||||
//
|
||||
// Note that if `key` is empty, then the type given will be applied to the
|
||||
// current context (which is either a table or an array of tables).
|
||||
func (p *parser) setType(key string, typ tomlType) {
|
||||
keyContext := make(Key, 0, len(p.context)+1)
|
||||
for _, k := range p.context {
|
||||
keyContext = append(keyContext, k)
|
||||
}
|
||||
if len(key) > 0 { // allow type setting for hashes
|
||||
keyContext = append(keyContext, key)
|
||||
}
|
||||
p.types[keyContext.String()] = typ
|
||||
}
|
||||
|
||||
// addImplicit sets the given Key as having been created implicitly.
|
||||
func (p *parser) addImplicit(key Key) {
|
||||
p.implicits[key.String()] = true
|
||||
}
|
||||
|
||||
// removeImplicit stops tagging the given key as having been implicitly created.
|
||||
func (p *parser) removeImplicit(key Key) {
|
||||
p.implicits[key.String()] = false
|
||||
}
|
||||
|
||||
// isImplicit returns true if the key group pointed to by the key was created
|
||||
// implicitly.
|
||||
func (p *parser) isImplicit(key Key) bool {
|
||||
return p.implicits[key.String()]
|
||||
}
|
||||
|
||||
// current returns the full key name of the current context.
|
||||
func (p *parser) current() string {
|
||||
if len(p.currentKey) == 0 {
|
||||
return p.context.String()
|
||||
}
|
||||
if len(p.context) == 0 {
|
||||
return p.currentKey
|
||||
}
|
||||
return fmt.Sprintf("%s.%s", p.context, p.currentKey)
|
||||
}
|
||||
|
||||
func replaceEscapes(s string) string {
|
||||
return strings.NewReplacer(
|
||||
"\\b", "\u0008",
|
||||
"\\t", "\u0009",
|
||||
"\\n", "\u000A",
|
||||
"\\f", "\u000C",
|
||||
"\\r", "\u000D",
|
||||
"\\\"", "\u0022",
|
||||
"\\/", "\u002F",
|
||||
"\\\\", "\u005C",
|
||||
).Replace(s)
|
||||
}
|
||||
|
||||
func (p *parser) replaceUnicode(s string) string {
|
||||
indexEsc := func() int {
|
||||
return strings.Index(s, "\\u")
|
||||
}
|
||||
for i := indexEsc(); i != -1; i = indexEsc() {
|
||||
asciiBytes := s[i+2 : i+6]
|
||||
s = strings.Replace(s, s[i:i+6], p.asciiEscapeToUnicode(asciiBytes), -1)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (p *parser) asciiEscapeToUnicode(s string) string {
|
||||
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
|
||||
if err != nil {
|
||||
p.bug("Could not parse '%s' as a hexadecimal number, but the "+
|
||||
"lexer claims it's OK: %s", s, err)
|
||||
}
|
||||
|
||||
// BUG(burntsushi)
|
||||
// I honestly don't understand how this works. I can't seem
|
||||
// to find a way to make this fail. I figured this would fail on invalid
|
||||
// UTF-8 characters like U+DCFF, but it doesn't.
|
||||
r := string(rune(hex))
|
||||
if !utf8.ValidString(r) {
|
||||
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
|
||||
}
|
||||
return string(r)
|
||||
}
|
|
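
The parse function above relies on panic/recover for error reporting: panicf raises a parseError, parse's deferred recover turns it back into an ordinary error return, and any other panic (a genuine bug) is re-raised. A standalone sketch of that pattern, with hypothetical names, for readers unfamiliar with it:

package main

import "fmt"

type parseError string

func (pe parseError) Error() string { return string(pe) }

// doParse mimics the recover logic in parse() above: a parseError panic
// becomes the returned err, any other panic keeps propagating.
func doParse(fail bool) (err error) {
    defer func() {
        if r := recover(); r != nil {
            var ok bool
            if err, ok = r.(parseError); ok {
                return
            }
            panic(r) // not ours: a real bug, let it crash
        }
    }()
    if fail {
        panic(parseError("Near line 1, key 'x': boom"))
    }
    return nil
}

func main() {
    fmt.Println(doParse(true))  // prints the parse error
    fmt.Println(doParse(false)) // prints <nil>
}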
@ -0,0 +1 @@
au BufWritePost *.go silent!make tags > /dev/null 2>&1
@ -0,0 +1,85 @@
package toml

// tomlType represents any Go type that corresponds to a TOML type.
// While the first draft of the TOML spec has a simplistic type system that
// probably doesn't need this level of sophistication, we seem to be militating
// toward adding real composite types.
type tomlType interface {
    typeString() string
}

// typeEqual accepts any two types and returns true if they are equal.
func typeEqual(t1, t2 tomlType) bool {
    if t1 == nil || t2 == nil {
        return false
    }
    return t1.typeString() == t2.typeString()
}

func typeIsHash(t tomlType) bool {
    return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
}

type tomlBaseType string

func (btype tomlBaseType) typeString() string {
    return string(btype)
}

func (btype tomlBaseType) String() string {
    return btype.typeString()
}

var (
    tomlInteger   tomlBaseType = "Integer"
    tomlFloat     tomlBaseType = "Float"
    tomlDatetime  tomlBaseType = "Datetime"
    tomlString    tomlBaseType = "String"
    tomlBool      tomlBaseType = "Bool"
    tomlArray     tomlBaseType = "Array"
    tomlHash      tomlBaseType = "Hash"
    tomlArrayHash tomlBaseType = "ArrayHash"
)

// typeOfPrimitive returns a tomlType of any primitive value in TOML.
// Primitive values are: Integer, Float, Datetime, String and Bool.
//
// Passing a lexer item other than the following will cause a BUG message
// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
func (p *parser) typeOfPrimitive(lexItem item) tomlType {
    switch lexItem.typ {
    case itemInteger:
        return tomlInteger
    case itemFloat:
        return tomlFloat
    case itemDatetime:
        return tomlDatetime
    case itemString:
        return tomlString
    case itemBool:
        return tomlBool
    }
    p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
    panic("unreachable")
}

// typeOfArray returns a tomlType for an array given a list of types of its
// values.
//
// In the current spec, if an array is homogeneous, then its type is always
// "Array". If the array is not homogeneous, an error is generated.
func (p *parser) typeOfArray(types []tomlType) tomlType {
    // Empty arrays are cool.
    if len(types) == 0 {
        return tomlArray
    }

    theType := types[0]
    for _, t := range types[1:] {
        if !typeEqual(theType, t) {
            p.panicf("Array contains values of type '%s' and '%s', but arrays "+
                "must be homogeneous.", theType, t)
        }
    }
    return tomlArray
}
@ -0,0 +1,241 @@
package toml

// Struct field handling is adapted from code in encoding/json:
//
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the Go distribution.

import (
    "reflect"
    "sort"
    "sync"
)

// A field represents a single field found in a struct.
type field struct {
    name  string       // the name of the field (`toml` tag included)
    tag   bool         // whether field has a `toml` tag
    index []int        // represents the depth of an anonymous field
    typ   reflect.Type // the type of the field
}

// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from toml tag", then
// breaking ties with index sequence.
type byName []field

func (x byName) Len() int { return len(x) }

func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byName) Less(i, j int) bool {
    if x[i].name != x[j].name {
        return x[i].name < x[j].name
    }
    if len(x[i].index) != len(x[j].index) {
        return len(x[i].index) < len(x[j].index)
    }
    if x[i].tag != x[j].tag {
        return x[i].tag
    }
    return byIndex(x).Less(i, j)
}

// byIndex sorts field by index sequence.
type byIndex []field

func (x byIndex) Len() int { return len(x) }

func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byIndex) Less(i, j int) bool {
    for k, xik := range x[i].index {
        if k >= len(x[j].index) {
            return false
        }
        if xik != x[j].index[k] {
            return xik < x[j].index[k]
        }
    }
    return len(x[i].index) < len(x[j].index)
}

// typeFields returns a list of fields that TOML should recognize for the given
// type. The algorithm is breadth-first search over the set of structs to
// include - the top struct and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
    // Anonymous fields to explore at the current level and the next.
    current := []field{}
    next := []field{{typ: t}}

    // Count of queued names for current level and the next.
    count := map[reflect.Type]int{}
    nextCount := map[reflect.Type]int{}

    // Types already visited at an earlier level.
    visited := map[reflect.Type]bool{}

    // Fields found.
    var fields []field

    for len(next) > 0 {
        current, next = next, current[:0]
        count, nextCount = nextCount, map[reflect.Type]int{}

        for _, f := range current {
            if visited[f.typ] {
                continue
            }
            visited[f.typ] = true

            // Scan f.typ for fields to include.
            for i := 0; i < f.typ.NumField(); i++ {
                sf := f.typ.Field(i)
                if sf.PkgPath != "" { // unexported
                    continue
                }
                name := sf.Tag.Get("toml")
                if name == "-" {
                    continue
                }
                index := make([]int, len(f.index)+1)
                copy(index, f.index)
                index[len(f.index)] = i

                ft := sf.Type
                if ft.Name() == "" && ft.Kind() == reflect.Ptr {
                    // Follow pointer.
                    ft = ft.Elem()
                }

                // Record found field and index sequence.
                if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
                    tagged := name != ""
                    if name == "" {
                        name = sf.Name
                    }
                    fields = append(fields, field{name, tagged, index, ft})
                    if count[f.typ] > 1 {
                        // If there were multiple instances, add a second,
                        // so that the annihilation code will see a duplicate.
                        // It only cares about the distinction between 1 or 2,
                        // so don't bother generating any more copies.
                        fields = append(fields, fields[len(fields)-1])
                    }
                    continue
                }

                // Record new anonymous struct to explore in next round.
                nextCount[ft]++
                if nextCount[ft] == 1 {
                    f := field{name: ft.Name(), index: index, typ: ft}
                    next = append(next, f)
                }
            }
        }
    }

    sort.Sort(byName(fields))

    // Delete all fields that are hidden by the Go rules for embedded fields,
    // except that fields with TOML tags are promoted.

    // The fields are sorted in primary order of name, secondary order
    // of field index length. Loop over names; for each name, delete
    // hidden fields by choosing the one dominant field that survives.
    out := fields[:0]
    for advance, i := 0, 0; i < len(fields); i += advance {
        // One iteration per name.
        // Find the sequence of fields with the name of this first field.
        fi := fields[i]
        name := fi.name
        for advance = 1; i+advance < len(fields); advance++ {
            fj := fields[i+advance]
            if fj.name != name {
                break
            }
        }
        if advance == 1 { // Only one field with this name
            out = append(out, fi)
            continue
        }
        dominant, ok := dominantField(fields[i : i+advance])
        if ok {
            out = append(out, dominant)
        }
    }

    fields = out
    sort.Sort(byIndex(fields))

    return fields
}

// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// TOML tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
    // The fields are sorted in increasing index-length order. The winner
    // must therefore be one with the shortest index length. Drop all
    // longer entries, which is easy: just truncate the slice.
    length := len(fields[0].index)
    tagged := -1 // Index of first tagged field.
    for i, f := range fields {
        if len(f.index) > length {
            fields = fields[:i]
            break
        }
        if f.tag {
            if tagged >= 0 {
                // Multiple tagged fields at the same level: conflict.
                // Return no field.
                return field{}, false
            }
            tagged = i
        }
    }
    if tagged >= 0 {
        return fields[tagged], true
    }
    // All remaining fields have the same length. If there's more than one,
    // we have a conflict (two fields named "X" at the same level) and we
    // return no field.
    if len(fields) > 1 {
        return field{}, false
    }
    return fields[0], true
}

var fieldCache struct {
    sync.RWMutex
    m map[reflect.Type][]field
}

// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
    fieldCache.RLock()
    f := fieldCache.m[t]
    fieldCache.RUnlock()
    if f != nil {
        return f
    }

    // Compute fields without lock.
    // Might duplicate effort but won't hold other computations back.
    f = typeFields(t)
    if f == nil {
        f = []field{}
    }

    fieldCache.Lock()
    if fieldCache.m == nil {
        fieldCache.m = map[reflect.Type][]field{}
    }
    fieldCache.m[t] = f
    fieldCache.Unlock()
    return f
}
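
typeFields above derives each field's TOML key from its struct tag: a `toml:"-"` tag drops the field, an empty tag falls back to the Go field name, unexported fields are skipped, and dominantField settles collisions between embedded structs. A self-contained sketch of just the tag rules, using only the standard reflect package (the Server struct and its tags are invented for illustration):

package main

import (
    "fmt"
    "reflect"
)

type Server struct {
    Addr    string `toml:"address"` // decoded from the "address" key
    Port    int    // no tag: falls back to the Go field name "Port"
    secret  string // unexported: skipped (PkgPath != "")
    Ignored string `toml:"-"` // explicitly dropped
}

func main() {
    t := reflect.TypeOf(Server{})
    for i := 0; i < t.NumField(); i++ {
        sf := t.Field(i)
        if sf.PkgPath != "" { // unexported, same check as typeFields
            continue
        }
        name := sf.Tag.Get("toml")
        if name == "-" {
            continue
        }
        if name == "" {
            name = sf.Name
        }
        fmt.Printf("TOML key %q -> struct field %s\n", name, sf.Name)
    }
}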
@ -0,0 +1 @@
logrus
@ -0,0 +1,8 @@
language: go
go:
  - 1.2
  - 1.3
  - 1.4
  - tip
install:
  - go get -t ./...
@ -15,3 +15,4 @@ Trust servers are used as the authorities of the trust graph and allow caching p
Code and documentation copyright 2014 Docker, inc. Code released under the Apache 2.0 license.
Docs released under Creative commons.
Godeps/_workspace/src/github.com/docker/libtrust/agent/PROTOCOL.agent
@ -0,0 +1,560 @@
|
|||
This describes the protocol used by OpenSSH's ssh-agent.
|
||||
|
||||
OpenSSH's agent supports managing keys for the standard SSH protocol
|
||||
2 as well as the legacy SSH protocol 1. Support for these key types
|
||||
is almost completely disjoint - in all but a few cases, operations on
|
||||
protocol 2 keys cannot see or affect protocol 1 keys and vice-versa.
|
||||
|
||||
Protocol 1 and protocol 2 keys are separated because of the differing
|
||||
cryptographic usage: protocol 1 private RSA keys are used to decrypt
|
||||
challenges that were encrypted with the corresponding public key,
|
||||
whereas protocol 2 RSA private keys are used to sign challenges with
|
||||
a private key for verification with the corresponding public key. It
|
||||
is considered unsound practice to use the same key for signing and
|
||||
encryption.
|
||||
|
||||
With a couple of exceptions, the protocol message names used in this
|
||||
document indicate which type of key the message relates to. SSH_*
|
||||
messages refer to protocol 1 keys only. SSH2_* messages refer to
|
||||
protocol 2 keys. Furthermore, the names also indicate whether the
|
||||
message is a request to the agent (*_AGENTC_*) or a reply from the
|
||||
agent (*_AGENT_*). Section 3 below contains the mapping of the
|
||||
protocol message names to their integer values.
|
||||
|
||||
1. Data types
|
||||
|
||||
Because of support for legacy SSH protocol 1 keys, OpenSSH's agent
|
||||
protocol makes use of some data types not defined in RFC 4251.
|
||||
|
||||
1.1 uint16
|
||||
|
||||
The "uint16" data type is a simple MSB-first 16 bit unsigned integer
|
||||
encoded in two bytes.
|
||||
|
||||
1.2 mpint1
|
||||
|
||||
The "mpint1" type represents an arbitrary precision integer (bignum).
|
||||
Its format is as follows:
|
||||
|
||||
uint16 bits
|
||||
byte[(bits + 7) / 8] bignum
|
||||
|
||||
"bignum" contains an unsigned arbitrary precision integer encoded as
|
||||
eight bits per byte in big-endian (MSB first) format.
|
||||
|
||||
Note the difference between the "mpint1" encoding and the "mpint"
|
||||
encoding defined in RFC 4251. Also note that the length of the encoded
|
||||
integer is specified in bits, not bytes and that the byte length of
|
||||
the integer must be calculated by rounding up the number of bits to the
|
||||
nearest eight.
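
For readers following along in Go, here is a small illustrative sketch (not part of OpenSSH or libtrust) of the mpint1 encoding just described: a big-endian uint16 bit count followed by the magnitude in (bits + 7) / 8 big-endian bytes.

package main

import (
    "fmt"
    "math/big"
)

// encodeMpint1 sketches the mpint1 wire format described above: a
// big-endian uint16 bit count, then the magnitude in (bits+7)/8 bytes.
// (The helper name is mine, not part of any library.)
func encodeMpint1(n *big.Int) []byte {
    bits := n.BitLen()
    out := []byte{byte(bits >> 8), byte(bits)} // uint16, MSB first
    return append(out, n.Bytes()...)           // big.Int.Bytes() is big-endian, (bits+7)/8 bytes long
}

func main() {
    e := big.NewInt(65537) // a typical RSA public exponent
    fmt.Printf("% x\n", encodeMpint1(e)) // 00 11 01 00 01
}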
|
||||
|
||||
2. Protocol Messages
|
||||
|
||||
All protocol messages are prefixed with their length in bytes, encoded
|
||||
as a 32 bit unsigned integer. Specifically:
|
||||
|
||||
uint32 message_length
|
||||
byte[message_length] message
|
||||
|
||||
The following message descriptions refer only to the content the
|
||||
"message" field.
|
||||
|
||||
2.1 Generic server responses
|
||||
|
||||
The following generic messages may be sent by the server in response to
|
||||
requests from the client. On success the agent may reply either with:
|
||||
|
||||
byte SSH_AGENT_SUCCESS
|
||||
|
||||
or a request-specific success message.
|
||||
|
||||
On failure, the agent may reply with:
|
||||
|
||||
byte SSH_AGENT_FAILURE
|
||||
|
||||
SSH_AGENT_FAILURE messages are also sent in reply to unknown request
|
||||
types.
|
||||
|
||||
2.2 Adding keys to the agent
|
||||
|
||||
Keys are added to the agent using the SSH_AGENTC_ADD_RSA_IDENTITY and
|
||||
SSH2_AGENTC_ADD_IDENTITY requests for protocol 1 and protocol 2 keys
|
||||
respectively.
|
||||
|
||||
Two variants of these requests are SSH_AGENTC_ADD_RSA_ID_CONSTRAINED
|
||||
and SSH2_AGENTC_ADD_ID_CONSTRAINED - these add keys with optional
|
||||
"constraints" on their usage.
|
||||
|
||||
OpenSSH may be built with support for keys hosted on a smartcard
|
||||
or other hardware security module. These keys may be added
|
||||
to the agent using the SSH_AGENTC_ADD_SMARTCARD_KEY and
|
||||
SSH_AGENTC_ADD_SMARTCARD_KEY_CONSTRAINED requests.
|
||||
|
||||
2.2.1 Key constraints
|
||||
|
||||
The OpenSSH agent supports some basic optional constraints on key usage.
|
||||
At present there are two constraints defined.
|
||||
|
||||
The first constraint limits the validity duration of a key. It is
|
||||
encoded as:
|
||||
|
||||
byte SSH_AGENT_CONSTRAIN_LIFETIME
|
||||
uint32 seconds
|
||||
|
||||
Where "seconds" contains the number of seconds that the key shall remain
|
||||
valid measured from the moment that the agent receives it. After the
|
||||
validity period has expired, OpenSSH's agent will erase these keys from
|
||||
memory.
|
||||
|
||||
The second constraint requires the agent to seek explicit user
|
||||
confirmation before performing private key operations with the loaded
|
||||
key. This constraint is encoded as:
|
||||
|
||||
byte SSH_AGENT_CONSTRAIN_CONFIRM
|
||||
|
||||
Zero or more constraints may be specified when adding a key with one
|
||||
of the *_CONSTRAINED requests. Multiple constraints are appended
|
||||
consecutively to the end of the request:
|
||||
|
||||
byte constraint1_type
|
||||
.... constraint1_data
|
||||
byte constraint2_type
|
||||
.... constraint2_data
|
||||
....
|
||||
byte constraintN_type
|
||||
.... constraintN_data
|
||||
|
||||
Such a sequence of zero or more constraints will be referred to below
|
||||
as "constraint[]". Agents may determine whether there are constraints
|
||||
by checking whether additional data exists in the "add key" request
|
||||
after the key data itself. OpenSSH will refuse to add a key if it
|
||||
contains unknown constraints.
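
A hedged Go sketch of how a client might append these two constraints to the end of a *_CONSTRAINED add-key request (the helper name and layout are illustrative only; the identifier values come from section 3.7 below):

package main

import (
    "encoding/binary"
    "fmt"
)

// Constraint identifiers, from section 3.7 of this document.
const (
    SSH_AGENT_CONSTRAIN_LIFETIME = 1
    SSH_AGENT_CONSTRAIN_CONFIRM  = 2
)

// appendConstraints sketches how the optional constraints are appended,
// one after another, to the end of a *_CONSTRAINED "add key" request.
func appendConstraints(req []byte, lifetimeSeconds uint32, confirm bool) []byte {
    if lifetimeSeconds > 0 {
        req = append(req, SSH_AGENT_CONSTRAIN_LIFETIME)
        var secs [4]byte
        binary.BigEndian.PutUint32(secs[:], lifetimeSeconds)
        req = append(req, secs[:]...)
    }
    if confirm {
        req = append(req, SSH_AGENT_CONSTRAIN_CONFIRM)
    }
    return req
}

func main() {
    // A one-hour lifetime plus confirm-on-use: 01 00 00 0e 10 02
    fmt.Printf("% x\n", appendConstraints(nil, 3600, true))
}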
|
||||
|
||||
2.2.2 Add protocol 1 key
|
||||
|
||||
A client may add a protocol 1 key to an agent with the following
|
||||
request:
|
||||
|
||||
byte SSH_AGENTC_ADD_RSA_IDENTITY or
|
||||
SSH_AGENTC_ADD_RSA_ID_CONSTRAINED
|
||||
uint32 ignored
|
||||
mpint1 rsa_n
|
||||
mpint1 rsa_e
|
||||
mpint1 rsa_d
|
||||
mpint1 rsa_iqmp
|
||||
mpint1 rsa_q
|
||||
mpint1 rsa_p
|
||||
string key_comment
|
||||
constraint[] key_constraints
|
||||
|
||||
Note that there is some redundancy in the key parameters; a key could be
|
||||
fully specified using just rsa_q, rsa_p and rsa_e at the cost of extra
|
||||
computation.
|
||||
|
||||
"key_constraints" may only be present if the request type is
|
||||
SSH_AGENTC_ADD_RSA_ID_CONSTRAINED.
|
||||
|
||||
The agent will reply with a SSH_AGENT_SUCCESS if the key has been
|
||||
successfully added or a SSH_AGENT_FAILURE if an error occurred.
|
||||
|
||||
2.2.3 Add protocol 2 key
|
||||
|
||||
The OpenSSH agent supports DSA, ECDSA and RSA keys for protocol 2. DSA
|
||||
keys may be added using the following request
|
||||
|
||||
byte SSH2_AGENTC_ADD_IDENTITY or
|
||||
SSH2_AGENTC_ADD_ID_CONSTRAINED
|
||||
string "ssh-dss"
|
||||
mpint dsa_p
|
||||
mpint dsa_q
|
||||
mpint dsa_g
|
||||
mpint dsa_public_key
|
||||
mpint dsa_private_key
|
||||
string key_comment
|
||||
constraint[] key_constraints
|
||||
|
||||
DSA certificates may be added with:
|
||||
byte SSH2_AGENTC_ADD_IDENTITY or
|
||||
SSH2_AGENTC_ADD_ID_CONSTRAINED
|
||||
string "ssh-dss-cert-v00@openssh.com"
|
||||
string certificate
|
||||
mpint dsa_private_key
|
||||
string key_comment
|
||||
constraint[] key_constraints
|
||||
|
||||
ECDSA keys may be added using the following request
|
||||
|
||||
byte SSH2_AGENTC_ADD_IDENTITY or
|
||||
SSH2_AGENTC_ADD_ID_CONSTRAINED
|
||||
string "ecdsa-sha2-nistp256" |
|
||||
"ecdsa-sha2-nistp384" |
|
||||
"ecdsa-sha2-nistp521"
|
||||
string ecdsa_curve_name
|
||||
string ecdsa_public_key
|
||||
mpint ecdsa_private
|
||||
string key_comment
|
||||
constraint[] key_constraints
|
||||
|
||||
ECDSA certificates may be added with:
|
||||
byte SSH2_AGENTC_ADD_IDENTITY or
|
||||
SSH2_AGENTC_ADD_ID_CONSTRAINED
|
||||
string "ecdsa-sha2-nistp256-cert-v01@openssh.com" |
|
||||
"ecdsa-sha2-nistp384-cert-v01@openssh.com" |
|
||||
"ecdsa-sha2-nistp521-cert-v01@openssh.com"
|
||||
string certificate
|
||||
mpint ecdsa_private_key
|
||||
string key_comment
|
||||
constraint[] key_constraints
|
||||
|
||||
RSA keys may be added with this request:
|
||||
|
||||
byte SSH2_AGENTC_ADD_IDENTITY or
|
||||
SSH2_AGENTC_ADD_ID_CONSTRAINED
|
||||
string "ssh-rsa"
|
||||
mpint rsa_n
|
||||
mpint rsa_e
|
||||
mpint rsa_d
|
||||
mpint rsa_iqmp
|
||||
mpint rsa_p
|
||||
mpint rsa_q
|
||||
string key_comment
|
||||
constraint[] key_constraints
|
||||
|
||||
RSA certificates may be added with this request:
|
||||
|
||||
byte SSH2_AGENTC_ADD_IDENTITY or
|
||||
SSH2_AGENTC_ADD_ID_CONSTRAINED
|
||||
string "ssh-rsa-cert-v00@openssh.com"
|
||||
string certificate
|
||||
mpint rsa_d
|
||||
mpint rsa_iqmp
|
||||
mpint rsa_p
|
||||
mpint rsa_q
|
||||
string key_comment
|
||||
constraint[] key_constraints
|
||||
|
||||
Note that the 'rsa_p' and 'rsa_q' parameters are sent in the reverse
|
||||
order to the protocol 1 add keys message. As with the corresponding
|
||||
protocol 1 "add key" request, the private key is overspecified to avoid
|
||||
redundant processing.
|
||||
|
||||
For DSA, ECDSA and RSA key add requests, "key_constraints" may only be
|
||||
present if the request type is SSH2_AGENTC_ADD_ID_CONSTRAINED.
|
||||
|
||||
The agent will reply with a SSH_AGENT_SUCCESS if the key has been
|
||||
successfully added or a SSH_AGENT_FAILURE if an error occurred.
|
||||
|
||||
2.2.4 Loading keys from a smartcard
|
||||
|
||||
The OpenSSH agent may have optional smartcard support built in to it. If
|
||||
so, it supports an operation to load keys from a smartcard. Technically,
|
||||
only the public components of the keys are loaded into the agent so
|
||||
this operation really arranges for future private key operations to be
|
||||
delegated to the smartcard.
|
||||
|
||||
byte SSH_AGENTC_ADD_SMARTCARD_KEY or
|
||||
SSH_AGENTC_ADD_SMARTCARD_KEY_CONSTRAINED
|
||||
string reader_id
|
||||
string pin
|
||||
constraint[] key_constraints
|
||||
|
||||
"reader_id" is an identifier to a smartcard reader and "pin"
|
||||
is a PIN or passphrase used to unlock the private key(s) on the
|
||||
device. "key_constraints" may only be present if the request type is
|
||||
SSH_AGENTC_ADD_SMARTCARD_KEY_CONSTRAINED.
|
||||
|
||||
This operation may load all SSH keys that are unlocked using the
|
||||
"pin" on the specified reader. The type of key loaded (protocol 1
|
||||
or protocol 2) will be specified by the smartcard itself, it is not
|
||||
client-specified.
|
||||
|
||||
The agent will reply with a SSH_AGENT_SUCCESS if one or more keys have
|
||||
been successfully loaded or a SSH_AGENT_FAILURE if an error occurred.
|
||||
The agent will also return SSH_AGENT_FAILURE if it does not support
|
||||
smartcards.
|
||||
|
||||
2.3 Removing multiple keys
|
||||
|
||||
A client may request that an agent delete all protocol 1 keys using the
|
||||
following request:
|
||||
|
||||
byte SSH_AGENTC_REMOVE_ALL_RSA_IDENTITIES
|
||||
|
||||
This message requests the deletion of all protocol 2 keys:
|
||||
|
||||
byte SSH2_AGENTC_REMOVE_ALL_IDENTITIES
|
||||
|
||||
On success, the agent will delete all keys of the requested type and
|
||||
reply with a SSH_AGENT_SUCCESS message. If an error occurred, the agent
|
||||
will reply with SSH_AGENT_FAILURE.
|
||||
|
||||
Note that, to delete all keys (both protocol 1 and 2), a client
|
||||
must send both a SSH_AGENTC_REMOVE_ALL_RSA_IDENTITIES and a
|
||||
SSH2_AGENTC_REMOVE_ALL_IDENTITIES request.
|
||||
|
||||
2.4 Removing specific keys
|
||||
|
||||
2.4.1 Removing a protocol 1 key
|
||||
|
||||
Removal of a protocol 1 key may be requested with the following message:
|
||||
|
||||
byte SSH_AGENTC_REMOVE_RSA_IDENTITY
|
||||
uint32 key_bits
|
||||
mpint1 rsa_e
|
||||
mpint1 rsa_n
|
||||
|
||||
Note that key_bits is strictly redundant, as it may be inferred by the
|
||||
length of rsa_n.
|
||||
|
||||
The agent will delete any private key matching the specified public key
|
||||
and return SSH_AGENT_SUCCESS. If no such key was found, the agent will
|
||||
return SSH_AGENT_FAILURE.
|
||||
|
||||
2.4.2 Removing a protocol 2 key
|
||||
|
||||
Protocol 2 keys may be removed with the following request:
|
||||
|
||||
byte SSH2_AGENTC_REMOVE_IDENTITY
|
||||
string key_blob
|
||||
|
||||
Where "key_blob" is encoded as per RFC 4253 section 6.6 "Public Key
|
||||
Algorithms" for any of the supported protocol 2 key types.
|
||||
|
||||
The agent will delete any private key matching the specified public key
|
||||
and return SSH_AGENT_SUCCESS. If no such key was found, the agent will
|
||||
return SSH_AGENT_FAILURE.
|
||||
|
||||
2.4.3 Removing keys loaded from a smartcard
|
||||
|
||||
A client may request that a server remove one or more smartcard-hosted
|
||||
keys using this message:
|
||||
|
||||
byte SSH_AGENTC_REMOVE_SMARTCARD_KEY
|
||||
string reader_id
|
||||
string pin
|
||||
|
||||
"reader_id" the an identifier to a smartcard reader and "pin" is a PIN
|
||||
or passphrase used to unlock the private key(s) on the device.
|
||||
|
||||
When this message is received, and if the agent supports
|
||||
smartcard-hosted keys, it will delete all keys that are hosted on the
|
||||
specified smartcard that may be accessed with the given "pin".
|
||||
|
||||
The agent will reply with a SSH_AGENT_SUCCESS if one or more keys have
|
||||
been successfully removed or a SSH_AGENT_FAILURE if an error occurred.
|
||||
The agent will also return SSH_AGENT_FAILURE if it does not support
|
||||
smartcards.
|
||||
|
||||
2.5 Requesting a list of known keys
|
||||
|
||||
An agent may be requested to list which keys it holds. Different
|
||||
requests exist for protocol 1 and protocol 2 keys.
|
||||
|
||||
2.5.1 Requesting a list of protocol 1 keys
|
||||
|
||||
To request a list of protocol 1 keys that are held in the agent, a
|
||||
client may send the following message:
|
||||
|
||||
byte SSH_AGENTC_REQUEST_RSA_IDENTITIES
|
||||
|
||||
The agent will reply with the following message:
|
||||
|
||||
byte SSH_AGENT_RSA_IDENTITIES_ANSWER
|
||||
uint32 num_keys
|
||||
|
||||
Followed by zero or more consecutive keys, encoded as:
|
||||
|
||||
uint32 bits
|
||||
mpint1 rsa_e
|
||||
mpint1 rsa_n
|
||||
string key_comment
|
||||
|
||||
2.5.2 Requesting a list of protocol 2 keys
|
||||
|
||||
A client may send the following message to request a list of
|
||||
protocol 2 keys that are stored in the agent:
|
||||
|
||||
byte SSH2_AGENTC_REQUEST_IDENTITIES
|
||||
|
||||
The agent will reply with the following message header:
|
||||
|
||||
byte SSH2_AGENT_IDENTITIES_ANSWER
|
||||
uint32 num_keys
|
||||
|
||||
Followed by zero or more consecutive keys, encoded as:
|
||||
|
||||
string key_blob
|
||||
string key_comment
|
||||
|
||||
Where "key_blob" is encoded as per RFC 4253 section 6.6 "Public Key
|
||||
Algorithms" for any of the supported protocol 2 key types.
|
||||
|
||||
2.6 Private key operations
|
||||
|
||||
The purpose of the agent is to perform private key operations, such as
|
||||
signing and encryption without requiring a passphrase to unlock the
|
||||
key and without allowing the private key itself to be exposed. There
|
||||
are separate requests for the protocol 1 and protocol 2 private key
|
||||
operations.
|
||||
|
||||
2.6.1 Protocol 1 private key challenge
|
||||
|
||||
The private key operation used in version 1 of the SSH protocol is
|
||||
decrypting a challenge that has been encrypted with a public key.
|
||||
It may be requested using this message:
|
||||
|
||||
byte SSH_AGENTC_RSA_CHALLENGE
|
||||
uint32 ignored
|
||||
mpint1 rsa_e
|
||||
mpint1 rsa_n
|
||||
mpint1 encrypted_challenge
|
||||
byte[16] session_id
|
||||
uint32 response_type /* must be 1 */
|
||||
|
||||
"rsa_e" and "rsa_n" are used to identify which private key to use.
|
||||
"encrypted_challenge" is a challenge blob that has (presumably)
|
||||
been encrypted with the public key and must be in the range
|
||||
1 <= encrypted_challenge < 2^256. "session_id" is the SSH protocol 1
|
||||
session ID (computed from the server host key, the server semi-ephemeral
|
||||
key and the session cookie).
|
||||
|
||||
"ignored" and "response_type" exist for compatibility with legacy
|
||||
implementations. "response_type" must be equal to 1; other response
|
||||
types are not supported.
|
||||
|
||||
On receiving this request, the server decrypts the "encrypted_challenge"
|
||||
using the private key matching the supplied (rsa_e, rsa_n) values. For
|
||||
the response derivation, the decrypted challenge is represented as an
|
||||
unsigned, big-endian integer encoded in a 32 byte buffer (i.e. values
|
||||
smaller than 2^248 will have leading 0 bytes).
|
||||
|
||||
The response value is then calculated as:
|
||||
|
||||
response = MD5(decrypted_challenge || session_id)
|
||||
|
||||
and returned in the following message
|
||||
|
||||
byte SSH_AGENT_RSA_RESPONSE
|
||||
byte[16] response
|
||||
|
||||
If the agent cannot find the key specified by the supplied (rsa_e,
|
||||
rsa_n) then it will return SSH_AGENT_FAILURE.
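
An illustrative Go sketch of the response computation described above (the function name is made up; it assumes the decrypted challenge is below 2^256, as the protocol requires):

package main

import (
    "crypto/md5"
    "fmt"
    "math/big"
)

// challengeResponse sketches the computation above: the decrypted
// challenge is laid out as an unsigned 32-byte big-endian buffer
// (leading zero bytes for values below 2^248), concatenated with the
// 16-byte session ID, and hashed with MD5.
func challengeResponse(decryptedChallenge *big.Int, sessionID [16]byte) [16]byte {
    var buf [32]byte
    b := decryptedChallenge.Bytes() // big-endian, no leading zeros
    copy(buf[32-len(b):], b)        // right-align to add the leading zeros
    return md5.Sum(append(buf[:], sessionID[:]...))
}

func main() {
    var sid [16]byte // would come from the SSH protocol 1 key exchange
    resp := challengeResponse(big.NewInt(42), sid)
    fmt.Printf("% x\n", resp[:])
}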
|
||||
|
||||
2.6.2 Protocol 2 private key signature request
|
||||
|
||||
A client may use the following message to request signing of data using
|
||||
a protocol 2 key:
|
||||
|
||||
byte SSH2_AGENTC_SIGN_REQUEST
|
||||
string key_blob
|
||||
string data
|
||||
uint32 flags
|
||||
|
||||
Where "key_blob" is encoded as per RFC 4253 section 6.6 "Public Key
|
||||
Algorithms" for any of the supported protocol 2 key types. "flags" is
|
||||
a bit-mask, but at present only one possible value is defined (see below
|
||||
for its meaning):
|
||||
|
||||
SSH_AGENT_OLD_SIGNATURE 1
|
||||
|
||||
Upon receiving this request, the agent will look up the private key that
|
||||
corresponds to the public key contained in key_blob. It will use this
|
||||
private key to sign the "data" and produce a signature blob using the
|
||||
key type-specific method described in RFC 4253 section 6.6 "Public Key
|
||||
Algorithms".
|
||||
|
||||
An exception to this is for "ssh-dss" keys where the "flags" word
|
||||
contains the value SSH_AGENT_OLD_SIGNATURE. In this case, a legacy
|
||||
signature encoding is used in lieu of the standard one. In this case,
|
||||
the DSA signature blob is encoded as:
|
||||
|
||||
byte[40] signature
|
||||
|
||||
The signature will be returned in the response message:
|
||||
|
||||
byte SSH2_AGENT_SIGN_RESPONSE
|
||||
string signature_blob
|
||||
|
||||
If the agent cannot find the key specified by the supplied key_blob then
|
||||
it will return SSH_AGENT_FAILURE.
|
||||
|
||||
2.7 Locking or unlocking an agent
|
||||
|
||||
The agent supports temporary locking with a passphrase to suspend
|
||||
processing of sensitive operations until it has been unlocked with the
|
||||
same passphrase. To lock an agent, a client sends the following request:
|
||||
|
||||
byte SSH_AGENTC_LOCK
|
||||
string passphrase
|
||||
|
||||
Upon receipt of this message and if the agent is not already locked,
|
||||
it will suspend processing requests and return a SSH_AGENT_SUCCESS
|
||||
reply. If the agent is already locked, it will return SSH_AGENT_FAILURE.
|
||||
|
||||
While locked, the agent will refuse all requests except
|
||||
SSH_AGENTC_UNLOCK, SSH_AGENTC_REQUEST_RSA_IDENTITIES and
|
||||
SSH2_AGENTC_REQUEST_IDENTITIES. The "request identities" requests are
|
||||
treated specially by a locked agent: it will always return an empty list
|
||||
of keys.
|
||||
|
||||
To unlock an agent, a client may request:
|
||||
|
||||
byte SSH_AGENTC_UNLOCK
|
||||
string passphrase
|
||||
|
||||
If the passphrase matches and the agent is locked, then it will resume
|
||||
processing all requests and return SSH_AGENT_SUCCESS. If the agent
|
||||
is not locked or the passphrase does not match then it will return
|
||||
SSH_AGENT_FAILURE.
|
||||
|
||||
Locking and unlocking affects both protocol 1 and protocol 2 keys.
|
||||
|
||||
3. Protocol message numbers
|
||||
|
||||
3.1 Requests from client to agent for protocol 1 key operations
|
||||
|
||||
SSH_AGENTC_REQUEST_RSA_IDENTITIES 1
|
||||
SSH_AGENTC_RSA_CHALLENGE 3
|
||||
SSH_AGENTC_ADD_RSA_IDENTITY 7
|
||||
SSH_AGENTC_REMOVE_RSA_IDENTITY 8
|
||||
SSH_AGENTC_REMOVE_ALL_RSA_IDENTITIES 9
|
||||
SSH_AGENTC_ADD_RSA_ID_CONSTRAINED 24
|
||||
|
||||
3.2 Requests from client to agent for protocol 2 key operations
|
||||
|
||||
SSH2_AGENTC_REQUEST_IDENTITIES 11
|
||||
SSH2_AGENTC_SIGN_REQUEST 13
|
||||
SSH2_AGENTC_ADD_IDENTITY 17
|
||||
SSH2_AGENTC_REMOVE_IDENTITY 18
|
||||
SSH2_AGENTC_REMOVE_ALL_IDENTITIES 19
|
||||
SSH2_AGENTC_ADD_ID_CONSTRAINED 25
|
||||
|
||||
3.3 Key-type independent requests from client to agent
|
||||
|
||||
SSH_AGENTC_ADD_SMARTCARD_KEY 20
|
||||
SSH_AGENTC_REMOVE_SMARTCARD_KEY 21
|
||||
SSH_AGENTC_LOCK 22
|
||||
SSH_AGENTC_UNLOCK 23
|
||||
SSH_AGENTC_ADD_SMARTCARD_KEY_CONSTRAINED 26
|
||||
|
||||
3.4 Generic replies from agent to client
|
||||
|
||||
SSH_AGENT_FAILURE 5
|
||||
SSH_AGENT_SUCCESS 6
|
||||
|
||||
3.5 Replies from agent to client for protocol 1 key operations
|
||||
|
||||
SSH_AGENT_RSA_IDENTITIES_ANSWER 2
|
||||
SSH_AGENT_RSA_RESPONSE 4
|
||||
|
||||
3.6 Replies from agent to client for protocol 2 key operations
|
||||
|
||||
SSH2_AGENT_IDENTITIES_ANSWER 12
|
||||
SSH2_AGENT_SIGN_RESPONSE 14
|
||||
|
||||
3.7 Key constraint identifiers
|
||||
|
||||
SSH_AGENT_CONSTRAIN_LIFETIME 1
|
||||
SSH_AGENT_CONSTRAIN_CONFIRM 2
|
||||
|
||||
$OpenBSD: PROTOCOL.agent,v 1.7 2013/01/02 00:33:49 djm Exp $
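
For convenience, the message numbers from section 3 transcribed as Go constants (an editorial aid only, not a shipped package; the underscored names mirror the protocol identifiers above):

package agentproto // hypothetical package name, for reference only

// Requests from client to agent (sections 3.1–3.3).
const (
    SSH_AGENTC_REQUEST_RSA_IDENTITIES        = 1
    SSH_AGENTC_RSA_CHALLENGE                 = 3
    SSH_AGENTC_ADD_RSA_IDENTITY              = 7
    SSH_AGENTC_REMOVE_RSA_IDENTITY           = 8
    SSH_AGENTC_REMOVE_ALL_RSA_IDENTITIES     = 9
    SSH2_AGENTC_REQUEST_IDENTITIES           = 11
    SSH2_AGENTC_SIGN_REQUEST                 = 13
    SSH2_AGENTC_ADD_IDENTITY                 = 17
    SSH2_AGENTC_REMOVE_IDENTITY              = 18
    SSH2_AGENTC_REMOVE_ALL_IDENTITIES        = 19
    SSH_AGENTC_ADD_SMARTCARD_KEY             = 20
    SSH_AGENTC_REMOVE_SMARTCARD_KEY          = 21
    SSH_AGENTC_LOCK                          = 22
    SSH_AGENTC_UNLOCK                        = 23
    SSH_AGENTC_ADD_RSA_ID_CONSTRAINED        = 24
    SSH2_AGENTC_ADD_ID_CONSTRAINED           = 25
    SSH_AGENTC_ADD_SMARTCARD_KEY_CONSTRAINED = 26
)

// Replies from agent to client (sections 3.4–3.6).
const (
    SSH_AGENT_RSA_IDENTITIES_ANSWER = 2
    SSH_AGENT_RSA_RESPONSE          = 4
    SSH_AGENT_FAILURE               = 5
    SSH_AGENT_SUCCESS               = 6
    SSH2_AGENT_IDENTITIES_ANSWER    = 12
    SSH2_AGENT_SIGN_RESPONSE        = 14
)

// Key constraint identifiers (section 3.7).
const (
    SSH_AGENT_CONSTRAIN_LIFETIME = 1
    SSH_AGENT_CONSTRAIN_CONFIRM  = 2
)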
|
Binary file not shown.
|
@ -0,0 +1,31 @@
package main

import (
    "code.google.com/p/go.crypto/ssh/agent"
    "log"
    "net"
    "os"
)

func main() {
    c, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
    if err != nil {
        log.Fatal(err)
    }
    defer c.Close()

    agent := agent.NewClient(c)

    log.Println("Listing agent keys")

    log.Println("SSH")
    keys, err := agent.List()
    if err != nil {
        log.Fatalf("Error listing keys: %s", err)
    }
    for _, k := range keys {
        log.Printf("Key: %s", k.String())
        log.Printf("Type: %s", k.Type())
    }
}
Binary file not shown.
|
@ -0,0 +1,27 @@
|
|||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEpQIBAAKCAQEAttE00a4G72DrFCOUhxlfxpTGCoIapwXlaTD3xUqNsjEPIkBg
|
||||
4egRWIfLNThA89SaFoe0nAHzpvmVzDzLqiV1dlQZG9NQjudmAu/lRBTp26MLQgT9
|
||||
yl7xFthyTOmkkysTwyS300DXrMjew9rjd7hc4qqxTorLI47lZtZPxdGh+jProOKW
|
||||
KnVJfXw3hfRkp9Y3TAarhcV8brRdlzoroWgxPEiZETRrAkGD+o4DmJozOPoL6L05
|
||||
ZAQ6PIY5IQ+oy0h9rkZ7hwAyUdqzod4EogD9mIAp4UrMQemYgMkxPGwSKHN/1KjU
|
||||
70AcKDayNti8/sGbBig8w35Ju7YuJKWPDBiEFQIDAQABAoIBAAGZ53qGqazjDmwI
|
||||
u75LLJmSqAFjjlQ0KwQ215S0yHTtFRLZuX+BBtXdqpv2uCrqi8byvVQ4eCfYO8Wk
|
||||
Kqx804CL0AR8X1JBGJWiAEIOZNX1oZ2caZSiLD5k+utNiJF5aRzmb228PadXwwu4
|
||||
GRHtXXLmx9LGSG0xTTUPX3d+FIsacYTuukgp73JF2YhNl1N1ThwCZINcYjyfOBHK
|
||||
geDisw8yLIKLb5QCrMKWl1kS3Z6104JfBq00sjm3Yi7JjPfObepP9HXvUB8p0J6d
|
||||
QYeHnqqqr2AkUiUBNXSt6GaxW0Mc5E/mnVzPa+jrKlIe2bmmQZt/lHJZsLT6F4Q0
|
||||
OUdA2cECgYEAz6R7/B0VTLX4PdZ15vU4ao5bSxu3Gxe3IxVB1+PQLJklpBCc6npk
|
||||
akrrt3STYgiWG+Dcz3cD3SKVs9oei5t8s2Db83XVfsBRNFFf9GIVncNc1ia+cJ7/
|
||||
PDcLmmY1U0oByJr4xoyA4wYARuxS5MP3PljFoDCeaU4APX8GQgBgc7kCgYEA4WSn
|
||||
LNUVPAR8CMf3926LQsnu7z3y6HkgmX/txUspb7ji3X1+eL/yKhIKsDFPppAUc/Tn
|
||||
+W6ef7RtcZPFhA1HAiqvwvzAh6YUvnQ6NVzXqb1X73Ub1wf/kBK4pbuP9trOvR2f
|
||||
b95iIq6qt22uDYaeGBqR6VDOdRe5Sbavlmv++T0CgYEAjE2NRWZ1bPcMPOR7mEqc
|
||||
C0wTneWlTRYyNe94JSOXF3uH9psPHFUFItlq+vQ3XieFHmNLcJfvsW1cEEHTH+w5
|
||||
T4+kL5awYswaUqqllqDsEETgpwdY6QpWVl6vZW2onJXGLMGiVa68rCny5/lpbZCI
|
||||
sMUVDE+tymPdP7BkDxboFWkCgYEAwmf9pba41QaNMUnBgGKRrW7dLu+A57EqRBvK
|
||||
qwR0Xg/bZe2LO31b05uyKlyt9d2KNqYDu+oEveOTDobFs2t8VssYxJcbPSUr+UXo
|
||||
uwQ0ZAyTn0+kTqlwbgUnzJNlzibWcNnvHh9zB7UePI5WtQXqsKGiPtra6LYotALl
|
||||
mlnWF4UCgYEAjFJG9KRuIUe9IzXI2XxA22/seaji9qy7/ojHHlo9iwQeb/OPJyp4
|
||||
sCHS/qiMAfMU6r7OyOfnI51TsP3gr92/VKo/RoXn3mxCjFUXe3sIxsArOZ8f38DM
|
||||
ww46KDGHFemUCeycnbjoZcnqO1V8Vdj6FFENPuq0UxS7NW+4dcxlfJE=
|
||||
-----END RSA PRIVATE KEY-----
|
|
@ -0,0 +1,18 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIC1jCCAcCgAwIBAgIRAOH/6qnnXARR27iWmrzfGAAwCwYJKoZIhvcNAQEFMBYx
|
||||
FDASBgNVBAoTC0Jvb3QyRG9ja2VyMB4XDTE0MDgwNTA3MDc1NFoXDTE3MDcyMDA3
|
||||
MDc1NFowFjEUMBIGA1UEChMLQm9vdDJEb2NrZXIwggEiMA0GCSqGSIb3DQEBAQUA
|
||||
A4IBDwAwggEKAoIBAQC20TTRrgbvYOsUI5SHGV/GlMYKghqnBeVpMPfFSo2yMQ8i
|
||||
QGDh6BFYh8s1OEDz1JoWh7ScAfOm+ZXMPMuqJXV2VBkb01CO52YC7+VEFOnbowtC
|
||||
BP3KXvEW2HJM6aSTKxPDJLfTQNesyN7D2uN3uFziqrFOissjjuVm1k/F0aH6M+ug
|
||||
4pYqdUl9fDeF9GSn1jdMBquFxXxutF2XOiuhaDE8SJkRNGsCQYP6jgOYmjM4+gvo
|
||||
vTlkBDo8hjkhD6jLSH2uRnuHADJR2rOh3gSiAP2YgCnhSsxB6ZiAyTE8bBIoc3/U
|
||||
qNTvQBwoNrI22Lz+wZsGKDzDfkm7ti4kpY8MGIQVAgMBAAGjIzAhMA4GA1UdDwEB
|
||||
/wQEAwIApDAPBgNVHRMBAf8EBTADAQH/MAsGCSqGSIb3DQEBBQOCAQEAqtoYLEuU
|
||||
unbLo48zOHVSit3tIAnZ775HDy1WWK7zL1AzbB0WwT1nrB+SzR8lGzYmmPJB8sCH
|
||||
rHBPpCF8MNqpZI8NOkS4ZcLF+WJhcZRnIsSz64lzFQThu2rAK7fhheAo3ONZYAYJ
|
||||
DkZZrF7IMH3vMaQGZESBKuYiTstz+xMRTXxDQdgdpKZ1q2XB9wBeHLu6HRSh0cR4
|
||||
0Ehs+NCFj50JkgzrxdXgSOlrSk2Icztb1MH89CuRPZNEbJbd1cU/kekxtkLzDtmX
|
||||
Kai2FoHdonB1m5IfBJ7n2h5sexs73Qymohc4W81OZivdB/I+OixMjbc3Pn65KmWA
|
||||
foD/K4Atw1090w==
|
||||
-----END CERTIFICATE-----
|
|
@ -0,0 +1,18 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIC+DCCAeKgAwIBAgIQHxTMkfKCDjxUR1lji0Hs/TALBgkqhkiG9w0BAQUwFjEU
|
||||
MBIGA1UEChMLQm9vdDJEb2NrZXIwHhcNMTQwODA1MDYwMTU0WhcNMTcwNzIwMDYw
|
||||
MTU0WjAWMRQwEgYDVQQKEwtCb290MkRvY2tlcjCCASIwDQYJKoZIhvcNAQEBBQAD
|
||||
ggEPADCCAQoCggEBALgW92LOxgrH59EoMJTsBDuKp2w2vOaY+WT4UduqxWTtPaQv
|
||||
aeGmK4/1T562prRIG/dH9IR/wYR3fYXKUkcHP6FaPTUnGEhUcnjg8vFMwoYnGLUo
|
||||
nCBK0HiZMPKkjnWzkiVJgdpQt3pLIRAomT0ehtP6LfMsrJoof8MCI+anu450hV/6
|
||||
VATtmxy8FnivfiLFlroolPXKp5kQck7nJBhsIOY4BDT8qgehGXtpJuqZaBrhCY2S
|
||||
7sAzIZtemWYFQNvwJDsMmsMNBMur4SwA/kST0kTlczH2vI423CeTa2PsrZmoFQvt
|
||||
whohfdLTH1Ul3bbtCPnDD2StkqUs9/JTigyEaj8CAwEAAaNGMEQwDgYDVR0PAQH/
|
||||
BAQDAgCgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwDwYDVR0R
|
||||
BAgwBocEfwAAATALBgkqhkiG9w0BAQUDggEBAISM38fkqTAg5lP/fpgW6vmNNW+U
|
||||
VeQ/ZlXwQxzu+gmIli+PgPy/QM98xxs+SR3NwWTlh5yHDKHYPbmc1FRV5q8U1zvf
|
||||
DqDmyDEQTMCwWMOBRKwfGcmzV4WlZvxR96XQEpo0zfP61eMo7ditRraDD1PMBNGv
|
||||
4QqGJADKKHfqm53eS1tzd0MSBteah59OYLzF0hjXT/NL9P00XaotRznFmsjmura6
|
||||
0Xs6EKrudPTAdfCWBwundclq7d9BofU8/kBKwE8J7zCI6KA3fpGMutMojVIopIFO
|
||||
Bowka1TKV+2+8Nt1gqIvpvlX4E119BemIA/m9uZiSdYahffHniKBFiXsr8Y=
|
||||
-----END CERTIFICATE-----
|
Godeps/_workspace/src/github.com/docker/libtrust/b2d/client-key.pem
|
@ -0,0 +1,27 @@
|
|||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEpQIBAAKCAQEAvv7GjStV0fiOc1wf1hm2B3YGwTp6W0gCpYpOqEotetVOBf5c
|
||||
/9qbn4ihOZJK2GY69z6SchttmAL87EfhzFQjDifv0skjKHJkspyi80OJYeM+Tlj8
|
||||
4kHI3WXkD9K9UPO3JGkeNrTjOKIXbcQDOgYkDl6wYW6bdSIEyjxQ4+0jTZX0wYD4
|
||||
anSNJJXYA9UWq8B0GiwIrRF1378AjZzGyOuBhw1nj6AfAgsgVmNPu+Dh/5nteKEe
|
||||
HVz+iUOsiUmzZQ/4y+XxyFqe00+6gzF7dN18cRLatj9ifIsiSyEZUeF0vsqKyzG4
|
||||
HVG9o2R5uZoBOvyNMpUlxv9dVaSqCjHB3rELmwIDAQABAoIBAQCS6072lug8TXPe
|
||||
v3Xp7vs8QAkwKHiosPsANiUNhYn4nA6Zw7HsSlA95w8GR0d1sSYYvLRQPFMQhPPk
|
||||
GeNSl0IkCIqN3ZVC7sQRNTaP5FrQxspN4Fwe+7W0wUUu+tAQbnlp88Z/FKRZEy7s
|
||||
i8RjpQelWidZu4iwEMwYeU9MFUV7ztK5U+9NScu8d4svVbqZuBDrZiofBD++MYN4
|
||||
SvIzXCku+r67W2nN1a94/87MWGTUr8oGWbkvRIJ/23ifnvL7cQeqJ08VPr8BIj5w
|
||||
4su14xpITXEH02WpgTruEDrk2DO6wfoqAGJ/fZHRkDVIFSXd5miwurKQmL1Iursk
|
||||
FU98mPsBAoGBAOldZ9HDFX69+XGW4u80qkkaCHGrrKRDSHA1HfNEOu1nhyMD1vXW
|
||||
8xwOJ5H8yBrEMTSP2N8bdeEp8m/aKuleUpzIjxNyIbiw0m4v3p+lpbgEVKfC8L/B
|
||||
umU4HtRYgujvOfUFWEnvonQh9k1J/N5Ym93xXgs4WXzRG2ABeEmlsjGBAoGBANGF
|
||||
Ti2MdQYXf51Hgsw9oADiyEimbw6wEsLvK1sSA+lscTCyBdZyD+5PntnC5jcUtg1p
|
||||
moVFxHkyatVE3TffchTwjPgKAv0zKh3e6w9mJhRd1c9vNaTiIqPr0MyaftrZ1tSH
|
||||
DnAK8Lisd3zODaVawbPyOBPFfUnqWzYqIsPpWVMbAoGBAJOuWr8yV+4HR06NEDwU
|
||||
9hag9OsKm4ywT61C0Btc+HW46JpOGcEmMF92za/jarcVpZX/2l0GwcimhRgn1rQs
|
||||
ztcNTQ4fUhvROrokbRxW9dLIWtJ32WdgsBbSNyBmiyHiEqluonrXvdUEkRxEUFRd
|
||||
evYvgzZSiL8mjc5p37ertmMBAoGAWAuAsrtXYdv2EI3AdTtgRX8HUyA9gxqSh8Ah
|
||||
o/+KDUk4t5HXyincb/SdeDziqTrW1gQOnvqWeaeqJTzvCU+ojQwY8RKj9urNRAgt
|
||||
FyeB0QUtmCHPlR5CGEg8Uf6KWEU6dczbUFIInlI7VALy0Q22YHvk/Mn8wFbvRW+n
|
||||
dFRW6QkCgYEAtOoFUZSlJkU6LleX+M7dS6Jbd44SbCb+acirbl6eHKxEDLL2+MRM
|
||||
qZui45SqnCFnyWo8cgW4xqFeJVGoeTDFZK65Qup1IsBlrPODmTes5cSMPGdZ3brG
|
||||
BVCVcmuygGkMC/tGOmydVStZx0Spt0va96p/Na2x4akdscZKVTCCap4=
|
||||
-----END RSA PRIVATE KEY-----
|
|
@ -0,0 +1,18 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIC4DCCAcqgAwIBAgIRAJ6X6zjqpxM03GqlIXCYtpIwCwYJKoZIhvcNAQEFMBYx
|
||||
FDASBgNVBAoTC0Jvb3QyRG9ja2VyMB4XDTE0MDgwNTA3MDc1OFoXDTE3MDcyMDA3
|
||||
MDc1OFowFjEUMBIGA1UEChMLQm9vdDJEb2NrZXIwggEiMA0GCSqGSIb3DQEBAQUA
|
||||
A4IBDwAwggEKAoIBAQC+/saNK1XR+I5zXB/WGbYHdgbBOnpbSAKlik6oSi161U4F
|
||||
/lz/2pufiKE5kkrYZjr3PpJyG22YAvzsR+HMVCMOJ+/SySMocmSynKLzQ4lh4z5O
|
||||
WPziQcjdZeQP0r1Q87ckaR42tOM4ohdtxAM6BiQOXrBhbpt1IgTKPFDj7SNNlfTB
|
||||
gPhqdI0kldgD1RarwHQaLAitEXXfvwCNnMbI64GHDWePoB8CCyBWY0+74OH/me14
|
||||
oR4dXP6JQ6yJSbNlD/jL5fHIWp7TT7qDMXt03XxxEtq2P2J8iyJLIRlR4XS+yorL
|
||||
MbgdUb2jZHm5mgE6/I0ylSXG/11VpKoKMcHesQubAgMBAAGjLTArMA4GA1UdDwEB
|
||||
/wQEAwIAoDAMBgNVHRMBAf8EAjAAMAsGA1UdEQQEMAKCADALBgkqhkiG9w0BAQUD
|
||||
ggEBAKXqTtDj/F2AJj55trGsk/fc3LtYTOeq0I9JB45hNOjpixpBdsS1VxkxdTM5
|
||||
3wgUVEUyjQzBWtSBO5jRrDjOHsXqtqh83+Jh+GwUAf3mncGSss7JJ78kYe4zBxSi
|
||||
tgMc3KUi82ppoeiB0xsaEXfBRf6U+wJNVATpabJPeveMhaRVAd9gE5aLWrkZ09+b
|
||||
wR5y4YXZ0LncqWYnq03BAPn0eTDWgzJvXaSSyE9wFF847T9XRicwzsgTNXzqb4PU
|
||||
5s23GHfkIIv6PQzOUSpYTyCVWBdOk2qnhyUkeYCX+ZfwvJlb2NIwQjMwaysX6ZdJ
|
||||
T2q+prcDXnzOhwpsqUbTOD8RxOM=
|
||||
-----END CERTIFICATE-----
|
Godeps/_workspace/src/github.com/docker/libtrust/b2d/generate_cert.go
|
@ -0,0 +1,173 @@
|
|||
// Usage:
// Generate CA
// ./generate_cert --cert ca.pem --key ca-key.pem
// Generate signed certificate
// ./generate_cert --host 127.0.0.1 --cert cert.pem --key key.pem --ca ca.pem --ca-key ca-key.pem

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/tls"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"flag"
	"log"
	"math/big"
	"net"
	"os"
	"strings"
	"time"
)

var (
	host     = flag.String("host", "", "Comma-separated hostnames and IPs to generate a certificate for")
	certFile = flag.String("cert", "", "Output file for certificate")
	keyFile  = flag.String("key", "", "Output file for key")
	ca       = flag.String("ca", "", "Certificate authority file to sign with")
	caKey    = flag.String("ca-key", "", "Certificate authority key file to sign with")
)

const (
	RSABITS  = 2048
	VALIDFOR = 1080 * 24 * time.Hour
	ORG      = "Boot2Docker"
)

func main() {
	flag.Parse()

	if *certFile == "" {
		log.Fatalf("Missing required parameter: --cert")
	}

	if *keyFile == "" {
		log.Fatalf("Missing required parameter: --key")
	}

	if *ca == "" {
		if *caKey != "" {
			log.Fatalf("Must provide both --ca and --ca-key")
		}
		if err := GenerateCA(*certFile, *keyFile); err != nil {
			log.Fatalf("Failed to generate CA: %s", err)
		}
	} else {
		if err := GenerateCert(strings.Split(*host, ","), *certFile, *keyFile, *ca, *caKey); err != nil {
			log.Fatalf("Failed to generate cert: %s", err)
		}
	}
}

// newCertificate creates a new template
func newCertificate() *x509.Certificate {
	notBefore := time.Now()
	notAfter := notBefore.Add(time.Hour * 24 * 1080)

	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
	if err != nil {
		log.Fatalf("failed to generate serial number: %s", err)
	}

	return &x509.Certificate{
		SerialNumber: serialNumber,
		Subject: pkix.Name{
			Organization: []string{ORG},
		},
		NotBefore: notBefore,
		NotAfter:  notAfter,

		KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		//ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
		BasicConstraintsValid: true,
	}
}

// GenerateCA generates a new certificate authority
// and stores the resulting certificate and key file
// in the arguments.
func GenerateCA(certFile, keyFile string) error {
	template := newCertificate()
	template.IsCA = true
	template.KeyUsage |= x509.KeyUsageCertSign

	priv, err := rsa.GenerateKey(rand.Reader, RSABITS)
	if err != nil {
		return err
	}

	derBytes, err := x509.CreateCertificate(rand.Reader, template, template, &priv.PublicKey, priv)
	if err != nil {
		return err
	}

	certOut, err := os.Create(certFile)
	if err != nil {
		return err
	}
	pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
	certOut.Close()

	keyOut, err := os.OpenFile(keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)})
	keyOut.Close()

	return nil
}

// GenerateCert generates a new certificate signed using the provided
// certificate authority files and stores the result in the certificate
// file and key provided. The provided host names are set to the
// appropriate certificate fields.
func GenerateCert(hosts []string, certFile, keyFile, caFile, caKeyFile string) error {
	template := newCertificate()
	for _, h := range hosts {
		if ip := net.ParseIP(h); ip != nil {
			template.IPAddresses = append(template.IPAddresses, ip)
		} else {
			template.DNSNames = append(template.DNSNames, h)
		}
	}

	tlsCert, err := tls.LoadX509KeyPair(caFile, caKeyFile)
	if err != nil {
		return err
	}

	priv, err := rsa.GenerateKey(rand.Reader, RSABITS)
	if err != nil {
		return err
	}

	x509Cert, err := x509.ParseCertificate(tlsCert.Certificate[0])
	if err != nil {
		return err
	}

	derBytes, err := x509.CreateCertificate(rand.Reader, template, x509Cert, &priv.PublicKey, tlsCert.PrivateKey)
	if err != nil {
		return err
	}

	certOut, err := os.Create(certFile)
	if err != nil {
		return err
	}
	pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
	certOut.Close()

	keyOut, err := os.OpenFile(keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)})
	keyOut.Close()

	return nil
}
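For context, a minimal driver sketch (hypothetical, not part of the vendored file) of how the two helpers above are meant to be combined, reusing the file names from the Usage comment: create a CA first, then issue a host certificate signed by it. It assumes it runs in the same package as the code above.

// Hypothetical usage of GenerateCA and GenerateCert from the file above.
// Equivalent to: ./generate_cert --cert ca.pem --key ca-key.pem
if err := GenerateCA("ca.pem", "ca-key.pem"); err != nil {
	log.Fatalf("Failed to generate CA: %s", err)
}
// Equivalent to: ./generate_cert --host 127.0.0.1 --cert cert.pem --key key.pem --ca ca.pem --ca-key ca-key.pem
if err := GenerateCert([]string{"127.0.0.1"}, "cert.pem", "key.pem", "ca.pem", "ca-key.pem"); err != nil {
	log.Fatalf("Failed to generate cert: %s", err)
}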
@ -0,0 +1,27 @@
|
|||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEpAIBAAKCAQEAuBb3Ys7GCsfn0SgwlOwEO4qnbDa85pj5ZPhR26rFZO09pC9p
|
||||
4aYrj/VPnramtEgb90f0hH/BhHd9hcpSRwc/oVo9NScYSFRyeODy8UzChicYtSic
|
||||
IErQeJkw8qSOdbOSJUmB2lC3ekshECiZPR6G0/ot8yysmih/wwIj5qe7jnSFX/pU
|
||||
BO2bHLwWeK9+IsWWuiiU9cqnmRByTuckGGwg5jgENPyqB6EZe2km6ploGuEJjZLu
|
||||
wDMhm16ZZgVA2/AkOwyaww0Ey6vhLAD+RJPSROVzMfa8jjbcJ5NrY+ytmagVC+3C
|
||||
GiF90tMfVSXdtu0I+cMPZK2SpSz38lOKDIRqPwIDAQABAoIBAC6B9tvqm1Pr7yAD
|
||||
RErLWcJlJCkNpymm6hLdPWj+usHlwdXx+JD+dzD2a7gQMwuG0DHn5tl2oUBDI94i
|
||||
ICk7ppKwBpigGN1lIEpzokzd9KMJy48xBEiQPhzJrvkOI8OOq1RqPrkV/VSTGHZJ
|
||||
m3U6ehuhr+wolC6aHrT4nHNQWu0BCqnLqbiwo1cDfH8bgKD+EF6uRZHuZOITFkY2
|
||||
rMhhI/gkVwVjDbyyJhIDALUkV3ovpZ4L+P9+8XJd7s6Y8gZjI9Ti2z8X2sUny4NS
|
||||
zbLuWYOaeHdFuGzTvFf48YNnB1sf8R/f/n3DkgDos+FkPLmTO0MNnXPmBB3R+B6R
|
||||
KzuLdikCgYEA85rRBuUsp3rNxxck4rNYIVg4/TcGUcQ959MrSadnB8QOJdBGUash
|
||||
XitPzHaAMYwsFAyb36qdhGtmqTQHtjYIl6alehN5g1g/clFceFu4riocdd/KKvVe
|
||||
qe6GagJx8BVDR8Oon61CRbi4/0LhYYfbf4ezN1PKbxlkfa5WMDtoH3MCgYEAwXTo
|
||||
7RH8ue9J5cSmtImX9Q3YVZy4gooYp4/teFX+da7cT2RbAsDoxFMY9Ka0ORHOFl1c
|
||||
74UIlqB2VdUhitgLWuqLhcaFFPyTbsJsuKi8RcP7sAA1YviweOsehBq7zpmaR7oF
|
||||
Ldb8It2W37PjIt1C2M5muyztd5FRLP63c5CnvwUCgYAK17g4C1lKdw/TrbcJlBv3
|
||||
F/spYhqY5xguSlrh416VnOdYTYXjuq84hsr9ecTI78lCdzE06l4qd7FRFKzHMm59
|
||||
eYBiB53f97yTNQXdoY99yQgNQxG3icZV3/UBoOw7WGbvOck2mjqZ6dfqHKr1cVuW
|
||||
uI6Ehuk6urGbWnghm3NN+wKBgQCkuSJYMleqDnkyvsZftUUBcP+CrkZhSKsDikbe
|
||||
jwJzPCf1JfNkndPUzxjRwFx2t5Vub6Lhg7ozX4BDR44fBiRtDocsqkPJAeObjMs8
|
||||
VNiy4tSdISBUHINPAoxiWc0OdrXqWxdr18GrAlkkM6y0lAgGSYEp4XB1vfOLx3t3
|
||||
WHWs7QKBgQCkTGUse1vFTfCzA5rTFzBWJM7bdaUCIymU2BQPbRFSgdx3kKbmnrab
|
||||
fOLOaVxMWAOAJ7PF1LlOyUlesLEJzqO+pYgRgnG3d6qz99Ytz3sk7uzVr3n/Ic68
|
||||
TwYlXLHGQmP7OyR2tMPVI0rSb5g5lbRdCOqMbHN+6ce10KcibdBF/g==
|
||||
-----END RSA PRIVATE KEY-----
|
27 Godeps/_workspace/src/github.com/docker/libtrust/b2d/server-key.pem generated vendored Normal file
@ -0,0 +1,27 @@
|
|||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEowIBAAKCAQEAqS5MaKMuR9OWhPbGw/tc1JmJgtEtNp5dSx5hlPEkSY8dsi5Z
|
||||
uFZvCqYK7uaNUlap4/ytlFDIozOnZs+MrgcM5Ek/nPNtol+pts777H3Xo3oChRnc
|
||||
X1/JXQ1tdVTT//LvUJmtkg6V6+iORuMnTMpia+Sl+7cgdW3EElfzPMY8J9RIUPBE
|
||||
NyedJyp3K+RM/m+KOzmYqkHcrKhhblbjLOr5a0zu+D8ilCG87QDAUUf1gbXOUi/S
|
||||
CYH/9v20sUbPqHr8Z1wuJLeNvKPFrdC2+mtJyhqyMaH7tgsyKOmqeHxQlkNbril2
|
||||
tvZRAzqaTDDYMfJDKAwEk345CJW1avkk6GBc2wIDAQABAoIBAHJGn74eW2iQ958g
|
||||
weono/DHNSdz+8Jk/kd7YhalPAmj7j8pJ9loKOFcfw4YFWWEvIUPbIA+gQgcH/tG
|
||||
PpRelsqs35c4ptmDHLQq4+g+qKX4CDO34C9cGDwOkd0rS3gbtKMQ6zk5OPN8xn4D
|
||||
ecxY+FpAvA1nCTnIxyRM3DQ1AvUzSe+UpLtdc/vliCJiFVpAtpok6fVo/Cxevf51
|
||||
dU1BIgZcJjsmro2/GR7e7l4c0C+ZHV0Perkdfw1fqZvpj0u3ca1dfwuWUVGc7VYf
|
||||
v7D5aFH8tw5bGHdM2ajPn3mpgJ+BNuIOdG1ipiUmrv4NoWNmyd2on9+Jy+2tILOx
|
||||
dcPKqwECgYEAz5/Y6qUuhp+23t4hynbD1YCX4RZtwtuqUef7BxeaRSocQrK15zlZ
|
||||
iOYeZe7/F6b/ueuN9q64lQBy8HvtWphNifhaXAW7SVwrzNDnbZWFVaQB7+Srj53u
|
||||
/sOgEKDgi5LoEUH4aQV1prMyajWNGkBX0lmdGELkyisTvvsu/BN8S/sCgYEA0Jlp
|
||||
lO0rT5xUsM0EsQvPmgSz3/+bGEgmmNoBt2Fnp7JuVsfJQDujFUvi2tQ2seDbVupc
|
||||
+LSbPudIUIdPch450IGQSZFLiN7fAePMXIUdUuZ6amHEz6+dtdl+kjlzUmmE7/VP
|
||||
M/o8A0F0Qkn3LoU6yH8fesJ9p6JCzIpJSJuFfKECgYA0v32Sj4i0dxs0n+ah026J
|
||||
bXQooVQdb0VkLbWe5aYx8DLh1xlTt0RB/YS3jPKOBcVubKPSii0m3chVxIZWv97j
|
||||
MI90VZhxWNNf46sIMjZ/vX0of0X+5Lb1Tqn5z4V8sEP5LnN36wUq5tfmF9jTVIl7
|
||||
TX0Vztjla/BGUvZq4GBxxQKBgEyXN5QN7OwjK1lrPYDAJG6ZsW3ajRjCTSGsUhsB
|
||||
8aw6MJc+bd9exkdogJf2eUqglAh6rr5GmgXjp8KhnAL7pCCxocfRSFmt81XfTUbf
|
||||
PWAV7fanhTr4cUC25elMnNIymjP87yLXizdqzgQU4mQ7WNULEATj6n0lb79oOvYk
|
||||
3ENhAoGBAKjsl7MDi7WokpshGkEIPTxccefWPPx6wtdvDw1Yy6nrc/4KapsIvRZc
|
||||
u9r9GBEodxOFbuPKFqsTehpMuSMaWuPhHs6txP3/q1q58rpeqmXD29zcdDTuHJoS
|
||||
ZKqBPsot328/q38DwqavP9PZsuLQ/M95uXwYR4tnNnbbQwDqn61B
|
||||
-----END RSA PRIVATE KEY-----
|
|
@ -0,0 +1,18 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIC5DCCAc6gAwIBAgIRAL3D43hlFPxMmrICmKJjxn4wCwYJKoZIhvcNAQEFMBYx
|
||||
FDASBgNVBAoTC0Jvb3QyRG9ja2VyMB4XDTE0MDgwNTA3MTAwM1oXDTE3MDcyMDA3
|
||||
MTAwM1owFjEUMBIGA1UEChMLQm9vdDJEb2NrZXIwggEiMA0GCSqGSIb3DQEBAQUA
|
||||
A4IBDwAwggEKAoIBAQCpLkxooy5H05aE9sbD+1zUmYmC0S02nl1LHmGU8SRJjx2y
|
||||
Llm4Vm8Kpgru5o1SVqnj/K2UUMijM6dmz4yuBwzkST+c822iX6m2zvvsfdejegKF
|
||||
GdxfX8ldDW11VNP/8u9Qma2SDpXr6I5G4ydMymJr5KX7tyB1bcQSV/M8xjwn1EhQ
|
||||
8EQ3J50nKncr5Ez+b4o7OZiqQdysqGFuVuMs6vlrTO74PyKUIbztAMBRR/WBtc5S
|
||||
L9IJgf/2/bSxRs+oevxnXC4kt428o8Wt0Lb6a0nKGrIxofu2CzIo6ap4fFCWQ1uu
|
||||
KXa29lEDOppMMNgx8kMoDASTfjkIlbVq+SToYFzbAgMBAAGjMTAvMA4GA1UdDwEB
|
||||
/wQEAwIAoDAMBgNVHRMBAf8EAjAAMA8GA1UdEQQIMAaHBMCoO2gwCwYJKoZIhvcN
|
||||
AQEFA4IBAQC0ttuFbfSrzGdDcO/442nfpzQdO9wN0nee5OazHHTZlA+SaQ4VyZoS
|
||||
LeCdP+rRPIX0qmJ0Bf95nvkL7Mideg6IQsTTWz3e2pfcIdSZ1Gc9Bzx4cGf2yWWt
|
||||
mwi2uLAbAOKznNh8Ndc45zG614QYo3Cli/p91y+zJyDhS3ucERGCwC9ru+ngHBp4
|
||||
KrAfH4AUNRi5JRt0vHn3MXbl8Xmymt5FHeOtEM9iXCFBMtejYcngceOz8bRPXke2
|
||||
86TExl86jIHjAItEqydANhG7wNIylNOdXGCgQ4yQX1ImNiY2I4n7FeLh8vu8nCbR
|
||||
zwPv/s2m4xsr4ilA6xLq9Ur8K7FK4Q4a
|
||||
-----END CERTIFICATE-----
|
BIN Godeps/_workspace/src/github.com/docker/libtrust/chainsign/chainsign generated vendored Normal file
Binary file not shown.
9 Godeps/_workspace/src/github.com/docker/libtrust/chainsign/generatecerts/ca.pem generated vendored Normal file
@ -0,0 +1,9 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIBNTCB3KADAgECAgEAMAoGCCqGSM49BAMCMBIxEDAOBgNVBAMTB0NBIFJvb3Qw
|
||||
HhcNMTQwOTIyMTYzNzU4WhcNMTQwOTI5MTYzNzU5WjASMRAwDgYDVQQDEwdDQSBS
|
||||
b290MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEaS6UJn72kZaD1U1+9wXVFtk/
|
||||
lqSOzq+snB6qslXpLE2C+6N1NjDoivZ87CPzJt6VYgeS+iZ63+AlH5Uiz+q1SKMj
|
||||
MCEwDgYDVR0PAQH/BAQDAgAGMA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwID
|
||||
SAAwRQIhALxyd72Aev/IAvXZpx7TLbLrGMrZGTEMjXMi1Gda9MAFAiA82SUmu3dR
|
||||
9d0VWY74lOlHsojsoi3keb4PeOMyyFFFaQ==
|
||||
-----END CERTIFICATE-----
|
10 Godeps/_workspace/src/github.com/docker/libtrust/chainsign/generatecerts/cert.pem generated vendored Normal file
@ -0,0 +1,10 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIBPTCB5KADAgECAgEAMAoGCCqGSM49BAMCMBcxFTATBgNVBAMTDEludGVybWVk
|
||||
aWF0ZTAeFw0xNDA5MjIxNjM3NThaFw0xNDA5MjkxNjM3NTlaMBUxEzARBgNVBAMT
|
||||
ClRydXN0IENlcnQwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAASR+jpYA6SPRkq+
|
||||
2B9FH23Qrqgsn2ScgfiU73PN9Kgn0xyStp2RHoDhyKoQKU5T1TPOdWxVan3+ljUx
|
||||
/O/1pKdSoyMwITAOBgNVHQ8BAf8EBAMCAIAwDwYDVR0TAQH/BAUwAwEB/zAKBggq
|
||||
hkjOPQQDAgNIADBFAiEA2NpSs22EiYwbZlkjcVuAryJlBbnRzYvX7IIkk9fSRYQC
|
||||
IH4YfZC3tvUWFxc4Spvic2uCHazAu+0Fzjs53OJxwirZ
|
||||
-----END CERTIFICATE-----
|
||||
---
|
45 Godeps/_workspace/src/github.com/docker/libtrust/chainsign/generatecerts/chain.pem generated vendored Normal file
@ -0,0 +1,45 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIBPzCB5qADAgECAgEAMAoGCCqGSM49BAMCMBcxFTATBgNVBAMTDEludGVybWVk
|
||||
aWF0ZTAeFw0xNDA5MjIxNjM3NThaFw0xNDA5MjkxNjM3NTlaMBcxFTATBgNVBAMT
|
||||
DEludGVybWVkaWF0ZTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABMac+uvHxGGH
|
||||
xCisqvN5+ZO6w9Q3BlPXu5r2GVXmo07DX15Dj894S4ByNMo8iluDyqK/8JS/1cq1
|
||||
S+Q0CE6yRw6jIzAhMA4GA1UdDwEB/wQEAwIABjAPBgNVHRMBAf8EBTADAQH/MAoG
|
||||
CCqGSM49BAMCA0gAMEUCIDHx2NPcyo4JiBKFRdCZiLTyncBDJIlWzOhprFbmm914
|
||||
AiEAjbxeVHWiO+Q/7nzQXpGzsnszaMdmL1LLSQuTf9MX+bc=
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIBPzCB5qADAgECAgEAMAoGCCqGSM49BAMCMBcxFTATBgNVBAMTDEludGVybWVk
|
||||
aWF0ZTAeFw0xNDA5MjIxNjM3NThaFw0xNDA5MjkxNjM3NTlaMBcxFTATBgNVBAMT
|
||||
DEludGVybWVkaWF0ZTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABJJo+KFt18Og
|
||||
0hbUmxnxBTBHg0FIxlWJMac1iNnqlH6hnPZf1a1uJJm/iNZNPxd+jHh+ZjInvTmU
|
||||
Fg7DiDXiyuOjIzAhMA4GA1UdDwEB/wQEAwIABjAPBgNVHRMBAf8EBTADAQH/MAoG
|
||||
CCqGSM49BAMCA0gAMEUCIHGyXQESb6pph5gz4ppgfD4br9F5TYM2/Fap+IK1Jhm1
|
||||
AiEAxamUnj4AWIHut2T5R3Xg2tn0q5JDtsB3drZ/MXoMepI=
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIBPzCB5qADAgECAgEAMAoGCCqGSM49BAMCMBcxFTATBgNVBAMTDEludGVybWVk
|
||||
aWF0ZTAeFw0xNDA5MjIxNjM3NThaFw0xNDA5MjkxNjM3NTlaMBcxFTATBgNVBAMT
|
||||
DEludGVybWVkaWF0ZTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABNIvT/bkf6rm
|
||||
RYNE5e53izqdm9HHSBXlSFQGWShDrmYmlJbJrzE7gdd6dWMaTi3TzUGJzVcvASNJ
|
||||
/tkZZW2uMaKjIzAhMA4GA1UdDwEB/wQEAwIABjAPBgNVHRMBAf8EBTADAQH/MAoG
|
||||
CCqGSM49BAMCA0gAMEUCIQCYu0lvqmvj+99qiyOc/vJWhT/oIKUu+h8dIU3bULt1
|
||||
AwIgKiMptAa9IJfXa+VvleIdWpdldJa5g1OvRaP+ANQ/MGA=
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIBPjCB5qADAgECAgEAMAoGCCqGSM49BAMCMBcxFTATBgNVBAMTDEludGVybWVk
|
||||
aWF0ZTAeFw0xNDA5MjIxNjM3NThaFw0xNDA5MjkxNjM3NTlaMBcxFTATBgNVBAMT
|
||||
DEludGVybWVkaWF0ZTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABKx0nRr6QlXX
|
||||
xYLe1+BmJkI3moUdYxJr/6xfhnqJTPhk7UR8/h69BlAX7PE5Cp1j7q4bCoAAdARN
|
||||
csjpOKn1tpOjIzAhMA4GA1UdDwEB/wQEAwIABjAPBgNVHRMBAf8EBTADAQH/MAoG
|
||||
CCqGSM49BAMCA0cAMEQCIConoQP4XuaCoobZD3v0t0/WROD/wVDUBu5xILa8sSKe
|
||||
AiBJCWa8cXi2U6z6cc/RKcRaDOTmL+1Cs38aRUiU/+VCPw==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIBOjCB4aADAgECAgEAMAoGCCqGSM49BAMCMBIxEDAOBgNVBAMTB0NBIFJvb3Qw
|
||||
HhcNMTQwOTIyMTYzNzU4WhcNMTQwOTI5MTYzNzU5WjAXMRUwEwYDVQQDEwxJbnRl
|
||||
cm1lZGlhdGUwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQsGtLo05ztLyhrzzMu
|
||||
bziiYR7Y1qxhm6DlV9kaa7DHyuLpNOsEKNkW6gx/9g1AxVfDv2dcuoqnCErawdab
|
||||
+jgDoyMwITAOBgNVHQ8BAf8EBAMCAAYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjO
|
||||
PQQDAgNIADBFAiEAy+WumOj7I8iIhQNf+NQfV+LNBBoy4ufeeyaPGcRG3hICIFMW
|
||||
th+PDs1HgdeQvG1tPP9d7TBfMu7k1lVlCf7KcvKN
|
||||
-----END CERTIFICATE-----
|
BIN Godeps/_workspace/src/github.com/docker/libtrust/chainsign/generatecerts/generatecerts generated vendored Normal file
Binary file not shown.
5 Godeps/_workspace/src/github.com/docker/libtrust/chainsign/generatecerts/key.pem generated vendored Normal file
@ -0,0 +1,5 @@
|
|||
-----BEGIN EC PRIVATE KEY-----
|
||||
MHcCAQEEIDmjTNf5Q5kuGCFrfsa60qLUKZDoSuIgS+GKOb9vRWFJoAoGCCqGSM49
|
||||
AwEHoUQDQgAEkfo6WAOkj0ZKvtgfRR9t0K6oLJ9knIH4lO9zzfSoJ9MckradkR6A
|
||||
4ciqEClOU9UzznVsVWp9/pY1Mfzv9aSnUg==
|
||||
-----END EC PRIVATE KEY-----
|
161 Godeps/_workspace/src/github.com/docker/libtrust/chainsign/generatecerts/main.go generated vendored Normal file
@ -0,0 +1,161 @@
package main

import (
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"log"
	"math/big"
	"os"
	"time"

	"github.com/docker/libtrust"
)

func generateTrustCA() (libtrust.PrivateKey, *x509.Certificate) {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}
	cert := &x509.Certificate{
		SerialNumber: big.NewInt(0),
		Subject: pkix.Name{
			CommonName: "CA Root",
		},
		NotBefore:             time.Now().Add(-time.Second),
		NotAfter:              time.Now().Add(24 * 7 * time.Hour),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
		BasicConstraintsValid: true,
	}

	certDER, err := x509.CreateCertificate(
		rand.Reader, cert, cert,
		key.CryptoPublicKey(), key.CryptoPrivateKey(),
	)
	if err != nil {
		panic(err)
	}

	cert, err = x509.ParseCertificate(certDER)
	if err != nil {
		panic(err)
	}

	return key, cert
}

func generateIntermediate(key libtrust.PublicKey, parentKey libtrust.PrivateKey, parent *x509.Certificate) *x509.Certificate {
	cert := &x509.Certificate{
		SerialNumber: big.NewInt(0),
		Subject: pkix.Name{
			CommonName: "Intermediate",
		},
		NotBefore:             time.Now().Add(-time.Second),
		NotAfter:              time.Now().Add(24 * 7 * time.Hour),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
		BasicConstraintsValid: true,
	}

	certDER, err := x509.CreateCertificate(
		rand.Reader, cert, parent,
		key.CryptoPublicKey(), parentKey.CryptoPrivateKey(),
	)
	if err != nil {
		panic(err)
	}

	cert, err = x509.ParseCertificate(certDER)
	if err != nil {
		panic(err)
	}

	return cert
}

func generateTrustCert(key libtrust.PublicKey, parentKey libtrust.PrivateKey, parent *x509.Certificate) *x509.Certificate {
	cert := &x509.Certificate{
		SerialNumber: big.NewInt(0),
		Subject: pkix.Name{
			CommonName: "Trust Cert",
		},
		NotBefore:             time.Now().Add(-time.Second),
		NotAfter:              time.Now().Add(24 * 7 * time.Hour),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageDigitalSignature,
		BasicConstraintsValid: true,
	}

	certDER, err := x509.CreateCertificate(
		rand.Reader, cert, parent,
		key.CryptoPublicKey(), parentKey.CryptoPrivateKey(),
	)
	if err != nil {
		panic(err)
	}

	cert, err = x509.ParseCertificate(certDER)
	if err != nil {
		panic(err)
	}

	return cert
}

func generateTrustChain(key libtrust.PrivateKey, ca *x509.Certificate) (libtrust.PrivateKey, []*x509.Certificate) {
	parent := ca
	parentKey := key
	chain := make([]*x509.Certificate, 6)
	for i := 5; i > 0; i-- {
		intermediatekey, err := libtrust.GenerateECP256PrivateKey()
		if err != nil {
			panic(err)
		}
		chain[i] = generateIntermediate(intermediatekey, parentKey, parent)
		parent = chain[i]
		parentKey = intermediatekey
	}
	trustKey, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}
	chain[0] = generateTrustCert(trustKey, parentKey, parent)

	return trustKey, chain
}

func main() {
	caKey, caCert := generateTrustCA()
	key, chain := generateTrustChain(caKey, caCert)

	caf, err := os.OpenFile("ca.pem", os.O_CREATE|os.O_WRONLY, 0600)
	if err != nil {
		log.Fatalf("Error opening ca.pem: %s", err)
	}
	defer caf.Close()
	pem.Encode(caf, &pem.Block{Type: "CERTIFICATE", Bytes: caCert.Raw})

	chainf, err := os.OpenFile("chain.pem", os.O_CREATE|os.O_WRONLY, 0600)
	if err != nil {
		log.Fatalf("Error opening chain.pem: %s", err)
	}
	defer chainf.Close()
	for _, c := range chain[1:] {
		pem.Encode(chainf, &pem.Block{Type: "CERTIFICATE", Bytes: c.Raw})
	}

	certf, err := os.OpenFile("cert.pem", os.O_CREATE|os.O_WRONLY, 0600)
	if err != nil {
		log.Fatalf("Error opening cert.pem: %s", err)
	}
	defer certf.Close()
	pem.Encode(certf, &pem.Block{Type: "CERTIFICATE", Bytes: chain[0].Raw})

	err = libtrust.SaveKey("key.pem", key)
	if err != nil {
		log.Fatalf("Error saving key: %s", err)
	}
}
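As a rough follow-up sketch (hypothetical, not part of the vendored file; it reuses the libtrust helpers and imports that sign.go below already uses), the files written by this tool form a verifiable chain: the leaf in cert.pem should verify through the intermediates in chain.pem up to the root in ca.pem.

// Load the generated files and verify the leaf against the chain and root.
caPool, err := libtrust.LoadCertificatePool("ca.pem")
if err != nil {
	log.Fatalf("Error loading ca.pem: %s", err)
}
chainCerts, err := libtrust.LoadCertificateBundle("chain.pem")
if err != nil {
	log.Fatalf("Error loading chain.pem: %s", err)
}
leafCerts, err := libtrust.LoadCertificateBundle("cert.pem")
if err != nil {
	log.Fatalf("Error loading cert.pem: %s", err)
}
intermediates := x509.NewCertPool()
for _, c := range chainCerts {
	intermediates.AddCert(c)
}
if _, err := leafCerts[0].Verify(x509.VerifyOptions{Roots: caPool, Intermediates: intermediates}); err != nil {
	log.Fatalf("Error verifying generated certificate chain: %s", err)
}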
1 Godeps/_workspace/src/github.com/docker/libtrust/chainsign/keys.txt generated vendored Normal file
@ -0,0 +1 @@
LYRA:YAG2:QQKS:376F:QQXY:3UNK:SXH7:K6ES:Y5AU:XUN5:ZLVY:KBYL derek
184 Godeps/_workspace/src/github.com/docker/libtrust/chainsign/sign.go generated vendored Normal file
@ -0,0 +1,184 @@
package main

import (
	"bufio"
	"crypto/tls"
	"crypto/x509"
	"encoding/json"
	"errors"
	"flag"
	"io/ioutil"
	"log"
	"os"
	"strings"

	"github.com/docker/libtrust"
)

var ca string
var chain string
var cert string
var signKey string
var validKeys string

func init() {
	flag.StringVar(&ca, "ca", "", "Certificate authorities (pem file)")
	flag.StringVar(&chain, "chain", "", "Certificate chain to include (pem file)")
	flag.StringVar(&cert, "cert", "", "Certificate used to sign")
	flag.StringVar(&signKey, "k", "", "Private key to use for signing (pem or JWS file)")
	flag.StringVar(&validKeys, "keys", "", "File containing list of valid keys and namespaces")
}

func LoadValidKeys(filename string) (map[string][]string, error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	validKeys := make(map[string][]string)
	r := bufio.NewScanner(f)
	for r.Scan() {
		parts := strings.Split(r.Text(), " ")
		if len(parts) < 2 {
			return nil, errors.New("Invalid line input: expecting <KeyId> <namespace> ...")
		}
		validKeys[parts[0]] = parts[1:]
	}
	return validKeys, nil
}

func main() {
	flag.Parse()
	if ca == "" {
		log.Fatal("Missing ca")
	}
	if chain == "" {
		log.Fatalf("Missing chain")
	}
	if cert == "" {
		log.Fatalf("Missing certificate")
	}
	if signKey == "" {
		log.Fatalf("Missing key")
	}
	if validKeys == "" {
		log.Fatalf("Missing valid keys")
	}

	caPool, err := libtrust.LoadCertificatePool(ca)
	if err != nil {
		log.Fatalf("Error loading ca certs: %s", err)
	}

	chainCerts, err := libtrust.LoadCertificateBundle(chain)
	if err != nil {
		log.Fatalf("Error loading chain certificates: %s", err)
	}
	chainPool := x509.NewCertPool()
	for _, cert := range chainCerts {
		chainPool.AddCert(cert)
	}

	signCert, err := tls.LoadX509KeyPair(cert, signKey)
	if err != nil {
		log.Fatalf("Error loading key: %s", err)
	}
	if signCert.Certificate == nil {
		log.Fatalf("Signed Cert is empty")
	}

	validKeyMap, err := LoadValidKeys(validKeys)
	if err != nil {
		log.Fatalf("Error loading valid keys: %s", err)
	}

	verifyOptions := x509.VerifyOptions{
		Intermediates: chainPool,
		Roots:         caPool,
	}

	parsedCert, err := x509.ParseCertificate(signCert.Certificate[0])
	if err != nil {
		log.Fatalf("Error parsing certificate: %s", err)
	}

	chains, err := parsedCert.Verify(verifyOptions)
	if err != nil {
		log.Fatalf("Error verifying certificate: %s", err)
	}
	if len(chains) == 0 {
		log.Fatalf("No verified chains")
	}

	content, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		log.Fatalf("Error reading content from stdin: %s", err)
	}

	sig, err := libtrust.ParsePrettySignature(content, "signatures")
	if err != nil {
		log.Fatalf("Error parsing signatures: %s", err)
	}

	buildKeys, err := sig.Verify()
	if err != nil {
		log.Fatalf("Error verifying signatures: %s", err)
	}

	type manifest struct {
		Name string `json:"name"`
		Tag  string `json:"tag"`
	}
	var buildManifest manifest
	payload, err := sig.Payload()
	if err != nil {
		log.Fatalf("Error retrieving payload: %s", err)
	}
	err = json.Unmarshal(payload, &buildManifest)
	if err != nil {
		log.Fatalf("Error unmarshalling build manifest: %s", err)
	}

	log.Printf("Build keys: %#v", buildKeys)
	// Check keys against list of valid keys
	var foundKey bool
	for _, key := range buildKeys {
		keyID := key.KeyID()
		log.Printf("Checking key id: %s", keyID)
		namespaces, ok := validKeyMap[keyID]
		if ok {
			for _, namespace := range namespaces {
				if namespace == "*" || strings.HasPrefix(buildManifest.Name, namespace) {
					foundKey = true
				}
			}
		}
	}

	if !foundKey {
		log.Fatalf("No valid key found for build")
	}

	verifiedSig, err := libtrust.NewJSONSignature(content)
	if err != nil {
		log.Fatalf("Error creating JSON signature: %s", err)
	}

	privKey, err := libtrust.FromCryptoPrivateKey(signCert.PrivateKey)
	if err != nil {
		log.Fatalf("Error converting priv key: %s", err)
	}
	signChain := make([]*x509.Certificate, 1, len(chainCerts)+1)
	signChain[0] = parsedCert
	err = verifiedSig.SignWithChain(privKey, append(signChain, chainCerts...))
	if err != nil {
		log.Fatalf("Error signing with chain: %s", err)
	}

	// Output signed content to stdout
	out, err := verifiedSig.PrettySignature("verifySignatures")
	if err != nil {
		log.Fatalf("Error formatting output: %s", err)
	}
	_, err = os.Stdout.Write(out)
	if err != nil {
		log.Fatalf("Error writing output: %s", err)
	}
}
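As an aside, a small self-contained sketch of the ACL rule enforced above: a signing key is accepted when one of the namespaces listed for it in the -keys file is "*" or a prefix of the manifest name. The key ID and namespace come from keys.txt in this change; the manifest name is hypothetical.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Mirrors what LoadValidKeys builds from a "<KeyId> <namespace> ..." line.
	validKeyMap := map[string][]string{
		"LYRA:YAG2:QQKS:376F:QQXY:3UNK:SXH7:K6ES:Y5AU:XUN5:ZLVY:KBYL": {"derek"},
	}
	manifestName := "derek/example" // hypothetical manifest name

	foundKey := false
	for keyID, namespaces := range validKeyMap {
		for _, namespace := range namespaces {
			if namespace == "*" || strings.HasPrefix(manifestName, namespace) {
				foundKey = true
				fmt.Printf("key %s may sign %s\n", keyID, manifestName)
			}
		}
	}
	if !foundKey {
		fmt.Println("no valid key found for build")
	}
}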
@ -0,0 +1,599 @@
|
|||
mode: set
|
||||
github.com/docker/libtrust/jsonsign.go:53.40,57.2 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:61.52,63.2 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:65.60,72.16 3 1
|
||||
github.com/docker/libtrust/jsonsign.go:76.2,76.49 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:72.16,74.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:79.76,85.2 5 1
|
||||
github.com/docker/libtrust/jsonsign.go:88.53,90.16 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:93.2,94.16 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:97.2,98.16 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:102.2,114.12 4 1
|
||||
github.com/docker/libtrust/jsonsign.go:90.16,92.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:94.16,96.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:98.16,100.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:120.89,127.16 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:130.2,131.16 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:134.2,135.16 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:139.2,144.29 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:148.2,156.12 3 1
|
||||
github.com/docker/libtrust/jsonsign.go:127.16,129.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:131.16,133.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:135.16,137.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:144.29,146.3 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:161.56,163.42 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:200.2,200.18 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:163.42,165.17 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:168.3,169.38 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:188.3,189.17 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:193.3,194.17 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:198.3,198.22 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:165.17,167.4 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:169.38,171.18 2 0
|
||||
github.com/docker/libtrust/jsonsign.go:174.4,175.18 2 0
|
||||
github.com/docker/libtrust/jsonsign.go:178.4,179.18 2 0
|
||||
github.com/docker/libtrust/jsonsign.go:171.18,173.5 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:175.18,177.5 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:179.18,181.5 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:182.5,182.41 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:182.41,184.4 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:184.5,186.4 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:189.17,191.4 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:194.17,196.4 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:206.89,208.42 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:266.2,266.20 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:208.42,210.17 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:213.3,214.38 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:210.17,212.4 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:214.38,216.18 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:219.4,220.18 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:223.4,224.18 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:227.4,228.39 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:243.4,249.18 3 1
|
||||
github.com/docker/libtrust/jsonsign.go:252.4,255.18 3 1
|
||||
github.com/docker/libtrust/jsonsign.go:259.4,260.18 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:216.18,218.5 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:220.18,222.5 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:224.18,226.5 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:228.39,230.38 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:230.38,232.20 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:235.6,236.20 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:239.6,239.41 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:232.20,234.7 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:236.20,238.7 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:249.18,251.5 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:255.18,257.5 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:260.18,262.5 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:271.48,272.29 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:275.2,280.47 2 0
|
||||
github.com/docker/libtrust/jsonsign.go:272.29,274.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:283.28,285.2 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:287.59,288.77 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:294.2,294.8 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:288.77,290.21 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:290.21,292.4 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:310.55,317.16 4 0
|
||||
github.com/docker/libtrust/jsonsign.go:320.2,320.33 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:323.2,324.16 2 0
|
||||
github.com/docker/libtrust/jsonsign.go:328.2,329.16 2 0
|
||||
github.com/docker/libtrust/jsonsign.go:332.2,333.46 2 0
|
||||
github.com/docker/libtrust/jsonsign.go:354.2,354.16 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:317.16,319.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:320.33,322.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:324.16,326.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:329.16,331.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:333.46,337.36 2 0
|
||||
github.com/docker/libtrust/jsonsign.go:340.3,340.34 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:347.3,351.4 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:337.36,339.4 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:340.34,342.18 2 0
|
||||
github.com/docker/libtrust/jsonsign.go:345.4,345.26 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:342.18,344.5 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:359.63,362.16 3 1
|
||||
github.com/docker/libtrust/jsonsign.go:366.2,373.32 5 1
|
||||
github.com/docker/libtrust/jsonsign.go:376.2,377.35 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:380.2,383.16 3 1
|
||||
github.com/docker/libtrust/jsonsign.go:362.16,364.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:373.32,375.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:377.35,379.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:388.75,389.24 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:396.2,400.16 4 1
|
||||
github.com/docker/libtrust/jsonsign.go:403.2,409.16 4 1
|
||||
github.com/docker/libtrust/jsonsign.go:390.2,390.30 0 1
|
||||
github.com/docker/libtrust/jsonsign.go:391.2,391.16 0 0
|
||||
github.com/docker/libtrust/jsonsign.go:392.2,393.46 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:400.16,402.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:412.71,414.9 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:417.2,417.27 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:414.9,416.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:418.2,419.17 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:420.2,421.22 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:422.2,423.18 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:427.82,429.9 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:432.2,433.8 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:429.9,431.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:440.88,443.16 3 1
|
||||
github.com/docker/libtrust/jsonsign.go:446.2,447.9 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:451.2,453.16 3 1
|
||||
github.com/docker/libtrust/jsonsign.go:457.2,460.49 3 1
|
||||
github.com/docker/libtrust/jsonsign.go:511.2,511.36 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:514.2,520.16 6 1
|
||||
github.com/docker/libtrust/jsonsign.go:443.16,445.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:447.9,449.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:453.16,455.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:460.49,462.17 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:465.3,467.17 3 1
|
||||
github.com/docker/libtrust/jsonsign.go:471.3,472.10 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:475.3,476.10 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:479.3,480.17 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:483.3,483.27 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:488.3,488.30 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:494.3,498.39 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:505.3,509.4 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:462.17,464.4 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:467.17,469.4 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:472.10,474.4 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:476.10,478.4 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:480.17,482.4 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:483.27,485.4 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:485.5,485.45 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:485.45,487.4 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:488.30,490.4 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:490.5,490.59 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:490.59,492.4 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:498.39,500.18 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:503.4,503.26 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:500.18,502.5 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:511.36,513.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:525.79,526.29 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:529.2,530.16 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:533.2,537.21 4 1
|
||||
github.com/docker/libtrust/jsonsign.go:542.2,542.24 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:546.2,549.21 4 1
|
||||
github.com/docker/libtrust/jsonsign.go:563.2,565.25 2 1
|
||||
github.com/docker/libtrust/jsonsign.go:526.29,528.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:530.16,532.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:537.21,539.3 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:539.4,541.3 1 1
|
||||
github.com/docker/libtrust/jsonsign.go:542.24,544.3 1 0
|
||||
github.com/docker/libtrust/jsonsign.go:549.21,557.3 7 1
|
||||
github.com/docker/libtrust/jsonsign.go:557.4,562.3 4 1
|
||||
github.com/docker/libtrust/key.go:71.79,72.51 1 1
|
||||
github.com/docker/libtrust/key.go:73.2,74.42 1 1
|
||||
github.com/docker/libtrust/key.go:75.2,76.48 1 1
|
||||
github.com/docker/libtrust/key.go:77.2,78.81 1 0
|
||||
github.com/docker/libtrust/key.go:85.83,86.53 1 1
|
||||
github.com/docker/libtrust/key.go:87.2,88.44 1 1
|
||||
github.com/docker/libtrust/key.go:89.2,90.50 1 1
|
||||
github.com/docker/libtrust/key.go:91.2,92.83 1 0
|
||||
github.com/docker/libtrust/key.go:98.60,100.21 2 1
|
||||
github.com/docker/libtrust/key.go:106.2,106.37 1 1
|
||||
github.com/docker/libtrust/key.go:100.21,102.3 1 0
|
||||
github.com/docker/libtrust/key.go:102.4,102.42 1 1
|
||||
github.com/docker/libtrust/key.go:102.42,104.3 1 0
|
||||
github.com/docker/libtrust/key.go:112.68,115.6 2 1
|
||||
github.com/docker/libtrust/key.go:132.2,132.21 1 1
|
||||
github.com/docker/libtrust/key.go:115.6,118.22 3 1
|
||||
github.com/docker/libtrust/key.go:124.3,125.17 2 1
|
||||
github.com/docker/libtrust/key.go:129.3,129.36 1 1
|
||||
github.com/docker/libtrust/key.go:118.22,119.9 1 1
|
||||
github.com/docker/libtrust/key.go:120.5,120.43 1 1
|
||||
github.com/docker/libtrust/key.go:120.43,122.4 1 0
|
||||
github.com/docker/libtrust/key.go:125.17,127.4 1 0
|
||||
github.com/docker/libtrust/key.go:137.62,139.21 2 1
|
||||
github.com/docker/libtrust/key.go:143.2,145.9 2 1
|
||||
github.com/docker/libtrust/key.go:165.2,167.17 2 1
|
||||
github.com/docker/libtrust/key.go:139.21,141.3 1 0
|
||||
github.com/docker/libtrust/key.go:146.2,148.17 2 1
|
||||
github.com/docker/libtrust/key.go:151.3,151.41 1 1
|
||||
github.com/docker/libtrust/key.go:152.2,154.17 2 1
|
||||
github.com/docker/libtrust/key.go:157.3,158.17 2 1
|
||||
github.com/docker/libtrust/key.go:161.2,162.86 1 0
|
||||
github.com/docker/libtrust/key.go:148.17,150.4 1 0
|
||||
github.com/docker/libtrust/key.go:154.17,156.4 1 0
|
||||
github.com/docker/libtrust/key.go:158.17,160.4 1 0
|
||||
github.com/docker/libtrust/key.go:172.60,176.16 3 1
|
||||
github.com/docker/libtrust/key.go:183.2,184.16 2 1
|
||||
github.com/docker/libtrust/key.go:188.2,188.9 1 1
|
||||
github.com/docker/libtrust/key.go:176.16,180.3 1 0
|
||||
github.com/docker/libtrust/key.go:184.16,186.3 1 0
|
||||
github.com/docker/libtrust/key.go:189.2,191.33 1 1
|
||||
github.com/docker/libtrust/key.go:192.2,194.34 1 1
|
||||
github.com/docker/libtrust/key.go:195.2,198.4 1 0
|
||||
github.com/docker/libtrust/key.go:204.65,206.16 2 1
|
||||
github.com/docker/libtrust/key.go:210.2,212.33 2 1
|
||||
github.com/docker/libtrust/key.go:220.2,220.21 1 1
|
||||
github.com/docker/libtrust/key.go:206.16,208.3 1 0
|
||||
github.com/docker/libtrust/key.go:212.33,214.17 2 1
|
||||
github.com/docker/libtrust/key.go:217.3,217.36 1 1
|
||||
github.com/docker/libtrust/key.go:214.17,216.4 1 0
|
||||
github.com/docker/libtrust/key.go:225.62,229.16 3 1
|
||||
github.com/docker/libtrust/key.go:236.2,237.16 2 1
|
||||
github.com/docker/libtrust/key.go:241.2,241.9 1 1
|
||||
github.com/docker/libtrust/key.go:229.16,233.3 1 0
|
||||
github.com/docker/libtrust/key.go:237.16,239.3 1 0
|
||||
github.com/docker/libtrust/key.go:242.2,244.34 1 1
|
||||
github.com/docker/libtrust/key.go:245.2,247.35 1 1
|
||||
github.com/docker/libtrust/key.go:248.2,251.4 1 0
|
||||
github.com/docker/libtrust/key_files.go:18.56,20.16 2 1
|
||||
github.com/docker/libtrust/key_files.go:30.2,30.18 1 1
|
||||
github.com/docker/libtrust/key_files.go:20.16,21.25 1 1
|
||||
github.com/docker/libtrust/key_files.go:27.3,27.18 1 1
|
||||
github.com/docker/libtrust/key_files.go:21.25,23.4 1 1
|
||||
github.com/docker/libtrust/key_files.go:23.5,25.4 1 0
|
||||
github.com/docker/libtrust/key_files.go:39.55,41.16 2 1
|
||||
github.com/docker/libtrust/key_files.go:45.2,47.81 2 1
|
||||
github.com/docker/libtrust/key_files.go:59.2,59.17 1 1
|
||||
github.com/docker/libtrust/key_files.go:41.16,43.3 1 0
|
||||
github.com/docker/libtrust/key_files.go:47.81,49.17 2 1
|
||||
github.com/docker/libtrust/key_files.go:49.17,51.4 1 0
|
||||
github.com/docker/libtrust/key_files.go:52.4,54.17 2 1
|
||||
github.com/docker/libtrust/key_files.go:54.17,56.4 1 0
|
||||
github.com/docker/libtrust/key_files.go:64.60,66.16 2 1
|
||||
github.com/docker/libtrust/key_files.go:70.2,72.81 2 1
|
||||
github.com/docker/libtrust/key_files.go:84.2,84.17 1 1
|
||||
github.com/docker/libtrust/key_files.go:66.16,68.3 1 0
|
||||
github.com/docker/libtrust/key_files.go:72.81,74.17 2 1
|
||||
github.com/docker/libtrust/key_files.go:74.17,76.4 1 0
|
||||
github.com/docker/libtrust/key_files.go:77.4,79.17 2 1
|
||||
github.com/docker/libtrust/key_files.go:79.17,81.4 1 0
|
||||
github.com/docker/libtrust/key_files.go:89.53,93.81 3 1
|
||||
github.com/docker/libtrust/key_files.go:108.2,109.16 2 1
|
||||
github.com/docker/libtrust/key_files.go:113.2,113.12 1 1
|
||||
github.com/docker/libtrust/key_files.go:93.81,96.17 2 1
|
||||
github.com/docker/libtrust/key_files.go:96.17,98.4 1 0
|
||||
github.com/docker/libtrust/key_files.go:99.4,102.17 2 1
|
||||
github.com/docker/libtrust/key_files.go:105.3,105.44 1 1
|
||||
github.com/docker/libtrust/key_files.go:102.17,104.4 1 0
|
||||
github.com/docker/libtrust/key_files.go:109.16,111.3 1 0
|
||||
github.com/docker/libtrust/key_files.go:117.58,121.81 3 1
|
||||
github.com/docker/libtrust/key_files.go:136.2,137.16 2 1
|
||||
github.com/docker/libtrust/key_files.go:141.2,141.12 1 1
|
||||
github.com/docker/libtrust/key_files.go:121.81,124.17 2 1
|
||||
github.com/docker/libtrust/key_files.go:124.17,126.4 1 0
|
||||
github.com/docker/libtrust/key_files.go:127.4,130.17 2 1
|
||||
github.com/docker/libtrust/key_files.go:133.3,133.44 1 1
|
||||
github.com/docker/libtrust/key_files.go:130.17,132.4 1 0
|
||||
github.com/docker/libtrust/key_files.go:137.16,139.3 1 0
|
||||
github.com/docker/libtrust/key_files.go:151.59,152.81 1 1
|
||||
github.com/docker/libtrust/key_files.go:157.2,157.36 1 1
|
||||
github.com/docker/libtrust/key_files.go:152.81,154.3 1 1
|
||||
github.com/docker/libtrust/key_files.go:160.64,161.20 1 1
|
||||
github.com/docker/libtrust/key_files.go:166.2,169.16 3 1
|
||||
github.com/docker/libtrust/key_files.go:173.2,173.25 1 1
|
||||
github.com/docker/libtrust/key_files.go:161.20,164.3 1 1
|
||||
github.com/docker/libtrust/key_files.go:169.16,171.3 1 0
|
||||
github.com/docker/libtrust/key_files.go:176.63,178.49 2 1
|
||||
github.com/docker/libtrust/key_files.go:182.2,182.43 1 1
|
||||
github.com/docker/libtrust/key_files.go:178.49,180.3 1 0
|
||||
github.com/docker/libtrust/key_files.go:185.62,187.49 2 1
|
||||
github.com/docker/libtrust/key_files.go:191.2,191.42 1 1
|
||||
github.com/docker/libtrust/key_files.go:187.49,189.3 1 0
|
||||
github.com/docker/libtrust/key_files.go:195.58,196.81 1 1
|
||||
github.com/docker/libtrust/key_files.go:201.2,201.40 1 1
|
||||
github.com/docker/libtrust/key_files.go:196.81,198.3 1 1
|
||||
github.com/docker/libtrust/key_files.go:204.62,206.16 2 1
|
||||
github.com/docker/libtrust/key_files.go:210.2,211.49 2 1
|
||||
github.com/docker/libtrust/key_files.go:215.2,216.16 2 1
|
||||
github.com/docker/libtrust/key_files.go:220.2,224.16 4 1
|
||||
github.com/docker/libtrust/key_files.go:228.2,229.16 2 1
|
||||
github.com/docker/libtrust/key_files.go:233.2,233.12 1 1
|
||||
github.com/docker/libtrust/key_files.go:206.16,208.3 1 0
|
||||
github.com/docker/libtrust/key_files.go:211.49,213.3 1 0
|
||||
github.com/docker/libtrust/key_files.go:216.16,218.3 1 0
|
||||
github.com/docker/libtrust/key_files.go:224.16,226.3 1 0
|
||||
github.com/docker/libtrust/key_files.go:229.16,231.3 1 0
|
||||
github.com/docker/libtrust/key_files.go:236.61,239.16 2 1
|
||||
github.com/docker/libtrust/key_files.go:242.2,245.16 3 1
|
||||
github.com/docker/libtrust/key_files.go:249.2,250.16 2 1
|
||||
github.com/docker/libtrust/key_files.go:254.2,254.12 1 1
|
||||
github.com/docker/libtrust/key_files.go:239.16,241.3 1 0
|
||||
github.com/docker/libtrust/key_files.go:245.16,247.3 1 0
|
||||
github.com/docker/libtrust/key_files.go:250.16,252.3 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:26.69,28.2 1 1
|
||||
github.com/docker/libtrust/rsa_key.go:31.41,33.2 1 1
|
||||
github.com/docker/libtrust/rsa_key.go:36.39,47.2 5 1
|
||||
github.com/docker/libtrust/rsa_key.go:49.40,51.2 1 1
|
||||
github.com/docker/libtrust/rsa_key.go:57.83,60.16 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:64.2,66.16 3 1
|
||||
github.com/docker/libtrust/rsa_key.go:69.2,72.16 3 1
|
||||
github.com/docker/libtrust/rsa_key.go:76.2,76.12 1 1
|
||||
github.com/docker/libtrust/rsa_key.go:60.16,62.3 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:66.16,68.3 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:72.16,74.3 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:82.59,84.2 1 1
|
||||
github.com/docker/libtrust/rsa_key.go:86.55,88.31 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:91.2,96.12 5 1
|
||||
github.com/docker/libtrust/rsa_key.go:88.31,90.3 1 1
|
||||
github.com/docker/libtrust/rsa_key.go:101.63,103.2 1 1
|
||||
github.com/docker/libtrust/rsa_key.go:106.55,108.16 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:111.2,112.59 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:108.16,110.3 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:115.74,117.2 1 1
|
||||
github.com/docker/libtrust/rsa_key.go:119.67,121.9 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:124.2,124.10 1 1
|
||||
github.com/docker/libtrust/rsa_key.go:121.9,123.3 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:127.77,134.16 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:138.2,139.16 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:144.2,145.16 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:149.2,150.16 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:154.2,160.8 3 1
|
||||
github.com/docker/libtrust/rsa_key.go:170.2,170.27 1 1
|
||||
github.com/docker/libtrust/rsa_key.go:174.2,176.17 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:134.16,136.3 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:139.16,141.3 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:145.16,147.3 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:150.16,152.3 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:160.8,162.17 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:165.3,165.25 1 1
|
||||
github.com/docker/libtrust/rsa_key.go:162.17,164.4 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:165.25,167.4 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:170.27,172.3 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:189.73,194.2 1 1
|
||||
github.com/docker/libtrust/rsa_key.go:197.47,199.2 1 1
|
||||
github.com/docker/libtrust/rsa_key.go:201.41,203.2 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:211.108,217.16 4 1
|
||||
github.com/docker/libtrust/rsa_key.go:220.2,223.16 3 1
|
||||
github.com/docker/libtrust/rsa_key.go:227.2,229.8 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:217.16,219.3 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:223.16,225.3 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:235.62,237.2 1 1
|
||||
github.com/docker/libtrust/rsa_key.go:239.56,252.26 10 1
|
||||
github.com/docker/libtrust/rsa_key.go:265.2,265.12 1 1
|
||||
github.com/docker/libtrust/rsa_key.go:252.26,254.33 2 0
|
||||
github.com/docker/libtrust/rsa_key.go:262.3,262.31 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:254.33,261.4 6 0
|
||||
github.com/docker/libtrust/rsa_key.go:270.64,272.2 1 1
|
||||
github.com/docker/libtrust/rsa_key.go:275.56,279.2 3 1
|
||||
github.com/docker/libtrust/rsa_key.go:281.79,289.16 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:292.2,293.16 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:296.2,297.16 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:300.2,301.16 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:304.2,305.16 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:308.2,309.16 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:313.2,314.29 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:322.2,323.16 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:327.2,338.16 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:383.2,388.17 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:289.16,291.3 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:293.16,295.3 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:297.16,299.3 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:301.16,303.3 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:305.16,307.3 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:309.16,311.3 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:314.29,317.3 2 0
|
||||
github.com/docker/libtrust/rsa_key.go:323.16,325.3 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:338.16,341.10 2 0
|
||||
github.com/docker/libtrust/rsa_key.go:344.3,345.32 2 0
|
||||
github.com/docker/libtrust/rsa_key.go:348.3,352.39 4 0
|
||||
github.com/docker/libtrust/rsa_key.go:379.3,380.47 2 0
|
||||
github.com/docker/libtrust/rsa_key.go:341.10,343.4 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:345.32,347.4 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:352.39,354.11 2 0
|
||||
github.com/docker/libtrust/rsa_key.go:358.4,359.18 2 0
|
||||
github.com/docker/libtrust/rsa_key.go:362.4,363.18 2 0
|
||||
github.com/docker/libtrust/rsa_key.go:366.4,367.18 2 0
|
||||
github.com/docker/libtrust/rsa_key.go:371.4,376.73 6 0
|
||||
github.com/docker/libtrust/rsa_key.go:354.11,356.5 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:359.18,361.5 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:363.18,365.5 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:367.18,369.5 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:395.68,398.16 3 1
|
||||
github.com/docker/libtrust/rsa_key.go:402.2,405.8 3 1
|
||||
github.com/docker/libtrust/rsa_key.go:398.16,400.3 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:409.54,411.16 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:415.2,415.15 1 1
|
||||
github.com/docker/libtrust/rsa_key.go:411.16,413.3 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:419.54,421.16 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:425.2,425.15 1 1
|
||||
github.com/docker/libtrust/rsa_key.go:421.16,423.3 1 0
|
||||
github.com/docker/libtrust/rsa_key.go:429.54,431.16 2 1
|
||||
github.com/docker/libtrust/rsa_key.go:435.2,435.15 1 1
|
||||
github.com/docker/libtrust/rsa_key.go:431.16,433.3 1 0
|
||||
github.com/docker/libtrust/util.go:21.43,23.2 1 1
|
||||
github.com/docker/libtrust/util.go:29.52,30.20 1 1
|
||||
github.com/docker/libtrust/util.go:39.2,39.43 1 1
|
||||
github.com/docker/libtrust/util.go:31.2,31.9 0 1
|
||||
github.com/docker/libtrust/util.go:32.2,33.12 1 1
|
||||
github.com/docker/libtrust/util.go:34.2,35.11 1 1
|
||||
github.com/docker/libtrust/util.go:36.2,37.53 1 0
|
||||
github.com/docker/libtrust/util.go:42.35,46.33 4 1
|
||||
github.com/docker/libtrust/util.go:51.2,52.21 2 1
|
||||
github.com/docker/libtrust/util.go:46.33,50.3 3 1
|
||||
github.com/docker/libtrust/util.go:55.74,57.9 2 1
|
||||
github.com/docker/libtrust/util.go:61.2,62.9 2 1
|
||||
github.com/docker/libtrust/util.go:65.2,67.17 2 1
|
||||
github.com/docker/libtrust/util.go:57.9,59.3 1 0
|
||||
github.com/docker/libtrust/util.go:62.9,64.3 1 0
|
||||
github.com/docker/libtrust/util.go:70.80,74.16 3 1
|
||||
github.com/docker/libtrust/util.go:77.2,78.33 2 1
|
||||
github.com/docker/libtrust/util.go:81.2,81.43 1 1
|
||||
github.com/docker/libtrust/util.go:74.16,76.3 1 0
|
||||
github.com/docker/libtrust/util.go:78.33,80.3 1 0
|
||||
github.com/docker/libtrust/util.go:84.82,86.16 2 1
|
||||
github.com/docker/libtrust/util.go:98.2,102.32 4 1
|
||||
github.com/docker/libtrust/util.go:106.2,106.43 1 1
|
||||
github.com/docker/libtrust/util.go:86.16,88.3 1 0
|
||||
github.com/docker/libtrust/util.go:102.32,104.3 1 0
|
||||
github.com/docker/libtrust/util.go:109.61,111.16 2 1
|
||||
github.com/docker/libtrust/util.go:115.2,115.43 1 1
|
||||
github.com/docker/libtrust/util.go:111.16,113.3 1 0
|
||||
github.com/docker/libtrust/util.go:118.52,126.24 4 1
|
||||
github.com/docker/libtrust/util.go:131.2,131.16 1 1
|
||||
github.com/docker/libtrust/util.go:126.24,127.18 1 1
|
||||
github.com/docker/libtrust/util.go:127.18,128.9 1 1
|
||||
github.com/docker/libtrust/util.go:134.63,136.16 2 1
|
||||
github.com/docker/libtrust/util.go:142.2,146.50 4 1
|
||||
github.com/docker/libtrust/util.go:136.16,138.3 1 0
|
||||
github.com/docker/libtrust/util.go:149.93,151.16 2 1
|
||||
github.com/docker/libtrust/util.go:155.2,156.16 2 1
|
||||
github.com/docker/libtrust/util.go:160.2,160.47 1 1
|
||||
github.com/docker/libtrust/util.go:151.16,153.3 1 0
|
||||
github.com/docker/libtrust/util.go:156.16,158.3 1 0
|
||||
github.com/docker/libtrust/util.go:163.103,165.28 2 1
|
||||
github.com/docker/libtrust/util.go:180.2,180.22 1 1
|
||||
github.com/docker/libtrust/util.go:165.28,166.26 1 1
|
||||
github.com/docker/libtrust/util.go:167.3,168.29 1 1
|
||||
github.com/docker/libtrust/util.go:169.3,170.20 1 1
|
||||
github.com/docker/libtrust/util.go:175.3,175.11 0 0
|
||||
github.com/docker/libtrust/util.go:170.20,172.5 1 1
|
||||
github.com/docker/libtrust/util.go:172.6,174.5 0 0
|
||||
github.com/docker/libtrust/util.go:183.65,185.16 2 1
|
||||
github.com/docker/libtrust/util.go:189.2,190.16 2 1
|
||||
github.com/docker/libtrust/util.go:194.2,196.20 2 1
|
||||
github.com/docker/libtrust/util.go:185.16,187.3 1 0
|
||||
github.com/docker/libtrust/util.go:190.16,192.3 1 0
|
||||
github.com/docker/libtrust/util.go:199.64,200.43 1 1
|
||||
github.com/docker/libtrust/util.go:200.43,202.21 2 1
|
||||
github.com/docker/libtrust/util.go:207.3,207.40 1 1
|
||||
github.com/docker/libtrust/util.go:202.21,204.4 1 1
|
||||
github.com/docker/libtrust/util.go:204.5,206.4 1 1
|
||||
github.com/docker/libtrust/certificates.go:24.69,33.15 2 1
|
||||
github.com/docker/libtrust/certificates.go:37.2,37.21 1 1
|
||||
github.com/docker/libtrust/certificates.go:41.2,41.21 1 1
|
||||
github.com/docker/libtrust/certificates.go:45.2,58.3 1 1
|
||||
github.com/docker/libtrust/certificates.go:33.15,35.3 1 1
|
||||
github.com/docker/libtrust/certificates.go:37.21,39.3 1 1
|
||||
github.com/docker/libtrust/certificates.go:41.21,43.3 1 1
|
||||
github.com/docker/libtrust/certificates.go:61.123,69.16 4 1
|
||||
github.com/docker/libtrust/certificates.go:73.2,74.16 2 1
|
||||
github.com/docker/libtrust/certificates.go:78.2,78.8 1 1
|
||||
github.com/docker/libtrust/certificates.go:69.16,71.3 1 0
|
||||
github.com/docker/libtrust/certificates.go:74.16,76.3 1 0
|
||||
github.com/docker/libtrust/certificates.go:84.118,93.2 2 1
|
||||
github.com/docker/libtrust/certificates.go:97.78,104.2 2 1
|
||||
github.com/docker/libtrust/certificates.go:108.89,118.2 3 1
|
||||
github.com/docker/libtrust/certificates.go:123.93,126.41 2 1
|
||||
github.com/docker/libtrust/certificates.go:135.2,135.22 1 1
|
||||
github.com/docker/libtrust/certificates.go:126.41,128.17 2 1
|
||||
github.com/docker/libtrust/certificates.go:132.3,132.25 1 1
|
||||
github.com/docker/libtrust/certificates.go:128.17,130.4 1 0
|
||||
github.com/docker/libtrust/certificates.go:140.74,142.16 2 1
|
||||
github.com/docker/libtrust/certificates.go:145.2,148.47 4 1
|
||||
github.com/docker/libtrust/certificates.go:160.2,160.26 1 1
|
||||
github.com/docker/libtrust/certificates.go:142.16,144.3 1 0
|
||||
github.com/docker/libtrust/certificates.go:148.47,149.34 1 1
|
||||
github.com/docker/libtrust/certificates.go:149.34,151.18 2 1
|
||||
github.com/docker/libtrust/certificates.go:154.4,154.45 1 1
|
||||
github.com/docker/libtrust/certificates.go:151.18,153.5 1 0
|
||||
github.com/docker/libtrust/certificates.go:155.5,157.4 1 0
|
||||
github.com/docker/libtrust/certificates.go:165.67,167.16 2 1
|
||||
github.com/docker/libtrust/certificates.go:170.2,171.29 2 1
|
||||
github.com/docker/libtrust/certificates.go:174.2,174.18 1 1
|
||||
github.com/docker/libtrust/certificates.go:167.16,169.3 1 0
|
||||
github.com/docker/libtrust/certificates.go:171.29,173.3 1 1
|
||||
github.com/docker/libtrust/ec_key.go:30.78,33.9 2 1
|
||||
github.com/docker/libtrust/ec_key.go:34.2,35.86 1 1
|
||||
github.com/docker/libtrust/ec_key.go:36.2,37.86 1 1
|
||||
github.com/docker/libtrust/ec_key.go:38.2,39.86 1 1
|
||||
github.com/docker/libtrust/ec_key.go:40.2,41.55 1 0
|
||||
github.com/docker/libtrust/ec_key.go:46.40,48.2 1 1
|
||||
github.com/docker/libtrust/ec_key.go:52.42,54.2 1 1
|
||||
github.com/docker/libtrust/ec_key.go:57.38,68.2 5 1
|
||||
github.com/docker/libtrust/ec_key.go:70.39,72.2 1 1
|
||||
github.com/docker/libtrust/ec_key.go:78.82,81.47 1 1
|
||||
github.com/docker/libtrust/ec_key.go:86.2,88.38 3 1
|
||||
github.com/docker/libtrust/ec_key.go:92.2,98.16 6 1
|
||||
github.com/docker/libtrust/ec_key.go:101.2,103.44 2 1
|
||||
github.com/docker/libtrust/ec_key.go:107.2,107.12 1 1
|
||||
github.com/docker/libtrust/ec_key.go:81.47,83.3 1 0
|
||||
github.com/docker/libtrust/ec_key.go:88.38,90.3 1 0
|
||||
github.com/docker/libtrust/ec_key.go:98.16,100.3 1 0
|
||||
github.com/docker/libtrust/ec_key.go:103.44,105.3 1 0
|
||||
github.com/docker/libtrust/ec_key.go:113.58,115.2 1 1
|
||||
github.com/docker/libtrust/ec_key.go:117.54,119.31 2 1
|
||||
github.com/docker/libtrust/ec_key.go:122.2,139.12 13 1
|
||||
github.com/docker/libtrust/ec_key.go:119.31,121.3 1 1
|
||||
github.com/docker/libtrust/ec_key.go:144.62,146.2 1 1
|
||||
github.com/docker/libtrust/ec_key.go:149.54,151.16 2 1
|
||||
github.com/docker/libtrust/ec_key.go:154.2,155.59 2 1
|
||||
github.com/docker/libtrust/ec_key.go:151.16,153.3 1 0
|
||||
github.com/docker/libtrust/ec_key.go:158.73,160.2 1 1
|
||||
github.com/docker/libtrust/ec_key.go:162.66,164.9 2 1
|
||||
github.com/docker/libtrust/ec_key.go:167.2,167.10 1 1
|
||||
github.com/docker/libtrust/ec_key.go:164.9,166.3 1 1
|
||||
github.com/docker/libtrust/ec_key.go:170.75,177.16 2 1
|
||||
github.com/docker/libtrust/ec_key.go:181.2,186.9 2 1
|
||||
github.com/docker/libtrust/ec_key.go:201.2,202.16 2 1
|
||||
github.com/docker/libtrust/ec_key.go:205.2,206.16 2 1
|
||||
github.com/docker/libtrust/ec_key.go:210.2,211.16 2 1
|
||||
github.com/docker/libtrust/ec_key.go:214.2,215.16 2 1
|
||||
github.com/docker/libtrust/ec_key.go:219.2,226.8 3 1
|
||||
github.com/docker/libtrust/ec_key.go:236.2,238.17 2 1
|
||||
github.com/docker/libtrust/ec_key.go:177.16,179.3 1 0
|
||||
github.com/docker/libtrust/ec_key.go:187.2,189.17 2 1
|
||||
github.com/docker/libtrust/ec_key.go:190.2,192.17 2 1
|
||||
github.com/docker/libtrust/ec_key.go:193.2,195.17 2 1
|
||||
github.com/docker/libtrust/ec_key.go:196.2,197.88 1 0
|
||||
github.com/docker/libtrust/ec_key.go:202.16,204.3 1 0
|
||||
github.com/docker/libtrust/ec_key.go:206.16,208.3 1 0
|
||||
github.com/docker/libtrust/ec_key.go:211.16,213.3 1 0
|
||||
github.com/docker/libtrust/ec_key.go:215.16,217.3 1 0
|
||||
github.com/docker/libtrust/ec_key.go:226.8,228.17 2 1
|
||||
github.com/docker/libtrust/ec_key.go:231.3,231.25 1 1
|
||||
github.com/docker/libtrust/ec_key.go:228.17,230.4 1 0
|
||||
github.com/docker/libtrust/ec_key.go:231.25,233.4 1 0
|
||||
github.com/docker/libtrust/ec_key.go:252.82,254.16 2 1
|
||||
github.com/docker/libtrust/ec_key.go:258.2,258.57 1 1
|
||||
github.com/docker/libtrust/ec_key.go:254.16,256.3 1 0
|
||||
github.com/docker/libtrust/ec_key.go:262.46,264.2 1 1
|
||||
github.com/docker/libtrust/ec_key.go:266.40,268.2 1 0
|
||||
github.com/docker/libtrust/ec_key.go:276.107,283.16 3 1
|
||||
github.com/docker/libtrust/ec_key.go:286.2,289.16 3 1
|
||||
github.com/docker/libtrust/ec_key.go:292.2,304.8 9 1
|
||||
github.com/docker/libtrust/ec_key.go:283.16,285.3 1 0
|
||||
github.com/docker/libtrust/ec_key.go:289.16,291.3 1 0
|
||||
github.com/docker/libtrust/ec_key.go:310.61,312.2 1 1
|
||||
github.com/docker/libtrust/ec_key.go:314.55,335.2 8 1
|
||||
github.com/docker/libtrust/ec_key.go:339.63,341.2 1 1
|
||||
github.com/docker/libtrust/ec_key.go:344.55,346.16 2 1
|
||||
github.com/docker/libtrust/ec_key.go:349.2,350.63 2 1
|
||||
github.com/docker/libtrust/ec_key.go:346.16,348.3 1 0
|
||||
github.com/docker/libtrust/ec_key.go:353.77,355.16 2 1
|
||||
github.com/docker/libtrust/ec_key.go:362.2,363.16 2 1
|
||||
github.com/docker/libtrust/ec_key.go:367.2,368.16 2 1
|
||||
github.com/docker/libtrust/ec_key.go:372.2,380.17 2 1
|
||||
github.com/docker/libtrust/ec_key.go:355.16,357.3 1 0
|
||||
github.com/docker/libtrust/ec_key.go:363.16,365.3 1 0
|
||||
github.com/docker/libtrust/ec_key.go:368.16,370.3 1 0
|
||||
github.com/docker/libtrust/ec_key.go:387.78,390.16 3 1
|
||||
github.com/docker/libtrust/ec_key.go:394.2,397.8 3 1
|
||||
github.com/docker/libtrust/ec_key.go:390.16,392.3 1 0
|
||||
github.com/docker/libtrust/ec_key.go:401.53,403.16 2 1
|
||||
github.com/docker/libtrust/ec_key.go:407.2,410.15 3 1
|
||||
github.com/docker/libtrust/ec_key.go:403.16,405.3 1 0
|
||||
github.com/docker/libtrust/ec_key.go:414.53,416.16 2 1
|
||||
github.com/docker/libtrust/ec_key.go:420.2,423.15 3 1
|
||||
github.com/docker/libtrust/ec_key.go:416.16,418.3 1 0
|
||||
github.com/docker/libtrust/ec_key.go:427.53,429.16 2 1
|
||||
github.com/docker/libtrust/ec_key.go:433.2,436.15 3 1
|
||||
github.com/docker/libtrust/ec_key.go:429.16,431.3 1 0
|
||||
github.com/docker/libtrust/filter.go:10.91,13.30 2 1
|
||||
github.com/docker/libtrust/filter.go:49.2,49.22 1 1
|
||||
github.com/docker/libtrust/filter.go:13.30,15.55 2 1
|
||||
github.com/docker/libtrust/filter.go:28.3,28.22 1 1
|
||||
github.com/docker/libtrust/filter.go:36.3,36.37 1 1
|
||||
github.com/docker/libtrust/filter.go:16.3,17.13 1 1
|
||||
github.com/docker/libtrust/filter.go:18.3,19.28 1 1
|
||||
github.com/docker/libtrust/filter.go:19.28,21.12 2 1
|
||||
github.com/docker/libtrust/filter.go:24.5,24.29 1 1
|
||||
github.com/docker/libtrust/filter.go:21.12,22.14 1 0
|
||||
github.com/docker/libtrust/filter.go:28.22,29.20 1 1
|
||||
github.com/docker/libtrust/filter.go:32.4,32.12 1 1
|
||||
github.com/docker/libtrust/filter.go:29.20,31.5 1 1
|
||||
github.com/docker/libtrust/filter.go:36.37,38.18 2 1
|
||||
github.com/docker/libtrust/filter.go:42.4,42.13 1 1
|
||||
github.com/docker/libtrust/filter.go:38.18,40.5 1 0
|
||||
github.com/docker/libtrust/filter.go:42.13,44.13 2 1
|
||||
github.com/docker/libtrust/hash.go:15.51,17.2 1 1
|
||||
github.com/docker/libtrust/hash.go:19.51,21.2 1 1
|
||||
github.com/docker/libtrust/hash.go:32.75,33.9 1 1
|
||||
github.com/docker/libtrust/hash.go:34.2,35.20 1 1
|
||||
github.com/docker/libtrust/hash.go:36.2,37.20 1 1
|
||||
github.com/docker/libtrust/hash.go:38.2,39.20 1 1
|
||||
github.com/docker/libtrust/hash.go:40.2,41.82 1 0
|
||||
github.com/docker/libtrust/hash.go:45.85,46.9 1 1
|
||||
github.com/docker/libtrust/hash.go:47.2,48.15 1 1
|
||||
github.com/docker/libtrust/hash.go:49.2,50.15 1 1
|
||||
github.com/docker/libtrust/hash.go:51.2,52.14 1 1
|
||||
github.com/docker/libtrust/hash.go:53.2,54.15 1 1
|
|
@@ -0,0 +1,14 @@
# The `trust` client binary

The `trust` binary is a tool to manage key pairs, signatures, and ACLs
in the global trust graph.

## Command Reference

### `grant` command

### `grants` command

### `register` command

Registers a public key with a key server and links it with a user namespace.
568  Godeps/_workspace/src/github.com/docker/libtrust/jsonsign/jsonsign.go  generated  vendored  Normal file
|
@ -0,0 +1,568 @@
|
|||
package jsonsign
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
"unicode"
|
||||
|
||||
"github.com/docker/libtrust"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrInvalidSignContent is used when the content to be signed is invalid.
|
||||
ErrInvalidSignContent = errors.New("invalid sign content")
|
||||
|
||||
// ErrInvalidJSONContent is used when invalid json is encountered.
|
||||
ErrInvalidJSONContent = errors.New("invalid json content")
|
||||
|
||||
// ErrMissingSignatureKey is used when the specified signature key
|
||||
// does not exist in the JSON content.
|
||||
ErrMissingSignatureKey = errors.New("missing signature key")
|
||||
)
|
||||
|
||||
type jsHeader struct {
|
||||
JWK libtrust.PublicKey `json:"jwk,omitempty"`
|
||||
Algorithm string `json:"alg"`
|
||||
Chain []string `json:"x5c,omitempty"`
|
||||
}
|
||||
|
||||
type jsSignature struct {
|
||||
Header *jsHeader `json:"header"`
|
||||
Signature string `json:"signature"`
|
||||
Protected string `json:"protected,omitempty"`
|
||||
}
|
||||
|
||||
type signKey struct {
|
||||
libtrust.PrivateKey
|
||||
Chain []*x509.Certificate
|
||||
}
|
||||
|
||||
// JSONSignature represents a signature of a json object.
|
||||
type JSONSignature struct {
|
||||
payload string
|
||||
signatures []*jsSignature
|
||||
indent string
|
||||
formatLength int
|
||||
formatTail []byte
|
||||
}
|
||||
|
||||
func newJSONSignature() *JSONSignature {
|
||||
return &JSONSignature{
|
||||
signatures: make([]*jsSignature, 0, 1),
|
||||
}
|
||||
}
|
||||
|
||||
// Payload returns the encoded payload of the signature. This
|
||||
// payload should not be signed directly
|
||||
func (js *JSONSignature) Payload() ([]byte, error) {
|
||||
return joseBase64UrlDecode(js.payload)
|
||||
}
|
||||
|
||||
func (js *JSONSignature) protectedHeader() (string, error) {
|
||||
protected := map[string]interface{}{
|
||||
"formatLength": js.formatLength,
|
||||
"formatTail": joseBase64UrlEncode(js.formatTail),
|
||||
"time": time.Now().UTC().Format(time.RFC3339),
|
||||
}
|
||||
protectedBytes, err := json.Marshal(protected)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return joseBase64UrlEncode(protectedBytes), nil
|
||||
}
|
||||
|
||||
func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) {
|
||||
buf := make([]byte, len(js.payload)+len(protectedHeader)+1)
|
||||
copy(buf, protectedHeader)
|
||||
buf[len(protectedHeader)] = '.'
|
||||
copy(buf[len(protectedHeader)+1:], js.payload)
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// Sign adds a signature using the given private key.
|
||||
func (js *JSONSignature) Sign(key PrivateKey) error {
|
||||
protected, err := js.protectedHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
signBytes, err := js.signBytes(protected)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
header := &jsHeader{
|
||||
JWK: key.PublicKey(),
|
||||
Algorithm: algorithm,
|
||||
}
|
||||
sig := &jsSignature{
|
||||
Header: header,
|
||||
Signature: joseBase64UrlEncode(sigBytes),
|
||||
Protected: protected,
|
||||
}
|
||||
|
||||
js.signatures = append(js.signatures, sig)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SignWithChain adds a signature using the given private key
|
||||
// and setting the x509 chain. The public key of the first element
|
||||
// in the chain must be the public key corresponding with the sign key.
|
||||
func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error {
|
||||
// Ensure key.Chain[0] is public key for key
|
||||
//key.Chain.PublicKey
|
||||
//key.PublicKey().CryptoPublicKey()
|
||||
|
||||
// Verify chain
|
||||
protected, err := js.protectedHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
signBytes, err := js.signBytes(protected)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
header := &jsHeader{
|
||||
Chain: make([]string, len(chain)),
|
||||
Algorithm: algorithm,
|
||||
}
|
||||
|
||||
for i, cert := range chain {
|
||||
header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw)
|
||||
}
|
||||
|
||||
sig := &jsSignature{
|
||||
Header: header,
|
||||
Signature: joseBase64UrlEncode(sigBytes),
|
||||
Protected: protected,
|
||||
}
|
||||
|
||||
js.signatures = append(js.signatures, sig)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Verify verifies all the signatures and returns the list of
|
||||
// public keys used to sign. Any x509 chains are not checked.
|
||||
func (js *JSONSignature) Verify() ([]libtrust.PublicKey, error) {
|
||||
keys := make([]libtrust.PublicKey, len(js.signatures))
|
||||
for i, signature := range js.signatures {
|
||||
signBytes, err := js.signBytes(signature.Protected)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var publicKey libtrust.PublicKey
|
||||
if len(signature.Header.Chain) > 0 {
|
||||
certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cert, err := x509.ParseCertificate(certBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
publicKey, err = FromCryptoPublicKey(cert.PublicKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if signature.Header.JWK != nil {
|
||||
publicKey = signature.Header.JWK
|
||||
} else {
|
||||
return nil, errors.New("missing public key")
|
||||
}
|
||||
|
||||
sigBytes, err := joseBase64UrlDecode(signature.Signature)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
keys[i] = publicKey
|
||||
}
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
// VerifyChains verifies all the signatures and the chains associated
|
||||
// with each signature and returns the list of verified chains.
|
||||
// Signatures without an x509 chain are not checked.
|
||||
func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) {
|
||||
chains := make([][]*x509.Certificate, 0, len(js.signatures))
|
||||
for _, signature := range js.signatures {
|
||||
signBytes, err := js.signBytes(signature.Protected)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var publicKey libtrust.PublicKey
|
||||
if len(signature.Header.Chain) > 0 {
|
||||
certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cert, err := x509.ParseCertificate(certBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
publicKey, err = FromCryptoPublicKey(cert.PublicKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
intermediates := x509.NewCertPool()
|
||||
if len(signature.Header.Chain) > 1 {
|
||||
intermediateChain := signature.Header.Chain[1:]
|
||||
for i := range intermediateChain {
|
||||
certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
intermediate, err := x509.ParseCertificate(certBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
intermediates.AddCert(intermediate)
|
||||
}
|
||||
}
|
||||
|
||||
verifyOptions := x509.VerifyOptions{
|
||||
Intermediates: intermediates,
|
||||
Roots: ca,
|
||||
}
|
||||
|
||||
verifiedChains, err := cert.Verify(verifyOptions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
chains = append(chains, verifiedChains...)
|
||||
|
||||
sigBytes, err := joseBase64UrlDecode(signature.Signature)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
return chains, nil
|
||||
}
|
||||
|
||||
// JWS returns JSON serialized JWS according to
|
||||
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2
|
||||
func (js *JSONSignature) JWS() ([]byte, error) {
|
||||
if len(js.signatures) == 0 {
|
||||
return nil, errors.New("missing signature")
|
||||
}
|
||||
jsonMap := map[string]interface{}{
|
||||
"payload": js.payload,
|
||||
"signatures": js.signatures,
|
||||
}
|
||||
|
||||
return json.MarshalIndent(jsonMap, "", " ")
|
||||
}
|
||||
|
||||
func notSpace(r rune) bool {
|
||||
return !unicode.IsSpace(r)
|
||||
}
|
||||
|
||||
func detectJSONIndent(jsonContent []byte) (indent string) {
|
||||
if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' {
|
||||
quoteIndex := bytes.IndexRune(jsonContent[1:], '"')
|
||||
if quoteIndex > 0 {
|
||||
indent = string(jsonContent[2 : quoteIndex+1])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type jsParsedHeader struct {
|
||||
JWK json.RawMessage `json:"jwk"`
|
||||
Algorithm string `json:"alg"`
|
||||
Chain []string `json:"x5c"`
|
||||
}
|
||||
|
||||
type jsParsedSignature struct {
|
||||
Header *jsParsedHeader `json:"header"`
|
||||
Signature string `json:"signature"`
|
||||
Protected string `json:"protected"`
|
||||
}
|
||||
|
||||
// ParseJWS parses a JWS serialized JSON object into a Json Signature.
|
||||
func ParseJWS(content []byte) (*JSONSignature, error) {
|
||||
type jsParsed struct {
|
||||
Payload string `json:"payload"`
|
||||
Signatures []*jsParsedSignature `json:"signatures"`
|
||||
}
|
||||
parsed := &jsParsed{}
|
||||
err := json.Unmarshal(content, parsed)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(parsed.Signatures) == 0 {
|
||||
return nil, errors.New("missing signatures")
|
||||
}
|
||||
payload, err := joseBase64UrlDecode(parsed.Payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
js, err := NewJSONSignature(payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
js.signatures = make([]*jsSignature, len(parsed.Signatures))
|
||||
for i, signature := range parsed.Signatures {
|
||||
header := &jsHeader{
|
||||
Algorithm: signature.Header.Algorithm,
|
||||
}
|
||||
if signature.Header.Chain != nil {
|
||||
header.Chain = signature.Header.Chain
|
||||
}
|
||||
if signature.Header.JWK != nil {
|
||||
publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
header.JWK = publicKey
|
||||
}
|
||||
js.signatures[i] = &jsSignature{
|
||||
Header: header,
|
||||
Signature: signature.Signature,
|
||||
Protected: signature.Protected,
|
||||
}
|
||||
}
|
||||
|
||||
return js, nil
|
||||
}
|
||||
|
||||
// NewJSONSignature returns a new unsigned JWS from a json byte array.
|
||||
// JSONSignature will need to be signed before serializing or storing.
|
||||
func NewJSONSignature(content []byte) (*JSONSignature, error) {
|
||||
var dataMap map[string]interface{}
|
||||
err := json.Unmarshal(content, &dataMap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
js := newJSONSignature()
|
||||
js.indent = detectJSONIndent(content)
|
||||
|
||||
js.payload = joseBase64UrlEncode(content)
|
||||
|
||||
// Find trailing } and whitespace, put in protected header
|
||||
closeIndex := bytes.LastIndexFunc(content, notSpace)
|
||||
if content[closeIndex] != '}' {
|
||||
return nil, ErrInvalidJSONContent
|
||||
}
|
||||
lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace)
|
||||
if content[lastRuneIndex] == ',' {
|
||||
return nil, ErrInvalidJSONContent
|
||||
}
|
||||
js.formatLength = lastRuneIndex + 1
|
||||
js.formatTail = content[js.formatLength:]
|
||||
|
||||
return js, nil
|
||||
}
|
||||
|
||||
// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or
|
||||
// struct. JWS will need to be signed before serializing or storing.
|
||||
func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) {
|
||||
switch content.(type) {
|
||||
case map[string]interface{}:
|
||||
case struct{}:
|
||||
default:
|
||||
return nil, errors.New("invalid data type")
|
||||
}
|
||||
|
||||
js := newJSONSignature()
|
||||
js.indent = " "
|
||||
|
||||
payload, err := json.MarshalIndent(content, "", js.indent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
js.payload = joseBase64UrlEncode(payload)
|
||||
|
||||
// Remove '\n}' from formatted section, put in protected header
|
||||
js.formatLength = len(payload) - 2
|
||||
js.formatTail = payload[js.formatLength:]
|
||||
|
||||
return js, nil
|
||||
}
|
||||
|
||||
func readIntFromMap(key string, m map[string]interface{}) (int, bool) {
|
||||
value, ok := m[key]
|
||||
if !ok {
|
||||
return 0, false
|
||||
}
|
||||
switch v := value.(type) {
|
||||
case int:
|
||||
return v, true
|
||||
case float64:
|
||||
return int(v), true
|
||||
default:
|
||||
return 0, false
|
||||
}
|
||||
}
|
||||
|
||||
func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) {
|
||||
value, ok := m[key]
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
v, ok = value.(string)
|
||||
return
|
||||
}
|
||||
|
||||
// ParsePrettySignature parses a formatted signature into a
|
||||
// JSON signature. If the signatures are missing the format information
|
||||
// an error is thrown. The formatted signature must be created by
|
||||
// the same method as format signature.
|
||||
func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) {
|
||||
var contentMap map[string]json.RawMessage
|
||||
err := json.Unmarshal(content, &contentMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error unmarshalling content: %s", err)
|
||||
}
|
||||
sigMessage, ok := contentMap[signatureKey]
|
||||
if !ok {
|
||||
return nil, ErrMissingSignatureKey
|
||||
}
|
||||
|
||||
var signatureBlocks []jsParsedSignature
|
||||
err = json.Unmarshal([]byte(sigMessage), &signatureBlocks)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error unmarshalling signatures: %s", err)
|
||||
}
|
||||
|
||||
js := newJSONSignature()
|
||||
js.signatures = make([]*jsSignature, len(signatureBlocks))
|
||||
|
||||
for i, signatureBlock := range signatureBlocks {
|
||||
protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("base64 decode error: %s", err)
|
||||
}
|
||||
var protectedHeader map[string]interface{}
|
||||
err = json.Unmarshal(protectedBytes, &protectedHeader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error unmarshalling protected header: %s", err)
|
||||
}
|
||||
|
||||
formatLength, ok := readIntFromMap("formatLength", protectedHeader)
|
||||
if !ok {
|
||||
return nil, errors.New("missing formatted length")
|
||||
}
|
||||
encodedTail, ok := readStringFromMap("formatTail", protectedHeader)
|
||||
if !ok {
|
||||
return nil, errors.New("missing formatted tail")
|
||||
}
|
||||
formatTail, err := joseBase64UrlDecode(encodedTail)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("base64 decode error on tail: %s", err)
|
||||
}
|
||||
if js.formatLength == 0 {
|
||||
js.formatLength = formatLength
|
||||
} else if js.formatLength != formatLength {
|
||||
return nil, errors.New("conflicting format length")
|
||||
}
|
||||
if len(js.formatTail) == 0 {
|
||||
js.formatTail = formatTail
|
||||
} else if bytes.Compare(js.formatTail, formatTail) != 0 {
|
||||
return nil, errors.New("conflicting format tail")
|
||||
}
|
||||
|
||||
header := &jsHeader{
|
||||
Algorithm: signatureBlock.Header.Algorithm,
|
||||
Chain: signatureBlock.Header.Chain,
|
||||
}
|
||||
if signatureBlock.Header.JWK != nil {
|
||||
publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error unmarshalling public key: %s", err)
|
||||
}
|
||||
header.JWK = publicKey
|
||||
}
|
||||
js.signatures[i] = &jsSignature{
|
||||
Header: header,
|
||||
Signature: signatureBlock.Signature,
|
||||
Protected: signatureBlock.Protected,
|
||||
}
|
||||
}
|
||||
if js.formatLength > len(content) {
|
||||
return nil, errors.New("invalid format length")
|
||||
}
|
||||
formatted := make([]byte, js.formatLength+len(js.formatTail))
|
||||
copy(formatted, content[:js.formatLength])
|
||||
copy(formatted[js.formatLength:], js.formatTail)
|
||||
js.indent = detectJSONIndent(formatted)
|
||||
js.payload = joseBase64UrlEncode(formatted)
|
||||
|
||||
return js, nil
|
||||
}
|
||||
|
||||
// PrettySignature formats a json signature into an easy to read
|
||||
// single json serialized object.
|
||||
func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) {
|
||||
if len(js.signatures) == 0 {
|
||||
return nil, errors.New("no signatures")
|
||||
}
|
||||
payload, err := joseBase64UrlDecode(js.payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payload = payload[:js.formatLength]
|
||||
|
||||
var marshalled []byte
|
||||
var marshallErr error
|
||||
if js.indent != "" {
|
||||
marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent)
|
||||
} else {
|
||||
marshalled, marshallErr = json.Marshal(js.signatures)
|
||||
}
|
||||
if marshallErr != nil {
|
||||
return nil, marshallErr
|
||||
}
|
||||
|
||||
buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34))
|
||||
buf.Write(payload)
|
||||
buf.WriteByte(',')
|
||||
if js.indent != "" {
|
||||
buf.WriteByte('\n')
|
||||
buf.WriteString(js.indent)
|
||||
buf.WriteByte('"')
|
||||
buf.WriteString(signatureKey)
|
||||
buf.WriteString("\": ")
|
||||
buf.Write(marshalled)
|
||||
buf.WriteByte('\n')
|
||||
} else {
|
||||
buf.WriteByte('"')
|
||||
buf.WriteString(signatureKey)
|
||||
buf.WriteString("\":")
|
||||
buf.Write(marshalled)
|
||||
}
|
||||
buf.WriteByte('}')
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
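
The jsonsign.go file above is vendored verbatim; for orientation only, here is a minimal, hypothetical round-trip sketch built from the exported functions it defines (NewJSONSignatureFromMap, Sign, PrettySignature, ParsePrettySignature, Verify). The import path, the use of libtrust.GenerateECP256PrivateKey for key generation, and the assumption that the package's PrivateKey parameter is satisfied by a libtrust key (as the accompanying tests suggest) are assumptions, not something this commit documents.

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/libtrust"
	"github.com/docker/libtrust/jsonsign" // assumed import path for the vendored package
)

func main() {
	// Generate an ephemeral EC P-256 signing key (assumed compatible with jsonsign's PrivateKey).
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// Wrap an arbitrary JSON document, built here from a map, in a JSONSignature.
	js, err := jsonsign.NewJSONSignatureFromMap(map[string]interface{}{
		"name": "example/image",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Add a signature over the payload plus its protected header.
	if err := js.Sign(key); err != nil {
		log.Fatal(err)
	}

	// Serialize payload and signatures as one readable JSON object...
	pretty, err := js.PrettySignature("signatures")
	if err != nil {
		log.Fatal(err)
	}

	// ...then parse it back and verify, recovering the signing public keys.
	parsed, err := jsonsign.ParsePrettySignature(pretty, "signatures")
	if err != nil {
		log.Fatal(err)
	}
	keys, err := parsed.Verify()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("signed by", keys[0].KeyID())
}
```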
372  Godeps/_workspace/src/github.com/docker/libtrust/jsonsign/jsonsign_test.go  generated  vendored  Normal file
|
@ -0,0 +1,372 @@
|
|||
package jsonsign
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/libtrust"
|
||||
)
|
||||
|
||||
func createTestJSON(sigKey string, indent string) (map[string]interface{}, []byte) {
|
||||
testMap := map[string]interface{}{
|
||||
"name": "dmcgowan/mycontainer",
|
||||
"config": map[string]interface{}{
|
||||
"ports": []int{9101, 9102},
|
||||
"run": "/bin/echo \"Hello\"",
|
||||
},
|
||||
"layers": []string{
|
||||
"2893c080-27f5-11e4-8c21-0800200c9a66",
|
||||
"c54bc25b-fbb2-497b-a899-a8bc1b5b9d55",
|
||||
"4d5d7e03-f908-49f3-a7f6-9ba28dfe0fb4",
|
||||
"0b6da891-7f7f-4abf-9c97-7887549e696c",
|
||||
"1d960389-ae4f-4011-85fd-18d0f96a67ad",
|
||||
},
|
||||
}
|
||||
formattedSection := `{"config":{"ports":[9101,9102],"run":"/bin/echo \"Hello\""},"layers":["2893c080-27f5-11e4-8c21-0800200c9a66","c54bc25b-fbb2-497b-a899-a8bc1b5b9d55","4d5d7e03-f908-49f3-a7f6-9ba28dfe0fb4","0b6da891-7f7f-4abf-9c97-7887549e696c","1d960389-ae4f-4011-85fd-18d0f96a67ad"],"name":"dmcgowan/mycontainer","%s":[{"header":{`
|
||||
formattedSection = fmt.Sprintf(formattedSection, sigKey)
|
||||
if indent != "" {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
json.Indent(buf, []byte(formattedSection), "", indent)
|
||||
return testMap, buf.Bytes()
|
||||
}
|
||||
return testMap, []byte(formattedSection)
|
||||
|
||||
}
|
||||
|
||||
func TestSignJSON(t *testing.T) {
|
||||
key, err := GenerateECP256PrivateKey()
|
||||
if err != nil {
|
||||
t.Fatalf("Error generating EC key: %s", err)
|
||||
}
|
||||
|
||||
testMap, _ := createTestJSON("buildSignatures", " ")
|
||||
indented, err := json.MarshalIndent(testMap, "", " ")
|
||||
if err != nil {
|
||||
t.Fatalf("Marshall error: %s", err)
|
||||
}
|
||||
|
||||
js, err := NewJSONSignature(indented)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating JSON signature: %s", err)
|
||||
}
|
||||
err = js.Sign(key)
|
||||
if err != nil {
|
||||
t.Fatalf("Error signing content: %s", err)
|
||||
}
|
||||
|
||||
keys, err := js.Verify()
|
||||
if err != nil {
|
||||
t.Fatalf("Error verifying signature: %s", err)
|
||||
}
|
||||
if len(keys) != 1 {
|
||||
t.Fatalf("Error wrong number of keys returned")
|
||||
}
|
||||
if keys[0].KeyID() != key.KeyID() {
|
||||
t.Fatalf("Unexpected public key returned")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestSignMap(t *testing.T) {
|
||||
key, err := GenerateECP256PrivateKey()
|
||||
if err != nil {
|
||||
t.Fatalf("Error generating EC key: %s", err)
|
||||
}
|
||||
|
||||
testMap, _ := createTestJSON("buildSignatures", " ")
|
||||
js, err := NewJSONSignatureFromMap(testMap)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating JSON signature: %s", err)
|
||||
}
|
||||
err = js.Sign(key)
|
||||
if err != nil {
|
||||
t.Fatalf("Error signing JSON signature: %s", err)
|
||||
}
|
||||
|
||||
keys, err := js.Verify()
|
||||
if err != nil {
|
||||
t.Fatalf("Error verifying signature: %s", err)
|
||||
}
|
||||
if len(keys) != 1 {
|
||||
t.Fatalf("Error wrong number of keys returned")
|
||||
}
|
||||
if keys[0].KeyID() != key.KeyID() {
|
||||
t.Fatalf("Unexpected public key returned")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFormattedJson(t *testing.T) {
|
||||
key, err := GenerateECP256PrivateKey()
|
||||
if err != nil {
|
||||
t.Fatalf("Error generating EC key: %s", err)
|
||||
}
|
||||
|
||||
testMap, firstSection := createTestJSON("buildSignatures", " ")
|
||||
indented, err := json.MarshalIndent(testMap, "", " ")
|
||||
if err != nil {
|
||||
t.Fatalf("Marshall error: %s", err)
|
||||
}
|
||||
|
||||
js, err := NewJSONSignature(indented)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating JSON signature: %s", err)
|
||||
}
|
||||
err = js.Sign(key)
|
||||
if err != nil {
|
||||
t.Fatalf("Error signing content: %s", err)
|
||||
}
|
||||
|
||||
b, err := js.PrettySignature("buildSignatures")
|
||||
if err != nil {
|
||||
t.Fatalf("Error signing map: %s", err)
|
||||
}
|
||||
|
||||
if bytes.Compare(b[:len(firstSection)], firstSection) != 0 {
|
||||
t.Fatalf("Wrong signed value\nExpected:\n%s\nActual:\n%s", firstSection, b[:len(firstSection)])
|
||||
}
|
||||
|
||||
parsed, err := ParsePrettySignature(b, "buildSignatures")
|
||||
if err != nil {
|
||||
t.Fatalf("Error parsing formatted signature: %s", err)
|
||||
}
|
||||
|
||||
keys, err := parsed.Verify()
|
||||
if err != nil {
|
||||
t.Fatalf("Error verifying signature: %s", err)
|
||||
}
|
||||
if len(keys) != 1 {
|
||||
t.Fatalf("Error wrong number of keys returned")
|
||||
}
|
||||
if keys[0].KeyID() != key.KeyID() {
|
||||
t.Fatalf("Unexpected public key returned")
|
||||
}
|
||||
|
||||
var unmarshalled map[string]interface{}
|
||||
err = json.Unmarshal(b, &unmarshalled)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not unmarshall after parse: %s", err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestFormattedFlatJson(t *testing.T) {
|
||||
key, err := GenerateECP256PrivateKey()
|
||||
if err != nil {
|
||||
t.Fatalf("Error generating EC key: %s", err)
|
||||
}
|
||||
|
||||
testMap, firstSection := createTestJSON("buildSignatures", "")
|
||||
unindented, err := json.Marshal(testMap)
|
||||
if err != nil {
|
||||
t.Fatalf("Marshall error: %s", err)
|
||||
}
|
||||
|
||||
js, err := NewJSONSignature(unindented)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating JSON signature: %s", err)
|
||||
}
|
||||
err = js.Sign(key)
|
||||
if err != nil {
|
||||
t.Fatalf("Error signing JSON signature: %s", err)
|
||||
}
|
||||
|
||||
b, err := js.PrettySignature("buildSignatures")
|
||||
if err != nil {
|
||||
t.Fatalf("Error signing map: %s", err)
|
||||
}
|
||||
|
||||
if bytes.Compare(b[:len(firstSection)], firstSection) != 0 {
|
||||
t.Fatalf("Wrong signed value\nExpected:\n%s\nActual:\n%s", firstSection, b[:len(firstSection)])
|
||||
}
|
||||
|
||||
parsed, err := ParsePrettySignature(b, "buildSignatures")
|
||||
if err != nil {
|
||||
t.Fatalf("Error parsing formatted signature: %s", err)
|
||||
}
|
||||
|
||||
keys, err := parsed.Verify()
|
||||
if err != nil {
|
||||
t.Fatalf("Error verifying signature: %s", err)
|
||||
}
|
||||
if len(keys) != 1 {
|
||||
t.Fatalf("Error wrong number of keys returned")
|
||||
}
|
||||
if keys[0].KeyID() != key.KeyID() {
|
||||
t.Fatalf("Unexpected public key returned")
|
||||
}
|
||||
}
|
||||
|
||||
func generateTrustCA() (PrivateKey, *x509.Certificate) {
|
||||
key, err := GenerateECP256PrivateKey()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
cert := &x509.Certificate{
|
||||
SerialNumber: big.NewInt(0),
|
||||
Subject: pkix.Name{
|
||||
CommonName: "CA Root",
|
||||
},
|
||||
NotBefore: time.Now().Add(-time.Second),
|
||||
NotAfter: time.Now().Add(time.Hour),
|
||||
IsCA: true,
|
||||
KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
|
||||
BasicConstraintsValid: true,
|
||||
}
|
||||
|
||||
certDER, err := x509.CreateCertificate(
|
||||
rand.Reader, cert, cert,
|
||||
key.CryptoPublicKey(), key.CryptoPrivateKey(),
|
||||
)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
cert, err = x509.ParseCertificate(certDER)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return key, cert
|
||||
}
|
||||
|
||||
func generateIntermediate(key libtrust.PublicKey, parentKey PrivateKey, parent *x509.Certificate) *x509.Certificate {
|
||||
cert := &x509.Certificate{
|
||||
SerialNumber: big.NewInt(0),
|
||||
Subject: pkix.Name{
|
||||
CommonName: "Intermediate",
|
||||
},
|
||||
NotBefore: time.Now().Add(-time.Second),
|
||||
NotAfter: time.Now().Add(time.Hour),
|
||||
IsCA: true,
|
||||
KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
|
||||
BasicConstraintsValid: true,
|
||||
}
|
||||
|
||||
certDER, err := x509.CreateCertificate(
|
||||
rand.Reader, cert, parent,
|
||||
key.CryptoPublicKey(), parentKey.CryptoPrivateKey(),
|
||||
)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
cert, err = x509.ParseCertificate(certDER)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return cert
|
||||
}
|
||||
|
||||
func generateTrustCert(key libtrust.PublicKey, parentKey PrivateKey, parent *x509.Certificate) *x509.Certificate {
|
||||
cert := &x509.Certificate{
|
||||
SerialNumber: big.NewInt(0),
|
||||
Subject: pkix.Name{
|
||||
CommonName: "Trust Cert",
|
||||
},
|
||||
NotBefore: time.Now().Add(-time.Second),
|
||||
NotAfter: time.Now().Add(time.Hour),
|
||||
IsCA: true,
|
||||
KeyUsage: x509.KeyUsageDigitalSignature,
|
||||
BasicConstraintsValid: true,
|
||||
}
|
||||
|
||||
certDER, err := x509.CreateCertificate(
|
||||
rand.Reader, cert, parent,
|
||||
key.CryptoPublicKey(), parentKey.CryptoPrivateKey(),
|
||||
)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
cert, err = x509.ParseCertificate(certDER)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return cert
|
||||
}
|
||||
|
||||
func generateTrustChain(key PrivateKey, ca *x509.Certificate) (PrivateKey, []*x509.Certificate) {
|
||||
parent := ca
|
||||
parentKey := key
|
||||
chain := make([]*x509.Certificate, 6)
|
||||
for i := 5; i > 0; i-- {
|
||||
intermediatekey, err := GenerateECP256PrivateKey()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
chain[i] = generateIntermediate(intermediatekey, parentKey, parent)
|
||||
parent = chain[i]
|
||||
parentKey = intermediatekey
|
||||
}
|
||||
trustKey, err := GenerateECP256PrivateKey()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
chain[0] = generateTrustCert(trustKey, parentKey, parent)
|
||||
|
||||
return trustKey, chain
|
||||
}
|
||||
|
||||
func TestChainVerify(t *testing.T) {
|
||||
caKey, ca := generateTrustCA()
|
||||
trustKey, chain := generateTrustChain(caKey, ca)
|
||||
|
||||
testMap, _ := createTestJSON("verifySignatures", " ")
|
||||
js, err := NewJSONSignatureFromMap(testMap)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating JSONSignature from map: %s", err)
|
||||
}
|
||||
|
||||
err = js.SignWithChain(trustKey, chain)
|
||||
if err != nil {
|
||||
t.Fatalf("Error signing with chain: %s", err)
|
||||
}
|
||||
|
||||
pool := x509.NewCertPool()
|
||||
pool.AddCert(ca)
|
||||
chains, err := js.VerifyChains(pool)
|
||||
if err != nil {
|
||||
t.Fatalf("Error verifying content: %s", err)
|
||||
}
|
||||
if len(chains) != 1 {
|
||||
t.Fatalf("Unexpected chains length: %d", len(chains))
|
||||
}
|
||||
if len(chains[0]) != 7 {
|
||||
t.Fatalf("Unexpected chain length: %d", len(chains[0]))
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidChain(t *testing.T) {
|
||||
caKey, ca := generateTrustCA()
|
||||
trustKey, chain := generateTrustChain(caKey, ca)
|
||||
|
||||
testMap, _ := createTestJSON("verifySignatures", " ")
|
||||
js, err := NewJSONSignatureFromMap(testMap)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating JSONSignature from map: %s", err)
|
||||
}
|
||||
|
||||
err = js.SignWithChain(trustKey, chain[:5])
|
||||
if err != nil {
|
||||
t.Fatalf("Error signing with chain: %s", err)
|
||||
}
|
||||
|
||||
pool := x509.NewCertPool()
|
||||
pool.AddCert(ca)
|
||||
chains, err := js.VerifyChains(pool)
|
||||
if err == nil {
|
||||
t.Fatalf("Expected error verifying with bad chain")
|
||||
}
|
||||
if len(chains) != 0 {
|
||||
t.Fatalf("Unexpected chains returned from invalid verify")
|
||||
}
|
||||
}
|
Binary file not shown.
Binary file not shown.
27  Godeps/_workspace/src/github.com/docker/libtrust/manager/manager.go  generated  vendored  Normal file
|
@@ -0,0 +1,27 @@
package main

import (
	"bytes"
	"github.com/docker/libtrust"
	"log"
)

func main() {
	ids, err := libtrust.ListSSHAgentIDs()
	if err != nil {
		log.Fatalf("Error listing ssh agent ids: %s", err)
	}

	for i := range ids {
		var id libtrust.ID
		id = ids[i]
		log.Printf("ID: %#v", id.JSONWebKey())

		signed, err := id.Sign(bytes.NewReader([]byte("hello there")))
		if err != nil {
			log.Fatalf("Error signing: %s", err)
		}

		log.Printf("Signed\n%x", signed)
	}
}
|
8  Godeps/_workspace/src/github.com/docker/libtrust/official/ca-key.json  generated  vendored  Normal file
|
@ -0,0 +1,8 @@
|
|||
{
|
||||
"crv": "P-256",
|
||||
"d": "T68FInnZjHW8c8Y3y8UKDEwVEfubGX7mP8X5EOpxRw0",
|
||||
"kid": "GW4F:GS7P:22KY:SQXM:FI74:SQBS:KRXR:V4TR:IG6Q:7NLL:OJIS:3XXJ",
|
||||
"kty": "EC",
|
||||
"x": "B23slGpLr16MTAJ3MaGVZK6QYEb_l5EZkpnrPcdX6JQ",
|
||||
"y": "xgkrg8u46RGLbAMqeg7tYxMGbsEdpxdZ4liN_V8TNZQ"
|
||||
}
|
|
@ -0,0 +1,11 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIBnzCCAUSgAwIBAgIBADAKBggqhkjOPQQDAjBGMUQwQgYDVQQDEztHVzRGOkdT
|
||||
N1A6MjJLWTpTUVhNOkZJNzQ6U1FCUzpLUlhSOlY0VFI6SUc2UTo3TkxMOk9KSVM6
|
||||
M1hYSjAeFw0xNDA5MDUyMzA3NTRaFw0yNDA5MDkyMzA3NTRaMEYxRDBCBgNVBAMT
|
||||
O0dXNEY6R1M3UDoyMktZOlNRWE06Rkk3NDpTUUJTOktSWFI6VjRUUjpJRzZROjdO
|
||||
TEw6T0pJUzozWFhKMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEB23slGpLr16M
|
||||
TAJ3MaGVZK6QYEb/l5EZkpnrPcdX6JTGCSuDy7jpEYtsAyp6Du1jEwZuwR2nF1ni
|
||||
WI39XxM1lKMjMCEwDgYDVR0PAQH/BAQDAgAEMA8GA1UdEwEB/wQFMAMBAf8wCgYI
|
||||
KoZIzj0EAwIDSQAwRgIhAOFl3YnbPAPd7hRbh2Wpe0RrtZ0KAZGpjKk3C1ZhQEG4
|
||||
AiEAh6R8OVclkFNXFbQML8X5uEL+3d7wB+osNU0OlHFaiiQ=
|
||||
-----END CERTIFICATE-----
|
22  Godeps/_workspace/src/github.com/docker/libtrust/official/cert.pem  generated  vendored  Normal file
|
@ -0,0 +1,22 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIBnTCCAUSgAwIBAgIBAjAKBggqhkjOPQQDAjBGMUQwQgYDVQQDEztPUUlTOkcy
|
||||
QjQ6UDdRQjpHM1pJOlRIUE46WklUTTpVRFAyOlY3WlM6VVM2SDpDVUxEOlJJWkI6
|
||||
WDdQSTAeFw0xNDA5MTIyMzA3NTNaFw0xNDEyMTEyMzA3NTRaMEYxRDBCBgNVBAMT
|
||||
O1c1MlY6QldDUTpPVVk2OklRNU46WDVYRzpNN0tKOkdYNlA6VFdNUTpaRkhKOk02
|
||||
MzI6SzIzVjpYRjQ1MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEG9rANGMaOxoZ
|
||||
4XzItNBpqrzZLe/hyMY/hnlXARER0rbEp5B5yl5sDYaXEI80VgpUGNvleFOVc7yN
|
||||
3rclT14tm6MjMCEwDgYDVR0PAQH/BAQDAgCAMA8GA1UdEwEB/wQFMAMBAf8wCgYI
|
||||
KoZIzj0EAwIDRwAwRAIgQhkTTERnf/+2i1hM+hHivMngiciCQgqdcxo1UeK53fkC
|
||||
IDzml+Gk4oAFGGcsO3wdHqbxfjyGkNr8hxfuWoq6Wl3U
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAUSgAwIBAgIBATAKBggqhkjOPQQDAjBGMUQwQgYDVQQDEztHVzRGOkdT
|
||||
N1A6MjJLWTpTUVhNOkZJNzQ6U1FCUzpLUlhSOlY0VFI6SUc2UTo3TkxMOk9KSVM6
|
||||
M1hYSjAeFw0xNDA5MTIyMzA3NTNaFw0xNDEyMTEyMzA3NTRaMEYxRDBCBgNVBAMT
|
||||
O09RSVM6RzJCNDpQN1FCOkczWkk6VEhQTjpaSVRNOlVEUDI6VjdaUzpVUzZIOkNV
|
||||
TEQ6UklaQjpYN1BJMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEK0pEXXaql80e
|
||||
o8khKBUhwg8fXwDnc+QIR3CB86JnCViEWskUrBmQZvdc8JTDtxBCvfs9nG7yZ8Uw
|
||||
Raw6G9QdUqMjMCEwDgYDVR0PAQH/BAQDAgAGMA8GA1UdEwEB/wQFMAMBAf8wCgYI
|
||||
KoZIzj0EAwIDSAAwRQIgCA6aCc66+d2/yz47PmvvZy+GDudWXFxD6plt2KUtuGsC
|
||||
IQDykk/gYQ5MfIUlS8O+UEAwY7okfz6DxlpFGMVSO57gEg==
|
||||
-----END CERTIFICATE-----
|
BIN  Godeps/_workspace/src/github.com/docker/libtrust/official/graphtool  generated  vendored  Normal file
Binary file not shown.
35  Godeps/_workspace/src/github.com/docker/libtrust/official/josh-signed.json  generated  vendored  Normal file
|
@ -0,0 +1,35 @@
|
|||
{
|
||||
"revocations": [],
|
||||
"grants": [
|
||||
{
|
||||
"subject": "/library",
|
||||
"permission": 15,
|
||||
"grantee": "LYRA:YAG2:QQKS:376F:QQXY:3UNK:SXH7:K6ES:Y5AU:XUN5:ZLVY:KBYL"
|
||||
},
|
||||
{
|
||||
"subject": "/library",
|
||||
"permission": 15,
|
||||
"grantee": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4"
|
||||
},
|
||||
{
|
||||
"subject": "/library",
|
||||
"permission": 15,
|
||||
"grantee": "PTC6:ABN6:Q4GG:GAAP:ZI7C:4TVD:NERQ:IWSC:UWVO:4HUK:DSI6:4NKD"
|
||||
}
|
||||
],
|
||||
"expiration": "2017-03-19T19:30:00.894517334Z",
|
||||
"issuedAt": "2015-03-19T19:30:00.894517667Z",
|
||||
"signatures": [
|
||||
{
|
||||
"header": {
|
||||
"alg": "ES256",
|
||||
"x5c": [
|
||||
"MIIBnTCCAUSgAwIBAgIBAjAKBggqhkjOPQQDAjBGMUQwQgYDVQQDEztPUUlTOkcyQjQ6UDdRQjpHM1pJOlRIUE46WklUTTpVRFAyOlY3WlM6VVM2SDpDVUxEOlJJWkI6WDdQSTAeFw0xNDA5MTIyMzA3NTNaFw0xNDEyMTEyMzA3NTRaMEYxRDBCBgNVBAMTO1c1MlY6QldDUTpPVVk2OklRNU46WDVYRzpNN0tKOkdYNlA6VFdNUTpaRkhKOk02MzI6SzIzVjpYRjQ1MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEG9rANGMaOxoZ4XzItNBpqrzZLe/hyMY/hnlXARER0rbEp5B5yl5sDYaXEI80VgpUGNvleFOVc7yN3rclT14tm6MjMCEwDgYDVR0PAQH/BAQDAgCAMA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDRwAwRAIgQhkTTERnf/+2i1hM+hHivMngiciCQgqdcxo1UeK53fkCIDzml+Gk4oAFGGcsO3wdHqbxfjyGkNr8hxfuWoq6Wl3U",
|
||||
"MIIBnjCCAUSgAwIBAgIBATAKBggqhkjOPQQDAjBGMUQwQgYDVQQDEztHVzRGOkdTN1A6MjJLWTpTUVhNOkZJNzQ6U1FCUzpLUlhSOlY0VFI6SUc2UTo3TkxMOk9KSVM6M1hYSjAeFw0xNDA5MTIyMzA3NTNaFw0xNDEyMTEyMzA3NTRaMEYxRDBCBgNVBAMTO09RSVM6RzJCNDpQN1FCOkczWkk6VEhQTjpaSVRNOlVEUDI6VjdaUzpVUzZIOkNVTEQ6UklaQjpYN1BJMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEK0pEXXaql80eo8khKBUhwg8fXwDnc+QIR3CB86JnCViEWskUrBmQZvdc8JTDtxBCvfs9nG7yZ8UwRaw6G9QdUqMjMCEwDgYDVR0PAQH/BAQDAgAGMA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIgCA6aCc66+d2/yz47PmvvZy+GDudWXFxD6plt2KUtuGsCIQDykk/gYQ5MfIUlS8O+UEAwY7okfz6DxlpFGMVSO57gEg=="
|
||||
]
|
||||
},
|
||||
"signature": "KjhqAzsjY87dEyVdQkGoMgJciUalpFla1bno6KnZR6irMhfTxH_qwP69VAqPRrhHV5UdplAeKCDgTiXI6tphFg",
|
||||
"protected": "eyJmb3JtYXRMZW5ndGgiOjYxNiwiZm9ybWF0VGFpbCI6IkNuMCIsInRpbWUiOiIyMDE1LTAzLTE5VDE5OjMwOjAwWiJ9"
|
||||
}
|
||||
]
|
||||
}
|
17  Godeps/_workspace/src/github.com/docker/libtrust/official/josh.json  generated  vendored  Normal file
|
@ -0,0 +1,17 @@
|
|||
[
|
||||
{
|
||||
"subject": "/library",
|
||||
"permission": 15,
|
||||
"grantee": "LYRA:YAG2:QQKS:376F:QQXY:3UNK:SXH7:K6ES:Y5AU:XUN5:ZLVY:KBYL"
|
||||
},
|
||||
{
|
||||
"subject": "/library",
|
||||
"permission": 15,
|
||||
"grantee": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4"
|
||||
},
|
||||
{
|
||||
"subject": "/library",
|
||||
"permission": 15,
|
||||
"grantee": "PTC6:ABN6:Q4GG:GAAP:ZI7C:4TVD:NERQ:IWSC:UWVO:4HUK:DSI6:4NKD"
|
||||
}
|
||||
]
|
8  Godeps/_workspace/src/github.com/docker/libtrust/official/master-key.json  generated  vendored  Normal file
|
@ -0,0 +1,8 @@
|
|||
{
|
||||
"crv": "P-256",
|
||||
"d": "vIA7rvskakiQZI33vYTsOr_icsaTkQoml5IZjFAdBzA",
|
||||
"kid": "OQIS:G2B4:P7QB:G3ZI:THPN:ZITM:UDP2:V7ZS:US6H:CULD:RIZB:X7PI",
|
||||
"kty": "EC",
|
||||
"x": "K0pEXXaql80eo8khKBUhwg8fXwDnc-QIR3CB86JnCVg",
|
||||
"y": "hFrJFKwZkGb3XPCUw7cQQr37PZxu8mfFMEWsOhvUHVI"
|
||||
}
|
17  Godeps/_workspace/src/github.com/docker/libtrust/official/official.json  generated  vendored  Normal file
|
@ -0,0 +1,17 @@
|
|||
[
|
||||
{
|
||||
"subject": "/library",
|
||||
"permission": 15,
|
||||
"grantee": "LYRA:YAG2:QQKS:376F:QQXY:3UNK:SXH7:K6ES:Y5AU:XUN5:ZLVY:KBYL"
|
||||
},
|
||||
{
|
||||
"subject": "/library",
|
||||
"permission": 15,
|
||||
"grantee": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4"
|
||||
},
|
||||
{
|
||||
"subject": "/library",
|
||||
"permission": 15,
|
||||
"grantee": "TIX7:E75F:VWRT:NFT6:3E25:TPJX:CAEL:YBHA:FTLE:IDGI:BBJ5:HS67"
|
||||
}
|
||||
]
|
25  Godeps/_workspace/src/github.com/docker/libtrust/official/official.json.1  generated  vendored  Normal file
|
@ -0,0 +1,25 @@
|
|||
{
|
||||
"revocations": [],
|
||||
"grants": [
|
||||
{
|
||||
"subject": "/library",
|
||||
"permission": 15,
|
||||
"grantee": "LYRA:YAG2:QQKS:376F:QQXY:3UNK:SXH7:K6ES:Y5AU:XUN5:ZLVY:KBYL"
|
||||
}
|
||||
],
|
||||
"expiration": "2014-12-29T00:08:20.565183779Z",
|
||||
"issuedAt": "2014-09-30T00:08:20.565183976Z",
|
||||
"signatures": [
|
||||
{
|
||||
"header": {
|
||||
"alg": "ES256",
|
||||
"x5c": [
|
||||
"MIIBnTCCAUSgAwIBAgIBAjAKBggqhkjOPQQDAjBGMUQwQgYDVQQDEztPUUlTOkcyQjQ6UDdRQjpHM1pJOlRIUE46WklUTTpVRFAyOlY3WlM6VVM2SDpDVUxEOlJJWkI6WDdQSTAeFw0xNDA5MTIyMzA3NTNaFw0xNDEyMTEyMzA3NTRaMEYxRDBCBgNVBAMTO1c1MlY6QldDUTpPVVk2OklRNU46WDVYRzpNN0tKOkdYNlA6VFdNUTpaRkhKOk02MzI6SzIzVjpYRjQ1MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEG9rANGMaOxoZ4XzItNBpqrzZLe/hyMY/hnlXARER0rbEp5B5yl5sDYaXEI80VgpUGNvleFOVc7yN3rclT14tm6MjMCEwDgYDVR0PAQH/BAQDAgCAMA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDRwAwRAIgQhkTTERnf/+2i1hM+hHivMngiciCQgqdcxo1UeK53fkCIDzml+Gk4oAFGGcsO3wdHqbxfjyGkNr8hxfuWoq6Wl3U",
|
||||
"MIIBnjCCAUSgAwIBAgIBATAKBggqhkjOPQQDAjBGMUQwQgYDVQQDEztHVzRGOkdTN1A6MjJLWTpTUVhNOkZJNzQ6U1FCUzpLUlhSOlY0VFI6SUc2UTo3TkxMOk9KSVM6M1hYSjAeFw0xNDA5MTIyMzA3NTNaFw0xNDEyMTEyMzA3NTRaMEYxRDBCBgNVBAMTO09RSVM6RzJCNDpQN1FCOkczWkk6VEhQTjpaSVRNOlVEUDI6VjdaUzpVUzZIOkNVTEQ6UklaQjpYN1BJMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEK0pEXXaql80eo8khKBUhwg8fXwDnc+QIR3CB86JnCViEWskUrBmQZvdc8JTDtxBCvfs9nG7yZ8UwRaw6G9QdUqMjMCEwDgYDVR0PAQH/BAQDAgAGMA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIgCA6aCc66+d2/yz47PmvvZy+GDudWXFxD6plt2KUtuGsCIQDykk/gYQ5MfIUlS8O+UEAwY7okfz6DxlpFGMVSO57gEg=="
|
||||
]
|
||||
},
|
||||
"signature": "uYMwXO869mGDkq7jrE-xfXHcY96JkcIZt_lLWidUtK4Z3-VH8UJx8j-bHQh0rD3C4Olsx7SY5dDOO_Zq60_i1w",
|
||||
"protected": "eyJmb3JtYXRMZW5ndGgiOjMwMCwiZm9ybWF0VGFpbCI6IkNuMCIsInRpbWUiOiIyMDE0LTA5LTMwVDAwOjA4OjIwWiJ9"
|
||||
}
|
||||
]
|
||||
}
|
30  Godeps/_workspace/src/github.com/docker/libtrust/official/official_release-02-19-2015-longexpire.json  generated  vendored  Normal file
|
@ -0,0 +1,30 @@
|
|||
{
|
||||
"revocations": [],
|
||||
"grants": [
|
||||
{
|
||||
"subject": "/library",
|
||||
"permission": 15,
|
||||
"grantee": "LYRA:YAG2:QQKS:376F:QQXY:3UNK:SXH7:K6ES:Y5AU:XUN5:ZLVY:KBYL"
|
||||
},
|
||||
{
|
||||
"subject": "/library",
|
||||
"permission": 15,
|
||||
"grantee": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4"
|
||||
}
|
||||
],
|
||||
"expiration": "2017-02-19T22:55:15.973200265Z",
|
||||
"issuedAt": "2015-02-19T22:55:15.973200588Z",
|
||||
"signatures": [
|
||||
{
|
||||
"header": {
|
||||
"alg": "ES256",
|
||||
"x5c": [
|
||||
"MIIBnTCCAUSgAwIBAgIBAjAKBggqhkjOPQQDAjBGMUQwQgYDVQQDEztPUUlTOkcyQjQ6UDdRQjpHM1pJOlRIUE46WklUTTpVRFAyOlY3WlM6VVM2SDpDVUxEOlJJWkI6WDdQSTAeFw0xNDA5MTIyMzA3NTNaFw0xNDEyMTEyMzA3NTRaMEYxRDBCBgNVBAMTO1c1MlY6QldDUTpPVVk2OklRNU46WDVYRzpNN0tKOkdYNlA6VFdNUTpaRkhKOk02MzI6SzIzVjpYRjQ1MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEG9rANGMaOxoZ4XzItNBpqrzZLe/hyMY/hnlXARER0rbEp5B5yl5sDYaXEI80VgpUGNvleFOVc7yN3rclT14tm6MjMCEwDgYDVR0PAQH/BAQDAgCAMA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDRwAwRAIgQhkTTERnf/+2i1hM+hHivMngiciCQgqdcxo1UeK53fkCIDzml+Gk4oAFGGcsO3wdHqbxfjyGkNr8hxfuWoq6Wl3U",
|
||||
"MIIBnjCCAUSgAwIBAgIBATAKBggqhkjOPQQDAjBGMUQwQgYDVQQDEztHVzRGOkdTN1A6MjJLWTpTUVhNOkZJNzQ6U1FCUzpLUlhSOlY0VFI6SUc2UTo3TkxMOk9KSVM6M1hYSjAeFw0xNDA5MTIyMzA3NTNaFw0xNDEyMTEyMzA3NTRaMEYxRDBCBgNVBAMTO09RSVM6RzJCNDpQN1FCOkczWkk6VEhQTjpaSVRNOlVEUDI6VjdaUzpVUzZIOkNVTEQ6UklaQjpYN1BJMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEK0pEXXaql80eo8khKBUhwg8fXwDnc+QIR3CB86JnCViEWskUrBmQZvdc8JTDtxBCvfs9nG7yZ8UwRaw6G9QdUqMjMCEwDgYDVR0PAQH/BAQDAgAGMA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIgCA6aCc66+d2/yz47PmvvZy+GDudWXFxD6plt2KUtuGsCIQDykk/gYQ5MfIUlS8O+UEAwY7okfz6DxlpFGMVSO57gEg=="
|
||||
]
|
||||
},
|
||||
"signature": "YYP07HOiZWVZ5ZYBvFW5FOlAbY86GrLQ62uGpDphPAJFUFzGgCZCuN04iXVIyFueiJOS6brPSV2dDcd_LlSz-A",
|
||||
"protected": "eyJmb3JtYXRMZW5ndGgiOjQ1OCwiZm9ybWF0VGFpbCI6IkNuMCIsInRpbWUiOiIyMDE1LTAyLTE5VDIyOjU1OjE1WiJ9"
|
||||
}
|
||||
]
|
||||
}
|
30  Godeps/_workspace/src/github.com/docker/libtrust/official/official_release-02-19-2015.json  generated  vendored  Normal file
|
@ -0,0 +1,30 @@
|
|||
{
|
||||
"revocations": [],
|
||||
"grants": [
|
||||
{
|
||||
"subject": "/library",
|
||||
"permission": 15,
|
||||
"grantee": "LYRA:YAG2:QQKS:376F:QQXY:3UNK:SXH7:K6ES:Y5AU:XUN5:ZLVY:KBYL"
|
||||
},
|
||||
{
|
||||
"subject": "/library",
|
||||
"permission": 15,
|
||||
"grantee": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4"
|
||||
}
|
||||
],
|
||||
"expiration": "2015-05-20T22:07:15.075359917Z",
|
||||
"issuedAt": "2015-02-19T22:07:15.075360044Z",
|
||||
"signatures": [
|
||||
{
|
||||
"header": {
|
||||
"alg": "ES256",
|
||||
"x5c": [
|
||||
"MIIBnTCCAUSgAwIBAgIBAjAKBggqhkjOPQQDAjBGMUQwQgYDVQQDEztPUUlTOkcyQjQ6UDdRQjpHM1pJOlRIUE46WklUTTpVRFAyOlY3WlM6VVM2SDpDVUxEOlJJWkI6WDdQSTAeFw0xNDA5MTIyMzA3NTNaFw0xNDEyMTEyMzA3NTRaMEYxRDBCBgNVBAMTO1c1MlY6QldDUTpPVVk2OklRNU46WDVYRzpNN0tKOkdYNlA6VFdNUTpaRkhKOk02MzI6SzIzVjpYRjQ1MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEG9rANGMaOxoZ4XzItNBpqrzZLe/hyMY/hnlXARER0rbEp5B5yl5sDYaXEI80VgpUGNvleFOVc7yN3rclT14tm6MjMCEwDgYDVR0PAQH/BAQDAgCAMA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDRwAwRAIgQhkTTERnf/+2i1hM+hHivMngiciCQgqdcxo1UeK53fkCIDzml+Gk4oAFGGcsO3wdHqbxfjyGkNr8hxfuWoq6Wl3U",
|
||||
"MIIBnjCCAUSgAwIBAgIBATAKBggqhkjOPQQDAjBGMUQwQgYDVQQDEztHVzRGOkdTN1A6MjJLWTpTUVhNOkZJNzQ6U1FCUzpLUlhSOlY0VFI6SUc2UTo3TkxMOk9KSVM6M1hYSjAeFw0xNDA5MTIyMzA3NTNaFw0xNDEyMTEyMzA3NTRaMEYxRDBCBgNVBAMTO09RSVM6RzJCNDpQN1FCOkczWkk6VEhQTjpaSVRNOlVEUDI6VjdaUzpVUzZIOkNVTEQ6UklaQjpYN1BJMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEK0pEXXaql80eo8khKBUhwg8fXwDnc+QIR3CB86JnCViEWskUrBmQZvdc8JTDtxBCvfs9nG7yZ8UwRaw6G9QdUqMjMCEwDgYDVR0PAQH/BAQDAgAGMA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIgCA6aCc66+d2/yz47PmvvZy+GDudWXFxD6plt2KUtuGsCIQDykk/gYQ5MfIUlS8O+UEAwY7okfz6DxlpFGMVSO57gEg=="
|
||||
]
|
||||
},
|
||||
"signature": "te7xmIGQ9EGfYjikIwx0orqZ8WWH7DQN7-sAsoyfDfv_uq5DpgRqs5qkW_rGkwsbF37rq6Bdhdwktvt9YsmlVA",
|
||||
"protected": "eyJmb3JtYXRMZW5ndGgiOjQ1OCwiZm9ybWF0VGFpbCI6IkNuMCIsInRpbWUiOiIyMDE1LTAyLTE5VDIyOjA3OjE1WiJ9"
|
||||
}
|
||||
]
|
||||
}
|
35  Godeps/_workspace/src/github.com/docker/libtrust/official/official_release-03-22-2015-longexpire.json  generated  vendored  Normal file
|
@ -0,0 +1,35 @@
|
|||
{
|
||||
"revocations": [],
|
||||
"grants": [
|
||||
{
|
||||
"subject": "/library",
|
||||
"permission": 15,
|
||||
"grantee": "LYRA:YAG2:QQKS:376F:QQXY:3UNK:SXH7:K6ES:Y5AU:XUN5:ZLVY:KBYL"
|
||||
},
|
||||
{
|
||||
"subject": "/library",
|
||||
"permission": 15,
|
||||
"grantee": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4"
|
||||
},
|
||||
{
|
||||
"subject": "/library",
|
||||
"permission": 15,
|
||||
"grantee": "TIX7:E75F:VWRT:NFT6:3E25:TPJX:CAEL:YBHA:FTLE:IDGI:BBJ5:HS67"
|
||||
}
|
||||
],
|
||||
"expiration": "2017-03-22T19:04:46.713978458Z",
|
||||
"issuedAt": "2015-03-22T19:04:46.713978769Z",
|
||||
"signatures": [
|
||||
{
|
||||
"header": {
|
||||
"alg": "ES256",
|
||||
"x5c": [
|
||||
"MIIBnTCCAUSgAwIBAgIBAjAKBggqhkjOPQQDAjBGMUQwQgYDVQQDEztPUUlTOkcyQjQ6UDdRQjpHM1pJOlRIUE46WklUTTpVRFAyOlY3WlM6VVM2SDpDVUxEOlJJWkI6WDdQSTAeFw0xNDA5MTIyMzA3NTNaFw0xNDEyMTEyMzA3NTRaMEYxRDBCBgNVBAMTO1c1MlY6QldDUTpPVVk2OklRNU46WDVYRzpNN0tKOkdYNlA6VFdNUTpaRkhKOk02MzI6SzIzVjpYRjQ1MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEG9rANGMaOxoZ4XzItNBpqrzZLe/hyMY/hnlXARER0rbEp5B5yl5sDYaXEI80VgpUGNvleFOVc7yN3rclT14tm6MjMCEwDgYDVR0PAQH/BAQDAgCAMA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDRwAwRAIgQhkTTERnf/+2i1hM+hHivMngiciCQgqdcxo1UeK53fkCIDzml+Gk4oAFGGcsO3wdHqbxfjyGkNr8hxfuWoq6Wl3U",
|
||||
"MIIBnjCCAUSgAwIBAgIBATAKBggqhkjOPQQDAjBGMUQwQgYDVQQDEztHVzRGOkdTN1A6MjJLWTpTUVhNOkZJNzQ6U1FCUzpLUlhSOlY0VFI6SUc2UTo3TkxMOk9KSVM6M1hYSjAeFw0xNDA5MTIyMzA3NTNaFw0xNDEyMTEyMzA3NTRaMEYxRDBCBgNVBAMTO09RSVM6RzJCNDpQN1FCOkczWkk6VEhQTjpaSVRNOlVEUDI6VjdaUzpVUzZIOkNVTEQ6UklaQjpYN1BJMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEK0pEXXaql80eo8khKBUhwg8fXwDnc+QIR3CB86JnCViEWskUrBmQZvdc8JTDtxBCvfs9nG7yZ8UwRaw6G9QdUqMjMCEwDgYDVR0PAQH/BAQDAgAGMA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIgCA6aCc66+d2/yz47PmvvZy+GDudWXFxD6plt2KUtuGsCIQDykk/gYQ5MfIUlS8O+UEAwY7okfz6DxlpFGMVSO57gEg=="
|
||||
]
|
||||
},
|
||||
"signature": "h-4MmsL1QB9lEfq2V4MVpZNcuiw0cKYP4C8T-nd4JtU1WhJ1q2_9mMQmtaXOdg6wWTcBll1bbf11UwXP26OhaA",
|
||||
"protected": "eyJmb3JtYXRMZW5ndGgiOjYxNiwiZm9ybWF0VGFpbCI6IkNuMCIsInRpbWUiOiIyMDE1LTAzLTIyVDE5OjA0OjQ2WiJ9"
|
||||
}
|
||||
]
|
||||
}
|
25  Godeps/_workspace/src/github.com/docker/libtrust/official/official_release-10-01-2014.json  generated  vendored  Normal file
|
@ -0,0 +1,25 @@
|
|||
{
|
||||
"revocations": [],
|
||||
"grants": [
|
||||
{
|
||||
"subject": "/library",
|
||||
"permission": 15,
|
||||
"grantee": "LYRA:YAG2:QQKS:376F:QQXY:3UNK:SXH7:K6ES:Y5AU:XUN5:ZLVY:KBYL"
|
||||
}
|
||||
],
|
||||
"expiration": "2014-12-29T00:08:20.565183779Z",
|
||||
"issuedAt": "2014-09-30T00:08:20.565183976Z",
|
||||
"signatures": [
|
||||
{
|
||||
"header": {
|
||||
"alg": "ES256",
|
||||
"x5c": [
|
||||
"MIIBnTCCAUSgAwIBAgIBAjAKBggqhkjOPQQDAjBGMUQwQgYDVQQDEztPUUlTOkcyQjQ6UDdRQjpHM1pJOlRIUE46WklUTTpVRFAyOlY3WlM6VVM2SDpDVUxEOlJJWkI6WDdQSTAeFw0xNDA5MTIyMzA3NTNaFw0xNDEyMTEyMzA3NTRaMEYxRDBCBgNVBAMTO1c1MlY6QldDUTpPVVk2OklRNU46WDVYRzpNN0tKOkdYNlA6VFdNUTpaRkhKOk02MzI6SzIzVjpYRjQ1MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEG9rANGMaOxoZ4XzItNBpqrzZLe/hyMY/hnlXARER0rbEp5B5yl5sDYaXEI80VgpUGNvleFOVc7yN3rclT14tm6MjMCEwDgYDVR0PAQH/BAQDAgCAMA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDRwAwRAIgQhkTTERnf/+2i1hM+hHivMngiciCQgqdcxo1UeK53fkCIDzml+Gk4oAFGGcsO3wdHqbxfjyGkNr8hxfuWoq6Wl3U",
|
||||
"MIIBnjCCAUSgAwIBAgIBATAKBggqhkjOPQQDAjBGMUQwQgYDVQQDEztHVzRGOkdTN1A6MjJLWTpTUVhNOkZJNzQ6U1FCUzpLUlhSOlY0VFI6SUc2UTo3TkxMOk9KSVM6M1hYSjAeFw0xNDA5MTIyMzA3NTNaFw0xNDEyMTEyMzA3NTRaMEYxRDBCBgNVBAMTO09RSVM6RzJCNDpQN1FCOkczWkk6VEhQTjpaSVRNOlVEUDI6VjdaUzpVUzZIOkNVTEQ6UklaQjpYN1BJMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEK0pEXXaql80eo8khKBUhwg8fXwDnc+QIR3CB86JnCViEWskUrBmQZvdc8JTDtxBCvfs9nG7yZ8UwRaw6G9QdUqMjMCEwDgYDVR0PAQH/BAQDAgAGMA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIgCA6aCc66+d2/yz47PmvvZy+GDudWXFxD6plt2KUtuGsCIQDykk/gYQ5MfIUlS8O+UEAwY7okfz6DxlpFGMVSO57gEg=="
|
||||
]
|
||||
},
|
||||
"signature": "uYMwXO869mGDkq7jrE-xfXHcY96JkcIZt_lLWidUtK4Z3-VH8UJx8j-bHQh0rD3C4Olsx7SY5dDOO_Zq60_i1w",
|
||||
"protected": "eyJmb3JtYXRMZW5ndGgiOjMwMCwiZm9ybWF0VGFpbCI6IkNuMCIsInRpbWUiOiIyMDE0LTA5LTMwVDAwOjA4OjIwWiJ9"
|
||||
}
|
||||
]
|
||||
}
|
30  Godeps/_workspace/src/github.com/docker/libtrust/official/official_release-11-24-2014.json  generated  vendored  Normal file
|
@ -0,0 +1,30 @@
|
|||
{
|
||||
"revocations": [],
|
||||
"grants": [
|
||||
{
|
||||
"subject": "/library",
|
||||
"permission": 15,
|
||||
"grantee": "LYRA:YAG2:QQKS:376F:QQXY:3UNK:SXH7:K6ES:Y5AU:XUN5:ZLVY:KBYL"
|
||||
},
|
||||
{
|
||||
"subject": "/library",
|
||||
"permission": 15,
|
||||
"grantee": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4"
|
||||
}
|
||||
],
|
||||
"expiration": "2015-02-22T22:47:54.722420237Z",
|
||||
"issuedAt": "2014-11-24T22:47:54.722420583Z",
|
||||
"signatures": [
|
||||
{
|
||||
"header": {
|
||||
"alg": "ES256",
|
||||
"x5c": [
|
||||
"MIIBnTCCAUSgAwIBAgIBAjAKBggqhkjOPQQDAjBGMUQwQgYDVQQDEztPUUlTOkcyQjQ6UDdRQjpHM1pJOlRIUE46WklUTTpVRFAyOlY3WlM6VVM2SDpDVUxEOlJJWkI6WDdQSTAeFw0xNDA5MTIyMzA3NTNaFw0xNDEyMTEyMzA3NTRaMEYxRDBCBgNVBAMTO1c1MlY6QldDUTpPVVk2OklRNU46WDVYRzpNN0tKOkdYNlA6VFdNUTpaRkhKOk02MzI6SzIzVjpYRjQ1MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEG9rANGMaOxoZ4XzItNBpqrzZLe/hyMY/hnlXARER0rbEp5B5yl5sDYaXEI80VgpUGNvleFOVc7yN3rclT14tm6MjMCEwDgYDVR0PAQH/BAQDAgCAMA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDRwAwRAIgQhkTTERnf/+2i1hM+hHivMngiciCQgqdcxo1UeK53fkCIDzml+Gk4oAFGGcsO3wdHqbxfjyGkNr8hxfuWoq6Wl3U",
|
||||
"MIIBnjCCAUSgAwIBAgIBATAKBggqhkjOPQQDAjBGMUQwQgYDVQQDEztHVzRGOkdTN1A6MjJLWTpTUVhNOkZJNzQ6U1FCUzpLUlhSOlY0VFI6SUc2UTo3TkxMOk9KSVM6M1hYSjAeFw0xNDA5MTIyMzA3NTNaFw0xNDEyMTEyMzA3NTRaMEYxRDBCBgNVBAMTO09RSVM6RzJCNDpQN1FCOkczWkk6VEhQTjpaSVRNOlVEUDI6VjdaUzpVUzZIOkNVTEQ6UklaQjpYN1BJMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEK0pEXXaql80eo8khKBUhwg8fXwDnc+QIR3CB86JnCViEWskUrBmQZvdc8JTDtxBCvfs9nG7yZ8UwRaw6G9QdUqMjMCEwDgYDVR0PAQH/BAQDAgAGMA8GA1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIgCA6aCc66+d2/yz47PmvvZy+GDudWXFxD6plt2KUtuGsCIQDykk/gYQ5MfIUlS8O+UEAwY7okfz6DxlpFGMVSO57gEg=="
|
||||
]
|
||||
},
|
||||
"signature": "WIxdNx3olDbq4fLRx46HHKyghPqpqTMB_RJkI5mCLrg3lIFq8ke-gNgfD_xDHHKgV7CBdmQpUS-FDABcKhQ_0g",
|
||||
"protected": "eyJmb3JtYXRMZW5ndGgiOjQ1OCwiZm9ybWF0VGFpbCI6IkNuMCIsInRpbWUiOiIyMDE0LTExLTI0VDIyOjQ3OjU0WiJ9"
|
||||
}
|
||||
]
|
||||
}
|
8  Godeps/_workspace/src/github.com/docker/libtrust/official/trust-key.json  generated  vendored  Normal file
|
@ -0,0 +1,8 @@
|
|||
{
|
||||
"crv": "P-256",
|
||||
"d": "6kQq64Rv3qOPLbS9Yj125E1y0ytaUxL8zBA6NJcVEPM",
|
||||
"kid": "W52V:BWCQ:OUY6:IQ5N:X5XG:M7KJ:GX6P:TWMQ:ZFHJ:M632:K23V:XF45",
|
||||
"kty": "EC",
|
||||
"x": "G9rANGMaOxoZ4XzItNBpqrzZLe_hyMY_hnlXARER0rY",
|
||||
"y": "xKeQecpebA2GlxCPNFYKVBjb5XhTlXO8jd63JU9eLZs"
|
||||
}
|
|
@ -0,0 +1,57 @@

### Motivation

Currently, client-to-daemon authentication over a TCP port can only be achieved by generating TLS certificates for both the client and the daemon. Each daemon instance then needs to be configured to use the generated TLS certificate, and the client must specify its own certificate as well. Production-critical, large-scale deployments should already be using this method to secure and control access to Docker daemons, but the extra setup required to generate your own keys, get them signed by a certificate authority, and distribute those certificates is too much overhead for small-scale deployments such as a Boot2Docker VM running on a developer's Mac. Software developers are already familiar with how SSH key distribution works: through a list of authorized_keys on the server and known_hosts keys on the client. Ideally each instance of the Docker engine (client or daemon) would have a unique identity represented by its own public key. With a list of trusted public keys, two engines can authenticate to each other and the daemon can authorize the connection. This can be done at the TLS layer after initially loading a list of trusted public keys into a CA pool.

### Proposal Summary

Every instance of Docker will have its own public key, which it either generates and saves on first run or loads from a file on subsequent runs. The public key will be distributed to other instances by a Docker user or system administrator to allow connections between two Docker engines. Each instance will have a list of public keys it trusts to accept connections from (trusted clients) and a separate list it trusts to make connections to (trusted hosts). These public keys will be stored as JSON Web Keys and can be distributed as a JSON file or as a standard PEM file. For TLS connections, the Docker engine's key pair will be used to generate a self-signed TLS certificate, and the list of public keys will be used to generate a certificate pool with a certificate authority for each public key. For TLS servers the list of public keys will be loaded from an authorization file (authorized_keys.json), and for TLS clients the list will be loaded from a known hosts file (allowed_hosts.json); a client must always provide its certificate if the daemon requires it. In addition, a certificate authority PEM file may still be specified to maintain the existing TLS behavior. As another possible addition, upon connecting to a previously unknown server, a CLI user can be prompted to allow a public key now and in the future, leaving it up to the user's discretion.

### Key Files

Docker will support key files in either JSON Web Key format or more traditional PEM format.

##### Private and Public Key files

Both the Docker daemon and client will have a private key file and a public key file in either of these formats. A client's private key default location will be `~/.docker/key.(pem|json|jwk)` and its public key `~/.docker/pub_key.(pem|json|jwk)`, where `~` is the home directory of the user running the `docker` client. The daemon's private key default location will be `/var/lib/docker/key.(pem|json|jwk)` and its public key `/var/lib/docker/pub_key.(pem|json|jwk)`. Unix file permissions for these private keys MUST be set to `0600`, or 'Read/Write only by User'. It is suggested that the public keys have permissions set to `0644`, or 'Read/Write by User, Read by group/others'. Because these keys may have a variable file extension, Docker will load whichever file matches the glob `key.*` first, so it is NOT RECOMMENDED to keep multiple `key.*` files, to avoid any ambiguity over which key file will be used. If the `--tlskey=KEYFILE` argument is used, that exact file will be used. Optionally, we may add a config file for the Docker client and daemon in which users may specify the file to use, but that possibility is up for discussion.

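As a rough illustration of the loading rule above, the sketch below globs for `key.*`, refuses group- or world-readable private keys, and defers parsing to `libtrust.LoadKeyFile`, which this diff already vendors; the helper name, the strictness of the permission check, and the error messages are illustrative assumptions, not part of the proposal.

```
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/docker/libtrust"
)

// loadEngineKey loads the first file matching key.* in dir and refuses
// private keys readable by anyone other than the owner.
func loadEngineKey(dir string) (libtrust.PrivateKey, error) {
	matches, err := filepath.Glob(filepath.Join(dir, "key.*"))
	if err != nil {
		return nil, err
	}
	if len(matches) == 0 {
		return nil, fmt.Errorf("no key.* file found in %s", dir)
	}
	keyFile := matches[0] // first match wins, hence the single key.* file recommendation

	info, err := os.Stat(keyFile)
	if err != nil {
		return nil, err
	}
	if info.Mode().Perm()&0077 != 0 {
		return nil, fmt.Errorf("%s should have 0600 permissions", keyFile)
	}

	// libtrust.LoadKeyFile is used elsewhere in this diff to read key files.
	return libtrust.LoadKeyFile(keyFile)
}

func main() {
	key, err := loadEngineKey(filepath.Join(os.Getenv("HOME"), ".docker"))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("loaded identity key", key.KeyID())
}
```
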
##### Authorized Keys file

An instance of the Docker engine in daemon mode will need to know which clients are authorized to connect. We propose a file containing a list of public keys which are authorized to access the Docker Remote API. This idea is borrowed from SSH's `authorized_keys` file. Any client which has the corresponding private key for any public key in this list will be able to connect. This is accomplished by generating a Certificate Authority pool with a CA certificate, automatically generated by the daemon, for each key in this list. The server's TLS configuration will then allow clients which present a self-signed certificate using one of these keys. Like today, the daemon can still be configured to use a traditional Certificate Authority (the `--tlscacert=CACERTFILE` option). The default location for this file will be `/var/lib/docker/authorized_keys.(pem|json|jwk)`. Docker will also look for trusted client keys in individual files in a directory at `/var/lib/docker/authorized_keys.d`, in either PEM or JWK format.

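A minimal sketch of the daemon side, assuming the `libtrust.GenerateCACert` helper used elsewhere in this diff: each authorized client key is wrapped in a generated CA certificate and added to the TLS client CA pool. The function and parameter names here are hypothetical.

```
package daemontls

import (
	"crypto/tls"
	"crypto/x509"

	"github.com/docker/libtrust"
)

// serverTLSConfig accepts only clients whose self-signed certificates are
// backed by one of the authorized public keys.
func serverTLSConfig(daemonKey libtrust.PrivateKey, authorized []libtrust.PublicKey) (*tls.Config, error) {
	clientCAs := x509.NewCertPool()
	for _, clientKey := range authorized {
		// One generated CA certificate per authorized key, as described above.
		caCert, err := libtrust.GenerateCACert(daemonKey, clientKey)
		if err != nil {
			return nil, err
		}
		clientCAs.AddCert(caCert)
	}
	return &tls.Config{
		// The daemon's own self-signed server certificate would also be
		// set here via the Certificates field.
		ClientCAs:  clientCAs,
		ClientAuth: tls.RequireAndVerifyClientCert,
	}, nil
}
```
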
##### Trusted Hosts file

An instance of the Docker engine in client mode will need to know which hosts it trusts to connect to. We propose a file containing a list of public keys which the client trusts to be the key of the Docker Remote API server it wishes to connect to. This idea is borrowed from SSH's `known_hosts` file. Any daemon which has the corresponding private key for a public key in this list AND which presents, in the TLS handshake, a self-signed server certificate with the desired server name (hostname or IP address of `$DOCKER_HOST`) using one of these keys will be trusted. Like today, the client can still be configured to use a traditional Certificate Authority (the `--tlscacert=CACERTFILE` option). The TCP address (in the form `<hostname_or_ip>:<port>`) will be specified for each key using extended attributes on the key, i.e., an `address` JSON field in JWK format or an `address` header in PEM format. The default location for this file will be `~/.docker/trusted_hosts.(pem|json|jwk)`. Docker will also look for trusted host keys in individual files in a directory at `~/.docker/trusted_hosts.d`, in either PEM or JWK format.

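The client side is the mirror image: a root pool is built from the host keys trusted for the address being dialed. This sketch again leans on `libtrust.GenerateCACert` and signs the wrapper certificates with the client's own key; that choice of signer, like the function names, is an assumption for illustration rather than something the proposal fixes.

```
package clienttls

import (
	"crypto/tls"
	"crypto/x509"

	"github.com/docker/libtrust"
)

// clientTLSConfig trusts only servers whose self-signed certificates are
// backed by one of the host keys listed for this address.
func clientTLSConfig(clientKey libtrust.PrivateKey, serverName string, hostKeys []libtrust.PublicKey) (*tls.Config, error) {
	rootCAs := x509.NewCertPool()
	for _, hostKey := range hostKeys {
		// The signer only matters locally; the pool is what enforces trust.
		caCert, err := libtrust.GenerateCACert(clientKey, hostKey)
		if err != nil {
			return nil, err
		}
		rootCAs.AddCert(caCert)
	}
	return &tls.Config{
		RootCAs:    rootCAs,
		ServerName: serverName, // hostname or IP taken from $DOCKER_HOST
	}, nil
}
```
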
### Key Types

By default, a Docker engine will generate an ECDSA key using the standard P-256 elliptic curve if a private key file does not already exist. Supported elliptic curves are P-256, P-384, and P-521. RSA keys are also supported. Elliptic Curve Cryptography was chosen as the default due to more efficient key generation and smaller key sizes for equivalent levels of security when compared to RSA [[reference](http://www.nsa.gov/business/programs/elliptic_curve.shtml)].

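A sketch of the generate-if-missing behavior, using the key helpers this diff vendors (`libtrust.GenerateECP256PrivateKey`, `libtrust.SaveKey`, `libtrust.LoadKeyFile`); the function name and path handling are illustrative, and `SaveKey` is assumed to report an error.

```
package keys

import (
	"os"

	"github.com/docker/libtrust"
)

// loadOrCreateKey returns the engine's identity key, generating and saving
// a new P-256 ECDSA key on first run.
func loadOrCreateKey(path string) (libtrust.PrivateKey, error) {
	if _, err := os.Stat(path); os.IsNotExist(err) {
		key, err := libtrust.GenerateECP256PrivateKey()
		if err != nil {
			return nil, err
		}
		if err := libtrust.SaveKey(path, key); err != nil {
			return nil, err
		}
		return key, nil
	}
	return libtrust.LoadKeyFile(path)
}
```
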
### User visible changes
- TLS is always used when using `tcp://` (`unix://` does not require it)
- Client TLS verification is on by default (`--insecure` flag added to disable)
- Server TLS verification is on by default (`--insecure` flag added to disable)
- `--tls` and `--tlsverify` flags removed
- `-i`/`--identity` flag added to specify the identity (private key) file
- User prompt added when connecting to an unknown server

### Backwards Compatibility

In order to maintain backwards compatibility, the existing TLS ca, cert, and key options for setting up TLS connections will still be allowed. Scripts using `--tls` and `--tlsverify` will need to remove these options, since this behavior is now the default. To keep the existing insecure behavior, scripts will need to be modified to use `--insecure`, although this is not recommended. These changes do not have any effect on servers using unix sockets.

- Connecting from an older client: the client must generate a certificate which is distributed to the server. Optionally, the newer server can run with `--insecure`, which requires no changes to the client.
- Connecting to an older server: if the server does not use TLS, the client keeps the ability to connect to the endpoint using the `--insecure` flag. If TLS is manually configured, no changes should be required.

### Usage Pattern
- Single Machine - Setup using Unix socket, no changes
- Single Machine (with non-B2D VM) -
  - Invoke docker on host to generate key.json
  - Invoke docker on guest to generate key.json
  - Copy ~/.docker/pub_key.json on guest to /var/lib/docker/trusted_hosts.d/guest.json on host
  - Copy /var/lib/docker/pub_key.json on host to ~/.docker/authorized_keys.d/host.json on guest (optionally use prompt)
- Single Machine (B2D) - Boot2Docker installation generates and copies keys
- Two Machines -
  - Invoke docker on client to generate key.json
  - Invoke docker on server to generate key.json
  - Copy ~/.docker/pub_key.json on client to ~/.docker/authorized_keys.d/client.json on server
  - Copy /var/lib/docker/pub_key.json on server to ~/.docker/trusted_hosts.d/server.json on client

18
Godeps/_workspace/src/github.com/docker/libtrust/registry-test/cert.pem
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIC7DCCAdagAwIBAgIRAMzw0lcScp8VgiIX5W18bkUwCwYJKoZIhvcNAQELMBYx
|
||||
FDASBgNVBAoTC0Jvb3QyRG9ja2VyMB4XDTE0MTAxMzIxMjIyOVoXDTE3MDkyNzIx
|
||||
MjIyOVowEzERMA8GA1UEChMIUmVnaXN0cnkwggEiMA0GCSqGSIb3DQEBAQUAA4IB
|
||||
DwAwggEKAoIBAQC8V4jsxvsRZ5ml1Q/VXtrJStFcTjzwh2niEPCvER6xYcWHloMM
|
||||
A6q8cvoHYv2/1ELbKQbLaIuKowWa3gvsiL1Fs3b02jv0a9rN4kFofkRLKuvYKroB
|
||||
Y2P06LNT/sXnE+jCSw9OEdYZuoVXeohmVpcQUCeh8LNDS0b+vg+fU/O46iggUlqu
|
||||
LOaXC8A68SSiBP6qG/cOBPNmfGi09G8sCJ7b7Xd2J8MPeUovNVw+2Wfj4Bzox8s/
|
||||
7B9Ef6iIfwdkvclHoBZIzclJdA0ew5aam/a7SStuRMgX7yhDfRh9sRiyv/ZvKXW6
|
||||
CUy0Oqa//UlmQoC6K3kOm/EMzPocI6NwJ53PAgMBAAGjPDA6MA4GA1UdDwEB/wQE
|
||||
AwIAoDAMBgNVHRMBAf8EAjAAMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATAL
|
||||
BgkqhkiG9w0BAQsDggEBAJjOpgKhzlMcrWhh2pFNaHK77IIROMG8XgmdWE3u2IS9
|
||||
oKbe31n1scyNIvUkrE11iwqJ+4i6C9/eHf1sTz1sHSoBA+7w9O7ghErQ5XK+rfZD
|
||||
Ax/Bu7o1lpnxtrQTiMaVUXZrBnowooDCdjyQyX0BeiIIRhQyBbx3lMpfjbyXFM/l
|
||||
hTuRC8Mt2QSu6pn+Le8t9rlkFq9exHg0Jng1ZpaJWCt6K27J8rMS3jngFUa2ihG6
|
||||
d5vSu6Gqaqhtki4uGA2mUu7RPUhZ5H+WLmDknHeD3/k9ASloa6jw2ehcyU9o4PMB
|
||||
5Lr1zMEG5RkNDVcE5qurWNrpZc1ajPomxmuTrcwO6vQ=
|
||||
-----END CERTIFICATE-----
|
173
Godeps/_workspace/src/github.com/docker/libtrust/registry-test/generate_cert.go
generated
vendored
Normal file
|
@ -0,0 +1,173 @@
|
|||
// Usage:
|
||||
// Generate CA
|
||||
// ./generate_cert --cert ca.pem --key ca-key.pem
|
||||
// Generate signed certificate
|
||||
// ./generate_cert --host 127.0.0.1 --cert cert.pem --key key.pem --ca ca.pem --ca-key ca-key.pem
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"flag"
|
||||
"log"
|
||||
"math/big"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
host = flag.String("host", "", "Comma-separated hostnames and IPs to generate a certificate for")
|
||||
certFile = flag.String("cert", "", "Output file for certificate")
|
||||
keyFile = flag.String("key", "", "Output file for key")
|
||||
ca = flag.String("ca", "", "Certificate authority file to sign with")
|
||||
caKey = flag.String("ca-key", "", "Certificate authority key file to sign with")
|
||||
)
|
||||
|
||||
const (
|
||||
RSABITS = 2048
|
||||
VALIDFOR = 1080 * 24 * time.Hour
|
||||
ORG = "Registry"
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
if *certFile == "" {
|
||||
log.Fatalf("Missing required parameter: --cert")
|
||||
}
|
||||
|
||||
if *keyFile == "" {
|
||||
log.Fatalf("Missing required parameter: --key")
|
||||
}
|
||||
|
||||
if *ca == "" {
|
||||
if *caKey != "" {
|
||||
log.Fatalf("Must provide both --ca and --ca-key")
|
||||
}
|
||||
if err := GenerateCA(*certFile, *keyFile); err != nil {
|
||||
log.Fatalf("Failured to generate CA: %s", err)
|
||||
}
|
||||
} else {
|
||||
if err := GenerateCert(strings.Split(*host, ","), *certFile, *keyFile, *ca, *caKey); err != nil {
|
||||
log.Fatalf("Failured to generate cert: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// newCertificate creates a new template
|
||||
func newCertificate() *x509.Certificate {
|
||||
notBefore := time.Now()
|
||||
notAfter := notBefore.Add(time.Hour * 24 * 1080)
|
||||
|
||||
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
|
||||
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to generate serial number: %s", err)
|
||||
}
|
||||
|
||||
return &x509.Certificate{
|
||||
SerialNumber: serialNumber,
|
||||
Subject: pkix.Name{
|
||||
Organization: []string{ORG},
|
||||
},
|
||||
NotBefore: notBefore,
|
||||
NotAfter: notAfter,
|
||||
|
||||
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
|
||||
//ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
|
||||
BasicConstraintsValid: true,
|
||||
}
|
||||
}
|
||||
|
||||
// GenerateCA generates a new certificate authority
|
||||
// and stores the resulting certificate and key file
|
||||
// in the arguments.
|
||||
func GenerateCA(certFile, keyFile string) error {
|
||||
template := newCertificate()
|
||||
template.IsCA = true
|
||||
template.KeyUsage |= x509.KeyUsageCertSign
|
||||
|
||||
priv, err := rsa.GenerateKey(rand.Reader, RSABITS)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
derBytes, err := x509.CreateCertificate(rand.Reader, template, template, &priv.PublicKey, priv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
certOut, err := os.Create(certFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
|
||||
certOut.Close()
|
||||
|
||||
keyOut, err := os.OpenFile(keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)})
|
||||
keyOut.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GenerateCert generates a new certificate signed using the provided
|
||||
// certificate authority files and stores the result in the certificate
|
||||
// file and key provided. The provided host names are set to the
|
||||
// appropriate certificate fields.
|
||||
func GenerateCert(hosts []string, certFile, keyFile, caFile, caKeyFile string) error {
|
||||
template := newCertificate()
|
||||
for _, h := range hosts {
|
||||
if ip := net.ParseIP(h); ip != nil {
|
||||
template.IPAddresses = append(template.IPAddresses, ip)
|
||||
} else {
|
||||
template.DNSNames = append(template.DNSNames, h)
|
||||
}
|
||||
}
|
||||
|
||||
tlsCert, err := tls.LoadX509KeyPair(caFile, caKeyFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
priv, err := rsa.GenerateKey(rand.Reader, RSABITS)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
x509Cert, err := x509.ParseCertificate(tlsCert.Certificate[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
derBytes, err := x509.CreateCertificate(rand.Reader, template, x509Cert, &priv.PublicKey, tlsCert.PrivateKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
certOut, err := os.Create(certFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
|
||||
certOut.Close()
|
||||
|
||||
keyOut, err := os.OpenFile(keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)})
|
||||
keyOut.Close()
|
||||
|
||||
return nil
|
||||
}
|
27
Godeps/_workspace/src/github.com/docker/libtrust/registry-test/key.pem
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
|||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEpQIBAAKCAQEAvFeI7Mb7EWeZpdUP1V7ayUrRXE488Idp4hDwrxEesWHFh5aD
|
||||
DAOqvHL6B2L9v9RC2ykGy2iLiqMFmt4L7Ii9RbN29No79GvazeJBaH5ESyrr2Cq6
|
||||
AWNj9OizU/7F5xPowksPThHWGbqFV3qIZlaXEFAnofCzQ0tG/r4Pn1PzuOooIFJa
|
||||
rizmlwvAOvEkogT+qhv3DgTzZnxotPRvLAie2+13difDD3lKLzVcPtln4+Ac6MfL
|
||||
P+wfRH+oiH8HZL3JR6AWSM3JSXQNHsOWmpv2u0krbkTIF+8oQ30YfbEYsr/2byl1
|
||||
uglMtDqmv/1JZkKAuit5DpvxDMz6HCOjcCedzwIDAQABAoIBAQCDKJZTBbK8CSCH
|
||||
yptPJNJJ0Y+Ar39ipXqIEvH5iHuOXZ8YcClsiXUx7QPPdxFssF2qj6SKrJBYiHST
|
||||
x8QYAZWFX5gcpYmRCjcCn2ibYyseCZLaI88KvnNSj2aqvMaGyGeUPn83B9bIbgsD
|
||||
w3IPioeCtb8T2DKOT3LuXd81PlqAz2Q2O1OHhTpWYA9gLAeXx+z6jU+Cvn9z67n0
|
||||
+9utUTk8n+SN4+K4h7ufM7CFzLgnugyrhvIgm0NfHDhR9SBwwSTJ/J1CrD4TLkIm
|
||||
d3HetZemfRjByg11Zrltm3YgxdTWKrjfqX3XguR/6HIiXvo5vWhs5DAuQtvTc8Wa
|
||||
ahceS2KxAoGBAMPqj8mWu+LSlWmXIx2gXNPkhK1EovZL0gImouWbb7kCHxQNYTJs
|
||||
wWzG61y9NdCHkhbCDsGXj9rFpPlrDr567gMZTYmLBzcB8DXpHr5XPyl6ZDqYs/L0
|
||||
4jDTSG/tthIDnxVY1O+bGrzknVpakrgJ9H2yivgWIWBVX3XUfXEH8f4HAoGBAPYa
|
||||
T5408onWBzWpbydvPd4loAbuCgG2bXOJjqsyZALHS4SoDQ2HUIZokVlf9yWOzyhd
|
||||
R8iWYADhMps+/wVW6eNAva3hCzI8US0ZiF7SgIZlq31UV1kDXl/XV+ccFp1R95KV
|
||||
c0VbRw2Tqzhmdu7OcvysP8FYZvGWGCev6Opxqe/5AoGBAJDt1xp4kFsWDwOCk66Z
|
||||
77GsTd3jdW4f3qZKsJyMJNTG1fi+gRWNUHVIndoH+mRdtdtyCPp7RSbAqO79nHWa
|
||||
eLIAKNTD7T1rCKjI4D5MSmRDrxuN1Si4sQ8PbXmnbtXaqfdftH3fzqLHuAOcFwdg
|
||||
DeZiit1FecFKxohi/bz89K5HAoGAQI0qMQ8mRu0yuuqUUz2kdq5byTWKvmRBVTrR
|
||||
hyoSfYAU2NjTg6bogjjPVygKxXkt8SJ2f7ChwdlP0tS1Q4tGkesbVY5fx292w2Od
|
||||
F3ITcC0ezCLhPmHZ0T3EusPtUpp7W6GDuL92ZaNEF+kYbQ8NriToHCi558g80bwc
|
||||
VdI2htECgYEAjOkauwVyJrNt3/NNW7yv/hLOzgX+Cxe9GSWZj9LoYdjB1Y9/rzyq
|
||||
f2YGdmyrrY8dWXHI9YK/RXk9/r2EDNNGwyH0+6na5pS5D+r78DhS1fiba7EJrPYK
|
||||
s68QsKo397pa6gWhlQpsUJr97/y2V9Fba8SINnzzmG0zJehPGQXuRFc=
|
||||
-----END RSA PRIVATE KEY-----
|
18
Godeps/_workspace/src/github.com/docker/libtrust/registry-test/registry.cert
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIC3DCCAcagAwIBAgIQXHT/h2+fDzoIxPvBTexiCjALBgkqhkiG9w0BAQswFjEU
|
||||
MBIGA1UEChMLQm9vdDJEb2NrZXIwHhcNMTQxMDEzMjIxMzI5WhcNMTcwOTI3MjIx
|
||||
MzI5WjATMREwDwYDVQQKEwhSZWdpc3RyeTCCASIwDQYJKoZIhvcNAQEBBQADggEP
|
||||
ADCCAQoCggEBAPKkPwCO4bgmrJek6lWjToBuyF4Or3E/38GwIECpuZuymoBs4C1B
|
||||
7Qp7oOrGIRh96htgYPl5WkDUM0If9yG2bDR9JwLxs5OEZrwLbzsvDgaGYiLocElH
|
||||
nKoUdorkr2DtG/rBOgiAqiw9Rw4+Bb0J09hx/Q+S9xuebthSnnKKtBXicBfmHkbD
|
||||
UhfolSdbdBa4u3R2gkFzGXCoAnuYJSerO6fewCJTg2jWxGjuU/Ekm3m13XWQWBis
|
||||
OQN7qGRr/sHLaB4wp69X/mtOKyK1BoGPYV2f73VKremU52QsTcpYw9q6Zoy/iztq
|
||||
WZMVY0Mo4x1TvVKFlzbb9g3UpqDEZCgxS60CAwEAAaMtMCswDgYDVR0PAQH/BAQD
|
||||
AgCgMAwGA1UdEwEB/wQCMAAwCwYDVR0RBAQwAoIAMAsGCSqGSIb3DQEBCwOCAQEA
|
||||
rq6CCnZKw39nT8VVAMiGS63q6+eGX0u8SjsPQu7qqxDex4La09rAZzlfh2GvusCz
|
||||
kJcFyliCwbZTZqFyfYXex+dFgBRaGmjTaE5ZKS7e0UV0oPE4ig9OFMl8/5dsOWJe
|
||||
3SpXOwC41idNAqzJ/StcWy2IIco7TM3YcjCFQeHwNOpspgOiTN3CYkUCmxkRVd2q
|
||||
U8aEvk6/rmmVCesDWsAAngfdHPRVca7PSn8Xz0hglHtdTsO/32EfCmOJI9Lc78cR
|
||||
6g9mX98oJwstQYTsA7lvcTArbCVhbQ/4z8T/AZu2P89Nr8+GW6XRxM5Ne08RnvlE
|
||||
IZa1y0Fg07FIqgJB3HAbHA==
|
||||
-----END CERTIFICATE-----
|
27
Godeps/_workspace/src/github.com/docker/libtrust/registry-test/registry.key
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
|||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEogIBAAKCAQEA8qQ/AI7huCasl6TqVaNOgG7IXg6vcT/fwbAgQKm5m7KagGzg
|
||||
LUHtCnug6sYhGH3qG2Bg+XlaQNQzQh/3IbZsNH0nAvGzk4RmvAtvOy8OBoZiIuhw
|
||||
SUecqhR2iuSvYO0b+sE6CICqLD1HDj4FvQnT2HH9D5L3G55u2FKecoq0FeJwF+Ye
|
||||
RsNSF+iVJ1t0Fri7dHaCQXMZcKgCe5glJ6s7p97AIlODaNbEaO5T8SSbebXddZBY
|
||||
GKw5A3uoZGv+wctoHjCnr1f+a04rIrUGgY9hXZ/vdUqt6ZTnZCxNyljD2rpmjL+L
|
||||
O2pZkxVjQyjjHVO9UoWXNtv2DdSmoMRkKDFLrQIDAQABAoIBAHkE4IN3wC7n4ydT
|
||||
UqlZ5Tp7hqYa7vguInzpSnzV6kQ0xYiORRVtjzuuQ6k1Hqanjo8O9+8VzqUM8W/m
|
||||
n40J/lgDn+SBBs7pt4/MqDK9mmI4vlOo2PBDrmjKAgHuY2aVfGIJ49b/zWB5Q6pf
|
||||
1t7dOvL2j4AoDeWRlLmCI5L/iShsKlm8isnEqnvSoWYQEIjyesum8LhjfvDVyvl7
|
||||
huOiIbwqCz5198GedhrbCRE4O/owbv9e1yv9RpV/Kruauw+W1uRg4o+WAzvaZ71q
|
||||
rzlJAi0TQ144E0DqCmCswpJpB/jxK+prcagdTY80SYxg+MPToqqwlUGzwP4piHyO
|
||||
Hltqm6UCgYEA+7IufLh6nhTgVRD5nPAVDZXxA5906YlZTWTeRH5gOBWoULh+4Ur/
|
||||
L+hEZzwiZz5cdWf+qggDmO8HAgtbvUdgMddAOGfEcsVma+H1BqD6ktFXU3tuFqPG
|
||||
+Mvtz7uDpyVCSiltBOBtI/Ps6VXXRyjXtpWir3QCjLn+emFlxB/kTKMCgYEA9spt
|
||||
k6TcFMVI8pXzpDhi4WDaf63/WftF3+FBCXLIejq1gQH0edUtKTYieyajgemo4/ph
|
||||
3LiF9P6lWLRjHpWyrIlW5j3qSOXu/KX1MkKGUnWXXu5EXf+U5+yXl/LgNvgkA3Rc
|
||||
lpIiL6xhJkkOn0b9b6SfDO3Mkc+fdFLNtoFzu28CgYBhwLEXVVqh/L+nqiEX15KF
|
||||
pT9mxc5VSTe6vDsq5N4fyiGFwDHe9A6qH445ffxqlqi4HHymstga1HKnU5v8pjTx
|
||||
BoV1oq+EV7BQpABS48CNtuowjRaTimYjiKhC7rPR/cGCJhamNzeMKBdSBDnI9E7b
|
||||
JcB6XhcDatUv2JX6ltxG+wKBgEGqbymf7idgrx5NI/0IO9c4TCs5Av7QSzFc4E9B
|
||||
SXKseGsROBnKzS9wBhC6oTsKDzRKBQwT1SDxONG3fcA/mMEr8gFlbBdlhweWqP1s
|
||||
kg0S2Oobf/l67gYf/94gc+RsdjitF43A6HPC7D6hpdBftUOli+SGya3qjsUG21G6
|
||||
dC7lAoGAJ84wM2yGU358OCmMdbxdjSpwk3P5ue+kcZqYa6R9NX503hg+zdDAnPzS
|
||||
Dn0Psf7XKklGqMnDRLyusKNPE4hhR/AukeGvBpB8rntBkXvBn0gR3Pz9mJioUvCw
|
||||
nvsYNrv7yPaMUjDpi3tlDdCxA/c1OYI4nPD1RO1z6KtXzXumg6k=
|
||||
-----END RSA PRIVATE KEY-----
|
|
@ -0,0 +1,12 @@
|
|||
{
|
||||
"keyType": "jwk",
|
||||
"keyId": "KVKI:GMUJ:FMD7:7BWO:KP7O:QIWP:VRDG:IAWU:MJ6Z:GX35:N6E5:XCD6:2AIQ",
|
||||
"key": {
|
||||
"crv": "P-256",
|
||||
"d": "woOnFXsfJ2UhOW0A0sHRQr-j-ylaFp5hSqv9MXaWIfA",
|
||||
"kid": "KVKI:GMUJ:FMD7:7BWO:KP7O:QIWP:VRDG:IAWU:MJ6Z:GX35:N6E5:XCD6:2AIQ",
|
||||
"kty": "EC",
|
||||
"x": "7C7zxqK8TJUO0yiO3QHhaUm5gjBO_u4UN6Gdw2U846Y",
|
||||
"y": "11bnKVNb4Yoy0rpJA7slvBm44Lmc4c36h_LjEP8PJmQ"
|
||||
}
|
||||
}
|
|
@ -0,0 +1,5 @@
|
|||
MHcCAQEEIFlzVietDbzPESwRynJaAZA38KgGkyxkmnOko1OAxniMoAoGCCqGSM49AwEHoUQDQgAEL1Rx9iUzepbmqQub62qSPgC2YTabUFtnqDXNrfN7g2z0P9FYV/ScBIqxnBDdV6Sy1ZIrfUgiuA7JyaABg4hjCw==
|
||||
MHcCAQEEIGrv2ULb+HV3KoJlciMJ1THswtYxvZ9HVrvIVYaZyt3UoAoGCCqGSM49AwEHoUQDQgAEV+CHKKPoF1rFiVkFiEdoXlXa8UybgoNPQmPVr8ImuR0grslx2Tk/Ja/23CYzYtLeEuRa+lRQSxKa2NjDo7BhfQ==
|
||||
MHcCAQEEIBxKadItcsdt+lqIh9fCOYX8OpTiIYLlaeZWeKxvJrw4oAoGCCqGSM49AwEHoUQDQgAEp8JenwbJSTVXKS0IHH1KrwPgF7duEwV3aIejGYiBlVZDdgz+DzJcQOGEh9gcOjQ02p7/ciwE4RGZVWs5gUi2nA==
|
||||
MHcCAQEEIEP8tpIAkgABEl87mJgiPLkdUIjowoDWFm6phU8bTpZ4oAoGCCqGSM49AwEHoUQDQgAEMvc0R61PCQT+NmyJjnjJugzZhfab2yb/7t40DiehQhotCpAiFY1xRIeTq0TphgsFAR7TSRajvBVwdPYfEypOAQ==
|
||||
MHcCAQEEIHQ1lyvaMTwnbab+NRQvZY4+uvrRwhiotyrZHkfnNWpSoAoGCCqGSM49AwEHoUQDQgAEujEqwGrX0laAM+OHIjM/84n/eYitBmcTTNPgoRaiGfd5xdFPdtTG6ZSt4/m+FBdQ1RAF8bvYPgyMmvm8V7uFDg==
|
|
@ -0,0 +1,5 @@
|
|||
MIIJKAIBAAKCAgEAonlf583Avnb72EySBaYxGLvjXDN9jYPPeuNw0E8X8KP5W0bPQNseJBDnXzuIoBwhBy5kecaoIo5UWVei9Z8KBYKP5OnfoEX15tOdm8wQxxACNv/9WtIAVt8Z9RGvuDseCy+Zk9aLBuxK/jIlOr0Ro6kX85Q9Dd/xErgDV/Fkekfzm/NpV5l2LsiUSulcXzsqWwhPEtLMIXYJh7otxcjzzmmrr0tVtlUE/uXq/VxCzf7oJ2liakOvmRSYYq0Kb39j1B75sjA3H7lzxL8ibth1iTsBtHLC16trq8ciFP4eUi+nncdmrAJ53vYX4M1i3lRR5K22Nnfb1NzTEmXjHHWBMjeYAe5Wx3PPDAiGMr5A8N0Bcq3pgy3jhFtHH5B2gO7GlyUoOrSaq9/B4v/UCexiMyacVUC1nfP4OboKXUXwI4CvS0UR0GTnsmloI7fsZUDJH0H8yWkXrDkueSDubTSjxK+B+WtVMXh7xjxAEqenWRKsL8o5QbvWPHjHnFsXbW2TCS72gzrbsx5n1GRHQw/QTKjZ6gaeCZxijZc0Fc7o9BEcFi2RUNmf4hdlvw4ioVeiwOCBsl0R8mIow2PlYsyrzqmPCJkdEIuxt2pstRcoiKIgMec9hIntz1QDU52SMhqquQupRa85rglkWk/AMaERj93/7ljtT8rpXpJkrsxuDwsCAwEAAQKCAgAJFgKQAwihXiQNX5LW8AlU2cUINfTyggmLVMbNT28GTRU3zojulpJj7/IQeFLWqVwLe8Owr7RdYlpDOMxrMf6sejWz1oRkASWUSAdLXCkgCLwm+T/g4VeVhBp+gwOxqNSwSXQsOVZDB9nVx1Yfp80huuYVABg9+2UO5+1/8Ibu87Ei6oTcmj9qtFreqZg51yQyPQ+2/MTfpis4ROgkZG2GAJDSV8MuVCjYXGE80/kEbhnrglA3oxth0y4GbqjjvVsG1Mif92RNyOA5g3rYehBwoTUiMzzENNjrnVJBnEhzzihwgWMzgVvxZQ/GyZWa21pNEI3dE+zeRY8kO0BtKgEUiAW3Zvjhn+P/rdP3FsThLhVawwcrgAYnuRjp9B+NbycctzLXdUfJ9SioSzBx/9ztDHDV3AzH+zOtfrUDmKQ7gLYaJMXZ9ZrrFM4XuxLADo3S4R1zjxipEu2iy/ePitP11sCgVJ3jMWDfHaZpxGUFGypCIfI+rACny9HzrZBjYUozs9LCC4a1bRIfyfmgPp6w3IPOqpyWH9usswhDZVD/ubQ9SKvNfsiQ+oJCki5Py6gIg/6cTt9VLX6e/UPUJdcD7cebnoQOQEjBHT8ONOFu+XhUQcpz/A/d7Af7Py8y0okpZpFYaGAgyzhmJqYcTyriER+cAePaiI4sjiS3z/WX6QKCAQEAxGZH63VehvqWhSZbmDuW+k4KHAKoSPgP0nARL91LxUqg/wE9Ca9a+r3I9y9y7GHLFQc0Z1j/cvpQzQgsfZZZlZ5rHiBKfUgg0ia16pJyVaJXx/7BiL+vJRvFq+AFD3hyIUXooXw3Bnu9TpkNyahIoOJUMw4SNkjV++61ERfr68x1+nfQ8yI1N93uNFXkEZjddlsm6+i8NOBbxvAtxxsnK99m2oL5cGsZmdGwTHwI7lkNvF6QpUaolD5JUWmYYjQW3FLU+fN2Ck6DNarXvq9j+8Mm/1Z+BjW/aMYst12AsUvZ/W/6KhGhAfCDTcmiy5E2fPt39Akq3k7lszwbTXjO/QKCAQEA08eGafWOCAchWyppOSl3kwjYgzzxcTPWYGwcw4pcTHDdVKIpCTMJBc1yi1mykba4lH1J2+bHt23Cwsic7zMDDzS3/0Tfa6XybBOABXokAHbMBWPBSiapou+5s53psI35TrVjnQ78UAxvAzL4GRqwmPLoo122u99rEIcTDIaHmzryDzuUbzT9ZF6r/s8y+GxqtmUdUjijf40NX1IOVm1nkF8xCsFa7q9i7Gnc2lDkIX5+is7NPFMStG9MMFJ7BQi+Gj39aNeL0C2IBqT8J2TTs1WWdSjFGVkRQM/RQGu7+j2OrVwU/KHbey4sbkg3JyR66A9ZlL+DZxIeC2yc8r2opwKCAQB7LSboLvl1MI7cvXzu0ljTbcR7YtNGyIWr1uuAmItgFnk41f8BJwCpiAQVu6WZSZBEBPvB4Zh89S9eGEVHEtfmta+6+kedoXIsO2kW0Hhy6tmhLk8VRBtF69AjBuJFnKX/5keLmEsufsbxF58FHc1HUgu65ZVYhH/ofQME0Hr/rYFE8IaVJpw4CLHQExpySuyCwZJ8nMAFbb1B3PrpCEeeBYQNgd2Nuy4YQeIPZi93xVSPEsQnVFSAfTwgmlifJso7rpuylBYxSqKSMy8Sgg/0I6Y+uHFb1bPRs5DyA+ulxsvWXreIUPwpKj4/uDmo8M86vcCyxlkjiloX+pepFB7FAoIBAQDLVEO/M91746yo5N1F3oRJjTKjPj3pnAV1ahdrvknDspEuEssnY/KDua8CD8qckDxDhM8G7FpPyHg3n3jdR7LmRI1r7uo7ZoaMMWS23X/3WhekDRSlZt1z7In04+ZcdtMoOIs9XgftqyJihesh3zgygPXO88jgNC+NHMKzQe6soTmKWn+3Db9DQIoOCU08qosCN66hJHQFoJ14KUZOwJewS1TAqMfvZkCqDzHWAdgmOFayWr897dyFlF4n/ujR+oTamS3WHSzFYRG6n2jhQbLn+YKNVel23sbr3RtgqI9AQR3dYA1eSOB7MefCsl8Rm7rNVv+BCybULNbfzRYbEPD9AoIBABLEFXpQIrbWJhsElj2VSdbtSLGgnfugc37G3gmBVtIA/l9ZZ5Kh0xuRUtCX3LxUp21+U66Dlk3YxcOKUu4jrO9VQGawzvqefHKvY++SapW3hwTBQKfipG6r2/344rh2xGH5WNV5y/n8Rdubin1viPXaFMy2Xg4N6Yx7XNsYqqt1ctjmk1bZ0jVXo9vEzOw36MEhWjZr+dfJh7ifMrTUEIQzwawbC3zIf3WgTVfjFmcJuMubPa6zPeypdmfK8ZCPSpm+s0wpQENqJoo7b5qCtrXM8ZXSk3wWYselJEKMxe6N97ltXRtRYOzK+mNiwsAKdIjg7bWb4hl5urc0Wwh5NPQ=
|
||||
MIIJJwIBAAKCAgEAu1Rb7RmjsZOpMfxtXLNwg0rj3lu/jJS67xZ6RlRi6wAeSqM9x2i1W/LwrcP9gp0XSYYde7WzDy1KVkm7x8zV38rrMeWNKISF2y1IyatwXUNWpMpKLT5Px37ayyU5HD29eH+A7yAA6wfvv/QXNh2mxf/qTquqiZ6E/Z6JjpVOOIJXAjjH4k4iHtXrDR11VEvMhpoiClue4iKYOZUgZdP2fjVYtyJmKXr0lpAtuROdZ+s655dUzvNkp9PlgPLU9iF020DGwD0/4JFXAj1dYDMwKlLkL1auU8zqARgWabis/4FGLwN66QryAJXySyjBcByPWtuapxZwlKyqr4sDUfd68YuAQ9Y6sTS9SCWIeM6iXpxRBl03gFITDv0E6T0RXOBtqIJ5he3L9ajR3ynGN7jA5R6cVNJ1gtqkFRKquasTT+7iAOZdNcIXl7PP3dPNdWzGONAf8A0yEyJ7rKxPZLx6KuZUZ6q51JpDa7Rr6zcoeKVZ1rjisI1dtXEneqQ8ay8bNRzuc8IBY/AzYPcvK4oQRj1syVL9ZqWBxnahKfs/YJzyraA/GpepAPSUAIFpxcad0UOl7HYuawkk8a+nB/pfR1fAmyFJ8EKfg4utRG8tnP1W86yIVZ24Uqaf5RMeR/S3AiQyroZKODUOLWdrnTJ24t7Pf7+I3pcdzVFY2dFo66MCAwEAAQKCAgA57wDu9rr1NCFjYBnCTYxCTmriXaWDqIuXXXA7R+W/n62+j3WPsgZEhYGBe9dvCX4mMt/zXmIeE3el7++u/t4189+A8ukQ2ss62WplVCvP6/9jv1vpWhf6LnUj1kMvRLjQBz6stU0TYsT2rFpxVgI5wceIxPZc0TV36gvImMw0Fbsq6LyIdHmA589lovKiOz7pccmNQoCElwZU6JW4aDEXHxBssqq9sVTrK3O4hSRs+wagkOCdkBBVBuc3VeIerIIjPIk68rZKkShDfQ7frXT74d7H9Rq8t+a2p+hmkcKO0kAOUhk08TmejMD+FYCdV3fONZRCkGII2M2AtJE2TD8s5OgLvyrk7/eINf1npwKPLW0+jwHvF5IkxFgrEGVO6mDMcKnS9GJ8Okq6OHO6NsG6SKfxQ6PJEBU3k8YPIQmdpEPgmu+VTzEPwsaImLZBclEkXX9IfvWm8nfoI5KK0UPkHBW+9gPQxa1p5FaHkXO3GARurRU/dIkLU8sBYr3dorNbW8kAQvWK+G8BBKoIQRKb9nGvITcny/+vg/NTdumUqxBrbRL90AgZXC5HJ8fhKgRpsR2nL4VUysESJfE81QVrpN7tl36R+F7oscHwQhpknhZz1QrXb9GEn5ZDvZGol6X3EpCMJKgAQ/umGHohltbanCGJmrgMaQ5wZjLm4yIaQQKCAQEA1lTP3Cq8NgcY5xJFdBGeK2cPpGQ3frMue3dXPEHDaHxpPhllAFbIfWwfWocgTlk3RKUpo2Kmu/b6r+ZHaIEMSC6X0bqPBLEZ1XJ8XVkUBucjHXHqR6GJefbsCipPl5TbbtTrmnDL7EwIcZ2g/dRW10zbfV7M3LDhTQyEsPG6Aai4UVztxFV050wmG7N6fmx7TFx2xtlvZ41sIVMB2ZpodjMV74xV+g22ojyRDMT7+jHdWAbfT7+YXyO1Qb1GvOsLOV9M4sfAKVVpv/AllTfAtgYvIufzRjXIRnTrR7rIeIQSaocUxJaz7NAslKgZAFxijuQXoy4e6iLjovtB0bIJOwKCAQEA37+t4tbDfy+J3O6Tfmioy+aHQ9u+7e8a+vKsnyUxssE29n2wfGtq8wbpP6+ffkLinisu4JkkHeO5A/cvbfYQWLmDRDnDMyO2qJROoI4E1SQQtqAAo/194oiPLY3wKenw8EZn24EUR+7N5JM60I2R3kZKe+Hn6VsIn7SjBSLGMBzGRDZ6+jaYT/bSu9E3JRE2ZAiFo9uAxroOMx2pfhaZ0Rkjav8fURc0pMy1Jr/bTljO6xLlk2eLLEe0SEf+5mzMr8C2ZFklDDFhroDSNR7zqiZ0qslNPXqAdY2a6hbG243X7BjY/wjJs3P8bRvN8nBfO4Gj4E/yo7LMUbcbVTzAuQKCAQBg7yqzIIDOzpbsqs7Ol8k5R/tDFjAjFVbIcEj776I490uB0mpjpNw4HVZw0vxBcwgT+77BLjTKfgES3Mse8H5lhu0S5ZUZQh+08XpdnZP5K0AuaP7UNrK+fnZygoBMxf4YOiP0TV9tF1YaTHgrVIWSCiiaou10QmsjNa4teKXvaicyZR0D1sJweXgivyF/XADHoGn3tQ81fjiROcF0cOkdzCz3kjiwRtN6vHm9lLmBhlvdG/6TSsvW+4dWnAwKFSOWZwuW5VB7YS3aqPnWasi1ikZ3OwBDmL9jXi2q20cfelv54e36m030pJIFCaGFEENi7LXkD0/cBLt/UrOJ60NRAoIBACE+ME6Yi/k0je4WQDGyyQlqWcR6PJbLRJ3uOjl25Dhjvo/Gwtbper1a1ILSzAaNV3AaiF90QUpzkASH0DMtMOaNwXP8hQTvFG9dgYUXNJ6aOiV4BkBXJEHo+iTRhtIjYNnCDCIvvlylnU+NlFpx/i8hEGXxfWxNSy23P9U+gmWZkNeHpKPSQrT2vf6Lqf7G3Obl4T0kGrDr6boseQczwmyvqHR4LSX2Jasqguub67BrP2rrptJMuv+8M4vdroB7v7mJD1A/cmKqX5H2r8SWsashBYpuWBx/O+C7Rj3rXmY2X9NDKdJScu1HmUR4shOIII4Nx9RG7IznvrWRV+1FFnkCggEAZpeoy6ucv1+0N7NPPIq1s/bYOVMEFZ10+EQulNOVzLkZayFklfuDN84H7goEGFZx9H5R/vWOjPWot/886S8qGMGxeHL0e9smiLg6MC8dXTq9+kg3hfOGB/WeiQHLv1CGuM5aYZqAEIbOf0Svn2GR1C0Z1TWHxC8dr6AejSZ5AZ3RpJoJOwvQOI8xnUr+pXLxpokrPgLM4MpfgZMBkRgUGZtv0CEufiP2npDnNBD3kCvsBBhdsIynCQv5LU79QTaYWheSJBSSk70CUXU/SuEYDnj6yu4DZhCehvIwJBkS7D4YUJjZPrj5slXjyEernNyORMlXuvK8DMBn1gK1SrPh7g==
|
||||
MIIJKQIBAAKCAgEAxqzmn280w+345zwcVb3sn51+aZ4fHHrV9ngkzv/+qc2chBSQb+yfIh9SGd16y9Ct6yNRpkwyndcIBkeEzMsQ89vRa+5yzGwGpj/+GJ43GyTIJx4LGnvZ2rfqsw7emp06+bjMvfB+zeN2nWO5q8VX610q8alRZJKPqs8rNLi7x7GWIAj1JThocuxJ0VkkWeBhDCyMa4sZvaYunPSmBlvPGfzZ/el2UFTUXmyJTf0DcOqJP46B9s9F2smel3t7k2v5J+r8qD7JO/Q5B0vGMJGbSUCFe7d5usGUO/d0CrIzS/GuvZM8s7rqUEnUsYrosC2uURwE1ub8A7IcTDMPfQJ8L5nm76H1l+CZfBRxlj30/ms6aC8h0qVjpfN83suxzX5s3V8HsBp5sd4KA8dFG1rZb+9ziEwjELV2YWkiHEdn5BZ/t5i761ufE882c/e3LgpEVqbcaTczPSLHbdwgA8JavgwzSuuRFnZatYDoGbr3XRBJePy/7msIdkf1wXxeO757vtR2NlCqy8ERSBCI1oHf7wmsTjZbYjSdY6hvQI6gwLoSJWvlckzsDMdCEP+Vj3Vcqu8pWwT0WP4DGmJH3ZCtNm/qp9llPJagcD7jtJQYD2fBsmddtSPnZ08yQJyKgvqB/pkFD8SqrUJUk22rHdX/7ITV71x1c7WjHI/XqbxZz5cCAwEAAQKCAgEAkN9RiF8CHgEwqAVIMCm/GyEwJfoce5sC3Kf9R0iavd1lzVZlgIgN/kbSinPbcDXqLHVju50Cp+A+RL3wE5nb8caP7mFBKtl2+9HcCr2MN2nZ3dRlillXocrNBObJ4frXH3PnMHXnMWE1hE1M/fl4Q/N7Nh8eb/UDeC17NwWiQFq5uwBtKkf4uhydM2AKRtN8UPFHL8CN86HziZZIXoih+zRLDYEpOiixd20zrj/aO4N6s3LKSFwadxDgaVus3/IzDC2pMVIfUW2kDDLwAUib8I1xCKbgyBMScave3VHWYAGLR0fPUsvwwr0jsJc3WAjnfNOun7pteJBvXA07tQ+T16UrXgEtu6tlq1muBcBjdjLBI7HcxngwqRZOKLxox6uSAHpSNQ6NamWBtmBYOzmZ7V6mrdeOmLJT6B0y8JWrn5yqR1/bGmVhJ3mS3A7gSCXLeS9shLtEPXiQa3ry9Vz1QQo18LK3AQrIb9aWvDWOu5f5llqOgrEWl6/UkaJU+jFHTX7iYPewWaGkTIah/haI6CWp0gGk7nPOsPivOJostYmi3cyNzRAqpmv56ahOudPHFQnIjJrUXKZ+p4BVmHpibL9GEVHkKwSsg7nSXA/XnWReAjrov4cJTMPlB40bqDl/TQ0eJVUcE/bIJTS0Wm7r0S5MUq5JWlMzmKU8T1G7l1ECggEBANzid637XvUsKQjNeJ/daPcS81GUsTtpIPGtW291MkGKaQhqQc3kvU71IWCKUyN6BXidc9eGohzKVhhSRmdPjIjNJ+10SJX83UbUgKeuMVWiyZsMeIJOFR23ub0ex9av4vmo5HkLRX5vocI1Rp9MvoOyxGwygRRBiOPfKnux5amho7YaJkfn9b6wFHdC5AQ8ay1oD2Fz8abTMPkXGB000dWiPrvFbC91vczAdAS4hzCa85w+i6H6eTzVIDLBARLNYeEMSD4ao337tdie40pAFfT3h2iSIO0DIaA+S1xeMdFoiai0K4nUJpnWGxD28Y9tkRtAUeWnRlQBttLt+tX24nkCggEBAOZCkInHVpBdFWxjxDr9GgIzY6SfBq2PcHrMv65cPmP2/LONnYV2y9z0N/+jd0T0+uuNPWAeX13+sXK0XdRqfP8ejOS61Y7Z6iaCtxCxipDskv4qHf6uVH5ftViKbW9G/ibzULCGXS46p9dTjxcWL0hv/qeoS7+ExWIuCNDCCh6X7v+Rpfe1WzlpS38C0ldf4ts2Ib8lDdKVBtpZL1nvdPUH00cbByjsC1QXzlkHrB9GbFFL2nDybkZzqjv4ZsIdvTQlP0cuLiWgLZC1/5OAo9GZWPUwweTJpknSTb7g2PZGoOEFYkVBQu/uzHXqZ8BsYFGX94VNCSDdvlYX1GtYPo8CggEAWdgf9/uT/0OMGVGCK/xiMoVZxSgnAtRbGxJ5a5eO7ou+9zZJN9c/jTs17U+2S2rZiNr6gIL5s541cMUeb+0ya36PLA2tbJyi4KmU/x9iX8PMHy3uYooiGqldemhnvpJiT0x8SiPki24arKUA6dDMeUMglCXizPtvE8j1PcT5dosRzQNL6NmX4o06ddwWZQayTZDTGL+RXlruw5K6Mz8FztCklbnSrGdG4V6yki0AiCuuxTHHmAYKhEzoQNyydBRoKKLXS7OIJrlYmRjfuJbil3aLdGRj0iQA7G4KNzL1LolH3QAT6YcKgG5PKhJX5RAbAquh5Diq6Yac96Tm71rxeQKCAQBS70TKK181biGKQcDrEkCs7D+GirTIDLMRt++OaLEcJljMf0RuTuSMt/X3gLQzxR0ffGbP2QC2hUb6rpZhLN3KsgFDXTwQUNLyfgBb5ean3l3vZ9IX6kiKoARcWLej7ekatGaqviqPvRa5CuKuCRKMv7H0RzzJRZQmkbghINKuREEUEiy6NLsfyZokrCqAyAuYsz2icEEzSPQH7zmL5iC1JNVndge1KJfH+S8ciBPpy961xWFvbxJjE8QMB6NTZYzBXG0XQT3j1Xaz+amNQjc/BkGxRpd/xVW8nx/bkXbz9Wp6wgqsMR5f9x3zlrM57o8etGK1w5J3IQcuUTT0WPtnAoIBAQCd5Eic6Puecp35/wZtk3vtYMbM6Qg0JhX4D1/uN3xfZBCKxL9rerlrAMpQuHAcqU7OwNokaQLJn5CUs0GlXMoBEhYLcsa8HRa+HY1jTUcDStwr5erBCJVNbAJyOS361AHzJ2NRcC5ZibvKUha4F1fcjWT/4JkoELY3mtbBNXO+X4Za3hZiwLLwH8yzO2BRHEEH1KFy1e3yRXx5SkDA1ZAfebhZNvyIw/EQ0GZ3+zuzzoNxkgOnB6BmhWWVdVo1Iz/bxCVXUNbSkVa7g2/5Jl6OBzni8VPsc3RVTxeJmpRz7CxP9pHJV/RNChbkb/s80Er6ZsuhVasMmICtXEXuvl7B
|
||||
MIIJKQIBAAKCAgEA1XJzVZsvnmQ7XvV8tmzY1y/BGOsi9G/Ux+FJhHKeimcrJ7MybfTpsDXmbtHGwrOwq0W1rf3GJfDNXpp/zyr2iWTbLZg1W6HyKuheZiK2IxItdnIW68qh0hpcqAlg41WclX9K26Rqc7CdwZI0ssdZGwowjHKGOhrK8fZKvurJwNHE8euxuh5T58TRCrdj/lozzop0jIJMPBW/GzmQhIFOKI+j6PIhMwJ1jBHEUG+yeh0VqapGzPmWyGqSi2W1exBaYmruneCTgaV3Ok3usfcq8GMKC40YBtSOwQTDTBUlmgAGM9qN4PtJOsbCXwsIdITO8wmiLzi8Bt/tbJTPKZwUe3BUZzrudFt45ifGDq8XDGJLzpegqBrmibwR9IwjYm6FBjN0tj9wUIfFHIt1iQmXBMiGadXUF9I05YdPukxPt9pJxZ0+4e4p2FoK+8U74iRu0gBg6k+iT9WpFDl4Lv1/nZ2cV9UEjbqwXmT2vRejbispiOyvZJQPwOpjXlDG7AJgVnldkM2K9dMUB6jbaoYXI00SOBH/G8aQloqMXdkAEvqyv+TGkbEgpbftnq/Lhjl1yFj/d0Aci05pvUkxGd5TFrUG9BLAagoTBfcXB5JnUkZJnZTfRaeFc+21KvnbroxRuw5IF1sThD6IfOxa2757za+XxwvXOnEyRds63QpMuQECAwEAAQKCAgEAq0Kxl7Z0droliQOflR+wvLfsT3obCdXQ/K+etU7iNlqhHbGCfQdNmb0lrzVSLVbQIhxEsd405qxXm9iLREZMmeD6m5LM/UDrddIMLBcd3PXw1GCmU3q6bEIKpikgPEes46bHAYTihQDpEy+kaO3aaP+8BHS/jk+BFrNvNdBdIQEu7I0YhSVgXU5mDBspeCQX/VjKOxMw5+Tq+YUul6LIe3MFKRtd1/fZKRH4WjHFGoXLo96kAEMi4JviCUcKXju4yc7StopC0HtbNsThCJoxMb/Fh48mABT8lZKkwJ8XFIGAeIqb3DDDiQ2BV5Ear6MACbFpGEqFU17IuZ6Mue4p34oVBbBlPO5EDHjXCv1OyAIFYbCTw3Eg24MR5n9IITyWA/3ZawEyuxEpVR9YUF37NdwmXTFo6sCJwO2mID9Lm9VLMaWJgzNCoZD0/eFwqljo3/UILXFIXWRcNa6aVRlM5ZxIRtqTiwjsN8XWHpQ5N8c7Eja4UBYr8XTG0ra2OgZgdV7AT5pI/LqtSnG0a1qgp1OuZl9Ya4+HmdvOKJgHNFOSgsFzCuG95H3LQqWj9C/Q9v3vw1w58T/uZ2fxSDR8t9IUS1d8x6Mhe7v3ZdkAwmvkuKDpW0wXqlpHwHGjA0QUsg6KBgvembM0cvI51JA8fwqlWNF0JrLYnfbFndq5JaECggEBAOxxJ7jJBO7oKPBxWYfQsP5fzzQtR5su9cYvQ4FW4s7COre5bUvUWWm+QKzO3O8qdzneaDXOMr3qm4ayrghMZxlsOQB+M9adJMqMeoi6xxZOoEq7bb/c9bNyeNd5DvKpi9l/FwgXerFcmMYbD5aXtkbCPNJOGL1FtGaVWqLg2WkzyhYBy1PkiTU69fKNmwl0mkLAsHVJyY4fYia5wlVa3qsCJrCr1A6EhCyeh7ajdPGheFHO4paCilnVBuBkXcAsGV+QKekIc3VZ25yx92i5ffg6qfa2dvOnEJ9+G0GR5jKDOBtugLP+Ct22sQGUvqT1yM/AK+/r9XoHoN6zw5bEVB8CggEBAOcaXAfChblJX9/0h0Ce756WzU/zVoEtGRoUf1b8ep2pUhDpeOIEINmJQzGBFvGXBpHAYXmrUoOKWZ+FzoIJfilTmnlVkladnrTQBukqFEVMWyjaA6mQi86p63HPH8p8VyAUM71B87N9wO/W8ZKIuCmjRNmjM2M2mLinnWp5/ZVYWIOoMZ5zzDbSnjMYqXComgAMJ55FpTeb4k/DyT9BSJbaH9Mi+frmQZRsfiPZdpBc6AFkh6x1Nf4L+NyCMGzNO/eA2cMrhhoStenSdP7EFyeVLj6wrHr9yXRfg6ixR8pdQxkxblpB4sKz7tvzYFABaEWXA7ojKGwM7H8ZrJeaTt8CggEBAI0mcCQsPTmydt4zjNM7jq4M3FG1xk2qFycdCBuqlvP/l7YeiFUAi/BIl6uSQuq0W0C8uVGFq10dNYOQH7zsf4/5v32/2YqBfXk9q9j8R+XVQnPauIYVjsh1yhY4Oug6tzY8XK+D9Mb1FG8PsJqiLzrGsjCKVtKHTk7/UoBx4tifBkFlCNEoWZDhhfoq9ycJ6R14kt8yEZU6vwmHU7BI8yz7t8U4jvuruIw6h6JMqn+leYK9Ex/vBlDij5xdzmnu6abXFW38kZC4+BXvWFe4hK8vtk+GVjTWw2GzkunuA5KcK16U9Vh+jFYXvkvHVJh98kez3Yb5Qi57Z1oLzvOueNkCggEAaHomOOvcAJQHX/vwWWqA8bVDRG3CIcv9mSjDGNQzqqstiq/CEovDPhUr52lwse+Q7LuIVbT6LLWTJucbIPahrpU5NLlW2wmPAjc5qJd5aorGe4V0Omg6jEeNRXpZ6PSlfr8jVOBICFFFtCf8Nj7Q4yIIsktxo9y+1lwQcl6MIcQ63+pd5EB9hhtZocf4chIG7RaRop/hvW4ftefRR2OAKZKa6JzbXm5SH4Oc0yAw1brquBZ2r6SdsChBbY79zXJLNe1uhSlcCtWCoB6g/bwwq5hpu7WrhD7PoGi94LmktvvKpc0PEobOuS8XQSCZ3hGEzlEjQjc6wv5kvGEjAbalCQKCAQAppSlaI/vMwjOtON7rxA0chWYr3xK2kMAsW5SSIsV5/sxwcglZgPYBjy0J5sPg/mPXxVpwGGhAl303qPJkTDCNUhkKm75RnDsvCeQ3QBYDmBL3TMsy9gVUpulm3j3vz6RNwCH6dZz9iLK7sJRtqz1mineZfo2Jg9XGU9Phr0LX5Y/LsCBnoESEiPva4iwZqJKyS8SDCAq/T2IzIYqfjYR2Vsz84REIejZ4PJcF1S/cFd5SitLYkLeSh0up5org8fPbdNG4KcRuBjZp2Cg6WVaip7+iIdJQuJXL9uluczwchukCb6V1tovr4nPr3qKU3aB2Ut18Qpc363d8tyhSEpIA
|
||||
MIIEpQIBAAKCAQEAsbSxg1wOa/TuwCiOaIGCwIUQXoKOtOOy5IUx7jhiH89ZBwyXelw2mYxHo5VtTTKjmreAgb7UUle+9+/b92uFudJ05VammjgzjB2jOJT4U5luMFBIjiRn5AH1EpXxHdHAvPB9BaNCGC70DjbJMskG6keJeM5VPsvvV3B8wLkzRRvQ5eZCFdAz3iwYHss3Wfx1WQWLG4zE8uXq8IgmJb8+wkLzlsesvPfZpraYFE2A35FeOFYTPY2XqQ3hHAnHdA1pqhWEvq+Nqt3b/UxusEKT4dTjJrEg9CQXycBM3AXudnlZ3Dfgd63rmIVWdkRRc+fhEU/aBes16FijeWNyVq6CAwIDAQABAoIBAQCPdNlgu+4AhMhAY/0irHqLRGBEi4z8xlxiUoxOTUIZf9/U6vtWEh8Zsb8bdcO48EpW2hBprU3l82P876tZf3mC2zz27w6rrPcgA8UCJnCZvGDRq94GsakUzeezy0vM0MmRPWsQJA11TGJFH0EpOFPWyU1XMupqOWPL6MktI9UZ0dPe/Ce5PjJTfQgcje/vbLJxhOMTcuX4vtu/SSEGPWf9seLTickgqsz5B1pRmV8tCNol0DCfjy33jXAwx+JkuNHYeO87cL1FMwiLxMsV7XznDDNj9nj6senTY4likuC/lvGzd3zbBKf9+vfoAffpDLp3TJ0OXqpgmv+tqWeu/NAxAoGBAMoDiYKkb4XiM7ZyFYjRf7JV97+OCAdJLIov0odldKkVMzj9Sihmsv//Iz9gere54X4u51JQA0y1RApw2yA6S3U+E3ULKlD/GZ3+sNN5+RIhuRX0RFrcCr4zHE3uhCv8Zw1XvOwguj+cEd/4im3IapRvPhyoAN1TCiURo7EAaZHVAoGBAOEyKcXVPZdIQut+mJHBVwApy9vx514UYs9Y1g7ICn08IoKvfntB2/7scvV70imhXaihNDFVcHJEw2d7ezTh7ZaLIw2yOUs8Z9ecqiXDDcraG1g2zMBILrIzCt1EzAm/6nDG+oWxof0NRBUIlvubcOUkLbm2stKGFjyiLUKAUNh3AoGAL1V81vuolfpWm51HpnNQNH0oO94Py4U9xnNGJSuElkm9YtEWmzjVIF6r56G3n20ShPqAgSz2WPbwJnIjJf4CyEnvTTF8jNX5LqgKzsJ5teHo+Ffv3EpKyh43t5r3pUbS7rFsNvfELDtphM07wlV/g6A9MYG8Eh/u9JiZflPWaLECgYEAqhKvd6iHPnSljaLraNDies4WFwMHoIa25S+LYlhzi4dZ7ObLGB1yH4HFhF5CJ4yl5k/w0f1b0gDarpGUSCkEvThRfeuG16pEJpM78Q7xoKy+EA467lDVa2GfJ+LoUciuLwQnecbgtvNI4rW9CUKorXV4TxSK0vHPY3V7xwiPhhsCgYEAlvERyrVhOT0u/thXpbQPtJ01Ydwgv94acHC6mR+e2Ty9jO6OBj39vVIWeF3PGd4ZHBNhaKfYG4X9eUGwepU2ewhjqjxLr3DSTgjYHikUfKMcqOYRuNMltkIfYbGDRVFQPvik2abm1mtlvIBm5xlZCe9AEnN7S9XnR6rxA0lQIOM=
|
|
@ -47,4 +47,4 @@ The file `gencert.go` can be used to generate PEM encoded version of the client
|
|||
|
||||
```
|
||||
curl --cert cert.pem --key key.pem -k https://localhost:8888
|
||||
```
|
||||
```
|
||||
|
|
Binary file not shown.
53
Godeps/_workspace/src/github.com/docker/libtrust/tools/buildsign.go
generated
vendored
Normal file
|
@ -0,0 +1,53 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/docker/libtrust"
|
||||
)
|
||||
|
||||
var ca string
|
||||
var chain string
|
||||
var cert string
|
||||
var signKey string
|
||||
var validKeys string
|
||||
|
||||
func init() {
|
||||
flag.StringVar(&signKey, "k", "", "Private key to use for signing (pem or JWS file)")
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
if signKey == "" {
|
||||
log.Fatalf("Missing key")
|
||||
}
|
||||
|
||||
content, err := ioutil.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
log.Fatalf("Error reading content from stdin: %s", err)
|
||||
}
|
||||
|
||||
sig, err := libtrust.NewJSONSignature(content)
|
||||
if err != nil {
|
||||
log.Fatalf("Error creating JSON signature: %s", err)
|
||||
}
|
||||
|
||||
privKey, err := libtrust.LoadKeyFile(signKey)
|
||||
if err != nil {
|
||||
log.Fatalf("Error loading priv key: %s", err)
|
||||
}
|
||||
sig.Sign(privKey)
|
||||
|
||||
// Output signed content to stdout
|
||||
out, err := sig.PrettySignature("signatures")
|
||||
if err != nil {
|
||||
log.Fatalf("Error formatting output: %s", err)
|
||||
}
|
||||
_, err = os.Stdout.Write(out)
|
||||
if err != nil {
|
||||
log.Fatalf("Error writing output: %s", err)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,8 @@
|
|||
{
|
||||
"crv": "P-256",
|
||||
"d": "z7JYp-fmJMFPsalhcGy7G0RNdE2l23VDcNh6zYI75aE",
|
||||
"kid": "UAY5:GSOJ:M4G5:Y5E4:CZ43:HJC4:G5AD:IDW3:GM7B:TRKT:5C3C:H3NH",
|
||||
"kty": "EC",
|
||||
"x": "8P_ZDmt8RFIeIDR_vma9SUZOF3GWUV9gDF6MuAJY68I",
|
||||
"y": "kFEmSOPtggWhun8pyUh3V6oQZYXfQuoz4Ia7ituWpDc"
|
||||
}
|
|
@ -0,0 +1,11 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAUSgAwIBAgIBADAKBggqhkjOPQQDAjBGMUQwQgYDVQQDEztVQVk1OkdT
|
||||
T0o6TTRHNTpZNUU0OkNaNDM6SEpDNDpHNUFEOklEVzM6R003QjpUUktUOjVDM0M6
|
||||
SDNOSDAeFw0xNDExMDQyMDQxMTVaFw0yNDExMDgyMDQxMTVaMEYxRDBCBgNVBAMT
|
||||
O1VBWTU6R1NPSjpNNEc1Olk1RTQ6Q1o0MzpISkM0Okc1QUQ6SURXMzpHTTdCOlRS
|
||||
S1Q6NUMzQzpIM05IMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE8P/ZDmt8RFIe
|
||||
IDR/vma9SUZOF3GWUV9gDF6MuAJY68KQUSZI4+2CBaG6fynJSHdXqhBlhd9C6jPg
|
||||
hruK25akN6MjMCEwDgYDVR0PAQH/BAQDAgAEMA8GA1UdEwEB/wQFMAMBAf8wCgYI
|
||||
KoZIzj0EAwIDSAAwRQIhAKVFo9JVRbjIJaOjgMpPUuVMxfLYffDfiEYOPvJkEu9b
|
||||
AiBWSPsJyfyiv5sD+qKc/pKU31o9v/gQ8agQMs6weIomrw==
|
||||
-----END CERTIFICATE-----
|
|
@ -0,0 +1,22 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIBnjCCAUSgAwIBAgIBAjAKBggqhkjOPQQDAjBGMUQwQgYDVQQDEztKQk5IOlgy
|
||||
R1o6R0tIVzpJWjZEOk1TRzY6R1BZRjo0NDU2OkNFNDY6VDRKNjpWVVdROkZEWlU6
|
||||
QUxZNDAeFw0xNDExMTEyMDQxMTRaFw0xNTAyMDkyMDQxMTVaMEYxRDBCBgNVBAMT
|
||||
O1dXQUM6NURWRzpSVlhZOkFQU0M6Q0VaVzpUT1A0Ok1NSEo6RkVaVDpXTU9KOkhW
|
||||
TFg6SFFNUDo2SldLMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEfmp8Xhfx+d5l
|
||||
FYD+Ouxq3b75DzAnCGjY2iGGwcGRpkmk5dslR9JmySCKl7saJHYcoGdljR1PeYwt
|
||||
aRiEKL2Nm6MjMCEwDgYDVR0PAQH/BAQDAgCAMA8GA1UdEwEB/wQFMAMBAf8wCgYI
|
||||
KoZIzj0EAwIDSAAwRQIgBhSPTHU/d4SvN9fD54wM9Es856s7KSikP2CSrvKvNW4C
|
||||
IQDdwJd7h0u7oMKlD3QSadun4Wwu6hKnMoLevQPeRMV9bA==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIBnzCCAUSgAwIBAgIBATAKBggqhkjOPQQDAjBGMUQwQgYDVQQDEztVQVk1OkdT
|
||||
T0o6TTRHNTpZNUU0OkNaNDM6SEpDNDpHNUFEOklEVzM6R003QjpUUktUOjVDM0M6
|
||||
SDNOSDAeFw0xNDExMTEyMDQxMTRaFw0xNTAyMDkyMDQxMTVaMEYxRDBCBgNVBAMT
|
||||
O0pCTkg6WDJHWjpHS0hXOklaNkQ6TVNHNjpHUFlGOjQ0NTY6Q0U0NjpUNEo2OlZV
|
||||
V1E6RkRaVTpBTFk0MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAECoQE4u9QsNH5
|
||||
FmoDfSQHojxMN/NoFF5qs1Tnp5M9oHTzwT+vFA2g6jArP+ZKl67nLanA/K4pFle8
|
||||
jgcCobScsKMjMCEwDgYDVR0PAQH/BAQDAgAGMA8GA1UdEwEB/wQFMAMBAf8wCgYI
|
||||
KoZIzj0EAwIDSQAwRgIhANhTbxwevwYurL4GRP4PRreZEvSjgOJnQO0/7FGhzDct
|
||||
AiEA5gPXKid+i1xQ+ZjiNcuUzdAi828WRqmpPYVbpK18AbA=
|
||||
-----END CERTIFICATE-----
|
BIN
Godeps/_workspace/src/github.com/docker/libtrust/tools/generate_trust_keys
generated
vendored
Normal file
Binary file not shown.
126
Godeps/_workspace/src/github.com/docker/libtrust/tools/generate_trust_keys.go
generated
vendored
Normal file
|
@ -0,0 +1,126 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"log"
|
||||
"math/big"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/docker/libtrust"
|
||||
)
|
||||
|
||||
func writeCertFile(filename string, certs []*x509.Certificate) error {
|
||||
f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
for _, cert := range certs {
|
||||
err = pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func generateIntermediate(parent *x509.Certificate, key libtrust.PublicKey, parentKey libtrust.PrivateKey) (*x509.Certificate, error) {
|
||||
cert := &x509.Certificate{
|
||||
SerialNumber: big.NewInt(1),
|
||||
Subject: pkix.Name{
|
||||
CommonName: key.KeyID(),
|
||||
},
|
||||
NotBefore: time.Now().Add(-time.Second),
|
||||
NotAfter: time.Now().Add(90 * 24 * time.Hour),
|
||||
IsCA: true,
|
||||
KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
|
||||
BasicConstraintsValid: true,
|
||||
}
|
||||
|
||||
certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key.CryptoPublicKey(), parentKey.CryptoPrivateKey())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cert, err = x509.ParseCertificate(certDER)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
func generateLeaf(parent *x509.Certificate, key libtrust.PublicKey, parentKey libtrust.PrivateKey) (*x509.Certificate, error) {
|
||||
cert := &x509.Certificate{
|
||||
SerialNumber: big.NewInt(2),
|
||||
Subject: pkix.Name{
|
||||
CommonName: key.KeyID(),
|
||||
},
|
||||
NotBefore: time.Now().Add(-time.Second),
|
||||
NotAfter: time.Now().Add(90 * 24 * time.Hour),
|
||||
IsCA: true,
|
||||
KeyUsage: x509.KeyUsageDigitalSignature,
|
||||
BasicConstraintsValid: true,
|
||||
}
|
||||
|
||||
certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key.CryptoPublicKey(), parentKey.CryptoPrivateKey())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cert, err = x509.ParseCertificate(certDER)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
caKey, _ := libtrust.GenerateECP256PrivateKey()
|
||||
masterKey, _ := libtrust.GenerateECP256PrivateKey()
|
||||
trustKey, _ := libtrust.GenerateECP256PrivateKey()
|
||||
|
||||
log.Printf("Generated keys:\n\tCA: %s\n\tMaster: %s\n\tTrust: %s", caKey.KeyID(), masterKey.KeyID(), trustKey.KeyID())
|
||||
|
||||
libtrust.SaveKey("ca-key.json", caKey)
|
||||
libtrust.SaveKey("master-key.json", masterKey)
|
||||
libtrust.SaveKey("trust-key.json", trustKey)
|
||||
|
||||
// TODO better CA function
|
||||
ca, err := libtrust.GenerateCACert(caKey, caKey.PublicKey())
|
||||
if err != nil {
|
||||
log.Fatalf("Error generating CA: %s", err)
|
||||
}
|
||||
|
||||
err = writeCertFile("ca.pem", []*x509.Certificate{ca})
|
||||
if err != nil {
|
||||
log.Fatalf("Error writing CA pem file: %s", err)
|
||||
}
|
||||
|
||||
masterCert, err := generateIntermediate(ca, masterKey.PublicKey(), caKey)
|
||||
if err != nil {
|
||||
log.Fatalf("Error generating master certificate: %s", err)
|
||||
}
|
||||
// Generate Master Server certificate, signed by CA
|
||||
// Output master-key.json
|
||||
|
||||
leafCert, err := generateLeaf(masterCert, trustKey.PublicKey(), masterKey)
|
||||
if err != nil {
|
||||
log.Fatalf("Error generating leaf certificate: %s", err)
|
||||
}
|
||||
// Generate key, from key trust Server certificate, signed by master
|
||||
// Output cert.pem (both trust server and master certificate), key.json
|
||||
|
||||
err = writeCertFile("cert.pem", []*x509.Certificate{leafCert, masterCert})
|
||||
if err != nil {
|
||||
log.Fatalf("Error generating cert pem file")
|
||||
}
|
||||
|
||||
}
|
8
Godeps/_workspace/src/github.com/docker/libtrust/tools/master-key.json
generated
vendored
Normal file
|
@ -0,0 +1,8 @@
|
|||
{
|
||||
"crv": "P-256",
|
||||
"d": "VTn3fGowJO6x7dYNvmYPIMt2nLJRMOVM-GG6ZdlSUS8",
|
||||
"kid": "JBNH:X2GZ:GKHW:IZ6D:MSG6:GPYF:4456:CE46:T4J6:VUWQ:FDZU:ALY4",
|
||||
"kty": "EC",
|
||||
"x": "CoQE4u9QsNH5FmoDfSQHojxMN_NoFF5qs1Tnp5M9oHQ",
|
||||
"y": "88E_rxQNoOowKz_mSpeu5y2pwPyuKRZXvI4HAqG0nLA"
|
||||
}
|
BIN
Godeps/_workspace/src/github.com/docker/libtrust/tools/pretty_to_jws
generated
vendored
Normal file
Binary file not shown.
28
Godeps/_workspace/src/github.com/docker/libtrust/tools/pretty_to_jws.go
generated
vendored
Normal file
|
@ -0,0 +1,28 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/docker/libtrust"
|
||||
)
|
||||
|
||||
func main() {
|
||||
input, err := ioutil.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
log.Fatalf("Error reading stdin: %s", err)
|
||||
}
|
||||
|
||||
sig, err := libtrust.ParsePrettySignature(input, "signatures")
|
||||
if err != nil {
|
||||
log.Fatalf("Error parsing pretty signature: %s", err)
|
||||
}
|
||||
|
||||
jws, err := sig.JWS()
|
||||
if err != nil {
|
||||
log.Fatalf("Error creating JWS: %s", err)
|
||||
}
|
||||
|
||||
os.Stdout.Write(jws)
|
||||
}
|
Binary file not shown.
|
@ -0,0 +1,44 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/libtrust"
|
||||
)
|
||||
|
||||
func main() {
|
||||
keyFile, err := filepath.Abs("/home/derek/.docker/key.json")
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting path: %s", err)
|
||||
}
|
||||
pk, err := libtrust.LoadKeyFile(keyFile)
|
||||
if err != nil {
|
||||
log.Fatalf("Error loading key file: %s", err)
|
||||
}
|
||||
|
||||
input, err := ioutil.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
log.Fatalf("Error reading stdin: %s", err)
|
||||
}
|
||||
|
||||
sig, err := libtrust.NewJSONSignature(input)
|
||||
if err != nil {
|
||||
log.Fatalf("Error creating JSON signature: %s", err)
|
||||
}
|
||||
|
||||
err = sig.Sign(pk)
|
||||
if err != nil {
|
||||
log.Fatalf("Error signing: %s", err)
|
||||
}
|
||||
//log.Printf("Private key (%s): %s", pk.KeyType(), pk.KeyID())
|
||||
jws, err := sig.JWS()
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting JWS: %s", err)
|
||||
}
|
||||
|
||||
os.Stdout.Write(jws)
|
||||
|
||||
}
|
8
Godeps/_workspace/src/github.com/docker/libtrust/tools/trust-key.json
generated
vendored
Normal file
|
@ -0,0 +1,8 @@
|
|||
{
|
||||
"crv": "P-256",
|
||||
"d": "M_n9CSpMg51gEhx1vf28QcQGjxcT43sps1-F8Yzo9dQ",
|
||||
"kid": "WWAC:5DVG:RVXY:APSC:CEZW:TOP4:MMHJ:FEZT:WMOJ:HVLX:HQMP:6JWK",
|
||||
"kty": "EC",
|
||||
"x": "fmp8Xhfx-d5lFYD-Ouxq3b75DzAnCGjY2iGGwcGRpkk",
|
||||
"y": "pOXbJUfSZskgipe7GiR2HKBnZY0dT3mMLWkYhCi9jZs"
|
||||
}
|
|
@ -0,0 +1,112 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/codegangsta/cli"
|
||||
)
|
||||
|
||||
func main() {
|
||||
dir := path.Join(getHomeDir(), ".docker")
|
||||
app := cli.NewApp()
|
||||
app.Name = "trust"
|
||||
app.Usage = "manage keys and grants"
|
||||
app.Commands = []cli.Command{
|
||||
cli.Command{
|
||||
Name: "tag",
|
||||
Usage: "create or update a tag",
|
||||
Action: actionTag,
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "commit",
|
||||
Usage: "Whether to immediately commit tag",
|
||||
},
|
||||
},
|
||||
},
|
||||
cli.Command{
|
||||
Name: "untag",
|
||||
Usage: "delete a tag",
|
||||
Action: actionUntag,
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "ref,r",
|
||||
Value: "",
|
||||
Usage: "Tag or Hash reference",
|
||||
},
|
||||
},
|
||||
},
|
||||
cli.Command{
|
||||
Name: "commit",
|
||||
Usage: "commit target changes",
|
||||
Action: actionCommit,
|
||||
},
|
||||
}
|
||||
app.Flags = []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "dir,d",
|
||||
Value: dir,
|
||||
Usage: "Directory for repository",
|
||||
},
|
||||
}
|
||||
|
||||
app.Run(os.Args)
|
||||
}
|
||||
|
||||
func getHomeDir() string {
|
||||
if runtime.GOOS == "windows" {
|
||||
return os.Getenv("USERPROFILE")
|
||||
}
|
||||
return os.Getenv("HOME")
|
||||
}
|
||||
|
||||
func updateRepository() {
|
||||
log.Infof("Getting updated targets from repository")
|
||||
log.Infof("Verifying local keys match repository")
|
||||
}
|
||||
|
||||
func actionTag(c *cli.Context) {
|
||||
hash := c.Args().First()
|
||||
if len(hash) == 0 {
|
||||
cli.ShowCommandHelp(c, "tag")
|
||||
return
|
||||
}
|
||||
|
||||
tag := c.Args().Get(1)
|
||||
if len(tag) == 0 {
|
||||
cli.ShowCommandHelp(c, "tag")
|
||||
return
|
||||
}
|
||||
// TODO parse tag (Get last index of ':')
|
||||
|
||||
updateRepository()
|
||||
log.Infof("Checking hash exists for name")
|
||||
log.Infof("Tagging %s as %s", hash, tag)
|
||||
fmt.Printf("%s+ %s %s%s\n", Green, tag, hash, Clear)
|
||||
}
|
||||
|
||||
func actionUntag(c *cli.Context) {
|
||||
tagOrHash := c.Args().First()
|
||||
if len(tagOrHash) == 0 {
|
||||
cli.ShowCommandHelp(c, "untag")
|
||||
return
|
||||
}
|
||||
updateRepository()
|
||||
fmt.Printf("%s+ %s%s\n", Red, tagOrHash, Clear)
|
||||
}
|
||||
|
||||
func actionCommit(c *cli.Context) {
|
||||
}
|
||||
|
||||
const Black = "\x1b[30;1m"
|
||||
const Red = "\x1b[31;1m"
|
||||
const Green = "\x1b[32;1m"
|
||||
const Yellow = "\x1b[33;1m"
|
||||
const Blue = "\x1b[34;1m"
|
||||
const Magenta = "\x1b[35;1m"
|
||||
const Cyan = "\x1b[36;1m"
|
||||
const White = "\x1b[37;1m"
|
||||
const Clear = "\x1b[0m"
|
Some files were not shown because too many files have changed in this diff.