mirror of https://github.com/docker/docs.git
update go-tuf dependency
parent b2e089c6ee
commit cdc0c59cbb
@@ -16,7 +16,7 @@
	},
	{
		"ImportPath": "github.com/endophage/go-tuf",
		"Rev": "a06029e9b42bff41f0277e5ceb482ad00299210a"
		"Rev": "913d6f239a809f317bf3642019bc480d18e80cfc"
	},
	{
		"ImportPath": "github.com/gorilla/context",
@@ -11,11 +11,11 @@ import (
	"time"

	"github.com/flynn/go-tuf"
	. "github.com/flynn/go-tuf/Godeps/_workspace/src/gopkg.in/check.v1"
	"github.com/flynn/go-tuf/data"
	"github.com/flynn/go-tuf/keys"
	"github.com/flynn/go-tuf/signed"
	"github.com/flynn/go-tuf/util"
	. "gopkg.in/check.v1"
)

// Hook up gocheck into the "go test" runner.
@@ -12,11 +12,11 @@ import (
	"path/filepath"
	"strings"

	"github.com/agl/ed25519"
	"github.com/flynn/go-tuf"
	"github.com/flynn/go-tuf/Godeps/_workspace/src/github.com/agl/ed25519"
	. "github.com/flynn/go-tuf/Godeps/_workspace/src/gopkg.in/check.v1"
	"github.com/flynn/go-tuf/data"
	"github.com/flynn/go-tuf/util"
	. "gopkg.in/check.v1"
)

type InteropSuite struct{}
@@ -4,7 +4,7 @@ import (
	"encoding/json"
	"time"

	"github.com/boltdb/bolt"
	"github.com/flynn/go-tuf/Godeps/_workspace/src/github.com/boltdb/bolt"
)

func MemoryLocalStore() LocalStore {
@@ -4,7 +4,7 @@ import (
	"encoding/json"
	"path/filepath"

	. "gopkg.in/check.v1"
	. "github.com/flynn/go-tuf/Godeps/_workspace/src/gopkg.in/check.v1"
)

type LocalStoreSuite struct{}
0	Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/generate/generate.py (generated, vendored): Normal file → Executable file
0	Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/generate/generate.sh (generated, vendored): Normal file → Executable file
@@ -5,9 +5,9 @@ import (
	"log"

	"github.com/endophage/go-tuf"
	"github.com/endophage/go-tuf/Godeps/_workspace/src/github.com/flynn/go-docopt"
	"github.com/endophage/go-tuf/store"
	"github.com/endophage/go-tuf/util"
	"github.com/flynn/go-docopt"
)

func main() {
@ -7,8 +7,8 @@ import (
|
|||
"os"
|
||||
|
||||
"github.com/endophage/go-tuf"
|
||||
"github.com/endophage/go-tuf/Godeps/_workspace/src/github.com/flynn/go-docopt"
|
||||
"github.com/endophage/go-tuf/util"
|
||||
"github.com/flynn/go-docopt"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
|
|
@ -5,7 +5,7 @@ import (
|
|||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/flynn/go-docopt"
|
||||
"github.com/flynn/go-tuf/Godeps/_workspace/src/github.com/flynn/go-docopt"
|
||||
tuf "github.com/flynn/go-tuf/client"
|
||||
"github.com/flynn/go-tuf/util"
|
||||
)
|
||||
|
|
|
@ -5,7 +5,7 @@ import (
|
|||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/flynn/go-docopt"
|
||||
"github.com/flynn/go-tuf/Godeps/_workspace/src/github.com/flynn/go-docopt"
|
||||
tuf "github.com/flynn/go-tuf/client"
|
||||
"github.com/flynn/go-tuf/data"
|
||||
)
|
||||
|
|
|
@ -5,8 +5,8 @@ import (
|
|||
"os"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/dustin/go-humanize"
|
||||
"github.com/flynn/go-docopt"
|
||||
"github.com/flynn/go-tuf/Godeps/_workspace/src/github.com/dustin/go-humanize"
|
||||
"github.com/flynn/go-tuf/Godeps/_workspace/src/github.com/flynn/go-docopt"
|
||||
tuf "github.com/flynn/go-tuf/client"
|
||||
)
|
||||
|
||||
|
|
|
@ -4,7 +4,7 @@ import (
|
|||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/flynn/go-docopt"
|
||||
"github.com/flynn/go-tuf/Godeps/_workspace/src/github.com/flynn/go-docopt"
|
||||
tuf "github.com/flynn/go-tuf/client"
|
||||
)
|
||||
|
||||
|
|
|
@@ -1,10 +1,10 @@
package main

import (
	"encoding/json"
	// "encoding/json"

	"github.com/flynn/go-docopt"
	"github.com/flynn/go-tuf"
	"github.com/flynn/go-tuf/Godeps/_workspace/src/github.com/flynn/go-docopt"
)

func init() {
@@ -20,17 +20,17 @@ Options:
}

func cmdAdd(args *docopt.Args, repo *tuf.Repo) error {
	var custom json.RawMessage
	if c := args.String["--custom"]; c != "" {
		custom = json.RawMessage(c)
	}
	// var custom json.RawMessage
	// if c := args.String["--custom"]; c != "" {
	// 	custom = json.RawMessage(c)
	// }
	paths := args.All["<path>"].([]string)
	if arg := args.String["--expires"]; arg != "" {
		expires, err := parseExpires(arg)
		if err != nil {
			return err
		}
		return repo.AddTargetsWithExpires(paths, custom, expires)
		return repo.AddTargetsWithExpires(paths, nil, expires)
	}
	return repo.AddTargets(paths, custom)
	return repo.AddTargets(paths, nil)
}
@ -1,8 +1,8 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/flynn/go-docopt"
|
||||
"github.com/flynn/go-tuf"
|
||||
"github.com/flynn/go-tuf/Godeps/_workspace/src/github.com/flynn/go-docopt"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/flynn/go-docopt"
|
||||
"github.com/flynn/go-tuf"
|
||||
"github.com/flynn/go-tuf/Godeps/_workspace/src/github.com/flynn/go-docopt"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
|
|
@ -3,8 +3,8 @@ package main
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/flynn/go-docopt"
|
||||
"github.com/flynn/go-tuf"
|
||||
"github.com/flynn/go-tuf/Godeps/_workspace/src/github.com/flynn/go-docopt"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/flynn/go-docopt"
|
||||
"github.com/flynn/go-tuf"
|
||||
"github.com/flynn/go-tuf/Godeps/_workspace/src/github.com/flynn/go-docopt"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
|
|
@@ -11,9 +11,11 @@ import (
	"strings"
	"time"

	"github.com/docker/docker/pkg/term"
	"github.com/flynn/go-docopt"
	"github.com/endophage/go-tuf/signed"
	"github.com/endophage/go-tuf/store"
	"github.com/flynn/go-tuf"
	"github.com/flynn/go-tuf/Godeps/_workspace/src/github.com/docker/docker/pkg/term"
	"github.com/flynn/go-tuf/Godeps/_workspace/src/github.com/flynn/go-docopt"
	"github.com/flynn/go-tuf/util"
)
@@ -107,7 +109,8 @@ func runCommand(name string, args []string, dir string, insecure bool) error {
	if !insecure {
		p = getPassphrase
	}
	repo, err := tuf.NewRepo(tuf.FileSystemStore(dir, p))
	signer := signed.Ed25519{}
	repo, err := tuf.NewRepo(signer, store.FileSystemStore(dir, p), "sha256")
	if err != nil {
		return err
	}
@ -3,8 +3,8 @@ package main
|
|||
import (
|
||||
"log"
|
||||
|
||||
"github.com/flynn/go-docopt"
|
||||
"github.com/flynn/go-tuf"
|
||||
"github.com/flynn/go-tuf/Godeps/_workspace/src/github.com/flynn/go-docopt"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
|
|
@ -3,8 +3,8 @@ package main
|
|||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/flynn/go-docopt"
|
||||
"github.com/flynn/go-tuf"
|
||||
"github.com/flynn/go-tuf/Godeps/_workspace/src/github.com/flynn/go-docopt"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/flynn/go-docopt"
|
||||
"github.com/flynn/go-tuf"
|
||||
"github.com/flynn/go-tuf/Godeps/_workspace/src/github.com/flynn/go-docopt"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
|
|
@ -4,8 +4,8 @@ import (
|
|||
"encoding/json"
|
||||
"os"
|
||||
|
||||
"github.com/flynn/go-docopt"
|
||||
"github.com/flynn/go-tuf"
|
||||
"github.com/flynn/go-tuf/Godeps/_workspace/src/github.com/flynn/go-docopt"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/flynn/go-docopt"
|
||||
"github.com/flynn/go-tuf"
|
||||
"github.com/flynn/go-tuf/Godeps/_workspace/src/github.com/flynn/go-docopt"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/flynn/go-docopt"
|
||||
"github.com/flynn/go-tuf"
|
||||
"github.com/flynn/go-tuf/Godeps/_workspace/src/github.com/flynn/go-docopt"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/flynn/go-docopt"
|
||||
"github.com/flynn/go-tuf"
|
||||
"github.com/flynn/go-tuf/Godeps/_workspace/src/github.com/flynn/go-docopt"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
|
|
@ -4,7 +4,7 @@ import (
|
|||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
. "gopkg.in/check.v1"
|
||||
. "github.com/endophage/go-tuf/Godeps/_workspace/src/gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
// Hook up gocheck into the "go test" runner.
|
||||
|
|
|
@@ -6,20 +6,14 @@ import (
	"encoding/json"
	"time"

	"github.com/tent/canonical-json-go"
	cjson "github.com/endophage/go-tuf/Godeps/_workspace/src/github.com/tent/canonical-json-go"
)

const KeyIDLength = sha256.Size * 2

type Signed struct {
	Signed     json.RawMessage `json:"signed"`
	Signatures []Signature     `json:"signatures"`
}

type Signature struct {
	KeyID     string   `json:"keyid"`
	Method    string   `json:"method"`
	Signature HexBytes `json:"sig"`
type KeyValue struct {
	Public HexBytes `json:"public"`
	// Private HexBytes `json:"private,omitempty"`
}

type Key struct {
@@ -37,9 +31,15 @@ func (k *Key) ID() string {
	return hex.EncodeToString(digest[:])
}

type KeyValue struct {
	Public HexBytes `json:"public"`
	//Private HexBytes `json:"private,omitempty"`
type Signed struct {
	Signed     json.RawMessage `json:"signed"`
	Signatures []Signature     `json:"signatures"`
}

type Signature struct {
	KeyID     string   `json:"keyid"`
	Method    string   `json:"method"`
	Signature HexBytes `json:"sig"`
}

func DefaultExpires(role string) time.Time {
@@ -82,6 +82,15 @@ type Role struct {
	Threshold int `json:"threshold"`
}

func (r *Role) ValidKey(id string) bool {
	for _, key := range r.KeyIDs {
		if key == id {
			return true
		}
	}
	return false
}

type Files map[string]FileMeta

type Snapshot struct {
@@ -12,8 +12,8 @@ import (
	"fmt"
	"io"

	"golang.org/x/crypto/nacl/secretbox"
	"golang.org/x/crypto/scrypt"
	"github.com/flynn/go-tuf/Godeps/_workspace/src/golang.org/x/crypto/nacl/secretbox"
	"github.com/flynn/go-tuf/Godeps/_workspace/src/golang.org/x/crypto/scrypt"
)

const saltSize = 32
@@ -4,7 +4,7 @@ import (
	"encoding/json"
	"testing"

	. "gopkg.in/check.v1"
	. "github.com/flynn/go-tuf/Godeps/_workspace/src/gopkg.in/check.v1"
)

// Hook up gocheck into the "go test" runner.
@@ -1,4 +1,4 @@
package tuf
package errors

import (
	"errors"
@@ -1,10 +1,8 @@
package keys

import (
	"crypto/rand"
	"errors"

	"github.com/agl/ed25519"
	"github.com/endophage/go-tuf/data"
)
@@ -18,79 +16,59 @@ var (
	ErrInvalidThreshold = errors.New("tuf: invalid role threshold")
)

func NewKey() (*Key, error) {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		return nil, err
	}
	k := &Key{
		Public:  *pub,
		Private: priv,
	}
	k.ID = k.Serialize().ID()
	return k, nil
}

type Key struct {
type PublicKey struct {
	data.Key
	ID string
	Public [ed25519.PublicKeySize]byte
	//Private *[ed25519.PrivateKeySize]byte
}

func (k *Key) Serialize() *data.Key {
	return &data.Key{
		Type:  "ed25519",
		Value: data.KeyValue{Public: k.Public[:]},
func NewPublicKey(keyType string, public []byte) *PublicKey {
	// create a copy so the private key is not included
	key := data.Key{
		Type:  keyType,
		Value: data.KeyValue{Public: public},
	}
	return &PublicKey{key, key.ID()}
}

func (k *Key) SerializePrivate() *data.Key {
	return &data.Key{
		Type: "ed25519",
		Value: data.KeyValue{
			Public:  k.Public[:],
			Private: k.Private[:],
		},
	}
}

type Role struct {
	KeyIDs    map[string]struct{}
	Threshold int
}

func (r *Role) ValidKey(id string) bool {
	_, ok := r.KeyIDs[id]
	return ok
type PrivateKey struct {
	PublicKey
	Private []byte
}

type DB struct {
	roles map[string]*Role
	keys  map[string]*Key
	types map[string]int
	roles map[string]*data.Role
	keys  map[string]*PublicKey
}

func NewDB() *DB {
	return &DB{
		roles: make(map[string]*Role),
		keys:  make(map[string]*Key),
		roles: make(map[string]*data.Role),
		keys:  make(map[string]*PublicKey),
	}
}

func (db *DB) AddKey(id string, k *data.Key) error {
	if k.Type != "ed25519" {
		return ErrWrongType
	}
	if id != k.ID() {
		return ErrWrongID
	}
	if len(k.Value.Public) != ed25519.PublicKeySize {
		return ErrInvalidKey
func (db *DB) AddKey(k *PublicKey) error {
	//if _, ok := db.types[k.Type]; !ok {
	//	return ErrWrongType
	//}
	//if len(k.Value.Public) != ed25519.PublicKeySize {
	//	return ErrInvalidKey
	//}

	key := PublicKey{
		Key: data.Key{
			Type: k.Type,
			Value: data.KeyValue{
				Public: make([]byte, len(k.Value.Public)),
			},
		},
		ID: k.ID,
	}

	var key Key
	copy(key.Public[:], k.Value.Public)
	key.ID = id
	db.keys[id] = &key
	copy(key.Value.Public, k.Value.Public)

	db.keys[k.ID] = &key
	return nil
}
@@ -114,25 +92,21 @@ func (db *DB) AddRole(name string, r *data.Role) error {
		return ErrInvalidThreshold
	}

	role := &Role{
		KeyIDs:    make(map[string]struct{}),
		Threshold: r.Threshold,
	}
	// validate all key ids have the correct length
	for _, id := range r.KeyIDs {
		if len(id) != data.KeyIDLength {
			return ErrInvalidKeyID
		}
		role.KeyIDs[id] = struct{}{}
	}

	db.roles[name] = role
	db.roles[name] = r
	return nil
}

func (db *DB) GetKey(id string) *Key {
func (db *DB) GetKey(id string) *PublicKey {
	return db.keys[id]
}

func (db *DB) GetRole(name string) *Role {
func (db *DB) GetRole(name string) *data.Role {
	return db.roles[name]
}
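For reference, a minimal sketch (not part of this commit; the role name and key bytes are illustrative placeholders) of how the reworked keys.DB above is populated now that it stores *PublicKey values and plain *data.Role entries:

	db := keys.NewDB()

	// NewPublicKey wraps raw public key bytes in a data.Key; the bytes here are placeholders.
	pub := keys.NewPublicKey("ed25519", []byte("placeholder-public-key-bytes"))
	if err := db.AddKey(pub); err != nil {
		// handle error
	}

	// Roles are stored as *data.Role directly instead of a package-private Role type.
	role := &data.Role{KeyIDs: []string{pub.ID}, Threshold: 1}
	if err := db.AddRole("root", role); err != nil {
		// handle error
	}

	// Lookups return the shared types, and data.Role.ValidKey replaces the old map lookup.
	_ = db.GetKey(pub.ID)
	_ = db.GetRole("root").ValidKey(pub.ID)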
@@ -10,6 +10,7 @@ import (
	"time"

	"github.com/endophage/go-tuf/data"
	"github.com/endophage/go-tuf/errors"
	"github.com/endophage/go-tuf/keys"
	"github.com/endophage/go-tuf/signed"
	"github.com/endophage/go-tuf/store"
@@ -37,13 +38,14 @@ var snapshotManifests = []string{
}

type Repo struct {
	trust          signed.Signer
	local          store.LocalStore
	hashAlgorithms []string
	meta           map[string]json.RawMessage
}

func NewRepo(local store.LocalStore, hashAlgorithms ...string) (*Repo, error) {
	r := &Repo{local: local, hashAlgorithms: hashAlgorithms}
func NewRepo(trust *signed.Signer, local store.LocalStore, hashAlgorithms ...string) (*Repo, error) {
	r := &Repo{trust: *trust, local: local, hashAlgorithms: hashAlgorithms}

	var err error
	r.meta, err = local.GetMeta()
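NewRepo now takes a signer in addition to the local store and the hash algorithms. A minimal sketch of the new call pattern, following the constructor usage that appears in the tests further down (the store path is illustrative):

	trust := signed.NewEd25519()      // in-process ed25519 trust service
	signer := signed.NewSigner(trust) // wrap it in a *signed.Signer

	local := store.FileSystemStore("/tmp/tuf-repo", nil) // illustrative path
	repo, err := tuf.NewRepo(signer, local, "sha256")
	if err != nil {
		// handle error
	}
	_ = repo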
@ -59,7 +61,7 @@ func (r *Repo) Init(consistentSnapshot bool) error {
|
|||
return err
|
||||
}
|
||||
if len(t.Targets) > 0 {
|
||||
return ErrInitNotAllowed
|
||||
return errors.ErrInitNotAllowed
|
||||
}
|
||||
root := data.NewRoot()
|
||||
root.ConsistentSnapshot = consistentSnapshot
|
||||
|
@ -72,8 +74,8 @@ func (r *Repo) db() (*keys.DB, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for id, k := range root.Keys {
|
||||
if err := db.AddKey(id, k); err != nil {
|
||||
for _, k := range root.Keys {
|
||||
if err := db.AddKey(&keys.PublicKey{*k, k.ID()}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
@ -94,7 +96,7 @@ func (r *Repo) root() (*data.Root, error) {
|
|||
if err := json.Unmarshal(rootJSON, s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
root := &data.Root{}
|
||||
root := data.NewRoot()
|
||||
if err := json.Unmarshal(s.Signed, root); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -155,11 +157,11 @@ func (r *Repo) GenKey(role string) (string, error) {
|
|||
|
||||
func (r *Repo) GenKeyWithExpires(keyRole string, expires time.Time) (string, error) {
|
||||
if !keys.ValidRole(keyRole) {
|
||||
return "", ErrInvalidRole{keyRole}
|
||||
return "", errors.ErrInvalidRole{keyRole}
|
||||
}
|
||||
|
||||
if !validExpires(expires) {
|
||||
return "", ErrInvalidExpires{expires}
|
||||
return "", errors.ErrInvalidExpires{expires}
|
||||
}
|
||||
|
||||
root, err := r.root()
|
||||
|
@ -167,11 +169,11 @@ func (r *Repo) GenKeyWithExpires(keyRole string, expires time.Time) (string, err
|
|||
return "", err
|
||||
}
|
||||
|
||||
key, err := keys.NewKey()
|
||||
key, err := r.trust.NewKey()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := r.local.SaveKey(keyRole, key.SerializePrivate()); err != nil {
|
||||
if err := r.local.SaveKey(keyRole, &key.Key); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
|
@ -182,7 +184,7 @@ func (r *Repo) GenKeyWithExpires(keyRole string, expires time.Time) (string, err
|
|||
}
|
||||
role.KeyIDs = append(role.KeyIDs, key.ID)
|
||||
|
||||
root.Keys[key.ID] = key.Serialize()
|
||||
root.Keys[key.ID] = &key.Key
|
||||
root.Expires = expires.Round(time.Second)
|
||||
root.Version++
|
||||
|
||||
|
@ -219,11 +221,11 @@ func (r *Repo) RevokeKey(role, id string) error {
|
|||
|
||||
func (r *Repo) RevokeKeyWithExpires(keyRole, id string, expires time.Time) error {
|
||||
if !keys.ValidRole(keyRole) {
|
||||
return ErrInvalidRole{keyRole}
|
||||
return errors.ErrInvalidRole{keyRole}
|
||||
}
|
||||
|
||||
if !validExpires(expires) {
|
||||
return ErrInvalidExpires{expires}
|
||||
return errors.ErrInvalidExpires{expires}
|
||||
}
|
||||
|
||||
root, err := r.root()
|
||||
|
@ -232,12 +234,12 @@ func (r *Repo) RevokeKeyWithExpires(keyRole, id string, expires time.Time) error
|
|||
}
|
||||
|
||||
if _, ok := root.Keys[id]; !ok {
|
||||
return ErrKeyNotFound{keyRole, id}
|
||||
return errors.ErrKeyNotFound{keyRole, id}
|
||||
}
|
||||
|
||||
role, ok := root.Roles[keyRole]
|
||||
if !ok {
|
||||
return ErrKeyNotFound{keyRole, id}
|
||||
return errors.ErrKeyNotFound{keyRole, id}
|
||||
}
|
||||
|
||||
keyIDs := make([]string, 0, len(role.KeyIDs))
|
||||
|
@ -248,7 +250,7 @@ func (r *Repo) RevokeKeyWithExpires(keyRole, id string, expires time.Time) error
|
|||
keyIDs = append(keyIDs, keyID)
|
||||
}
|
||||
if len(keyIDs) == len(role.KeyIDs) {
|
||||
return ErrKeyNotFound{keyRole, id}
|
||||
return errors.ErrKeyNotFound{keyRole, id}
|
||||
}
|
||||
role.KeyIDs = keyIDs
|
||||
|
||||
|
@ -265,7 +267,7 @@ func (r *Repo) setMeta(name string, meta interface{}) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s, err := signed.Marshal(meta, keys...)
|
||||
s, err := r.trust.Marshal(meta, keys...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -280,7 +282,7 @@ func (r *Repo) setMeta(name string, meta interface{}) error {
|
|||
func (r *Repo) Sign(name string) error {
|
||||
role := strings.TrimSuffix(name, ".json")
|
||||
if !keys.ValidRole(role) {
|
||||
return ErrInvalidRole{role}
|
||||
return errors.ErrInvalidRole{role}
|
||||
}
|
||||
|
||||
s, err := r.signedMeta(name)
|
||||
|
@ -293,12 +295,11 @@ func (r *Repo) Sign(name string) error {
|
|||
return err
|
||||
}
|
||||
if len(keys) == 0 {
|
||||
return ErrInsufficientKeys{name}
|
||||
}
|
||||
for _, k := range keys {
|
||||
signed.Sign(s, k)
|
||||
return errors.ErrInsufficientKeys{name}
|
||||
}
|
||||
|
||||
r.trust.Sign(s, keys...)
|
||||
|
||||
b, err := json.Marshal(s)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -313,13 +314,17 @@ func (r *Repo) Sign(name string) error {
|
|||
// been revoked are omitted), except for the root role in which case all local
|
||||
// keys are returned (revoked root keys still need to sign new root metadata so
|
||||
// clients can verify the new root.json and update their keys db accordingly).
|
||||
func (r *Repo) getKeys(name string) ([]*data.Key, error) {
|
||||
func (r *Repo) getKeys(name string) ([]*keys.PublicKey, error) {
|
||||
localKeys, err := r.local.GetKeys(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if name == "root" {
|
||||
return localKeys, nil
|
||||
rootkeys := make([]*keys.PublicKey, 0, len(localKeys))
|
||||
for _, key := range localKeys {
|
||||
rootkeys = append(rootkeys, &keys.PublicKey{*key, key.ID()})
|
||||
}
|
||||
return rootkeys, nil
|
||||
}
|
||||
db, err := r.db()
|
||||
if err != nil {
|
||||
|
@ -332,19 +337,19 @@ func (r *Repo) getKeys(name string) ([]*data.Key, error) {
|
|||
if len(role.KeyIDs) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
keys := make([]*data.Key, 0, len(role.KeyIDs))
|
||||
rolekeys := make([]*keys.PublicKey, 0, len(role.KeyIDs))
|
||||
for _, key := range localKeys {
|
||||
if _, ok := role.KeyIDs[key.ID()]; ok {
|
||||
keys = append(keys, key)
|
||||
if role.ValidKey(key.ID()) {
|
||||
rolekeys = append(rolekeys, &keys.PublicKey{*key, key.ID()})
|
||||
}
|
||||
}
|
||||
return keys, nil
|
||||
return rolekeys, nil
|
||||
}
|
||||
|
||||
func (r *Repo) signedMeta(name string) (*data.Signed, error) {
|
||||
b, ok := r.meta[name]
|
||||
if !ok {
|
||||
return nil, ErrMissingMetadata{name}
|
||||
return nil, errors.ErrMissingMetadata{name}
|
||||
}
|
||||
s := &data.Signed{}
|
||||
if err := json.Unmarshal(b, s); err != nil {
|
||||
|
@ -376,7 +381,7 @@ func (r *Repo) AddTargetWithExpires(path string, custom json.RawMessage, expires
|
|||
|
||||
func (r *Repo) AddTargetsWithExpires(paths []string, custom json.RawMessage, expires time.Time) error {
|
||||
if !validExpires(expires) {
|
||||
return ErrInvalidExpires{expires}
|
||||
return errors.ErrInvalidExpires{expires}
|
||||
}
|
||||
|
||||
t, err := r.targets()
|
||||
|
@ -388,7 +393,7 @@ func (r *Repo) AddTargetsWithExpires(paths []string, custom json.RawMessage, exp
|
|||
normalizedPaths[i] = util.NormalizeTarget(path)
|
||||
}
|
||||
if err := r.local.WalkStagedTargets(normalizedPaths, func(path string, meta data.FileMeta) (err error) {
|
||||
t.Targets[path] = meta
|
||||
t.Targets[util.NormalizeTarget(path)] = meta
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
|
@ -413,7 +418,7 @@ func (r *Repo) RemoveTargetWithExpires(path string, expires time.Time) error {
|
|||
// If paths is empty, all targets will be removed.
|
||||
func (r *Repo) RemoveTargetsWithExpires(paths []string, expires time.Time) error {
|
||||
if !validExpires(expires) {
|
||||
return ErrInvalidExpires{expires}
|
||||
return errors.ErrInvalidExpires{expires}
|
||||
}
|
||||
|
||||
t, err := r.targets()
|
||||
|
@ -447,7 +452,7 @@ func (r *Repo) Snapshot(t CompressionType) error {
|
|||
|
||||
func (r *Repo) SnapshotWithExpires(t CompressionType, expires time.Time) error {
|
||||
if !validExpires(expires) {
|
||||
return ErrInvalidExpires{expires}
|
||||
return errors.ErrInvalidExpires{expires}
|
||||
}
|
||||
|
||||
snapshot, err := r.snapshot()
|
||||
|
@ -480,7 +485,7 @@ func (r *Repo) Timestamp() error {
|
|||
|
||||
func (r *Repo) TimestampWithExpires(expires time.Time) error {
|
||||
if !validExpires(expires) {
|
||||
return ErrInvalidExpires{expires}
|
||||
return errors.ErrInvalidExpires{expires}
|
||||
}
|
||||
|
||||
db, err := r.db()
|
||||
|
@ -535,7 +540,7 @@ func (r *Repo) Commit() error {
|
|||
// check we have all the metadata
|
||||
for _, name := range topLevelManifests {
|
||||
if _, ok := r.meta[name]; !ok {
|
||||
return ErrMissingMetadata{name}
|
||||
return errors.ErrMissingMetadata{name}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -546,7 +551,7 @@ func (r *Repo) Commit() error {
|
|||
}
|
||||
for name, role := range root.Roles {
|
||||
if len(role.KeyIDs) < role.Threshold {
|
||||
return ErrNotEnoughKeys{name, len(role.KeyIDs), role.Threshold}
|
||||
return errors.ErrNotEnoughKeys{name, len(role.KeyIDs), role.Threshold}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -611,7 +616,7 @@ func (r *Repo) verifySignature(name string, db *keys.DB) error {
|
|||
}
|
||||
role := strings.TrimSuffix(name, ".json")
|
||||
if err := signed.Verify(s, role, 0, db); err != nil {
|
||||
return ErrInsufficientSignatures{name, err}
|
||||
return errors.ErrInsufficientSignatures{name, err}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -619,7 +624,7 @@ func (r *Repo) verifySignature(name string, db *keys.DB) error {
|
|||
func (r *Repo) fileMeta(name string) (data.FileMeta, error) {
|
||||
b, ok := r.meta[name]
|
||||
if !ok {
|
||||
return data.FileMeta{}, ErrMissingMetadata{name}
|
||||
return data.FileMeta{}, errors.ErrMissingMetadata{name}
|
||||
}
|
||||
return util.GenerateFileMeta(bytes.NewReader(b), r.hashAlgorithms...)
|
||||
}
|
||||
|
|
|
@ -10,12 +10,12 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/agl/ed25519"
|
||||
"github.com/endophage/go-tuf/Godeps/_workspace/src/github.com/agl/ed25519"
|
||||
. "github.com/endophage/go-tuf/Godeps/_workspace/src/gopkg.in/check.v1"
|
||||
"github.com/endophage/go-tuf/data"
|
||||
"github.com/endophage/go-tuf/store"
|
||||
. "gopkg.in/check.v1"
|
||||
// "github.com/endophage/go-tuf/encrypted"
|
||||
"github.com/endophage/go-tuf/keys"
|
||||
tuferr "github.com/endophage/go-tuf/errors"
|
||||
"github.com/endophage/go-tuf/signed"
|
||||
"github.com/endophage/go-tuf/util"
|
||||
)
|
||||
|
@ -28,6 +28,9 @@ type RepoSuite struct{}
|
|||
var _ = Suite(&RepoSuite{})
|
||||
|
||||
func (RepoSuite) TestNewRepo(c *C) {
|
||||
trust := signed.NewEd25519()
|
||||
signer := signed.NewSigner(trust)
|
||||
|
||||
meta := map[string]json.RawMessage{
|
||||
"root.json": []byte(`{
|
||||
"signed": {
|
||||
|
@ -75,7 +78,7 @@ func (RepoSuite) TestNewRepo(c *C) {
|
|||
local.SetMeta(k, v)
|
||||
}
|
||||
|
||||
r, err := NewRepo(local)
|
||||
r, err := NewRepo(signer, local, "sha256")
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
root, err := r.root()
|
||||
|
@ -108,6 +111,9 @@ func (RepoSuite) TestNewRepo(c *C) {
|
|||
}
|
||||
|
||||
func (RepoSuite) TestInit(c *C) {
|
||||
trust := signed.NewEd25519()
|
||||
signer := signed.NewSigner(trust)
|
||||
|
||||
db := util.GetSqliteDB()
|
||||
defer util.FlushDB(db)
|
||||
local := store.DBStore(
|
||||
|
@ -117,7 +123,7 @@ func (RepoSuite) TestInit(c *C) {
|
|||
)
|
||||
local.AddBlob("/foo.txt", util.SampleMeta())
|
||||
|
||||
r, err := NewRepo(local)
|
||||
r, err := NewRepo(signer, local, "sha256")
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
// Init() sets root.ConsistentSnapshot
|
||||
|
@ -130,7 +136,7 @@ func (RepoSuite) TestInit(c *C) {
|
|||
|
||||
// Init() fails if targets have been added
|
||||
c.Assert(r.AddTarget("foo.txt", nil), IsNil)
|
||||
c.Assert(r.Init(true), Equals, ErrInitNotAllowed)
|
||||
c.Assert(r.Init(true), Equals, tuferr.ErrInitNotAllowed)
|
||||
}
|
||||
|
||||
func genKey(c *C, r *Repo, role string) string {
|
||||
|
@ -140,15 +146,18 @@ func genKey(c *C, r *Repo, role string) string {
|
|||
}
|
||||
|
||||
func (RepoSuite) TestGenKey(c *C) {
|
||||
trust := signed.NewEd25519()
|
||||
signer := signed.NewSigner(trust)
|
||||
|
||||
sqldb := util.GetSqliteDB()
|
||||
defer util.FlushDB(sqldb)
|
||||
local := store.DBStore(sqldb, "")
|
||||
r, err := NewRepo(local)
|
||||
r, err := NewRepo(signer, local, "sha256")
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
// generate a key for an unknown role
|
||||
_, err = r.GenKey("foo")
|
||||
c.Assert(err, Equals, ErrInvalidRole{"foo"})
|
||||
c.Assert(err, Equals, tuferr.ErrInvalidRole{"foo"})
|
||||
|
||||
// generate a root key
|
||||
id := genKey(c, r, "root")
|
||||
|
@ -173,7 +182,7 @@ func (RepoSuite) TestGenKey(c *C) {
|
|||
}
|
||||
c.Assert(k.ID(), Equals, keyID)
|
||||
c.Assert(k.Value.Public, HasLen, ed25519.PublicKeySize)
|
||||
c.Assert(k.Value.Private, IsNil)
|
||||
//c.Assert(k.Value.Private, IsNil)
|
||||
|
||||
// check root key + role are in db
|
||||
db, err := r.db()
|
||||
|
@ -182,7 +191,7 @@ func (RepoSuite) TestGenKey(c *C) {
|
|||
c.Assert(rootKey, NotNil)
|
||||
c.Assert(rootKey.ID, Equals, keyID)
|
||||
role := db.GetRole("root")
|
||||
c.Assert(role.KeyIDs, DeepEquals, map[string]struct{}{keyID: {}})
|
||||
c.Assert(role.KeyIDs, DeepEquals, []string{keyID})
|
||||
|
||||
// check the key was saved correctly
|
||||
localKeys, err := local.GetKeys("root")
|
||||
|
@ -195,8 +204,8 @@ func (RepoSuite) TestGenKey(c *C) {
|
|||
c.Assert(err, IsNil)
|
||||
c.Assert(rootKeys, HasLen, 1)
|
||||
c.Assert(rootKeys[0].ID(), Equals, rootKey.ID)
|
||||
c.Assert(rootKeys[0].Value.Public, DeepEquals, rootKey.Serialize().Value.Public)
|
||||
c.Assert(rootKeys[0].Value.Private, IsNil)
|
||||
c.Assert(rootKeys[0].Value.Public, DeepEquals, rootKey.Key.Value.Public)
|
||||
//c.Assert(rootKeys[0].Value.Private, IsNil)
|
||||
|
||||
// generate two targets keys
|
||||
genKey(c, r, "targets")
|
||||
|
@ -212,11 +221,11 @@ func (RepoSuite) TestGenKey(c *C) {
|
|||
c.Fatal("missing targets role")
|
||||
}
|
||||
c.Assert(targetsRole.KeyIDs, HasLen, 2)
|
||||
targetKeyIDs := make(map[string]struct{}, 2)
|
||||
targetKeyIDs := make([]string, 0, 2)
|
||||
db, err = r.db()
|
||||
c.Assert(err, IsNil)
|
||||
for _, id := range targetsRole.KeyIDs {
|
||||
targetKeyIDs[id] = struct{}{}
|
||||
targetKeyIDs = append(targetKeyIDs, id)
|
||||
_, ok = root.Keys[id]
|
||||
if !ok {
|
||||
c.Fatal("missing key")
|
||||
|
@ -269,17 +278,20 @@ func (RepoSuite) TestGenKey(c *C) {
|
|||
}
|
||||
|
||||
func (RepoSuite) TestRevokeKey(c *C) {
|
||||
trust := signed.NewEd25519()
|
||||
signer := signed.NewSigner(trust)
|
||||
|
||||
db := util.GetSqliteDB()
|
||||
defer util.FlushDB(db)
|
||||
local := store.DBStore(db, "")
|
||||
r, err := NewRepo(local)
|
||||
r, err := NewRepo(signer, local, "sha256")
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
// revoking a key for an unknown role returns ErrInvalidRole
|
||||
c.Assert(r.RevokeKey("foo", ""), DeepEquals, ErrInvalidRole{"foo"})
|
||||
c.Assert(r.RevokeKey("foo", ""), DeepEquals, tuferr.ErrInvalidRole{"foo"})
|
||||
|
||||
// revoking a key which doesn't exist returns ErrKeyNotFound
|
||||
c.Assert(r.RevokeKey("root", "nonexistent"), DeepEquals, ErrKeyNotFound{"root", "nonexistent"})
|
||||
c.Assert(r.RevokeKey("root", "nonexistent"), DeepEquals, tuferr.ErrKeyNotFound{"root", "nonexistent"})
|
||||
|
||||
// generate keys
|
||||
genKey(c, r, "root")
|
||||
|
@ -319,16 +331,19 @@ func (RepoSuite) TestRevokeKey(c *C) {
|
|||
}
|
||||
|
||||
func (RepoSuite) TestSign(c *C) {
|
||||
trust := signed.NewEd25519()
|
||||
signer := signed.NewSigner(trust)
|
||||
|
||||
baseMeta := map[string]json.RawMessage{"root.json": []byte(`{"signed":{},"signatures":[]}`)}
|
||||
db := util.GetSqliteDB()
|
||||
defer util.FlushDB(db)
|
||||
local := store.DBStore(db, "")
|
||||
local.SetMeta("root.json", baseMeta["root.json"])
|
||||
r, err := NewRepo(local)
|
||||
r, err := NewRepo(signer, local, "sha256")
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
// signing with no keys returns ErrInsufficientKeys
|
||||
c.Assert(r.Sign("root.json"), Equals, ErrInsufficientKeys{"root.json"})
|
||||
c.Assert(r.Sign("root.json"), Equals, tuferr.ErrInsufficientKeys{"root.json"})
|
||||
|
||||
checkSigIDs := func(keyIDs ...string) {
|
||||
meta, err := local.GetMeta()
|
||||
|
@ -341,8 +356,6 @@ func (RepoSuite) TestSign(c *C) {
|
|||
}
|
||||
s := &data.Signed{}
|
||||
c.Assert(json.Unmarshal(rootJSON, s), IsNil)
|
||||
fmt.Println("Len Signatures", len(s.Signatures))
|
||||
fmt.Println("Len KeyIDs", len(keyIDs))
|
||||
c.Assert(s.Signatures, HasLen, len(keyIDs))
|
||||
for i, id := range keyIDs {
|
||||
c.Assert(s.Signatures[i].KeyID, Equals, id)
|
||||
|
@ -350,53 +363,58 @@ func (RepoSuite) TestSign(c *C) {
|
|||
}
|
||||
|
||||
// signing with an available key generates a signature
|
||||
key, err := keys.NewKey()
|
||||
//key, err := signer.NewKey()
|
||||
kID, err := r.GenKey("root")
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(local.SaveKey("root", key.SerializePrivate()), IsNil)
|
||||
//c.Assert(local.SaveKey("root", key.SerializePrivate()), IsNil)
|
||||
c.Assert(r.Sign("root.json"), IsNil)
|
||||
checkSigIDs(key.ID)
|
||||
checkSigIDs(kID)
|
||||
|
||||
// signing again does not generate a duplicate signature
|
||||
c.Assert(r.Sign("root.json"), IsNil)
|
||||
checkSigIDs(key.ID)
|
||||
checkSigIDs(kID)
|
||||
|
||||
// signing with a new available key generates another signature
|
||||
newKey, err := keys.NewKey()
|
||||
//newKey, err := signer.NewKey()
|
||||
newkID, err := r.GenKey("root")
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(local.SaveKey("root", newKey.SerializePrivate()), IsNil)
|
||||
//c.Assert(local.SaveKey("root", newKey.SerializePrivate()), IsNil)
|
||||
c.Assert(r.Sign("root.json"), IsNil)
|
||||
checkSigIDs(key.ID, newKey.ID)
|
||||
checkSigIDs(kID, newkID)
|
||||
}
|
||||
|
||||
func (RepoSuite) TestCommit(c *C) {
|
||||
trust := signed.NewEd25519()
|
||||
signer := signed.NewSigner(trust)
|
||||
|
||||
//files := map[string][]byte{"/foo.txt": []byte("foo"), "/bar.txt": []byte("bar")}
|
||||
db := util.GetSqliteDB()
|
||||
defer util.FlushDB(db)
|
||||
local := store.DBStore(db, "")
|
||||
r, err := NewRepo(local)
|
||||
r, err := NewRepo(signer, local, "sha256")
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
// commit without root.json
|
||||
c.Assert(r.Commit(), DeepEquals, ErrMissingMetadata{"root.json"})
|
||||
c.Assert(r.Commit(), DeepEquals, tuferr.ErrMissingMetadata{"root.json"})
|
||||
|
||||
// commit without targets.json
|
||||
genKey(c, r, "root")
|
||||
c.Assert(r.Commit(), DeepEquals, ErrMissingMetadata{"targets.json"})
|
||||
c.Assert(r.Commit(), DeepEquals, tuferr.ErrMissingMetadata{"targets.json"})
|
||||
|
||||
// commit without snapshot.json
|
||||
genKey(c, r, "targets")
|
||||
local.AddBlob("/foo.txt", util.SampleMeta())
|
||||
c.Assert(r.AddTarget("foo.txt", nil), IsNil)
|
||||
c.Assert(r.Commit(), DeepEquals, ErrMissingMetadata{"snapshot.json"})
|
||||
c.Assert(r.Commit(), DeepEquals, tuferr.ErrMissingMetadata{"snapshot.json"})
|
||||
|
||||
// commit without timestamp.json
|
||||
genKey(c, r, "snapshot")
|
||||
c.Assert(r.Snapshot(CompressionTypeNone), IsNil)
|
||||
c.Assert(r.Commit(), DeepEquals, ErrMissingMetadata{"timestamp.json"})
|
||||
c.Assert(r.Commit(), DeepEquals, tuferr.ErrMissingMetadata{"timestamp.json"})
|
||||
|
||||
// commit with timestamp.json but no timestamp key
|
||||
c.Assert(r.Timestamp(), IsNil)
|
||||
c.Assert(r.Commit(), DeepEquals, ErrInsufficientSignatures{"timestamp.json", signed.ErrNoSignatures})
|
||||
c.Assert(r.Commit(), DeepEquals, tuferr.ErrInsufficientSignatures{"timestamp.json", signed.ErrNoSignatures})
|
||||
|
||||
// commit success
|
||||
genKey(c, r, "timestamp")
|
||||
|
@ -436,7 +454,7 @@ func (RepoSuite) TestCommit(c *C) {
|
|||
c.Assert(r.RevokeKey("timestamp", role.KeyIDs[0]), IsNil)
|
||||
c.Assert(r.Snapshot(CompressionTypeNone), IsNil)
|
||||
c.Assert(r.Timestamp(), IsNil)
|
||||
c.Assert(r.Commit(), DeepEquals, ErrNotEnoughKeys{"timestamp", 0, 1})
|
||||
c.Assert(r.Commit(), DeepEquals, tuferr.ErrNotEnoughKeys{"timestamp", 0, 1})
|
||||
}
|
||||
|
||||
type tmpDir struct {
|
||||
|
@ -512,147 +530,154 @@ func (t *tmpDir) readFile(path string) []byte {
|
|||
return data
|
||||
}
|
||||
|
||||
//func (RepoSuite) TestCommitFileSystem(c *C) {
|
||||
// tmp := newTmpDir(c)
|
||||
// local := FileSystemStore(tmp.path, nil)
|
||||
// r, err := NewRepo(local)
|
||||
// c.Assert(err, IsNil)
|
||||
//
|
||||
// // don't use consistent snapshots to make the checks simpler
|
||||
// c.Assert(r.Init(false), IsNil)
|
||||
//
|
||||
// // generating keys should stage root.json and create repo dirs
|
||||
// genKey(c, r, "root")
|
||||
// genKey(c, r, "targets")
|
||||
// genKey(c, r, "snapshot")
|
||||
// genKey(c, r, "timestamp")
|
||||
// tmp.assertExists("staged/root.json")
|
||||
// tmp.assertEmpty("repository")
|
||||
// tmp.assertEmpty("staged/targets")
|
||||
//
|
||||
// // adding a non-existent file fails
|
||||
// c.Assert(r.AddTarget("foo.txt", nil), Equals, ErrFileNotFound{tmp.stagedTargetPath("foo.txt")})
|
||||
// tmp.assertEmpty("repository")
|
||||
//
|
||||
// // adding a file stages targets.json
|
||||
// tmp.writeStagedTarget("foo.txt", "foo")
|
||||
// c.Assert(r.AddTarget("foo.txt", nil), IsNil)
|
||||
// tmp.assertExists("staged/targets.json")
|
||||
// tmp.assertEmpty("repository")
|
||||
// t, err := r.targets()
|
||||
// c.Assert(err, IsNil)
|
||||
// c.Assert(t.Targets, HasLen, 1)
|
||||
// if _, ok := t.Targets["/foo.txt"]; !ok {
|
||||
// c.Fatal("missing target file: /foo.txt")
|
||||
// }
|
||||
//
|
||||
// // Snapshot() stages snapshot.json
|
||||
// c.Assert(r.Snapshot(CompressionTypeNone), IsNil)
|
||||
// tmp.assertExists("staged/snapshot.json")
|
||||
// tmp.assertEmpty("repository")
|
||||
//
|
||||
// // Timestamp() stages timestamp.json
|
||||
// c.Assert(r.Timestamp(), IsNil)
|
||||
// tmp.assertExists("staged/timestamp.json")
|
||||
// tmp.assertEmpty("repository")
|
||||
//
|
||||
// // committing moves files from staged -> repository
|
||||
// c.Assert(r.Commit(), IsNil)
|
||||
// tmp.assertExists("repository/root.json")
|
||||
// tmp.assertExists("repository/targets.json")
|
||||
// tmp.assertExists("repository/snapshot.json")
|
||||
// tmp.assertExists("repository/timestamp.json")
|
||||
// tmp.assertFileContent("repository/targets/foo.txt", "foo")
|
||||
// tmp.assertEmpty("staged/targets")
|
||||
// tmp.assertEmpty("staged")
|
||||
//
|
||||
// // adding and committing another file moves it into repository/targets
|
||||
// tmp.writeStagedTarget("path/to/bar.txt", "bar")
|
||||
// c.Assert(r.AddTarget("path/to/bar.txt", nil), IsNil)
|
||||
// tmp.assertExists("staged/targets.json")
|
||||
// c.Assert(r.Snapshot(CompressionTypeNone), IsNil)
|
||||
// c.Assert(r.Timestamp(), IsNil)
|
||||
// c.Assert(r.Commit(), IsNil)
|
||||
// tmp.assertFileContent("repository/targets/foo.txt", "foo")
|
||||
// tmp.assertFileContent("repository/targets/path/to/bar.txt", "bar")
|
||||
// tmp.assertEmpty("staged/targets")
|
||||
// tmp.assertEmpty("staged")
|
||||
//
|
||||
// // removing and committing a file removes it from repository/targets
|
||||
// c.Assert(r.RemoveTarget("foo.txt"), IsNil)
|
||||
// tmp.assertExists("staged/targets.json")
|
||||
// c.Assert(r.Snapshot(CompressionTypeNone), IsNil)
|
||||
// c.Assert(r.Timestamp(), IsNil)
|
||||
// c.Assert(r.Commit(), IsNil)
|
||||
// tmp.assertNotExist("repository/targets/foo.txt")
|
||||
// tmp.assertFileContent("repository/targets/path/to/bar.txt", "bar")
|
||||
// tmp.assertEmpty("staged/targets")
|
||||
// tmp.assertEmpty("staged")
|
||||
//}
|
||||
//
|
||||
//func (RepoSuite) TestConsistentSnapshot(c *C) {
|
||||
// tmp := newTmpDir(c)
|
||||
// local := FileSystemStore(tmp.path, nil)
|
||||
// r, err := NewRepo(local, "sha512", "sha256")
|
||||
// c.Assert(err, IsNil)
|
||||
//
|
||||
// genKey(c, r, "root")
|
||||
// genKey(c, r, "targets")
|
||||
// genKey(c, r, "snapshot")
|
||||
// genKey(c, r, "timestamp")
|
||||
// tmp.writeStagedTarget("foo.txt", "foo")
|
||||
// c.Assert(r.AddTarget("foo.txt", nil), IsNil)
|
||||
// tmp.writeStagedTarget("dir/bar.txt", "bar")
|
||||
// c.Assert(r.AddTarget("dir/bar.txt", nil), IsNil)
|
||||
// c.Assert(r.Snapshot(CompressionTypeNone), IsNil)
|
||||
// c.Assert(r.Timestamp(), IsNil)
|
||||
// c.Assert(r.Commit(), IsNil)
|
||||
//
|
||||
// hashes, err := r.fileHashes()
|
||||
// c.Assert(err, IsNil)
|
||||
//
|
||||
// // root.json, targets.json and snapshot.json should exist at both hashed and unhashed paths
|
||||
// for _, path := range []string{"root.json", "targets.json", "snapshot.json"} {
|
||||
// repoPath := filepath.Join("repository", path)
|
||||
// tmp.assertHashedFilesExist(repoPath, hashes[path])
|
||||
// tmp.assertExists(repoPath)
|
||||
// }
|
||||
//
|
||||
// // target files should exist at hashed but not unhashed paths
|
||||
// for _, path := range []string{"targets/foo.txt", "targets/dir/bar.txt"} {
|
||||
// repoPath := filepath.Join("repository", path)
|
||||
// tmp.assertHashedFilesExist(repoPath, hashes[path])
|
||||
// tmp.assertNotExist(repoPath)
|
||||
// }
|
||||
//
|
||||
// // timestamp.json should exist at an unhashed path (it doesn't have a hash)
|
||||
// tmp.assertExists("repository/timestamp.json")
|
||||
//
|
||||
// // removing a file should remove the hashed files
|
||||
// c.Assert(r.RemoveTarget("foo.txt"), IsNil)
|
||||
// c.Assert(r.Snapshot(CompressionTypeNone), IsNil)
|
||||
// c.Assert(r.Timestamp(), IsNil)
|
||||
// c.Assert(r.Commit(), IsNil)
|
||||
// tmp.assertHashedFilesNotExist("repository/targets/foo.txt", hashes["targets/foo.txt"])
|
||||
// tmp.assertNotExist("repository/targets/foo.txt")
|
||||
//
|
||||
// // targets should be returned by new repo
|
||||
// newRepo, err := NewRepo(local, "sha512", "sha256")
|
||||
// c.Assert(err, IsNil)
|
||||
// t, err := newRepo.targets()
|
||||
// c.Assert(err, IsNil)
|
||||
// c.Assert(t.Targets, HasLen, 1)
|
||||
// if _, ok := t.Targets["/dir/bar.txt"]; !ok {
|
||||
// c.Fatal("missing targets file: dir/bar.txt")
|
||||
// }
|
||||
//}
|
||||
func (RepoSuite) TestCommitFileSystem(c *C) {
|
||||
trust := signed.NewEd25519()
|
||||
signer := signed.NewSigner(trust)
|
||||
tmp := newTmpDir(c)
|
||||
local := store.FileSystemStore(tmp.path, nil)
|
||||
r, err := NewRepo(signer, local, "sha256")
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
// don't use consistent snapshots to make the checks simpler
|
||||
c.Assert(r.Init(false), IsNil)
|
||||
|
||||
// generating keys should stage root.json and create repo dirs
|
||||
genKey(c, r, "root")
|
||||
genKey(c, r, "targets")
|
||||
genKey(c, r, "snapshot")
|
||||
genKey(c, r, "timestamp")
|
||||
tmp.assertExists("staged/root.json")
|
||||
tmp.assertEmpty("repository")
|
||||
tmp.assertEmpty("staged/targets")
|
||||
|
||||
// adding a non-existent file fails
|
||||
c.Assert(r.AddTarget("foo.txt", nil), Equals, tuferr.ErrFileNotFound{tmp.stagedTargetPath("foo.txt")})
|
||||
tmp.assertEmpty("repository")
|
||||
|
||||
// adding a file stages targets.json
|
||||
tmp.writeStagedTarget("foo.txt", "foo")
|
||||
c.Assert(r.AddTarget("foo.txt", nil), IsNil)
|
||||
tmp.assertExists("staged/targets.json")
|
||||
tmp.assertEmpty("repository")
|
||||
t, err := r.targets()
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(t.Targets, HasLen, 1)
|
||||
if _, ok := t.Targets["/foo.txt"]; !ok {
|
||||
c.Fatal("missing target file: /foo.txt")
|
||||
}
|
||||
|
||||
// Snapshot() stages snapshot.json
|
||||
c.Assert(r.Snapshot(CompressionTypeNone), IsNil)
|
||||
tmp.assertExists("staged/snapshot.json")
|
||||
tmp.assertEmpty("repository")
|
||||
|
||||
// Timestamp() stages timestamp.json
|
||||
c.Assert(r.Timestamp(), IsNil)
|
||||
tmp.assertExists("staged/timestamp.json")
|
||||
tmp.assertEmpty("repository")
|
||||
|
||||
// committing moves files from staged -> repository
|
||||
c.Assert(r.Commit(), IsNil)
|
||||
tmp.assertExists("repository/root.json")
|
||||
tmp.assertExists("repository/targets.json")
|
||||
tmp.assertExists("repository/snapshot.json")
|
||||
tmp.assertExists("repository/timestamp.json")
|
||||
tmp.assertFileContent("repository/targets/foo.txt", "foo")
|
||||
tmp.assertEmpty("staged/targets")
|
||||
tmp.assertEmpty("staged")
|
||||
|
||||
// adding and committing another file moves it into repository/targets
|
||||
tmp.writeStagedTarget("path/to/bar.txt", "bar")
|
||||
c.Assert(r.AddTarget("path/to/bar.txt", nil), IsNil)
|
||||
tmp.assertExists("staged/targets.json")
|
||||
c.Assert(r.Snapshot(CompressionTypeNone), IsNil)
|
||||
c.Assert(r.Timestamp(), IsNil)
|
||||
c.Assert(r.Commit(), IsNil)
|
||||
tmp.assertFileContent("repository/targets/foo.txt", "foo")
|
||||
tmp.assertFileContent("repository/targets/path/to/bar.txt", "bar")
|
||||
tmp.assertEmpty("staged/targets")
|
||||
tmp.assertEmpty("staged")
|
||||
|
||||
// removing and committing a file removes it from repository/targets
|
||||
c.Assert(r.RemoveTarget("foo.txt"), IsNil)
|
||||
tmp.assertExists("staged/targets.json")
|
||||
c.Assert(r.Snapshot(CompressionTypeNone), IsNil)
|
||||
c.Assert(r.Timestamp(), IsNil)
|
||||
c.Assert(r.Commit(), IsNil)
|
||||
tmp.assertNotExist("repository/targets/foo.txt")
|
||||
tmp.assertFileContent("repository/targets/path/to/bar.txt", "bar")
|
||||
tmp.assertEmpty("staged/targets")
|
||||
tmp.assertEmpty("staged")
|
||||
}
|
||||
|
||||
func (RepoSuite) TestConsistentSnapshot(c *C) {
|
||||
trust := signed.NewEd25519()
|
||||
signer := signed.NewSigner(trust)
|
||||
tmp := newTmpDir(c)
|
||||
local := store.FileSystemStore(tmp.path, nil)
|
||||
r, err := NewRepo(signer, local, "sha512", "sha256")
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
genKey(c, r, "root")
|
||||
genKey(c, r, "targets")
|
||||
genKey(c, r, "snapshot")
|
||||
genKey(c, r, "timestamp")
|
||||
tmp.writeStagedTarget("foo.txt", "foo")
|
||||
c.Assert(r.AddTarget("foo.txt", nil), IsNil)
|
||||
tmp.writeStagedTarget("dir/bar.txt", "bar")
|
||||
c.Assert(r.AddTarget("dir/bar.txt", nil), IsNil)
|
||||
c.Assert(r.Snapshot(CompressionTypeNone), IsNil)
|
||||
c.Assert(r.Timestamp(), IsNil)
|
||||
c.Assert(r.Commit(), IsNil)
|
||||
|
||||
hashes, err := r.fileHashes()
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
// root.json, targets.json and snapshot.json should exist at both hashed and unhashed paths
|
||||
for _, path := range []string{"root.json", "targets.json", "snapshot.json"} {
|
||||
repoPath := filepath.Join("repository", path)
|
||||
tmp.assertHashedFilesExist(repoPath, hashes[path])
|
||||
tmp.assertExists(repoPath)
|
||||
}
|
||||
|
||||
// target files should exist at hashed but not unhashed paths
|
||||
for _, path := range []string{"targets/foo.txt", "targets/dir/bar.txt"} {
|
||||
repoPath := filepath.Join("repository", path)
|
||||
tmp.assertHashedFilesExist(repoPath, hashes[path])
|
||||
tmp.assertNotExist(repoPath)
|
||||
}
|
||||
|
||||
// timestamp.json should exist at an unhashed path (it doesn't have a hash)
|
||||
tmp.assertExists("repository/timestamp.json")
|
||||
|
||||
// removing a file should remove the hashed files
|
||||
c.Assert(r.RemoveTarget("foo.txt"), IsNil)
|
||||
c.Assert(r.Snapshot(CompressionTypeNone), IsNil)
|
||||
c.Assert(r.Timestamp(), IsNil)
|
||||
c.Assert(r.Commit(), IsNil)
|
||||
tmp.assertHashedFilesNotExist("repository/targets/foo.txt", hashes["targets/foo.txt"])
|
||||
tmp.assertNotExist("repository/targets/foo.txt")
|
||||
|
||||
// targets should be returned by new repo
|
||||
newRepo, err := NewRepo(signer, local, "sha512", "sha256")
|
||||
c.Assert(err, IsNil)
|
||||
t, err := newRepo.targets()
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(t.Targets, HasLen, 1)
|
||||
if _, ok := t.Targets["/dir/bar.txt"]; !ok {
|
||||
c.Fatal("missing targets file: dir/bar.txt")
|
||||
}
|
||||
}
|
||||
|
||||
func (RepoSuite) TestExpiresAndVersion(c *C) {
|
||||
trust := signed.NewEd25519()
|
||||
signer := signed.NewSigner(trust)
|
||||
|
||||
//files := map[string][]byte{"/foo.txt": []byte("foo")}
|
||||
db := util.GetSqliteDB()
|
||||
defer util.FlushDB(db)
|
||||
local := store.DBStore(db, "")
|
||||
r, err := NewRepo(local)
|
||||
r, err := NewRepo(signer, local, "sha256")
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
past := time.Now().Add(-1 * time.Second)
|
||||
|
@ -664,7 +689,7 @@ func (RepoSuite) TestExpiresAndVersion(c *C) {
|
|||
r.SnapshotWithExpires(CompressionTypeNone, past),
|
||||
r.TimestampWithExpires(past),
|
||||
} {
|
||||
c.Assert(err, Equals, ErrInvalidExpires{past})
|
||||
c.Assert(err, Equals, tuferr.ErrInvalidExpires{past})
|
||||
}
|
||||
|
||||
genKey(c, r, "root")
|
||||
|
@ -736,6 +761,9 @@ func (RepoSuite) TestExpiresAndVersion(c *C) {
|
|||
}
|
||||
|
||||
func (RepoSuite) TestHashAlgorithm(c *C) {
|
||||
trust := signed.NewEd25519()
|
||||
signer := signed.NewSigner(trust)
|
||||
|
||||
//files := map[string][]byte{"/foo.txt": []byte("foo")}
|
||||
db := util.GetSqliteDB()
|
||||
defer util.FlushDB(db)
|
||||
|
@ -750,7 +778,7 @@ func (RepoSuite) TestHashAlgorithm(c *C) {
|
|||
{args: []string{"sha512", "sha256"}},
|
||||
} {
|
||||
// generate metadata with specific hash functions
|
||||
r, err := NewRepo(local, test.args...)
|
||||
r, err := NewRepo(signer, local, test.args...)
|
||||
c.Assert(err, IsNil)
|
||||
genKey(c, r, "root")
|
||||
genKey(c, r, "targets")
|
||||
|
@ -851,67 +879,69 @@ func testPassphraseFunc(p []byte) util.PassphraseFunc {
|
|||
// c.Assert(insecureStore.SaveKey("targets", key.SerializePrivate()), IsNil)
|
||||
// assertKeys("targets", false, []*keys.Key{key})
|
||||
//}
|
||||
//
|
||||
//func (RepoSuite) TestManageMultipleTargets(c *C) {
|
||||
// tmp := newTmpDir(c)
|
||||
// local := FileSystemStore(tmp.path, nil)
|
||||
// r, err := NewRepo(local)
|
||||
// c.Assert(err, IsNil)
|
||||
// // don't use consistent snapshots to make the checks simpler
|
||||
// c.Assert(r.Init(false), IsNil)
|
||||
// genKey(c, r, "root")
|
||||
// genKey(c, r, "targets")
|
||||
// genKey(c, r, "snapshot")
|
||||
// genKey(c, r, "timestamp")
|
||||
//
|
||||
// assertRepoTargets := func(paths ...string) {
|
||||
// t, err := r.targets()
|
||||
// c.Assert(err, IsNil)
|
||||
// for _, path := range paths {
|
||||
// if _, ok := t.Targets[path]; !ok {
|
||||
// c.Fatalf("missing target file: %s", path)
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// // adding and committing multiple files moves correct targets from staged -> repository
|
||||
// tmp.writeStagedTarget("foo.txt", "foo")
|
||||
// tmp.writeStagedTarget("bar.txt", "bar")
|
||||
// c.Assert(r.AddTargets([]string{"foo.txt", "bar.txt"}, nil), IsNil)
|
||||
// c.Assert(r.Snapshot(CompressionTypeNone), IsNil)
|
||||
// c.Assert(r.Timestamp(), IsNil)
|
||||
// c.Assert(r.Commit(), IsNil)
|
||||
// assertRepoTargets("/foo.txt", "/bar.txt")
|
||||
// tmp.assertExists("repository/targets/foo.txt")
|
||||
// tmp.assertExists("repository/targets/bar.txt")
|
||||
//
|
||||
// // adding all targets moves them all from staged -> repository
|
||||
// count := 10
|
||||
// files := make([]string, count)
|
||||
// for i := 0; i < count; i++ {
|
||||
// files[i] = fmt.Sprintf("/file%d.txt", i)
|
||||
// tmp.writeStagedTarget(files[i], "data")
|
||||
// }
|
||||
// c.Assert(r.AddTargets(nil, nil), IsNil)
|
||||
// c.Assert(r.Snapshot(CompressionTypeNone), IsNil)
|
||||
// c.Assert(r.Timestamp(), IsNil)
|
||||
// c.Assert(r.Commit(), IsNil)
|
||||
// tmp.assertExists("repository/targets/foo.txt")
|
||||
// tmp.assertExists("repository/targets/bar.txt")
|
||||
// assertRepoTargets(files...)
|
||||
// for _, file := range files {
|
||||
// tmp.assertExists("repository/targets/" + file)
|
||||
// }
|
||||
// tmp.assertEmpty("staged/targets")
|
||||
// tmp.assertEmpty("staged")
|
||||
//
|
||||
// // removing all targets removes them from the repository and targets.json
|
||||
// c.Assert(r.RemoveTargets(nil), IsNil)
|
||||
// c.Assert(r.Snapshot(CompressionTypeNone), IsNil)
|
||||
// c.Assert(r.Timestamp(), IsNil)
|
||||
// c.Assert(r.Commit(), IsNil)
|
||||
// tmp.assertEmpty("repository/targets")
|
||||
// t, err := r.targets()
|
||||
// c.Assert(err, IsNil)
|
||||
// c.Assert(t.Targets, HasLen, 0)
|
||||
//}
|
||||
|
||||
func (RepoSuite) TestManageMultipleTargets(c *C) {
|
||||
trust := signed.NewEd25519()
|
||||
signer := signed.NewSigner(trust)
|
||||
tmp := newTmpDir(c)
|
||||
local := store.FileSystemStore(tmp.path, nil)
|
||||
r, err := NewRepo(signer, local)
|
||||
c.Assert(err, IsNil)
|
||||
// don't use consistent snapshots to make the checks simpler
|
||||
c.Assert(r.Init(false), IsNil)
|
||||
genKey(c, r, "root")
|
||||
genKey(c, r, "targets")
|
||||
genKey(c, r, "snapshot")
|
||||
genKey(c, r, "timestamp")
|
||||
|
||||
assertRepoTargets := func(paths ...string) {
|
||||
t, err := r.targets()
|
||||
c.Assert(err, IsNil)
|
||||
for _, path := range paths {
|
||||
if _, ok := t.Targets[path]; !ok {
|
||||
c.Fatalf("missing target file: %s", path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// adding and committing multiple files moves correct targets from staged -> repository
|
||||
tmp.writeStagedTarget("foo.txt", "foo")
|
||||
tmp.writeStagedTarget("bar.txt", "bar")
|
||||
c.Assert(r.AddTargets([]string{"foo.txt", "bar.txt"}, nil), IsNil)
|
||||
c.Assert(r.Snapshot(CompressionTypeNone), IsNil)
|
||||
c.Assert(r.Timestamp(), IsNil)
|
||||
c.Assert(r.Commit(), IsNil)
|
||||
assertRepoTargets("/foo.txt", "/bar.txt")
|
||||
tmp.assertExists("repository/targets/foo.txt")
|
||||
tmp.assertExists("repository/targets/bar.txt")
|
||||
|
||||
// adding all targets moves them all from staged -> repository
|
||||
count := 10
|
||||
files := make([]string, count)
|
||||
for i := 0; i < count; i++ {
|
||||
files[i] = fmt.Sprintf("/file%d.txt", i)
|
||||
tmp.writeStagedTarget(files[i], "data")
|
||||
}
|
||||
c.Assert(r.AddTargets(nil, nil), IsNil)
|
||||
c.Assert(r.Snapshot(CompressionTypeNone), IsNil)
|
||||
c.Assert(r.Timestamp(), IsNil)
|
||||
c.Assert(r.Commit(), IsNil)
|
||||
tmp.assertExists("repository/targets/foo.txt")
|
||||
tmp.assertExists("repository/targets/bar.txt")
|
||||
assertRepoTargets(files...)
|
||||
for _, file := range files {
|
||||
tmp.assertExists("repository/targets/" + file)
|
||||
}
|
||||
tmp.assertEmpty("staged/targets")
|
||||
tmp.assertEmpty("staged")
|
||||
|
||||
// removing all targets removes them from the repository and targets.json
|
||||
c.Assert(r.RemoveTargets(nil), IsNil)
|
||||
c.Assert(r.Snapshot(CompressionTypeNone), IsNil)
|
||||
c.Assert(r.Timestamp(), IsNil)
|
||||
c.Assert(r.Commit(), IsNil)
|
||||
tmp.assertEmpty("repository/targets")
|
||||
t, err := r.targets()
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(t.Targets, HasLen, 0)
|
||||
}
|
||||
|
|
|
@@ -5,7 +5,16 @@ import (
	"github.com/endophage/go-tuf/keys"
)

type Signer interface {
	GetPublicKeys(keyIDs ...string) (map[string]keys.Key, error)
	Sign(keyIDs []string, data json.RawMessage) ([]data.Signature, error)
type SigningService interface {
	Sign(keyIDs []string, data []byte) ([]data.Signature, error)
}

type KeyService interface {
	Create() (*keys.PublicKey, error)
	PublicKeys(keyIDs ...string) (map[string]*keys.PublicKey, error)
}

type TrustService interface {
	SigningService
	KeyService
}
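Any type satisfying both halves of TrustService can be handed to NewSigner (defined in sign.go below). A minimal in-package sketch, assuming the Ed25519 service used throughout the tests satisfies this interface:

	trust := NewEd25519() // assumed in-process TrustService implementation
	signer := NewSigner(trust)

	pub, err := signer.NewKey() // KeyService.Create under the hood
	if err != nil {
		// handle error
	}

	s := &data.Signed{Signed: []byte(`{}`)}
	if err := signer.Sign(s, pub); err != nil { // SigningService.Sign under the hood
		// handle error
	}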
@@ -1,40 +1,78 @@
package signed

import (
	"github.com/agl/ed25519"
	cjson "github.com/endophage/go-tuf/Godeps/_workspace/src/github.com/tent/canonical-json-go"

	"github.com/endophage/go-tuf/data"
	cjson "github.com/tent/canonical-json-go"
	"github.com/endophage/go-tuf/keys"
)

// Sign takes a data.Signed and a key, calculated and adds the signature
// to the data.Signed
func Sign(s *data.Signed, k *data.Key) {
	id := k.ID()
//func Sign(s *data.Signed, k *data.Key) {
//	id := k.ID()
//	signatures := make([]data.Signature, 0, len(s.Signatures)+1)
//	for _, sig := range s.Signatures {
//		if sig.KeyID == id {
//			continue
//		}
//		signatures = append(signatures, sig)
//	}
//	priv := [ed25519.PrivateKeySize]byte{}
//	copy(priv[:], k.Value.Private)
//	sig := ed25519.Sign(&priv, s.Signed)
//	s.Signatures = append(signatures, data.Signature{
//		KeyID:     id,
//		Method:    "ed25519",
//		Signature: sig[:],
//	})
//}

// Signer encapsulates a signing service with some convenience methods to
// interface between TUF keys and the generic service interface
type Signer struct {
	service TrustService
}

func NewSigner(service TrustService) *Signer {
	return &Signer{service}
}

// Sign takes a data.Signed and a key, calculates and adds the signature
// to the data.Signed
func (signer *Signer) Sign(s *data.Signed, keys ...*keys.PublicKey) error {
	signatures := make([]data.Signature, 0, len(s.Signatures)+1)
	keyIDMemb := make(map[string]struct{})
	keyIDs := make([]string, 0, len(keys))
	for _, key := range keys {
		keyIDMemb[key.ID] = struct{}{}
		keyIDs = append(keyIDs, key.ID)
	}
	for _, sig := range s.Signatures {
		if sig.KeyID == id {
		if _, ok := keyIDMemb[sig.KeyID]; ok {
			continue
		}
		signatures = append(signatures, sig)
	}
	priv := [ed25519.PrivateKeySize]byte{}
	copy(priv[:], k.Value.Private)
	sig := ed25519.Sign(&priv, s.Signed)
	s.Signatures = append(signatures, data.Signature{
		KeyID:     id,
		Method:    "ed25519",
		Signature: sig[:],
	})
	newSigs, err := signer.service.Sign(keyIDs, s.Signed)

	if err != nil {
		return err
	}
	s.Signatures = append(signatures, newSigs...)
	return nil
}

func Marshal(v interface{}, keys ...*data.Key) (*data.Signed, error) {
func (signer *Signer) Marshal(v interface{}, keys ...*keys.PublicKey) (*data.Signed, error) {
	b, err := cjson.Marshal(v)
	if err != nil {
		return nil, err
	}
	s := &data.Signed{Signed: b}
	for _, k := range keys {
		Sign(s, k)
	err = signer.Sign(s, keys...)
	return s, err // err may be nil but there's no point in checking, just return it
	}
	return s, nil

func (signer *Signer) NewKey() (*keys.PublicKey, error) {
	return signer.service.Create()
}
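For context, a minimal usage sketch (not taken from the commit) of the new Signer wired to the in-memory Ed25519 TrustService that this change also adds; every identifier other than the hypothetical exampleSign and meta comes from this package:

func exampleSign() error {
	signer := NewSigner(NewEd25519())

	key, err := signer.NewKey() // asks the backing service to create an ed25519 key
	if err != nil {
		return err
	}

	type meta struct {
		Version int `json:"version"`
	}

	s, err := signer.Marshal(&meta{Version: 1}, key)
	if err != nil {
		return err
	}
	_ = s // s.Signatures now carries one signature made with key.ID
	return nil
}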
113
Godeps/_workspace/src/github.com/endophage/go-tuf/signed/sign_test.go
generated
vendored
Normal file
@@ -0,0 +1,113 @@
package signed

import (
	"testing"

	"github.com/endophage/go-tuf/data"
	"github.com/endophage/go-tuf/keys"
)

type MockTrustService struct {
	testSig data.Signature
	testKey keys.PublicKey
}

func (mts *MockTrustService) Sign(keyIDs []string, toSign []byte) ([]data.Signature, error) {
	sigs := []data.Signature{mts.testSig}
	return sigs, nil
}

func (mts *MockTrustService) Create() (*keys.PublicKey, error) {
	return &mts.testKey, nil
}

func (mts *MockTrustService) PublicKeys(keyIDs ...string) (map[string]*keys.PublicKey, error) {
	return map[string]*keys.PublicKey{"testID": &mts.testKey}, nil
}

var _ TrustService = &MockTrustService{}

// Test signing and ensure the expected signature is added
func TestBasicSign(t *testing.T) {
	signer := Signer{&MockTrustService{
		testSig: data.Signature{KeyID: "testID"},
		testKey: keys.PublicKey{},
	}}
	key := keys.PublicKey{}
	testData := data.Signed{}

	signer.Sign(&testData, &key)

	if len(testData.Signatures) != 1 {
		t.Fatalf("Incorrect number of signatures: %d", len(testData.Signatures))
	}

	if testData.Signatures[0].KeyID != "testID" {
		t.Fatalf("Wrong signature ID returned: %s", testData.Signatures[0].KeyID)
	}
}

// Test signing with the same key multiple times only registers a single signature
// for the key (N.B. MockTrustService.Sign will still be called again, but Signer.Sign
// should be cleaning previous signatures by the KeyID when asked to sign again)
func TestReSign(t *testing.T) {
	signer := Signer{&MockTrustService{
		testSig: data.Signature{KeyID: "testID"},
		testKey: keys.PublicKey{},
	}}
	key := keys.PublicKey{ID: "testID"}
	testData := data.Signed{}

	signer.Sign(&testData, &key)
	signer.Sign(&testData, &key)

	if len(testData.Signatures) != 1 {
		t.Fatalf("Incorrect number of signatures: %d", len(testData.Signatures))
	}

	if testData.Signatures[0].KeyID != "testID" {
		t.Fatalf("Wrong signature ID returned: %s", testData.Signatures[0].KeyID)
	}
}

func TestMultiSign(t *testing.T) {
	signer := Signer{&MockTrustService{
		testSig: data.Signature{KeyID: "testID"},
		testKey: keys.PublicKey{},
	}}
	key := keys.PublicKey{ID: "testID1"}
	testData := data.Signed{}

	signer.Sign(&testData, &key)

	key = keys.PublicKey{ID: "testID2"}
	signer.Sign(&testData, &key)

	if len(testData.Signatures) != 2 {
		t.Fatalf("Incorrect number of signatures: %d", len(testData.Signatures))
	}

	keyIDs := map[string]struct{}{"testID1": struct{}{}, "testID2": struct{}{}}
	for _, sig := range testData.Signatures {
		if _, ok := keyIDs[sig.KeyID]; !ok {
			t.Fatalf("Got a signature we didn't expect: %s", sig.KeyID)
		}
	}
}

func TestNewKey(t *testing.T) {
	signer := Signer{&MockTrustService{
		testSig: data.Signature{},
		testKey: keys.PublicKey{ID: "testID"},
	}}

	key, err := signer.NewKey()
	if err != nil {
		t.Fatalf("Error creating key: %v", err)
	}

	if key.ID != "testID" {
		t.Fatalf("Expected key ID not found: %s", key.ID)
	}
}
@@ -0,0 +1,89 @@
package signed

import (
	"crypto/rand"

	"github.com/endophage/go-tuf/Godeps/_workspace/src/github.com/agl/ed25519"
	"github.com/endophage/go-tuf/data"
	"github.com/endophage/go-tuf/keys"
)

// Ed25519 implements a simple in memory keystore and trust service
type Ed25519 struct {
	keys map[string]*keys.PrivateKey
}

var _ TrustService = &Ed25519{}

func NewEd25519() *Ed25519 {
	return &Ed25519{
		make(map[string]*keys.PrivateKey),
	}
}

// addKey allows you to add a private key to the trust service
func (trust *Ed25519) addKey(k *keys.PrivateKey) {
	key := keys.PrivateKey{
		PublicKey: keys.PublicKey{
			Key: data.Key{
				Type: k.Type,
				Value: data.KeyValue{
					Public: make([]byte, len(k.Value.Public)),
				},
			},
			ID: k.ID,
		},
		Private: make([]byte, len(k.Private)),
	}

	copy(key.Value.Public, k.Value.Public)
	copy(key.Private, k.Private)
	trust.keys[k.ID] = &key
}

func (trust *Ed25519) RemoveKey(keyID string) {
	delete(trust.keys, keyID)
}

func (trust *Ed25519) Sign(keyIDs []string, toSign []byte) ([]data.Signature, error) {
	signatures := make([]data.Signature, 0, len(keyIDs))
	for _, kID := range keyIDs {
		priv := [ed25519.PrivateKeySize]byte{}
		pub := [ed25519.PublicKeySize]byte{}
		copy(priv[:], trust.keys[kID].Private)
		copy(pub[:], trust.keys[kID].Value.Public)
		sig := ed25519.Sign(&priv, toSign)
		signatures = append(signatures, data.Signature{
			KeyID:     kID,
			Method:    "ed25519",
			Signature: sig[:],
		})
	}
	return signatures, nil

}

func (trust *Ed25519) Create() (*keys.PublicKey, error) {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		return nil, err
	}
	pubBytes := make([]byte, ed25519.PublicKeySize)
	copy(pubBytes, pub[:])
	privBytes := make([]byte, ed25519.PrivateKeySize)
	copy(privBytes, priv[:])
	public := keys.NewPublicKey("ed25519", pubBytes)
	private := keys.PrivateKey{*public, privBytes}
	trust.addKey(&private)
	return public, nil
}

func (trust *Ed25519) PublicKeys(keyIDs ...string) (map[string]*keys.PublicKey, error) {
	k := make(map[string]*keys.PublicKey)
	for _, kID := range keyIDs {
		if key, ok := trust.keys[kID]; ok {
			k[kID] = &key.PublicKey
		}
	}
	return k, nil
}
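As a quick orientation (not part of the commit; exampleEd25519 is a hypothetical name), the in-memory service above can be exercised end to end with only the functions shown in the hunk:

func exampleEd25519() error {
	trust := NewEd25519()

	pub, err := trust.Create() // generates a key pair and keeps the private half in memory
	if err != nil {
		return err
	}

	sigs, err := trust.Sign([]string{pub.ID}, []byte("payload"))
	if err != nil {
		return err
	}
	_ = sigs // one data.Signature with KeyID == pub.ID and Method == "ed25519"
	return nil
}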
@@ -6,10 +6,10 @@ import (
	"strings"
	"time"

	"github.com/agl/ed25519"
	"github.com/endophage/go-tuf/Godeps/_workspace/src/github.com/agl/ed25519"
	"github.com/endophage/go-tuf/Godeps/_workspace/src/github.com/tent/canonical-json-go"
	"github.com/endophage/go-tuf/data"
	"github.com/endophage/go-tuf/keys"
	"github.com/tent/canonical-json-go"
)

var (
@@ -74,8 +74,8 @@ func VerifySignatures(s *data.Signed, role string, db *keys.DB) error {
	}

	valid := make(map[string]struct{})
	var sigBytes [ed25519.SignatureSize]byte
	for _, sig := range s.Signatures {
		var sigBytes [ed25519.SignatureSize]byte
		if sig.Method != "ed25519" {
			return ErrWrongMethod
		}
@@ -92,7 +92,9 @@ func VerifySignatures(s *data.Signed, role string, db *keys.DB) error {
		}

		copy(sigBytes[:], sig.Signature)
		if !ed25519.Verify(&key.Public, msg, &sigBytes) {
		var keyBytes [32]byte
		copy(keyBytes[:], key.Value.Public)
		if !ed25519.Verify(&keyBytes, msg, &sigBytes) {
			return ErrInvalid
		}
		valid[sig.KeyID] = struct{}{}
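The hunk above verifies against the variable-length key bytes stored in TUF metadata, so the code now copies them into the fixed-size array type that agl/ed25519 expects. A standalone sketch of that conversion (illustrative only, not quoted from the commit):

func verifySketch(pub, msg, sig []byte) bool {
	// agl/ed25519 takes pointers to fixed-size arrays, while TUF metadata
	// carries keys and signatures as byte slices, hence the copies.
	var keyBytes [ed25519.PublicKeySize]byte
	var sigBytes [ed25519.SignatureSize]byte
	copy(keyBytes[:], pub)
	copy(sigBytes[:], sig)
	return ed25519.Verify(&keyBytes, msg, &sigBytes)
}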
@@ -4,11 +4,11 @@ import (
	"testing"
	"time"

	"github.com/agl/ed25519"
	"github.com/endophage/go-tuf/Godeps/_workspace/src/github.com/agl/ed25519"
	"github.com/endophage/go-tuf/data"
	"github.com/endophage/go-tuf/keys"

	. "gopkg.in/check.v1"
	. "github.com/endophage/go-tuf/Godeps/_workspace/src/gopkg.in/check.v1"
)

// Hook up gocheck into the "go test" runner.
@@ -1,7 +1,6 @@
package store

import (
	"bytes"
	"encoding/json"
	"io"
	"io/ioutil"
@@ -11,75 +10,16 @@ import (

	"github.com/endophage/go-tuf/data"
	"github.com/endophage/go-tuf/encrypted"
	"github.com/endophage/go-tuf/errors"
	"github.com/endophage/go-tuf/util"
)

func MemoryStore(meta map[string]json.RawMessage, files map[string][]byte) LocalStore {
	if meta == nil {
		meta = make(map[string]json.RawMessage)
	}
	return &memoryStore{
		meta:  meta,
		files: files,
		keys:  make(map[string][]*data.Key),
	}
}

type memoryStore struct {
	meta  map[string]json.RawMessage
	files map[string][]byte
	keys  map[string][]*data.Key
}

func (m *memoryStore) GetMeta() (map[string]json.RawMessage, error) {
	return m.meta, nil
}

func (m *memoryStore) SetMeta(name string, meta json.RawMessage) error {
	m.meta[name] = meta
	return nil
}

func (m *memoryStore) WalkStagedTargets(paths []string, targetsFn targetsWalkFunc) error {
	if len(paths) == 0 {
		for path, data := range m.files {
			if err := targetsFn(path, bytes.NewReader(data)); err != nil {
				return err
			}
		}
		return nil
	}

	for _, path := range paths {
		data, ok := m.files[path]
		if !ok {
			return ErrFileNotFound{path}
		}
		if err := targetsFn(path, bytes.NewReader(data)); err != nil {
			return err
		}
	}
	return nil
}

func (m *memoryStore) Commit(map[string]json.RawMessage, bool, map[string]data.Hashes) error {
	return nil
}

func (m *memoryStore) GetKeys(role string) ([]*data.Key, error) {
	return m.keys[role], nil
}

func (m *memoryStore) SaveKey(role string, key *data.Key) error {
	if _, ok := m.keys[role]; !ok {
		m.keys[role] = make([]*data.Key, 0)
	}
	m.keys[role] = append(m.keys[role], key)
	return nil
}

func (m *memoryStore) Clean() error {
	return nil
// topLevelManifests determines the order signatures are verified when committing.
var topLevelManifests = []string{
	"root.json",
	"targets.json",
	"snapshot.json",
	"timestamp.json",
}

type persistedKeys struct {
@@ -171,7 +111,11 @@ func (f *fileSystemStore) WalkStagedTargets(paths []string, targetsFn targetsWal
			return err
		}
		defer file.Close()
		return targetsFn(rel, file)
		meta, err := util.GenerateFileMeta(file, "sha256")
		if err != nil {
			return err
		}
		return targetsFn(rel, meta)
	}
	return filepath.Walk(filepath.Join(f.stagedDir(), "targets"), walkFunc)
}
@@ -181,7 +125,7 @@ func (f *fileSystemStore) WalkStagedTargets(paths []string, targetsFn targetsWal
		realPath := filepath.Join(f.stagedDir(), "targets", path)
		if _, err := os.Stat(realPath); err != nil {
			if os.IsNotExist(err) {
				return ErrFileNotFound{realPath}
				return errors.ErrFileNotFound{realPath}
			}
			return err
		}
@@ -192,11 +136,15 @@ func (f *fileSystemStore) WalkStagedTargets(paths []string, targetsFn targetsWal
		file, err := os.Open(realPath)
		if err != nil {
			if os.IsNotExist(err) {
				return ErrFileNotFound{realPath}
				return errors.ErrFileNotFound{realPath}
			}
			return err
		}
		err = targetsFn(path, file)
		meta, err := util.GenerateFileMeta(file, "sha256")
		if err != nil {
			return err
		}
		err = targetsFn(path, meta)
		file.Close()
		if err != nil {
			return err
@@ -382,7 +330,7 @@ func (f *fileSystemStore) loadKeys(role string) ([]*data.Key, []byte, error) {

	// the keys are encrypted so cannot be loaded if passphraseFunc is not set
	if f.passphraseFunc == nil {
		return nil, nil, ErrPassphraseRequired{role}
		return nil, nil, errors.ErrPassphraseRequired{role}
	}

	pass, err := f.passphraseFunc(role, false)
86
Godeps/_workspace/src/github.com/endophage/go-tuf/store/memorystore.go
generated
vendored
Normal file
@@ -0,0 +1,86 @@
package store

import (
	"bytes"
	"encoding/json"

	"github.com/endophage/go-tuf/data"
	"github.com/endophage/go-tuf/errors"
	"github.com/endophage/go-tuf/util"
)

func MemoryStore(meta map[string]json.RawMessage, files map[string][]byte) LocalStore {
	if meta == nil {
		meta = make(map[string]json.RawMessage)
	}
	return &memoryStore{
		meta:  meta,
		files: files,
		keys:  make(map[string][]*data.Key),
	}
}

type memoryStore struct {
	meta  map[string]json.RawMessage
	files map[string][]byte
	keys  map[string][]*data.Key
}

func (m *memoryStore) GetMeta() (map[string]json.RawMessage, error) {
	return m.meta, nil
}

func (m *memoryStore) SetMeta(name string, meta json.RawMessage) error {
	m.meta[name] = meta
	return nil
}

func (m *memoryStore) WalkStagedTargets(paths []string, targetsFn targetsWalkFunc) error {
	if len(paths) == 0 {
		for path, data := range m.files {
			meta, err := util.GenerateFileMeta(bytes.NewReader(data), "sha256")
			if err != nil {
				return err
			}
			if err = targetsFn(path, meta); err != nil {
				return err
			}
		}
		return nil
	}

	for _, path := range paths {
		data, ok := m.files[path]
		if !ok {
			return errors.ErrFileNotFound{path}
		}
		meta, err := util.GenerateFileMeta(bytes.NewReader(data), "sha256")
		if err != nil {
			return err
		}
		if err = targetsFn(path, meta); err != nil {
			return err
		}
	}
	return nil
}

func (m *memoryStore) Commit(map[string]json.RawMessage, bool, map[string]data.Hashes) error {
	return nil
}

func (m *memoryStore) GetKeys(role string) ([]*data.Key, error) {
	return m.keys[role], nil
}

func (m *memoryStore) SaveKey(role string, key *data.Key) error {
	if _, ok := m.keys[role]; !ok {
		m.keys[role] = make([]*data.Key, 0)
	}
	m.keys[role] = append(m.keys[role], key)
	return nil
}

func (m *memoryStore) Clean() error {
	return nil
}
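Worth noting from the two store changes above: the targets walk callback now receives generated file metadata (length plus sha256 digest) rather than an open reader. A rough sketch of driving the in-memory store under that assumption; the callback signature and the exampleWalk name are inferred from the calls shown here, not quoted from the commit:

func exampleWalk() error {
	store := MemoryStore(nil, map[string][]byte{
		"foo.txt": []byte("foo"),
	})
	// Walk every staged target; each file arrives as its data.FileMeta rather
	// than as raw bytes.
	return store.WalkStagedTargets(nil, func(path string, meta data.FileMeta) error {
		_ = path
		_ = meta // inspect meta.Length / meta.Hashes here
		return nil
	})
}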
@@ -5,8 +5,8 @@ import (
	"encoding/hex"
	"testing"

	. "github.com/endophage/go-tuf/Godeps/_workspace/src/gopkg.in/check.v1"
	"github.com/endophage/go-tuf/data"
	. "gopkg.in/check.v1"
)

// Hook up gocheck into the "go test" runner.