Merge pull request #57 from endophage/client_atomic_update

Client-side implementation of the multi-file atomic TUF update
This commit is contained in:
David Lawrence 2015-07-17 17:56:00 -07:00
commit b71547ed3f
26 changed files with 658 additions and 389 deletions

2
Godeps/Godeps.json generated
View File

@ -58,7 +58,7 @@
},
{
"ImportPath": "github.com/endophage/gotuf",
"Rev": "44944cf8926ed3bf246e3e6612e771d99352f648"
"Rev": "a8a23ab6e67bd0e9fbaf563aabd9e6ee7ea344d2"
},
{
"ImportPath": "github.com/go-sql-driver/mysql",

View File

@ -1,6 +1,9 @@
package client
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
@ -17,41 +20,53 @@ import (
"github.com/endophage/gotuf/utils"
)
const maxSize = 5 << 20
// Client manages an in-memory TUF repository, fetching metadata from a
// remote store, verifying it against known keys, and persisting accepted
// copies to a local metadata cache.
type Client struct {
	local  *tuf.TufRepo        // in-memory TUF repo being updated
	remote store.RemoteStore   // source of fresh metadata
	keysDB *keys.KeyDB         // known keys used for signature verification
	cache  store.MetadataStore // local persistence for accepted metadata
}
func NewClient(local *tuf.TufRepo, remote store.RemoteStore, keysDB *keys.KeyDB) *Client {
// NewClient constructs a Client that updates the given in-memory repo,
// downloading from remote, verifying against keysDB, and caching accepted
// metadata in cache.
func NewClient(local *tuf.TufRepo, remote store.RemoteStore, keysDB *keys.KeyDB, cache store.MetadataStore) *Client {
	return &Client{
		local:  local,
		remote: remote,
		keysDB: keysDB,
		cache:  cache,
	}
}
// Update an in memory copy of the TUF Repo. If an error is returned, the
// Client instance should be considered corrupted and discarded as it may
// be left in a partially updated state
func (c *Client) Update() error {
// 1. Get timestamp
// a. If timestamp error (verification, expired, etc...) download new root and return to 1.
// 2. Check if local snapshot is up to date
// a. If out of date, get updated snapshot
// i. If snapshot error, download new root and return to 1.
// 3. Check if root correct against snapshot
// a. If incorrect, download new root and return to 1.
// 4. Iteratively download and search targets and delegations to find target meta
logrus.Debug("updating TUF client")
err := c.update()
if err != nil {
switch err.(type) {
case tuf.ErrSigVerifyFail:
case tuf.ErrMetaExpired:
case tuf.ErrLocalRootExpired:
case *tuf.ErrSigVerifyFail, *tuf.ErrMetaExpired, *tuf.ErrLocalRootExpired:
logrus.Debug("retryable error occurred. Root will be downloaded and another update attempted")
if err := c.downloadRoot(); err != nil {
logrus.Errorf("Client Update (Root):", err)
logrus.Errorf("client Update (Root):", err)
return err
}
default:
logrus.Error("an unexpected error occurred while updating TUF client")
return err
}
}
// If we error again, we now have the latest root and just want to fail
// out as there's no expectation the problem can be resolved automatically
logrus.Debug("retrying TUF client update")
return c.update()
}
return nil
}
func (c *Client) update() error {
@ -67,8 +82,12 @@ func (c *Client) update() error {
}
err = c.checkRoot()
if err != nil {
return err
// In this instance the root has not expired base on time, but is
// expired based on the snapshot dictating a new root has been produced.
logrus.Info(err.Error())
return &tuf.ErrLocalRootExpired{}
}
// will always need top level targets at a minimum
err = c.downloadTargets("targets")
if err != nil {
logrus.Errorf("Client Update (Targets): %s", err.Error())
@ -82,6 +101,19 @@ func (c *Client) update() error {
// hash and size in snapshot are unchanged but the root file has expired,
// there is little expectation that the situation can be remedied.
// checkRoot verifies that the locally cached root file matches the size
// and sha256 digest recorded for the root role in the current snapshot.
func (c Client) checkRoot() error {
	role := data.RoleName("root")
	rootMeta := c.local.Snapshot.Signed.Meta[role]
	expected := rootMeta.Hashes["sha256"]

	cached, err := c.cache.GetMeta("root", rootMeta.Length)
	if err != nil {
		return err
	}

	actual := sha256.Sum256(cached)
	if !bytes.Equal(actual[:], expected) {
		return fmt.Errorf("Cached root sha256 did not match snapshot root sha256")
	}
	return nil
}
@ -89,62 +121,206 @@ func (c Client) checkRoot() error {
func (c *Client) downloadRoot() error {
role := data.RoleName("root")
size := c.local.Snapshot.Signed.Meta[role].Length
expectedSha256 := c.local.Snapshot.Signed.Meta[role].Hashes["sha256"]
raw, err := c.remote.GetMeta(role, size)
// if we're bootstrapping we may not have a cached root, an
// error will result in the "previous root version" being
// interpreted as 0.
var download bool
old := &data.Signed{}
cachedRoot, err := c.cache.GetMeta(role, maxSize)
version := 0
if cachedRoot == nil || err != nil {
logrus.Debug("didn't find a cached root, must download")
download = true
} else {
hash := sha256.Sum256(cachedRoot)
if !bytes.Equal(hash[:], expectedSha256) {
logrus.Debug("cached root's hash didn't match expected, must download")
download = true
}
err := json.Unmarshal(cachedRoot, old)
if err == nil {
root, err := data.RootFromSigned(old)
if err == nil {
version = root.Signed.Version
} else {
logrus.Debug("couldn't parse Signed part of cached root, must download")
download = true
}
} else {
logrus.Debug("couldn't parse cached root, must download")
download = true
}
}
var s *data.Signed
var raw []byte
if download {
logrus.Debug("downloading new root")
raw, err = c.remote.GetMeta(role, size)
if err != nil {
return err
}
s := &data.Signed{}
hash := sha256.Sum256(raw)
if !bytes.Equal(hash[:], expectedSha256) {
return fmt.Errorf("Remote root sha256 did not match snapshot root sha256: %#x vs. %#x", hash, []byte(expectedSha256))
}
s = &data.Signed{}
err = json.Unmarshal(raw, s)
if err != nil {
return err
}
err = signed.Verify(s, role, 0, c.keysDB)
} else {
logrus.Debug("using cached root")
s = old
}
// this will confirm that the root has been signed by the old root role
// as c.keysDB contains the root keys we bootstrapped with.
// Still need to determine if there has been a root key update and
// confirm signature with new root key
err = signed.Verify(s, role, version, c.keysDB)
if err != nil {
logrus.Debug("root did not verify with existing keys")
return err
}
// This will cause keyDB to get updated, overwriting any keyIDs associated
// with the roles in root.json
c.local.SetRoot(s)
// verify again now that the old keys have been replaced with the new keys.
// TODO(endophage): be more intelligent and only re-verify if we detect
// there has been a change in root keys
err = signed.Verify(s, role, version, c.keysDB)
if err != nil {
logrus.Debug("root did not verify with new keys")
return err
}
if download {
logrus.Debug("caching downloaded root")
// Now that we have accepted new root, write it to cache
if err = c.cache.SetMeta(role, raw); err != nil {
logrus.Errorf("Failed to write root to local cache: %s", err.Error())
}
}
return nil
}
// downloadTimestamp is responsible for downloading the timestamp.json
func (c *Client) downloadTimestamp() error {
logrus.Debug("downloadTimestamp")
role := data.RoleName("timestamp")
raw, err := c.remote.GetMeta(role, 5<<20)
if err != nil {
return err
// We may not have a cached timestamp if this is the first time
// we're interacting with the repo. This will result in the
// version being 0
var download bool
old := &data.Signed{}
version := 0
cachedTS, err := c.cache.GetMeta(role, maxSize)
if err == nil {
err := json.Unmarshal(cachedTS, old)
if err == nil {
ts, err := data.TimestampFromSigned(old)
if err == nil {
version = ts.Signed.Version
}
s := &data.Signed{}
}
}
// unlike root, targets and snapshot, always try and download timestamps
// from remote, only using the cache one if we couldn't reach remote.
raw, err := c.remote.GetMeta(role, maxSize)
var s *data.Signed
if err != nil || len(raw) == 0 {
s = old
} else {
download = true
s = &data.Signed{}
err = json.Unmarshal(raw, s)
if err != nil {
return err
}
err = signed.Verify(s, role, 0, c.keysDB)
}
err = signed.Verify(s, role, version, c.keysDB)
if err != nil {
return err
}
if download {
c.cache.SetMeta(role, raw)
}
c.local.SetTimestamp(s)
return nil
}
// downloadSnapshot is responsible for downloading the snapshot.json
func (c *Client) downloadSnapshot() error {
logrus.Debug("downloadSnapshot")
role := data.RoleName("snapshot")
size := c.local.Timestamp.Signed.Meta[role].Length
raw, err := c.remote.GetMeta(role, size)
expectedSha256, ok := c.local.Timestamp.Signed.Meta[role].Hashes["sha256"]
if !ok {
return fmt.Errorf("Sha256 is currently the only hash supported by this client. No Sha256 found for snapshot")
}
var download bool
old := &data.Signed{}
version := 0
raw, err := c.cache.GetMeta(role, size)
if raw == nil || err != nil {
logrus.Debug("no snapshot in cache, must download")
download = true
} else {
// file may have been tampered with on disk. Always check the hash!
genHash := sha256.Sum256(raw)
if !bytes.Equal(genHash[:], expectedSha256) {
logrus.Debug("hash of snapshot in cache did not match expected hash, must download")
download = true
}
err := json.Unmarshal(raw, old)
if err == nil {
snap, err := data.TimestampFromSigned(old)
if err == nil {
version = snap.Signed.Version
} else {
logrus.Debug("Could not parse Signed part of snapshot, must download")
download = true
}
} else {
logrus.Debug("Could not parse snapshot, must download")
download = true
}
}
var s *data.Signed
if download {
logrus.Debug("downloading new snapshot")
raw, err = c.remote.GetMeta(role, size)
if err != nil {
return err
}
s := &data.Signed{}
genHash := sha256.Sum256(raw)
if !bytes.Equal(genHash[:], expectedSha256) {
return fmt.Errorf("Retrieved snapshot did not verify against hash in timestamp.")
}
s = &data.Signed{}
err = json.Unmarshal(raw, s)
if err != nil {
return err
}
err = signed.Verify(s, role, 0, c.keysDB)
} else {
logrus.Debug("using cached snapshot")
s = old
}
err = signed.Verify(s, role, version, c.keysDB)
if err != nil {
return err
}
c.local.SetSnapshot(s)
if download {
err = c.cache.SetMeta(role, raw)
if err != nil {
logrus.Errorf("Failed to write snapshot to local cache: %s", err.Error())
}
}
return nil
}
@ -169,48 +345,88 @@ func (c *Client) downloadTargets(role string) error {
if err != nil {
return err
}
t := c.local.Targets[role].Signed
for _, r := range t.Delegations.Roles {
err := c.downloadTargets(r.Name)
if err != nil {
logrus.Error("Failed to download ", role, err)
return err
}
}
return nil
}
func (c Client) GetTargetsFile(roleName string, keyIDs []string, snapshotMeta data.Files, consistent bool, threshold int) (*data.Signed, error) {
rolePath, err := c.RoleTargetsPath(roleName, snapshotMeta, consistent)
// require role exists in snapshots
roleMeta, ok := snapshotMeta[roleName]
if !ok {
return nil, fmt.Errorf("Snapshot does not contain target role")
}
expectedSha256, ok := snapshotMeta[roleName].Hashes["sha256"]
if !ok {
return nil, fmt.Errorf("Sha256 is currently the only hash supported by this client. No Sha256 found for targets role %s", roleName)
}
// try to get meta file from content addressed cache
var download bool
old := &data.Signed{}
version := 0
raw, err := c.cache.GetMeta(roleName, roleMeta.Length)
if err != nil || raw == nil {
logrus.Debugf("Couldn't not find cached %s, must download", roleName)
download = true
} else {
// file may have been tampered with on disk. Always check the hash!
genHash := sha256.Sum256(raw)
if !bytes.Equal(genHash[:], expectedSha256) {
download = true
}
err := json.Unmarshal(raw, old)
if err == nil {
targ, err := data.TargetsFromSigned(old)
if err == nil {
version = targ.Signed.Version
} else {
download = true
}
} else {
download = true
}
}
var s *data.Signed
if download {
rolePath, err := c.RoleTargetsPath(roleName, hex.EncodeToString(expectedSha256), consistent)
if err != nil {
return nil, err
}
r, err := c.remote.GetMeta(rolePath, snapshotMeta[roleName].Length)
raw, err = c.remote.GetMeta(rolePath, snapshotMeta[roleName].Length)
if err != nil {
return nil, err
}
s := &data.Signed{}
err = json.Unmarshal(r, s)
s = &data.Signed{}
err = json.Unmarshal(raw, s)
if err != nil {
logrus.Error("Error unmarshalling targets file:", err)
return nil, err
}
err = signed.Verify(s, roleName, 0, c.keysDB)
} else {
logrus.Debug("using cached ", roleName)
s = old
}
err = signed.Verify(s, roleName, version, c.keysDB)
if err != nil {
return nil, err
}
if download {
// if we error when setting meta, we should continue.
err = c.cache.SetMeta(roleName, raw)
if err != nil {
logrus.Errorf("Failed to write snapshot to local cache: %s", err.Error())
}
}
return s, nil
}
func (c Client) RoleTargetsPath(roleName string, snapshotMeta data.Files, consistent bool) (string, error) {
// RoleTargetsPath generates the appropriate filename for the targets file,
// based on whether the repo is marked as consistent.
func (c Client) RoleTargetsPath(roleName string, hashSha256 string, consistent bool) (string, error) {
if consistent {
roleMeta, ok := snapshotMeta[roleName]
if !ok {
return "", fmt.Errorf("Consistent Snapshots Enabled but no meta found for target role")
}
if _, ok := roleMeta.Hashes["sha256"]; !ok {
return "", fmt.Errorf("Consistent Snapshots Enabled and sha256 not found for targets file in snapshot meta")
}
dir := filepath.Dir(roleName)
if strings.Contains(roleName, "/") {
lastSlashIdx := strings.LastIndex(roleName, "/")
@ -218,14 +434,48 @@ func (c Client) RoleTargetsPath(roleName string, snapshotMeta data.Files, consis
}
roleName = path.Join(
dir,
fmt.Sprintf("%s.%s.json", roleMeta.Hashes["sha256"], roleName),
fmt.Sprintf("%s.%s.json", hashSha256, roleName),
)
}
return roleName, nil
}
func (c Client) TargetMeta(path string) *data.FileMeta {
return c.local.FindTarget(path)
// TargetMeta ensures the repo is up to date, downloading the minimum
// necessary metadata files, then searches the top-level targets role and
// its delegations breadth-first for metadata describing the given path.
// It returns nil metadata (and nil error) when no role knows the target.
func (c Client) TargetMeta(path string) (*data.FileMeta, error) {
	// Previously the Update error was silently discarded; if the update
	// fails we cannot claim the metadata is current, so surface it.
	if err := c.Update(); err != nil {
		return nil, err
	}

	var meta *data.FileMeta
	pathDigest := sha256.Sum256([]byte(path))
	pathHex := hex.EncodeToString(pathDigest[:])

	// FIFO list of targets delegations to inspect for target
	roles := []string{data.ValidRoles["targets"]}
	for len(roles) > 0 {
		// pop the next role off the front of the queue
		role := roles[0]
		roles = roles[1:]

		// Download the target role file if necessary
		if err := c.downloadTargets(role); err != nil {
			// as long as we find a valid target somewhere we're happy.
			// continue and search other delegated roles if any
			continue
		}
		meta = c.local.TargetMeta(role, path)
		if meta != nil {
			// we found the target!
			return meta, nil
		}
		// enqueue any delegations of this role that could own the path
		for _, d := range c.local.TargetDelegations(role, path, pathHex) {
			roles = append(roles, d.Name)
		}
	}
	return meta, nil
}
func (c Client) DownloadTarget(dst io.Writer, path string, meta *data.FileMeta) error {

View File

@ -1,33 +0,0 @@
package data
import (
"encoding/hex"
"errors"
)
// HexBytes is a byte slice that serializes to and from JSON as a
// double-quoted, lowercase hexadecimal string.
type HexBytes []byte

// UnmarshalJSON decodes a quoted hex string into b. It rejects input
// that is not a quoted string or whose hex portion has odd length.
func (b *HexBytes) UnmarshalJSON(data []byte) error {
	n := len(data)
	if n < 2 || n%2 != 0 || data[0] != '"' || data[n-1] != '"' {
		return errors.New("tuf: invalid JSON hex bytes")
	}
	decoded := make([]byte, hex.DecodedLen(n-2))
	if _, err := hex.Decode(decoded, data[1:n-1]); err != nil {
		return err
	}
	*b = decoded
	return nil
}

// MarshalJSON encodes b as a quoted lowercase hex string.
func (b HexBytes) MarshalJSON() ([]byte, error) {
	out := make([]byte, hex.EncodedLen(len(b))+2)
	out[0], out[len(out)-1] = '"', '"'
	hex.Encode(out[1:], b)
	return out, nil
}

// String returns the hex encoding of b.
func (b HexBytes) String() string {
	return hex.EncodeToString(b)
}

View File

@ -1,44 +0,0 @@
package data
import (
"encoding/json"
"testing"
. "gopkg.in/check.v1"
)
// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { TestingT(t) }

// HexBytesSuite groups the HexBytes (de)serialization tests.
type HexBytesSuite struct{}

// Register the suite with gocheck.
var _ = Suite(&HexBytesSuite{})
// TestUnmarshalJSON decodes a quoted hex string into the raw bytes it encodes.
func (HexBytesSuite) TestUnmarshalJSON(c *C) {
	var data HexBytes
	err := json.Unmarshal([]byte(`"666f6f"`), &data)
	c.Assert(err, IsNil)
	c.Assert(string(data), Equals, "foo")
}
// TestUnmarshalJSONError covers the malformed inputs UnmarshalJSON must reject.
func (HexBytesSuite) TestUnmarshalJSONError(c *C) {
	var data HexBytes

	// uneven length
	err := json.Unmarshal([]byte(`"a"`), &data)
	c.Assert(err, Not(IsNil))

	// invalid hex
	err = json.Unmarshal([]byte(`"zz"`), &data)
	c.Assert(err, Not(IsNil))

	// wrong type
	err = json.Unmarshal([]byte("6"), &data)
	c.Assert(err, Not(IsNil))
}
// TestMarshalJSON encodes raw bytes as a quoted lowercase hex string.
func (HexBytesSuite) TestMarshalJSON(c *C) {
	data, err := json.Marshal(HexBytes("foo"))
	c.Assert(err, IsNil)
	c.Assert(data, DeepEquals, []byte(`"666f6f"`))
}

View File

@ -56,7 +56,7 @@ func NewSnapshot(root *Signed, targets *Signed) (*SignedSnapshot, error) {
}, nil
}
func (sp *SignedSnapshot) hashForRole(role string) HexBytes {
// hashForRole returns the sha256 digest recorded in the snapshot's Meta
// entry for the given role (nil if no entry or no sha256 is present).
func (sp *SignedSnapshot) hashForRole(role string) []byte {
	return sp.Signed.Meta[role].Hashes["sha256"]
}

View File

@ -82,17 +82,17 @@ type Signed struct {
type Signature struct {
KeyID string `json:"keyid"`
Method SigAlgorithm `json:"method"`
Signature HexBytes `json:"sig"`
Signature []byte `json:"sig"`
}
type Files map[string]FileMeta
type Hashes map[string]HexBytes
type Hashes map[string][]byte
type FileMeta struct {
Length int64 `json:"length"`
Hashes Hashes `json:"hashes"`
Custom *json.RawMessage `json:"custom,omitempty"`
Custom json.RawMessage `json:"custom,omitempty"`
}
func NewFileMeta(r io.Reader, hashAlgorithms ...string) (FileMeta, error) {

View File

@ -28,21 +28,21 @@ type signedMeta struct {
}
// VerifyRoot checks if a given root file is valid against a known set of keys.
func VerifyRoot(s *data.Signed, minVersion int, keys map[string]data.PublicKey, threshold int) (data.PublicKey, error) {
// Threshold is always assumed to be 1
func VerifyRoot(s *data.Signed, minVersion int, keys map[string]data.PublicKey) error {
if len(s.Signatures) == 0 {
return nil, ErrNoSignatures
return ErrNoSignatures
}
var decoded map[string]interface{}
if err := json.Unmarshal(s.Signed, &decoded); err != nil {
return nil, err
return err
}
msg, err := cjson.Marshal(decoded)
if err != nil {
return nil, err
return err
}
valid := make(map[string]struct{})
for _, sig := range s.Signatures {
// method lookup is consistent due to Unmarshal JSON doing lower case for us.
method := sig.Method
@ -56,12 +56,10 @@ func VerifyRoot(s *data.Signed, minVersion int, keys map[string]data.PublicKey,
logrus.Debugf("continuing b/c signature was invalid\n")
continue
}
valid[sig.KeyID] = struct{}{}
// threshold of 1 so return on first success
return verifyMeta(s, "root", minVersion)
}
if len(valid) < threshold {
return nil, ErrRoleThreshold
}
return nil, verifyMeta(s, "root", minVersion)
return ErrRoleThreshold
}
func Verify(s *data.Signed, role string, minVersion int, db *keys.KeyDB) error {

View File

@ -36,7 +36,7 @@ func DBStore(db *sql.DB, imageName string) *dbStore {
}
// GetMeta loads existing TUF metadata files
func (dbs *dbStore) GetMeta(name string) (json.RawMessage, error) {
func (dbs *dbStore) GetMeta(name string) ([]byte, error) {
data, err := dbs.readFile(name)
if err != nil {
return nil, err
@ -45,7 +45,7 @@ func (dbs *dbStore) GetMeta(name string) (json.RawMessage, error) {
}
// SetMeta writes individual TUF metadata files
func (dbs *dbStore) SetMeta(name string, meta json.RawMessage) error {
func (dbs *dbStore) SetMeta(name string, meta []byte) error {
return dbs.writeFile(name, meta)
}
@ -75,7 +75,7 @@ func (dbs *dbStore) WalkStagedTargets(paths []string, targetsFn targetsWalkFunc)
}
// Commit writes a set of consistent (possibly) TUF metadata files
func (dbs *dbStore) Commit(metafiles map[string]json.RawMessage, consistent bool, hashes map[string]data.Hashes) error {
func (dbs *dbStore) Commit(metafiles map[string][]byte, consistent bool, hashes map[string]data.Hashes) error {
// TODO (endophage): write meta files to cache
return nil
@ -200,7 +200,7 @@ func (dbs *dbStore) loadTargets(path string) map[string]data.FileMeta {
for r.Next() {
var absPath, alg, hash string
var size int64
var custom json.RawMessage
var custom []byte
r.Scan(&absPath, &size, &alg, &hash, &custom)
hashBytes, err := hex.DecodeString(hash)
if err != nil {
@ -219,7 +219,7 @@ func (dbs *dbStore) loadTargets(path string) map[string]data.FileMeta {
},
}
if custom != nil {
file.Custom = &custom
file.Custom = json.RawMessage(custom)
}
files[absPath] = file
}

View File

@ -80,11 +80,11 @@ func TestAddBlob(t *testing.T) {
t.Fatal("Hashes map has been modified")
}
hash := data.HexBytes{0x01, 0x02}
hash := []bytes{0x01, 0x02}
if sha256[0] != hash[0] || sha256[1] != hash[1] {
t.Fatal("SHA256 has been modified")
}
hash = data.HexBytes{0x03, 0x04}
hash = []bytes{0x03, 0x04}
if sha512[0] != hash[0] || sha512[1] != hash[1] {
t.Fatal("SHA512 has been modified")
}

View File

@ -1,48 +0,0 @@
package store
import (
"encoding/json"
"io/ioutil"
"os"
"path"
"path/filepath"
)
// FileCacheStore implements a super simple wrapper around RemoteStore
// to handle local file caching of metadata
type FileCacheStore struct {
	RemoteStore
	cachePath string // root directory for cached .json metadata files
}

// NewFileCacheStore wraps the given RemoteStore, caching downloaded
// metadata as JSON files under cachePath.
func NewFileCacheStore(remote RemoteStore, cachePath string) *FileCacheStore {
	return &FileCacheStore{
		RemoteStore: remote,
		cachePath:   cachePath,
	}
}

// cacheFile writes data to <cachePath>/<name>.json, creating parent
// directories as needed.
func (s FileCacheStore) cacheFile(name string, data json.RawMessage) error {
	fileName := path.Join(s.cachePath, name)
	// 0700, not 0600: a directory must carry the owner execute bit or it
	// cannot be traversed, which would make the cached file unreadable.
	// The MkdirAll error was previously ignored.
	if err := os.MkdirAll(filepath.Dir(fileName), 0700); err != nil {
		return err
	}
	return ioutil.WriteFile(fileName+".json", data, 0600)
}

// useCachedFile reads a previously cached metadata file, if present.
func (s FileCacheStore) useCachedFile(name string) (json.RawMessage, error) {
	return ioutil.ReadFile(path.Join(s.cachePath, name+".json"))
}

// GetMeta returns the cached copy of the named metadata file when one
// exists, otherwise falls back to the wrapped RemoteStore and caches the
// result. The size argument is only relevant to the remote fetch.
func (s FileCacheStore) GetMeta(name string, size int64) (json.RawMessage, error) {
	data, err := s.useCachedFile(name)
	if err == nil || data != nil {
		return data, nil
	}
	data, err = s.RemoteStore.GetMeta(name, size)
	if err != nil {
		return nil, err
	}
	// Caching is best effort: a cache write failure must not fail the read.
	s.cacheFile(name, data)
	return data, nil
}

View File

@ -1,7 +1,6 @@
package store
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
@ -9,16 +8,16 @@ import (
"path/filepath"
)
func NewFilesystemStore(baseDir, metaSubDir, metaExtension, targetsSubDir string) (MetadataStore, error) {
func NewFilesystemStore(baseDir, metaSubDir, metaExtension, targetsSubDir string) (*filesystemStore, error) {
metaDir := path.Join(baseDir, metaSubDir)
targetsDir := path.Join(baseDir, targetsSubDir)
// Make sure we can create the necessary dirs and they are writable
err := os.MkdirAll(metaDir, 0744)
err := os.MkdirAll(metaDir, 0700)
if err != nil {
return nil, err
}
err = os.MkdirAll(targetsDir, 0744)
err = os.MkdirAll(targetsDir, 0700)
if err != nil {
return nil, err
}
@ -38,7 +37,7 @@ type filesystemStore struct {
targetsDir string
}
func (f *filesystemStore) GetMeta(name string, size int64) (json.RawMessage, error) {
func (f *filesystemStore) GetMeta(name string, size int64) ([]byte, error) {
fileName := fmt.Sprintf("%s.%s", name, f.metaExtension)
path := filepath.Join(f.metaDir, fileName)
meta, err := ioutil.ReadFile(path)
@ -48,10 +47,20 @@ func (f *filesystemStore) GetMeta(name string, size int64) (json.RawMessage, err
return meta, nil
}
func (f *filesystemStore) SetMeta(name string, meta json.RawMessage) error {
// SetMultiMeta writes each of the given metadata blobs to disk via
// SetMeta, stopping at the first role whose write fails.
func (f *filesystemStore) SetMultiMeta(metas map[string][]byte) error {
	for name, blob := range metas {
		if err := f.SetMeta(name, blob); err != nil {
			return err
		}
	}
	return nil
}
func (f *filesystemStore) SetMeta(name string, meta []byte) error {
fileName := fmt.Sprintf("%s.%s", name, f.metaExtension)
path := filepath.Join(f.metaDir, fileName)
if err := ioutil.WriteFile(path, meta, 0644); err != nil {
if err := ioutil.WriteFile(path, meta, 0600); err != nil {
return err
}
return nil

View File

@ -0,0 +1,58 @@
package store
import (
"io/ioutil"
"os"
"path"
"testing"
"github.com/stretchr/testify/assert"
)
const testDir = "/tmp/testFilesystemStore/"
// TestNewFilesystemStore checks that store construction creates the
// metadata and targets directories with owner write+execute access.
func TestNewFilesystemStore(t *testing.T) {
	_, err := NewFilesystemStore(testDir, "metadata", "json", "targets")
	assert.Nil(t, err, "Initializing FilesystemStore returned unexpected error: %v", err)
	defer os.RemoveAll(testDir)

	info, err := os.Stat(path.Join(testDir, "metadata"))
	assert.Nil(t, err, "Error attempting to stat metadata dir: %v", err)
	assert.NotNil(t, info, "Nil FileInfo from stat on metadata dir")
	// Require the owner write and execute bits explicitly. The previous
	// check (0700&mode != 0) passed even for a read-only directory.
	assert.True(t, info.Mode()&0300 == 0300, "Metadata directory is not writable")

	info, err = os.Stat(path.Join(testDir, "targets"))
	assert.Nil(t, err, "Error attempting to stat targets dir: %v", err)
	assert.NotNil(t, info, "Nil FileInfo from stat on targets dir")
	assert.True(t, info.Mode()&0300 == 0300, "Targets directory is not writable")
}
// TestSetMeta verifies that SetMeta writes the given bytes verbatim to
// <metaDir>/<name>.json.
func TestSetMeta(t *testing.T) {
	s, err := NewFilesystemStore(testDir, "metadata", "json", "targets")
	assert.Nil(t, err, "Initializing FilesystemStore returned unexpected error: %v", err)
	defer os.RemoveAll(testDir)

	expected := []byte("test data")
	err = s.SetMeta("testMeta", expected)
	assert.Nil(t, err, "SetMeta returned unexpected error: %v", err)

	written, err := ioutil.ReadFile(path.Join(testDir, "metadata", "testMeta.json"))
	assert.Nil(t, err, "Error reading file: %v", err)
	assert.Equal(t, expected, written, "Content written to file was corrupted.")
}
// TestGetMeta verifies that GetMeta reads back exactly the bytes
// previously written to <metaDir>/<name>.json.
func TestGetMeta(t *testing.T) {
	s, err := NewFilesystemStore(testDir, "metadata", "json", "targets")
	assert.Nil(t, err, "Initializing FilesystemStore returned unexpected error: %v", err)
	defer os.RemoveAll(testDir)

	expected := []byte("test data")
	ioutil.WriteFile(path.Join(testDir, "metadata", "testMeta.json"), expected, 0600)

	read, err := s.GetMeta("testMeta", int64(len(expected)))
	assert.Nil(t, err, "GetMeta returned unexpected error: %v", err)
	assert.Equal(t, expected, read, "Content read from file was corrupted.")
}

View File

@ -2,11 +2,11 @@ package store
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/url"
"path"
@ -53,7 +53,7 @@ func NewHTTPStore(baseURL, metaPrefix, metaExtension, targetsPrefix, keyExtensio
// GetMeta downloads the named meta file with the given size. A short body
// is acceptable because in the case of timestamp.json, the size is a cap,
// not an exact length.
func (s HTTPStore) GetMeta(name string, size int64) (json.RawMessage, error) {
func (s HTTPStore) GetMeta(name string, size int64) ([]byte, error) {
url, err := s.buildMetaURL(name)
if err != nil {
return nil, err
@ -76,11 +76,11 @@ func (s HTTPStore) GetMeta(name string, size int64) (json.RawMessage, error) {
if err != nil {
return nil, err
}
return json.RawMessage(body), nil
return body, nil
}
func (s HTTPStore) SetMeta(name string, blob json.RawMessage) error {
url, err := s.buildMetaURL(name)
func (s HTTPStore) SetMeta(name string, blob []byte) error {
url, err := s.buildMetaURL("")
if err != nil {
return err
}
@ -92,8 +92,38 @@ func (s HTTPStore) SetMeta(name string, blob json.RawMessage) error {
return err
}
// SetMultiMeta uploads multiple metadata files to the server in a single
// multipart/form-data POST, one form file per role.
func (s HTTPStore) SetMultiMeta(metas map[string][]byte) error {
	url, err := s.buildMetaURL("")
	if err != nil {
		return err
	}
	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)
	for role, blob := range metas {
		part, err := writer.CreateFormFile("files", role)
		// this error was previously ignored (immediately overwritten by
		// the io.Copy error)
		if err != nil {
			return err
		}
		if _, err = io.Copy(part, bytes.NewBuffer(blob)); err != nil {
			return err
		}
	}
	if err = writer.Close(); err != nil {
		return err
	}
	req, err := http.NewRequest("POST", url.String(), body)
	// check the error before touching req: the old order dereferenced a
	// possibly-nil request
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())
	resp, err := s.roundTrip.RoundTrip(req)
	if err != nil {
		return err
	}
	// Close the response body so the underlying connection can be reused
	// (it was previously leaked).
	return resp.Body.Close()
}
func (s HTTPStore) buildMetaURL(name string) (*url.URL, error) {
filename := fmt.Sprintf("%s.%s", name, s.metaExtension)
var filename string
if name != "" {
filename = fmt.Sprintf("%s.%s", name, s.metaExtension)
}
uri := path.Join(s.metaPrefix, filename)
return s.buildURL(uri)
}

View File

@ -1,9 +1,14 @@
package store
import (
"bytes"
"encoding/hex"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/tent/canonical-json-go"
@ -18,7 +23,7 @@ func (rt *TestRoundTripper) RoundTrip(req *http.Request) (*http.Response, error)
return http.DefaultClient.Do(req)
}
func TestGetMeta(t *testing.T) {
func TestHTTPStoreGetMeta(t *testing.T) {
store, err := NewHTTPStore(
"http://mirror1.poly.edu/test-pypi/",
"metadata",
@ -62,6 +67,47 @@ func TestGetMeta(t *testing.T) {
}
// TestSetMultiMeta posts two metadata blobs through SetMultiMeta and has
// the test server verify the multipart upload round-trips both roles.
func TestSetMultiMeta(t *testing.T) {
	metas := map[string][]byte{
		"root":    []byte("root data"),
		"targets": []byte("targets data"),
	}
	handler := func(w http.ResponseWriter, r *http.Request) {
		reader, err := r.MultipartReader()
		if err != nil {
			t.Fatal(err)
		}
		// must be initialized: assigning into a nil map panics
		updates := make(map[string][]byte)
		for {
			part, err := reader.NextPart()
			if err == io.EOF {
				break
			}
			// non-EOF errors were previously unchecked
			if err != nil {
				t.Fatal(err)
			}
			role := strings.TrimSuffix(part.FileName(), ".json")
			updates[role], err = ioutil.ReadAll(part)
			if err != nil {
				t.Fatal(err)
			}
		}
		if d, ok := updates["root"]; !ok || !bytes.Equal(d, []byte("root data")) {
			t.Fatal("Did not find root in updates")
		}
		// was missing the ! on bytes.Equal (so a correct payload failed)
		// and reported the wrong role in the message
		if d, ok := updates["targets"]; !ok || !bytes.Equal(d, []byte("targets data")) {
			t.Fatal("Did not find targets in updates")
		}
	}
	server := httptest.NewServer(http.HandlerFunc(handler))
	defer server.Close()

	store, err := NewHTTPStore(server.URL, "metadata", "json", "targets", "key", http.DefaultTransport)
	if err != nil {
		t.Fatal(err)
	}
	// the return value was previously dropped, hiding upload failures
	if err := store.SetMultiMeta(metas); err != nil {
		t.Fatal(err)
	}
}
func TestPyCryptoRSAPSSCompat(t *testing.T) {
pubPem := "-----BEGIN PUBLIC KEY-----\nMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAnKuXZeefa2LmgxaL5NsM\nzKOHNe+x/nL6ik+lDBCTV6OdcwAhHQS+PONGhrChIUVR6Vth3hUCrreLzPO73Oo5\nVSCuRJ53UronENl6lsa5mFKP8StYLvIDITNvkoT3j52BJIjyNUK9UKY9As2TNqDf\nBEPIRp28ev/NViwGOEkBu2UAbwCIdnDXm8JQErCZA0Ydm7PKGgjLbFsFGrVzqXHK\n6pdzJXlhr9yap3UpgQ/iO9JtoEYB2EXsnSrPc9JRjR30bNHHtnVql3fvinXrAEwq\n3xmN4p+R4VGzfdQN+8Kl/IPjqWB535twhFYEG/B7Ze8IwbygBjK3co/KnOPqMUrM\nBI8ztvPiogz+MvXb8WvarZ6TMTh8ifZI96r7zzqyzjR1hJulEy3IsMGvz8XS2J0X\n7sXoaqszEtXdq5ef5zKVxkiyIQZcbPgmpHLq4MgfdryuVVc/RPASoRIXG4lKaTJj\n1ANMFPxDQpHudCLxwCzjCb+sVa20HBRPTnzo8LSZkI6jAgMBAAE=\n-----END PUBLIC KEY-----"
//privPem := "-----BEGIN RSA PRIVATE KEY-----\nMIIG4wIBAAKCAYEAnKuXZeefa2LmgxaL5NsMzKOHNe+x/nL6ik+lDBCTV6OdcwAh\nHQS+PONGhrChIUVR6Vth3hUCrreLzPO73Oo5VSCuRJ53UronENl6lsa5mFKP8StY\nLvIDITNvkoT3j52BJIjyNUK9UKY9As2TNqDfBEPIRp28ev/NViwGOEkBu2UAbwCI\ndnDXm8JQErCZA0Ydm7PKGgjLbFsFGrVzqXHK6pdzJXlhr9yap3UpgQ/iO9JtoEYB\n2EXsnSrPc9JRjR30bNHHtnVql3fvinXrAEwq3xmN4p+R4VGzfdQN+8Kl/IPjqWB5\n35twhFYEG/B7Ze8IwbygBjK3co/KnOPqMUrMBI8ztvPiogz+MvXb8WvarZ6TMTh8\nifZI96r7zzqyzjR1hJulEy3IsMGvz8XS2J0X7sXoaqszEtXdq5ef5zKVxkiyIQZc\nbPgmpHLq4MgfdryuVVc/RPASoRIXG4lKaTJj1ANMFPxDQpHudCLxwCzjCb+sVa20\nHBRPTnzo8LSZkI6jAgMBAAECggGAdzyI7z/HLt2IfoAsXDLynNRgVYZluzgawiU3\ngeUjnnGhpSKWERXJC2IWDPBk0YOGgcnQxErNTdfXiFZ/xfRlSgqjVwob2lRe4w4B\npLr+CZXcgznv1VrPUvdolOSp3R2Mahfn7u0qVDUQ/g8jWVI6KW7FACmQhzQkPM8o\ntLGrpcmK+PA465uaHKtYccEB02ILqrK8v++tknv7eIZczrsSKlS1h/HHjSaidYxP\n2DAUiF7wnChrwwQEvuEUHhwVgQcoDMBoow0zwHdbFiFO2ZT54H2oiJWLhpR/x6RK\ngM1seqoPH2sYErPJACMcYsMtF4Tx7b5c4WSj3vDCGb+jeqnNS6nFC3aMnv75mUS2\nYDPU1heJFd8pNHVf0RDejLZZUiJSnXf3vpOxt9Xv2+4He0jeMfLV7zX0mO2Ni3MJ\nx6PiVy4xerHImOuuHzSla5crOq2ECiAxd1wEOFDRD2LRHzfhpk1ghiA5xA1qwc7Z\neRnkVfoy6PPZ4lZakZTm0p8YCQURAoHBAMUIC/7vnayLae7POmgy+np/ty7iMfyd\nV1eO6LTO21KAaGGlhaY26WD/5LcG2FUgc5jKKahprGrmiNLzLUeQPckJmuijSEVM\nl/4DlRvCo867l7fLaVqYzsQBBdeGIFNiT+FBOd8atff87ZBEfH/rXbDi7METD/VR\n4TdblnCsKYAXEJUdkw3IK7SUGERiQZIwKXrH/Map4ibDrljJ71iCgEureU0DBwcg\nwLftmjGMISoLscdRxeubX5uf/yxtHBJeRwKBwQDLjzHhb4gNGdBHUl4hZPAGCq1V\nLX/GpfoOVObW64Lud+tI6N9GNua5/vWduL7MWWOzDTMZysganhKwsJCY5SqAA9p0\nb6ohusf9i1nUnOa2F2j+weuYPXrTYm+ZrESBBdaEJPuj3R5YHVujrBA9Xe0kVOe3\nne151A+0xJOI3tX9CttIaQAsXR7cMDinkDITw6i7X4olRMPCSixHLW97cDsVDRGt\necO1d4dP3OGscN+vKCoL6tDKDotzWHYPwjH47sUCgcEAoVI8WCiipbKkMnaTsNsE\ngKXvO0DSgq3k5HjLCbdQldUzIbgfnH7bSKNcBYtiNxjR7OihgRW8qO5GWsnmafCs\n1dy6a/2835id3cnbHRaZflvUFhVDFn2E1bCsstFLyFn3Y0w/cO9yzC/X5sZcVXRF\nit3R0Selakv3JZckru4XMJwx5JWJYMBjIIAc+miknWg3niL+UT6pPun65xG3mXWI\nS+yC7c4rw+dKQ44UMLs2MDHRBoxqi8T0W/x9NkfDszpjAoHAclH7S4ZdvC3RIR0L\nLGoJuvroGbwx1JiGdOINuooNwGuswge2zTIsJi0
gN/H3hcB2E6rIFiYid4BrMrwW\nmSeq1LZVS6siu0qw4p4OVy+/CmjfWKQD8j4k6u6PipiK6IMk1JYIlSCr2AS04JjT\njgNgGVVtxVt2cUM9huIXkXjEaRZdzK7boA60NCkIyGJdHWh3LLQdW4zg/A64C0lj\nIMoJBGuQkAKgfRuh7KI6Q6Qom7BM3OCFXdUJUEBQHc2MTyeZAoHAJdBQGBn1RFZ+\nn75AnbTMZJ6Twp2fVjzWUz/+rnXFlo87ynA18MR2BzaDST4Bvda29UBFGb32Mux9\nOHukqLgIE5jDuqWjy4B5eCoxZf/OvwlgXkX9+gprGR3axn/PZBFPbFB4ZmjbWLzn\nbocn7FJCXf+Cm0cMmv1jIIxej19MUU/duq9iq4RkHY2LG+KrSEQIUVmImCftXdN3\n/qNP5JetY0eH6C+KRc8JqDB0nvbqZNOgYXOfYXo/5Gk8XIHTFihm\n-----END RSA PRIVATE KEY-----"

View File

@ -1,7 +1,6 @@
package store
import (
"encoding/json"
"io"
"github.com/endophage/gotuf/data"
@ -10,8 +9,9 @@ import (
type targetsWalkFunc func(path string, meta data.FileMeta) error
type MetadataStore interface {
GetMeta(name string, size int64) (json.RawMessage, error)
SetMeta(name string, blob json.RawMessage) error
GetMeta(name string, size int64) ([]byte, error)
SetMeta(name string, blob []byte) error
SetMultiMeta(map[string][]byte) error
}
type PublicKeyStore interface {

View File

@ -2,15 +2,17 @@ package store
import (
"bytes"
"encoding/json"
"github.com/endophage/gotuf/data"
"github.com/endophage/gotuf/errors"
)
func MemoryStore(meta map[string]json.RawMessage, files map[string][]byte) LocalStore {
func NewMemoryStore(meta map[string][]byte, files map[string][]byte) LocalStore {
if meta == nil {
meta = make(map[string]json.RawMessage)
meta = make(map[string][]byte)
}
if files == nil {
files = make(map[string][]byte)
}
return &memoryStore{
meta: meta,
@ -20,20 +22,27 @@ func MemoryStore(meta map[string]json.RawMessage, files map[string][]byte) Local
}
type memoryStore struct {
meta map[string]json.RawMessage
meta map[string][]byte
files map[string][]byte
keys map[string][]data.PrivateKey
}
func (m *memoryStore) GetMeta(name string, size int64) (json.RawMessage, error) {
func (m *memoryStore) GetMeta(name string, size int64) ([]byte, error) {
return m.meta[name], nil
}
func (m *memoryStore) SetMeta(name string, meta json.RawMessage) error {
func (m *memoryStore) SetMeta(name string, meta []byte) error {
m.meta[name] = meta
return nil
}
func (m *memoryStore) SetMultiMeta(metas map[string][]byte) error {
for role, blob := range metas {
m.SetMeta(role, blob)
}
return nil
}
func (m *memoryStore) AddBlob(path string, meta data.FileMeta) {
}
@ -68,7 +77,7 @@ func (m *memoryStore) WalkStagedTargets(paths []string, targetsFn targetsWalkFun
return nil
}
func (m *memoryStore) Commit(map[string]json.RawMessage, bool, map[string]data.Hashes) error {
func (m *memoryStore) Commit(map[string][]byte, bool, map[string]data.Hashes) error {
return nil
}

View File

@ -15,8 +15,8 @@ func SampleMeta() data.FileMeta {
meta := data.FileMeta{
Length: 1,
Hashes: data.Hashes{
"sha256": data.HexBytes{0x01, 0x02},
"sha512": data.HexBytes{0x03, 0x04},
"sha256": []byte{0x01, 0x02},
"sha512": []byte{0x03, 0x04},
},
}
return meta

View File

@ -453,9 +453,8 @@ func (tr *TufRepo) UpdateTimestamp(s *data.Signed) error {
func (tr *TufRepo) SignRoot(expires time.Time, cryptoService signed.CryptoService) (*data.Signed, error) {
logrus.Debug("signing root...")
if tr.Root.Dirty {
tr.Root.Signed.Expires = expires
tr.Root.Signed.Version++
}
root := tr.keysDB.GetRole(data.ValidRoles["root"])
signed, err := tr.Root.ToSigned()
if err != nil {
@ -471,7 +470,7 @@ func (tr *TufRepo) SignRoot(expires time.Time, cryptoService signed.CryptoServic
func (tr *TufRepo) SignTargets(role string, expires time.Time, cryptoService signed.CryptoService) (*data.Signed, error) {
logrus.Debugf("sign targets called for role %s", role)
if tr.Targets[role].Dirty {
tr.Targets[role].Signed.Expires = expires
tr.Targets[role].Signed.Version++
signed, err := tr.Targets[role].ToSigned()
if err != nil {
@ -486,20 +485,11 @@ func (tr *TufRepo) SignTargets(role string, expires time.Time, cryptoService sig
}
tr.Targets[role].Signatures = signed.Signatures
return signed, nil
} else {
signed, err := tr.Targets[role].ToSigned()
if err != nil {
logrus.Debug("errored getting targets data.Signed object")
return nil, err
}
return signed, nil
}
}
func (tr *TufRepo) SignSnapshot(expires time.Time, cryptoService signed.CryptoService) (*data.Signed, error) {
logrus.Debug("signing snapshot...")
if tr.Root.Dirty {
signedRoot, err := tr.SignRoot(data.DefaultExpires("root"), cryptoService)
signedRoot, err := tr.Root.ToSigned()
if err != nil {
return nil, err
}
@ -508,12 +498,8 @@ func (tr *TufRepo) SignSnapshot(expires time.Time, cryptoService signed.CryptoSe
return nil, err
}
tr.Root.Dirty = false // root dirty until changes captures in snapshot
}
for role, targets := range tr.Targets {
if !targets.Dirty {
continue
}
signedTargets, err := tr.SignTargets(role, data.DefaultExpires("targets"), cryptoService)
signedTargets, err := targets.ToSigned()
if err != nil {
return nil, err
}
@ -521,9 +507,8 @@ func (tr *TufRepo) SignSnapshot(expires time.Time, cryptoService signed.CryptoSe
if err != nil {
return nil, err
}
tr.Targets[role].Dirty = false // target role dirty until changes captured in snapshot
}
if tr.Snapshot.Dirty {
tr.Snapshot.Signed.Expires = expires
tr.Snapshot.Signed.Version++
signed, err := tr.Snapshot.ToSigned()
if err != nil {
@ -536,19 +521,11 @@ func (tr *TufRepo) SignSnapshot(expires time.Time, cryptoService signed.CryptoSe
}
tr.Snapshot.Signatures = signed.Signatures
return signed, nil
} else {
signed, err := tr.Snapshot.ToSigned()
if err != nil {
return nil, err
}
return signed, nil
}
}
func (tr *TufRepo) SignTimestamp(expires time.Time, cryptoService signed.CryptoService) (*data.Signed, error) {
logrus.Debug("SignTimestamp")
if tr.Snapshot.Dirty {
signedSnapshot, err := tr.SignSnapshot(data.DefaultExpires("snapshot"), cryptoService)
signedSnapshot, err := tr.Snapshot.ToSigned()
if err != nil {
return nil, err
}
@ -556,8 +533,7 @@ func (tr *TufRepo) SignTimestamp(expires time.Time, cryptoService signed.CryptoS
if err != nil {
return nil, err
}
}
if tr.Timestamp.Dirty {
tr.Timestamp.Signed.Expires = expires
tr.Timestamp.Signed.Version++
signed, err := tr.Timestamp.ToSigned()
if err != nil {
@ -571,13 +547,6 @@ func (tr *TufRepo) SignTimestamp(expires time.Time, cryptoService signed.CryptoS
tr.Timestamp.Signatures = signed.Signatures
tr.Snapshot.Dirty = false // snapshot is dirty until changes have been captured in timestamp
return signed, nil
} else {
signed, err := tr.Timestamp.ToSigned()
if err != nil {
return nil, err
}
return signed, nil
}
}
func (tr TufRepo) sign(signedData *data.Signed, role data.Role, cryptoService signed.CryptoService) (*data.Signed, error) {

View File

@ -15,12 +15,12 @@ var ErrWrongLength = errors.New("wrong length")
type ErrWrongHash struct {
Type string
Expected data.HexBytes
Actual data.HexBytes
Expected []byte
Actual []byte
}
func (e ErrWrongHash) Error() string {
return fmt.Sprintf("wrong %s hash, expected %s got %s", e.Type, hex.EncodeToString(e.Expected), hex.EncodeToString(e.Actual))
return fmt.Sprintf("wrong %s hash, expected %#x got %#x", e.Type, e.Expected, e.Actual)
}
type ErrNoCommonHash struct {
@ -75,7 +75,7 @@ func NormalizeTarget(path string) string {
func HashedPaths(path string, hashes data.Hashes) []string {
paths := make([]string, 0, len(hashes))
for _, hash := range hashes {
hashedPath := filepath.Join(filepath.Dir(path), hash.String()+"."+filepath.Base(path))
hashedPath := filepath.Join(filepath.Dir(path), hex.EncodeToString(hash)+"."+filepath.Base(path))
paths = append(paths, hashedPath)
}
return paths

View File

@ -23,7 +23,7 @@ func (UtilSuite) TestFileMetaEqual(c *C) {
err func(test) error
}
fileMeta := func(length int64, hashes map[string]string) data.FileMeta {
m := data.FileMeta{Length: length, Hashes: make(map[string]data.HexBytes, len(hashes))}
m := data.FileMeta{Length: length, Hashes: make(map[string][]byte, len(hashes))}
for typ, hash := range hashes {
v, err := hex.DecodeString(hash)
c.Assert(err, IsNil)
@ -76,7 +76,7 @@ func (UtilSuite) TestNormalizeTarget(c *C) {
}
func (UtilSuite) TestHashedPaths(c *C) {
hexBytes := func(s string) data.HexBytes {
hexBytes := func(s string) []byte {
v, err := hex.DecodeString(s)
c.Assert(err, IsNil)
return v

View File

@ -23,6 +23,8 @@ import (
"github.com/endophage/gotuf/store"
)
const maxSize = 5 << 20
// ErrRepoNotInitialized is returned when trying to can publish on an uninitialized
// notary repository
type ErrRepoNotInitialized struct{}
@ -286,9 +288,11 @@ func (r *NotaryRepository) GetTargetByName(name string) (*Target, error) {
return nil, err
}
meta := c.TargetMeta(name)
meta, err := c.TargetMeta(name)
if meta == nil {
return nil, errors.New("Meta is nil for target")
} else if err != nil {
return nil, err
}
return &Target{Name: name, Hashes: meta.Hashes, Length: meta.Length}, nil
@ -310,6 +314,7 @@ func (r *NotaryRepository) Publish(getPass passwordRetriever) error {
// Repo hasn't been initialized, It must be initialized before
// it can be published. Return an error and let caller determine
// what it wants to do.
logrus.Error(err.Error())
logrus.Debug("Repository not initialized during Publish")
return &ErrRepoNotInitialized{}
}
@ -390,28 +395,18 @@ func (r *NotaryRepository) Publish(getPass passwordRetriever) error {
if err != nil {
return err
}
update := make(map[string][]byte)
// if we need to update the root, marshal it and push the update to remote
if updateRoot {
rootJSON, err := json.Marshal(root)
if err != nil {
return err
}
err = remote.SetMeta("root", rootJSON)
if err != nil {
return err
update["root"] = rootJSON
}
}
err = remote.SetMeta("targets", targetsJSON)
if err != nil {
return err
}
err = remote.SetMeta("snapshot", snapshotJSON)
if err != nil {
return err
}
return nil
update["targets"] = targetsJSON
update["snapshot"] = snapshotJSON
return remote.SetMultiMeta(update)
}
func (r *NotaryRepository) bootstrapRepo() error {
@ -500,13 +495,33 @@ func (r *NotaryRepository) snapshot() error {
}
func (r *NotaryRepository) bootstrapClient() (*tufclient.Client, error) {
remote, err := getRemoteStore(r.baseURL, r.gun, r.roundTrip)
var cache store.MetadataStore
cache, err := store.NewFilesystemStore(
filepath.Join(r.tufRepoPath, "cache"),
"metadata",
"json",
"targets",
)
if err != nil {
return nil, err
cache = store.NewMemoryStore(nil, nil)
}
rootJSON, err := remote.GetMeta("root", 5<<20)
var rootJSON []byte
err = nil
remote, err := getRemoteStore(r.baseURL, r.gun, r.roundTrip)
if err == nil {
// if remote store successfully set up, try and get root from remote
rootJSON, err = remote.GetMeta("root", maxSize)
}
// if remote store couldn't be setup, or we failed to get a root from it
// load the root from cache (offline operation)
if err != nil {
return nil, err
rootJSON, err = cache.GetMeta("root", maxSize)
if err != nil {
// if cache didn't return a root, we cannot proceed
return nil, &store.ErrMetaNotFound{}
}
}
root := &data.Signed{}
err = json.Unmarshal(rootJSON, root)
@ -531,5 +546,6 @@ func (r *NotaryRepository) bootstrapClient() (*tufclient.Client, error) {
r.tufRepo,
remote,
kdb,
cache,
), nil
}

File diff suppressed because one or more lines are too long

View File

@ -317,33 +317,37 @@ func testAddListTarget(t *testing.T, rootType data.KeyAlgorithm) {
repo.KeyStoreManager.NonRootKeyStore().AddKey(filepath.Join(filepath.FromSlash(gun), tempKey.ID()), &tempKey)
mux.HandleFunc("/v2/docker.com/notary/_trust/tuf/root.json", func(w http.ResponseWriter, r *http.Request) {
// Because ListTargets will clear this
savedTUFRepo := repo.tufRepo
rootJSONFile := filepath.Join(tempBaseDir, "tuf", filepath.FromSlash(gun), "metadata", "root.json")
rootFileBytes, err := ioutil.ReadFile(rootJSONFile)
signedTargets, err := savedTUFRepo.SignTargets("targets", data.DefaultExpires("targets"), nil)
assert.NoError(t, err)
signedSnapshot, err := savedTUFRepo.SignSnapshot(data.DefaultExpires("snapshot"), nil)
assert.NoError(t, err)
signedTimestamp, err := savedTUFRepo.SignTimestamp(data.DefaultExpires("timestamp"), nil)
assert.NoError(t, err)
mux.HandleFunc("/v2/docker.com/notary/_trust/tuf/root.json", func(w http.ResponseWriter, r *http.Request) {
assert.NoError(t, err)
fmt.Fprint(w, string(rootFileBytes))
})
// Because ListTargets will clear this
savedTUFRepo := repo.tufRepo
mux.HandleFunc("/v2/docker.com/notary/_trust/tuf/timestamp.json", func(w http.ResponseWriter, r *http.Request) {
signedTimestamp, err := savedTUFRepo.SignTimestamp(data.DefaultExpires("timestamp"), nil)
assert.NoError(t, err)
timestampJSON, _ := json.Marshal(signedTimestamp)
fmt.Fprint(w, string(timestampJSON))
})
mux.HandleFunc("/v2/docker.com/notary/_trust/tuf/snapshot.json", func(w http.ResponseWriter, r *http.Request) {
signedSnapshot, err := savedTUFRepo.SignSnapshot(data.DefaultExpires("snapshot"), nil)
assert.NoError(t, err)
snapshotJSON, _ := json.Marshal(signedSnapshot)
fmt.Fprint(w, string(snapshotJSON))
})
mux.HandleFunc("/v2/docker.com/notary/_trust/tuf/targets.json", func(w http.ResponseWriter, r *http.Request) {
signedTargets, err := savedTUFRepo.SignTargets("targets", data.DefaultExpires("targets"), nil)
assert.NoError(t, err)
targetsJSON, _ := json.Marshal(signedTargets)
fmt.Fprint(w, string(targetsJSON))
})

View File

@ -1,8 +1,8 @@
{
"server": {
"addr": ":4443",
"tls_key_file": "/go/src/github.com/docker/notary/fixtures/notary-server.key",
"tls_cert_file": "/go/src/github.com/docker/notary/fixtures/notary-server.crt"
"tls_key_file": "./fixtures/notary-server.key",
"tls_cert_file": "./fixtures/notary-server.crt"
},
"trust_service": {
"type": "local",

View File

@ -165,7 +165,7 @@ func tufList(cmd *cobra.Command, args []string) {
// Print all the available targets
for _, t := range targetList {
fmt.Println(t.Name, " ", t.Hashes["sha256"], " ", t.Length)
fmt.Printf("%s %x %d\n", t.Name, t.Hashes["sha256"], t.Length)
}
}
@ -188,7 +188,7 @@ func tufLookup(cmd *cobra.Command, args []string) {
fatalf(err.Error())
}
fmt.Println(target.Name, fmt.Sprintf("sha256:%s", target.Hashes["sha256"]), target.Length)
fmt.Println(target.Name, fmt.Sprintf("sha256:%x", target.Hashes["sha256"]), target.Length)
}
func tufPublish(cmd *cobra.Command, args []string) {

View File

@ -288,11 +288,12 @@ func (km *KeyStoreManager) ValidateRoot(root *data.Signed, dnsName string) error
}
// TODO(david): change hardcoded minversion on TUF.
newRootKey, err := signed.VerifyRoot(root, 0, validKeys, 1)
err = signed.VerifyRoot(root, 0, validKeys)
if err != nil {
return err
}
var newRootKey data.PublicKey
// VerifyRoot returns a non-nil value if there is a root key rotation happening.
// If this happens, we should replace the old root of trust with the new one
if newRootKey != nil {