mirror of https://github.com/docker/docs.git
Merge pull request #519 from endophage/consistent_download
Consistent Download
commit 9f67e93381
@@ -9,6 +9,7 @@ import (
    "net/url"
    "os"
    "path/filepath"
    "strings"
    "time"

    "github.com/Sirupsen/logrus"
@@ -885,7 +886,10 @@ func (r *NotaryRepository) Update(forWrite bool) (*tufclient.Client, error) {
    }
    err = c.Update()
    if err != nil {
        if notFound, ok := err.(store.ErrMetaNotFound); ok && notFound.Resource == data.CanonicalRootRole {
        // notFound.Resource may include a checksum so when the role is root,
        // it will be root.json or root.<checksum>.json. Therefore the best we can
        // do is match a "root." prefix
        if notFound, ok := err.(store.ErrMetaNotFound); ok && strings.HasPrefix(notFound.Resource, data.CanonicalRootRole+".") {
            return nil, r.errRepositoryNotExist()
        }
        return nil, err
@@ -3,6 +3,8 @@ package client
import (
    "bytes"
    "crypto/rand"
    "crypto/sha256"
    "encoding/hex"
    regJson "encoding/json"
    "fmt"
    "io/ioutil"
@@ -1062,41 +1064,76 @@ func fakeServerData(t *testing.T, repo *NotaryRepository, mux *http.ServeMux,
        data.DefaultExpires("timestamp"))
    assert.NoError(t, err)

    timestampJSON, _ := json.Marshal(signedTimestamp)
    snapshotJSON, _ := json.Marshal(signedSnapshot)
    targetsJSON, _ := json.Marshal(signedTargets)
    level1JSON, _ := json.Marshal(signedLevel1)
    level2JSON, _ := json.Marshal(signedLevel2)

    cksmBytes := sha256.Sum256(rootFileBytes)
    rootChecksum := hex.EncodeToString(cksmBytes[:])

    cksmBytes = sha256.Sum256(snapshotJSON)
    snapshotChecksum := hex.EncodeToString(cksmBytes[:])

    cksmBytes = sha256.Sum256(targetsJSON)
    targetsChecksum := hex.EncodeToString(cksmBytes[:])

    cksmBytes = sha256.Sum256(level1JSON)
    level1Checksum := hex.EncodeToString(cksmBytes[:])

    cksmBytes = sha256.Sum256(level2JSON)
    level2Checksum := hex.EncodeToString(cksmBytes[:])

    mux.HandleFunc("/v2/docker.com/notary/_trust/tuf/root.json",
        func(w http.ResponseWriter, r *http.Request) {
            assert.NoError(t, err)
            fmt.Fprint(w, string(rootFileBytes))
        })
    mux.HandleFunc("/v2/docker.com/notary/_trust/tuf/root."+rootChecksum+".json",
        func(w http.ResponseWriter, r *http.Request) {
            assert.NoError(t, err)
            fmt.Fprint(w, string(rootFileBytes))
        })

    mux.HandleFunc("/v2/docker.com/notary/_trust/tuf/timestamp.json",
        func(w http.ResponseWriter, r *http.Request) {
            timestampJSON, _ := json.Marshal(signedTimestamp)
            fmt.Fprint(w, string(timestampJSON))
        })

    mux.HandleFunc("/v2/docker.com/notary/_trust/tuf/snapshot.json",
        func(w http.ResponseWriter, r *http.Request) {
            snapshotJSON, _ := json.Marshal(signedSnapshot)
            fmt.Fprint(w, string(snapshotJSON))
        })
    mux.HandleFunc("/v2/docker.com/notary/_trust/tuf/snapshot."+snapshotChecksum+".json",
        func(w http.ResponseWriter, r *http.Request) {
            fmt.Fprint(w, string(snapshotJSON))
        })

    mux.HandleFunc("/v2/docker.com/notary/_trust/tuf/targets.json",
        func(w http.ResponseWriter, r *http.Request) {
            targetsJSON, _ := json.Marshal(signedTargets)
            fmt.Fprint(w, string(targetsJSON))
        })
    mux.HandleFunc("/v2/docker.com/notary/_trust/tuf/targets."+targetsChecksum+".json",
        func(w http.ResponseWriter, r *http.Request) {
            fmt.Fprint(w, string(targetsJSON))
        })

    mux.HandleFunc("/v2/docker.com/notary/_trust/tuf/targets/level1.json",
        func(w http.ResponseWriter, r *http.Request) {
            level1JSON, err := json.Marshal(signedLevel1)
            assert.NoError(t, err)
            fmt.Fprint(w, string(level1JSON))
        })
    mux.HandleFunc("/v2/docker.com/notary/_trust/tuf/targets/level1."+level1Checksum+".json",
        func(w http.ResponseWriter, r *http.Request) {
            fmt.Fprint(w, string(level1JSON))
        })

    mux.HandleFunc("/v2/docker.com/notary/_trust/tuf/targets/level2.json",
        func(w http.ResponseWriter, r *http.Request) {
            level2JSON, err := json.Marshal(signedLevel2)
            assert.NoError(t, err)
            fmt.Fprint(w, string(level2JSON))
        })
    mux.HandleFunc("/v2/docker.com/notary/_trust/tuf/targets/level2."+level2Checksum+".json",
        func(w http.ResponseWriter, r *http.Request) {
            fmt.Fprint(w, string(level2JSON))
        })
}
@@ -9,6 +9,7 @@ import (
    "net/http/httptest"
    "os"
    "reflect"
    "strings"
    "testing"
    "time"
@@ -61,17 +62,18 @@ func bumpVersions(t *testing.T, s *testutils.MetadataSwizzler, offset int) {
// create a server that just serves static metadata files from a metaStore
func readOnlyServer(t *testing.T, cache store.MetadataStore, notFoundStatus int) *httptest.Server {
    m := mux.NewRouter()
    m.HandleFunc("/v2/docker.com/notary/_trust/tuf/{role:.*}.json",
        func(w http.ResponseWriter, r *http.Request) {
            vars := mux.Vars(r)
            metaBytes, err := cache.GetMeta(vars["role"], -1)
            if _, ok := err.(store.ErrMetaNotFound); ok {
                w.WriteHeader(notFoundStatus)
            } else {
                require.NoError(t, err)
                w.Write(metaBytes)
            }
        })
    handler := func(w http.ResponseWriter, r *http.Request) {
        vars := mux.Vars(r)
        metaBytes, err := cache.GetMeta(vars["role"], -1)
        if _, ok := err.(store.ErrMetaNotFound); ok {
            w.WriteHeader(notFoundStatus)
        } else {
            require.NoError(t, err)
            w.Write(metaBytes)
        }
    }
    m.HandleFunc("/v2/docker.com/notary/_trust/tuf/{role:.*}.{checksum:.*}.json", handler)
    m.HandleFunc("/v2/docker.com/notary/_trust/tuf/{role:.*}.json", handler)
    return httptest.NewServer(m)
}
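As a quick, hedged illustration of how a test in the same package could exercise both routes that readOnlyServer now registers; the seeded metadata, test name, and assertions below are assumptions, not part of this change:

// Illustrative sketch only: hits the plain and checksum-qualified paths.
package client

import (
    "crypto/sha256"
    "encoding/hex"
    "net/http"
    "testing"

    "github.com/docker/notary/tuf/store"
    "github.com/stretchr/testify/require"
)

func TestReadOnlyServerServesBothNames(t *testing.T) {
    rootJSON := []byte(`{"signed": {}}`)
    cache := store.NewMemoryStore(map[string][]byte{"root": rootJSON}, nil)
    server := readOnlyServer(t, cache, http.StatusNotFound)
    defer server.Close()

    // plain role name
    resp, err := http.Get(server.URL + "/v2/docker.com/notary/_trust/tuf/root.json")
    require.NoError(t, err)
    require.Equal(t, http.StatusOK, resp.StatusCode)
    resp.Body.Close()

    // checksum-qualified ("consistent") name
    checksum := sha256.Sum256(rootJSON)
    resp, err = http.Get(server.URL + "/v2/docker.com/notary/_trust/tuf/root." +
        hex.EncodeToString(checksum[:]) + ".json")
    require.NoError(t, err)
    require.Equal(t, http.StatusOK, resp.StatusCode)
    resp.Body.Close()
}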
@@ -632,7 +634,7 @@ func testUpdateRemoteNon200Error(t *testing.T, opts updateOpts, errExpected inte
        require.IsType(t, errExpected, err, "wrong update error when %s is %v (forWrite: %v)",
            opts.role, opts.notFoundCode, opts.forWrite)
        if notFound, ok := err.(store.ErrMetaNotFound); ok {
            require.Equal(t, opts.role, notFound.Resource, "wrong resource missing (forWrite: %v)", opts.forWrite)
            require.True(t, strings.HasPrefix(notFound.Resource, opts.role), "wrong resource missing (forWrite: %v)", opts.forWrite)
        }
    }
}
@@ -89,14 +89,14 @@ func RootHandler(ac auth.AccessController, ctx context.Context, trust signed.Cry
        prometheus.InstrumentHandlerWithOpts(
            prometheusOpts("UpdateTuf"),
            hand(handlers.AtomicUpdateHandler, "push", "pull")))
    r.Methods("GET").Path("/v2/{imageName:.*}/_trust/tuf/{tufRole:root|targets(?:/[^/\\s]+)*|snapshot|timestamp}.json").Handler(
        prometheus.InstrumentHandlerWithOpts(
            prometheusOpts("GetRole"),
            hand(handlers.GetHandler, "pull")))
    r.Methods("GET").Path("/v2/{imageName:.*}/_trust/tuf/{tufRole:root|targets(?:/[^/\\s]+)*|snapshot|timestamp}.{checksum:[a-fA-F0-9]{64}|[a-fA-F0-9]{96}|[a-fA-F0-9]{128}}.json").Handler(
        prometheus.InstrumentHandlerWithOpts(
            prometheusOpts("GetRoleByHash"),
            hand(handlers.GetHandler, "pull")))
    r.Methods("GET").Path("/v2/{imageName:.*}/_trust/tuf/{tufRole:root|targets(?:/[^/\\s]+)*|snapshot|timestamp}.json").Handler(
        prometheus.InstrumentHandlerWithOpts(
            prometheusOpts("GetRole"),
            hand(handlers.GetHandler, "pull")))
    r.Methods("GET").Path(
        "/v2/{imageName:.*}/_trust/tuf/{tufRole:snapshot|timestamp}.key").Handler(
        prometheus.InstrumentHandlerWithOpts(
@@ -6,9 +6,6 @@ import (
    "encoding/hex"
    "encoding/json"
    "fmt"
    "io"
    "path"
    "strings"

    "github.com/Sirupsen/logrus"
    "github.com/docker/notary"
@@ -179,6 +176,7 @@ func (c *Client) downloadRoot() error {
    var s *data.Signed
    var raw []byte
    if download {
        // use consistent download if we have the checksum.
        raw, s, err = c.downloadSigned(role, size, expectedSha256)
        if err != nil {
            return err
@@ -422,7 +420,8 @@ func (c *Client) downloadTargets(role string) error {
}

func (c *Client) downloadSigned(role string, size int64, expectedSha256 []byte) ([]byte, *data.Signed, error) {
    raw, err := c.remote.GetMeta(role, size)
    rolePath := utils.ConsistentName(role, expectedSha256)
    raw, err := c.remote.GetMeta(rolePath, size)
    if err != nil {
        return nil, nil, err
    }
@@ -481,11 +480,7 @@ func (c Client) getTargetsFile(role string, keyIDs []string, snapshotMeta data.F
    size := snapshotMeta[role].Length
    var s *data.Signed
    if download {
        rolePath, err := c.RoleTargetsPath(role, hex.EncodeToString(expectedSha256), consistent)
        if err != nil {
            return nil, err
        }
        raw, s, err = c.downloadSigned(rolePath, size, expectedSha256)
        raw, s, err = c.downloadSigned(role, size, expectedSha256)
        if err != nil {
            return nil, err
        }
@@ -509,24 +504,6 @@ func (c Client) getTargetsFile(role string, keyIDs []string, snapshotMeta data.F
    return s, nil
}

// RoleTargetsPath generates the appropriate HTTP URL for the targets file,
// based on whether the repo is marked as consistent.
func (c Client) RoleTargetsPath(role string, hashSha256 string, consistent bool) (string, error) {
    if consistent {
        // Use path instead of filepath since we refer to the TUF role directly instead of its target files
        dir := path.Dir(role)
        if strings.Contains(role, "/") {
            lastSlashIdx := strings.LastIndex(role, "/")
            role = role[lastSlashIdx+1:]
        }
        role = path.Join(
            dir,
            fmt.Sprintf("%s.%s.json", hashSha256, role),
        )
    }
    return role, nil
}

// TargetMeta ensures the repo is up to date. It assumes downloadTargets
// has already downloaded all delegated roles
func (c Client) TargetMeta(role, path string, excludeRoles ...string) (*data.FileMeta, string) {
@@ -563,18 +540,3 @@ func (c Client) TargetMeta(role, path string, excludeRoles ...string) (*data.Fil
    }
    return meta, ""
}

// DownloadTarget downloads the target to dst from the remote
func (c Client) DownloadTarget(dst io.Writer, path string, meta *data.FileMeta) error {
    reader, err := c.remote.GetTarget(path)
    if err != nil {
        return err
    }
    defer reader.Close()
    r := io.TeeReader(
        io.LimitReader(reader, meta.Length),
        dst,
    )
    err = utils.ValidateTarget(r, meta)
    return err
}
@@ -235,13 +235,12 @@ func TestCheckRootExpired(t *testing.T) {
func TestChecksumMismatch(t *testing.T) {
    repo := tuf.NewRepo(nil, nil)
    localStorage := store.NewMemoryStore(nil, nil)
    remoteStorage := store.NewMemoryStore(nil, nil)
    remoteStorage := testutils.NewCorruptingMemoryStore(nil, nil)
    client := NewClient(repo, remoteStorage, nil, localStorage)

    sampleTargets := data.NewTargets()
    orig, err := json.Marshal(sampleTargets)
    origSha256 := sha256.Sum256(orig)
    orig[0] = '}' // corrupt data, should be a {
    assert.NoError(t, err)

    remoteStorage.SetMeta("targets", orig)
@@ -270,7 +269,7 @@ func TestChecksumMatch(t *testing.T) {
func TestSizeMismatchLong(t *testing.T) {
    repo := tuf.NewRepo(nil, nil)
    localStorage := store.NewMemoryStore(nil, nil)
    remoteStorage := store.NewMemoryStore(nil, nil)
    remoteStorage := testutils.NewLongMemoryStore(nil, nil)
    client := NewClient(repo, remoteStorage, nil, localStorage)

    sampleTargets := data.NewTargets()
@@ -279,9 +278,6 @@ func TestSizeMismatchLong(t *testing.T) {
    assert.NoError(t, err)
    l := int64(len(orig))

    orig = append([]byte(" "), orig...)
    assert.Equal(t, l+1, int64(len(orig)))

    remoteStorage.SetMeta("targets", orig)

    _, _, err = client.downloadSigned("targets", l, origSha256[:])
@@ -293,7 +289,7 @@ func TestSizeMismatchShort(t *testing.T) {
func TestSizeMismatchShort(t *testing.T) {
    repo := tuf.NewRepo(nil, nil)
    localStorage := store.NewMemoryStore(nil, nil)
    remoteStorage := store.NewMemoryStore(nil, nil)
    remoteStorage := testutils.NewShortMemoryStore(nil, nil)
    client := NewClient(repo, remoteStorage, nil, localStorage)

    sampleTargets := data.NewTargets()
@@ -302,8 +298,6 @@ func TestSizeMismatchShort(t *testing.T) {
    assert.NoError(t, err)
    l := int64(len(orig))

    orig = orig[1:]

    remoteStorage.SetMeta("targets", orig)

    _, _, err = client.downloadSigned("targets", l, origSha256[:])
@@ -457,7 +451,7 @@ func TestDownloadTargetChecksumMismatch(t *testing.T) {
    kdb, repo, _, err := testutils.EmptyRepo("docker.com/notary")
    assert.NoError(t, err)
    localStorage := store.NewMemoryStore(nil, nil)
    remoteStorage := store.NewMemoryStore(nil, nil)
    remoteStorage := testutils.NewCorruptingMemoryStore(nil, nil)
    client := NewClient(repo, remoteStorage, kdb, localStorage)

    // create and "upload" sample targets
@@ -466,13 +460,10 @@ func TestDownloadTargetChecksumMismatch(t *testing.T) {
    orig, err := json.Marshal(signedOrig)
    assert.NoError(t, err)
    origSha256 := sha256.Sum256(orig)
    orig[0] = '}' // corrupt data, should be a {
    err = remoteStorage.SetMeta("targets", orig)
    assert.NoError(t, err)

    // create local snapshot with targets file
    // It's necessary to do it this way rather than calling repo.SignSnapshot
    // so that we have the wrong sha256 in the snapshot.
    snap := data.SignedSnapshot{
        Signed: data.Snapshot{
            Meta: data.Files{
@@ -583,28 +574,52 @@ func TestUpdateDownloadRootHappy(t *testing.T) {
}

func TestUpdateDownloadRootBadChecksum(t *testing.T) {
    remoteStore := testutils.NewCorruptingMemoryStore(nil, nil)

    kdb, repo, _, err := testutils.EmptyRepo("docker.com/notary")
    assert.NoError(t, err)
    localStorage := store.NewMemoryStore(nil, nil)
    remoteStorage := store.NewMemoryStore(nil, nil)
    client := NewClient(repo, remoteStorage, kdb, localStorage)
    client := NewClient(repo, remoteStore, kdb, localStorage)

    // sign snapshot to make sure we have a checksum for root
    _, err = repo.SignSnapshot(data.DefaultExpires("snapshot"))
    assert.NoError(t, err)

    // create and "upload" sample root, snapshot, and timestamp
    // sign and "upload" sample root
    signedOrig, err := repo.SignRoot(data.DefaultExpires("root"))
    assert.NoError(t, err)
    orig, err := json.Marshal(signedOrig)
    assert.NoError(t, err)
    err = remoteStorage.SetMeta("root", orig)
    err = remoteStore.SetMeta("root", orig)
    assert.NoError(t, err)

    // sign snapshot to make sure we have current checksum for root
    _, err = repo.SignSnapshot(data.DefaultExpires("snapshot"))
    assert.NoError(t, err)

    err = client.downloadRoot()
    assert.IsType(t, ErrChecksumMismatch{}, err)
}

func TestUpdateDownloadRootChecksumNotFound(t *testing.T) {
    remoteStore := store.NewMemoryStore(nil, nil)
    kdb, repo, _, err := testutils.EmptyRepo("docker.com/notary")
    assert.NoError(t, err)
    localStorage := store.NewMemoryStore(nil, nil)
    client := NewClient(repo, remoteStore, kdb, localStorage)

    // sign snapshot to make sure we have current checksum for root
    _, err = repo.SignSnapshot(data.DefaultExpires("snapshot"))
    assert.NoError(t, err)

    // sign and "upload" sample root
    signedOrig, err := repo.SignRoot(data.DefaultExpires("root"))
    assert.NoError(t, err)
    orig, err := json.Marshal(signedOrig)
    assert.NoError(t, err)
    err = remoteStore.SetMeta("root", orig)
    assert.NoError(t, err)

    // don't sign snapshot again to ensure checksum is out of date (bad)

    err = client.downloadRoot()
    assert.IsType(t, ErrChecksumMismatch{}, err)
    assert.IsType(t, store.ErrMetaNotFound{}, err)
}

func TestDownloadTimestampHappy(t *testing.T) {
@@ -739,7 +754,7 @@ func TestDownloadSnapshotNoChecksum(t *testing.T) {
    assert.IsType(t, ErrMissingMeta{}, err)
}

func TestDownloadSnapshotBadChecksum(t *testing.T) {
func TestDownloadSnapshotChecksumNotFound(t *testing.T) {
    kdb, repo, _, err := testutils.EmptyRepo("docker.com/notary")
    assert.NoError(t, err)
    localStorage := store.NewMemoryStore(nil, nil)
@@ -761,7 +776,7 @@ func TestDownloadSnapshotBadChecksum(t *testing.T) {
    // by not signing timestamp again we ensure it has the wrong checksum

    err = client.downloadSnapshot()
    assert.IsType(t, ErrChecksumMismatch{}, err)
    assert.IsType(t, store.ErrMetaNotFound{}, err)
}

// TargetMeta returns the file metadata for a file path in the role subtree,
@@ -1,8 +1,6 @@
package store

import (
    "io"

    "github.com/docker/notary/tuf/data"
)
@@ -23,17 +21,9 @@ type PublicKeyStore interface {
    GetKey(role string) ([]byte, error)
}

// TargetStore represents a collection of targets that can be walked similarly
// to walking a directory, passing a callback that receives the path and meta
// for each target
type TargetStore interface {
    WalkStagedTargets(paths []string, targetsFn targetsWalkFunc) error
}

// LocalStore represents a local TUF store
type LocalStore interface {
    MetadataStore
    TargetStore
}

// RemoteStore is similar to LocalStore with the added expectation that it should
@@ -41,5 +31,4 @@ type LocalStore interface {
type RemoteStore interface {
    MetadataStore
    PublicKeyStore
    GetTarget(path string) (io.ReadCloser, error)
}
@@ -1,9 +1,8 @@
package store

import (
    "bytes"
    "crypto/sha256"
    "fmt"
    "io"

    "github.com/docker/notary"
    "github.com/docker/notary/tuf/data"
@@ -12,28 +11,43 @@ import (

// NewMemoryStore returns a MetadataStore that operates entirely in memory.
// Very useful for testing
func NewMemoryStore(meta map[string][]byte, files map[string][]byte) RemoteStore {
func NewMemoryStore(meta map[string][]byte, files map[string][]byte) *MemoryStore {
    var consistent = make(map[string][]byte)
    if meta == nil {
        meta = make(map[string][]byte)
    } else {
        // add all seed meta to consistent
        for name, data := range meta {
            checksum := sha256.Sum256(data)
            path := utils.ConsistentName(name, checksum[:])
            consistent[path] = data
        }
    }
    if files == nil {
        files = make(map[string][]byte)
    }
    return &memoryStore{
        meta:  meta,
        files: files,
        keys:  make(map[string][]data.PrivateKey),
    return &MemoryStore{
        meta:       meta,
        consistent: consistent,
        files:      files,
        keys:       make(map[string][]data.PrivateKey),
    }
}

type memoryStore struct {
    meta  map[string][]byte
    files map[string][]byte
    keys  map[string][]data.PrivateKey
// MemoryStore implements a mock RemoteStore entirely in memory.
// For testing purposes only.
type MemoryStore struct {
    meta       map[string][]byte
    consistent map[string][]byte
    files      map[string][]byte
    keys       map[string][]data.PrivateKey
}

// GetMeta returns up to size bytes of data references by name.
// If size is -1, this corresponds to "infinite," but we cut off at 100MB
func (m *memoryStore) GetMeta(name string, size int64) ([]byte, error) {
// as we will always know the size for everything but a timestamp and
// sometimes a root, neither of which should be exceptionally large
func (m *MemoryStore) GetMeta(name string, size int64) ([]byte, error) {
    d, ok := m.meta[name]
    if ok {
        if size == -1 {
@@ -44,15 +58,29 @@ func (m *memoryStore) GetMeta(name string, size int64) ([]byte, error) {
        }
        return d[:size], nil
    }
    d, ok = m.consistent[name]
    if ok {
        if int64(len(d)) < size {
            return d, nil
        }
        return d[:size], nil
    }
    return nil, ErrMetaNotFound{Resource: name}
}

func (m *memoryStore) SetMeta(name string, meta []byte) error {
// SetMeta sets the metadata value for the given name
func (m *MemoryStore) SetMeta(name string, meta []byte) error {
    m.meta[name] = meta

    checksum := sha256.Sum256(meta)
    path := utils.ConsistentName(name, checksum[:])
    m.consistent[path] = meta
    return nil
}

func (m *memoryStore) SetMultiMeta(metas map[string][]byte) error {
// SetMultiMeta sets multiple pieces of metadata for multiple names
// in a single operation.
func (m *MemoryStore) SetMultiMeta(metas map[string][]byte) error {
    for role, blob := range metas {
        m.SetMeta(role, blob)
    }
@@ -61,57 +89,23 @@ func (m *memoryStore) SetMultiMeta(metas map[string][]byte) error {

// RemoveMeta removes the metadata for a single role - if the metadata doesn't
// exist, no error is returned
func (m *memoryStore) RemoveMeta(name string) error {
    delete(m.meta, name)
    return nil
}

func (m *memoryStore) GetTarget(path string) (io.ReadCloser, error) {
    return &utils.NoopCloser{Reader: bytes.NewReader(m.files[path])}, nil
}

func (m *memoryStore) WalkStagedTargets(paths []string, targetsFn targetsWalkFunc) error {
    if len(paths) == 0 {
        for path, dat := range m.files {
            meta, err := data.NewFileMeta(bytes.NewReader(dat), "sha256")
            if err != nil {
                return err
            }
            if err = targetsFn(path, meta); err != nil {
                return err
            }
        }
        return nil
    }

    for _, path := range paths {
        dat, ok := m.files[path]
        if !ok {
            return ErrMetaNotFound{Resource: path}
        }
        meta, err := data.NewFileMeta(bytes.NewReader(dat), "sha256")
        if err != nil {
            return err
        }
        if err = targetsFn(path, meta); err != nil {
            return err
        }
func (m *MemoryStore) RemoveMeta(name string) error {
    if meta, ok := m.meta[name]; ok {
        checksum := sha256.Sum256(meta)
        path := utils.ConsistentName(name, checksum[:])
        delete(m.meta, name)
        delete(m.consistent, path)
    }
    return nil
}

func (m *memoryStore) Commit(map[string][]byte, bool, map[string]data.Hashes) error {
    return nil
// GetKey returns the public key for the given role
func (m *MemoryStore) GetKey(role string) ([]byte, error) {
    return nil, fmt.Errorf("GetKey is not implemented for the MemoryStore")
}

func (m *memoryStore) GetKey(role string) ([]byte, error) {
    return nil, fmt.Errorf("GetKey is not implemented for the memoryStore")
}

// Clear this existing memory store by setting this store as new empty one
func (m *memoryStore) RemoveAll() error {
    m.meta = make(map[string][]byte)
    m.files = make(map[string][]byte)
    m.keys = make(map[string][]data.PrivateKey)
// RemoveAll clears the existing memory store by setting this store as new empty one
func (m *MemoryStore) RemoveAll() error {
    *m = *NewMemoryStore(nil, nil)
    return nil
}
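For illustration, a small standalone sketch of the behaviour added above: SetMeta also registers the checksum-qualified name, so GetMeta can serve either form. The metadata bytes are made up and the import paths are assumed from this repository layout:

package main

import (
    "bytes"
    "crypto/sha256"
    "fmt"

    "github.com/docker/notary/tuf/store"
    "github.com/docker/notary/tuf/utils"
)

func main() {
    m := store.NewMemoryStore(nil, nil)
    meta := []byte(`{"signed": {"_type": "Targets"}}`)
    if err := m.SetMeta("targets", meta); err != nil {
        panic(err)
    }

    // The plain role name and the consistent name ("targets.<sha256 hex>")
    // both resolve to the same bytes.
    plain, _ := m.GetMeta("targets", int64(len(meta)))
    checksum := sha256.Sum256(meta)
    consistent, _ := m.GetMeta(utils.ConsistentName("targets", checksum[:]), int64(len(meta)))

    fmt.Println(bytes.Equal(plain, consistent)) // true
}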
@@ -0,0 +1,71 @@
package testutils

import (
    "github.com/docker/notary/tuf/store"
)

// CorruptingMemoryStore corrupts all data returned by GetMeta
type CorruptingMemoryStore struct {
    store.MemoryStore
}

// NewCorruptingMemoryStore returns a new instance of memory store that
// corrupts all data requested from it.
func NewCorruptingMemoryStore(meta map[string][]byte, files map[string][]byte) *CorruptingMemoryStore {
    s := store.NewMemoryStore(meta, files)
    return &CorruptingMemoryStore{MemoryStore: *s}
}

// GetMeta returns up to size bytes of meta identified by string. It will
// always be corrupted by setting the first character to }
func (cm CorruptingMemoryStore) GetMeta(name string, size int64) ([]byte, error) {
    d, err := cm.MemoryStore.GetMeta(name, size)
    if err != nil {
        return nil, err
    }
    d[0] = '}' // all our content is JSON so must start with {
    return d, err
}

// LongMemoryStore returns one byte too much data on every GetMeta call
type LongMemoryStore struct {
    store.MemoryStore
}

// NewLongMemoryStore returns a new instance of memory store that
// returns one byte too much data on any request to GetMeta
func NewLongMemoryStore(meta map[string][]byte, files map[string][]byte) *LongMemoryStore {
    s := store.NewMemoryStore(meta, files)
    return &LongMemoryStore{MemoryStore: *s}
}

// GetMeta returns one byte too much
func (lm LongMemoryStore) GetMeta(name string, size int64) ([]byte, error) {
    d, err := lm.MemoryStore.GetMeta(name, size)
    if err != nil {
        return nil, err
    }
    d = append(d, ' ')
    return d, err
}

// ShortMemoryStore returns one byte too little data on every GetMeta call
type ShortMemoryStore struct {
    store.MemoryStore
}

// NewShortMemoryStore returns a new instance of memory store that
// returns one byte too little data on any request to GetMeta
func NewShortMemoryStore(meta map[string][]byte, files map[string][]byte) *ShortMemoryStore {
    s := store.NewMemoryStore(meta, files)
    return &ShortMemoryStore{MemoryStore: *s}
}

// GetMeta returns one byte too few
func (sm ShortMemoryStore) GetMeta(name string, size int64) ([]byte, error) {
    d, err := sm.MemoryStore.GetMeta(name, size)
    if err != nil {
        return nil, err
    }
    return d[1:], err
}
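As a hedged sketch of how these helpers are meant to be used, the following mirrors the TestChecksumMismatch change earlier in this diff; the test name and import paths are assumptions:

package client

import (
    "crypto/sha256"
    "encoding/json"
    "testing"

    "github.com/docker/notary/tuf"
    "github.com/docker/notary/tuf/data"
    "github.com/docker/notary/tuf/store"
    "github.com/docker/notary/tuf/testutils"
    "github.com/stretchr/testify/assert"
)

func TestChecksumMismatchSketch(t *testing.T) {
    repo := tuf.NewRepo(nil, nil)
    localStorage := store.NewMemoryStore(nil, nil)
    // The corrupting store flips the first byte of everything it serves,
    // so any download through it should fail checksum validation.
    remoteStorage := testutils.NewCorruptingMemoryStore(nil, nil)
    client := NewClient(repo, remoteStorage, nil, localStorage)

    sampleTargets := data.NewTargets()
    orig, err := json.Marshal(sampleTargets)
    assert.NoError(t, err)
    origSha256 := sha256.Sum256(orig)

    remoteStorage.SetMeta("targets", orig)

    _, _, err = client.downloadSigned("targets", int64(len(orig)), origSha256[:])
    assert.IsType(t, ErrChecksumMismatch{}, err)
}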
@@ -5,6 +5,7 @@ import (
    "crypto/sha256"
    "crypto/sha512"
    "crypto/tls"
    "encoding/hex"
    "fmt"
    "io"
    "net/http"
@@ -146,3 +147,14 @@ func FindRoleIndex(rs []*data.Role, name string) int {
    }
    return -1
}

// ConsistentName generates the appropriate HTTP URL path for the role,
// based on whether the repo is marked as consistent. The RemoteStore
// is responsible for adding file extensions.
func ConsistentName(role string, hashSha256 []byte) string {
    if len(hashSha256) > 0 {
        hash := hex.EncodeToString(hashSha256)
        return fmt.Sprintf("%s.%s", role, hash)
    }
    return role
}
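A minimal usage sketch of ConsistentName; the values are illustrative and the package path is assumed from this repository layout:

package main

import (
    "crypto/sha256"
    "fmt"

    "github.com/docker/notary/tuf/utils"
)

func main() {
    meta := []byte(`{"signed": {"_type": "Snapshot"}}`)
    checksum := sha256.Sum256(meta)

    // With a checksum the client requests "<role>.<sha256 hex>"; the store
    // layer appends ".json", which the server's GetRoleByHash route matches.
    fmt.Println(utils.ConsistentName("snapshot", checksum[:]))

    // Without a checksum (e.g. timestamp), the plain role name is used.
    fmt.Println(utils.ConsistentName("timestamp", nil))
}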