Merge pull request #495 from docker/filestore-getmeta-size

Ensure filestore GetMeta only returns up to size bytes. Add a max size constant.
This commit is contained in:
Diogo Mónica 2016-01-28 17:36:35 -08:00
commit 96d451e1c5
13 changed files with 224 additions and 52 deletions

View File

@ -25,10 +25,6 @@ import (
"github.com/docker/notary/tuf/store"
)
const (
maxSize = 5 << 20
)
func init() {
data.SetDefaultExpiryTimes(
map[string]int{
@ -747,7 +743,7 @@ func (r *NotaryRepository) bootstrapRepo() error {
tufRepo := tuf.NewRepo(kdb, r.CryptoService)
logrus.Debugf("Loading trusted collection.")
rootJSON, err := r.fileStore.GetMeta("root", 0)
rootJSON, err := r.fileStore.GetMeta("root", -1)
if err != nil {
return err
}
@ -760,7 +756,7 @@ func (r *NotaryRepository) bootstrapRepo() error {
if err != nil {
return err
}
targetsJSON, err := r.fileStore.GetMeta("targets", 0)
targetsJSON, err := r.fileStore.GetMeta("targets", -1)
if err != nil {
return err
}
@ -771,7 +767,7 @@ func (r *NotaryRepository) bootstrapRepo() error {
}
tufRepo.SetTargets("targets", targets)
snapshotJSON, err := r.fileStore.GetMeta("snapshot", 0)
snapshotJSON, err := r.fileStore.GetMeta("snapshot", -1)
if err == nil {
snapshot := &data.SignedSnapshot{}
err = json.Unmarshal(snapshotJSON, snapshot)
@ -876,7 +872,7 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Cl
// try to read root from cache first. We will trust this root
// until we detect a problem during update which will cause
// us to download a new root and perform a rotation.
rootJSON, cachedRootErr := r.fileStore.GetMeta("root", maxSize)
rootJSON, cachedRootErr := r.fileStore.GetMeta("root", -1)
if cachedRootErr == nil {
signedRoot, cachedRootErr = r.validateRoot(rootJSON)
@ -890,7 +886,8 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Cl
// checking for initialization of the repo).
// if remote store successfully set up, try and get root from remote
tmpJSON, err := remote.GetMeta("root", maxSize)
// We don't have any local data to determine the size of root, so try the maximum (though it is restricted at 100MB)
tmpJSON, err := remote.GetMeta("root", -1)
if err != nil {
// we didn't have a root in cache and were unable to load one from
// the server. Nothing we can do but error.

View File

@ -58,7 +58,7 @@ func readOnlyServer(t *testing.T, cache store.MetadataStore, notFoundStatus int)
m.HandleFunc("/v2/docker.com/notary/_trust/tuf/{role:.*}.json",
func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
metaBytes, err := cache.GetMeta(vars["role"], maxSize)
metaBytes, err := cache.GetMeta(vars["role"], -1)
if _, ok := err.(store.ErrMetaNotFound); ok {
w.WriteHeader(notFoundStatus)
} else {
@ -107,7 +107,7 @@ func TestUpdateSucceedsEvenIfCannotWriteNewRepo(t *testing.T) {
}
for r, expected := range serverMeta {
actual, err := repo.fileStore.GetMeta(r, maxSize)
actual, err := repo.fileStore.GetMeta(r, -1)
if r == role {
require.Error(t, err)
require.IsType(t, store.ErrMetaNotFound{}, err,
@ -158,7 +158,7 @@ func TestUpdateSucceedsEvenIfCannotWriteExistingRepo(t *testing.T) {
require.NoError(t, err)
for r, expected := range serverMeta {
actual, err := repo.fileStore.GetMeta(r, maxSize)
actual, err := repo.fileStore.GetMeta(r, -1)
require.NoError(t, err, "problem getting repo metadata for %s", r)
if role == r {
require.False(t, bytes.Equal(expected, actual),
@ -223,7 +223,7 @@ func TestUpdateReplacesCorruptOrMissingMetadata(t *testing.T) {
_, err := repo.Update(forWrite)
require.NoError(t, err)
for r, expected := range serverMeta {
actual, err := repo.fileStore.GetMeta(r, maxSize)
actual, err := repo.fileStore.GetMeta(r, -1)
require.NoError(t, err, "problem getting repo metadata for %s", role)
require.True(t, bytes.Equal(expected, actual),
"%s for %s: expected to recover after update", text, role)
@ -270,7 +270,7 @@ func TestUpdateFailsIfServerRootKeyChangedWithoutMultiSign(t *testing.T) {
for text, messItUp := range waysToMessUpLocalMetadata(repoSwizzler) {
for _, forWrite := range []bool{true, false} {
require.NoError(t, messItUp(data.CanonicalRootRole), "could not fuzz root (%s)", text)
messedUpMeta, err := repo.fileStore.GetMeta(data.CanonicalRootRole, maxSize)
messedUpMeta, err := repo.fileStore.GetMeta(data.CanonicalRootRole, -1)
if _, ok := err.(store.ErrMetaNotFound); ok { // one of the ways to mess up is to delete metadata
@ -289,7 +289,7 @@ func TestUpdateFailsIfServerRootKeyChangedWithoutMultiSign(t *testing.T) {
// same because it has failed to update.
for role, expected := range origMeta {
if role != data.CanonicalTimestampRole && role != data.CanonicalSnapshotRole {
actual, err := repo.fileStore.GetMeta(role, maxSize)
actual, err := repo.fileStore.GetMeta(role, -1)
require.NoError(t, err, "problem getting repo metadata for %s", role)
if role == data.CanonicalRootRole {

View File

@ -2,6 +2,10 @@ package notary
// application wide constants
const (
// MaxDownloadSize is the maximum size we'll download for metadata if no limit is given
MaxDownloadSize int64 = 100 << 20
// MaxTimestampSize is the maximum size of timestamp metadata - 1MiB.
MaxTimestampSize int64 = 1 << 20
// MinRSABitSize is the minimum bit size for RSA keys allowed in notary
MinRSABitSize = 2048
// MinThreshold requires a minimum of one threshold for roles; currently we do not support a higher threshold

View File

@ -11,6 +11,7 @@ import (
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/notary"
tuf "github.com/docker/notary/tuf"
"github.com/docker/notary/tuf/data"
"github.com/docker/notary/tuf/keys"
@ -19,8 +20,6 @@ import (
"github.com/docker/notary/tuf/utils"
)
const maxSize int64 = 5 << 20
// Client is a usability wrapper around a raw TUF repo
type Client struct {
local *tuf.Repo
@ -131,7 +130,9 @@ func (c Client) checkRoot() error {
func (c *Client) downloadRoot() error {
logrus.Debug("Downloading Root...")
role := data.CanonicalRootRole
size := maxSize
// We can't read an exact size for the root metadata without risking getting stuck in the TUF update cycle
// since it's possible that downloading timestamp/snapshot metadata may fail due to a signature mismatch
var size int64 = -1
var expectedSha256 []byte
if c.local.Snapshot != nil {
size = c.local.Snapshot.Signed.Meta[role].Length
@ -252,7 +253,7 @@ func (c *Client) downloadTimestamp() error {
old *data.Signed
version = 0
)
cachedTS, err := c.cache.GetMeta(role, maxSize)
cachedTS, err := c.cache.GetMeta(role, notary.MaxTimestampSize)
if err == nil {
cached := &data.Signed{}
err := json.Unmarshal(cachedTS, cached)
@ -266,7 +267,7 @@ func (c *Client) downloadTimestamp() error {
}
// unlike root, targets and snapshot, always try and download timestamps
// from remote, only using the cache one if we couldn't reach remote.
raw, s, err := c.downloadSigned(role, maxSize, nil)
raw, s, err := c.downloadSigned(role, notary.MaxTimestampSize, nil)
if err != nil || len(raw) == 0 {
if old == nil {
if err == nil {

View File

@ -3,6 +3,7 @@ package client
import (
"crypto/sha256"
"encoding/json"
"fmt"
"strconv"
"testing"
"time"
@ -332,6 +333,51 @@ func TestDownloadTargetsHappy(t *testing.T) {
assert.NoError(t, err)
}
// TestDownloadTargetsLarge: Check that we can download very large targets metadata files,
// which may be caused by adding a large number of targets.
// This test is slow, so it will not run in short mode.
func TestDownloadTargetsLarge(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode")
	}

	kdb, repo, _, err := testutils.EmptyRepo("docker.com/notary")
	assert.NoError(t, err)

	localStorage := store.NewMemoryStore(nil, nil)
	remoteStorage := store.NewMemoryStore(nil, nil)
	client := NewClient(repo, remoteStorage, kdb, localStorage)

	// Every target can share the same (empty-file) metadata; only the number of
	// entries matters for inflating the metadata size.
	hash := sha256.Sum256([]byte{})
	f := data.FileMeta{
		Length: 1,
		Hashes: map[string][]byte{
			"sha256": hash[:],
		},
	}
	// Add a ton of target files to the targets role to make this targets metadata huge
	// 75,000 targets results in > 5MB (~6.5MB on recent runs)
	for i := 0; i < 75000; i++ {
		_, err = repo.AddTargets(data.CanonicalTargetsRole, data.Files{strconv.Itoa(i): f})
		assert.NoError(t, err)
	}

	signedOrig, err := repo.SignTargets("targets", data.DefaultExpires("targets"))
	assert.NoError(t, err)
	orig, err := json.Marshal(signedOrig)
	assert.NoError(t, err)
	err = remoteStorage.SetMeta("targets", orig)
	assert.NoError(t, err)

	// call repo.SignSnapshot to update the targets role in the snapshot
	// (previously the error was silently discarded; a signing failure here
	// would make the later download failure hard to diagnose)
	_, err = repo.SignSnapshot(data.DefaultExpires("snapshot"))
	assert.NoError(t, err)

	// Clear the cache to force an online download
	client.cache.RemoveAll()

	err = client.downloadTargets("targets")
	assert.NoError(t, err)
}
func TestDownloadTargetsDeepHappy(t *testing.T) {
kdb, repo, cs, err := testutils.EmptyRepo("docker.com/notary")
assert.NoError(t, err)
@ -606,6 +652,49 @@ func TestDownloadSnapshotHappy(t *testing.T) {
assert.NoError(t, err)
}
// TestDownloadSnapshotLarge: Check that we can download very large snapshot metadata files,
// which may be caused by adding a large number of delegations.
// This test is slow, so it will not run in short mode.
func TestDownloadSnapshotLarge(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode")
	}

	kdb, repo, _, err := testutils.EmptyRepo("docker.com/notary")
	assert.NoError(t, err)

	localStorage := store.NewMemoryStore(nil, nil)
	remoteStorage := store.NewMemoryStore(nil, nil)
	client := NewClient(repo, remoteStorage, kdb, localStorage)

	// Add a ton of empty delegation roles to targets to make snapshot data huge
	// This can also be done by adding legitimate delegations but it will be much slower
	// 75,000 delegation roles results in > 5MB (~7.3MB on recent runs)
	for i := 0; i < 75000; i++ {
		repo.Targets[fmt.Sprintf("targets/%d", i)] = &data.SignedTargets{}
	}

	// create and "upload" sample snapshot and timestamp
	snapshotSigned, err := repo.SignSnapshot(data.DefaultExpires("snapshot"))
	assert.NoError(t, err)
	raw, err := json.Marshal(snapshotSigned)
	assert.NoError(t, err)
	assert.NoError(t, remoteStorage.SetMeta("snapshot", raw))

	timestampSigned, err := repo.SignTimestamp(data.DefaultExpires("timestamp"))
	assert.NoError(t, err)
	raw, err = json.Marshal(timestampSigned)
	assert.NoError(t, err)
	assert.NoError(t, remoteStorage.SetMeta("timestamp", raw))

	// Clear the cache to force an online download
	client.cache.RemoveAll()

	assert.NoError(t, client.downloadSnapshot())
}
// TestDownloadSnapshotNoTimestamp: It should never be valid to download a
// snapshot if we don't have a timestamp (and therefore no checksum for it)
func TestDownloadSnapshotNoTimestamp(t *testing.T) {

View File

@ -2,6 +2,7 @@ package store
import (
"fmt"
"github.com/docker/notary"
"io/ioutil"
"os"
"path"
@ -44,7 +45,8 @@ func (f *FilesystemStore) getPath(name string) string {
return filepath.Join(f.metaDir, fileName)
}
// GetMeta returns the meta for the given name (a role)
// GetMeta returns the meta for the given name (a role) up to size bytes
// If size is -1, this corresponds to "infinite," but we cut off at 100MB
func (f *FilesystemStore) GetMeta(name string, size int64) ([]byte, error) {
meta, err := ioutil.ReadFile(f.getPath(name))
if err != nil {
@ -53,7 +55,14 @@ func (f *FilesystemStore) GetMeta(name string, size int64) ([]byte, error) {
}
return nil, err
}
return meta, nil
if size == -1 {
size = notary.MaxDownloadSize
}
// Only return up to size bytes
if int64(len(meta)) < size {
return meta, nil
}
return meta[:size], nil
}
// SetMultiMeta sets the metadata for multiple roles in one operation

View File

@ -89,6 +89,18 @@ func TestGetMeta(t *testing.T) {
assert.Nil(t, err, "GetMeta returned unexpected error: %v", err)
assert.Equal(t, testContent, content, "Content read from file was corrupted.")
// Check that -1 size reads everything
content, err = s.GetMeta("testMeta", int64(-1))
assert.Nil(t, err, "GetMeta returned unexpected error: %v", err)
assert.Equal(t, testContent, content, "Content read from file was corrupted.")
// Check that we return only up to size bytes
content, err = s.GetMeta("testMeta", 4)
assert.Nil(t, err, "GetMeta returned unexpected error: %v", err)
assert.Equal(t, []byte("test"), content, "Content read from file was corrupted.")
}
func TestGetSetMetadata(t *testing.T) {

View File

@ -23,6 +23,7 @@ import (
"path"
"github.com/Sirupsen/logrus"
"github.com/docker/notary"
"github.com/docker/notary/tuf/validation"
)
@ -140,6 +141,7 @@ func translateStatusToError(resp *http.Response, resource string) error {
// GetMeta downloads the named meta file with the given size. A short body
// is acceptable because in the case of timestamp.json, the size is a cap,
// not an exact length.
// If size is -1, this corresponds to "infinite," but we cut off at 100MB
func (s HTTPStore) GetMeta(name string, size int64) ([]byte, error) {
url, err := s.buildMetaURL(name)
if err != nil {
@ -158,6 +160,9 @@ func (s HTTPStore) GetMeta(name string, size int64) ([]byte, error) {
logrus.Debugf("received HTTP status %d when requesting %s.", resp.StatusCode, name)
return nil, err
}
if size == -1 {
size = notary.MaxDownloadSize
}
if resp.ContentLength > size {
return nil, ErrMaliciousServer{}
}

View File

@ -80,6 +80,56 @@ func TestHTTPStoreGetMeta(t *testing.T) {
}
// TestHTTPStoreGetAllMeta tests that passing -1 to httpstore's GetMeta will
// return all content; the returned bytes must parse as signed root metadata
// whose first signature verifies against the test root key.
func TestHTTPStoreGetAllMeta(t *testing.T) {
	handler := func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(testRoot))
	}
	server := httptest.NewServer(http.HandlerFunc(handler))
	defer server.Close()

	store, err := NewHTTPStore(
		server.URL,
		"metadata",
		"txt",
		"targets",
		"key",
		&http.Transport{},
	)
	if err != nil {
		t.Fatal(err)
	}

	// -1 means "no size limit" (capped internally at the max download size)
	j, err := store.GetMeta("root", -1)
	if err != nil {
		t.Fatal(err)
	}
	p := &data.Signed{}
	err = json.Unmarshal(j, p)
	if err != nil {
		t.Fatal(err)
	}

	rootKey, err := base64.StdEncoding.DecodeString(testRootKey)
	assert.NoError(t, err)
	k := data.NewPublicKey("ecdsa-x509", rootKey)

	// Verify the first signature over the canonical-JSON form of the signed
	// payload. (A stale, always-false duplicate err check that previously
	// followed the sigBytes assignment has been removed.)
	sigBytes := p.Signatures[0].Signature
	var decoded map[string]interface{}
	if err := json.Unmarshal(p.Signed, &decoded); err != nil {
		t.Fatal(err)
	}
	msg, err := json.MarshalCanonical(decoded)
	if err != nil {
		t.Fatal(err)
	}
	method := p.Signatures[0].Method
	err = signed.Verifiers[method].Verify(k, sigBytes, msg)
	if err != nil {
		t.Fatal(err)
	}
}
func TestSetMultiMeta(t *testing.T) {
metas := map[string][]byte{
"root": []byte("root data"),

View File

@ -5,6 +5,7 @@ import (
"fmt"
"io"
"github.com/docker/notary"
"github.com/docker/notary/tuf/data"
"github.com/docker/notary/tuf/utils"
)
@ -31,9 +32,13 @@ type memoryStore struct {
keys map[string][]data.PrivateKey
}
// If size is -1, this corresponds to "infinite," but we cut off at 100MB
func (m *memoryStore) GetMeta(name string, size int64) ([]byte, error) {
d, ok := m.meta[name]
if ok {
if size == -1 {
size = notary.MaxDownloadSize
}
if int64(len(d)) < size {
return d, nil
}

View File

@ -21,6 +21,10 @@ func TestMemoryStore(t *testing.T) {
require.NoError(t, err)
require.Equal(t, metaContent, meta)
meta, err = s.GetMeta("exists", -1)
require.NoError(t, err)
require.Equal(t, metaContent, meta)
err = s.RemoveAll()
require.NoError(t, err)

View File

@ -14,10 +14,6 @@ import (
"github.com/docker/notary/tuf/store"
)
const (
maxSize = 5 << 20
)
// ErrNoKeyForRole returns an error when the cryptoservice provided to
// MetadataSwizzler has no key for a particular role
type ErrNoKeyForRole struct {
@ -87,7 +83,7 @@ func serializeMetadata(cs signed.CryptoService, s *data.Signed, role string,
// gets a Signed from the metadata store
func signedFromStore(cache store.MetadataStore, role string) (*data.Signed, error) {
b, err := cache.GetMeta(role, maxSize)
b, err := cache.GetMeta(role, -1)
if err != nil {
return nil, err
}
@ -120,7 +116,7 @@ func NewMetadataSwizzler(gun string, initialMetadata map[string][]byte,
// SetInvalidJSON corrupts metadata into something that is no longer valid JSON
func (m *MetadataSwizzler) SetInvalidJSON(role string) error {
metaBytes, err := m.MetadataCache.GetMeta(role, maxSize)
metaBytes, err := m.MetadataCache.GetMeta(role, -1)
if err != nil {
return err
}
@ -327,7 +323,7 @@ func (m *MetadataSwizzler) SetThreshold(role string, newThreshold int) error {
roleSpecifier = path.Dir(role)
}
b, err := m.MetadataCache.GetMeta(roleSpecifier, maxSize)
b, err := m.MetadataCache.GetMeta(roleSpecifier, -1)
if err != nil {
return err
}
@ -377,7 +373,7 @@ func (m *MetadataSwizzler) ChangeRootKey() error {
return err
}
b, err := m.MetadataCache.GetMeta(data.CanonicalRootRole, maxSize)
b, err := m.MetadataCache.GetMeta(data.CanonicalRootRole, -1)
if err != nil {
return err
}
@ -410,7 +406,7 @@ func (m *MetadataSwizzler) UpdateSnapshotHashes(roles ...string) error {
snapshotSigned *data.Signed
err error
)
if metaBytes, err = m.MetadataCache.GetMeta(data.CanonicalSnapshotRole, maxSize); err != nil {
if metaBytes, err = m.MetadataCache.GetMeta(data.CanonicalSnapshotRole, -1); err != nil {
return err
}
@ -426,7 +422,7 @@ func (m *MetadataSwizzler) UpdateSnapshotHashes(roles ...string) error {
for _, role := range roles {
if role != data.CanonicalSnapshotRole && role != data.CanonicalTimestampRole {
if metaBytes, err = m.MetadataCache.GetMeta(role, maxSize); err != nil {
if metaBytes, err = m.MetadataCache.GetMeta(role, -1); err != nil {
return err
}
@ -458,7 +454,7 @@ func (m *MetadataSwizzler) UpdateTimestampHash() error {
timestampSigned *data.Signed
err error
)
if metaBytes, err = m.MetadataCache.GetMeta(data.CanonicalTimestampRole, maxSize); err != nil {
if metaBytes, err = m.MetadataCache.GetMeta(data.CanonicalTimestampRole, -1); err != nil {
return err
}
// we can't just create a new timestamp, because then the expiry would be
@ -467,7 +463,7 @@ func (m *MetadataSwizzler) UpdateTimestampHash() error {
return err
}
if metaBytes, err = m.MetadataCache.GetMeta(data.CanonicalSnapshotRole, maxSize); err != nil {
if metaBytes, err = m.MetadataCache.GetMeta(data.CanonicalSnapshotRole, -1); err != nil {
return err
}

View File

@ -81,7 +81,7 @@ func TestSwizzlerSetInvalidJSON(t *testing.T) {
f.SetInvalidJSON(data.CanonicalSnapshotRole)
for role, metaBytes := range origMeta {
newMeta, err := f.MetadataCache.GetMeta(role, maxSize)
newMeta, err := f.MetadataCache.GetMeta(role, -1)
require.NoError(t, err)
if role != data.CanonicalSnapshotRole {
@ -103,7 +103,7 @@ func TestSwizzlerSetInvalidSigned(t *testing.T) {
f.SetInvalidSigned(data.CanonicalTargetsRole)
for role, metaBytes := range origMeta {
newMeta, err := f.MetadataCache.GetMeta(role, maxSize)
newMeta, err := f.MetadataCache.GetMeta(role, -1)
require.NoError(t, err)
if role != data.CanonicalTargetsRole {
@ -127,7 +127,7 @@ func TestSwizzlerSetInvalidSignedMeta(t *testing.T) {
f.SetInvalidSignedMeta(data.CanonicalTargetsRole)
for role, metaBytes := range origMeta {
newMeta, err := f.MetadataCache.GetMeta(role, maxSize)
newMeta, err := f.MetadataCache.GetMeta(role, -1)
require.NoError(t, err)
if role != data.CanonicalTargetsRole {
@ -151,7 +151,7 @@ func TestSwizzlerSetInvalidMetadataType(t *testing.T) {
f.SetInvalidMetadataType(data.CanonicalTargetsRole)
for role, metaBytes := range origMeta {
newMeta, err := f.MetadataCache.GetMeta(role, maxSize)
newMeta, err := f.MetadataCache.GetMeta(role, -1)
require.NoError(t, err)
if role != data.CanonicalTargetsRole {
@ -174,7 +174,7 @@ func TestSwizzlerInvalidateMetadataSignatures(t *testing.T) {
f.InvalidateMetadataSignatures(data.CanonicalRootRole)
for role, metaBytes := range origMeta {
newMeta, err := f.MetadataCache.GetMeta(role, maxSize)
newMeta, err := f.MetadataCache.GetMeta(role, -1)
require.NoError(t, err)
if role != data.CanonicalRootRole {
@ -205,7 +205,7 @@ func TestSwizzlerRemoveMetadata(t *testing.T) {
f.RemoveMetadata("targets/a")
for role, metaBytes := range origMeta {
newMeta, err := f.MetadataCache.GetMeta(role, maxSize)
newMeta, err := f.MetadataCache.GetMeta(role, -1)
if role != "targets/a" {
require.NoError(t, err)
require.True(t, bytes.Equal(metaBytes, newMeta), "bytes have changed for role %s", role)
@ -223,7 +223,7 @@ func TestSwizzlerSignMetadataWithInvalidKey(t *testing.T) {
f.SignMetadataWithInvalidKey(data.CanonicalTimestampRole)
for role, metaBytes := range origMeta {
newMeta, err := f.MetadataCache.GetMeta(role, maxSize)
newMeta, err := f.MetadataCache.GetMeta(role, -1)
require.NoError(t, err)
if role != data.CanonicalTimestampRole {
@ -250,7 +250,7 @@ func TestSwizzlerOffsetMetadataVersion(t *testing.T) {
f.OffsetMetadataVersion("targets/a", -2)
for role, metaBytes := range origMeta {
newMeta, err := f.MetadataCache.GetMeta(role, maxSize)
newMeta, err := f.MetadataCache.GetMeta(role, -1)
require.NoError(t, err)
if role != "targets/a" {
@ -273,7 +273,7 @@ func TestSwizzlerExpireMetadata(t *testing.T) {
f.ExpireMetadata(data.CanonicalRootRole)
for role, metaBytes := range origMeta {
newMeta, err := f.MetadataCache.GetMeta(role, maxSize)
newMeta, err := f.MetadataCache.GetMeta(role, -1)
require.NoError(t, err)
if role != data.CanonicalRootRole {
@ -297,7 +297,7 @@ func TestSwizzlerSetThresholdBaseRole(t *testing.T) {
f.SetThreshold(data.CanonicalTargetsRole, 3)
for role, metaBytes := range origMeta {
newMeta, err := f.MetadataCache.GetMeta(role, maxSize)
newMeta, err := f.MetadataCache.GetMeta(role, -1)
require.NoError(t, err)
// the threshold for base roles is set in root
@ -325,7 +325,7 @@ func TestSwizzlerSetThresholdDelegatedRole(t *testing.T) {
f.SetThreshold("targets/a/b", 3)
for role, metaBytes := range origMeta {
newMeta, err := f.MetadataCache.GetMeta(role, maxSize)
newMeta, err := f.MetadataCache.GetMeta(role, -1)
require.NoError(t, err)
// the threshold for "targets/a/b" is in "targets/a"
@ -357,7 +357,7 @@ func TestSwizzlerChangeRootKey(t *testing.T) {
for _, role := range roles {
origMeta := origMeta[role]
newMeta, err := f.MetadataCache.GetMeta(role, maxSize)
newMeta, err := f.MetadataCache.GetMeta(role, -1)
require.NoError(t, err)
// the threshold for base roles is set in root
@ -400,7 +400,7 @@ func TestSwizzlerUpdateSnapshotHashesSpecifiedRoles(t *testing.T) {
// nothing has changed, signed data should be the same (signatures might
// change because signatures may have random elements
f.UpdateSnapshotHashes(data.CanonicalTargetsRole)
newMeta, err := f.MetadataCache.GetMeta(data.CanonicalSnapshotRole, maxSize)
newMeta, err := f.MetadataCache.GetMeta(data.CanonicalSnapshotRole, -1)
origSigned, newSigned := &data.Signed{}, &data.Signed{}
require.NoError(t, json.Unmarshal(origMeta[data.CanonicalSnapshotRole], origSigned))
@ -414,7 +414,7 @@ func TestSwizzlerUpdateSnapshotHashesSpecifiedRoles(t *testing.T) {
// update the snapshot with just 1 role
f.UpdateSnapshotHashes(data.CanonicalTargetsRole)
newMeta, err = f.MetadataCache.GetMeta(data.CanonicalSnapshotRole, maxSize)
newMeta, err = f.MetadataCache.GetMeta(data.CanonicalSnapshotRole, -1)
require.NoError(t, err)
require.False(t, bytes.Equal(origMeta[data.CanonicalSnapshotRole], newMeta))
@ -444,7 +444,7 @@ func TestSwizzlerUpdateSnapshotHashesNoSpecifiedRoles(t *testing.T) {
// nothing has changed, signed data should be the same (signatures might
// change because signatures may have random elements
f.UpdateSnapshotHashes()
newMeta, err := f.MetadataCache.GetMeta(data.CanonicalSnapshotRole, maxSize)
newMeta, err := f.MetadataCache.GetMeta(data.CanonicalSnapshotRole, -1)
require.NoError(t, err)
origSigned, newSigned := &data.Signed{}, &data.Signed{}
@ -459,7 +459,7 @@ func TestSwizzlerUpdateSnapshotHashesNoSpecifiedRoles(t *testing.T) {
// update the snapshot with just no specified roles
f.UpdateSnapshotHashes()
newMeta, err = f.MetadataCache.GetMeta(data.CanonicalSnapshotRole, maxSize)
newMeta, err = f.MetadataCache.GetMeta(data.CanonicalSnapshotRole, -1)
require.NoError(t, err)
require.False(t, bytes.Equal(origMeta[data.CanonicalSnapshotRole], newMeta))
@ -490,7 +490,7 @@ func TestSwizzlerUpdateTimestamp(t *testing.T) {
// nothing has changed, signed data should be the same (signatures might
// change because signatures may have random elements
f.UpdateTimestampHash()
newMeta, err := f.MetadataCache.GetMeta(data.CanonicalTimestampRole, maxSize)
newMeta, err := f.MetadataCache.GetMeta(data.CanonicalTimestampRole, -1)
require.NoError(t, err)
origSigned, newSigned := &data.Signed{}, &data.Signed{}
@ -503,7 +503,7 @@ func TestSwizzlerUpdateTimestamp(t *testing.T) {
// update the timestamp
f.UpdateTimestampHash()
newMeta, err = f.MetadataCache.GetMeta(data.CanonicalTimestampRole, maxSize)
newMeta, err = f.MetadataCache.GetMeta(data.CanonicalTimestampRole, -1)
require.NoError(t, err)
require.False(t, bytes.Equal(origMeta[data.CanonicalTimestampRole], newMeta))