mirror of https://github.com/docker/docs.git
Merge pull request #580 from docker/builder
Bottleneck TUF Repo validation by using a builder to create one in the client and server
This commit is contained in: commit f5b4b5f810

client/client.go | 168
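The change routes all TUF metadata validation through a single tuf.RepoBuilder, on both the client and the server, instead of hand-assembling a tuf.Repo with SetRoot/SetTargets/SetSnapshot. Below is a minimal sketch of that load-then-finish flow, using only the calls that appear in this diff; the package name, the bootstrapFromCache function, and the rawMeta map are illustrative assumptions, not part of the commit.

package example

import (
    "github.com/docker/notary/trustpinning"
    "github.com/docker/notary/tuf"
    "github.com/docker/notary/tuf/data"
    "github.com/docker/notary/tuf/signed"
)

// bootstrapFromCache sketches the builder flow: rawMeta maps role names to raw
// metadata bytes read from some local cache (an assumed input, for illustration).
func bootstrapFromCache(gun string, cs signed.CryptoService, rawMeta map[string][]byte) (*tuf.Repo, error) {
    // one builder validates every role as it is loaded
    builder := tuf.NewRepoBuilder(gun, cs, trustpinning.TrustPinConfig{})
    for _, role := range data.BaseRoles {
        jsonBytes, ok := rawMeta[role]
        if !ok {
            // snapshot and timestamp may legitimately be absent locally
            continue
        }
        // the final argument allows expired metadata, as bootstrapRepo does below
        if err := builder.Load(role, jsonBytes, 1, true); err != nil {
            return nil, err
        }
    }
    // Finish only hands back a repo once the loaded roles validate together
    return builder.Finish()
}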
@@ -381,8 +381,7 @@ func (r *NotaryRepository) RemoveTarget(targetName string, roles ...string) erro
 // subtree and also the "targets/x" subtree, as we will defer parsing it until
 // we explicitly reach it in our iteration of the provided list of roles.
 func (r *NotaryRepository) ListTargets(roles ...string) ([]*TargetWithRole, error) {
-    err := r.Update(false)
-    if err != nil {
+    if err := r.Update(false); err != nil {
         return nil, err
     }

@@ -643,50 +642,33 @@ func (r *NotaryRepository) publish(cl changelist.Changelist) error {
 // a not yet published repo or a possibly obsolete local copy) into
 // r.tufRepo. This attempts to load metadata for all roles. Since server
 // snapshots are supported, if the snapshot metadata fails to load, that's ok.
-// This can also be unified with some cache reading tools from tuf/client.
 // This assumes that bootstrapRepo is only used by Publish() or RotateKey()
 func (r *NotaryRepository) bootstrapRepo() error {
-    tufRepo := tuf.NewRepo(r.CryptoService)
+    b := tuf.NewRepoBuilder(r.gun, r.CryptoService, r.trustPinning)

     logrus.Debugf("Loading trusted collection.")
-    rootJSON, err := r.fileStore.GetMeta(data.CanonicalRootRole, -1)
-    if err != nil {
-        return err
-    }
-    root := &data.SignedRoot{}
-    err = json.Unmarshal(rootJSON, root)
-    if err != nil {
-        return err
-    }
-    err = tufRepo.SetRoot(root)
-    if err != nil {
-        return err
-    }
-    targetsJSON, err := r.fileStore.GetMeta(data.CanonicalTargetsRole, -1)
-    if err != nil {
-        return err
-    }
-    targets := &data.SignedTargets{}
-    err = json.Unmarshal(targetsJSON, targets)
-    if err != nil {
-        return err
-    }
-    tufRepo.SetTargets(data.CanonicalTargetsRole, targets)
-
-    snapshotJSON, err := r.fileStore.GetMeta(data.CanonicalSnapshotRole, -1)
-    if err == nil {
-        snapshot := &data.SignedSnapshot{}
-        err = json.Unmarshal(snapshotJSON, snapshot)
-        if err != nil {
-            return err
-        }
-        tufRepo.SetSnapshot(snapshot)
-    } else if _, ok := err.(store.ErrMetaNotFound); !ok {
-        return err
-    }
-
-    r.tufRepo = tufRepo
+
+    for _, role := range data.BaseRoles {
+        jsonBytes, err := r.fileStore.GetMeta(role, -1)
+        if err != nil {
+            if _, ok := err.(store.ErrMetaNotFound); ok &&
+                // server snapshots are supported, and server timestamp management
+                // is required, so if either of these fail to load that's ok - especially
+                // if the repo is new
+                role == data.CanonicalSnapshotRole || role == data.CanonicalTimestampRole {
+                continue
+            }
+            return err
+        }
+        if err := b.Load(role, jsonBytes, 1, true); err != nil {
+            return err
+        }
+    }
+
+    tufRepo, err := b.Finish()
+    if err == nil {
+        r.tufRepo = tufRepo
+    }
     return nil
 }

@@ -756,15 +738,17 @@ func (r *NotaryRepository) Update(forWrite bool) error {
         }
         return err
     }
-    if err := c.Update(); err != nil {
+    repo, err := c.Update()
+    if err != nil {
         // notFound.Resource may include a checksum so when the role is root,
-        // it will be root.json or root.<checksum>.json. Therefore best we can
+        // it will be root or root.<checksum>. Therefore best we can
         // do it match a "root." prefix
         if notFound, ok := err.(store.ErrMetaNotFound); ok && strings.HasPrefix(notFound.Resource, data.CanonicalRootRole+".") {
             return r.errRepositoryNotExist()
         }
         return err
     }
+    r.tufRepo = repo
     return nil
 }

@@ -774,7 +758,7 @@ func (r *NotaryRepository) Update(forWrite bool) error {
 // is initialized or not. If set to true, we will always attempt to download
 // and return an error if the remote repository errors.
 //
-// Partially populates r.tufRepo with this root metadata (only; use
+// Populates a tuf.RepoBuilder with this root metadata (only use
 // tufclient.Client.Update to load the rest).
 //
 // Fails if the remote server is reachable and does not know the repo
@@ -785,26 +769,42 @@ func (r *NotaryRepository) Update(forWrite bool) error {
 // Returns a tufclient.Client for the remote server, which may not be actually
 // operational (if the URL is invalid but a root.json is cached).
 func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Client, error) {
-    var (
-        rootJSON   []byte
-        err        error
-        signedRoot *data.SignedRoot
-    )
-    // try to read root from cache first. We will trust this root
-    // until we detect a problem during update which will cause
-    // us to download a new root and perform a rotation.
-    rootJSON, cachedRootErr := r.fileStore.GetMeta(data.CanonicalRootRole, -1)
-
-    if cachedRootErr == nil {
-        signedRoot, cachedRootErr = r.validateRoot(rootJSON, false)
+    minVersion := 1
+    // the old root on disk should not be validated against any trust pinning configuration
+    // because if we have an old root, it itself is the thing that pins trust
+    oldBuilder := tuf.NewRepoBuilder(r.gun, r.CryptoService, trustpinning.TrustPinConfig{})
+
+    // by default, we want to use the trust pinning configuration on any new root that we download
+    newBuilder := tuf.NewRepoBuilder(r.gun, r.CryptoService, r.trustPinning)
+
+    // Try to read root from cache first. We will trust this root until we detect a problem
+    // during update which will cause us to download a new root and perform a rotation.
+    // If we have an old root, and it's valid, then we overwrite the newBuilder to be one
+    // preloaded with the old root or one which uses the old root for trust bootstrapping.
+    if rootJSON, err := r.fileStore.GetMeta(data.CanonicalRootRole, -1); err == nil {
+        // if we can't load the cached root, fail hard because that is how we pin trust
+        if err := oldBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, true); err != nil {
+            return nil, err
+        }
+
+        // again, the root on disk is the source of trust pinning, so use an empty trust
+        // pinning configuration
+        newBuilder = tuf.NewRepoBuilder(r.gun, r.CryptoService, trustpinning.TrustPinConfig{})
+
+        if err := newBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, false); err != nil {
+            // Ok, the old root is expired - we want to download a new one. But we want to use the
+            // old root to verify the new root, so bootstrap a new builder with the old builder
+            minVersion = oldBuilder.GetLoadedVersion(data.CanonicalRootRole)
+            newBuilder = oldBuilder.BootstrapNewBuilder()
+        }
     }

     remote, remoteErr := getRemoteStore(r.baseURL, r.gun, r.roundTrip)
     if remoteErr != nil {
         logrus.Error(remoteErr)
-    } else if cachedRootErr != nil || checkInitialized {
-        // remoteErr was nil and we had a cachedRootErr (or are specifically
-        // checking for initialization of the repo).
+    } else if !newBuilder.IsLoaded(data.CanonicalRootRole) || checkInitialized {
+        // remoteErr was nil and we were not able to load a root from cache or
+        // are specifically checking for initialization of the repo.

         // if remote store successfully set up, try and get root from remote
         // We don't have any local data to determine the size of root, so try the maximum (though it is restricted at 100MB)
@@ -814,11 +814,10 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Cl
             // the server. Nothing we can do but error.
             return nil, err
         }
-        if cachedRootErr != nil {
-            // we always want to use the downloaded root if there was a cache
-            // error.
-            signedRoot, err = r.validateRoot(tmpJSON, true)
-            if err != nil {
+        if !newBuilder.IsLoaded(data.CanonicalRootRole) {
+            // we always want to use the downloaded root if we couldn't load from cache
+            if err := newBuilder.Load(data.CanonicalRootRole, tmpJSON, minVersion, false); err != nil {
                 return nil, err
             }

@@ -830,58 +829,13 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Cl
         }
     }

-    r.tufRepo = tuf.NewRepo(r.CryptoService)
-    if signedRoot == nil {
+    // We can only get here if remoteErr != nil (hence we don't download any new root),
+    // and there was no root on disk
+    if !newBuilder.IsLoaded(data.CanonicalRootRole) {
         return nil, ErrRepoNotInitialized{}
     }

-    err = r.tufRepo.SetRoot(signedRoot)
-    if err != nil {
-        return nil, err
-    }
-
-    return tufclient.NewClient(
-        r.tufRepo,
-        remote,
-        r.fileStore,
-    ), nil
-}
-
-// validateRoot MUST only be used during bootstrapping. It will only validate
-// signatures of the root based on known keys, not expiry or other metadata.
-// This is so that an out of date root can be loaded to be used in a rotation
-// should the TUF update process detect a problem.
-func (r *NotaryRepository) validateRoot(rootJSON []byte, fromRemote bool) (*data.SignedRoot, error) {
-    // can't just unmarshal into SignedRoot because validate root
-    // needs the root.Signed field to still be []byte for signature
-    // validation
-    root := &data.Signed{}
-    err := json.Unmarshal(rootJSON, root)
-    if err != nil {
-        return nil, err
-    }
-
-    // If we're downloading a root from a remote source, attempt to load a local root
-    // to ensure that we consider old roots when validating this new one
-    var prevRoot *data.SignedRoot
-    if fromRemote {
-        prevRootJSON, err := r.fileStore.GetMeta(data.CanonicalRootRole, -1)
-        // A previous root exists, so we attempt to use it
-        // If for some reason we can't extract it (ex: it's corrupted), we should error client-side to be conservative
-        if err == nil {
-            prevSignedRoot := &data.Signed{}
-            err = json.Unmarshal(prevRootJSON, prevSignedRoot)
-            if err != nil {
-                return nil, &trustpinning.ErrValidationFail{fmt.Sprintf("unable to unmarshal previously trusted root from disk: %v", err)}
-            }
-            prevRoot, err = data.RootFromSigned(prevSignedRoot)
-            if err != nil {
-                return nil, &trustpinning.ErrValidationFail{fmt.Sprintf("error loading previously trusted root into valid role format: %v", err)}
-            }
-        }
-    }
-    return trustpinning.ValidateRoot(prevRoot, root, r.gun, r.trustPinning)
+    return tufclient.NewClient(oldBuilder, newBuilder, remote, r.fileStore), nil
 }

 // RotateKey removes all existing keys associated with the role, and either
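The bootstrapClient hunks above use two builders: oldBuilder holds the root already cached on disk (loaded with an empty trust-pinning config, since that root is itself the trust anchor), while newBuilder is what the update will populate, falling back to the old builder's keys when the cached root has expired. A condensed, hedged sketch of that decision, using only calls shown in the diff; the package name, the trustAnchors function, and its parameter shapes are assumptions for illustration, and the real code only takes this path when a cached root is actually found.

package example

import (
    "github.com/docker/notary/trustpinning"
    "github.com/docker/notary/tuf"
    "github.com/docker/notary/tuf/data"
    "github.com/docker/notary/tuf/signed"
)

// trustAnchors sketches how a cached root seeds the builders used for an update.
func trustAnchors(gun string, cs signed.CryptoService, pinning trustpinning.TrustPinConfig, cachedRoot []byte) (tuf.RepoBuilder, tuf.RepoBuilder, int, error) {
    minVersion := 1
    // the cached root is the trust anchor, so it is not checked against any pinning config
    oldBuilder := tuf.NewRepoBuilder(gun, cs, trustpinning.TrustPinConfig{})
    // by default, the configured pinning would apply to any newly downloaded root
    newBuilder := tuf.NewRepoBuilder(gun, cs, pinning)

    // a cached root that cannot be loaded at all breaks trust pinning: fail hard
    if err := oldBuilder.Load(data.CanonicalRootRole, cachedRoot, minVersion, true); err != nil {
        return nil, nil, 0, err
    }

    // with a usable root on disk, that root (not the config) pins trust
    newBuilder = tuf.NewRepoBuilder(gun, cs, trustpinning.TrustPinConfig{})
    if err := newBuilder.Load(data.CanonicalRootRole, cachedRoot, minVersion, false); err != nil {
        // the cached root is expired: keep its version and keys, but accept a newer root
        minVersion = oldBuilder.GetLoadedVersion(data.CanonicalRootRole)
        newBuilder = oldBuilder.BootstrapNewBuilder()
    }
    return oldBuilder, newBuilder, minVersion, nil
}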
@@ -5,7 +5,6 @@ import (
     "crypto/rand"
     "crypto/sha256"
     "encoding/hex"
-    regJson "encoding/json"
     "fmt"
     "io/ioutil"
     "math"
@@ -1936,11 +1935,7 @@ func testPublishBadMetadata(t *testing.T, roleName string, repo *NotaryRepositor
         require.NoError(t, err)
     } else {
         require.Error(t, err)
-        if roleName == data.CanonicalRootRole && publishFirst {
-            require.IsType(t, &trustpinning.ErrValidationFail{}, err)
-        } else {
-            require.IsType(t, &regJson.SyntaxError{}, err)
-        }
+        require.IsType(t, &json.SyntaxError{}, err)
     }

     // make an unreadable file by creating a directory instead of a file
@@ -2,7 +2,6 @@ package client

 import (
     "bytes"
-    "encoding/json"
     "fmt"
     "io/ioutil"
     "net/http"
@@ -15,6 +14,7 @@ import (
     "testing"
     "time"

+    "github.com/docker/go/canonical/json"
     "github.com/docker/notary"
     "github.com/docker/notary/passphrase"
     "github.com/docker/notary/trustpinning"
@@ -108,13 +108,7 @@ func TestUpdateSucceedsEvenIfCannotWriteNewRepo(t *testing.T) {
         repo := newBlankRepo(t, ts.URL)
         repo.fileStore = &unwritableStore{MetadataStore: repo.fileStore, roleToNotWrite: role}
         err := repo.Update(false)
-        if role == data.CanonicalRootRole {
-            require.Error(t, err) // because checkRoot loads root from cache to check hashes
-            continue
-        } else {
-            require.NoError(t, err)
-        }
+        require.NoError(t, err)

         for r, expected := range serverMeta {
             actual, err := repo.fileStore.GetMeta(r, -1)
@@ -161,10 +155,6 @@ func TestUpdateSucceedsEvenIfCannotWriteExistingRepo(t *testing.T) {
         repo.fileStore = &unwritableStore{MetadataStore: origFileStore, roleToNotWrite: role}
         err := repo.Update(forWrite)

-        if role == data.CanonicalRootRole {
-            require.Error(t, err) // because checkRoot loads root from cache to check hashes
-            continue
-        }
         require.NoError(t, err)

         for r, expected := range serverMeta {
@@ -189,19 +179,26 @@ type swizzleExpectations struct {
     expectErrs []interface{}
 }

+// the errors here are only relevant for root - we bail if the root is corrupt, but
+// other metadata will be replaced
 var waysToMessUpLocalMetadata = []swizzleExpectations{
     // for instance if the metadata got truncated or otherwise block corrupted
-    {desc: "invalid JSON", swizzle: (*testutils.MetadataSwizzler).SetInvalidJSON},
+    {desc: "invalid JSON", swizzle: (*testutils.MetadataSwizzler).SetInvalidJSON,
+        expectErrs: []interface{}{&json.SyntaxError{}}},
     // if the metadata was accidentally deleted
-    {desc: "missing metadata", swizzle: (*testutils.MetadataSwizzler).RemoveMetadata},
+    {desc: "missing metadata", swizzle: (*testutils.MetadataSwizzler).RemoveMetadata,
+        expectErrs: []interface{}{store.ErrMetaNotFound{}, ErrRepoNotInitialized{}, ErrRepositoryNotExist{}}},
     // if the signature was invalid - maybe the user tried to modify something manually
     // that they forgot (add a key, or something)
     {desc: "signed with right key but wrong hash",
-        swizzle: (*testutils.MetadataSwizzler).InvalidateMetadataSignatures},
+        swizzle: (*testutils.MetadataSwizzler).InvalidateMetadataSignatures,
+        expectErrs: []interface{}{&trustpinning.ErrValidationFail{}, signed.ErrRoleThreshold{}}},
     // if the user copied the wrong root.json over it by accident or something
-    {desc: "signed with wrong key", swizzle: (*testutils.MetadataSwizzler).SignMetadataWithInvalidKey},
+    {desc: "signed with wrong key", swizzle: (*testutils.MetadataSwizzler).SignMetadataWithInvalidKey,
+        expectErrs: []interface{}{&trustpinning.ErrValidationFail{}, signed.ErrRoleThreshold{}}},
     // self explanatory
-    {desc: "expired metadata", swizzle: (*testutils.MetadataSwizzler).ExpireMetadata},
+    {desc: "expired metadata", swizzle: (*testutils.MetadataSwizzler).ExpireMetadata,
+        expectErrs: []interface{}{signed.ErrExpired{}}},

     // Not trying any of the other repoSwizzler methods, because those involve modifying
     // and re-serializing, and that means a user has the root and other keys and was trying to
@@ -239,9 +236,12 @@ func TestUpdateReplacesCorruptOrMissingMetadata(t *testing.T) {
             for _, forWrite := range []bool{true, false} {
                 require.NoError(t, messItUp(repoSwizzler, role), "could not fuzz %s (%s)", role, text)
                 err := repo.Update(forWrite)
-                // if this is a root role, we should error if it's corrupted data
-                if role == data.CanonicalRootRole && expt.desc == "invalid JSON" {
-                    require.Error(t, err)
+                // If this is a root role, we should error if it's corrupted or invalid data;
+                // missing metadata is ok.
+                if role == data.CanonicalRootRole && expt.desc != "missing metadata" &&
+                    expt.desc != "expired metadata" {
+
+                    require.Error(t, err, "%s for %s: expected to error when bootstrapping root", text, role)
                     // revert our original metadata
                     for role := range origMeta {
                         require.NoError(t, repo.fileStore.SetMeta(role, origMeta[role]))
@@ -774,9 +774,9 @@ func testUpdateRemoteFileChecksumWrong(t *testing.T, opts updateOpts, errExpecte
         _, isErrMaliciousServer := err.(store.ErrMaliciousServer)
         rightError = isErrChecksum || isErrMaliciousServer
     }
-    require.True(t, rightError, err,
+    require.True(t, rightError,
         "wrong update error (%v) when %s has the wrong checksum (forWrite: %v)",
-        err, opts.role, opts.forWrite)
+        reflect.TypeOf(err), opts.role, opts.forWrite)
 }
 }

@@ -1254,10 +1254,18 @@ func testUpdateLocalAndRemoteRootCorrupt(t *testing.T, forWrite bool, localExpt,
     err = repo.Update(forWrite)
     require.Error(t, err, "expected failure updating when %s", msg)

+    expectedErrs := serverExpt.expectErrs
+    // If the local root is corrupt or invalid, we won't even try to update and
+    // will fail with the local metadata error. Missing or expired metadata is ok.
+    if localExpt.desc != "missing metadata" && localExpt.desc != "expired metadata" {
+        expectedErrs = localExpt.expectErrs
+    }
+
     errType := reflect.TypeOf(err)
     isExpectedType := false
     var expectedTypes []string
-    for _, expectErr := range serverExpt.expectErrs {
+    for _, expectErr := range expectedErrs {
         expectedType := reflect.TypeOf(expectErr)
         isExpectedType = isExpectedType || errType == expectedType
         expectedTypes = append(expectedTypes, expectedType.String())
@@ -1281,7 +1289,7 @@ func TestUpdateRemoteKeyRotated(t *testing.T) {
     }
 }

-func testUpdateRemoteKeyRotated(t *testing.T, targetsRole string) {
+func testUpdateRemoteKeyRotated(t *testing.T, role string) {
     _, serverSwizzler := newServerSwizzler(t)
     ts := readOnlyServer(t, serverSwizzler.MetadataCache, http.StatusNotFound, "docker.com/notary")
     defer ts.Close()
@@ -1294,30 +1302,38 @@ func testUpdateRemoteKeyRotated(t *testing.T, targetsRole string) {
     require.NoError(t, err)

     cs := signed.NewEd25519()
-    pubKey, err := cs.Create(targetsRole, repo.gun, data.ED25519Key)
+    pubKey, err := cs.Create(role, repo.gun, data.ED25519Key)
     require.NoError(t, err)

     // bump the version
-    bumpRole := path.Dir(targetsRole)
-    if !data.IsDelegation(targetsRole) {
+    bumpRole := path.Dir(role)
+    if !data.IsDelegation(role) {
         bumpRole = data.CanonicalRootRole
     }
     require.NoError(t, serverSwizzler.OffsetMetadataVersion(bumpRole, 1),
         "failed to swizzle remote %s to bump version", bumpRole)
     // now change the key
-    require.NoError(t, serverSwizzler.RotateKey(targetsRole, pubKey),
-        "failed to swizzle remote %s to rotate key", targetsRole)
+    require.NoError(t, serverSwizzler.RotateKey(role, pubKey),
+        "failed to swizzle remote %s to rotate key", role)

     // update the hashes on both snapshot and timestamp
     require.NoError(t, serverSwizzler.UpdateSnapshotHashes())
     require.NoError(t, serverSwizzler.UpdateTimestampHash())

-    msg := fmt.Sprintf("swizzling %s remotely to rotate key (forWrite: false)", targetsRole)
+    msg := fmt.Sprintf("swizzling %s remotely to rotate key (forWrite: false)", role)

     err = repo.Update(false)
     require.Error(t, err, "expected failure updating when %s", msg)
-    require.IsType(t, signed.ErrRoleThreshold{}, err, "expected ErrRoleThreshold when %s: got %s",
-        msg, reflect.TypeOf(err))
+    switch role {
+    case data.CanonicalRootRole:
+        require.IsType(t, &trustpinning.ErrValidationFail{}, err,
+            "expected trustpinning.ErrValidationFail when %s: got %s",
+            msg, reflect.TypeOf(err))
+    default:
+        require.IsType(t, signed.ErrRoleThreshold{}, err,
+            "expected ErrRoleThreshold when %s: got %s",
+            msg, reflect.TypeOf(err))
+    }
 }

 // Helper function that takes a signedRoot, and signs it with the provided keys and only these keys.
@@ -1585,3 +1601,32 @@ func TestDownloadSnapshotLargeDelegationsMany(t *testing.T) {
     // the snapshot downloaded has numSnapsnotMeta items + one for root and one for targets
     require.Len(t, notaryRepo.tufRepo.Snapshot.Signed.Meta, numSnapsnotMeta+2)
 }
+
+// If we have a root on disk, use it as the source of trust pinning rather than the trust pinning
+// config
+func TestRootOnDiskTrustPinning(t *testing.T) {
+    meta, serverSwizzler := newServerSwizzler(t)
+
+    ts := readOnlyServer(t, serverSwizzler.MetadataCache, http.StatusNotFound, "docker.com/notary")
+    defer ts.Close()
+
+    restrictiveTrustPinning := trustpinning.TrustPinConfig{DisableTOFU: true}
+
+    // for sanity, ensure that without a root on disk, we can't download a new root
+    repo := newBlankRepo(t, ts.URL)
+    defer os.RemoveAll(repo.baseDir)
+    repo.trustPinning = restrictiveTrustPinning
+
+    err := repo.Update(false)
+    require.Error(t, err)
+    require.IsType(t, &trustpinning.ErrValidationFail{}, err)
+
+    // show that if we have a root on disk, we can update
+    repo = newBlankRepo(t, ts.URL)
+    defer os.RemoveAll(repo.baseDir)
+    repo.trustPinning = restrictiveTrustPinning
+    // put root on disk
+    require.NoError(t, repo.fileStore.SetMeta(data.CanonicalRootRole, meta[data.CanonicalRootRole]))
+
+    require.NoError(t, repo.Update(false))
+}
@@ -334,7 +334,7 @@ func TestAtomicUpdateValidationFailurePropagated(t *testing.T) {
     repo, cs, err := testutils.EmptyRepo(gun)
     require.NoError(t, err)

-    state := handlerState{store: metaStore, crypto: copyKeys(t, cs, data.CanonicalTimestampRole)}
+    state := handlerState{store: metaStore, crypto: testutils.CopyKeys(t, cs, data.CanonicalTimestampRole)}

     r, tg, sn, ts, err := testutils.Sign(repo)
     require.NoError(t, err)
@@ -376,7 +376,7 @@ func TestAtomicUpdateNonValidationFailureNotPropagated(t *testing.T) {
     repo, cs, err := testutils.EmptyRepo(gun)
     require.NoError(t, err)

-    state := handlerState{store: &failStore{*metaStore}, crypto: copyKeys(t, cs, data.CanonicalTimestampRole)}
+    state := handlerState{store: &failStore{*metaStore}, crypto: testutils.CopyKeys(t, cs, data.CanonicalTimestampRole)}

     r, tg, sn, ts, err := testutils.Sign(repo)
     require.NoError(t, err)
@@ -418,7 +418,7 @@ func TestAtomicUpdateVersionErrorPropagated(t *testing.T) {
     require.NoError(t, err)

     state := handlerState{
-        store: &invalidVersionStore{*metaStore}, crypto: copyKeys(t, cs, data.CanonicalTimestampRole)}
+        store: &invalidVersionStore{*metaStore}, crypto: testutils.CopyKeys(t, cs, data.CanonicalTimestampRole)}

     r, tg, sn, ts, err := testutils.Sign(repo)
     require.NoError(t, err)
@@ -1,19 +1,15 @@
 package handlers

 import (
-    "bytes"
-    "encoding/json"
-    "errors"
     "fmt"
     "path"
     "sort"
-    "strings"

     "github.com/Sirupsen/logrus"

-    "github.com/docker/notary/server/snapshot"
+    "github.com/docker/go/canonical/json"
     "github.com/docker/notary/server/storage"
-    "github.com/docker/notary/server/timestamp"
+    "github.com/docker/notary/trustpinning"
     "github.com/docker/notary/tuf"
     "github.com/docker/notary/tuf/data"
     "github.com/docker/notary/tuf/signed"
@@ -28,9 +24,6 @@ import (
 // created and added if snapshotting has been delegated to the
 // server
 func validateUpdate(cs signed.CryptoService, gun string, updates []storage.MetaUpdate, store storage.MetaStore) ([]storage.MetaUpdate, error) {
-    repo := tuf.NewRepo(cs)
-    rootRole := data.CanonicalRootRole
-    snapshotRole := data.CanonicalSnapshotRole

     // some delegated targets role may be invalid based on other updates
     // that have been made by other clients. We'll rebuild the slice of
@@ -42,49 +35,30 @@ func validateUpdate(cs signed.CryptoService, gun string, updates []storage.MetaU
         roles[v.Role] = v
     }

-    var root *data.SignedRoot
-    _, oldRootJSON, err := store.GetCurrent(gun, rootRole)
-    if _, ok := err.(storage.ErrNotFound); err != nil && !ok {
-        // problem with storage. No expectation we can
-        // write if we can't read so bail.
-        logrus.Error("error reading previous root: ", err.Error())
-        return nil, err
-    }
-    if rootUpdate, ok := roles[rootRole]; ok {
-        // if root is present, validate its integrity, possibly
-        // against a previous root
-        if root, err = validateRoot(gun, oldRootJSON, rootUpdate.Data); err != nil {
-            logrus.Error("ErrBadRoot: ", err.Error())
-            return nil, err
-        }
-
-        // setting root will update keys db
-        if err = repo.SetRoot(root); err != nil {
-            logrus.Error("ErrValidation: ", err.Error())
-            return nil, validation.ErrValidation{Msg: err.Error()}
-        }
-        logrus.Debug("Successfully validated root")
-        updatesToApply = append(updatesToApply, rootUpdate)
-    } else {
-        if oldRootJSON == nil {
-            return nil, validation.ErrValidation{Msg: "no pre-existing root and no root provided in update."}
-        }
-        parsedOldRoot := &data.SignedRoot{}
-        if err := json.Unmarshal(oldRootJSON, parsedOldRoot); err != nil {
-            return nil, fmt.Errorf("pre-existing root is corrupt")
-        }
-        if err = repo.SetRoot(parsedOldRoot); err != nil {
-            logrus.Error("ErrValidation: ", err.Error())
-            return nil, validation.ErrValidation{Msg: err.Error()}
-        }
+    builder := tuf.NewRepoBuilder(gun, cs, trustpinning.TrustPinConfig{})
+    if err := loadFromStore(gun, data.CanonicalRootRole, builder, store); err != nil {
+        if _, ok := err.(storage.ErrNotFound); !ok {
+            return nil, err
+        }
+    }
+
+    if rootUpdate, ok := roles[data.CanonicalRootRole]; ok {
+        builder = builder.BootstrapNewBuilder()
+        if err := builder.Load(data.CanonicalRootRole, rootUpdate.Data, 1, false); err != nil {
+            return nil, validation.ErrBadRoot{Msg: err.Error()}
+        }
+
+        logrus.Debug("Successfully validated root")
+        updatesToApply = append(updatesToApply, rootUpdate)
+    } else if !builder.IsLoaded(data.CanonicalRootRole) {
+        return nil, validation.ErrValidation{Msg: "no pre-existing root and no root provided in update."}
     }

-    targetsToUpdate, err := loadAndValidateTargets(gun, repo, roles, store)
+    targetsToUpdate, err := loadAndValidateTargets(gun, builder, roles, store)
     if err != nil {
         return nil, err
     }
     updatesToApply = append(updatesToApply, targetsToUpdate...)

     // there's no need to load files from the database if no targets etc...
     // were uploaded because that means they haven't been updated and
     // the snapshot will already contain the correct hashes and sizes for
@@ -92,27 +66,12 @@ func validateUpdate(cs signed.CryptoService, gun string, updates []storage.MetaU
     logrus.Debug("Successfully validated targets")

     // At this point, root and targets must have been loaded into the repo
-    if _, ok := roles[snapshotRole]; ok {
-        var oldSnap *data.SignedSnapshot
-        _, oldSnapJSON, err := store.GetCurrent(gun, snapshotRole)
-        if _, ok := err.(storage.ErrNotFound); err != nil && !ok {
-            // problem with storage. No expectation we can
-            // write if we can't read so bail.
-            logrus.Error("error reading previous snapshot: ", err.Error())
-            return nil, err
-        } else if err == nil {
-            oldSnap = &data.SignedSnapshot{}
-            if err := json.Unmarshal(oldSnapJSON, oldSnap); err != nil {
-                oldSnap = nil
-            }
-        }
-
-        if err := loadAndValidateSnapshot(snapshotRole, oldSnap, roles[snapshotRole], roles, repo); err != nil {
-            logrus.Error("ErrBadSnapshot: ", err.Error())
+    if snapshotUpdate, ok := roles[data.CanonicalSnapshotRole]; ok {
+        if err := builder.Load(data.CanonicalSnapshotRole, snapshotUpdate.Data, 1, false); err != nil {
             return nil, validation.ErrBadSnapshot{Msg: err.Error()}
         }
         logrus.Debug("Successfully validated snapshot")
-        updatesToApply = append(updatesToApply, roles[snapshotRole])
+        updatesToApply = append(updatesToApply, roles[data.CanonicalSnapshotRole])
     } else {
         // Check:
         // - we have a snapshot key
@@ -120,7 +79,7 @@ func validateUpdate(cs signed.CryptoService, gun string, updates []storage.MetaU
         // Then:
         // - generate a new snapshot
         // - add it to the updates
-        update, err := generateSnapshot(gun, repo, store)
+        update, err := generateSnapshot(gun, builder, store)
         if err != nil {
             return nil, err
         }
@@ -128,15 +87,14 @@ func validateUpdate(cs signed.CryptoService, gun string, updates []storage.MetaU
     }

     // generate a timestamp immediately
-    update, err := generateTimestamp(gun, repo, store)
+    update, err := generateTimestamp(gun, builder, store)
     if err != nil {
         return nil, err
     }

     return append(updatesToApply, *update), nil
 }

-func loadAndValidateTargets(gun string, repo *tuf.Repo, roles map[string]storage.MetaUpdate, store storage.MetaStore) ([]storage.MetaUpdate, error) {
+func loadAndValidateTargets(gun string, builder tuf.RepoBuilder, roles map[string]storage.MetaUpdate, store storage.MetaStore) ([]storage.MetaUpdate, error) {
     targetsRoles := make(utils.RoleList, 0)
     for role := range roles {
         if role == data.CanonicalTargetsRole || data.IsDelegation(role) {
@@ -151,60 +109,42 @@ func loadAndValidateTargets(gun string, repo *tuf.Repo, roles map[string]storage
     sort.Sort(targetsRoles)

     updatesToApply := make([]storage.MetaUpdate, 0, len(targetsRoles))
-    for _, role := range targetsRoles {
+    for _, roleName := range targetsRoles {
         // don't load parent if current role is "targets",
-        // we must load all ancestor roles for delegations to validate the full parent chain
-        ancestorRole := role
+        // we must load all ancestor roles, starting from `targets` and working down,
+        // for delegations to validate the full parent chain
+        var parentsToLoad []string
+        ancestorRole := roleName
         for ancestorRole != data.CanonicalTargetsRole {
             ancestorRole = path.Dir(ancestorRole)
-            if _, ok := repo.Targets[ancestorRole]; !ok {
-                err := loadTargetsFromStore(gun, ancestorRole, repo, store)
-                if err != nil {
-                    return nil, err
-                }
+            if !builder.IsLoaded(ancestorRole) {
+                parentsToLoad = append(parentsToLoad, ancestorRole)
             }
         }
-        var (
-            t   *data.SignedTargets
-            err error
-        )
-        if t, err = validateTargets(role, roles, repo); err != nil {
-            if _, ok := err.(data.ErrInvalidRole); ok {
-                // role wasn't found in its parent. It has been removed
-                // or never existed. Drop this role from the update
-                // (by not adding it to updatesToApply)
-                continue
-            }
+        for i := len(parentsToLoad) - 1; i >= 0; i-- {
+            if err := loadFromStore(gun, parentsToLoad[i], builder, store); err != nil {
+                // if the parent doesn't exist, just keep going - loading the role will eventually fail
+                // due to it being an invalid role
+                if _, ok := err.(storage.ErrNotFound); !ok {
+                    return nil, err
+                }
+            }
+        }
+
+        if err := builder.Load(roleName, roles[roleName].Data, 1, false); err != nil {
             logrus.Error("ErrBadTargets: ", err.Error())
             return nil, validation.ErrBadTargets{Msg: err.Error()}
         }
-        // this will load keys and roles into the kdb
-        err = repo.SetTargets(role, t)
-        if err != nil {
-            return nil, err
-        }
-        updatesToApply = append(updatesToApply, roles[role])
+        updatesToApply = append(updatesToApply, roles[roleName])
     }
-    return updatesToApply, nil
-}

-func loadTargetsFromStore(gun, role string, repo *tuf.Repo, store storage.MetaStore) error {
-    _, tgtJSON, err := store.GetCurrent(gun, role)
-    if err != nil {
-        return err
-    }
-    t := &data.SignedTargets{}
-    err = json.Unmarshal(tgtJSON, t)
-    if err != nil {
-        return err
-    }
-    return repo.SetTargets(role, t)
+    return updatesToApply, nil
 }

 // generateSnapshot generates a new snapshot from the previous one in the store - this assumes all
 // the other roles except timestamp have already been set on the repo, and will set the generated
 // snapshot on the repo as well
-func generateSnapshot(gun string, repo *tuf.Repo, store storage.MetaStore) (*storage.MetaUpdate, error) {
+func generateSnapshot(gun string, builder tuf.RepoBuilder, store storage.MetaStore) (*storage.MetaUpdate, error) {
     var prev *data.SignedSnapshot
     _, currentJSON, err := store.GetCurrent(gun, data.CanonicalSnapshotRole)
     if err == nil {
@@ -219,17 +159,21 @@ func generateSnapshot(gun string, repo *tuf.Repo, store storage.MetaStore) (*sto
         return nil, err
     }

-    metaUpdate, err := snapshot.NewSnapshotUpdate(prev, repo)
+    meta, ver, err := builder.GenerateSnapshot(prev)

     switch err.(type) {
-    case signed.ErrInsufficientSignatures, signed.ErrNoKeys:
+    case nil:
+        return &storage.MetaUpdate{
+            Role:    data.CanonicalSnapshotRole,
+            Version: ver,
+            Data:    meta,
+        }, nil
+    case signed.ErrInsufficientSignatures, signed.ErrNoKeys, signed.ErrRoleThreshold:
         // If we cannot sign the snapshot, then we don't have keys for the snapshot,
         // and the client should have submitted a snapshot
         return nil, validation.ErrBadHierarchy{
             Missing: data.CanonicalSnapshotRole,
             Msg:     "no snapshot was included in update and server does not hold current snapshot key for repository"}
-    case nil:
-        return metaUpdate, nil
-
     default:
         return nil, validation.ErrValidation{Msg: err.Error()}
     }
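In the hunk above, snapshot signing moves into the builder: GenerateSnapshot takes the previously stored snapshot (or nil) and returns the serialized metadata plus its new version, which the handler wraps in a storage.MetaUpdate. A small sketch of that wrapper, using only calls visible in the diff; the package and function names here are illustrative, and the real handler additionally maps signing errors to validation errors as shown above.

package example

import (
    "github.com/docker/notary/server/storage"
    "github.com/docker/notary/tuf"
    "github.com/docker/notary/tuf/data"
)

// snapshotUpdate assumes builder already holds a validated root and targets.
func snapshotUpdate(builder tuf.RepoBuilder, prev *data.SignedSnapshot) (*storage.MetaUpdate, error) {
    // prev may be nil if no snapshot has ever been created for the repository
    meta, ver, err := builder.GenerateSnapshot(prev)
    if err != nil {
        return nil, err
    }
    return &storage.MetaUpdate{
        Role:    data.CanonicalSnapshotRole,
        Version: ver,
        Data:    meta,
    }, nil
}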
@@ -237,7 +181,7 @@ func generateSnapshot(gun string, repo *tuf.Repo, store storage.MetaStore) (*sto

 // generateTimestamp generates a new timestamp from the previous one in the store - this assumes all
 // the other roles have already been set on the repo, and will set the generated timestamp on the repo as well
-func generateTimestamp(gun string, repo *tuf.Repo, store storage.MetaStore) (*storage.MetaUpdate, error) {
+func generateTimestamp(gun string, builder tuf.RepoBuilder, store storage.MetaStore) (*storage.MetaUpdate, error) {
     var prev *data.SignedTimestamp
     _, currentJSON, err := store.GetCurrent(gun, data.CanonicalTimestampRole)

@@ -254,197 +198,33 @@ func generateTimestamp(gun string, repo *tuf.Repo, store storage.MetaStore) (*st
         return nil, err
     }

-    metaUpdate, err := timestamp.NewTimestampUpdate(prev, repo)
+    meta, ver, err := builder.GenerateTimestamp(prev)

     switch err.(type) {
     case nil:
-        return metaUpdate, nil
+        return &storage.MetaUpdate{
+            Role:    data.CanonicalTimestampRole,
+            Version: ver,
+            Data:    meta,
+        }, nil
     case signed.ErrInsufficientSignatures, signed.ErrNoKeys:
         // If we cannot sign the timestamp, then we don't have keys for the timestamp,
         // and the client screwed up their root
         return nil, validation.ErrBadRoot{
-            Msg: fmt.Sprintf("none of the following timestamp keys exist on the server: %s",
-                strings.Join(repo.Root.Signed.Roles[data.CanonicalTimestampRole].KeyIDs, ", ")),
+            Msg: fmt.Sprintf("no timestamp keys exist on the server"),
         }
     default:
         return nil, validation.ErrValidation{Msg: err.Error()}
     }
 }

-// loadAndValidateSnapshot validates that the given snapshot update is valid. It also sets the new snapshot
-// on the TUF repo, if it is valid
-func loadAndValidateSnapshot(role string, oldSnap *data.SignedSnapshot, snapUpdate storage.MetaUpdate, roles map[string]storage.MetaUpdate, repo *tuf.Repo) error {
-    s := &data.Signed{}
-    err := json.Unmarshal(snapUpdate.Data, s)
-    if err != nil {
-        return errors.New("could not parse snapshot")
-    }
-    // version specifically gets validated when writing to store to
-    // better handle race conditions there.
-    snapshotRole, err := repo.GetBaseRole(role)
+func loadFromStore(gun, roleName string, builder tuf.RepoBuilder, store storage.MetaStore) error {
+    _, metaJSON, err := store.GetCurrent(gun, roleName)
     if err != nil {
         return err
     }
-    if err := signed.Verify(s, snapshotRole, 0); err != nil {
+    if err := builder.Load(roleName, metaJSON, 1, true); err != nil {
         return err
     }
-
-    snap, err := data.SnapshotFromSigned(s)
-    if err != nil {
-        return errors.New("could not parse snapshot")
-    }
-    if !data.ValidTUFType(snap.Signed.Type, data.CanonicalSnapshotRole) {
-        return errors.New("snapshot has wrong type")
-    }
-    err = checkSnapshotEntries(role, oldSnap, snap, roles)
-    if err != nil {
-        return err
-    }
-    repo.SetSnapshot(snap)
-    return nil
-}
-
-func checkSnapshotEntries(role string, oldSnap, snap *data.SignedSnapshot, roles map[string]storage.MetaUpdate) error {
-    snapshotRole := data.CanonicalSnapshotRole
-    timestampRole := data.CanonicalTimestampRole
-    for r, update := range roles {
-        if r == snapshotRole || r == timestampRole {
-            continue
-        }
-        m, ok := snap.Signed.Meta[r]
-        if !ok {
-            return fmt.Errorf("snapshot missing metadata for %s", r)
-        }
-        if int64(len(update.Data)) != m.Length {
-            return fmt.Errorf("snapshot has incorrect length for %s", r)
-        }
-
-        if !checkHashes(m, update.Data) {
-            return fmt.Errorf("snapshot has incorrect hashes for %s", r)
-        }
-    }
-    return nil
-}
-
-func checkHashes(meta data.FileMeta, update []byte) bool {
-    for alg, digest := range meta.Hashes {
-        d := utils.DoHash(alg, update)
-        if !bytes.Equal(digest, d) {
-            return false
-        }
-    }
-    return true
-}
-
-func validateTargets(role string, roles map[string]storage.MetaUpdate, repo *tuf.Repo) (*data.SignedTargets, error) {
-    // TODO: when delegations are being validated, validate parent
-    // role exists for any delegation
-    s := &data.Signed{}
-    err := json.Unmarshal(roles[role].Data, s)
-    if err != nil {
-        return nil, fmt.Errorf("could not parse %s", role)
-    }
-    // version specifically gets validated when writing to store to
-    // better handle race conditions there.
-    var targetOrDelgRole data.BaseRole
-    if role == data.CanonicalTargetsRole {
-        targetOrDelgRole, err = repo.GetBaseRole(role)
-        if err != nil {
-            logrus.Debugf("no %s role loaded", role)
-            return nil, err
-        }
-    } else {
-        delgRole, err := repo.GetDelegationRole(role)
-        if err != nil {
-            logrus.Debugf("no %s delegation role loaded", role)
-            return nil, err
-        }
-        targetOrDelgRole = delgRole.BaseRole
-    }
-    if err := signed.Verify(s, targetOrDelgRole, 0); err != nil {
-        return nil, err
-    }
-    t, err := data.TargetsFromSigned(s, role)
-    if err != nil {
-        return nil, err
-    }
-    if !data.ValidTUFType(t.Signed.Type, data.CanonicalTargetsRole) {
-        return nil, fmt.Errorf("%s has wrong type", role)
-    }
-    return t, nil
-}
-
-// validateRoot returns the parsed data.SignedRoot object if the new root:
-// - is a valid root metadata object
-// - has the correct number of timestamp keys
-// - validates against the previous root's signatures (if there was a rotation)
-// - is valid against itself (signature-wise)
-func validateRoot(gun string, oldRoot, newRoot []byte) (
-    *data.SignedRoot, error) {
-
-    parsedNewSigned := &data.Signed{}
-    err := json.Unmarshal(newRoot, parsedNewSigned)
-    if err != nil {
-        return nil, validation.ErrBadRoot{Msg: err.Error()}
-    }
-
-    // validates the structure of the root metadata
-    parsedNewRoot, err := data.RootFromSigned(parsedNewSigned)
-    if err != nil {
-        return nil, validation.ErrBadRoot{Msg: err.Error()}
-    }
-
-    newRootRole, _ := parsedNewRoot.BuildBaseRole(data.CanonicalRootRole)
-    if err != nil { // should never happen, since the root metadata has been validated
-        return nil, validation.ErrBadRoot{Msg: err.Error()}
-    }
-
-    newTimestampRole, err := parsedNewRoot.BuildBaseRole(data.CanonicalTimestampRole)
-    if err != nil { // should never happen, since the root metadata has been validated
-        return nil, validation.ErrBadRoot{Msg: err.Error()}
-    }
-    // According to the TUF spec, any role may have more than one signing
-    // key and require a threshold signature. However, notary-server
-    // creates the timestamp, and there is only ever one, so a threshold
-    // greater than one would just always fail validation
-    if newTimestampRole.Threshold != 1 {
-        return nil, fmt.Errorf("timestamp role has invalid threshold")
-    }
-
-    if oldRoot != nil {
-        if err := checkAgainstOldRoot(oldRoot, newRootRole, parsedNewSigned); err != nil {
-            return nil, err
-        }
-    }
-
-    if err := signed.VerifySignatures(parsedNewSigned, newRootRole); err != nil {
-        return nil, validation.ErrBadRoot{Msg: err.Error()}
-    }
-
-    return parsedNewRoot, nil
-}
-
-// checkAgainstOldRoot errors if an invalid root rotation has taken place
-func checkAgainstOldRoot(oldRoot []byte, newRootRole data.BaseRole, newSigned *data.Signed) error {
-    parsedOldRoot := &data.SignedRoot{}
-    err := json.Unmarshal(oldRoot, parsedOldRoot)
-    if err != nil {
-        logrus.Warn("Old root could not be parsed, and cannot be used to check the new root.")
-        return err
-    }
-
-    oldRootRole, err := parsedOldRoot.BuildBaseRole(data.CanonicalRootRole)
-    if err != nil {
-        logrus.Warn("Old root does not have a valid root role, and cannot be used to check the new root.")
-        return err
-    }
-
-    // Always verify the new root against the old root
-    if err := signed.VerifySignatures(newSigned, oldRootRole); err != nil {
-        return validation.ErrBadRoot{Msg: fmt.Sprintf(
-            "rotation detected and new root was not signed with at least %d old keys",
-            oldRootRole.Threshold)}
-    }
-
     return nil
 }
File diff suppressed because it is too large
@@ -29,7 +29,7 @@ func TestValidationErrorFormat(t *testing.T) {
     defer server.Close()

     client, err := store.NewHTTPStore(
-        fmt.Sprintf("%s/v2/gun/_trust/tuf/", server.URL),
+        fmt.Sprintf("%s/v2/docker.com/notary/_trust/tuf/", server.URL),
         "",
         "json",
         "key",
@@ -7,6 +7,7 @@ import (

     "github.com/docker/go/canonical/json"
     "github.com/docker/notary/server/storage"
+    "github.com/docker/notary/trustpinning"
     "github.com/docker/notary/tuf"
     "github.com/docker/notary/tuf/data"
     "github.com/docker/notary/tuf/signed"
@ -67,56 +68,28 @@ func GetOrCreateSnapshot(gun, checksum string, store storage.MetaStore, cryptoSe
|
||||||
return lastModified, currentJSON, nil
|
return lastModified, currentJSON, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
repo := tuf.NewRepo(cryptoService)
|
builder := tuf.NewRepoBuilder(gun, cryptoService, trustpinning.TrustPinConfig{})
|
||||||
|
|
||||||
// load the current root to ensure we use the correct snapshot key.
|
// load the current root to ensure we use the correct snapshot key.
|
||||||
_, rootJSON, err := store.GetCurrent(gun, data.CanonicalRootRole)
|
_, rootJSON, err := store.GetCurrent(gun, data.CanonicalRootRole)
|
||||||
|
if err != nil {
|
||||||
|
logrus.Debug("Previous snapshot, but no root for GUN ", gun)
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if err := builder.Load(data.CanonicalRootRole, rootJSON, 1, false); err != nil {
|
||||||
|
logrus.Debug("Could not load valid previous root for GUN ", gun)
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
meta, _, err := builder.GenerateSnapshot(prev)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
root := &data.SignedRoot{}
|
|
||||||
if err := json.Unmarshal(rootJSON, root); err != nil {
|
|
||||||
logrus.Error("Failed to unmarshal existing root for GUN ", gun)
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
repo.SetRoot(root)
|
|
||||||
|
|
||||||
snapshotUpdate, err := NewSnapshotUpdate(prev, repo)
|
return nil, meta, nil
|
||||||
if err != nil {
|
|
||||||
logrus.Error("Failed to create a new snapshot")
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
return nil, snapshotUpdate.Data, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// snapshotExpired simply checks if the snapshot is past its expiry time
|
// snapshotExpired simply checks if the snapshot is past its expiry time
|
||||||
func snapshotExpired(sn *data.SignedSnapshot) bool {
|
func snapshotExpired(sn *data.SignedSnapshot) bool {
|
||||||
return signed.IsExpired(sn.Signed.Expires)
|
return signed.IsExpired(sn.Signed.Expires)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSnapshotUpdate produces a new snapshot and returns it as a metadata update, given the
|
|
||||||
// previous snapshot and the TUF repo.
|
|
||||||
func NewSnapshotUpdate(prev *data.SignedSnapshot, repo *tuf.Repo) (*storage.MetaUpdate, error) {
|
|
||||||
if prev != nil {
|
|
||||||
repo.SetSnapshot(prev) // SetSnapshot never errors
|
|
||||||
} else {
|
|
||||||
// this will only occur if no snapshot has ever been created for the repository
|
|
||||||
if err := repo.InitSnapshot(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sgnd, err := repo.SignSnapshot(data.DefaultExpires(data.CanonicalSnapshotRole))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
sgndJSON, err := json.Marshal(sgnd)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &storage.MetaUpdate{
|
|
||||||
Role: data.CanonicalSnapshotRole,
|
|
||||||
Version: repo.Snapshot.Signed.Version,
|
|
||||||
Data: sgndJSON,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
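The new GetOrCreateSnapshot flow above is the pattern this commit introduces on the server: build a validating RepoBuilder, feed it the stored root, and let it produce a freshly signed snapshot. The sketch below is a condensed, hypothetical version of that flow outside the handler (the function name and inputs are placeholders; the signatures come from this diff):

package snapshotexample

import (
	"github.com/docker/notary/trustpinning"
	"github.com/docker/notary/tuf"
	"github.com/docker/notary/tuf/data"
	"github.com/docker/notary/tuf/signed"
)

// regenerateSnapshot loads the current root into a builder and asks it for a new
// snapshot, reusing the previous one if given. If prev is nil, the targets role
// would also have to be loaded before GenerateSnapshot can succeed.
func regenerateSnapshot(gun string, rootJSON []byte, prev *data.SignedSnapshot,
	cs signed.CryptoService) ([]byte, error) {

	builder := tuf.NewRepoBuilder(gun, cs, trustpinning.TrustPinConfig{})

	// Version 1 is the minimum acceptable root version; expired roots are rejected.
	if err := builder.Load(data.CanonicalRootRole, rootJSON, 1, false); err != nil {
		return nil, err
	}

	// GenerateSnapshot signs with the snapshot key named in the loaded root and
	// returns the serialized metadata plus its version (ignored here).
	meta, _, err := builder.GenerateSnapshot(prev)
	return meta, err
}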
@@ -122,7 +122,9 @@ func TestGetSnapshotNoPreviousSnapshot(t *testing.T) {
 	repo, crypto, err := testutils.EmptyRepo("gun")
 	require.NoError(t, err)
 
-	rootJSON, err := json.Marshal(repo.Root)
+	sgnd, err := repo.SignRoot(data.DefaultExpires(data.CanonicalRootRole))
+	require.NoError(t, err)
+	rootJSON, err := json.Marshal(sgnd)
 	require.NoError(t, err)
 
 	for _, snapshotJSON := range [][]byte{nil, []byte("invalid JSON")} {
@@ -163,7 +165,10 @@ func TestGetSnapshotReturnsPreviousSnapshotIfUnexpired(t *testing.T) {
 	repo, crypto, err := testutils.EmptyRepo("gun")
 	require.NoError(t, err)
 
-	snapshotJSON, err := json.Marshal(repo.Snapshot)
+	// create an expired snapshot
+	sgnd, err := repo.SignSnapshot(data.DefaultExpires(data.CanonicalSnapshotRole))
+	require.NoError(t, err)
+	snapshotJSON, err := json.Marshal(sgnd)
 	require.NoError(t, err)
 
 	require.NoError(t, store.UpdateCurrent("gun",
@@ -183,14 +188,16 @@ func TestGetSnapshotOldSnapshotExpired(t *testing.T) {
 	repo, crypto, err := testutils.EmptyRepo("gun")
 	require.NoError(t, err)
 
-	rootJSON, err := json.Marshal(repo.Root)
+	sgnd, err := repo.SignRoot(data.DefaultExpires(data.CanonicalRootRole))
+	require.NoError(t, err)
+	rootJSON, err := json.Marshal(sgnd)
 	require.NoError(t, err)
 
 	// create an expired snapshot
-	_, err = repo.SignSnapshot(time.Now().AddDate(-1, -1, -1))
+	sgnd, err = repo.SignSnapshot(time.Now().AddDate(-1, -1, -1))
 	require.True(t, repo.Snapshot.Signed.Expires.Before(time.Now()))
 	require.NoError(t, err)
-	snapshotJSON, err := json.Marshal(repo.Snapshot)
+	snapshotJSON, err := json.Marshal(sgnd)
 	require.NoError(t, err)
 
 	// set all the metadata
@@ -255,14 +262,16 @@ func TestCreateSnapshotNoKeyInCrypto(t *testing.T) {
 	repo, _, err := testutils.EmptyRepo("gun")
 	require.NoError(t, err)
 
-	rootJSON, err := json.Marshal(repo.Root)
+	sgnd, err := repo.SignRoot(data.DefaultExpires(data.CanonicalRootRole))
+	require.NoError(t, err)
+	rootJSON, err := json.Marshal(sgnd)
 	require.NoError(t, err)
 
 	// create an expired snapshot
-	_, err = repo.SignSnapshot(time.Now().AddDate(-1, -1, -1))
+	sgnd, err = repo.SignSnapshot(time.Now().AddDate(-1, -1, -1))
 	require.True(t, repo.Snapshot.Signed.Expires.Before(time.Now()))
 	require.NoError(t, err)
-	snapshotJSON, err := json.Marshal(repo.Snapshot)
+	snapshotJSON, err := json.Marshal(sgnd)
 	require.NoError(t, err)
 
 	// set all the metadata so we know the failure to sign is just because of the key
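The recurring change in these test hunks is that the builder only accepts signed metadata, so fixtures are now produced with repo.SignRoot / repo.SignSnapshot and then marshalled, rather than marshalling the unsigned repo.Root / repo.Snapshot structs directly. The sketch below illustrates that fixture pattern under stated assumptions (package name and helper are hypothetical; the calls are taken from the hunks above):

package fixturesexample

import (
	"time"

	"github.com/docker/go/canonical/json"
	"github.com/docker/notary/tuf/data"
	"github.com/docker/notary/tuf/testutils"
)

// signedFixtures returns JSON for a signed root and a deliberately expired
// signed snapshot, the shape of fixture the updated tests feed to the server.
func signedFixtures(gun string) (rootJSON, snapshotJSON []byte, err error) {
	repo, _, err := testutils.EmptyRepo(gun)
	if err != nil {
		return nil, nil, err
	}

	signedRoot, err := repo.SignRoot(data.DefaultExpires(data.CanonicalRootRole))
	if err != nil {
		return nil, nil, err
	}
	if rootJSON, err = json.Marshal(signedRoot); err != nil {
		return nil, nil, err
	}

	// An expiry in the past forces the server to generate a fresh snapshot.
	signedSnapshot, err := repo.SignSnapshot(time.Now().AddDate(-1, -1, -1))
	if err != nil {
		return nil, nil, err
	}
	snapshotJSON, err = json.Marshal(signedSnapshot)
	return rootJSON, snapshotJSON, err
}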
@@ -6,6 +6,7 @@ import (
 
 	"github.com/docker/go/canonical/json"
 	"github.com/docker/notary"
+	"github.com/docker/notary/trustpinning"
 	"github.com/docker/notary/tuf"
 	"github.com/docker/notary/tuf/data"
 	"github.com/docker/notary/tuf/signed"
@@ -135,7 +136,7 @@ func snapshotExpired(ts *data.SignedTimestamp, snapshot []byte) bool {
 func createTimestamp(gun string, prev *data.SignedTimestamp, snapshot []byte, store storage.MetaStore,
 	cryptoService signed.CryptoService) (*storage.MetaUpdate, error) {
 
-	repo := tuf.NewRepo(cryptoService)
+	builder := tuf.NewRepoBuilder(gun, cryptoService, trustpinning.TrustPinConfig{})
 
 	// load the current root to ensure we use the correct timestamp key.
 	_, root, err := store.GetCurrent(gun, data.CanonicalRootRole)
@@ -143,49 +144,24 @@ func createTimestamp(gun string, prev *data.SignedTimestamp, snapshot []byte, st
 		logrus.Debug("Previous timestamp, but no root for GUN ", gun)
 		return nil, err
 	}
-	r := &data.SignedRoot{}
-	err = json.Unmarshal(root, r)
-	if err != nil {
-		logrus.Debug("Could not unmarshal previous root for GUN ", gun)
+	if err := builder.Load(data.CanonicalRootRole, root, 1, false); err != nil {
+		logrus.Debug("Could not load valid previous root for GUN ", gun)
 		return nil, err
 	}
-	repo.SetRoot(r)
 
 	// load snapshot so we can include it in timestamp
-	sn := &data.SignedSnapshot{}
-	err = json.Unmarshal(snapshot, sn)
-	if err != nil {
-		logrus.Debug("Could not unmarshal previous snapshot for GUN ", gun)
+	if err := builder.Load(data.CanonicalSnapshotRole, snapshot, 1, false); err != nil {
+		logrus.Debug("Could not load valid previous snapshot for GUN ", gun)
 		return nil, err
 	}
-	repo.SetSnapshot(sn)
 
-	return NewTimestampUpdate(prev, repo)
-}
-
-// NewTimestampUpdate produces a new timestamp and returns it as a metadata update, given the
-// previous timestamp and the TUF repo assuming that the root and current snapshot have already
-// been loaded.
-func NewTimestampUpdate(prev *data.SignedTimestamp, repo *tuf.Repo) (*storage.MetaUpdate, error) {
-	if prev != nil {
-		repo.SetTimestamp(prev) // SetTimestamp never errors
-	} else {
-		// this will only occur if no timestamp has ever been created for the repository
-		if err := repo.InitTimestamp(); err != nil {
-			return nil, err
-		}
-	}
-	sgnd, err := repo.SignTimestamp(data.DefaultExpires(data.CanonicalTimestampRole))
-	if err != nil {
-		return nil, err
-	}
-	sgndJSON, err := json.Marshal(sgnd)
+	meta, ver, err := builder.GenerateTimestamp(prev)
 	if err != nil {
 		return nil, err
 	}
 	return &storage.MetaUpdate{
 		Role:    data.CanonicalTimestampRole,
-		Version: repo.Timestamp.Signed.Version,
-		Data:    sgndJSON,
+		Version: ver,
+		Data:    meta,
 	}, nil
 }
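createTimestamp now follows the same builder pattern as the snapshot path, with the extra requirement that a snapshot be loaded before a timestamp can be produced. A condensed, hypothetical sketch of that flow (inputs and the function name are placeholders; the signatures come from this diff):

package timestampexample

import (
	"github.com/docker/notary/trustpinning"
	"github.com/docker/notary/tuf"
	"github.com/docker/notary/tuf/data"
	"github.com/docker/notary/tuf/signed"
)

// regenerateTimestamp loads the stored root and snapshot, then asks the builder
// for a newly signed timestamp covering that snapshot.
func regenerateTimestamp(gun string, rootJSON, snapshotJSON []byte,
	prev *data.SignedTimestamp, cs signed.CryptoService) ([]byte, int, error) {

	builder := tuf.NewRepoBuilder(gun, cs, trustpinning.TrustPinConfig{})
	if err := builder.Load(data.CanonicalRootRole, rootJSON, 1, false); err != nil {
		return nil, 0, err
	}
	// The snapshot must be loaded first: signing a timestamp serializes the
	// snapshot it points to.
	if err := builder.Load(data.CanonicalSnapshotRole, snapshotJSON, 1, false); err != nil {
		return nil, 0, err
	}
	return builder.GenerateTimestamp(prev)
}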
@@ -98,6 +98,7 @@ func ValidateRoot(prevRoot *data.SignedRoot, root *data.Signed, gun string, trus
 	// Retrieve all the leaf and intermediate certificates in root for which the CN matches the GUN
 	allLeafCerts, allIntCerts := parseAllCerts(signedRoot)
 	certsFromRoot, err := validRootLeafCerts(allLeafCerts, gun, true)
 
 	if err != nil {
 		logrus.Debugf("error retrieving valid leaf certificates for: %s, %v", gun, err)
 		return nil, &ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"}
@@ -1,4 +1,4 @@
-package trustpinning
+package trustpinning_test
 
 import (
 	"bytes"
@@ -20,6 +20,7 @@ import (
 
 	"github.com/docker/notary/cryptoservice"
 	"github.com/docker/notary/trustmanager"
+	"github.com/docker/notary/trustpinning"
 	"github.com/docker/notary/tuf/data"
 	"github.com/docker/notary/tuf/signed"
 	"github.com/docker/notary/tuf/testutils"
@@ -60,19 +61,19 @@ func TestValidateRoot(t *testing.T) {
 	// Unmarshal our signedroot
 	json.Unmarshal(signedRootBytes.Bytes(), &testSignedRoot)
 
-	// This call to ValidateRoot will succeed since we are using a valid PEM
+	// This call to trustpinning.ValidateRoot will succeed since we are using a valid PEM
 	// encoded certificate, and have no other certificates for this CN
-	_, err = ValidateRoot(nil, &testSignedRoot, "docker.com/notary", TrustPinConfig{})
+	_, err = trustpinning.ValidateRoot(nil, &testSignedRoot, "docker.com/notary", trustpinning.TrustPinConfig{})
 	require.NoError(t, err)
 
-	// This call to ValidateRoot will fail since we are passing in a dnsName that
+	// This call to trustpinning.ValidateRoot will fail since we are passing in a dnsName that
 	// doesn't match the CN of the certificate.
-	_, err = ValidateRoot(nil, &testSignedRoot, "diogomonica.com/notary", TrustPinConfig{})
+	_, err = trustpinning.ValidateRoot(nil, &testSignedRoot, "diogomonica.com/notary", trustpinning.TrustPinConfig{})
 	require.Error(t, err, "An error was expected")
-	require.Equal(t, err, &ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"})
+	require.Equal(t, err, &trustpinning.ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"})
 
 	//
-	// This call to ValidateRoot will fail since we are passing an unparsable RootSigned
+	// This call to trustpinning.ValidateRoot will fail since we are passing an unparsable RootSigned
 	//
 	// Execute our template deleting the old buffer first
 	signedRootBytes.Reset()
@@ -81,11 +82,11 @@ func TestValidateRoot(t *testing.T) {
 	// Unmarshal our signedroot
 	json.Unmarshal(signedRootBytes.Bytes(), &testSignedRoot)
 
-	_, err = ValidateRoot(nil, &testSignedRoot, "docker.com/notary", TrustPinConfig{})
+	_, err = trustpinning.ValidateRoot(nil, &testSignedRoot, "docker.com/notary", trustpinning.TrustPinConfig{})
 	require.Error(t, err, "illegal base64 data at input byte")
 
 	//
-	// This call to ValidateRoot will fail since we are passing an invalid PEM cert
+	// This call to trustpinning.ValidateRoot will fail since we are passing an invalid PEM cert
 	//
 	// Execute our template deleting the old buffer first
 	signedRootBytes.Reset()
@@ -94,12 +95,12 @@ func TestValidateRoot(t *testing.T) {
 	// Unmarshal our signedroot
 	json.Unmarshal(signedRootBytes.Bytes(), &testSignedRoot)
 
-	_, err = ValidateRoot(nil, &testSignedRoot, "docker.com/notary", TrustPinConfig{})
+	_, err = trustpinning.ValidateRoot(nil, &testSignedRoot, "docker.com/notary", trustpinning.TrustPinConfig{})
 	require.Error(t, err, "An error was expected")
-	require.Equal(t, err, &ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"})
+	require.Equal(t, err, &trustpinning.ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"})
 
 	//
-	// This call to ValidateRoot will fail since we are passing only CA certificate
+	// This call to trustpinning.ValidateRoot will fail since we are passing only CA certificate
 	// This will fail due to the lack of a leaf certificate
 	//
 	// Execute our template deleting the old buffer first
@@ -109,12 +110,12 @@ func TestValidateRoot(t *testing.T) {
 	// Unmarshal our signedroot
 	json.Unmarshal(signedRootBytes.Bytes(), &testSignedRoot)
 
-	_, err = ValidateRoot(nil, &testSignedRoot, "docker.com/notary", TrustPinConfig{})
+	_, err = trustpinning.ValidateRoot(nil, &testSignedRoot, "docker.com/notary", trustpinning.TrustPinConfig{})
 	require.Error(t, err, "An error was expected")
-	require.Equal(t, err, &ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"})
+	require.Equal(t, err, &trustpinning.ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"})
 
 	//
-	// This call to ValidateRoot could succeed in getting to the TUF validation, since
+	// This call to trustpinning.ValidateRoot could succeed in getting to the TUF validation, since
 	// we are using a valid PEM encoded certificate chain of intermediate + leaf cert
 	// that are signed by a trusted root authority and the leaf cert has a correct CN.
 	// It will, however, fail to validate, because the leaf cert does not precede the
@@ -128,9 +129,9 @@ func TestValidateRoot(t *testing.T) {
 	// Unmarshal our signedroot
 	json.Unmarshal(signedRootBytes.Bytes(), &testSignedRoot)
 
-	_, err = ValidateRoot(nil, &testSignedRoot, "secure.example.com", TrustPinConfig{})
+	_, err = trustpinning.ValidateRoot(nil, &testSignedRoot, "secure.example.com", trustpinning.TrustPinConfig{})
 	require.Error(t, err, "An error was expected")
-	require.Equal(t, err, &ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"})
+	require.Equal(t, err, &trustpinning.ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"})
 }
 
 func TestValidateRootWithoutTOFUS(t *testing.T) {
@@ -149,8 +150,8 @@ func TestValidateRootWithoutTOFUS(t *testing.T) {
 	// Unmarshal our signedroot
 	json.Unmarshal(signedRootBytes.Bytes(), &testSignedRoot)
 
-	// This call to ValidateRoot will fail since we are explicitly disabling TOFU and have no local certs
-	_, err = ValidateRoot(nil, &testSignedRoot, "docker.com/notary", TrustPinConfig{DisableTOFU: true})
+	// This call to trustpinning.ValidateRoot will fail since we are explicitly disabling TOFU and have no local certs
+	_, err = trustpinning.ValidateRoot(nil, &testSignedRoot, "docker.com/notary", trustpinning.TrustPinConfig{DisableTOFU: true})
 	require.Error(t, err)
 }
 
@@ -172,14 +173,14 @@ func TestValidateRootWithPinnedCert(t *testing.T) {
 	typedSignedRoot, err := data.RootFromSigned(&testSignedRoot)
 	require.NoError(t, err)
 
-	// This call to ValidateRoot should succeed with the correct Cert ID (same as root public key ID)
-	validatedSignedRoot, err := ValidateRoot(nil, &testSignedRoot, "docker.com/notary", TrustPinConfig{Certs: map[string][]string{"docker.com/notary": {rootPubKeyID}}, DisableTOFU: true})
+	// This call to trustpinning.ValidateRoot should succeed with the correct Cert ID (same as root public key ID)
+	validatedSignedRoot, err := trustpinning.ValidateRoot(nil, &testSignedRoot, "docker.com/notary", trustpinning.TrustPinConfig{Certs: map[string][]string{"docker.com/notary": {rootPubKeyID}}, DisableTOFU: true})
 	require.NoError(t, err)
 	generateRootKeyIDs(typedSignedRoot)
 	require.Equal(t, validatedSignedRoot, typedSignedRoot)
 
-	// This call to ValidateRoot should also succeed with the correct Cert ID (same as root public key ID), even though we passed an extra bad one
-	validatedSignedRoot, err = ValidateRoot(nil, &testSignedRoot, "docker.com/notary", TrustPinConfig{Certs: map[string][]string{"docker.com/notary": {rootPubKeyID, "invalidID"}}, DisableTOFU: true})
+	// This call to trustpinning.ValidateRoot should also succeed with the correct Cert ID (same as root public key ID), even though we passed an extra bad one
+	validatedSignedRoot, err = trustpinning.ValidateRoot(nil, &testSignedRoot, "docker.com/notary", trustpinning.TrustPinConfig{Certs: map[string][]string{"docker.com/notary": {rootPubKeyID, "invalidID"}}, DisableTOFU: true})
 	require.NoError(t, err)
 	require.Equal(t, validatedSignedRoot, typedSignedRoot)
 }
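These tests exercise the two pinning modes of trustpinning.TrustPinConfig: pin specific root certificate IDs per GUN via Certs, or pin a CA certificate file per GUN via CA, with DisableTOFU controlling whether an unmatched GUN may fall back to trust-on-first-use. A small, hypothetical configuration sketch (the GUN, ID and path values are placeholders, not values from this commit):

package trustpinexample

import "github.com/docker/notary/trustpinning"

// examplePinning shows the shape of a pinned configuration: exact certificate
// IDs take priority for "docker.com/notary", a CA bundle covers "notary-signer",
// and TOFU is disabled so anything unmatched is rejected.
func examplePinning(rootCertID string) trustpinning.TrustPinConfig {
	return trustpinning.TrustPinConfig{
		Certs: map[string][]string{
			"docker.com/notary": {rootCertID},
		},
		CA: map[string]string{
			"notary-signer": "fixtures/root-ca.crt",
		},
		DisableTOFU: true,
	}
}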
@@ -341,11 +342,11 @@ func TestValidateRootWithPinnerCertAndIntermediates(t *testing.T) {
 	defer os.RemoveAll(tempBaseDir)
 	require.NoError(t, err, "failed to create a temporary directory: %s", err)
 
-	validatedRoot, err := ValidateRoot(
+	validatedRoot, err := trustpinning.ValidateRoot(
 		nil,
 		signedRoot,
 		"docker.io/notary/test",
-		TrustPinConfig{
+		trustpinning.TrustPinConfig{
 			Certs: map[string][]string{
 				"docker.io/notary/test": {ecdsax509Key.ID()},
 			},
@@ -375,24 +376,24 @@ func TestValidateRootFailuresWithPinnedCert(t *testing.T) {
 	typedSignedRoot, err := data.RootFromSigned(&testSignedRoot)
 	require.NoError(t, err)
 
-	// This call to ValidateRoot should fail due to an incorrect cert ID
-	_, err = ValidateRoot(nil, &testSignedRoot, "docker.com/notary", TrustPinConfig{Certs: map[string][]string{"docker.com/notary": {"ABSOLUTELY NOT A CERT ID"}}, DisableTOFU: true})
+	// This call to trustpinning.ValidateRoot should fail due to an incorrect cert ID
+	_, err = trustpinning.ValidateRoot(nil, &testSignedRoot, "docker.com/notary", trustpinning.TrustPinConfig{Certs: map[string][]string{"docker.com/notary": {"ABSOLUTELY NOT A CERT ID"}}, DisableTOFU: true})
 	require.Error(t, err)
 
-	// This call to ValidateRoot should fail due to an empty cert ID
-	_, err = ValidateRoot(nil, &testSignedRoot, "docker.com/notary", TrustPinConfig{Certs: map[string][]string{"docker.com/notary": {""}}, DisableTOFU: true})
+	// This call to trustpinning.ValidateRoot should fail due to an empty cert ID
+	_, err = trustpinning.ValidateRoot(nil, &testSignedRoot, "docker.com/notary", trustpinning.TrustPinConfig{Certs: map[string][]string{"docker.com/notary": {""}}, DisableTOFU: true})
 	require.Error(t, err)
 
-	// This call to ValidateRoot should fail due to an invalid GUN (even though the cert ID is correct), and TOFUS is set to false
-	_, err = ValidateRoot(nil, &testSignedRoot, "docker.com/notary", TrustPinConfig{Certs: map[string][]string{"not_a_gun": {rootPubKeyID}}, DisableTOFU: true})
+	// This call to trustpinning.ValidateRoot should fail due to an invalid GUN (even though the cert ID is correct), and TOFUS is set to false
+	_, err = trustpinning.ValidateRoot(nil, &testSignedRoot, "docker.com/notary", trustpinning.TrustPinConfig{Certs: map[string][]string{"not_a_gun": {rootPubKeyID}}, DisableTOFU: true})
 	require.Error(t, err)
 
-	// This call to ValidateRoot should fail due to an invalid cert ID, even though it's a valid key ID for targets
-	_, err = ValidateRoot(nil, &testSignedRoot, "docker.com/notary", TrustPinConfig{Certs: map[string][]string{"docker.com/notary": {targetsPubKeyID}}, DisableTOFU: true})
+	// This call to trustpinning.ValidateRoot should fail due to an invalid cert ID, even though it's a valid key ID for targets
+	_, err = trustpinning.ValidateRoot(nil, &testSignedRoot, "docker.com/notary", trustpinning.TrustPinConfig{Certs: map[string][]string{"docker.com/notary": {targetsPubKeyID}}, DisableTOFU: true})
 	require.Error(t, err)
 
-	// This call to ValidateRoot should succeed because we fall through to TOFUS because we have no matching GUNs under Certs
-	validatedRoot, err := ValidateRoot(nil, &testSignedRoot, "docker.com/notary", TrustPinConfig{Certs: map[string][]string{"not_a_gun": {rootPubKeyID}}, DisableTOFU: false})
+	// This call to trustpinning.ValidateRoot should succeed because we fall through to TOFUS because we have no matching GUNs under Certs
+	validatedRoot, err := trustpinning.ValidateRoot(nil, &testSignedRoot, "docker.com/notary", trustpinning.TrustPinConfig{Certs: map[string][]string{"not_a_gun": {rootPubKeyID}}, DisableTOFU: false})
 	require.NoError(t, err)
 	generateRootKeyIDs(typedSignedRoot)
 	require.Equal(t, typedSignedRoot, validatedRoot)
@@ -414,16 +415,16 @@ func TestValidateRootWithPinnedCA(t *testing.T) {
 	typedSignedRoot, err := data.RootFromSigned(&testSignedRoot)
 	require.NoError(t, err)
 
-	// This call to ValidateRoot will fail because we have an invalid path for the CA
-	_, err = ValidateRoot(nil, &testSignedRoot, "docker.com/notary", TrustPinConfig{CA: map[string]string{"docker.com/notary": filepath.Join(tempBaseDir, "nonexistent")}})
+	// This call to trustpinning.ValidateRoot will fail because we have an invalid path for the CA
+	_, err = trustpinning.ValidateRoot(nil, &testSignedRoot, "docker.com/notary", trustpinning.TrustPinConfig{CA: map[string]string{"docker.com/notary": filepath.Join(tempBaseDir, "nonexistent")}})
 	require.Error(t, err)
 
-	// This call to ValidateRoot will fail because we have no valid GUNs to use, and TOFUS is disabled
-	_, err = ValidateRoot(nil, &testSignedRoot, "docker.com/notary", TrustPinConfig{CA: map[string]string{"othergun": filepath.Join(tempBaseDir, "nonexistent")}, DisableTOFU: true})
+	// This call to trustpinning.ValidateRoot will fail because we have no valid GUNs to use, and TOFUS is disabled
+	_, err = trustpinning.ValidateRoot(nil, &testSignedRoot, "docker.com/notary", trustpinning.TrustPinConfig{CA: map[string]string{"othergun": filepath.Join(tempBaseDir, "nonexistent")}, DisableTOFU: true})
 	require.Error(t, err)
 
-	// This call to ValidateRoot will succeed because we have no valid GUNs to use and we fall back to enabled TOFUS
-	validatedRoot, err := ValidateRoot(nil, &testSignedRoot, "docker.com/notary", TrustPinConfig{CA: map[string]string{"othergun": filepath.Join(tempBaseDir, "nonexistent")}, DisableTOFU: false})
+	// This call to trustpinning.ValidateRoot will succeed because we have no valid GUNs to use and we fall back to enabled TOFUS
+	validatedRoot, err := trustpinning.ValidateRoot(nil, &testSignedRoot, "docker.com/notary", trustpinning.TrustPinConfig{CA: map[string]string{"othergun": filepath.Join(tempBaseDir, "nonexistent")}, DisableTOFU: false})
 	require.NoError(t, err)
 	generateRootKeyIDs(typedSignedRoot)
 	require.Equal(t, typedSignedRoot, validatedRoot)
@@ -432,14 +433,14 @@ func TestValidateRootWithPinnedCA(t *testing.T) {
 	invalidCAFilepath := filepath.Join(tempBaseDir, "invalid.ca")
 	require.NoError(t, ioutil.WriteFile(invalidCAFilepath, []byte("ABSOLUTELY NOT A PEM"), 0644))
 
-	// Using this invalid CA cert should fail on ValidateRoot
-	_, err = ValidateRoot(nil, &testSignedRoot, "docker.com/notary", TrustPinConfig{CA: map[string]string{"docker.com/notary": invalidCAFilepath}, DisableTOFU: true})
+	// Using this invalid CA cert should fail on trustpinning.ValidateRoot
+	_, err = trustpinning.ValidateRoot(nil, &testSignedRoot, "docker.com/notary", trustpinning.TrustPinConfig{CA: map[string]string{"docker.com/notary": invalidCAFilepath}, DisableTOFU: true})
 	require.Error(t, err)
 
 	validCAFilepath := "../fixtures/root-ca.crt"
 
 	// If we pass an invalid Certs entry in addition to this valid CA entry, since Certs has priority for pinning we will fail
-	_, err = ValidateRoot(nil, &testSignedRoot, "docker.com/notary", TrustPinConfig{Certs: map[string][]string{"docker.com/notary": {"invalidID"}}, CA: map[string]string{"docker.com/notary": validCAFilepath}, DisableTOFU: true})
+	_, err = trustpinning.ValidateRoot(nil, &testSignedRoot, "docker.com/notary", trustpinning.TrustPinConfig{Certs: map[string][]string{"docker.com/notary": {"invalidID"}}, CA: map[string]string{"docker.com/notary": validCAFilepath}, DisableTOFU: true})
 	require.Error(t, err)
 
 	// Now construct a new root with a valid cert chain, such that signatures are correct over the 'notary-signer' GUN. Pin the root-ca and validate
@@ -466,6 +467,7 @@ func TestValidateRootWithPinnedCA(t *testing.T) {
 			data.CanonicalSnapshotRole: &rootRole.RootRole},
 		false,
 	)
+	testRoot.Signed.Version = 1
 	require.NoError(t, err, "Failed to create new root")
 
 	keyReader, err := os.Open("../fixtures/notary-signer.key")
@@ -492,7 +494,7 @@ func TestValidateRootWithPinnedCA(t *testing.T) {
 	require.NoError(t, err)
 
 	// Check that we validate correctly against a pinned CA and provided bundle
-	validatedRoot, err = ValidateRoot(nil, newTestSignedRoot, "notary-signer", TrustPinConfig{CA: map[string]string{"notary-signer": validCAFilepath}, DisableTOFU: true})
+	validatedRoot, err = trustpinning.ValidateRoot(nil, newTestSignedRoot, "notary-signer", trustpinning.TrustPinConfig{CA: map[string]string{"notary-signer": validCAFilepath}, DisableTOFU: true})
 	require.NoError(t, err)
 	generateRootKeyIDs(newTypedSignedRoot)
 	require.Equal(t, newTypedSignedRoot, validatedRoot)
@@ -514,7 +516,7 @@ func TestValidateRootWithPinnedCA(t *testing.T) {
 	require.NoError(t, ioutil.WriteFile(bundleWithExpiredCertPath, bundleWithExpiredCert, 0644))
 
 	// Check that we validate correctly against a pinned CA and provided bundle
-	validatedRoot, err = ValidateRoot(nil, newTestSignedRoot, "notary-signer", TrustPinConfig{CA: map[string]string{"notary-signer": bundleWithExpiredCertPath}, DisableTOFU: true})
+	validatedRoot, err = trustpinning.ValidateRoot(nil, newTestSignedRoot, "notary-signer", trustpinning.TrustPinConfig{CA: map[string]string{"notary-signer": bundleWithExpiredCertPath}, DisableTOFU: true})
 	require.NoError(t, err)
 	require.Equal(t, newTypedSignedRoot, validatedRoot)
 
@@ -529,7 +531,7 @@ func TestValidateRootWithPinnedCA(t *testing.T) {
 	allExpiredCertPath := filepath.Join(tempBaseDir, "all_expired_cert.pem")
 	require.NoError(t, ioutil.WriteFile(allExpiredCertPath, allExpiredCertBundle, 0644))
 	// Now only use expired certs in the bundle, we should fail
-	_, err = ValidateRoot(nil, newTestSignedRoot, "notary-signer", TrustPinConfig{CA: map[string]string{"notary-signer": allExpiredCertPath}, DisableTOFU: true})
+	_, err = trustpinning.ValidateRoot(nil, newTestSignedRoot, "notary-signer", trustpinning.TrustPinConfig{CA: map[string]string{"notary-signer": allExpiredCertPath}, DisableTOFU: true})
 	require.Error(t, err)
 
 	// Add a CA cert for a that won't validate against the root leaf certificate
@@ -543,7 +545,7 @@ func TestValidateRootWithPinnedCA(t *testing.T) {
 	require.NoError(t, err)
 	bundleWithWrongCertPath := filepath.Join(tempBaseDir, "bundle_with_expired_cert.pem")
 	require.NoError(t, ioutil.WriteFile(bundleWithWrongCertPath, bundleWithWrongCert, 0644))
-	_, err = ValidateRoot(nil, newTestSignedRoot, "notary-signer", TrustPinConfig{CA: map[string]string{"notary-signer": bundleWithWrongCertPath}, DisableTOFU: true})
+	_, err = trustpinning.ValidateRoot(nil, newTestSignedRoot, "notary-signer", trustpinning.TrustPinConfig{CA: map[string]string{"notary-signer": bundleWithWrongCertPath}, DisableTOFU: true})
 	require.Error(t, err)
 }
 
@@ -580,6 +582,7 @@ func testValidateSuccessfulRootRotation(t *testing.T, keyAlg, rootKeyType string
 		},
 		false,
 	)
+	origTestRoot.Signed.Version = 1
 	require.NoError(t, err, "Failed to create new root")
 
 	signedOrigTestRoot, err := origTestRoot.ToSigned()
@@ -606,6 +609,7 @@ func testValidateSuccessfulRootRotation(t *testing.T, keyAlg, rootKeyType string
 		data.CanonicalSnapshotRole: &rootRole.RootRole},
 		false,
 	)
+	testRoot.Signed.Version = 1
 	require.NoError(t, err, "Failed to create new root")
 
 	signedTestRoot, err := testRoot.ToSigned()
@@ -617,9 +621,9 @@ func testValidateSuccessfulRootRotation(t *testing.T, keyAlg, rootKeyType string
 	typedSignedRoot, err := data.RootFromSigned(signedTestRoot)
 	require.NoError(t, err)
 
-	// This call to ValidateRoot will succeed since we are using a valid PEM
+	// This call to trustpinning.ValidateRoot will succeed since we are using a valid PEM
 	// encoded certificate, and have no other certificates for this CN
-	validatedRoot, err := ValidateRoot(prevRoot, signedTestRoot, gun, TrustPinConfig{})
+	validatedRoot, err := trustpinning.ValidateRoot(prevRoot, signedTestRoot, gun, trustpinning.TrustPinConfig{})
 	require.NoError(t, err)
 	generateRootKeyIDs(typedSignedRoot)
 	require.Equal(t, typedSignedRoot, validatedRoot)
@@ -658,6 +662,7 @@ func testValidateRootRotationMissingOrigSig(t *testing.T, keyAlg, rootKeyType st
 		},
 		false,
 	)
+	origTestRoot.Signed.Version = 1
 	require.NoError(t, err, "Failed to create new root")
 
 	signedOrigTestRoot, err := origTestRoot.ToSigned()
@@ -694,9 +699,9 @@ func testValidateRootRotationMissingOrigSig(t *testing.T, keyAlg, rootKeyType st
 	err = signed.Sign(cs, signedTestRoot, []data.PublicKey{replRootKey}, 1, nil)
 	require.NoError(t, err)
 
-	// This call to ValidateRoot will succeed since we are using a valid PEM
+	// This call to trustpinning.ValidateRoot will succeed since we are using a valid PEM
 	// encoded certificate, and have no other certificates for this CN
-	_, err = ValidateRoot(prevRoot, signedTestRoot, gun, TrustPinConfig{})
+	_, err = trustpinning.ValidateRoot(prevRoot, signedTestRoot, gun, trustpinning.TrustPinConfig{})
 	require.Error(t, err, "insuficient signatures on root")
 }
 
@@ -733,6 +738,7 @@ func testValidateRootRotationMissingNewSig(t *testing.T, keyAlg, rootKeyType str
 		},
 		false,
 	)
+	origTestRoot.Signed.Version = 1
 	require.NoError(t, err, "Failed to create new root")
 
 	signedOrigTestRoot, err := origTestRoot.ToSigned()
@@ -769,9 +775,9 @@ func testValidateRootRotationMissingNewSig(t *testing.T, keyAlg, rootKeyType str
 	err = signed.Sign(cs, signedTestRoot, []data.PublicKey{origRootKey}, 1, nil)
 	require.NoError(t, err)
 
-	// This call to ValidateRoot will succeed since we are using a valid PEM
+	// This call to trustpinning.ValidateRoot will succeed since we are using a valid PEM
 	// encoded certificate, and have no other certificates for this CN
-	_, err = ValidateRoot(prevRoot, signedTestRoot, gun, TrustPinConfig{})
+	_, err = trustpinning.ValidateRoot(prevRoot, signedTestRoot, gun, trustpinning.TrustPinConfig{})
 	require.Error(t, err, "insuficient signatures on root")
 }
 
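The new file that follows (the tuf package's RepoBuilder) is the heart of this change: both the client and the server construct a builder, feed it metadata in dependency order (root before everything else, targets before delegations, snapshot before a generated timestamp), and call Finish to obtain the assembled tuf.Repo, after which the builder refuses further input. The sketch below is a hypothetical end-to-end use of that lifecycle, not code from this commit; fetchMeta stands in for whatever cache or transport supplies the raw bytes, and the signatures are taken from the interface defined below:

package builderexample

import (
	"github.com/docker/notary/trustpinning"
	"github.com/docker/notary/tuf"
	"github.com/docker/notary/tuf/data"
	"github.com/docker/notary/tuf/signed"
)

// buildRepo drives a RepoBuilder through a full load cycle. fetchMeta is a
// placeholder for a store lookup keyed by role name and a maximum size.
func buildRepo(gun string, cs signed.CryptoService, pin trustpinning.TrustPinConfig,
	fetchMeta func(role string, size int64) ([]byte, error)) (*tuf.Repo, error) {

	builder := tuf.NewRepoBuilder(gun, cs, pin)

	for _, role := range []string{
		data.CanonicalRootRole,
		data.CanonicalTimestampRole,
		data.CanonicalSnapshotRole,
		data.CanonicalTargetsRole,
	} {
		// GetConsistentInfo reports how much we are allowed to read for this
		// role (-1 if unknown) based on whatever has been loaded so far.
		info := builder.GetConsistentInfo(role)
		raw, err := fetchMeta(role, info.Length())
		if err != nil {
			return nil, err
		}
		// minVersion 1, and expired metadata is rejected.
		if err := builder.Load(role, raw, 1, false); err != nil {
			return nil, err
		}
	}

	// After Finish the builder refuses further input (ErrBuildDone).
	return builder.Finish()
}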
|
|
@ -0,0 +1,673 @@
|
||||||
|
package tuf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/docker/go/canonical/json"
|
||||||
|
"github.com/docker/notary"
|
||||||
|
|
||||||
|
"github.com/docker/notary/trustpinning"
|
||||||
|
"github.com/docker/notary/tuf/data"
|
||||||
|
"github.com/docker/notary/tuf/signed"
|
||||||
|
"github.com/docker/notary/tuf/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrBuildDone is returned when any functions are called on RepoBuilder, and it
|
||||||
|
// is already finished building
|
||||||
|
var ErrBuildDone = fmt.Errorf(
|
||||||
|
"the builder has finished building and cannot accept any more input or produce any more output")
|
||||||
|
|
||||||
|
// ErrInvalidBuilderInput is returned when RepoBuilder.Load is called
|
||||||
|
// with the wrong type of metadata for thes tate that it's in
|
||||||
|
type ErrInvalidBuilderInput struct{ msg string }
|
||||||
|
|
||||||
|
func (e ErrInvalidBuilderInput) Error() string {
|
||||||
|
return e.msg
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsistentInfo is the consistent name and size of a role, or just the name
|
||||||
|
// of the role and a -1 if no file metadata for the role is known
|
||||||
|
type ConsistentInfo struct {
|
||||||
|
RoleName string
|
||||||
|
fileMeta data.FileMeta
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChecksumKnown determines whether or not we know enough to provide a size and
|
||||||
|
// consistent name
|
||||||
|
func (c ConsistentInfo) ChecksumKnown() bool {
|
||||||
|
// empty hash, no size : this is the zero value
|
||||||
|
return len(c.fileMeta.Hashes) > 0 || c.fileMeta.Length != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsistentName returns the consistent name (rolename.sha256) for the role
|
||||||
|
// given this consistent information
|
||||||
|
func (c ConsistentInfo) ConsistentName() string {
|
||||||
|
return utils.ConsistentName(c.RoleName, c.fileMeta.Hashes[notary.SHA256])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Length returns the expected length of the role as per this consistent
|
||||||
|
// information - if no checksum information is known, the size is -1.
|
||||||
|
func (c ConsistentInfo) Length() int64 {
|
||||||
|
if c.ChecksumKnown() {
|
||||||
|
return c.fileMeta.Length
|
||||||
|
}
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
// RepoBuilder is an interface for an object which builds a tuf.Repo
|
||||||
|
type RepoBuilder interface {
|
||||||
|
Load(roleName string, content []byte, minVersion int, allowExpired bool) error
|
||||||
|
GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, int, error)
|
||||||
|
GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, int, error)
|
||||||
|
Finish() (*Repo, error)
|
||||||
|
BootstrapNewBuilder() RepoBuilder
|
||||||
|
|
||||||
|
// informative functions
|
||||||
|
IsLoaded(roleName string) bool
|
||||||
|
GetLoadedVersion(roleName string) int
|
||||||
|
GetConsistentInfo(roleName string) ConsistentInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
// finishedBuilder refuses any more input or output
|
||||||
|
type finishedBuilder struct{}
|
||||||
|
|
||||||
|
func (f finishedBuilder) Load(roleName string, content []byte, minVersion int, allowExpired bool) error {
|
||||||
|
return ErrBuildDone
|
||||||
|
}
|
||||||
|
func (f finishedBuilder) GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, int, error) {
|
||||||
|
return nil, 0, ErrBuildDone
|
||||||
|
}
|
||||||
|
func (f finishedBuilder) GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, int, error) {
|
||||||
|
return nil, 0, ErrBuildDone
|
||||||
|
}
|
||||||
|
func (f finishedBuilder) Finish() (*Repo, error) { return nil, ErrBuildDone }
|
||||||
|
func (f finishedBuilder) BootstrapNewBuilder() RepoBuilder { return f }
|
||||||
|
func (f finishedBuilder) IsLoaded(roleName string) bool { return false }
|
||||||
|
func (f finishedBuilder) GetLoadedVersion(roleName string) int { return 0 }
|
||||||
|
func (f finishedBuilder) GetConsistentInfo(roleName string) ConsistentInfo {
|
||||||
|
return ConsistentInfo{RoleName: roleName}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRepoBuilder is the only way to get a pre-built RepoBuilder
|
||||||
|
func NewRepoBuilder(gun string, cs signed.CryptoService, trustpin trustpinning.TrustPinConfig) RepoBuilder {
|
||||||
|
return &repoBuilderWrapper{RepoBuilder: &repoBuilder{
|
||||||
|
repo: NewRepo(cs),
|
||||||
|
gun: gun,
|
||||||
|
trustpin: trustpin,
|
||||||
|
loadedNotChecksummed: make(map[string][]byte),
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// repoBuilderWrapper embeds a repoBuilder, but once Finish is called, swaps
|
||||||
|
// the embed out with a finishedBuilder
|
||||||
|
type repoBuilderWrapper struct {
|
||||||
|
RepoBuilder
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rbw *repoBuilderWrapper) Finish() (*Repo, error) {
|
||||||
|
switch rbw.RepoBuilder.(type) {
|
||||||
|
case finishedBuilder:
|
||||||
|
return rbw.RepoBuilder.Finish()
|
||||||
|
default:
|
||||||
|
old := rbw.RepoBuilder
|
||||||
|
rbw.RepoBuilder = finishedBuilder{}
|
||||||
|
return old.Finish()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// repoBuilder actually builds a tuf.Repo
|
||||||
|
type repoBuilder struct {
|
||||||
|
repo *Repo
|
||||||
|
|
||||||
|
// needed for root trust pininng verification
|
||||||
|
gun string
|
||||||
|
trustpin trustpinning.TrustPinConfig
|
||||||
|
|
||||||
|
// in case we load root and/or targets before snapshot and timestamp (
|
||||||
|
// or snapshot and not timestamp), so we know what to verify when the
|
||||||
|
// data with checksums come in
|
||||||
|
loadedNotChecksummed map[string][]byte
|
||||||
|
|
||||||
|
// bootstrapped values to validate a new root
|
||||||
|
prevRoot *data.SignedRoot
|
||||||
|
bootstrappedRootChecksum *data.FileMeta
|
||||||
|
|
||||||
|
// for bootstrapping the next builder
|
||||||
|
nextRootChecksum *data.FileMeta
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rb *repoBuilder) Finish() (*Repo, error) {
|
||||||
|
return rb.repo, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rb *repoBuilder) BootstrapNewBuilder() RepoBuilder {
|
||||||
|
return &repoBuilderWrapper{RepoBuilder: &repoBuilder{
|
||||||
|
repo: NewRepo(rb.repo.cryptoService),
|
||||||
|
gun: rb.gun,
|
||||||
|
loadedNotChecksummed: make(map[string][]byte),
|
||||||
|
trustpin: rb.trustpin,
|
||||||
|
|
||||||
|
prevRoot: rb.repo.Root,
|
||||||
|
bootstrappedRootChecksum: rb.nextRootChecksum,
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsLoaded returns whether a particular role has already been loaded
|
||||||
|
func (rb *repoBuilder) IsLoaded(roleName string) bool {
|
||||||
|
switch roleName {
|
||||||
|
case data.CanonicalRootRole:
|
||||||
|
return rb.repo.Root != nil
|
||||||
|
case data.CanonicalSnapshotRole:
|
||||||
|
return rb.repo.Snapshot != nil
|
||||||
|
case data.CanonicalTimestampRole:
|
||||||
|
return rb.repo.Timestamp != nil
|
||||||
|
default:
|
||||||
|
return rb.repo.Targets[roleName] != nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetLoadedVersion returns the metadata version, if it is loaded, or 1 (the
|
||||||
|
// minimum valid version number) otherwise
|
||||||
|
func (rb *repoBuilder) GetLoadedVersion(roleName string) int {
|
||||||
|
switch {
|
||||||
|
case roleName == data.CanonicalRootRole && rb.repo.Root != nil:
|
||||||
|
return rb.repo.Root.Signed.Version
|
||||||
|
case roleName == data.CanonicalSnapshotRole && rb.repo.Snapshot != nil:
|
||||||
|
return rb.repo.Snapshot.Signed.Version
|
||||||
|
case roleName == data.CanonicalTimestampRole && rb.repo.Timestamp != nil:
|
||||||
|
return rb.repo.Timestamp.Signed.Version
|
||||||
|
default:
|
||||||
|
if tgts, ok := rb.repo.Targets[roleName]; ok {
|
||||||
|
return tgts.Signed.Version
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetConsistentInfo returns the consistent name and size of a role, if it is known,
|
||||||
|
// otherwise just the rolename and a -1 for size (both of which are inside a
|
||||||
|
// ConsistentInfo object)
|
||||||
|
func (rb *repoBuilder) GetConsistentInfo(roleName string) ConsistentInfo {
|
||||||
|
info := ConsistentInfo{RoleName: roleName} // starts out with unknown filemeta
|
||||||
|
switch roleName {
|
||||||
|
case data.CanonicalTimestampRole:
|
||||||
|
// we do not want to get a consistent timestamp, but we do want to
|
||||||
|
// limit its size
|
||||||
|
info.fileMeta.Length = notary.MaxTimestampSize
|
||||||
|
case data.CanonicalSnapshotRole:
|
||||||
|
if rb.repo.Timestamp != nil {
|
||||||
|
info.fileMeta = rb.repo.Timestamp.Signed.Meta[roleName]
|
||||||
|
}
|
||||||
|
case data.CanonicalRootRole:
|
||||||
|
switch {
|
||||||
|
case rb.bootstrappedRootChecksum != nil:
|
||||||
|
info.fileMeta = *rb.bootstrappedRootChecksum
|
||||||
|
case rb.repo.Snapshot != nil:
|
||||||
|
info.fileMeta = rb.repo.Snapshot.Signed.Meta[roleName]
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
if rb.repo.Snapshot != nil {
|
||||||
|
info.fileMeta = rb.repo.Snapshot.Signed.Meta[roleName]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return info
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rb *repoBuilder) Load(roleName string, content []byte, minVersion int, allowExpired bool) error {
|
||||||
|
if !data.ValidRole(roleName) {
|
||||||
|
return ErrInvalidBuilderInput{msg: fmt.Sprintf("%s is an invalid role", roleName)}
|
||||||
|
}
|
||||||
|
|
||||||
|
if rb.IsLoaded(roleName) {
|
||||||
|
return ErrInvalidBuilderInput{msg: fmt.Sprintf("%s has already been loaded", roleName)}
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
switch roleName {
|
||||||
|
case data.CanonicalRootRole:
|
||||||
|
break
|
||||||
|
case data.CanonicalTimestampRole, data.CanonicalSnapshotRole, data.CanonicalTargetsRole:
|
||||||
|
err = rb.checkPrereqsLoaded([]string{data.CanonicalRootRole})
|
||||||
|
default: // delegations
|
||||||
|
err = rb.checkPrereqsLoaded([]string{data.CanonicalRootRole, data.CanonicalTargetsRole})
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch roleName {
|
||||||
|
case data.CanonicalRootRole:
|
||||||
|
return rb.loadRoot(content, minVersion, allowExpired)
|
||||||
|
case data.CanonicalSnapshotRole:
|
||||||
|
return rb.loadSnapshot(content, minVersion, allowExpired)
|
||||||
|
case data.CanonicalTimestampRole:
|
||||||
|
return rb.loadTimestamp(content, minVersion, allowExpired)
|
||||||
|
case data.CanonicalTargetsRole:
|
||||||
|
return rb.loadTargets(content, minVersion, allowExpired)
|
||||||
|
default:
|
||||||
|
return rb.loadDelegation(roleName, content, minVersion, allowExpired)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rb *repoBuilder) checkPrereqsLoaded(prereqRoles []string) error {
|
||||||
|
for _, req := range prereqRoles {
|
||||||
|
if !rb.IsLoaded(req) {
|
||||||
|
return ErrInvalidBuilderInput{msg: fmt.Sprintf("%s must be loaded first", req)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateSnapshot generates a new snapshot given a previous (optional) snapshot
|
||||||
|
// We can't just load the previous snapshot, because it may have been signed by a different
|
||||||
|
// snapshot key (maybe from a previous root version). Note that we need the root role and
|
||||||
|
// targets role to be loaded, because we need to generate metadata for both (and we need
|
||||||
|
// the root to be loaded so we can get the snapshot role to sign with)
|
||||||
|
func (rb *repoBuilder) GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, int, error) {
|
||||||
|
switch {
|
||||||
|
case rb.repo.cryptoService == nil:
|
||||||
|
return nil, 0, ErrInvalidBuilderInput{msg: "cannot generate snapshot without a cryptoservice"}
|
||||||
|
case rb.IsLoaded(data.CanonicalSnapshotRole):
|
||||||
|
return nil, 0, ErrInvalidBuilderInput{msg: "snapshot has already been loaded"}
|
||||||
|
case rb.IsLoaded(data.CanonicalTimestampRole):
|
||||||
|
		return nil, 0, ErrInvalidBuilderInput{msg: "cannot generate snapshot if timestamp has already been loaded"}
	}

	if err := rb.checkPrereqsLoaded([]string{data.CanonicalRootRole}); err != nil {
		return nil, 0, err
	}

	// If there is no previous snapshot, we need to generate one, and so the targets must
	// have already been loaded.  Otherwise, so long as the previous snapshot structure is
	// valid (it has a targets meta), we're good.
	switch prev {
	case nil:
		if err := rb.checkPrereqsLoaded([]string{data.CanonicalTargetsRole}); err != nil {
			return nil, 0, err
		}

		if err := rb.repo.InitSnapshot(); err != nil {
			rb.repo.Snapshot = nil
			return nil, 0, err
		}
	default:
		if err := data.IsValidSnapshotStructure(prev.Signed); err != nil {
			return nil, 0, err
		}
		rb.repo.Snapshot = prev
	}

	sgnd, err := rb.repo.SignSnapshot(data.DefaultExpires(data.CanonicalSnapshotRole))
	if err != nil {
		rb.repo.Snapshot = nil
		return nil, 0, err
	}

	sgndJSON, err := json.Marshal(sgnd)
	if err != nil {
		rb.repo.Snapshot = nil
		return nil, 0, err
	}

	// loadedNotChecksummed should currently contain the root awaiting checksumming,
	// since it has to have been loaded.  Since the snapshot was generated using
	// the root and targets data (there may not be any) that have been loaded,
	// remove all of them from rb.loadedNotChecksummed
	for tgtName := range rb.repo.Targets {
		delete(rb.loadedNotChecksummed, tgtName)
	}
	delete(rb.loadedNotChecksummed, data.CanonicalRootRole)

	// The timestamp can't have been loaded yet, so we want to cache the snapshot
	// bytes so we can validate the checksum when a timestamp gets generated or
	// loaded later.
	rb.loadedNotChecksummed[data.CanonicalSnapshotRole] = sgndJSON

	return sgndJSON, rb.repo.Snapshot.Signed.Version, nil
}

// GenerateTimestamp generates a new timestamp given a previous (optional) timestamp
// We can't just load the previous timestamp, because it may have been signed by a different
// timestamp key (maybe from a previous root version)
func (rb *repoBuilder) GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, int, error) {
	switch {
	case rb.repo.cryptoService == nil:
		return nil, 0, ErrInvalidBuilderInput{msg: "cannot generate timestamp without a cryptoservice"}
	case rb.IsLoaded(data.CanonicalTimestampRole):
		return nil, 0, ErrInvalidBuilderInput{msg: "timestamp has already been loaded"}
	}

	// SignTimestamp always serializes the loaded snapshot and signs it into the data, so we must
	// always have the snapshot loaded first
	if err := rb.checkPrereqsLoaded([]string{data.CanonicalRootRole, data.CanonicalSnapshotRole}); err != nil {
		return nil, 0, err
	}

	switch prev {
	case nil:
		if err := rb.repo.InitTimestamp(); err != nil {
			rb.repo.Timestamp = nil
			return nil, 0, err
		}
	default:
		if err := data.IsValidTimestampStructure(prev.Signed); err != nil {
			return nil, 0, err
		}
		rb.repo.Timestamp = prev
	}

	sgnd, err := rb.repo.SignTimestamp(data.DefaultExpires(data.CanonicalTimestampRole))
	if err != nil {
		rb.repo.Timestamp = nil
		return nil, 0, err
	}

	sgndJSON, err := json.Marshal(sgnd)
	if err != nil {
		rb.repo.Timestamp = nil
		return nil, 0, err
	}

	// The snapshot should have been loaded (and not checksummed, since a timestamp
	// cannot have been loaded), so it is awaiting checksumming.  Since this
	// timestamp was generated using the snapshot awaiting checksumming, we can
	// remove it from rb.loadedNotChecksummed.  There should be no other items
	// awaiting checksumming now since loading/generating a snapshot should have
	// cleared out everything else in `loadedNotChecksummed`.
	delete(rb.loadedNotChecksummed, data.CanonicalSnapshotRole)

	return sgndJSON, rb.repo.Timestamp.Signed.Version, nil
}
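Taken together, the two generators above give a server-side-style flow: load root and targets, generate a snapshot, then generate a timestamp over it. Below is a minimal sketch of that flow, not part of this change: it assumes the same imports used in the builder tests further down (tuf, data, signed, trustpinning), and a cryptoservice that actually holds the snapshot and timestamp signing keys declared in the root; the function and variable names are illustrative only.

// illustrative sketch only, not part of this commit
func generateSnapshotAndTimestamp(gun string, cs signed.CryptoService, rootJSON, targetsJSON []byte) ([]byte, []byte, error) {
	builder := tuf.NewRepoBuilder(gun, cs, trustpinning.TrustPinConfig{})

	// root must always be loaded first; targets is needed because there is no previous snapshot
	if err := builder.Load(data.CanonicalRootRole, rootJSON, 1, false); err != nil {
		return nil, nil, err
	}
	if err := builder.Load(data.CanonicalTargetsRole, targetsJSON, 1, false); err != nil {
		return nil, nil, err
	}

	// prev == nil: a brand new snapshot is initialized, signed, and cached for later checksumming
	snapshotJSON, _, err := builder.GenerateSnapshot(nil)
	if err != nil {
		return nil, nil, err
	}

	// the generated snapshot counts as loaded, so a timestamp can now be generated over it
	timestampJSON, _, err := builder.GenerateTimestamp(nil)
	if err != nil {
		return nil, nil, err
	}
	return snapshotJSON, timestampJSON, nil
}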
|
||||||
|
|
||||||
|
// loadRoot loads a root if one has not been loaded
|
||||||
|
func (rb *repoBuilder) loadRoot(content []byte, minVersion int, allowExpired bool) error {
|
||||||
|
roleName := data.CanonicalRootRole
|
||||||
|
|
||||||
|
signedObj, err := rb.bytesToSigned(content, data.CanonicalRootRole)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// ValidateRoot validates against the previous root's role, as well as validates that the root
|
||||||
|
// itself is self-consistent with its own signatures and thresholds.
|
||||||
|
// This assumes that ValidateRoot calls data.RootFromSigned, which validates
|
||||||
|
// the metadata, rather than just unmarshalling signedObject into a SignedRoot object itself.
|
||||||
|
signedRoot, err := trustpinning.ValidateRoot(rb.prevRoot, signedObj, rb.gun, rb.trustpin)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := signed.VerifyVersion(&(signedRoot.Signed.SignedCommon), minVersion); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !allowExpired { // check must go at the end because all other validation should pass
|
||||||
|
if err := signed.VerifyExpiry(&(signedRoot.Signed.SignedCommon), roleName); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
rootRole, err := signedRoot.BuildBaseRole(data.CanonicalRootRole)
|
||||||
|
if err != nil { // this should never happen since the root has been validated
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
rb.repo.Root = signedRoot
|
||||||
|
rb.repo.originalRootRole = rootRole
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rb *repoBuilder) loadTimestamp(content []byte, minVersion int, allowExpired bool) error {
|
||||||
|
roleName := data.CanonicalTimestampRole
|
||||||
|
|
||||||
|
timestampRole, err := rb.repo.Root.BuildBaseRole(roleName)
|
||||||
|
if err != nil { // this should never happen, since it's already been validated
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
signedObj, err := rb.bytesToSignedAndValidateSigs(timestampRole, content)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
signedTimestamp, err := data.TimestampFromSigned(signedObj)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := signed.VerifyVersion(&(signedTimestamp.Signed.SignedCommon), minVersion); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !allowExpired { // check must go at the end because all other validation should pass
|
||||||
|
if err := signed.VerifyExpiry(&(signedTimestamp.Signed.SignedCommon), roleName); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := rb.validateChecksumsFromTimestamp(signedTimestamp); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
rb.repo.Timestamp = signedTimestamp
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rb *repoBuilder) loadSnapshot(content []byte, minVersion int, allowExpired bool) error {
|
||||||
|
roleName := data.CanonicalSnapshotRole
|
||||||
|
|
||||||
|
snapshotRole, err := rb.repo.Root.BuildBaseRole(roleName)
|
||||||
|
if err != nil { // this should never happen, since it's already been validated
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
signedObj, err := rb.bytesToSignedAndValidateSigs(snapshotRole, content)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
signedSnapshot, err := data.SnapshotFromSigned(signedObj)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := signed.VerifyVersion(&(signedSnapshot.Signed.SignedCommon), minVersion); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !allowExpired { // check must go at the end because all other validation should pass
|
||||||
|
if err := signed.VerifyExpiry(&(signedSnapshot.Signed.SignedCommon), roleName); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// at this point, the only thing left to validate is existing checksums - we can use
|
||||||
|
// this snapshot to bootstrap the next builder if needed - and we don't need to do
|
||||||
|
// the 2-value assignment since we've already validated the signedSnapshot, which MUST
|
||||||
|
// have root metadata
|
||||||
|
rootMeta := signedSnapshot.Signed.Meta[data.CanonicalRootRole]
|
||||||
|
rb.nextRootChecksum = &rootMeta
|
||||||
|
|
||||||
|
if err := rb.validateChecksumsFromSnapshot(signedSnapshot); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
rb.repo.Snapshot = signedSnapshot
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rb *repoBuilder) loadTargets(content []byte, minVersion int, allowExpired bool) error {
|
||||||
|
roleName := data.CanonicalTargetsRole
|
||||||
|
|
||||||
|
targetsRole, err := rb.repo.Root.BuildBaseRole(roleName)
|
||||||
|
if err != nil { // this should never happen, since it's already been validated
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
signedObj, err := rb.bytesToSignedAndValidateSigs(targetsRole, content)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
signedTargets, err := data.TargetsFromSigned(signedObj, roleName)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := signed.VerifyVersion(&(signedTargets.Signed.SignedCommon), minVersion); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !allowExpired { // check must go at the end because all other validation should pass
|
||||||
|
if err := signed.VerifyExpiry(&(signedTargets.Signed.SignedCommon), roleName); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
rb.repo.Targets[roleName] = signedTargets
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rb *repoBuilder) loadDelegation(roleName string, content []byte, minVersion int, allowExpired bool) error {
|
||||||
|
delegationRole, err := rb.repo.GetDelegationRole(roleName)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
signedObj, err := rb.bytesToSignedAndValidateSigs(delegationRole.BaseRole, content)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
signedTargets, err := data.TargetsFromSigned(signedObj, roleName)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := signed.VerifyVersion(&(signedTargets.Signed.SignedCommon), minVersion); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !allowExpired { // check must go at the end because all other validation should pass
|
||||||
|
if err := signed.VerifyExpiry(&(signedTargets.Signed.SignedCommon), roleName); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
rb.repo.Targets[roleName] = signedTargets
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rb *repoBuilder) validateChecksumsFromTimestamp(ts *data.SignedTimestamp) error {
|
||||||
|
sn, ok := rb.loadedNotChecksummed[data.CanonicalSnapshotRole]
|
||||||
|
if ok {
|
||||||
|
// by this point, the SignedTimestamp has been validated so it must have a snapshot hash
|
||||||
|
snMeta := ts.Signed.Meta[data.CanonicalSnapshotRole].Hashes
|
||||||
|
if err := data.CheckHashes(sn, data.CanonicalSnapshotRole, snMeta); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
delete(rb.loadedNotChecksummed, data.CanonicalSnapshotRole)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rb *repoBuilder) validateChecksumsFromSnapshot(sn *data.SignedSnapshot) error {
|
||||||
|
var goodRoles []string
|
||||||
|
for roleName, loadedBytes := range rb.loadedNotChecksummed {
|
||||||
|
switch roleName {
|
||||||
|
case data.CanonicalSnapshotRole, data.CanonicalTimestampRole:
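// the snapshot is never used to validate its own checksum, and the timestamp is never
// checksummed against the snapshot, so skip both roles here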
|
||||||
|
break
|
||||||
|
default:
|
||||||
|
if err := data.CheckHashes(loadedBytes, roleName, sn.Signed.Meta[roleName].Hashes); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
goodRoles = append(goodRoles, roleName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, roleName := range goodRoles {
|
||||||
|
delete(rb.loadedNotChecksummed, roleName)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rb *repoBuilder) validateChecksumFor(content []byte, roleName string) error {
|
||||||
|
// validate the bootstrap checksum for root, if provided
|
||||||
|
if roleName == data.CanonicalRootRole && rb.bootstrappedRootChecksum != nil {
|
||||||
|
if err := data.CheckHashes(content, roleName, rb.bootstrappedRootChecksum.Hashes); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// but we also want to cache the root content, so that when the snapshot is
|
||||||
|
// loaded it is validated (to make sure everything in the repo is self-consistent)
|
||||||
|
checksums := rb.getChecksumsFor(roleName)
|
||||||
|
if checksums != nil { // as opposed to empty, in which case hash check should fail
|
||||||
|
if err := data.CheckHashes(content, roleName, *checksums); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else if roleName != data.CanonicalTimestampRole {
|
||||||
|
// timestamp is the only role which does not need to be checksummed, but
|
||||||
|
// for everything else, cache the contents in the list of roles that have
|
||||||
|
// not been checksummed by the snapshot/timestamp yet
|
||||||
|
rb.loadedNotChecksummed[roleName] = content
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Checksums the given bytes, and if they validate, convert to a data.Signed object.
|
||||||
|
// If the checksums are nil (as opposed to empty), adds the bytes to the list of roles that
|
||||||
|
// haven't been checksummed (unless it's a timestamp, which has no checksum reference).
|
||||||
|
func (rb *repoBuilder) bytesToSigned(content []byte, roleName string) (*data.Signed, error) {
|
||||||
|
if err := rb.validateChecksumFor(content, roleName); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// unmarshal to signed
|
||||||
|
signedObj := &data.Signed{}
|
||||||
|
if err := json.Unmarshal(content, signedObj); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return signedObj, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rb *repoBuilder) bytesToSignedAndValidateSigs(role data.BaseRole, content []byte) (*data.Signed, error) {
|
||||||
|
|
||||||
|
signedObj, err := rb.bytesToSigned(content, role.Name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// verify signature
|
||||||
|
if err := signed.VerifySignatures(signedObj, role); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return signedObj, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the checksum reference (the loaded timestamp for the snapshot role, and
|
||||||
|
// the loaded snapshot for every other role except timestamp and snapshot) is nil,
|
||||||
|
// then return nil for the checksums, meaning that the checksum is not yet
|
||||||
|
// available. If the checksum reference *is* loaded, then always returns the
|
||||||
|
// Hashes object for the given role - if it doesn't exist, returns an empty Hash
|
||||||
|
// object (against which any checksum validation would fail).
|
||||||
|
func (rb *repoBuilder) getChecksumsFor(role string) *data.Hashes {
|
||||||
|
var hashes data.Hashes
|
||||||
|
switch role {
|
||||||
|
case data.CanonicalTimestampRole:
|
||||||
|
return nil
|
||||||
|
case data.CanonicalSnapshotRole:
|
||||||
|
if rb.repo.Timestamp == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
hashes = rb.repo.Timestamp.Signed.Meta[data.CanonicalSnapshotRole].Hashes
|
||||||
|
default:
|
||||||
|
if rb.repo.Snapshot == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
hashes = rb.repo.Snapshot.Signed.Meta[role].Hashes
|
||||||
|
}
|
||||||
|
return &hashes
|
||||||
|
}
|
|
@ -0,0 +1,675 @@
|
||||||
|
package tuf_test
|
||||||
|
|
||||||
|
// tests for builder that live in an external package, tuf_test, so that we can use
|
||||||
|
// the testutils without causing an import cycle
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/sha512"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/docker/notary"
|
||||||
|
"github.com/docker/notary/trustpinning"
|
||||||
|
"github.com/docker/notary/tuf"
|
||||||
|
"github.com/docker/notary/tuf/data"
|
||||||
|
"github.com/docker/notary/tuf/signed"
|
||||||
|
"github.com/docker/notary/tuf/testutils"
|
||||||
|
"github.com/docker/notary/tuf/utils"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _cachedMeta map[string][]byte
|
||||||
|
|
||||||
|
// we just want sample metadata for a role - so we can build cached metadata
|
||||||
|
// and use it once.
|
||||||
|
func getSampleMeta(t *testing.T) (map[string][]byte, string) {
|
||||||
|
gun := "docker.com/notary"
|
||||||
|
delgNames := []string{"targets/a", "targets/a/b", "targets/a/b/force_parent_metadata"}
|
||||||
|
if _cachedMeta == nil {
|
||||||
|
meta, _, err := testutils.NewRepoMetadata(gun, delgNames...)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
_cachedMeta = meta
|
||||||
|
}
|
||||||
|
return _cachedMeta, gun
|
||||||
|
}
|
||||||
|
|
||||||
|
// We load only if the rolename is a valid rolename - even if the metadata we provided is valid
|
||||||
|
func TestBuilderLoadsValidRolesOnly(t *testing.T) {
|
||||||
|
meta, gun := getSampleMeta(t)
|
||||||
|
builder := tuf.NewRepoBuilder(gun, nil, trustpinning.TrustPinConfig{})
|
||||||
|
err := builder.Load("NotRoot", meta[data.CanonicalRootRole], 1, false)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.IsType(t, tuf.ErrInvalidBuilderInput{}, err)
|
||||||
|
require.Contains(t, err.Error(), "is an invalid role")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBuilderOnlyAcceptsRootFirstWhenLoading(t *testing.T) {
|
||||||
|
meta, gun := getSampleMeta(t)
|
||||||
|
builder := tuf.NewRepoBuilder(gun, nil, trustpinning.TrustPinConfig{})
|
||||||
|
|
||||||
|
for roleName, content := range meta {
|
||||||
|
if roleName != data.CanonicalRootRole {
|
||||||
|
err := builder.Load(roleName, content, 1, true)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.IsType(t, tuf.ErrInvalidBuilderInput{}, err)
|
||||||
|
require.Contains(t, err.Error(), "root must be loaded first")
|
||||||
|
require.False(t, builder.IsLoaded(roleName))
|
||||||
|
require.Equal(t, 1, builder.GetLoadedVersion(roleName))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// we can load the root
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalRootRole, meta[data.CanonicalRootRole], 1, false))
|
||||||
|
require.True(t, builder.IsLoaded(data.CanonicalRootRole))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBuilderOnlyAcceptsDelegationsAfterParent(t *testing.T) {
|
||||||
|
meta, gun := getSampleMeta(t)
|
||||||
|
builder := tuf.NewRepoBuilder(gun, nil, trustpinning.TrustPinConfig{})
|
||||||
|
|
||||||
|
// load the root
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalRootRole, meta[data.CanonicalRootRole], 1, false))
|
||||||
|
|
||||||
|
// delegations can't be loaded without target
|
||||||
|
for _, delgName := range []string{"targets/a", "targets/a/b"} {
|
||||||
|
err := builder.Load(delgName, meta[delgName], 1, false)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.IsType(t, tuf.ErrInvalidBuilderInput{}, err)
|
||||||
|
require.Contains(t, err.Error(), "targets must be loaded first")
|
||||||
|
require.False(t, builder.IsLoaded(delgName))
|
||||||
|
require.Equal(t, 1, builder.GetLoadedVersion(delgName))
|
||||||
|
}
|
||||||
|
|
||||||
|
// load the targets
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalTargetsRole, meta[data.CanonicalTargetsRole], 1, false))
|
||||||
|
|
||||||
|
// targets/a/b can't be loaded because targets/a isn't loaded
|
||||||
|
err := builder.Load("targets/a/b", meta["targets/a/b"], 1, false)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.IsType(t, data.ErrInvalidRole{}, err)
|
||||||
|
|
||||||
|
// targets/a can be loaded now though because targets is loaded
|
||||||
|
require.NoError(t, builder.Load("targets/a", meta["targets/a"], 1, false))
|
||||||
|
|
||||||
|
// and now targets/a/b can be loaded because targets/a is loaded
|
||||||
|
require.NoError(t, builder.Load("targets/a/b", meta["targets/a/b"], 1, false))
|
||||||
|
}
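The ordering these tests pin down is: root first, snapshot and timestamp in either order, targets before any delegation, and delegation parents before their children. A rough sketch of a full load in that order, reusing the role-name-to-bytes map shape from getSampleMeta above (loadEverything is an illustrative name, not an API introduced by this change):

// illustrative sketch only
func loadEverything(gun string, meta map[string][]byte) (*tuf.Repo, error) {
	builder := tuf.NewRepoBuilder(gun, nil, trustpinning.TrustPinConfig{})
	order := []string{
		data.CanonicalRootRole,      // always first
		data.CanonicalTimestampRole, // checksums the snapshot (works in either order)
		data.CanonicalSnapshotRole,  // checksums root, targets, and delegations
		data.CanonicalTargetsRole,   // required before any delegation
		"targets/a",                 // parents before children
		"targets/a/b",
	}
	for _, role := range order {
		if err := builder.Load(role, meta[role], 1, false); err != nil {
			return nil, err
		}
	}
	return builder.Finish()
}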
|
||||||
|
|
||||||
|
func TestBuilderAcceptRoleOnce(t *testing.T) {
|
||||||
|
meta, gun := getSampleMeta(t)
|
||||||
|
builder := tuf.NewRepoBuilder(gun, nil, trustpinning.TrustPinConfig{})
|
||||||
|
|
||||||
|
for _, roleName := range append(data.BaseRoles, "targets/a", "targets/a/b") {
|
||||||
|
// first time loading is ok
|
||||||
|
require.NoError(t, builder.Load(roleName, meta[roleName], 1, false))
|
||||||
|
require.True(t, builder.IsLoaded(roleName))
|
||||||
|
require.Equal(t, 1, builder.GetLoadedVersion(roleName))
|
||||||
|
|
||||||
|
// second time loading is not
|
||||||
|
err := builder.Load(roleName, meta[roleName], 1, false)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.IsType(t, tuf.ErrInvalidBuilderInput{}, err)
|
||||||
|
require.Contains(t, err.Error(), "has already been loaded")
|
||||||
|
|
||||||
|
// still loaded
|
||||||
|
require.True(t, builder.IsLoaded(roleName))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBuilderStopsAcceptingOrProducingDataOnceDone(t *testing.T) {
|
||||||
|
meta, gun := getSampleMeta(t)
|
||||||
|
builder := tuf.NewRepoBuilder(gun, nil, trustpinning.TrustPinConfig{})
|
||||||
|
|
||||||
|
for _, roleName := range data.BaseRoles {
|
||||||
|
require.NoError(t, builder.Load(roleName, meta[roleName], 1, false))
|
||||||
|
require.True(t, builder.IsLoaded(roleName))
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := builder.Finish()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
err = builder.Load("targets/a", meta["targets/a"], 1, false)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Equal(t, tuf.ErrBuildDone, err)
|
||||||
|
|
||||||
|
// a builder bootstrapped from the finished builder is likewise done: it accepts no more input and produces no output
|
||||||
|
bootstrapped := builder.BootstrapNewBuilder()
|
||||||
|
|
||||||
|
err = bootstrapped.Load(data.CanonicalRootRole, meta[data.CanonicalRootRole], 0, false)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Equal(t, tuf.ErrBuildDone, err)
|
||||||
|
|
||||||
|
for _, b := range []tuf.RepoBuilder{builder, bootstrapped} {
|
||||||
|
_, err = b.Finish()
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Equal(t, tuf.ErrBuildDone, err)
|
||||||
|
|
||||||
|
_, _, err = b.GenerateSnapshot(nil)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Equal(t, tuf.ErrBuildDone, err)
|
||||||
|
|
||||||
|
_, _, err = b.GenerateTimestamp(nil)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Equal(t, tuf.ErrBuildDone, err)
|
||||||
|
|
||||||
|
for roleName := range meta {
|
||||||
|
// a finished builder thinks nothing is loaded
|
||||||
|
require.False(t, b.IsLoaded(roleName))
|
||||||
|
// checksums are all empty, versions are all zero
|
||||||
|
require.Equal(t, 0, b.GetLoadedVersion(roleName))
|
||||||
|
require.Equal(t, tuf.ConsistentInfo{RoleName: roleName}, b.GetConsistentInfo(roleName))
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test the cases in which GenerateSnapshot fails
|
||||||
|
func TestGenerateSnapshotInvalidOperations(t *testing.T) {
|
||||||
|
gun := "docker.com/notary"
|
||||||
|
repo, cs, err := testutils.EmptyRepo(gun)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// make snapshot have 2 keys and a threshold of 2
|
||||||
|
snapKeys := make([]data.PublicKey, 2)
|
||||||
|
for i := 0; i < 2; i++ {
|
||||||
|
snapKeys[i], err = cs.Create(data.CanonicalSnapshotRole, gun, data.ECDSAKey)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
require.NoError(t, repo.ReplaceBaseKeys(data.CanonicalSnapshotRole, snapKeys...))
|
||||||
|
repo.Root.Signed.Roles[data.CanonicalSnapshotRole].Threshold = 2
|
||||||
|
|
||||||
|
meta, err := testutils.SignAndSerialize(repo)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
for _, prevSnapshot := range []*data.SignedSnapshot{nil, repo.Snapshot} {
|
||||||
|
// copy keys, since we expect one of these generation attempts to succeed and we do
|
||||||
|
// some key deletion tests later
|
||||||
|
newCS := testutils.CopyKeys(t, cs, data.CanonicalSnapshotRole)
|
||||||
|
|
||||||
|
// --- we can't generate a snapshot if the root isn't loaded
|
||||||
|
builder := tuf.NewRepoBuilder(gun, newCS, trustpinning.TrustPinConfig{})
|
||||||
|
_, _, err := builder.GenerateSnapshot(prevSnapshot)
|
||||||
|
require.IsType(t, tuf.ErrInvalidBuilderInput{}, err)
|
||||||
|
require.Contains(t, err.Error(), "root must be loaded first")
|
||||||
|
require.False(t, builder.IsLoaded(data.CanonicalSnapshotRole))
|
||||||
|
|
||||||
|
// --- we can't generate a snapshot if the targets isn't loaded and we have no previous snapshot,
|
||||||
|
// --- but if we have a previous snapshot with a valid targets, we're good even if no snapshot
|
||||||
|
// --- is loaded
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalRootRole, meta[data.CanonicalRootRole], 1, false))
|
||||||
|
_, _, err = builder.GenerateSnapshot(prevSnapshot)
|
||||||
|
if prevSnapshot == nil {
|
||||||
|
require.IsType(t, tuf.ErrInvalidBuilderInput{}, err)
|
||||||
|
require.Contains(t, err.Error(), "targets must be loaded first")
|
||||||
|
require.False(t, builder.IsLoaded(data.CanonicalSnapshotRole))
|
||||||
|
} else {
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- we can't generate a snapshot if we've loaded the timestamp already
|
||||||
|
builder = tuf.NewRepoBuilder(gun, newCS, trustpinning.TrustPinConfig{})
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalRootRole, meta[data.CanonicalRootRole], 1, false))
|
||||||
|
if prevSnapshot == nil {
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalTargetsRole, meta[data.CanonicalTargetsRole], 1, false))
|
||||||
|
}
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalTimestampRole, meta[data.CanonicalTimestampRole], 1, false))
|
||||||
|
|
||||||
|
_, _, err = builder.GenerateSnapshot(prevSnapshot)
|
||||||
|
require.IsType(t, tuf.ErrInvalidBuilderInput{}, err)
|
||||||
|
require.Contains(t, err.Error(), "cannot generate snapshot if timestamp has already been loaded")
|
||||||
|
require.False(t, builder.IsLoaded(data.CanonicalSnapshotRole))
|
||||||
|
|
||||||
|
// --- we cannot generate a snapshot if we've already loaded a snapshot
|
||||||
|
builder = tuf.NewRepoBuilder(gun, newCS, trustpinning.TrustPinConfig{})
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalRootRole, meta[data.CanonicalRootRole], 1, false))
|
||||||
|
if prevSnapshot == nil {
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalTargetsRole, meta[data.CanonicalTargetsRole], 1, false))
|
||||||
|
}
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalSnapshotRole, meta[data.CanonicalSnapshotRole], 1, false))
|
||||||
|
|
||||||
|
_, _, err = builder.GenerateSnapshot(prevSnapshot)
|
||||||
|
require.IsType(t, tuf.ErrInvalidBuilderInput{}, err)
|
||||||
|
require.Contains(t, err.Error(), "snapshot has already been loaded")
|
||||||
|
|
||||||
|
// --- we cannot generate a snapshot if we can't satisfy the role threshold
|
||||||
|
for i := 0; i < len(snapKeys); i++ {
|
||||||
|
require.NoError(t, newCS.RemoveKey(snapKeys[i].ID()))
|
||||||
|
builder = tuf.NewRepoBuilder(gun, newCS, trustpinning.TrustPinConfig{})
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalRootRole, meta[data.CanonicalRootRole], 1, false))
|
||||||
|
if prevSnapshot == nil {
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalTargetsRole, meta[data.CanonicalTargetsRole], 1, false))
|
||||||
|
}
|
||||||
|
|
||||||
|
_, _, err = builder.GenerateSnapshot(prevSnapshot)
|
||||||
|
require.IsType(t, signed.ErrInsufficientSignatures{}, err)
|
||||||
|
require.False(t, builder.IsLoaded(data.CanonicalSnapshotRole))
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- we cannot generate a snapshot if we don't have a cryptoservice
|
||||||
|
builder = tuf.NewRepoBuilder(gun, nil, trustpinning.TrustPinConfig{})
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalRootRole, meta[data.CanonicalRootRole], 1, false))
|
||||||
|
if prevSnapshot == nil {
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalTargetsRole, meta[data.CanonicalTargetsRole], 1, false))
|
||||||
|
}
|
||||||
|
|
||||||
|
_, _, err = builder.GenerateSnapshot(prevSnapshot)
|
||||||
|
require.IsType(t, tuf.ErrInvalidBuilderInput{}, err)
|
||||||
|
require.Contains(t, err.Error(), "cannot generate snapshot without a cryptoservice")
|
||||||
|
require.False(t, builder.IsLoaded(data.CanonicalSnapshotRole))
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- we can't generate a snapshot if we're given an invalid previous snapshot (for instance, an empty one),
|
||||||
|
// --- even if we have a targets loaded
|
||||||
|
builder := tuf.NewRepoBuilder(gun, cs, trustpinning.TrustPinConfig{})
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalRootRole, meta[data.CanonicalRootRole], 1, false))
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalTargetsRole, meta[data.CanonicalTargetsRole], 1, false))
|
||||||
|
|
||||||
|
_, _, err = builder.GenerateSnapshot(&data.SignedSnapshot{})
|
||||||
|
require.IsType(t, data.ErrInvalidMetadata{}, err)
|
||||||
|
require.False(t, builder.IsLoaded(data.CanonicalSnapshotRole))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test the cases in which GenerateTimestamp fails
|
||||||
|
func TestGenerateTimestampInvalidOperations(t *testing.T) {
|
||||||
|
gun := "docker.com/notary"
|
||||||
|
repo, cs, err := testutils.EmptyRepo(gun)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// make timestamp have 2 keys and a threshold of 2
|
||||||
|
tsKeys := make([]data.PublicKey, 2)
|
||||||
|
for i := 0; i < 2; i++ {
|
||||||
|
tsKeys[i], err = cs.Create(data.CanonicalTimestampRole, gun, data.ECDSAKey)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
require.NoError(t, repo.ReplaceBaseKeys(data.CanonicalTimestampRole, tsKeys...))
|
||||||
|
repo.Root.Signed.Roles[data.CanonicalTimestampRole].Threshold = 2
|
||||||
|
|
||||||
|
meta, err := testutils.SignAndSerialize(repo)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
for _, prevTimestamp := range []*data.SignedTimestamp{nil, repo.Timestamp} {
|
||||||
|
// --- we can't generate a timestamp if the root isn't loaded
|
||||||
|
builder := tuf.NewRepoBuilder(gun, cs, trustpinning.TrustPinConfig{})
|
||||||
|
_, _, err := builder.GenerateTimestamp(prevTimestamp)
|
||||||
|
require.IsType(t, tuf.ErrInvalidBuilderInput{}, err)
|
||||||
|
require.Contains(t, err.Error(), "root must be loaded first")
|
||||||
|
require.False(t, builder.IsLoaded(data.CanonicalTimestampRole))
|
||||||
|
|
||||||
|
// --- we can't generate a timestamp if the snapshot isn't loaded, no matter if we have a previous
|
||||||
|
// --- timestamp or not
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalRootRole, meta[data.CanonicalRootRole], 1, false))
|
||||||
|
_, _, err = builder.GenerateTimestamp(prevTimestamp)
|
||||||
|
require.IsType(t, tuf.ErrInvalidBuilderInput{}, err)
|
||||||
|
require.Contains(t, err.Error(), "snapshot must be loaded first")
|
||||||
|
require.False(t, builder.IsLoaded(data.CanonicalTimestampRole))
|
||||||
|
|
||||||
|
// --- we can't generate a timestamp if we've loaded the timestamp already
|
||||||
|
builder = tuf.NewRepoBuilder(gun, cs, trustpinning.TrustPinConfig{})
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalRootRole, meta[data.CanonicalRootRole], 1, false))
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalSnapshotRole, meta[data.CanonicalSnapshotRole], 1, false))
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalTimestampRole, meta[data.CanonicalTimestampRole], 1, false))
|
||||||
|
|
||||||
|
_, _, err = builder.GenerateTimestamp(prevTimestamp)
|
||||||
|
require.IsType(t, tuf.ErrInvalidBuilderInput{}, err)
|
||||||
|
require.Contains(t, err.Error(), "timestamp has already been loaded")
|
||||||
|
|
||||||
|
// --- we cannot generate a timestamp if we can't satisfy the role threshold
|
||||||
|
for i := 0; i < len(tsKeys); i++ {
|
||||||
|
require.NoError(t, cs.RemoveKey(tsKeys[i].ID()))
|
||||||
|
builder = tuf.NewRepoBuilder(gun, cs, trustpinning.TrustPinConfig{})
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalRootRole, meta[data.CanonicalRootRole], 1, false))
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalSnapshotRole, meta[data.CanonicalSnapshotRole], 1, false))
|
||||||
|
|
||||||
|
_, _, err = builder.GenerateTimestamp(prevTimestamp)
|
||||||
|
require.IsType(t, signed.ErrInsufficientSignatures{}, err)
|
||||||
|
require.False(t, builder.IsLoaded(data.CanonicalTimestampRole))
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- we cannot generate a timestamp if we don't have a cryptoservice
|
||||||
|
builder = tuf.NewRepoBuilder(gun, nil, trustpinning.TrustPinConfig{})
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalRootRole, meta[data.CanonicalRootRole], 1, false))
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalSnapshotRole, meta[data.CanonicalSnapshotRole], 1, false))
|
||||||
|
|
||||||
|
_, _, err = builder.GenerateTimestamp(prevTimestamp)
|
||||||
|
require.IsType(t, tuf.ErrInvalidBuilderInput{}, err)
|
||||||
|
require.Contains(t, err.Error(), "cannot generate timestamp without a cryptoservice")
|
||||||
|
require.False(t, builder.IsLoaded(data.CanonicalTimestampRole))
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- we can't generate a timestamp if we're given an invalid previous timestamp (for instance, an empty one),
|
||||||
|
// --- even if we have a snapshot loaded
|
||||||
|
builder := tuf.NewRepoBuilder(gun, cs, trustpinning.TrustPinConfig{})
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalRootRole, meta[data.CanonicalRootRole], 1, false))
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalSnapshotRole, meta[data.CanonicalSnapshotRole], 1, false))
|
||||||
|
|
||||||
|
_, _, err = builder.GenerateTimestamp(&data.SignedTimestamp{})
|
||||||
|
require.IsType(t, data.ErrInvalidMetadata{}, err)
|
||||||
|
require.False(t, builder.IsLoaded(data.CanonicalTimestampRole))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetConsistentInfo(t *testing.T) {
|
||||||
|
gun := "docker.com/notary"
|
||||||
|
repo, _, err := testutils.EmptyRepo(gun)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// add some hashes for items in the snapshot that don't correspond to real metadata, but that
|
||||||
|
// will cause ConsistentInfo to behave differently
|
||||||
|
realSha512Sum := sha512.Sum512([]byte("stuff"))
|
||||||
|
repo.Snapshot.Signed.Meta["only512"] = data.FileMeta{Hashes: data.Hashes{notary.SHA512: realSha512Sum[:]}}
|
||||||
|
repo.Snapshot.Signed.Meta["targets/random"] = data.FileMeta{Hashes: data.Hashes{"randomsha": []byte("12345")}}
|
||||||
|
repo.Snapshot.Signed.Meta["targets/nohashes"] = data.FileMeta{Length: 1}
|
||||||
|
|
||||||
|
extraMeta := []string{"only512", "targets/random", "targets/nohashes"}
|
||||||
|
|
||||||
|
meta, err := testutils.SignAndSerialize(repo)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
builder := tuf.NewRepoBuilder(gun, nil, trustpinning.TrustPinConfig{})
|
||||||
|
// if neither snapshot nor timestamp are loaded, no matter how much other data is loaded, consistent info
|
||||||
|
// is empty (checksum unknown, length -1) except for timestamp, which is never referenced by a checksum
// and always reports the max timestamp size
|
||||||
|
for _, roleToLoad := range []string{data.CanonicalRootRole, data.CanonicalTargetsRole} {
|
||||||
|
require.NoError(t, builder.Load(roleToLoad, meta[roleToLoad], 1, false))
|
||||||
|
for _, checkName := range append(data.BaseRoles, extraMeta...) {
|
||||||
|
ci := builder.GetConsistentInfo(checkName)
|
||||||
|
require.Equal(t, checkName, ci.ConsistentName())
|
||||||
|
|
||||||
|
switch checkName {
|
||||||
|
case data.CanonicalTimestampRole:
|
||||||
|
// timestamp's size is always the max timestamp size
|
||||||
|
require.True(t, ci.ChecksumKnown())
|
||||||
|
require.Equal(t, notary.MaxTimestampSize, ci.Length())
|
||||||
|
default:
|
||||||
|
require.False(t, ci.ChecksumKnown())
|
||||||
|
require.Equal(t, int64(-1), ci.Length())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// once timestamp is loaded, we can get the consistent info for snapshot but nothing else
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalTimestampRole, meta[data.CanonicalTimestampRole], 1, false))
|
||||||
|
for _, checkName := range append(data.BaseRoles, extraMeta...) {
|
||||||
|
ci := builder.GetConsistentInfo(checkName)
|
||||||
|
|
||||||
|
switch checkName {
|
||||||
|
case data.CanonicalSnapshotRole:
|
||||||
|
cName := utils.ConsistentName(data.CanonicalSnapshotRole,
|
||||||
|
repo.Timestamp.Signed.Meta[data.CanonicalSnapshotRole].Hashes[notary.SHA256])
|
||||||
|
require.Equal(t, cName, ci.ConsistentName())
|
||||||
|
require.True(t, ci.ChecksumKnown())
|
||||||
|
require.True(t, ci.Length() > -1)
|
||||||
|
case data.CanonicalTimestampRole:
|
||||||
|
// timestamp's canonical name is always "timestamp" and its size is always the max
|
||||||
|
// timestamp size
|
||||||
|
require.Equal(t, data.CanonicalTimestampRole, ci.ConsistentName())
|
||||||
|
require.True(t, ci.ChecksumKnown())
|
||||||
|
require.Equal(t, notary.MaxTimestampSize, ci.Length())
|
||||||
|
default:
|
||||||
|
require.Equal(t, checkName, ci.ConsistentName())
|
||||||
|
require.False(t, ci.ChecksumKnown())
|
||||||
|
require.Equal(t, int64(-1), ci.Length())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// once the snapshot is loaded, we can get real consistent info for all loaded roles
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalSnapshotRole, meta[data.CanonicalSnapshotRole], 1, false))
|
||||||
|
for _, checkName := range data.BaseRoles {
|
||||||
|
ci := builder.GetConsistentInfo(checkName)
|
||||||
|
require.True(t, ci.ChecksumKnown(), "%s's checksum is not known", checkName)
|
||||||
|
|
||||||
|
switch checkName {
|
||||||
|
case data.CanonicalTimestampRole:
|
||||||
|
// timestamp's canonical name is always "timestamp" and its size is always the max timestamp size
|
||||||
|
require.Equal(t, data.CanonicalTimestampRole, ci.ConsistentName())
|
||||||
|
require.Equal(t, notary.MaxTimestampSize, ci.Length())
|
||||||
|
default:
|
||||||
|
fileInfo := repo.Snapshot.Signed.Meta
|
||||||
|
if checkName == data.CanonicalSnapshotRole {
|
||||||
|
fileInfo = repo.Timestamp.Signed.Meta
|
||||||
|
}
|
||||||
|
|
||||||
|
cName := utils.ConsistentName(checkName, fileInfo[checkName].Hashes[notary.SHA256])
|
||||||
|
require.Equal(t, cName, ci.ConsistentName())
|
||||||
|
require.True(t, ci.Length() > -1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// the fake roles have invalid-ish checksums: the ConsistentInfos for those will return
|
||||||
|
// non-consistent names but non -1 sizes
|
||||||
|
for _, checkName := range extraMeta {
|
||||||
|
ci := builder.GetConsistentInfo(checkName)
|
||||||
|
require.Equal(t, checkName, ci.ConsistentName()) // because no sha256 hash
|
||||||
|
require.True(t, ci.ChecksumKnown())
|
||||||
|
require.True(t, ci.Length() > -1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// a non-existent role's ConsistentInfo is empty
|
||||||
|
ci := builder.GetConsistentInfo("nonExistent")
|
||||||
|
require.Equal(t, "nonExistent", ci.ConsistentName())
|
||||||
|
require.False(t, ci.ChecksumKnown())
|
||||||
|
require.Equal(t, int64(-1), ci.Length())
|
||||||
|
|
||||||
|
// when we bootstrap a new builder, the root has consistent info because the checksum is provided,
|
||||||
|
// but nothing else does
|
||||||
|
builder = builder.BootstrapNewBuilder()
|
||||||
|
for _, checkName := range append(data.BaseRoles, extraMeta...) {
|
||||||
|
ci := builder.GetConsistentInfo(checkName)
|
||||||
|
|
||||||
|
switch checkName {
|
||||||
|
case data.CanonicalTimestampRole:
|
||||||
|
// timestamp's size is always the max timestamp size
|
||||||
|
require.Equal(t, checkName, ci.ConsistentName())
|
||||||
|
require.True(t, ci.ChecksumKnown())
|
||||||
|
require.Equal(t, notary.MaxTimestampSize, ci.Length())
|
||||||
|
|
||||||
|
case data.CanonicalRootRole:
|
||||||
|
cName := utils.ConsistentName(data.CanonicalRootRole,
|
||||||
|
repo.Snapshot.Signed.Meta[data.CanonicalRootRole].Hashes[notary.SHA256])
|
||||||
|
|
||||||
|
require.Equal(t, cName, ci.ConsistentName())
|
||||||
|
require.True(t, ci.ChecksumKnown())
|
||||||
|
require.True(t, ci.Length() > -1)
|
||||||
|
|
||||||
|
default:
|
||||||
|
require.Equal(t, checkName, ci.ConsistentName())
|
||||||
|
require.False(t, ci.ChecksumKnown())
|
||||||
|
require.Equal(t, int64(-1), ci.Length())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
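In an updater, GetConsistentInfo is what decides the name to request and the size cap to enforce for each role. A hedged sketch of that use follows; fetch stands in for whatever remote-store call the caller already has and is not an API introduced here.

// illustrative sketch only
func fetchRole(builder tuf.RepoBuilder, role string, fetch func(name string, sizeLimit int64) ([]byte, error)) ([]byte, error) {
	info := builder.GetConsistentInfo(role)
	if !info.ChecksumKnown() {
		// no checksum reference yet (e.g. root before any snapshot/timestamp is loaded):
		// fall back to the plain role name with no known size
		return fetch(role, -1)
	}
	// otherwise use the checksum-qualified name and the recorded length
	// (for timestamp this is always the plain name and the max timestamp size)
	return fetch(info.ConsistentName(), info.Length())
}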
|
||||||
|
|
||||||
|
// No matter what order timestamp and snapshot is loaded, if the snapshot's checksum doesn't match
|
||||||
|
// what's in the timestamp, the builder will error and refuse to load the latest piece of metadata
|
||||||
|
// whether that is snapshot (because it was loaded after timestamp) or timestamp (because builder
|
||||||
|
// retroactively checks the loaded snapshot's checksum). The timestamp ONLY checks the snapshot checksum.
|
||||||
|
func TestTimestampPreAndPostChecksumming(t *testing.T) {
|
||||||
|
gun := "docker.com/notary"
|
||||||
|
repo, _, err := testutils.EmptyRepo(gun, "targets/other", "targets/other/other")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// add invalid checksums for all the other roles to the timestamp too, and show that
|
||||||
|
// cached items aren't checksummed against this
|
||||||
|
fakeChecksum, err := data.NewFileMeta(bytes.NewBuffer([]byte("fake")), notary.SHA256, notary.SHA512)
|
||||||
|
require.NoError(t, err)
|
||||||
|
for _, roleName := range append(data.BaseRoles, "targets/other") {
|
||||||
|
// add a wrong checksum for every role, including timestamp itself
|
||||||
|
repo.Timestamp.Signed.Meta[roleName] = fakeChecksum
|
||||||
|
}
|
||||||
|
// this will overwrite the snapshot checksum with the right one
|
||||||
|
meta, err := testutils.SignAndSerialize(repo)
|
||||||
|
require.NoError(t, err)
|
||||||
|
// ensure that the fake meta for the other roles wasn't destroyed by signing the timestamp
|
||||||
|
require.Len(t, repo.Timestamp.Signed.Meta, 5)
|
||||||
|
|
||||||
|
snapJSON := append(meta[data.CanonicalSnapshotRole], ' ')
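// the appended space changes the raw bytes (and therefore the checksum) without breaking the JSON or its signatures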
|
||||||
|
|
||||||
|
// --- load timestamp first
|
||||||
|
builder := tuf.NewRepoBuilder(gun, nil, trustpinning.TrustPinConfig{})
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalRootRole, meta[data.CanonicalRootRole], 1, false))
|
||||||
|
// loading the timestamp doesn't fail, even though the checksum it records for the already-loaded root is wrong
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalTimestampRole, meta[data.CanonicalTimestampRole], 1, false))
|
||||||
|
// loading the snapshot then fails, because of the checksum the timestamp records for it
|
||||||
|
err = builder.Load(data.CanonicalSnapshotRole, snapJSON, 1, false)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.IsType(t, data.ErrMismatchedChecksum{}, err)
|
||||||
|
require.True(t, builder.IsLoaded(data.CanonicalTimestampRole))
|
||||||
|
require.False(t, builder.IsLoaded(data.CanonicalSnapshotRole))
|
||||||
|
// all the other metadata can be loaded in, even though the checksums are wrong according to timestamp
|
||||||
|
for _, roleName := range []string{data.CanonicalTargetsRole, "targets/other"} {
|
||||||
|
require.NoError(t, builder.Load(roleName, meta[roleName], 1, false))
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- load snapshot first
|
||||||
|
builder = tuf.NewRepoBuilder(gun, nil, trustpinning.TrustPinConfig{})
|
||||||
|
for _, roleName := range append(data.BaseRoles, "targets/other") {
|
||||||
|
switch roleName {
|
||||||
|
case data.CanonicalTimestampRole:
|
||||||
|
continue
|
||||||
|
case data.CanonicalSnapshotRole:
|
||||||
|
require.NoError(t, builder.Load(roleName, snapJSON, 1, false))
|
||||||
|
default:
|
||||||
|
require.NoError(t, builder.Load(roleName, meta[roleName], 1, false))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// timestamp fails because the snapshot checksum is wrong
|
||||||
|
err = builder.Load(data.CanonicalTimestampRole, meta[data.CanonicalTimestampRole], 1, false)
|
||||||
|
require.Error(t, err)
|
||||||
|
checksumErr, ok := err.(data.ErrMismatchedChecksum)
|
||||||
|
require.True(t, ok)
|
||||||
|
require.Contains(t, checksumErr.Error(), "checksum for snapshot did not match")
|
||||||
|
require.False(t, builder.IsLoaded(data.CanonicalTimestampRole))
|
||||||
|
require.True(t, builder.IsLoaded(data.CanonicalSnapshotRole))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Creates metadata in the following manner:
|
||||||
|
// - the snapshot has bad checksums for itself and for timestamp, to show that those aren't checked
|
||||||
|
// - snapshot has valid checksums for root, targets, and targets/other
|
||||||
|
// - snapshot doesn't have a checksum for targets/other/other, but targets/other/other is a valid
|
||||||
|
// delegation role in targets/other and there is metadata for targets/other/other that is correctly
|
||||||
|
// signed
|
||||||
|
func setupSnapshotChecksumming(t *testing.T, gun string) map[string][]byte {
|
||||||
|
repo, _, err := testutils.EmptyRepo(gun, "targets/other", "targets/other/other")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// give the snapshot invalid checksums for itself and for the timestamp, and show that
|
||||||
|
// those entries are never used for checksum validation
|
||||||
|
fakeChecksum, err := data.NewFileMeta(bytes.NewBuffer([]byte("fake")), notary.SHA256, notary.SHA512)
|
||||||
|
require.NoError(t, err)
|
||||||
|
// fake the snapshot and timestamp checksums
|
||||||
|
repo.Snapshot.Signed.Meta[data.CanonicalSnapshotRole] = fakeChecksum
|
||||||
|
repo.Snapshot.Signed.Meta[data.CanonicalTimestampRole] = fakeChecksum
|
||||||
|
|
||||||
|
meta, err := testutils.SignAndSerialize(repo)
|
||||||
|
require.NoError(t, err)
|
||||||
|
// ensure that the fake metadata for other roles wasn't destroyed by signing
|
||||||
|
require.Len(t, repo.Snapshot.Signed.Meta, 5)
|
||||||
|
|
||||||
|
// create delegation metadata that should not be in snapshot, but has a valid role and signature
|
||||||
|
_, err = repo.InitTargets("targets/other/other")
|
||||||
|
require.NoError(t, err)
|
||||||
|
s, err := repo.SignTargets("targets/other/other", data.DefaultExpires(data.CanonicalTargetsRole))
|
||||||
|
require.NoError(t, err)
|
||||||
|
meta["targets/other/other"], err = json.Marshal(s)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
return meta
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the snapshot is loaded first (-ish, because really root has to be loaded first)
|
||||||
|
// it will be used to validate the checksums of all other metadata that gets loaded.
|
||||||
|
// If the checksum doesn't match, or if there is no checksum, then the other metadata
|
||||||
|
// cannot be loaded.
|
||||||
|
func TestSnapshotLoadedFirstChecksumsOthers(t *testing.T) {
|
||||||
|
gun := "docker.com/notary"
|
||||||
|
meta := setupSnapshotChecksumming(t, gun)
|
||||||
|
// --- load root then snapshot
|
||||||
|
builder := tuf.NewRepoBuilder(gun, nil, trustpinning.TrustPinConfig{})
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalRootRole, meta[data.CanonicalRootRole], 1, false))
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalSnapshotRole, meta[data.CanonicalSnapshotRole], 1, false))
|
||||||
|
|
||||||
|
// loading timestamp is fine, even though the timestamp metadata has the wrong checksum because
|
||||||
|
// we don't check timestamp checksums
|
||||||
|
require.NoError(t, builder.Load(data.CanonicalTimestampRole, meta[data.CanonicalTimestampRole], 1, false))
|
||||||
|
|
||||||
|
// loading the other roles' metadata with a space will fail because of a checksum failure (builder
|
||||||
|
// checks right away if the snapshot is loaded) - in the case of targets/other/other, which should
|
||||||
|
// not be in snapshot at all, loading should fail even without a space because there is no checksum
|
||||||
|
// for it
|
||||||
|
for _, roleNameToLoad := range []string{data.CanonicalTargetsRole, "targets/other"} {
|
||||||
|
err := builder.Load(roleNameToLoad, append(meta[roleNameToLoad], ' '), 0, false)
|
||||||
|
require.Error(t, err)
|
||||||
|
checksumErr, ok := err.(data.ErrMismatchedChecksum)
|
||||||
|
require.True(t, ok)
|
||||||
|
require.Contains(t, checksumErr.Error(), fmt.Sprintf("checksum for %s did not match", roleNameToLoad))
|
||||||
|
require.False(t, builder.IsLoaded(roleNameToLoad))
|
||||||
|
|
||||||
|
// now load it for real (since we need targets loaded before trying to load "targets/other")
|
||||||
|
require.NoError(t, builder.Load(roleNameToLoad, meta[roleNameToLoad], 1, false))
|
||||||
|
}
|
||||||
|
// loading the role that has no checksum in the snapshot will fail
|
||||||
|
err := builder.Load("targets/other/other", meta["targets/other/other"], 1, false)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.IsType(t, data.ErrMissingMeta{}, err)
|
||||||
|
require.False(t, builder.IsLoaded("targets/other/other"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// If any other metadata is loaded first, when the snapshot is loaded it will retroactively go back
|
||||||
|
// and validate that metadata. If anything fails to validate, or there is metadata for which this
|
||||||
|
// snapshot has no checksum, the snapshot will fail to validate.
|
||||||
|
func TestSnapshotLoadedAfterChecksumsOthersRetroactively(t *testing.T) {
|
||||||
|
gun := "docker.com/notary"
|
||||||
|
meta := setupSnapshotChecksumming(t, gun)
|
||||||
|
|
||||||
|
// --- load all the other metadata first, but with an extra space at the end which should
|
||||||
|
// --- validate fine, except for the checksum.
|
||||||
|
for _, roleNameToPermute := range append(data.BaseRoles, "targets/other") {
|
||||||
|
builder := tuf.NewRepoBuilder(gun, nil, trustpinning.TrustPinConfig{})
|
||||||
|
if roleNameToPermute == data.CanonicalSnapshotRole {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// load all the roles normally, except for roleToPermute, which has one space added
|
||||||
|
// to the end, thus changing the checksum
|
||||||
|
for _, roleNameToLoad := range append(data.BaseRoles, "targets/other") {
|
||||||
|
switch roleNameToLoad {
|
||||||
|
case data.CanonicalSnapshotRole:
|
||||||
|
continue // we load this later
|
||||||
|
case roleNameToPermute:
|
||||||
|
// having a space added at the end should not affect any validity check except checksum
|
||||||
|
require.NoError(t, builder.Load(roleNameToLoad, append(meta[roleNameToLoad], ' '), 0, false))
|
||||||
|
default:
|
||||||
|
require.NoError(t, builder.Load(roleNameToLoad, meta[roleNameToLoad], 1, false))
|
||||||
|
}
|
||||||
|
require.True(t, builder.IsLoaded(roleNameToLoad))
|
||||||
|
}
|
||||||
|
// now load the snapshot - it should fail with the checksum failure for the permuted role
|
||||||
|
err := builder.Load(data.CanonicalSnapshotRole, meta[data.CanonicalSnapshotRole], 1, false)
|
||||||
|
switch roleNameToPermute {
|
||||||
|
case data.CanonicalTimestampRole:
|
||||||
|
require.NoError(t, err) // we don't check the timestamp's checksum
|
||||||
|
default:
|
||||||
|
require.Error(t, err)
|
||||||
|
checksumErr, ok := err.(data.ErrMismatchedChecksum)
|
||||||
|
require.True(t, ok)
|
||||||
|
require.Contains(t, checksumErr.Error(), fmt.Sprintf("checksum for %s did not match", roleNameToPermute))
|
||||||
|
require.False(t, builder.IsLoaded(data.CanonicalSnapshotRole))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// load all the metadata as is without alteration (so they should validate all checksums)
|
||||||
|
// but also load the metadata that is not contained in the snapshot. Then when the snapshot
|
||||||
|
// is loaded it will fail validation, because it doesn't have target/other/other's checksum
|
||||||
|
builder := tuf.NewRepoBuilder(gun, nil, trustpinning.TrustPinConfig{})
|
||||||
|
for _, roleNameToLoad := range append(data.BaseRoles, "targets/other", "targets/other/other") {
|
||||||
|
if roleNameToLoad == data.CanonicalSnapshotRole {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
require.NoError(t, builder.Load(roleNameToLoad, meta[roleNameToLoad], 1, false))
|
||||||
|
}
|
||||||
|
err := builder.Load(data.CanonicalSnapshotRole, meta[data.CanonicalSnapshotRole], 1, false)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.IsType(t, data.ErrMissingMeta{}, err)
|
||||||
|
}
|
|
@ -2,36 +2,34 @@ package client
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
|
||||||
"path"
|
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
"github.com/Sirupsen/logrus"
|
||||||
"github.com/docker/notary"
|
"github.com/docker/notary"
|
||||||
tuf "github.com/docker/notary/tuf"
|
tuf "github.com/docker/notary/tuf"
|
||||||
"github.com/docker/notary/tuf/data"
|
"github.com/docker/notary/tuf/data"
|
||||||
"github.com/docker/notary/tuf/signed"
|
|
||||||
"github.com/docker/notary/tuf/store"
|
"github.com/docker/notary/tuf/store"
|
||||||
"github.com/docker/notary/tuf/utils"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Client is a usability wrapper around a raw TUF repo
|
// Client is a usability wrapper around a raw TUF repo
|
||||||
type Client struct {
|
type Client struct {
|
||||||
local *tuf.Repo
|
remote store.RemoteStore
|
||||||
remote store.RemoteStore
|
cache store.MetadataStore
|
||||||
cache store.MetadataStore
|
oldBuilder tuf.RepoBuilder
|
||||||
|
newBuilder tuf.RepoBuilder
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClient initialized a Client with the given repo, remote source of content, and cache
|
// NewClient initializes a Client with the given builders, remote source of content, and cache
|
||||||
func NewClient(local *tuf.Repo, remote store.RemoteStore, cache store.MetadataStore) *Client {
|
func NewClient(oldBuilder, newBuilder tuf.RepoBuilder, remote store.RemoteStore, cache store.MetadataStore) *Client {
|
||||||
return &Client{
|
return &Client{
|
||||||
local: local,
|
oldBuilder: oldBuilder,
|
||||||
remote: remote,
|
newBuilder: newBuilder,
|
||||||
cache: cache,
|
remote: remote,
|
||||||
|
cache: cache,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update performs an update to the TUF repo as defined by the TUF spec
|
// Update performs an update to the TUF repo as defined by the TUF spec
|
||||||
func (c *Client) Update() error {
|
func (c *Client) Update() (*tuf.Repo, error) {
|
||||||
// 1. Get timestamp
|
// 1. Get timestamp
|
||||||
// a. If timestamp error (verification, expired, etc...) download new root and return to 1.
|
// a. If timestamp error (verification, expired, etc...) download new root and return to 1.
|
||||||
// 2. Check if local snapshot is up to date
|
// 2. Check if local snapshot is up to date
|
||||||
|
@ -44,503 +42,188 @@ func (c *Client) Update() error {
|
||||||
err := c.update()
|
err := c.update()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.Debug("Error occurred. Root will be downloaded and another update attempted")
|
logrus.Debug("Error occurred. Root will be downloaded and another update attempted")
|
||||||
|
logrus.Debug("Resetting the TUF builder...")
|
||||||
|
|
||||||
|
c.newBuilder = c.newBuilder.BootstrapNewBuilder()
|
||||||
|
|
||||||
if err := c.downloadRoot(); err != nil {
|
if err := c.downloadRoot(); err != nil {
|
||||||
logrus.Debug("Client Update (Root):", err)
|
logrus.Debug("Client Update (Root):", err)
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
// If we error again, we now have the latest root and just want to fail
|
// If we error again, we now have the latest root and just want to fail
|
||||||
// out as there's no expectation the problem can be resolved automatically
|
// out as there's no expectation the problem can be resolved automatically
|
||||||
logrus.Debug("retrying TUF client update")
|
logrus.Debug("retrying TUF client update")
|
||||||
return c.update()
|
if err := c.update(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return c.newBuilder.Finish()
|
||||||
}
|
}
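A rough sketch of how a caller of this package might drive the new two-builder client; the surrounding setup (the two builders, the remote store, and the cache) is assumed to exist already, and the helper name is illustrative rather than part of this change.

// illustrative sketch only (package client)
func fetchUpdatedRepo(oldBuilder, newBuilder tuf.RepoBuilder, remote store.RemoteStore, cache store.MetadataStore) (*tuf.Repo, error) {
	c := NewClient(oldBuilder, newBuilder, remote, cache)
	// on success this is the fully validated *tuf.Repo produced by newBuilder.Finish()
	return c.Update()
}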
|
||||||
|
|
||||||
func (c *Client) update() error {
|
func (c *Client) update() error {
|
||||||
err := c.downloadTimestamp()
|
if err := c.downloadTimestamp(); err != nil {
|
||||||
if err != nil {
|
|
||||||
logrus.Debugf("Client Update (Timestamp): %s", err.Error())
|
logrus.Debugf("Client Update (Timestamp): %s", err.Error())
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
err = c.downloadSnapshot()
|
if err := c.downloadSnapshot(); err != nil {
|
||||||
if err != nil {
|
|
||||||
logrus.Debugf("Client Update (Snapshot): %s", err.Error())
|
logrus.Debugf("Client Update (Snapshot): %s", err.Error())
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
err = c.checkRoot()
|
|
||||||
if err != nil {
|
|
||||||
// In this instance the root has not expired base on time, but is
|
|
||||||
// expired based on the snapshot dictating a new root has been produced.
|
|
||||||
logrus.Debug(err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// will always need top level targets at a minimum
|
// will always need top level targets at a minimum
|
||||||
err = c.downloadTargets(data.CanonicalTargetsRole)
|
if err := c.downloadTargets(); err != nil {
|
||||||
if err != nil {
|
|
||||||
logrus.Debugf("Client Update (Targets): %s", err.Error())
|
logrus.Debugf("Client Update (Targets): %s", err.Error())
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// checkRoot determines if the hash, and size are still those reported
|
|
||||||
// in the snapshot file. It will also check the expiry, however, if the
|
|
||||||
// hash and size in snapshot are unchanged but the root file has expired,
|
|
||||||
// there is little expectation that the situation can be remedied.
|
|
||||||
func (c Client) checkRoot() error {
|
|
||||||
role := data.CanonicalRootRole
|
|
||||||
size := c.local.Snapshot.Signed.Meta[role].Length
|
|
||||||
|
|
||||||
expectedHashes := c.local.Snapshot.Signed.Meta[role].Hashes
|
|
||||||
|
|
||||||
raw, err := c.cache.GetMeta(data.CanonicalRootRole, size)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := data.CheckHashes(raw, role, expectedHashes); err != nil {
|
|
||||||
return fmt.Errorf("Cached root hashes did not match snapshot root hashes")
|
|
||||||
}
|
|
||||||
|
|
||||||
if int64(len(raw)) != size {
|
|
||||||
return fmt.Errorf("Cached root size did not match snapshot size")
|
|
||||||
}
|
|
||||||
|
|
||||||
root := &data.SignedRoot{}
|
|
||||||
err = json.Unmarshal(raw, root)
|
|
||||||
if err != nil {
|
|
||||||
return ErrCorruptedCache{file: "root.json"}
|
|
||||||
}
|
|
||||||
|
|
||||||
if signed.IsExpired(root.Signed.Expires) {
|
|
||||||
return tuf.ErrLocalRootExpired{}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// downloadRoot is responsible for downloading the root.json
|
// downloadRoot is responsible for downloading the root.json
|
||||||
func (c *Client) downloadRoot() error {
|
func (c *Client) downloadRoot() error {
|
||||||
logrus.Debug("Downloading Root...")
|
|
||||||
role := data.CanonicalRootRole
|
role := data.CanonicalRootRole
|
||||||
|
consistentInfo := c.newBuilder.GetConsistentInfo(role)
|
||||||
|
|
||||||
// We can't read an exact size for the root metadata without risking getting stuck in the TUF update cycle
|
// We can't read an exact size for the root metadata without risking getting stuck in the TUF update cycle
|
||||||
// since it's possible that downloading timestamp/snapshot metadata may fail due to a signature mismatch
|
// since it's possible that downloading timestamp/snapshot metadata may fail due to a signature mismatch
|
||||||
var size int64 = -1
|
if !consistentInfo.ChecksumKnown() {
|
||||||
|
logrus.Debugf("Loading root with no expected checksum")
|
||||||
|
|
||||||
// We could not expect what the "snapshot" meta has specified.
|
// get the cached root, if it exists, just for version checking
|
||||||
//
|
cachedRoot, _ := c.cache.GetMeta(role, -1)
|
||||||
// In some old clients, there is only the "sha256",
|
// prefer to download a new root
|
||||||
// but both "sha256" and "sha512" in the newer ones.
|
_, remoteErr := c.tryLoadRemote(consistentInfo, cachedRoot)
|
||||||
//
|
return remoteErr
|
||||||
// And possibly more in the future.
|
|
||||||
var expectedHashes data.Hashes
|
|
||||||
|
|
||||||
if c.local.Snapshot != nil {
|
|
||||||
if prevRootMeta, ok := c.local.Snapshot.Signed.Meta[role]; ok {
|
|
||||||
size = prevRootMeta.Length
|
|
||||||
expectedHashes = prevRootMeta.Hashes
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// if we're bootstrapping we may not have a cached root, an
|
_, err := c.tryLoadCacheThenRemote(consistentInfo)
|
||||||
// error will result in the "previous root version" being
|
return err
|
||||||
// interpreted as 0.
|
|
||||||
var download bool
|
|
||||||
var err error
|
|
||||||
var cachedRoot []byte
|
|
||||||
old := &data.Signed{}
|
|
||||||
version := 0
|
|
||||||
|
|
||||||
// Due to the same reason, we don't really know how many hashes are there.
|
|
||||||
if len(expectedHashes) != 0 {
|
|
||||||
// can only trust cache if we have an expected sha256(for example) to trust
|
|
||||||
cachedRoot, err = c.cache.GetMeta(role, size)
|
|
||||||
}
|
|
||||||
|
|
||||||
if cachedRoot == nil || err != nil {
|
|
||||||
logrus.Debug("didn't find a cached root, must download")
|
|
||||||
download = true
|
|
||||||
} else {
|
|
||||||
if err := data.CheckHashes(cachedRoot, role, expectedHashes); err != nil {
|
|
||||||
logrus.Debug("cached root's hash didn't match expected, must download")
|
|
||||||
download = true
|
|
||||||
}
|
|
||||||
|
|
||||||
err := json.Unmarshal(cachedRoot, old)
|
|
||||||
if err == nil {
|
|
||||||
root, err := data.RootFromSigned(old)
|
|
||||||
if err == nil {
|
|
||||||
version = root.Signed.Version
|
|
||||||
} else {
|
|
||||||
logrus.Debug("couldn't parse Signed part of cached root, must download")
|
|
||||||
download = true
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
logrus.Debug("couldn't parse cached root, must download")
|
|
||||||
download = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
var s *data.Signed
|
|
||||||
var raw []byte
|
|
||||||
if download {
|
|
||||||
// use consistent download if we have the checksum.
|
|
||||||
raw, s, err = c.downloadSigned(role, size, expectedHashes)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
logrus.Debug("using cached root")
|
|
||||||
s = old
|
|
||||||
}
|
|
||||||
if err := c.verifyRoot(role, s, version); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if download {
|
|
||||||
logrus.Debug("caching downloaded root")
|
|
||||||
// Now that we have accepted new root, write it to cache
|
|
||||||
if err = c.cache.SetMeta(role, raw); err != nil {
|
|
||||||
logrus.Errorf("Failed to write root to local cache: %s", err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c Client) verifyRoot(role string, s *data.Signed, minVersion int) error {
|
|
||||||
// this will confirm that the root has been signed by the old root role
|
|
||||||
// with the root keys we bootstrapped with.
|
|
||||||
// Still need to determine if there has been a root key update and
|
|
||||||
// confirm signature with new root key
|
|
||||||
logrus.Debug("verifying root with existing keys")
|
|
||||||
rootRole, err := c.local.GetBaseRole(role)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Debug("no previous root role loaded")
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// Verify using the rootRole loaded from the known root.json
|
|
||||||
if err = signed.Verify(s, rootRole, minVersion); err != nil {
|
|
||||||
logrus.Debug("root did not verify with existing keys")
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
logrus.Debug("updating known root roles and keys")
|
|
||||||
root, err := data.RootFromSigned(s)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Error(err.Error())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// replace the existing root.json with the new one (just in memory, we
|
|
||||||
// have another validation step before we fully accept the new root)
|
|
||||||
err = c.local.SetRoot(root)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Error(err.Error())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// Verify the new root again having loaded the rootRole out of this new
|
|
||||||
// file (verifies self-referential integrity)
|
|
||||||
// TODO(endophage): be more intelligent and only re-verify if we detect
|
|
||||||
// there has been a change in root keys
|
|
||||||
logrus.Debug("verifying root with updated keys")
|
|
||||||
rootRole, err = c.local.GetBaseRole(role)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Debug("root role with new keys not loaded")
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = signed.Verify(s, rootRole, minVersion)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Debug("root did not verify with new keys")
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
logrus.Debug("successfully verified root")
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// downloadTimestamp is responsible for downloading the timestamp.json
|
// downloadTimestamp is responsible for downloading the timestamp.json
|
||||||
// Timestamps are special in that we ALWAYS attempt to download and only
|
// Timestamps are special in that we ALWAYS attempt to download and only
|
||||||
// use cache if the download fails (and the cache is still valid).
|
// use cache if the download fails (and the cache is still valid).
|
||||||
func (c *Client) downloadTimestamp() error {
|
func (c *Client) downloadTimestamp() error {
|
||||||
logrus.Debug("Downloading Timestamp...")
|
logrus.Debug("Loading timestamp...")
|
||||||
role := data.CanonicalTimestampRole
|
role := data.CanonicalTimestampRole
|
||||||
|
consistentInfo := c.newBuilder.GetConsistentInfo(role)
|
||||||
|
|
||||||
// We may not have a cached timestamp if this is the first time
|
// get the cached timestamp, if it exists
|
||||||
// we're interacting with the repo. This will result in the
|
cachedTS, cachedErr := c.cache.GetMeta(role, notary.MaxTimestampSize)
|
||||||
// version being 0
|
// always get the remote timestamp, since it supercedes the local one
|
||||||
var (
|
_, remoteErr := c.tryLoadRemote(consistentInfo, cachedTS)
|
||||||
old *data.Signed
|
|
||||||
ts *data.SignedTimestamp
|
|
||||||
version = 0
|
|
||||||
)
|
|
||||||
cachedTS, err := c.cache.GetMeta(role, notary.MaxTimestampSize)
|
|
||||||
if err == nil {
|
|
||||||
cached := &data.Signed{}
|
|
||||||
err := json.Unmarshal(cachedTS, cached)
|
|
||||||
if err == nil {
|
|
||||||
ts, err := data.TimestampFromSigned(cached)
|
|
||||||
if err == nil {
|
|
||||||
version = ts.Signed.Version
|
|
||||||
}
|
|
||||||
old = cached
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// unlike root, targets and snapshot, always try and download timestamps
|
|
||||||
// from remote, only using the cache one if we couldn't reach remote.
|
|
||||||
raw, s, err := c.downloadSigned(role, notary.MaxTimestampSize, nil)
|
|
||||||
if err == nil {
|
|
||||||
ts, err = c.verifyTimestamp(s, version)
|
|
||||||
if err == nil {
|
|
||||||
logrus.Debug("successfully verified downloaded timestamp")
|
|
||||||
c.cache.SetMeta(role, raw)
|
|
||||||
c.local.SetTimestamp(ts)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if old == nil {
|
|
||||||
// couldn't retrieve valid data from server and don't have unmarshallable data in cache.
|
|
||||||
logrus.Debug("no cached timestamp available")
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
logrus.Debug(err.Error())
|
|
||||||
logrus.Warn("Error while downloading remote metadata, using cached timestamp - this might not be the latest version available remotely")
|
|
||||||
ts, err = c.verifyTimestamp(old, version)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
logrus.Debug("successfully verified cached timestamp")
|
|
||||||
c.local.SetTimestamp(ts)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// verifies that a timestamp is valid, and returned the SignedTimestamp object to add to the tuf repo
|
switch {
|
||||||
func (c *Client) verifyTimestamp(s *data.Signed, minVersion int) (*data.SignedTimestamp, error) {
|
case remoteErr == nil:
|
||||||
timestampRole, err := c.local.GetBaseRole(data.CanonicalTimestampRole)
|
return nil
|
||||||
if err != nil {
|
case cachedErr == nil:
|
||||||
logrus.Debug("no timestamp role loaded")
|
logrus.Debug(remoteErr.Error())
|
||||||
return nil, err
|
logrus.Warn("Error while downloading remote metadata, using cached timestamp - this might not be the latest version available remotely")
|
||||||
|
|
||||||
|
err := c.newBuilder.Load(role, cachedTS, 1, false)
|
||||||
|
if err == nil {
|
||||||
|
logrus.Debug("successfully verified cached timestamp")
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
default:
|
||||||
|
logrus.Debug("no cached or remote timestamp available")
|
||||||
|
return remoteErr
|
||||||
}
|
}
|
||||||
if err := signed.Verify(s, timestampRole, minVersion); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return data.TimestampFromSigned(s)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// downloadSnapshot is responsible for downloading the snapshot.json
|
// downloadSnapshot is responsible for downloading the snapshot.json
|
||||||
func (c *Client) downloadSnapshot() error {
|
func (c *Client) downloadSnapshot() error {
|
||||||
logrus.Debug("Downloading Snapshot...")
|
logrus.Debug("Loading snapshot...")
|
||||||
role := data.CanonicalSnapshotRole
|
role := data.CanonicalSnapshotRole
|
||||||
if c.local.Timestamp == nil {
|
consistentInfo := c.newBuilder.GetConsistentInfo(role)
|
||||||
return tuf.ErrNotLoaded{Role: data.CanonicalTimestampRole}
|
|
||||||
}
|
|
||||||
size := c.local.Timestamp.Signed.Meta[role].Length
|
|
||||||
expectedHashes := c.local.Timestamp.Signed.Meta[role].Hashes
|
|
||||||
if len(expectedHashes) == 0 {
|
|
||||||
return data.ErrMissingMeta{Role: data.CanonicalSnapshotRole}
|
|
||||||
}
|
|
||||||
|
|
||||||
var download bool
|
_, err := c.tryLoadCacheThenRemote(consistentInfo)
|
||||||
old := &data.Signed{}
|
return err
|
||||||
version := 0
|
|
||||||
raw, err := c.cache.GetMeta(role, size)
|
|
||||||
if raw == nil || err != nil {
|
|
||||||
logrus.Debug("no snapshot in cache, must download")
|
|
||||||
download = true
|
|
||||||
} else {
|
|
||||||
// file may have been tampered with on disk. Always check the hash!
|
|
||||||
if err := data.CheckHashes(raw, role, expectedHashes); err != nil {
|
|
||||||
logrus.Debug("hash of snapshot in cache did not match expected hash, must download")
|
|
||||||
download = true
|
|
||||||
}
|
|
||||||
|
|
||||||
err := json.Unmarshal(raw, old)
|
|
||||||
if err == nil {
|
|
||||||
snap, err := data.SnapshotFromSigned(old)
|
|
||||||
if err == nil {
|
|
||||||
version = snap.Signed.Version
|
|
||||||
} else {
|
|
||||||
logrus.Debug("Could not parse Signed part of snapshot, must download")
|
|
||||||
download = true
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
logrus.Debug("Could not parse snapshot, must download")
|
|
||||||
download = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
var s *data.Signed
|
|
||||||
if download {
|
|
||||||
raw, s, err = c.downloadSigned(role, size, expectedHashes)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
logrus.Debug("using cached snapshot")
|
|
||||||
s = old
|
|
||||||
}
|
|
||||||
|
|
||||||
snapshotRole, err := c.local.GetBaseRole(role)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Debug("no snapshot role loaded")
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = signed.Verify(s, snapshotRole, version)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
logrus.Debug("successfully verified snapshot")
|
|
||||||
snap, err := data.SnapshotFromSigned(s)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
c.local.SetSnapshot(snap)
|
|
||||||
if download {
|
|
||||||
err = c.cache.SetMeta(role, raw)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Errorf("Failed to write snapshot to local cache: %s", err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// downloadTargets downloads all targets and delegated targets for the repository.
|
// downloadTargets downloads all targets and delegated targets for the repository.
|
||||||
// It uses a pre-order tree traversal as it's necessary to download parents first
|
// It uses a pre-order tree traversal as it's necessary to download parents first
|
||||||
// to obtain the keys to validate children.
|
// to obtain the keys to validate children.
|
||||||
func (c *Client) downloadTargets(role string) error {
|
func (c *Client) downloadTargets() error {
|
||||||
logrus.Debug("Downloading Targets...")
|
toDownload := []data.DelegationRole{{
|
||||||
stack := utils.NewStack()
|
BaseRole: data.BaseRole{Name: data.CanonicalTargetsRole},
|
||||||
stack.Push(role)
|
Paths: []string{""},
|
||||||
for !stack.Empty() {
|
}}
|
||||||
role, err := stack.PopString()
|
for len(toDownload) > 0 {
|
||||||
if err != nil {
|
role := toDownload[0]
|
||||||
return err
|
toDownload = toDownload[1:]
|
||||||
}
|
|
||||||
if c.local.Snapshot == nil {
|
|
||||||
return tuf.ErrNotLoaded{Role: data.CanonicalSnapshotRole}
|
|
||||||
}
|
|
||||||
snap := c.local.Snapshot.Signed
|
|
||||||
root := c.local.Root.Signed
|
|
||||||
|
|
||||||
s, err := c.getTargetsFile(role, snap.Meta, root.ConsistentSnapshot)
|
consistentInfo := c.newBuilder.GetConsistentInfo(role.Name)
|
||||||
|
if !consistentInfo.ChecksumKnown() {
|
||||||
|
logrus.Debugf("skipping %s because there is no checksum for it", role.Name)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
children, err := c.getTargetsFile(role, consistentInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if _, ok := err.(data.ErrMissingMeta); ok && role != data.CanonicalTargetsRole {
|
if _, ok := err.(data.ErrMissingMeta); ok && role.Name != data.CanonicalTargetsRole {
|
||||||
// if the role meta hasn't been published,
|
// if the role meta hasn't been published,
|
||||||
// that's ok, continue
|
// that's ok, continue
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
logrus.Error("Error getting targets file:", err)
|
logrus.Debugf("Error getting %s: %s", role.Name, err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
t, err := data.TargetsFromSigned(s, role)
|
toDownload = append(children, toDownload...)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = c.local.SetTargets(role, t)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// push delegated roles contained in the targets file onto the stack
|
|
||||||
for _, r := range t.Signed.Delegations.Roles {
|
|
||||||
if path.Dir(r.Name) == role {
|
|
||||||
// only load children that are direct 1st generation descendants
|
|
||||||
// of the role we've just downloaded
|
|
||||||
stack.Push(r.Name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Client) downloadSigned(role string, size int64, expectedHashes data.Hashes) ([]byte, *data.Signed, error) {
|
func (c Client) getTargetsFile(role data.DelegationRole, ci tuf.ConsistentInfo) ([]data.DelegationRole, error) {
|
||||||
rolePath := utils.ConsistentName(role, expectedHashes["sha256"])
|
logrus.Debugf("Loading %s...", role.Name)
|
||||||
raw, err := c.remote.GetMeta(rolePath, size)
|
tgs := &data.SignedTargets{}
|
||||||
|
|
||||||
|
raw, err := c.tryLoadCacheThenRemote(ci)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if expectedHashes != nil {
|
|
||||||
if err := data.CheckHashes(raw, role, expectedHashes); err != nil {
|
|
||||||
return nil, nil, data.ErrMismatchedChecksum{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
s := &data.Signed{}
|
|
||||||
err = json.Unmarshal(raw, s)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
return raw, s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c Client) getTargetsFile(role string, snapshotMeta data.Files, consistent bool) (*data.Signed, error) {
|
|
||||||
// require role exists in snapshots
|
|
||||||
roleMeta, ok := snapshotMeta[role]
|
|
||||||
if !ok {
|
|
||||||
return nil, data.ErrMissingMeta{Role: role}
|
|
||||||
}
|
|
||||||
expectedHashes := snapshotMeta[role].Hashes
|
|
||||||
if len(expectedHashes) == 0 {
|
|
||||||
return nil, data.ErrMissingMeta{Role: role}
|
|
||||||
}
|
|
||||||
|
|
||||||
// try to get meta file from content addressed cache
|
|
||||||
var download bool
|
|
||||||
old := &data.Signed{}
|
|
||||||
version := 0
|
|
||||||
raw, err := c.cache.GetMeta(role, roleMeta.Length)
|
|
||||||
if err != nil || raw == nil {
|
|
||||||
logrus.Debugf("Couldn't not find cached %s, must download", role)
|
|
||||||
download = true
|
|
||||||
} else {
|
|
||||||
// file may have been tampered with on disk. Always check the hash!
|
|
||||||
if err := data.CheckHashes(raw, role, expectedHashes); err != nil {
|
|
||||||
download = true
|
|
||||||
}
|
|
||||||
|
|
||||||
err := json.Unmarshal(raw, old)
|
|
||||||
if err == nil {
|
|
||||||
targ, err := data.TargetsFromSigned(old, role)
|
|
||||||
if err == nil {
|
|
||||||
version = targ.Signed.Version
|
|
||||||
} else {
|
|
||||||
download = true
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
download = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
size := snapshotMeta[role].Length
|
|
||||||
var s *data.Signed
|
|
||||||
if download {
|
|
||||||
raw, s, err = c.downloadSigned(role, size, expectedHashes)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
logrus.Debug("using cached ", role)
|
|
||||||
s = old
|
|
||||||
}
|
|
||||||
var targetOrDelgRole data.BaseRole
|
|
||||||
if data.IsDelegation(role) {
|
|
||||||
delgRole, err := c.local.GetDelegationRole(role)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Debugf("no %s delegation role loaded", role)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
targetOrDelgRole = delgRole.BaseRole
|
|
||||||
} else {
|
|
||||||
targetOrDelgRole, err = c.local.GetBaseRole(role)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Debugf("no %s role loaded", role)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err = signed.Verify(s, targetOrDelgRole, version); err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
logrus.Debugf("successfully verified %s", role)
|
|
||||||
if download {
|
// we know it unmarshals because if `tryLoadCacheThenRemote` didn't fail, then
|
||||||
// if we error when setting meta, we should continue.
|
// the raw has already been loaded into the builder
|
||||||
err = c.cache.SetMeta(role, raw)
|
json.Unmarshal(raw, tgs)
|
||||||
if err != nil {
|
return tgs.GetValidDelegations(role), nil
|
||||||
logrus.Errorf("Failed to write %s to local cache: %s", role, err.Error())
|
}
|
||||||
}
|
|
||||||
}
|
func (c *Client) tryLoadCacheThenRemote(consistentInfo tuf.ConsistentInfo) ([]byte, error) {
|
||||||
return s, nil
|
cachedTS, err := c.cache.GetMeta(consistentInfo.RoleName, consistentInfo.Length())
|
||||||
|
if err != nil {
|
||||||
|
logrus.Debugf("no %s in cache, must download", consistentInfo.RoleName)
|
||||||
|
return c.tryLoadRemote(consistentInfo, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = c.newBuilder.Load(consistentInfo.RoleName, cachedTS, 1, false); err == nil {
|
||||||
|
logrus.Debugf("successfully verified cached %s", consistentInfo.RoleName)
|
||||||
|
return cachedTS, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
logrus.Debugf("cached %s is invalid (must download): %s", consistentInfo.RoleName, err)
|
||||||
|
return c.tryLoadRemote(consistentInfo, cachedTS)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) tryLoadRemote(consistentInfo tuf.ConsistentInfo, old []byte) ([]byte, error) {
|
||||||
|
consistentName := consistentInfo.ConsistentName()
|
||||||
|
raw, err := c.remote.GetMeta(consistentName, consistentInfo.Length())
|
||||||
|
if err != nil {
|
||||||
|
logrus.Debugf("error downloading %s: %s", consistentName, err)
|
||||||
|
return old, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// try to load the old data into the old builder - only use it to validate
|
||||||
|
// versions if it loads successfully. If it errors, then the loaded version
|
||||||
|
// will be 1
|
||||||
|
c.oldBuilder.Load(consistentInfo.RoleName, old, 1, true)
|
||||||
|
minVersion := c.oldBuilder.GetLoadedVersion(consistentInfo.RoleName)
|
||||||
|
|
||||||
|
if err := c.newBuilder.Load(consistentInfo.RoleName, raw, minVersion, false); err != nil {
|
||||||
|
logrus.Debugf("downloaded %s is invalid: %s", consistentName, err)
|
||||||
|
return raw, err
|
||||||
|
}
|
||||||
|
logrus.Debugf("successfully verified downloaded %s", consistentName)
|
||||||
|
if err := c.cache.SetMeta(consistentInfo.RoleName, raw); err != nil {
|
||||||
|
logrus.Debugf("Unable to write %s to cache: %s", consistentInfo.RoleName, err)
|
||||||
|
}
|
||||||
|
return raw, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,130 +0,0 @@
|
||||||
package client
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/docker/notary/tuf"
|
|
||||||
"github.com/docker/notary/tuf/testutils"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
|
|
||||||
"github.com/docker/notary/tuf/data"
|
|
||||||
"github.com/docker/notary/tuf/store"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestDownloadTargetsNoChecksum: it's never valid to download any targets
|
|
||||||
// role (incl. delegations) when a checksum is not available.
|
|
||||||
func TestDownloadTargetsNoChecksum(t *testing.T) {
|
|
||||||
repo, _, err := testutils.EmptyRepo("docker.com/notary")
|
|
||||||
require.NoError(t, err)
|
|
||||||
localStorage := store.NewMemoryStore(nil)
|
|
||||||
remoteStorage := store.NewMemoryStore(nil)
|
|
||||||
client := NewClient(repo, remoteStorage, localStorage)
|
|
||||||
|
|
||||||
// create and "upload" sample targets
|
|
||||||
signedOrig, err := repo.SignTargets("targets", data.DefaultExpires("targets"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
orig, err := json.Marshal(signedOrig)
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = remoteStorage.SetMeta("targets", orig)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
delete(repo.Snapshot.Signed.Meta["targets"].Hashes, "sha256")
|
|
||||||
delete(repo.Snapshot.Signed.Meta["targets"].Hashes, "sha512")
|
|
||||||
|
|
||||||
err = client.downloadTargets("targets")
|
|
||||||
require.IsType(t, data.ErrMissingMeta{}, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestDownloadTargetsNoSnapshot: it's never valid to download any targets
|
|
||||||
// role (incl. delegations) when a checksum is not available.
|
|
||||||
func TestDownloadTargetsNoSnapshot(t *testing.T) {
|
|
||||||
repo, _, err := testutils.EmptyRepo("docker.com/notary")
|
|
||||||
require.NoError(t, err)
|
|
||||||
localStorage := store.NewMemoryStore(nil)
|
|
||||||
remoteStorage := store.NewMemoryStore(nil)
|
|
||||||
client := NewClient(repo, remoteStorage, localStorage)
|
|
||||||
|
|
||||||
// create and "upload" sample targets
|
|
||||||
signedOrig, err := repo.SignTargets("targets", data.DefaultExpires("targets"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
orig, err := json.Marshal(signedOrig)
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = remoteStorage.SetMeta("targets", orig)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
repo.Snapshot = nil
|
|
||||||
|
|
||||||
err = client.downloadTargets("targets")
|
|
||||||
require.IsType(t, tuf.ErrNotLoaded{}, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestUpdateDownloadRootChecksumNotFound(t *testing.T) {
|
|
||||||
remoteStore := store.NewMemoryStore(nil)
|
|
||||||
repo, _, err := testutils.EmptyRepo("docker.com/notary")
|
|
||||||
require.NoError(t, err)
|
|
||||||
localStorage := store.NewMemoryStore(nil)
|
|
||||||
client := NewClient(repo, remoteStore, localStorage)
|
|
||||||
|
|
||||||
// sign snapshot to make sure we have current checksum for root
|
|
||||||
_, err = repo.SignSnapshot(data.DefaultExpires("snapshot"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// sign and "upload" sample root
|
|
||||||
signedOrig, err := repo.SignRoot(data.DefaultExpires("root"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
orig, err := json.Marshal(signedOrig)
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = remoteStore.SetMeta("root", orig)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// don't sign snapshot again to ensure checksum is out of date (bad)
|
|
||||||
|
|
||||||
err = client.downloadRoot()
|
|
||||||
require.IsType(t, store.ErrMetaNotFound{}, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDownloadSnapshotNoTimestamp(t *testing.T) {
|
|
||||||
repo, _, err := testutils.EmptyRepo("docker.com/notary")
|
|
||||||
require.NoError(t, err)
|
|
||||||
localStorage := store.NewMemoryStore(nil)
|
|
||||||
remoteStorage := store.NewMemoryStore(nil)
|
|
||||||
client := NewClient(repo, remoteStorage, localStorage)
|
|
||||||
|
|
||||||
// create and "upload" sample snapshot and timestamp
|
|
||||||
signedOrig, err := repo.SignSnapshot(data.DefaultExpires("snapshot"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
orig, err := json.Marshal(signedOrig)
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = remoteStorage.SetMeta("snapshot", orig)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
repo.Timestamp = nil
|
|
||||||
|
|
||||||
err = client.downloadSnapshot()
|
|
||||||
require.IsType(t, tuf.ErrNotLoaded{}, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestDownloadSnapshotNoChecksum: It should never be valid to download a
|
|
||||||
// snapshot if we don't have a checksum
|
|
||||||
func TestDownloadSnapshotNoChecksum(t *testing.T) {
|
|
||||||
repo, _, err := testutils.EmptyRepo("docker.com/notary")
|
|
||||||
require.NoError(t, err)
|
|
||||||
localStorage := store.NewMemoryStore(nil)
|
|
||||||
remoteStorage := store.NewMemoryStore(nil)
|
|
||||||
client := NewClient(repo, remoteStorage, localStorage)
|
|
||||||
|
|
||||||
// create and "upload" sample snapshot and timestamp
|
|
||||||
signedOrig, err := repo.SignSnapshot(data.DefaultExpires("snapshot"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
orig, err := json.Marshal(signedOrig)
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = remoteStorage.SetMeta("snapshot", orig)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
delete(repo.Timestamp.Signed.Meta["snapshot"].Hashes, "sha256")
|
|
||||||
delete(repo.Timestamp.Signed.Meta["snapshot"].Hashes, "sha512")
|
|
||||||
|
|
||||||
err = client.downloadSnapshot()
|
|
||||||
require.IsType(t, data.ErrMissingMeta{}, err)
|
|
||||||
}
|
|
|
@ -31,9 +31,9 @@ func isValidRootStructure(r Root) error {
|
||||||
role: CanonicalRootRole, msg: fmt.Sprintf("expected type %s, not %s", expectedType, r.Type)}
|
role: CanonicalRootRole, msg: fmt.Sprintf("expected type %s, not %s", expectedType, r.Type)}
|
||||||
}
|
}
|
||||||
|
|
||||||
if r.Version < 0 {
|
if r.Version < 1 {
|
||||||
return ErrInvalidMetadata{
|
return ErrInvalidMetadata{
|
||||||
role: CanonicalRootRole, msg: "version cannot be negative"}
|
role: CanonicalRootRole, msg: "version cannot be less than 1"}
|
||||||
}
|
}
|
||||||
|
|
||||||
// all the base roles MUST appear in the root.json - other roles are allowed,
|
// all the base roles MUST appear in the root.json - other roles are allowed,
|
||||||
|
|
|
@ -225,4 +225,12 @@ func TestRootFromSignedValidatesVersion(t *testing.T) {
|
||||||
root.Signed.Version = -1
|
root.Signed.Version = -1
|
||||||
_, err := rootToSignedAndBack(t, root)
|
_, err := rootToSignedAndBack(t, root)
|
||||||
require.IsType(t, ErrInvalidMetadata{}, err)
|
require.IsType(t, ErrInvalidMetadata{}, err)
|
||||||
|
|
||||||
|
root.Signed.Version = 0
|
||||||
|
_, err = rootToSignedAndBack(t, root)
|
||||||
|
require.IsType(t, ErrInvalidMetadata{}, err)
|
||||||
|
|
||||||
|
root.Signed.Version = 1
|
||||||
|
_, err = rootToSignedAndBack(t, root)
|
||||||
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -22,19 +22,19 @@ type Snapshot struct {
|
||||||
Meta Files `json:"meta"`
|
Meta Files `json:"meta"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// isValidSnapshotStructure returns an error, or nil, depending on whether the content of the
|
// IsValidSnapshotStructure returns an error, or nil, depending on whether the content of the
|
||||||
// struct is valid for snapshot metadata. This does not check signatures or expiry, just that
|
// struct is valid for snapshot metadata. This does not check signatures or expiry, just that
|
||||||
// the metadata content is valid.
|
// the metadata content is valid.
|
||||||
func isValidSnapshotStructure(s Snapshot) error {
|
func IsValidSnapshotStructure(s Snapshot) error {
|
||||||
expectedType := TUFTypes[CanonicalSnapshotRole]
|
expectedType := TUFTypes[CanonicalSnapshotRole]
|
||||||
if s.Type != expectedType {
|
if s.Type != expectedType {
|
||||||
return ErrInvalidMetadata{
|
return ErrInvalidMetadata{
|
||||||
role: CanonicalSnapshotRole, msg: fmt.Sprintf("expected type %s, not %s", expectedType, s.Type)}
|
role: CanonicalSnapshotRole, msg: fmt.Sprintf("expected type %s, not %s", expectedType, s.Type)}
|
||||||
}
|
}
|
||||||
|
|
||||||
if s.Version < 0 {
|
if s.Version < 1 {
|
||||||
return ErrInvalidMetadata{
|
return ErrInvalidMetadata{
|
||||||
role: CanonicalSnapshotRole, msg: "version cannot be negative"}
|
role: CanonicalSnapshotRole, msg: "version cannot be less than one"}
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, role := range []string{CanonicalRootRole, CanonicalTargetsRole} {
|
for _, role := range []string{CanonicalRootRole, CanonicalTargetsRole} {
|
||||||
|
@ -157,7 +157,7 @@ func SnapshotFromSigned(s *Signed) (*SignedSnapshot, error) {
|
||||||
if err := defaultSerializer.Unmarshal(*s.Signed, &sp); err != nil {
|
if err := defaultSerializer.Unmarshal(*s.Signed, &sp); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if err := isValidSnapshotStructure(sp); err != nil {
|
if err := IsValidSnapshotStructure(sp); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
sigs := make([]Signature, len(s.Signatures))
|
sigs := make([]Signature, len(s.Signatures))
|
||||||
|
|
|
@ -200,6 +200,14 @@ func TestSnapshotFromSignedValidatesVersion(t *testing.T) {
|
||||||
sn.Signed.Version = -1
|
sn.Signed.Version = -1
|
||||||
_, err := snapshotToSignedAndBack(t, sn)
|
_, err := snapshotToSignedAndBack(t, sn)
|
||||||
require.IsType(t, ErrInvalidMetadata{}, err)
|
require.IsType(t, ErrInvalidMetadata{}, err)
|
||||||
|
|
||||||
|
sn.Signed.Version = 0
|
||||||
|
_, err = snapshotToSignedAndBack(t, sn)
|
||||||
|
require.IsType(t, ErrInvalidMetadata{}, err)
|
||||||
|
|
||||||
|
sn.Signed.Version = 1
|
||||||
|
_, err = snapshotToSignedAndBack(t, sn)
|
||||||
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetMeta returns the checksum, or an error if it is missing.
|
// GetMeta returns the checksum, or an error if it is missing.
|
||||||
|
|
|
@ -38,8 +38,8 @@ func isValidTargetsStructure(t Targets, roleName string) error {
|
||||||
role: roleName, msg: fmt.Sprintf("expected type %s, not %s", expectedType, t.Type)}
|
role: roleName, msg: fmt.Sprintf("expected type %s, not %s", expectedType, t.Type)}
|
||||||
}
|
}
|
||||||
|
|
||||||
if t.Version < 0 {
|
if t.Version < 1 {
|
||||||
return ErrInvalidMetadata{role: roleName, msg: "version cannot be negative"}
|
return ErrInvalidMetadata{role: roleName, msg: "version cannot be less than one"}
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, roleObj := range t.Delegations.Roles {
|
for _, roleObj := range t.Delegations.Roles {
|
||||||
|
|
|
@ -236,4 +236,16 @@ func TestTargetsFromSignedValidatesVersion(t *testing.T) {
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
_, err = TargetsFromSigned(s, "targets/a")
|
_, err = TargetsFromSigned(s, "targets/a")
|
||||||
require.IsType(t, ErrInvalidMetadata{}, err)
|
require.IsType(t, ErrInvalidMetadata{}, err)
|
||||||
|
|
||||||
|
tg.Signed.Version = 0
|
||||||
|
s, err = tg.ToSigned()
|
||||||
|
require.NoError(t, err)
|
||||||
|
_, err = TargetsFromSigned(s, "targets/a")
|
||||||
|
require.IsType(t, ErrInvalidMetadata{}, err)
|
||||||
|
|
||||||
|
tg.Signed.Version = 1
|
||||||
|
s, err = tg.ToSigned()
|
||||||
|
require.NoError(t, err)
|
||||||
|
_, err = TargetsFromSigned(s, "targets/a")
|
||||||
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -21,19 +21,19 @@ type Timestamp struct {
|
||||||
Meta Files `json:"meta"`
|
Meta Files `json:"meta"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// isValidTimestampStructure returns an error, or nil, depending on whether the content of the struct
|
// IsValidTimestampStructure returns an error, or nil, depending on whether the content of the struct
|
||||||
// is valid for timestamp metadata. This does not check signatures or expiry, just that
|
// is valid for timestamp metadata. This does not check signatures or expiry, just that
|
||||||
// the metadata content is valid.
|
// the metadata content is valid.
|
||||||
func isValidTimestampStructure(t Timestamp) error {
|
func IsValidTimestampStructure(t Timestamp) error {
|
||||||
expectedType := TUFTypes[CanonicalTimestampRole]
|
expectedType := TUFTypes[CanonicalTimestampRole]
|
||||||
if t.Type != expectedType {
|
if t.Type != expectedType {
|
||||||
return ErrInvalidMetadata{
|
return ErrInvalidMetadata{
|
||||||
role: CanonicalTimestampRole, msg: fmt.Sprintf("expected type %s, not %s", expectedType, t.Type)}
|
role: CanonicalTimestampRole, msg: fmt.Sprintf("expected type %s, not %s", expectedType, t.Type)}
|
||||||
}
|
}
|
||||||
|
|
||||||
if t.Version < 0 {
|
if t.Version < 1 {
|
||||||
return ErrInvalidMetadata{
|
return ErrInvalidMetadata{
|
||||||
role: CanonicalTimestampRole, msg: "version cannot be negative"}
|
role: CanonicalTimestampRole, msg: "version cannot be less than one"}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Meta is a map of FileMeta, so if the role isn't in the map it returns
|
// Meta is a map of FileMeta, so if the role isn't in the map it returns
|
||||||
|
@ -124,7 +124,7 @@ func TimestampFromSigned(s *Signed) (*SignedTimestamp, error) {
|
||||||
if err := defaultSerializer.Unmarshal(*s.Signed, &ts); err != nil {
|
if err := defaultSerializer.Unmarshal(*s.Signed, &ts); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if err := isValidTimestampStructure(ts); err != nil {
|
if err := IsValidTimestampStructure(ts); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
sigs := make([]Signature, len(s.Signatures))
|
sigs := make([]Signature, len(s.Signatures))
|
||||||
|
|
|
@ -200,6 +200,14 @@ func TestTimestampFromSignedValidatesVersion(t *testing.T) {
|
||||||
ts.Signed.Version = -1
|
ts.Signed.Version = -1
|
||||||
_, err := timestampToSignedAndBack(t, ts)
|
_, err := timestampToSignedAndBack(t, ts)
|
||||||
require.IsType(t, ErrInvalidMetadata{}, err)
|
require.IsType(t, ErrInvalidMetadata{}, err)
|
||||||
|
|
||||||
|
ts.Signed.Version = 0
|
||||||
|
_, err = timestampToSignedAndBack(t, ts)
|
||||||
|
require.IsType(t, ErrInvalidMetadata{}, err)
|
||||||
|
|
||||||
|
ts.Signed.Version = 1
|
||||||
|
_, err = timestampToSignedAndBack(t, ts)
|
||||||
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetSnapshot returns the snapshot checksum, or an error if it is missing.
|
// GetSnapshot returns the snapshot checksum, or an error if it is missing.
|
||||||
|
|
|
@ -44,10 +44,15 @@ func (e ErrLowVersion) Error() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
// ErrRoleThreshold indicates we did not validate enough signatures to meet the threshold
|
// ErrRoleThreshold indicates we did not validate enough signatures to meet the threshold
|
||||||
type ErrRoleThreshold struct{}
|
type ErrRoleThreshold struct {
|
||||||
|
Msg string
|
||||||
|
}
|
||||||
|
|
||||||
func (e ErrRoleThreshold) Error() string {
|
func (e ErrRoleThreshold) Error() string {
|
||||||
return "valid signatures did not meet threshold"
|
if e.Msg == "" {
|
||||||
|
return "valid signatures did not meet threshold"
|
||||||
|
}
|
||||||
|
return e.Msg
|
||||||
}
|
}
|
||||||
|
|
||||||
// ErrInvalidKeyType indicates the types for the key and signature it's associated with are
|
// ErrInvalidKeyType indicates the types for the key and signature it's associated with are
|
||||||
|
|
|
@ -21,39 +21,28 @@ var (
|
||||||
ErrWrongType = errors.New("tuf: meta file has wrong type")
|
ErrWrongType = errors.New("tuf: meta file has wrong type")
|
||||||
)
|
)
|
||||||
|
|
||||||
// Verify checks the signatures and metadata (expiry, version) for the signed role
|
|
||||||
// data
|
|
||||||
func Verify(s *data.Signed, role data.BaseRole, minVersion int) error {
|
|
||||||
if err := verifyMeta(s, role.Name, minVersion); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return VerifySignatures(s, role)
|
|
||||||
}
|
|
||||||
|
|
||||||
func verifyMeta(s *data.Signed, role string, minVersion int) error {
|
|
||||||
sm := &data.SignedCommon{}
|
|
||||||
if err := json.Unmarshal(*s.Signed, sm); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !data.ValidTUFType(sm.Type, role) {
|
|
||||||
return ErrWrongType
|
|
||||||
}
|
|
||||||
if IsExpired(sm.Expires) {
|
|
||||||
logrus.Errorf("Metadata for %s expired", role)
|
|
||||||
return ErrExpired{Role: role, Expired: sm.Expires.Format("Mon Jan 2 15:04:05 MST 2006")}
|
|
||||||
}
|
|
||||||
if sm.Version < minVersion {
|
|
||||||
return ErrLowVersion{sm.Version, minVersion}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsExpired checks if the given time passed before the present time
|
// IsExpired checks if the given time passed before the present time
|
||||||
func IsExpired(t time.Time) bool {
|
func IsExpired(t time.Time) bool {
|
||||||
return t.Before(time.Now())
|
return t.Before(time.Now())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// VerifyExpiry returns ErrExpired if the metadata is expired
|
||||||
|
func VerifyExpiry(s *data.SignedCommon, role string) error {
|
||||||
|
if IsExpired(s.Expires) {
|
||||||
|
logrus.Errorf("Metadata for %s expired", role)
|
||||||
|
return ErrExpired{Role: role, Expired: s.Expires.Format("Mon Jan 2 15:04:05 MST 2006")}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifyVersion returns ErrLowVersion if the metadata version is lower than the min version
|
||||||
|
func VerifyVersion(s *data.SignedCommon, minVersion int) error {
|
||||||
|
if s.Version < minVersion {
|
||||||
|
return ErrLowVersion{Actual: s.Version, Current: minVersion}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// VerifySignatures checks the we have sufficient valid signatures for the given role
|
// VerifySignatures checks the we have sufficient valid signatures for the given role
|
||||||
func VerifySignatures(s *data.Signed, roleData data.BaseRole) error {
|
func VerifySignatures(s *data.Signed, roleData data.BaseRole) error {
|
||||||
if len(s.Signatures) == 0 {
|
if len(s.Signatures) == 0 {
|
||||||
|
|
|
@ -1,11 +1,11 @@
|
||||||
package signed
|
package signed
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/docker/go/canonical/json"
|
"github.com/docker/go/canonical/json"
|
||||||
|
"github.com/docker/notary"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"github.com/docker/notary/tuf/data"
|
"github.com/docker/notary/tuf/data"
|
||||||
|
@ -23,8 +23,8 @@ func TestRoleNoKeys(t *testing.T) {
|
||||||
b, err := json.MarshalCanonical(meta)
|
b, err := json.MarshalCanonical(meta)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
s := &data.Signed{Signed: (*json.RawMessage)(&b)}
|
s := &data.Signed{Signed: (*json.RawMessage)(&b)}
|
||||||
Sign(cs, s, []data.PublicKey{k}, 1, nil)
|
require.NoError(t, Sign(cs, s, []data.PublicKey{k}, 1, nil))
|
||||||
err = Verify(s, roleWithKeys, 1)
|
err = VerifySignatures(s, roleWithKeys)
|
||||||
require.IsType(t, ErrRoleThreshold{}, err)
|
require.IsType(t, ErrRoleThreshold{}, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -40,11 +40,45 @@ func TestNotEnoughSigs(t *testing.T) {
|
||||||
b, err := json.MarshalCanonical(meta)
|
b, err := json.MarshalCanonical(meta)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
s := &data.Signed{Signed: (*json.RawMessage)(&b)}
|
s := &data.Signed{Signed: (*json.RawMessage)(&b)}
|
||||||
Sign(cs, s, []data.PublicKey{k}, 1, nil)
|
require.NoError(t, Sign(cs, s, []data.PublicKey{k}, 1, nil))
|
||||||
err = Verify(s, roleWithKeys, 1)
|
err = VerifySignatures(s, roleWithKeys)
|
||||||
require.IsType(t, ErrRoleThreshold{}, err)
|
require.IsType(t, ErrRoleThreshold{}, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestNoSigs(t *testing.T) {
|
||||||
|
cs := NewEd25519()
|
||||||
|
k, err := cs.Create("root", "", data.ED25519Key)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, err)
|
||||||
|
roleWithKeys := data.BaseRole{Name: "root", Keys: data.Keys{k.ID(): k}, Threshold: 2}
|
||||||
|
|
||||||
|
meta := &data.SignedCommon{Type: "Root", Version: 1, Expires: data.DefaultExpires("root")}
|
||||||
|
|
||||||
|
b, err := json.MarshalCanonical(meta)
|
||||||
|
require.NoError(t, err)
|
||||||
|
s := &data.Signed{Signed: (*json.RawMessage)(&b)}
|
||||||
|
require.Equal(t, ErrNoSignatures, VerifySignatures(s, roleWithKeys))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestExactlyEnoughSigs(t *testing.T) {
|
||||||
|
cs := NewEd25519()
|
||||||
|
k, err := cs.Create(data.CanonicalRootRole, "", data.ED25519Key)
|
||||||
|
require.NoError(t, err)
|
||||||
|
roleWithKeys := data.BaseRole{
|
||||||
|
Name: data.CanonicalRootRole, Keys: data.Keys{k.ID(): k}, Threshold: 1}
|
||||||
|
|
||||||
|
meta := &data.SignedCommon{Type: data.TUFTypes[data.CanonicalRootRole], Version: 1,
|
||||||
|
Expires: data.DefaultExpires(data.CanonicalRootRole)}
|
||||||
|
|
||||||
|
b, err := json.MarshalCanonical(meta)
|
||||||
|
require.NoError(t, err)
|
||||||
|
s := &data.Signed{Signed: (*json.RawMessage)(&b)}
|
||||||
|
require.NoError(t, Sign(cs, s, []data.PublicKey{k}, 1, nil))
|
||||||
|
require.Equal(t, 1, len(s.Signatures))
|
||||||
|
|
||||||
|
require.NoError(t, VerifySignatures(s, roleWithKeys))
|
||||||
|
}
|
||||||
|
|
||||||
func TestMoreThanEnoughSigs(t *testing.T) {
|
func TestMoreThanEnoughSigs(t *testing.T) {
|
||||||
cs := NewEd25519()
|
cs := NewEd25519()
|
||||||
k1, err := cs.Create("root", "", data.ED25519Key)
|
k1, err := cs.Create("root", "", data.ED25519Key)
|
||||||
|
@ -58,10 +92,10 @@ func TestMoreThanEnoughSigs(t *testing.T) {
|
||||||
b, err := json.MarshalCanonical(meta)
|
b, err := json.MarshalCanonical(meta)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
s := &data.Signed{Signed: (*json.RawMessage)(&b)}
|
s := &data.Signed{Signed: (*json.RawMessage)(&b)}
|
||||||
Sign(cs, s, []data.PublicKey{k1, k2}, 2, nil)
|
require.NoError(t, Sign(cs, s, []data.PublicKey{k1, k2}, 2, nil))
|
||||||
require.Equal(t, 2, len(s.Signatures))
|
require.Equal(t, 2, len(s.Signatures))
|
||||||
|
|
||||||
err = Verify(s, roleWithKeys, 1)
|
err = VerifySignatures(s, roleWithKeys)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -76,10 +110,10 @@ func TestValidSigWithIncorrectKeyID(t *testing.T) {
|
||||||
b, err := json.MarshalCanonical(meta)
|
b, err := json.MarshalCanonical(meta)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
s := &data.Signed{Signed: (*json.RawMessage)(&b)}
|
s := &data.Signed{Signed: (*json.RawMessage)(&b)}
|
||||||
Sign(cs, s, []data.PublicKey{k1}, 1, nil)
|
require.NoError(t, Sign(cs, s, []data.PublicKey{k1}, 1, nil))
|
||||||
require.Equal(t, 1, len(s.Signatures))
|
require.Equal(t, 1, len(s.Signatures))
|
||||||
s.Signatures[0].KeyID = "invalidIDA"
|
s.Signatures[0].KeyID = "invalidIDA"
|
||||||
err = Verify(s, roleWithKeys, 1)
|
err = VerifySignatures(s, roleWithKeys)
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
require.IsType(t, ErrInvalidKeyID{}, err)
|
require.IsType(t, ErrInvalidKeyID{}, err)
|
||||||
}
|
}
|
||||||
|
@ -95,9 +129,9 @@ func TestDuplicateSigs(t *testing.T) {
|
||||||
b, err := json.MarshalCanonical(meta)
|
b, err := json.MarshalCanonical(meta)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
s := &data.Signed{Signed: (*json.RawMessage)(&b)}
|
s := &data.Signed{Signed: (*json.RawMessage)(&b)}
|
||||||
Sign(cs, s, []data.PublicKey{k}, 1, nil)
|
require.NoError(t, Sign(cs, s, []data.PublicKey{k}, 1, nil))
|
||||||
s.Signatures = append(s.Signatures, s.Signatures[0])
|
s.Signatures = append(s.Signatures, s.Signatures[0])
|
||||||
err = Verify(s, roleWithKeys, 1)
|
err = VerifySignatures(s, roleWithKeys)
|
||||||
require.IsType(t, ErrRoleThreshold{}, err)
|
require.IsType(t, ErrRoleThreshold{}, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -114,101 +148,28 @@ func TestUnknownKeyBelowThreshold(t *testing.T) {
|
||||||
b, err := json.MarshalCanonical(meta)
|
b, err := json.MarshalCanonical(meta)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
s := &data.Signed{Signed: (*json.RawMessage)(&b)}
|
s := &data.Signed{Signed: (*json.RawMessage)(&b)}
|
||||||
Sign(cs, s, []data.PublicKey{k, unknown}, 2, nil)
|
require.NoError(t, Sign(cs, s, []data.PublicKey{k, unknown}, 2, nil))
|
||||||
s.Signatures = append(s.Signatures)
|
s.Signatures = append(s.Signatures)
|
||||||
err = Verify(s, roleWithKeys, 1)
|
err = VerifySignatures(s, roleWithKeys)
|
||||||
require.IsType(t, ErrRoleThreshold{}, err)
|
require.IsType(t, ErrRoleThreshold{}, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test(t *testing.T) {
|
func TestVerifyVersion(t *testing.T) {
|
||||||
cryptoService := NewEd25519()
|
tufType := data.TUFTypes[data.CanonicalRootRole]
|
||||||
type test struct {
|
meta := data.SignedCommon{Type: tufType, Version: 1, Expires: data.DefaultExpires(data.CanonicalRootRole)}
|
||||||
name string
|
require.Equal(t, ErrLowVersion{Actual: 1, Current: 2}, VerifyVersion(&meta, 2))
|
||||||
roleData data.BaseRole
|
require.NoError(t, VerifyVersion(&meta, 1))
|
||||||
s *data.Signed
|
|
||||||
ver int
|
|
||||||
exp *time.Time
|
|
||||||
typ string
|
|
||||||
role string
|
|
||||||
err error
|
|
||||||
mut func(*test)
|
|
||||||
}
|
|
||||||
|
|
||||||
expiredTime := time.Now().Add(-time.Hour)
|
|
||||||
minVer := 10
|
|
||||||
tests := []test{
|
|
||||||
{
|
|
||||||
name: "no signatures",
|
|
||||||
mut: func(t *test) { t.s.Signatures = []data.Signature{} },
|
|
||||||
err: ErrNoSignatures,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "unknown role",
|
|
||||||
role: "foo",
|
|
||||||
err: errors.New("tuf: meta file has wrong type"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "exactly enough signatures",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "wrong type",
|
|
||||||
typ: "bar",
|
|
||||||
err: ErrWrongType,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "low version",
|
|
||||||
ver: minVer - 1,
|
|
||||||
err: ErrLowVersion{minVer - 1, minVer},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
role: "root",
|
|
||||||
name: "expired",
|
|
||||||
exp: &expiredTime,
|
|
||||||
err: ErrExpired{"root", expiredTime.Format("Mon Jan 2 15:04:05 MST 2006")},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, run := range tests {
|
|
||||||
if run.role == "" {
|
|
||||||
run.role = "root"
|
|
||||||
}
|
|
||||||
if run.ver == 0 {
|
|
||||||
run.ver = minVer
|
|
||||||
}
|
|
||||||
if run.exp == nil {
|
|
||||||
expires := time.Now().Add(time.Hour)
|
|
||||||
run.exp = &expires
|
|
||||||
}
|
|
||||||
if run.typ == "" {
|
|
||||||
run.typ = data.TUFTypes[run.role]
|
|
||||||
}
|
|
||||||
if run.s == nil {
|
|
||||||
k, _ := cryptoService.Create("root", "", data.ED25519Key)
|
|
||||||
run.roleData = data.BaseRole{Name: "root", Keys: data.Keys{k.ID(): k}, Threshold: 1}
|
|
||||||
meta := &data.SignedCommon{Type: run.typ, Version: run.ver, Expires: *run.exp}
|
|
||||||
|
|
||||||
b, err := json.MarshalCanonical(meta)
|
|
||||||
require.NoError(t, err)
|
|
||||||
s := &data.Signed{Signed: (*json.RawMessage)(&b)}
|
|
||||||
Sign(cryptoService, s, []data.PublicKey{k}, 1, nil)
|
|
||||||
run.s = s
|
|
||||||
}
|
|
||||||
if run.mut != nil {
|
|
||||||
run.mut(&run)
|
|
||||||
}
|
|
||||||
|
|
||||||
err := Verify(run.s, run.roleData, minVer)
|
|
||||||
if e, ok := run.err.(ErrExpired); ok {
|
|
||||||
requireErrExpired(t, err, e)
|
|
||||||
} else {
|
|
||||||
require.Equal(t, run.err, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func requireErrExpired(t *testing.T, err error, expected ErrExpired) {
|
func TestVerifyExpiry(t *testing.T) {
|
||||||
actual, ok := err.(ErrExpired)
|
tufType := data.TUFTypes[data.CanonicalRootRole]
|
||||||
if !ok {
|
notExpired := data.DefaultExpires(data.CanonicalRootRole)
|
||||||
t.Fatalf("expected err to have type ErrExpired, got %T", err)
|
expired := time.Now().Add(-1 * notary.Year)
|
||||||
}
|
|
||||||
require.Equal(t, actual.Expired, expected.Expired)
|
require.NoError(t, VerifyExpiry(
|
||||||
|
&data.SignedCommon{Type: tufType, Version: 1, Expires: notExpired}, data.CanonicalRootRole))
|
||||||
|
err := VerifyExpiry(
|
||||||
|
&data.SignedCommon{Type: tufType, Version: 1, Expires: expired}, data.CanonicalRootRole)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.IsType(t, ErrExpired{}, err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -4,6 +4,7 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"sort"
|
"sort"
|
||||||
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/docker/go/canonical/json"
|
"github.com/docker/go/canonical/json"
|
||||||
|
@ -13,6 +14,7 @@ import (
|
||||||
"github.com/docker/notary/tuf/data"
|
"github.com/docker/notary/tuf/data"
|
||||||
"github.com/docker/notary/tuf/utils"
|
"github.com/docker/notary/tuf/utils"
|
||||||
fuzz "github.com/google/gofuzz"
|
fuzz "github.com/google/gofuzz"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
tuf "github.com/docker/notary/tuf"
|
tuf "github.com/docker/notary/tuf"
|
||||||
"github.com/docker/notary/tuf/signed"
|
"github.com/docker/notary/tuf/signed"
|
||||||
|
@ -52,6 +54,19 @@ func CreateKey(cs signed.CryptoService, gun, role, keyAlgorithm string) (data.Pu
|
||||||
return key, nil
|
return key, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CopyKeys copies keys of a particular role to a new cryptoservice, and returns that cryptoservice
|
||||||
|
func CopyKeys(t *testing.T, from signed.CryptoService, roles ...string) signed.CryptoService {
|
||||||
|
memKeyStore := trustmanager.NewKeyMemoryStore(passphrase.ConstantRetriever("pass"))
|
||||||
|
for _, role := range roles {
|
||||||
|
for _, keyID := range from.ListKeys(role) {
|
||||||
|
key, _, err := from.GetPrivateKey(keyID)
|
||||||
|
require.NoError(t, err)
|
||||||
|
memKeyStore.AddKey(trustmanager.KeyInfo{Role: role}, key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return cryptoservice.NewCryptoService(memKeyStore)
|
||||||
|
}
|
||||||
|
|
||||||
// EmptyRepo creates an in memory crypto service
|
// EmptyRepo creates an in memory crypto service
|
||||||
// and initializes a repo with no targets. Delegations are only created
|
// and initializes a repo with no targets. Delegations are only created
|
||||||
// if delegation roles are passed in.
|
// if delegation roles are passed in.
|
||||||
|
|
|
@ -11,7 +11,6 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/docker/notary/tuf"
|
|
||||||
"github.com/docker/notary/tuf/data"
|
"github.com/docker/notary/tuf/data"
|
||||||
"github.com/docker/notary/tuf/signed"
|
"github.com/docker/notary/tuf/signed"
|
||||||
"github.com/docker/notary/tuf/store"
|
"github.com/docker/notary/tuf/store"
|
||||||
|
@ -387,8 +386,6 @@ func TestSwizzlerChangeRootKey(t *testing.T) {
|
||||||
err := f.ChangeRootKey()
|
err := f.ChangeRootKey()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
tufRepo := tuf.NewRepo(f.CryptoService)
|
|
||||||
|
|
||||||
// we want to test these in a specific order
|
// we want to test these in a specific order
|
||||||
roles := []string{data.CanonicalRootRole, data.CanonicalTargetsRole, data.CanonicalSnapshotRole,
|
roles := []string{data.CanonicalRootRole, data.CanonicalTargetsRole, data.CanonicalSnapshotRole,
|
||||||
data.CanonicalTimestampRole, "targets/a", "targets/a/b"}
|
data.CanonicalTimestampRole, "targets/a", "targets/a/b"}
|
||||||
|
@ -408,25 +405,23 @@ func TestSwizzlerChangeRootKey(t *testing.T) {
|
||||||
|
|
||||||
require.NotEqual(t, len(origRoot.Signed.Keys), len(newRoot.Signed.Keys))
|
require.NotEqual(t, len(origRoot.Signed.Keys), len(newRoot.Signed.Keys))
|
||||||
|
|
||||||
var rootRole data.Role
|
|
||||||
for r, origRole := range origRoot.Signed.Roles {
|
for r, origRole := range origRoot.Signed.Roles {
|
||||||
newRole := newRoot.Signed.Roles[r]
|
newRole := newRoot.Signed.Roles[r]
|
||||||
require.Len(t, origRole.KeyIDs, 1)
|
require.Len(t, origRole.KeyIDs, 1)
|
||||||
require.Len(t, newRole.KeyIDs, 1)
|
require.Len(t, newRole.KeyIDs, 1)
|
||||||
if r == data.CanonicalRootRole {
|
if r == data.CanonicalRootRole {
|
||||||
require.NotEqual(t, origRole.KeyIDs[0], newRole.KeyIDs[0])
|
require.NotEqual(t, origRole.KeyIDs[0], newRole.KeyIDs[0])
|
||||||
rootRole = data.Role{RootRole: *newRole, Name: data.CanonicalRootRole}
|
|
||||||
} else {
|
} else {
|
||||||
require.Equal(t, origRole.KeyIDs[0], newRole.KeyIDs[0])
|
require.Equal(t, origRole.KeyIDs[0], newRole.KeyIDs[0])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
require.NoError(t, tufRepo.SetRoot(newRoot))
|
rootRole, err := newRoot.BuildBaseRole(data.CanonicalRootRole)
|
||||||
|
require.NoError(t, err)
|
||||||
signedThing, err := newRoot.ToSigned()
|
signedThing, err := newRoot.ToSigned()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
newKey := newRoot.Signed.Keys[rootRole.KeyIDs[0]]
|
require.NoError(t, signed.VerifySignatures(signedThing, rootRole))
|
||||||
require.NoError(t, signed.Verify(signedThing,
|
require.NoError(t, signed.VerifyVersion(&(newRoot.Signed.SignedCommon), 1))
|
||||||
data.BaseRole{Name: data.CanonicalRootRole, Keys: map[string]data.PublicKey{newKey.ID(): newKey}, Threshold: 1}, 1))
|
|
||||||
default:
|
default:
|
||||||
require.True(t, bytes.Equal(origMeta, newMeta), "bytes have changed for role %s", role)
|
require.True(t, bytes.Equal(origMeta, newMeta), "bytes have changed for role %s", role)
|
||||||
}
|
}
|
||||||
|
|
31
tuf/tuf.go
31
tuf/tuf.go
|
@ -549,37 +549,6 @@ func (tr *Repo) InitTimestamp() error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetRoot sets the Repo.Root field to the SignedRoot object.
|
|
||||||
func (tr *Repo) SetRoot(s *data.SignedRoot) error {
|
|
||||||
tr.Root = s
|
|
||||||
var err error
|
|
||||||
// originalRootRole is the root role prior to any mutations that might
|
|
||||||
// occur on tr.Root.
|
|
||||||
tr.originalRootRole, err = tr.Root.BuildBaseRole(data.CanonicalRootRole)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetTimestamp parses the Signed object into a SignedTimestamp object
|
|
||||||
// and sets the Repo.Timestamp field.
|
|
||||||
func (tr *Repo) SetTimestamp(s *data.SignedTimestamp) error {
|
|
||||||
tr.Timestamp = s
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetSnapshot parses the Signed object into a SignedSnapshots object
|
|
||||||
// and sets the Repo.Snapshot field.
|
|
||||||
func (tr *Repo) SetSnapshot(s *data.SignedSnapshot) error {
|
|
||||||
tr.Snapshot = s
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetTargets sets the SignedTargets object agaist the role in the
|
|
||||||
// Repo.Targets map.
|
|
||||||
func (tr *Repo) SetTargets(role string, s *data.SignedTargets) error {
|
|
||||||
tr.Targets[role] = s
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TargetMeta returns the FileMeta entry for the given path in the
|
// TargetMeta returns the FileMeta entry for the given path in the
|
||||||
// targets file associated with the given role. This may be nil if
|
// targets file associated with the given role. This may be nil if
|
||||||
// the target isn't found in the targets file.
|
// the target isn't found in the targets file.
|
||||||
|
|
|
@ -1248,8 +1248,8 @@ func TestSignRootOldKeyCertMissing(t *testing.T) {
|
||||||
// Now forget all about the old certificate: drop it from the Root carried keys
|
// Now forget all about the old certificate: drop it from the Root carried keys
|
||||||
delete(repo.Root.Signed.Keys, oldRootCertKey.ID())
|
delete(repo.Root.Signed.Keys, oldRootCertKey.ID())
|
||||||
repo2 := NewRepo(cs)
|
repo2 := NewRepo(cs)
|
||||||
err = repo2.SetRoot(repo.Root)
|
repo2.Root = repo.Root
|
||||||
require.NoError(t, err)
|
repo2.originalRootRole = updatedRootRole
|
||||||
|
|
||||||
// Create a second signature
|
// Create a second signature
|
||||||
signedRoot, err = repo2.SignRoot(data.DefaultExpires(data.CanonicalRootRole))
|
signedRoot, err = repo2.SignRoot(data.DefaultExpires(data.CanonicalRootRole))
|
||||||
|
|
Loading…
Reference in New Issue