Verify layer tarstream

This adds verification for getting layer data out
of layerstore. These failures should only be possible
if layer metadata files have been manually changed
or if something is wrong with the tar-split algorithm.

Failing early makes sure we don’t upload invalid data
to the registries where it would fail after someone
tries to pull it.

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
(cherry picked from commit e29e580f7fe628e936925681a4885d0b655bb151)
This commit is contained in:
Tonis Tiigi 2016-02-09 11:38:37 -08:00 committed by Tibor Vass
parent 46e41dc2f8
commit 50a498ea1c
5 changed files with 126 additions and 14 deletions

View File

@ -6,6 +6,7 @@ import (
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"strings"
"testing" "testing"
"github.com/docker/distribution/digest" "github.com/docker/distribution/digest"
@ -56,7 +57,7 @@ func newTestGraphDriver(t *testing.T) (graphdriver.Driver, func()) {
} }
} }
func newTestStore(t *testing.T) (Store, func()) { func newTestStore(t *testing.T) (Store, string, func()) {
td, err := ioutil.TempDir("", "layerstore-") td, err := ioutil.TempDir("", "layerstore-")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -72,7 +73,7 @@ func newTestStore(t *testing.T) (Store, func()) {
t.Fatal(err) t.Fatal(err)
} }
return ls, func() { return ls, td, func() {
graphcleanup() graphcleanup()
os.RemoveAll(td) os.RemoveAll(td)
} }
@ -265,7 +266,7 @@ func assertLayerEqual(t *testing.T, l1, l2 Layer) {
} }
func TestMountAndRegister(t *testing.T) { func TestMountAndRegister(t *testing.T) {
ls, cleanup := newTestStore(t) ls, _, cleanup := newTestStore(t)
defer cleanup() defer cleanup()
li := initWithFiles(newTestFile("testfile.txt", []byte("some test data"), 0644)) li := initWithFiles(newTestFile("testfile.txt", []byte("some test data"), 0644))
@ -306,7 +307,7 @@ func TestMountAndRegister(t *testing.T) {
} }
func TestLayerRelease(t *testing.T) { func TestLayerRelease(t *testing.T) {
ls, cleanup := newTestStore(t) ls, _, cleanup := newTestStore(t)
defer cleanup() defer cleanup()
layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644))) layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644)))
@ -351,7 +352,7 @@ func TestLayerRelease(t *testing.T) {
} }
func TestStoreRestore(t *testing.T) { func TestStoreRestore(t *testing.T) {
ls, cleanup := newTestStore(t) ls, _, cleanup := newTestStore(t)
defer cleanup() defer cleanup()
layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644))) layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644)))
@ -472,7 +473,7 @@ func TestStoreRestore(t *testing.T) {
} }
func TestTarStreamStability(t *testing.T) { func TestTarStreamStability(t *testing.T) {
ls, cleanup := newTestStore(t) ls, _, cleanup := newTestStore(t)
defer cleanup() defer cleanup()
files1 := []FileApplier{ files1 := []FileApplier{
@ -668,7 +669,7 @@ func assertActivityCount(t *testing.T, l RWLayer, expected int) {
} }
func TestRegisterExistingLayer(t *testing.T) { func TestRegisterExistingLayer(t *testing.T) {
ls, cleanup := newTestStore(t) ls, _, cleanup := newTestStore(t)
defer cleanup() defer cleanup()
baseFiles := []FileApplier{ baseFiles := []FileApplier{
@ -702,3 +703,69 @@ func TestRegisterExistingLayer(t *testing.T) {
assertReferences(t, layer2a, layer2b) assertReferences(t, layer2a, layer2b)
} }
// TestTarStreamVerification checks that reading a layer's tar stream
// fails with a verification error when the on-disk tar-split metadata
// no longer matches the layer's recorded diff ID.
func TestTarStreamVerification(t *testing.T) {
	ls, tmpdir, cleanup := newTestStore(t)
	defer cleanup()

	files1 := []FileApplier{
		newTestFile("/foo", []byte("abc"), 0644),
		newTestFile("/bar", []byte("def"), 0644),
	}
	files2 := []FileApplier{
		newTestFile("/foo", []byte("abc"), 0644),
		newTestFile("/bar", []byte("def"), 0600), // different perm
	}

	tar1, err := tarFromFiles(files1...)
	if err != nil {
		t.Fatal(err)
	}
	tar2, err := tarFromFiles(files2...)
	if err != nil {
		t.Fatal(err)
	}

	layer1, err := ls.Register(bytes.NewReader(tar1), "")
	if err != nil {
		t.Fatal(err)
	}
	layer2, err := ls.Register(bytes.NewReader(tar2), "")
	if err != nil {
		t.Fatal(err)
	}

	id1 := digest.Digest(layer1.ChainID())
	id2 := digest.Digest(layer2.ChainID())

	// Overwrite layer2's tar-split metadata with layer1's so the tar
	// stream reconstructed for layer2 no longer matches its diff ID.
	// defer the Closes so the files are not leaked on a t.Fatal path;
	// the explicit Closes below flush the copy before TarStream reads it.
	src, err := os.Open(filepath.Join(tmpdir, id1.Algorithm().String(), id1.Hex(), "tar-split.json.gz"))
	if err != nil {
		t.Fatal(err)
	}
	defer src.Close()
	dst, err := os.Create(filepath.Join(tmpdir, id2.Algorithm().String(), id2.Hex(), "tar-split.json.gz"))
	if err != nil {
		t.Fatal(err)
	}
	defer dst.Close()
	if _, err := io.Copy(dst, src); err != nil {
		t.Fatal(err)
	}
	src.Close()
	dst.Close()

	ts, err := layer2.TarStream()
	if err != nil {
		t.Fatal(err)
	}
	defer ts.Close()

	// Draining the stream must fail: the verified reader detects the
	// digest mismatch once the underlying stream reaches EOF.
	_, err = io.Copy(ioutil.Discard, ts)
	if err == nil {
		t.Fatal("expected data verification to fail")
	}
	if !strings.Contains(err.Error(), "could not verify layer data") {
		t.Fatalf("wrong error returned from tarstream: %q", err)
	}
}

View File

@ -16,7 +16,7 @@ func graphDiffSize(ls Store, l Layer) (int64, error) {
// Unix as Windows graph driver does not support Changes which is indirectly // Unix as Windows graph driver does not support Changes which is indirectly
// invoked by calling DiffSize on the driver // invoked by calling DiffSize on the driver
func TestLayerSize(t *testing.T) { func TestLayerSize(t *testing.T) {
ls, cleanup := newTestStore(t) ls, _, cleanup := newTestStore(t)
defer cleanup() defer cleanup()
content1 := []byte("Base contents") content1 := []byte("Base contents")

View File

@ -268,7 +268,7 @@ func TestLayerMigrationNoTarsplit(t *testing.T) {
} }
func TestMountMigration(t *testing.T) { func TestMountMigration(t *testing.T) {
ls, cleanup := newTestStore(t) ls, _, cleanup := newTestStore(t)
defer cleanup() defer cleanup()
baseFiles := []FileApplier{ baseFiles := []FileApplier{

View File

@ -11,7 +11,7 @@ import (
) )
func TestMountInit(t *testing.T) { func TestMountInit(t *testing.T) {
ls, cleanup := newTestStore(t) ls, _, cleanup := newTestStore(t)
defer cleanup() defer cleanup()
basefile := newTestFile("testfile.txt", []byte("base data!"), 0644) basefile := newTestFile("testfile.txt", []byte("base data!"), 0644)
@ -63,7 +63,7 @@ func TestMountInit(t *testing.T) {
} }
func TestMountSize(t *testing.T) { func TestMountSize(t *testing.T) {
ls, cleanup := newTestStore(t) ls, _, cleanup := newTestStore(t)
defer cleanup() defer cleanup()
content1 := []byte("Base contents") content1 := []byte("Base contents")
@ -105,7 +105,7 @@ func TestMountSize(t *testing.T) {
} }
func TestMountChanges(t *testing.T) { func TestMountChanges(t *testing.T) {
ls, cleanup := newTestStore(t) ls, _, cleanup := newTestStore(t)
defer cleanup() defer cleanup()
basefiles := []FileApplier{ basefiles := []FileApplier{

View File

@ -1,6 +1,11 @@
package layer package layer
import "io" import (
"fmt"
"io"
"github.com/docker/distribution/digest"
)
type roLayer struct { type roLayer struct {
chainID ChainID chainID ChainID
@ -29,7 +34,11 @@ func (rl *roLayer) TarStream() (io.ReadCloser, error) {
pw.Close() pw.Close()
} }
}() }()
return pr, nil rc, err := newVerifiedReadCloser(pr, digest.Digest(rl.diffID))
if err != nil {
return nil, err
}
return rc, nil
} }
func (rl *roLayer) ChainID() ChainID { func (rl *roLayer) ChainID() ChainID {
@ -117,3 +126,39 @@ func storeLayer(tx MetadataTransaction, layer *roLayer) error {
return nil return nil
} }
// newVerifiedReadCloser wraps rc so that every byte read from it is fed
// into a digest verifier for dgst; the final Read reports an error when
// the accumulated digest does not match. An error is returned only if a
// verifier cannot be constructed for the given digest.
func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) {
	v, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		return nil, err
	}
	vrc := &verifiedReadCloser{
		rc:       rc,
		dgst:     dgst,
		verifier: v,
	}
	return vrc, nil
}
// verifiedReadCloser copies everything read from rc into verifier and,
// when the underlying stream hits EOF, turns a digest mismatch into a
// read error (see Read).
type verifiedReadCloser struct {
	rc       io.ReadCloser   // underlying layer tar stream
	dgst     digest.Digest   // expected digest of the full stream
	verifier digest.Verifier // accumulates the digest of bytes read so far
}
// Read reads from the wrapped stream, feeding every byte read into the
// digest verifier. When the stream reaches EOF, it checks that the
// accumulated digest matches the expected one and converts a mismatch
// into an error, so consumers fail before using corrupted layer data.
func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) {
	n, err = vrc.rc.Read(p)
	if n > 0 {
		// Do not shadow n here: on a verifier write error we must still
		// report how many bytes were actually placed into p, per the
		// io.Reader contract — not the verifier's write count.
		if _, werr := vrc.verifier.Write(p[:n]); werr != nil {
			return n, werr
		}
	}
	if err == io.EOF {
		if !vrc.verifier.Verified() {
			err = fmt.Errorf("could not verify layer data for: %s. This may be because internal files in the layer store were modified. Re-pulling or rebuilding this image may resolve the issue", vrc.dgst)
		}
	}
	return
}
// Close closes the underlying stream; any pending verification state is
// simply discarded.
func (vrc *verifiedReadCloser) Close() error {
	return vrc.rc.Close()
}