chunked: estargz support
in addition to zstd:chunked, add support for the estargz format. estargz is maintained at github.com/containerd/stargz-snapshotter Images using estargz can be used on old clients and registries that have no support for the zstd compression algorithm. Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>
This commit is contained in:
parent
f199da6df4
commit
db11cbef4f
|
|
@ -6,6 +6,7 @@ require (
|
|||
github.com/BurntSushi/toml v0.4.1
|
||||
github.com/Microsoft/go-winio v0.5.0
|
||||
github.com/Microsoft/hcsshim v0.8.20
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.7.0
|
||||
github.com/docker/go-units v0.4.0
|
||||
github.com/google/go-intervals v0.0.2
|
||||
github.com/hashicorp/go-multierror v1.1.1
|
||||
|
|
|
|||
|
|
@ -164,6 +164,8 @@ github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJ
|
|||
github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
|
||||
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
|
||||
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.7.0 h1:1d/rydzTywc76lnjJb6qbPCiTiCwts49AzKps/Ecblw=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.7.0/go.mod h1:83VWDqHnurTKliEB0YvWMiCfLDwv4Cjj1X9Vk98GJZw=
|
||||
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
|
||||
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
|
||||
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
|
||||
|
|
@ -388,6 +390,7 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
|
|||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||
github.com/klauspost/compress v1.13.4 h1:0zhec2I8zGnjWcKyLl6i3gPqKANCCn5e9xmviEEeX6s=
|
||||
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
|
||||
|
|
@ -711,6 +714,7 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
|
|
|||
|
|
@ -1,14 +1,18 @@
|
|||
package chunked
|
||||
|
||||
import (
|
||||
archivetar "archive/tar"
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
|
||||
"github.com/containerd/stargz-snapshotter/estargz"
|
||||
"github.com/containers/storage/pkg/chunked/compressor"
|
||||
"github.com/containers/storage/pkg/chunked/internal"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/klauspost/pgzip"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/vbatts/tar-split/archive/tar"
|
||||
|
|
@ -50,25 +54,129 @@ func isZstdChunkedFrameMagic(data []byte) bool {
|
|||
return bytes.Equal(internal.ZstdChunkedFrameMagic, data[:8])
|
||||
}
|
||||
|
||||
func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) {
|
||||
// information on the format here https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md
|
||||
footerSize := int64(51)
|
||||
if blobSize <= footerSize {
|
||||
return nil, 0, errors.New("blob too small")
|
||||
}
|
||||
chunk := ImageSourceChunk{
|
||||
Offset: uint64(blobSize - footerSize),
|
||||
Length: uint64(footerSize),
|
||||
}
|
||||
parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
var reader io.ReadCloser
|
||||
select {
|
||||
case r := <-parts:
|
||||
reader = r
|
||||
case err := <-errs:
|
||||
return nil, 0, err
|
||||
}
|
||||
defer reader.Close()
|
||||
footer := make([]byte, footerSize)
|
||||
if _, err := io.ReadFull(reader, footer); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
/* Read the ToC offset:
|
||||
- 10 bytes gzip header
|
||||
- 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ"))
|
||||
- 2 bytes Extra: SI1 = 'S', SI2 = 'G'
|
||||
- 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ"))
|
||||
- 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC)
|
||||
- 5 bytes flate header: BFINAL = 1(last block), BTYPE = 0(non-compressed block), LEN = 0
|
||||
- 8 bytes gzip footer
|
||||
*/
|
||||
tocOffset, err := strconv.ParseInt(string(footer[16:16+22-6]), 16, 64)
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrap(err, "parse ToC offset")
|
||||
}
|
||||
|
||||
size := int64(blobSize - footerSize - tocOffset)
|
||||
// set a reasonable limit
|
||||
if size > (1<<20)*50 {
|
||||
return nil, 0, errors.New("manifest too big")
|
||||
}
|
||||
|
||||
chunk = ImageSourceChunk{
|
||||
Offset: uint64(tocOffset),
|
||||
Length: uint64(size),
|
||||
}
|
||||
parts, errs, err = blobStream.GetBlobAt([]ImageSourceChunk{chunk})
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
var tocReader io.ReadCloser
|
||||
select {
|
||||
case r := <-parts:
|
||||
tocReader = r
|
||||
case err := <-errs:
|
||||
return nil, 0, err
|
||||
}
|
||||
defer tocReader.Close()
|
||||
|
||||
r, err := pgzip.NewReader(tocReader)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
aTar := archivetar.NewReader(r)
|
||||
|
||||
header, err := aTar.Next()
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
// set a reasonable limit
|
||||
if header.Size > (1<<20)*50 {
|
||||
return nil, 0, errors.New("manifest too big")
|
||||
}
|
||||
|
||||
manifestUncompressed := make([]byte, header.Size)
|
||||
if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
manifestDigester := digest.Canonical.Digester()
|
||||
manifestChecksum := manifestDigester.Hash()
|
||||
if _, err := manifestChecksum.Write(manifestUncompressed); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
d, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation])
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
if manifestDigester.Digest() != d {
|
||||
return nil, 0, errors.New("invalid manifest checksum")
|
||||
}
|
||||
|
||||
return manifestUncompressed, tocOffset, nil
|
||||
}
|
||||
|
||||
// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must
|
||||
// be specified.
|
||||
// This function uses the io.containers.zstd-chunked. annotations when specified.
|
||||
func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, error) {
|
||||
func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) {
|
||||
footerSize := int64(internal.FooterSizeSupported)
|
||||
if blobSize <= footerSize {
|
||||
return nil, errors.New("blob too small")
|
||||
return nil, 0, errors.New("blob too small")
|
||||
}
|
||||
|
||||
manifestChecksumAnnotation := annotations[internal.ManifestChecksumKey]
|
||||
if manifestChecksumAnnotation == "" {
|
||||
return nil, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey)
|
||||
return nil, 0, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey)
|
||||
}
|
||||
|
||||
var offset, length, lengthUncompressed, manifestType uint64
|
||||
|
||||
if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" {
|
||||
if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &offset, &length, &lengthUncompressed, &manifestType); err != nil {
|
||||
return nil, err
|
||||
return nil, 0, err
|
||||
}
|
||||
} else {
|
||||
chunk := ImageSourceChunk{
|
||||
|
|
@ -77,18 +185,18 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann
|
|||
}
|
||||
parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, 0, err
|
||||
}
|
||||
var reader io.ReadCloser
|
||||
select {
|
||||
case r := <-parts:
|
||||
reader = r
|
||||
case err := <-errs:
|
||||
return nil, err
|
||||
return nil, 0, err
|
||||
}
|
||||
footer := make([]byte, footerSize)
|
||||
if _, err := io.ReadFull(reader, footer); err != nil {
|
||||
return nil, err
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
offset = binary.LittleEndian.Uint64(footer[0:8])
|
||||
|
|
@ -96,20 +204,20 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann
|
|||
lengthUncompressed = binary.LittleEndian.Uint64(footer[16:24])
|
||||
manifestType = binary.LittleEndian.Uint64(footer[24:32])
|
||||
if !isZstdChunkedFrameMagic(footer[32:40]) {
|
||||
return nil, errors.New("invalid magic number")
|
||||
return nil, 0, errors.New("invalid magic number")
|
||||
}
|
||||
}
|
||||
|
||||
if manifestType != internal.ManifestTypeCRFS {
|
||||
return nil, errors.New("invalid manifest type")
|
||||
return nil, 0, errors.New("invalid manifest type")
|
||||
}
|
||||
|
||||
// set a reasonable limit
|
||||
if length > (1<<20)*50 {
|
||||
return nil, errors.New("manifest too big")
|
||||
return nil, 0, errors.New("manifest too big")
|
||||
}
|
||||
if lengthUncompressed > (1<<20)*50 {
|
||||
return nil, errors.New("manifest too big")
|
||||
return nil, 0, errors.New("manifest too big")
|
||||
}
|
||||
|
||||
chunk := ImageSourceChunk{
|
||||
|
|
@ -119,47 +227,47 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann
|
|||
|
||||
parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, 0, err
|
||||
}
|
||||
var reader io.ReadCloser
|
||||
select {
|
||||
case r := <-parts:
|
||||
reader = r
|
||||
case err := <-errs:
|
||||
return nil, err
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
manifest := make([]byte, length)
|
||||
if _, err := io.ReadFull(reader, manifest); err != nil {
|
||||
return nil, err
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
manifestDigester := digest.Canonical.Digester()
|
||||
manifestChecksum := manifestDigester.Hash()
|
||||
if _, err := manifestChecksum.Write(manifest); err != nil {
|
||||
return nil, err
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
d, err := digest.Parse(manifestChecksumAnnotation)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, 0, err
|
||||
}
|
||||
if manifestDigester.Digest() != d {
|
||||
return nil, errors.New("invalid manifest checksum")
|
||||
return nil, 0, errors.New("invalid manifest checksum")
|
||||
}
|
||||
|
||||
decoder, err := zstd.NewReader(nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, 0, err
|
||||
}
|
||||
defer decoder.Close()
|
||||
|
||||
b := make([]byte, 0, lengthUncompressed)
|
||||
if decoded, err := decoder.DecodeAll(manifest, b); err == nil {
|
||||
return decoded, nil
|
||||
return decoded, int64(offset), nil
|
||||
}
|
||||
|
||||
return manifest, nil
|
||||
return manifest, int64(offset), nil
|
||||
}
|
||||
|
||||
// ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
|
||||
|
|
|
|||
|
|
@ -50,7 +50,7 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
|
|||
return offset, nil
|
||||
}
|
||||
|
||||
var metadata []internal.ZstdFileMetadata
|
||||
var metadata []internal.FileMetadata
|
||||
for {
|
||||
hdr, err := tr.Next()
|
||||
if err != nil {
|
||||
|
|
@ -112,7 +112,7 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
|
|||
for k, v := range hdr.Xattrs {
|
||||
xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v))
|
||||
}
|
||||
m := internal.ZstdFileMetadata{
|
||||
m := internal.FileMetadata{
|
||||
Type: typ,
|
||||
Name: hdr.Name,
|
||||
Linkname: hdr.Linkname,
|
||||
|
|
|
|||
|
|
@ -17,12 +17,12 @@ import (
|
|||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
type ZstdTOC struct {
|
||||
type TOC struct {
|
||||
Version int `json:"version"`
|
||||
Entries []ZstdFileMetadata `json:"entries"`
|
||||
Entries []FileMetadata `json:"entries"`
|
||||
}
|
||||
|
||||
type ZstdFileMetadata struct {
|
||||
type FileMetadata struct {
|
||||
Type string `json:"type"`
|
||||
Name string `json:"name"`
|
||||
Linkname string `json:"linkName,omitempty"`
|
||||
|
|
@ -114,11 +114,11 @@ func appendZstdSkippableFrame(dest io.Writer, data []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, metadata []ZstdFileMetadata, level int) error {
|
||||
func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, metadata []FileMetadata, level int) error {
|
||||
// 8 is the size of the zstd skippable frame header + the frame size
|
||||
manifestOffset := offset + 8
|
||||
|
||||
toc := ZstdTOC{
|
||||
toc := TOC{
|
||||
Version: 1,
|
||||
Entries: metadata,
|
||||
}
|
||||
|
|
|
|||
|
|
@ -15,6 +15,7 @@ import (
|
|||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/stargz-snapshotter/estargz"
|
||||
storage "github.com/containers/storage"
|
||||
graphdriver "github.com/containers/storage/drivers"
|
||||
driversCopy "github.com/containers/storage/drivers/copy"
|
||||
|
|
@ -23,6 +24,7 @@ import (
|
|||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/containers/storage/types"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/klauspost/pgzip"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
|
@ -35,13 +37,22 @@ const (
|
|||
newFileFlags = (unix.O_CREAT | unix.O_TRUNC | unix.O_EXCL | unix.O_WRONLY)
|
||||
containersOverrideXattr = "user.containers.override_stat"
|
||||
bigDataKey = "zstd-chunked-manifest"
|
||||
|
||||
fileTypeZstdChunked = iota
|
||||
fileTypeEstargz = iota
|
||||
)
|
||||
|
||||
type chunkedZstdDiffer struct {
|
||||
type compressedFileType int
|
||||
|
||||
type chunkedDiffer struct {
|
||||
stream ImageSourceSeekable
|
||||
manifest []byte
|
||||
layersMetadata map[string][]internal.ZstdFileMetadata
|
||||
layersMetadata map[string][]internal.FileMetadata
|
||||
layersTarget map[string]string
|
||||
tocOffset int64
|
||||
fileType compressedFileType
|
||||
|
||||
gzipReader *pgzip.Reader
|
||||
}
|
||||
|
||||
func timeToTimespec(time time.Time) (ts unix.Timespec) {
|
||||
|
|
@ -101,11 +112,11 @@ func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, us
|
|||
return dstFile, st.Size(), err
|
||||
}
|
||||
|
||||
func prepareOtherLayersCache(layersMetadata map[string][]internal.ZstdFileMetadata) map[string]map[string]*internal.ZstdFileMetadata {
|
||||
maps := make(map[string]map[string]*internal.ZstdFileMetadata)
|
||||
func prepareOtherLayersCache(layersMetadata map[string][]internal.FileMetadata) map[string]map[string]*internal.FileMetadata {
|
||||
maps := make(map[string]map[string]*internal.FileMetadata)
|
||||
|
||||
for layerID, v := range layersMetadata {
|
||||
r := make(map[string]*internal.ZstdFileMetadata)
|
||||
r := make(map[string]*internal.FileMetadata)
|
||||
for i := range v {
|
||||
r[v[i].Digest] = &v[i]
|
||||
}
|
||||
|
|
@ -114,13 +125,13 @@ func prepareOtherLayersCache(layersMetadata map[string][]internal.ZstdFileMetada
|
|||
return maps
|
||||
}
|
||||
|
||||
func getLayersCache(store storage.Store) (map[string][]internal.ZstdFileMetadata, map[string]string, error) {
|
||||
func getLayersCache(store storage.Store) (map[string][]internal.FileMetadata, map[string]string, error) {
|
||||
allLayers, err := store.Layers()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
layersMetadata := make(map[string][]internal.ZstdFileMetadata)
|
||||
layersMetadata := make(map[string][]internal.FileMetadata)
|
||||
layersTarget := make(map[string]string)
|
||||
for _, r := range allLayers {
|
||||
manifestReader, err := store.LayerBigData(r.ID, bigDataKey)
|
||||
|
|
@ -132,7 +143,7 @@ func getLayersCache(store storage.Store) (map[string][]internal.ZstdFileMetadata
|
|||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
var toc internal.ZstdTOC
|
||||
var toc internal.TOC
|
||||
if err := json.Unmarshal(manifest, &toc); err != nil {
|
||||
continue
|
||||
}
|
||||
|
|
@ -152,11 +163,14 @@ func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotat
|
|||
if _, ok := annotations[internal.ManifestChecksumKey]; ok {
|
||||
return makeZstdChunkedDiffer(ctx, store, blobSize, annotations, iss)
|
||||
}
|
||||
if _, ok := annotations[estargz.TOCJSONDigestAnnotation]; ok {
|
||||
return makeEstargzChunkedDiffer(ctx, store, blobSize, annotations, iss)
|
||||
}
|
||||
return nil, errors.New("blob type not supported for partial retrieval")
|
||||
}
|
||||
|
||||
func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedZstdDiffer, error) {
|
||||
manifest, err := readZstdChunkedManifest(iss, blobSize, annotations)
|
||||
func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) {
|
||||
manifest, tocOffset, err := readZstdChunkedManifest(iss, blobSize, annotations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -165,11 +179,33 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
|
|||
return nil, err
|
||||
}
|
||||
|
||||
return &chunkedZstdDiffer{
|
||||
return &chunkedDiffer{
|
||||
stream: iss,
|
||||
manifest: manifest,
|
||||
layersMetadata: layersMetadata,
|
||||
layersTarget: layersTarget,
|
||||
tocOffset: tocOffset,
|
||||
fileType: fileTypeZstdChunked,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) {
|
||||
manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, annotations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
layersMetadata, layersTarget, err := getLayersCache(store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &chunkedDiffer{
|
||||
stream: iss,
|
||||
manifest: manifest,
|
||||
layersMetadata: layersMetadata,
|
||||
layersTarget: layersTarget,
|
||||
tocOffset: tocOffset,
|
||||
fileType: fileTypeEstargz,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
@ -179,7 +215,7 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
|
|||
// otherFile contains the metadata for the file.
|
||||
// dirfd is an open file descriptor to the destination root directory.
|
||||
// useHardLinks defines whether the deduplication can be performed using hard links.
|
||||
func copyFileFromOtherLayer(file internal.ZstdFileMetadata, source string, otherFile *internal.ZstdFileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
|
||||
func copyFileFromOtherLayer(file internal.FileMetadata, source string, otherFile *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
|
||||
srcDirfd, err := unix.Open(source, unix.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
return false, nil, 0, err
|
||||
|
|
@ -205,7 +241,7 @@ func copyFileFromOtherLayer(file internal.ZstdFileMetadata, source string, other
|
|||
// layersMetadata contains the metadata for each layer in the storage.
|
||||
// layersTarget maps each layer to its checkout on disk.
|
||||
// useHardLinks defines whether the deduplication can be performed using hard links.
|
||||
func findFileInOtherLayers(file internal.ZstdFileMetadata, dirfd int, layersMetadata map[string]map[string]*internal.ZstdFileMetadata, layersTarget map[string]string, useHardLinks bool) (bool, *os.File, int64, error) {
|
||||
func findFileInOtherLayers(file internal.FileMetadata, dirfd int, layersMetadata map[string]map[string]*internal.FileMetadata, layersTarget map[string]string, useHardLinks bool) (bool, *os.File, int64, error) {
|
||||
// this is ugly, needs to be indexed
|
||||
for layerID, checksums := range layersMetadata {
|
||||
m, found := checksums[file.Digest]
|
||||
|
|
@ -240,7 +276,7 @@ func getFileDigest(f *os.File) (digest.Digest, error) {
|
|||
// file is the file to look for.
|
||||
// dirfd is an open fd to the destination checkout.
|
||||
// useHardLinks defines whether the deduplication can be performed using hard links.
|
||||
func findFileOnTheHost(file internal.ZstdFileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
|
||||
func findFileOnTheHost(file internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
|
||||
sourceFile := filepath.Clean(filepath.Join("/", file.Name))
|
||||
if !strings.HasPrefix(sourceFile, "/usr/") {
|
||||
// limit host deduplication to files under /usr.
|
||||
|
|
@ -300,7 +336,7 @@ func findFileOnTheHost(file internal.ZstdFileMetadata, dirfd int, useHardLinks b
|
|||
return true, dstFile, written, nil
|
||||
}
|
||||
|
||||
func maybeDoIDRemap(manifest []internal.ZstdFileMetadata, options *archive.TarOptions) error {
|
||||
func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOptions) error {
|
||||
if options.ChownOpts == nil && len(options.UIDMaps) == 0 || len(options.GIDMaps) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
|
@ -327,7 +363,7 @@ func maybeDoIDRemap(manifest []internal.ZstdFileMetadata, options *archive.TarOp
|
|||
}
|
||||
|
||||
type missingFile struct {
|
||||
File *internal.ZstdFileMetadata
|
||||
File *internal.FileMetadata
|
||||
Gap int64
|
||||
}
|
||||
|
||||
|
|
@ -341,7 +377,7 @@ type missingChunk struct {
|
|||
}
|
||||
|
||||
// setFileAttrs sets the file attributes for file given metadata
|
||||
func setFileAttrs(file *os.File, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
|
||||
func setFileAttrs(file *os.File, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error {
|
||||
if file == nil || file.Fd() < 0 {
|
||||
return errors.Errorf("invalid file")
|
||||
}
|
||||
|
|
@ -401,7 +437,7 @@ func openFileUnderRoot(name string, dirfd int, flags uint64, mode os.FileMode) (
|
|||
return os.NewFile(uintptr(fd), name), nil
|
||||
}
|
||||
|
||||
func createFileFromZstdStream(dest string, dirfd int, reader io.Reader, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) (err error) {
|
||||
func (c *chunkedDiffer) createFileFromCompressedStream(dest string, dirfd int, reader io.Reader, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) (err error) {
|
||||
file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -413,18 +449,48 @@ func createFileFromZstdStream(dest string, dirfd int, reader io.Reader, mode os.
|
|||
}
|
||||
}()
|
||||
|
||||
digester := digest.Canonical.Digester()
|
||||
checksum := digester.Hash()
|
||||
to := io.MultiWriter(file, checksum)
|
||||
|
||||
switch c.fileType {
|
||||
case fileTypeZstdChunked:
|
||||
z, err := zstd.NewReader(reader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer z.Close()
|
||||
|
||||
digester := digest.Canonical.Digester()
|
||||
checksum := digester.Hash()
|
||||
_, err = z.WriteTo(io.MultiWriter(file, checksum))
|
||||
if _, err := io.Copy(to, io.LimitReader(z, metadata.Size)); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.Copy(ioutil.Discard, reader); err != nil {
|
||||
return err
|
||||
}
|
||||
case fileTypeEstargz:
|
||||
if c.gzipReader == nil {
|
||||
r, err := pgzip.NewReader(reader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.gzipReader = r
|
||||
} else {
|
||||
if err := c.gzipReader.Reset(reader); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
defer c.gzipReader.Close()
|
||||
|
||||
if _, err := io.Copy(to, io.LimitReader(c.gzipReader, metadata.Size)); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.Copy(ioutil.Discard, reader); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unknown file type %q", c.fileType)
|
||||
}
|
||||
|
||||
manifestChecksum, err := digest.Parse(metadata.Digest)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -435,7 +501,7 @@ func createFileFromZstdStream(dest string, dirfd int, reader io.Reader, mode os.
|
|||
return setFileAttrs(file, mode, metadata, options)
|
||||
}
|
||||
|
||||
func storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingChunks []missingChunk, options *archive.TarOptions) error {
|
||||
func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingChunks []missingChunk, options *archive.TarOptions) error {
|
||||
for mc := 0; ; mc++ {
|
||||
var part io.ReadCloser
|
||||
select {
|
||||
|
|
@ -448,9 +514,10 @@ func storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string,
|
|||
if mc == len(missingChunks) {
|
||||
break
|
||||
}
|
||||
return errors.Errorf("invalid stream returned %d %d", mc, len(missingChunks))
|
||||
return errors.Errorf("invalid stream returned")
|
||||
}
|
||||
if mc == len(missingChunks) {
|
||||
part.Close()
|
||||
return errors.Errorf("too many chunks returned")
|
||||
}
|
||||
|
||||
|
|
@ -459,6 +526,7 @@ func storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string,
|
|||
limitReader := io.LimitReader(part, mf.Gap)
|
||||
_, err := io.Copy(ioutil.Discard, limitReader)
|
||||
if err != nil {
|
||||
part.Close()
|
||||
return err
|
||||
}
|
||||
continue
|
||||
|
|
@ -466,7 +534,7 @@ func storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string,
|
|||
|
||||
limitReader := io.LimitReader(part, mf.Length())
|
||||
|
||||
if err := createFileFromZstdStream(dest, dirfd, limitReader, os.FileMode(mf.File.Mode), mf.File, options); err != nil {
|
||||
if err := c.createFileFromCompressedStream(dest, dirfd, limitReader, os.FileMode(mf.File.Mode), mf.File, options); err != nil {
|
||||
part.Close()
|
||||
return err
|
||||
}
|
||||
|
|
@ -505,18 +573,20 @@ func mergeMissingChunks(missingChunks []missingChunk, target int) []missingChunk
|
|||
newMissingChunks = append(newMissingChunks, missingChunks[i])
|
||||
} else {
|
||||
prev := &newMissingChunks[len(newMissingChunks)-1]
|
||||
prev.RawChunk.Length += uint64(gap) + missingChunks[i].RawChunk.Length
|
||||
if gap > 0 {
|
||||
gapFile := missingFile{
|
||||
Gap: int64(gap),
|
||||
}
|
||||
prev.RawChunk.Length += uint64(gap) + missingChunks[i].RawChunk.Length
|
||||
prev.Files = append(append(prev.Files, gapFile), missingChunks[i].Files...)
|
||||
prev.Files = append(prev.Files, gapFile)
|
||||
}
|
||||
prev.Files = append(prev.Files, missingChunks[i].Files...)
|
||||
}
|
||||
}
|
||||
|
||||
return newMissingChunks
|
||||
}
|
||||
|
||||
func retrieveMissingFiles(input *chunkedZstdDiffer, dest string, dirfd int, missingChunks []missingChunk, options *archive.TarOptions) error {
|
||||
func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingChunks []missingChunk, options *archive.TarOptions) error {
|
||||
var chunksToRequest []ImageSourceChunk
|
||||
for _, c := range missingChunks {
|
||||
chunksToRequest = append(chunksToRequest, c.RawChunk)
|
||||
|
|
@ -527,7 +597,7 @@ func retrieveMissingFiles(input *chunkedZstdDiffer, dest string, dirfd int, miss
|
|||
var err error
|
||||
var errs chan error
|
||||
for {
|
||||
streams, errs, err = input.stream.GetBlobAt(chunksToRequest)
|
||||
streams, errs, err = c.stream.GetBlobAt(chunksToRequest)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
|
|
@ -546,13 +616,13 @@ func retrieveMissingFiles(input *chunkedZstdDiffer, dest string, dirfd int, miss
|
|||
return err
|
||||
}
|
||||
|
||||
if err := storeMissingFiles(streams, errs, dest, dirfd, missingChunks, options); err != nil {
|
||||
if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingChunks, options); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func safeMkdir(dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
|
||||
func safeMkdir(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error {
|
||||
parent := filepath.Dir(metadata.Name)
|
||||
base := filepath.Base(metadata.Name)
|
||||
|
||||
|
|
@ -581,7 +651,7 @@ func safeMkdir(dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata,
|
|||
return setFileAttrs(file, mode, metadata, options)
|
||||
}
|
||||
|
||||
func safeLink(dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
|
||||
func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error {
|
||||
sourceFile, err := openFileUnderRoot(metadata.Linkname, dirfd, unix.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -613,7 +683,7 @@ func safeLink(dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata,
|
|||
return setFileAttrs(newFile, mode, metadata, options)
|
||||
}
|
||||
|
||||
func safeSymlink(dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
|
||||
func safeSymlink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error {
|
||||
destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name)
|
||||
destDirFd := dirfd
|
||||
if destDir != "." {
|
||||
|
|
@ -691,7 +761,7 @@ type hardLinkToCreate struct {
|
|||
dest string
|
||||
dirfd int
|
||||
mode os.FileMode
|
||||
metadata *internal.ZstdFileMetadata
|
||||
metadata *internal.FileMetadata
|
||||
}
|
||||
|
||||
func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bool) bool {
|
||||
|
|
@ -701,12 +771,12 @@ func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bo
|
|||
return def
|
||||
}
|
||||
|
||||
func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) {
|
||||
func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) {
|
||||
bigData := map[string][]byte{
|
||||
bigDataKey: d.manifest,
|
||||
bigDataKey: c.manifest,
|
||||
}
|
||||
output := graphdriver.DriverWithDifferOutput{
|
||||
Differ: d,
|
||||
Differ: c,
|
||||
BigData: bigData,
|
||||
}
|
||||
|
||||
|
|
@ -726,30 +796,21 @@ func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions)
|
|||
useHardLinks := parseBooleanPullOption(&storeOpts, "use_hard_links", false)
|
||||
|
||||
// Generate the manifest
|
||||
var toc internal.ZstdTOC
|
||||
if err := json.Unmarshal(d.manifest, &toc); err != nil {
|
||||
var toc internal.TOC
|
||||
if err := json.Unmarshal(c.manifest, &toc); err != nil {
|
||||
return output, err
|
||||
}
|
||||
|
||||
whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
|
||||
|
||||
var missingChunks []missingChunk
|
||||
var mergedEntries []internal.ZstdFileMetadata
|
||||
|
||||
if err := maybeDoIDRemap(toc.Entries, options); err != nil {
|
||||
mergedEntries, err := c.mergeTocEntries(c.fileType, toc.Entries)
|
||||
if err != nil {
|
||||
return output, err
|
||||
}
|
||||
|
||||
for _, e := range toc.Entries {
|
||||
if e.Type == TypeChunk {
|
||||
l := len(mergedEntries)
|
||||
if l == 0 || mergedEntries[l-1].Type != TypeReg {
|
||||
return output, errors.New("chunk type without a regular file")
|
||||
}
|
||||
mergedEntries[l-1].EndOffset = e.EndOffset
|
||||
continue
|
||||
}
|
||||
mergedEntries = append(mergedEntries, e)
|
||||
if err := maybeDoIDRemap(mergedEntries, options); err != nil {
|
||||
return output, err
|
||||
}
|
||||
|
||||
if options.ForceMask != nil {
|
||||
|
|
@ -768,7 +829,7 @@ func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions)
|
|||
}
|
||||
defer unix.Close(dirfd)
|
||||
|
||||
otherLayersCache := prepareOtherLayersCache(d.layersMetadata)
|
||||
otherLayersCache := prepareOtherLayersCache(c.layersMetadata)
|
||||
|
||||
// hardlinks can point to missing files. So create them after all files
|
||||
// are retrieved
|
||||
|
|
@ -870,7 +931,7 @@ func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions)
|
|||
|
||||
totalChunksSize += r.Size
|
||||
|
||||
found, dstFile, _, err := findFileInOtherLayers(r, dirfd, otherLayersCache, d.layersTarget, useHardLinks)
|
||||
found, dstFile, _, err := findFileInOtherLayers(r, dirfd, otherLayersCache, c.layersTarget, useHardLinks)
|
||||
if err != nil {
|
||||
return output, err
|
||||
}
|
||||
|
|
@ -908,9 +969,11 @@ func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions)
|
|||
Offset: uint64(r.Offset),
|
||||
Length: uint64(r.EndOffset - r.Offset),
|
||||
}
|
||||
|
||||
file := missingFile{
|
||||
File: &toc.Entries[i],
|
||||
File: &mergedEntries[i],
|
||||
}
|
||||
|
||||
missingChunks = append(missingChunks, missingChunk{
|
||||
RawChunk: rawChunk,
|
||||
Files: []missingFile{
|
||||
|
|
@ -922,7 +985,7 @@ func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions)
|
|||
// There are some missing files. Prepare a multirange request for the missing chunks.
|
||||
if len(missingChunks) > 0 {
|
||||
missingChunks = mergeMissingChunks(missingChunks, maxNumberMissingChunks)
|
||||
if err := retrieveMissingFiles(d, dest, dirfd, missingChunks, options); err != nil {
|
||||
if err := c.retrieveMissingFiles(dest, dirfd, missingChunks, options); err != nil {
|
||||
return output, err
|
||||
}
|
||||
}
|
||||
|
|
@ -938,3 +1001,37 @@ func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions)
|
|||
}
|
||||
return output, nil
|
||||
}
|
||||
|
||||
func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]internal.FileMetadata, error) {
|
||||
var mergedEntries []internal.FileMetadata
|
||||
var prevEntry *internal.FileMetadata
|
||||
for _, entry := range entries {
|
||||
e := entry
|
||||
|
||||
// ignore the metadata files for the estargz format.
|
||||
if fileType == fileTypeEstargz && (e.Name == estargz.PrefetchLandmark || e.Name == estargz.NoPrefetchLandmark || e.Name == estargz.TOCTarName) {
|
||||
continue
|
||||
}
|
||||
|
||||
if e.Type == TypeChunk {
|
||||
if prevEntry == nil || prevEntry.Type != TypeReg {
|
||||
return nil, errors.New("chunk type without a regular file")
|
||||
}
|
||||
prevEntry.EndOffset = e.EndOffset
|
||||
continue
|
||||
}
|
||||
mergedEntries = append(mergedEntries, e)
|
||||
prevEntry = &e
|
||||
}
|
||||
// stargz/estargz doesn't store EndOffset so let's calculate it here
|
||||
lastOffset := c.tocOffset
|
||||
for i := len(mergedEntries) - 1; i >= 0; i-- {
|
||||
if mergedEntries[i].EndOffset == 0 {
|
||||
mergedEntries[i].EndOffset = lastOffset
|
||||
}
|
||||
if mergedEntries[i].Offset != 0 {
|
||||
lastOffset = mergedEntries[i].Offset
|
||||
}
|
||||
}
|
||||
return mergedEntries, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -56,7 +56,7 @@ func (s seekable) GetBlobAt(req []ImageSourceChunk) (chan io.ReadCloser, chan er
|
|||
return m, e, nil
|
||||
}
|
||||
|
||||
var someFiles = []internal.ZstdFileMetadata{
|
||||
var someFiles = []internal.FileMetadata{
|
||||
{
|
||||
Type: "dir",
|
||||
Name: "/foo",
|
||||
|
|
@ -130,12 +130,12 @@ func TestGenerateAndParseManifest(t *testing.T) {
|
|||
t: t,
|
||||
}
|
||||
|
||||
manifest, err := readZstdChunkedManifest(s, 8192, annotations)
|
||||
manifest, _, err := readZstdChunkedManifest(s, 8192, annotations)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
var toc internal.ZstdTOC
|
||||
var toc internal.TOC
|
||||
if err := json.Unmarshal(manifest, &toc); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
|
|
|||
202
storage/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE
generated
vendored
Normal file
202
storage/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE
generated
vendored
Normal file
|
|
@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
639
storage/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
generated
vendored
Normal file
639
storage/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
generated
vendored
Normal file
|
|
@ -0,0 +1,639 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Copyright 2019 The Go Authors. All rights reserved.
|
||||
Use of this source code is governed by a BSD-style
|
||||
license that can be found in the LICENSE file.
|
||||
*/
|
||||
|
||||
package estargz
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/containerd/stargz-snapshotter/estargz/errorutil"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
type options struct {
|
||||
chunkSize int
|
||||
compressionLevel int
|
||||
prioritizedFiles []string
|
||||
missedPrioritizedFiles *[]string
|
||||
}
|
||||
|
||||
type Option func(o *options) error
|
||||
|
||||
// WithChunkSize option specifies the chunk size of eStargz blob to build.
|
||||
func WithChunkSize(chunkSize int) Option {
|
||||
return func(o *options) error {
|
||||
o.chunkSize = chunkSize
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithCompressionLevel option specifies the gzip compression level.
|
||||
// The default is gzip.BestCompression.
|
||||
// See also: https://godoc.org/compress/gzip#pkg-constants
|
||||
func WithCompressionLevel(level int) Option {
|
||||
return func(o *options) error {
|
||||
o.compressionLevel = level
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithPrioritizedFiles option specifies the list of prioritized files.
|
||||
// These files must be complete paths that are absolute or relative to "/"
|
||||
// For example, all of "foo/bar", "/foo/bar", "./foo/bar" and "../foo/bar"
|
||||
// are treated as "/foo/bar".
|
||||
func WithPrioritizedFiles(files []string) Option {
|
||||
return func(o *options) error {
|
||||
o.prioritizedFiles = files
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithAllowPrioritizeNotFound makes Build continue the execution even if some
|
||||
// of prioritized files specified by WithPrioritizedFiles option aren't found
|
||||
// in the input tar. Instead, this records all missed file names to the passed
|
||||
// slice.
|
||||
func WithAllowPrioritizeNotFound(missedFiles *[]string) Option {
|
||||
return func(o *options) error {
|
||||
if missedFiles == nil {
|
||||
return fmt.Errorf("WithAllowPrioritizeNotFound: slice must be passed")
|
||||
}
|
||||
o.missedPrioritizedFiles = missedFiles
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Blob is an eStargz blob.
|
||||
type Blob struct {
|
||||
io.ReadCloser
|
||||
diffID digest.Digester
|
||||
tocDigest digest.Digest
|
||||
}
|
||||
|
||||
// DiffID returns the digest of uncompressed blob.
|
||||
// It is only valid to call DiffID after Close.
|
||||
func (b *Blob) DiffID() digest.Digest {
|
||||
return b.diffID.Digest()
|
||||
}
|
||||
|
||||
// TOCDigest returns the digest of uncompressed TOC JSON.
|
||||
func (b *Blob) TOCDigest() digest.Digest {
|
||||
return b.tocDigest
|
||||
}
|
||||
|
||||
// Build builds an eStargz blob which is an extended version of stargz, from a blob (gzip, zstd
|
||||
// or plain tar) passed through the argument. If there are some prioritized files are listed in
|
||||
// the option, these files are grouped as "prioritized" and can be used for runtime optimization
|
||||
// (e.g. prefetch). This function builds a blob in parallel, with dividing that blob into several
|
||||
// (at least the number of runtime.GOMAXPROCS(0)) sub-blobs.
|
||||
func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
|
||||
var opts options
|
||||
opts.compressionLevel = gzip.BestCompression // BestCompression by default
|
||||
for _, o := range opt {
|
||||
if err := o(&opts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
layerFiles := newTempFiles()
|
||||
defer func() {
|
||||
if rErr != nil {
|
||||
if err := layerFiles.CleanupAll(); err != nil {
|
||||
rErr = errors.Wrapf(rErr, "failed to cleanup tmp files: %v", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
tarBlob, err := decompressBlob(tarBlob, layerFiles)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entries, err := sortEntries(tarBlob, opts.prioritizedFiles, opts.missedPrioritizedFiles)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tarParts := divideEntries(entries, runtime.GOMAXPROCS(0))
|
||||
writers := make([]*Writer, len(tarParts))
|
||||
payloads := make([]*os.File, len(tarParts))
|
||||
var mu sync.Mutex
|
||||
var eg errgroup.Group
|
||||
for i, parts := range tarParts {
|
||||
i, parts := i, parts
|
||||
// builds verifiable stargz sub-blobs
|
||||
eg.Go(func() error {
|
||||
esgzFile, err := layerFiles.TempFile("", "esgzdata")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sw := NewWriterLevel(esgzFile, opts.compressionLevel)
|
||||
sw.ChunkSize = opts.chunkSize
|
||||
if err := sw.AppendTar(readerFromEntries(parts...)); err != nil {
|
||||
return err
|
||||
}
|
||||
mu.Lock()
|
||||
writers[i] = sw
|
||||
payloads[i] = esgzFile
|
||||
mu.Unlock()
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if err := eg.Wait(); err != nil {
|
||||
rErr = err
|
||||
return nil, err
|
||||
}
|
||||
tocAndFooter, tocDgst, err := closeWithCombine(opts.compressionLevel, writers...)
|
||||
if err != nil {
|
||||
rErr = err
|
||||
return nil, err
|
||||
}
|
||||
var rs []io.Reader
|
||||
for _, p := range payloads {
|
||||
fs, err := fileSectionReader(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rs = append(rs, fs)
|
||||
}
|
||||
diffID := digest.Canonical.Digester()
|
||||
pr, pw := io.Pipe()
|
||||
go func() {
|
||||
r, err := gzip.NewReader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw))
|
||||
if err != nil {
|
||||
pw.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
if _, err := io.Copy(diffID.Hash(), r); err != nil {
|
||||
pw.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
pw.Close()
|
||||
}()
|
||||
return &Blob{
|
||||
ReadCloser: readCloser{
|
||||
Reader: pr,
|
||||
closeFunc: layerFiles.CleanupAll,
|
||||
},
|
||||
tocDigest: tocDgst,
|
||||
diffID: diffID,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// closeWithCombine takes unclosed Writers and close them. This also returns the
|
||||
// toc that combined all Writers into.
|
||||
// Writers doesn't write TOC and footer to the underlying writers so they can be
|
||||
// combined into a single eStargz and tocAndFooter returned by this function can
|
||||
// be appended at the tail of that combined blob.
|
||||
func closeWithCombine(compressionLevel int, ws ...*Writer) (tocAndFooter io.Reader, tocDgst digest.Digest, err error) {
|
||||
if len(ws) == 0 {
|
||||
return nil, "", fmt.Errorf("at least one writer must be passed")
|
||||
}
|
||||
for _, w := range ws {
|
||||
if w.closed {
|
||||
return nil, "", fmt.Errorf("writer must be unclosed")
|
||||
}
|
||||
defer func(w *Writer) { w.closed = true }(w)
|
||||
if err := w.closeGz(); err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
if err := w.bw.Flush(); err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
}
|
||||
var (
|
||||
mtoc = new(jtoc)
|
||||
currentOffset int64
|
||||
)
|
||||
mtoc.Version = ws[0].toc.Version
|
||||
for _, w := range ws {
|
||||
for _, e := range w.toc.Entries {
|
||||
// Recalculate Offset of non-empty files/chunks
|
||||
if (e.Type == "reg" && e.Size > 0) || e.Type == "chunk" {
|
||||
e.Offset += currentOffset
|
||||
}
|
||||
mtoc.Entries = append(mtoc.Entries, e)
|
||||
}
|
||||
if w.toc.Version > mtoc.Version {
|
||||
mtoc.Version = w.toc.Version
|
||||
}
|
||||
currentOffset += w.cw.n
|
||||
}
|
||||
|
||||
tocJSON, err := json.MarshalIndent(mtoc, "", "\t")
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
pr, pw := io.Pipe()
|
||||
go func() {
|
||||
zw, _ := gzip.NewWriterLevel(pw, compressionLevel)
|
||||
tw := tar.NewWriter(zw)
|
||||
if err := tw.WriteHeader(&tar.Header{
|
||||
Typeflag: tar.TypeReg,
|
||||
Name: TOCTarName,
|
||||
Size: int64(len(tocJSON)),
|
||||
}); err != nil {
|
||||
pw.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
if _, err := tw.Write(tocJSON); err != nil {
|
||||
pw.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
if err := tw.Close(); err != nil {
|
||||
pw.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
if err := zw.Close(); err != nil {
|
||||
pw.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
pw.Close()
|
||||
}()
|
||||
return io.MultiReader(
|
||||
pr,
|
||||
bytes.NewReader(footerBytes(currentOffset)),
|
||||
), digest.FromBytes(tocJSON), nil
|
||||
}
|
||||
|
||||
// divideEntries divides passed entries to the parts at least the number specified by the
|
||||
// argument.
|
||||
func divideEntries(entries []*entry, minPartsNum int) (set [][]*entry) {
|
||||
var estimatedSize int64
|
||||
for _, e := range entries {
|
||||
estimatedSize += e.header.Size
|
||||
}
|
||||
unitSize := estimatedSize / int64(minPartsNum)
|
||||
var (
|
||||
nextEnd = unitSize
|
||||
offset int64
|
||||
)
|
||||
set = append(set, []*entry{})
|
||||
for _, e := range entries {
|
||||
set[len(set)-1] = append(set[len(set)-1], e)
|
||||
offset += e.header.Size
|
||||
if offset > nextEnd {
|
||||
set = append(set, []*entry{})
|
||||
nextEnd += unitSize
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var errNotFound = errors.New("not found")
|
||||
|
||||
// sortEntries reads the specified tar blob and returns a list of tar entries.
|
||||
// If some of prioritized files are specified, the list starts from these
|
||||
// files with keeping the order specified by the argument.
|
||||
func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]string) ([]*entry, error) {
|
||||
|
||||
// Import tar file.
|
||||
intar, err := importTar(in)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to sort")
|
||||
}
|
||||
|
||||
// Sort the tar file respecting to the prioritized files list.
|
||||
sorted := &tarFile{}
|
||||
for _, l := range prioritized {
|
||||
if err := moveRec(l, intar, sorted); err != nil {
|
||||
if errors.Is(err, errNotFound) && missedPrioritized != nil {
|
||||
*missedPrioritized = append(*missedPrioritized, l)
|
||||
continue // allow not found
|
||||
}
|
||||
return nil, errors.Wrap(err, "failed to sort tar entries")
|
||||
}
|
||||
}
|
||||
if len(prioritized) == 0 {
|
||||
sorted.add(&entry{
|
||||
header: &tar.Header{
|
||||
Name: NoPrefetchLandmark,
|
||||
Typeflag: tar.TypeReg,
|
||||
Size: int64(len([]byte{landmarkContents})),
|
||||
},
|
||||
payload: bytes.NewReader([]byte{landmarkContents}),
|
||||
})
|
||||
} else {
|
||||
sorted.add(&entry{
|
||||
header: &tar.Header{
|
||||
Name: PrefetchLandmark,
|
||||
Typeflag: tar.TypeReg,
|
||||
Size: int64(len([]byte{landmarkContents})),
|
||||
},
|
||||
payload: bytes.NewReader([]byte{landmarkContents}),
|
||||
})
|
||||
}
|
||||
|
||||
// Dump all entry and concatinate them.
|
||||
return append(sorted.dump(), intar.dump()...), nil
|
||||
}
|
||||
|
||||
// readerFromEntries returns a reader of tar archive that contains entries passed
|
||||
// through the arguments.
|
||||
func readerFromEntries(entries ...*entry) io.Reader {
|
||||
pr, pw := io.Pipe()
|
||||
go func() {
|
||||
tw := tar.NewWriter(pw)
|
||||
defer tw.Close()
|
||||
for _, entry := range entries {
|
||||
if err := tw.WriteHeader(entry.header); err != nil {
|
||||
pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err))
|
||||
return
|
||||
}
|
||||
if _, err := io.Copy(tw, entry.payload); err != nil {
|
||||
pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err))
|
||||
return
|
||||
}
|
||||
}
|
||||
pw.Close()
|
||||
}()
|
||||
return pr
|
||||
}
|
||||
|
||||
func importTar(in io.ReaderAt) (*tarFile, error) {
|
||||
tf := &tarFile{}
|
||||
pw, err := newCountReader(in)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to make position watcher")
|
||||
}
|
||||
tr := tar.NewReader(pw)
|
||||
|
||||
// Walk through all nodes.
|
||||
for {
|
||||
// Fetch and parse next header.
|
||||
h, err := tr.Next()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else {
|
||||
return nil, errors.Wrap(err, "failed to parse tar file")
|
||||
}
|
||||
}
|
||||
switch cleanEntryName(h.Name) {
|
||||
case PrefetchLandmark, NoPrefetchLandmark:
|
||||
// Ignore existing landmark
|
||||
continue
|
||||
}
|
||||
|
||||
// Add entry. If it already exists, replace it.
|
||||
if _, ok := tf.get(h.Name); ok {
|
||||
tf.remove(h.Name)
|
||||
}
|
||||
tf.add(&entry{
|
||||
header: h,
|
||||
payload: io.NewSectionReader(in, pw.currentPos(), h.Size),
|
||||
})
|
||||
}
|
||||
|
||||
return tf, nil
|
||||
}
|
||||
|
||||
func moveRec(name string, in *tarFile, out *tarFile) error {
|
||||
name = cleanEntryName(name)
|
||||
if name == "" { // root directory. stop recursion.
|
||||
if e, ok := in.get(name); ok {
|
||||
// entry of the root directory exists. we should move it as well.
|
||||
// this case will occur if tar entries are prefixed with "./", "/", etc.
|
||||
out.add(e)
|
||||
in.remove(name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
_, okIn := in.get(name)
|
||||
_, okOut := out.get(name)
|
||||
if !okIn && !okOut {
|
||||
return errors.Wrapf(errNotFound, "file: %q", name)
|
||||
}
|
||||
|
||||
parent, _ := path.Split(strings.TrimSuffix(name, "/"))
|
||||
if err := moveRec(parent, in, out); err != nil {
|
||||
return err
|
||||
}
|
||||
if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink {
|
||||
if err := moveRec(e.header.Linkname, in, out); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if e, ok := in.get(name); ok {
|
||||
out.add(e)
|
||||
in.remove(name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// entry is a single tar entry: its parsed header plus a reader over the
// entry's payload bytes.
type entry struct {
	header  *tar.Header
	payload io.ReadSeeker
}

// tarFile is an in-memory index of a tar archive. index maps cleaned entry
// names to entries for O(1) lookup; stream keeps the entries in archive order
// so the tar can be re-serialized deterministically.
type tarFile struct {
	index  map[string]*entry
	stream []*entry
}
|
||||
|
||||
func (f *tarFile) add(e *entry) {
|
||||
if f.index == nil {
|
||||
f.index = make(map[string]*entry)
|
||||
}
|
||||
f.index[cleanEntryName(e.header.Name)] = e
|
||||
f.stream = append(f.stream, e)
|
||||
}
|
||||
|
||||
func (f *tarFile) remove(name string) {
|
||||
name = cleanEntryName(name)
|
||||
if f.index != nil {
|
||||
delete(f.index, name)
|
||||
}
|
||||
var filtered []*entry
|
||||
for _, e := range f.stream {
|
||||
if cleanEntryName(e.header.Name) == name {
|
||||
continue
|
||||
}
|
||||
filtered = append(filtered, e)
|
||||
}
|
||||
f.stream = filtered
|
||||
}
|
||||
|
||||
func (f *tarFile) get(name string) (e *entry, ok bool) {
|
||||
if f.index == nil {
|
||||
return nil, false
|
||||
}
|
||||
e, ok = f.index[cleanEntryName(name)]
|
||||
return
|
||||
}
|
||||
|
||||
func (f *tarFile) dump() []*entry {
|
||||
return f.stream
|
||||
}
|
||||
|
||||
// readCloser turns an io.Reader into an io.ReadCloser by attaching an
// arbitrary close callback.
type readCloser struct {
	io.Reader
	closeFunc func() error
}

// Close invokes the attached close callback.
func (rc readCloser) Close() error {
	return rc.closeFunc()
}
|
||||
|
||||
func fileSectionReader(file *os.File) (*io.SectionReader, error) {
|
||||
info, err := file.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return io.NewSectionReader(file, 0, info.Size()), nil
|
||||
}
|
||||
|
||||
// newTempFiles returns an empty temp-file tracker.
func newTempFiles() *tempFiles {
	return &tempFiles{}
}

// tempFiles tracks temporary files so they can all be closed and removed with
// a single CleanupAll call. Safe for concurrent use.
type tempFiles struct {
	files   []*os.File
	filesMu sync.Mutex
}

// TempFile creates a temporary file (same semantics as ioutil.TempFile) and
// records it for later cleanup.
func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) {
	f, err := ioutil.TempFile(dir, pattern)
	if err != nil {
		return nil, err
	}
	tf.filesMu.Lock()
	tf.files = append(tf.files, f)
	tf.filesMu.Unlock()
	return f, nil
}
|
||||
|
||||
// CleanupAll closes and deletes every file created via TempFile. It keeps
// going past individual failures, aggregates all errors into one, and resets
// the tracker so the files are not cleaned up twice.
func (tf *tempFiles) CleanupAll() error {
	tf.filesMu.Lock()
	defer tf.filesMu.Unlock()
	var allErr []error
	for _, f := range tf.files {
		if err := f.Close(); err != nil {
			allErr = append(allErr, err)
		}
		if err := os.Remove(f.Name()); err != nil {
			allErr = append(allErr, err)
		}
	}
	tf.files = nil
	return errorutil.Aggregate(allErr)
}
|
||||
|
||||
func newCountReader(r io.ReaderAt) (*countReader, error) {
|
||||
pos := int64(0)
|
||||
return &countReader{r: r, cPos: &pos}, nil
|
||||
}
|
||||
|
||||
type countReader struct {
|
||||
r io.ReaderAt
|
||||
cPos *int64
|
||||
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func (cr *countReader) Read(p []byte) (int, error) {
|
||||
cr.mu.Lock()
|
||||
defer cr.mu.Unlock()
|
||||
|
||||
n, err := cr.r.ReadAt(p, *cr.cPos)
|
||||
if err == nil {
|
||||
*cr.cPos += int64(n)
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (cr *countReader) Seek(offset int64, whence int) (int64, error) {
|
||||
cr.mu.Lock()
|
||||
defer cr.mu.Unlock()
|
||||
|
||||
switch whence {
|
||||
default:
|
||||
return 0, fmt.Errorf("Unknown whence: %v", whence)
|
||||
case io.SeekStart:
|
||||
case io.SeekCurrent:
|
||||
offset += *cr.cPos
|
||||
case io.SeekEnd:
|
||||
return 0, fmt.Errorf("Unsupported whence: %v", whence)
|
||||
}
|
||||
|
||||
if offset < 0 {
|
||||
return 0, fmt.Errorf("invalid offset")
|
||||
}
|
||||
*cr.cPos = offset
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
func (cr *countReader) currentPos() int64 {
|
||||
cr.mu.Lock()
|
||||
defer cr.mu.Unlock()
|
||||
|
||||
return *cr.cPos
|
||||
}
|
||||
|
||||
// decompressBlob returns a SectionReader over the uncompressed bytes of org.
// It sniffs the blob's first bytes for a gzip or zstd magic number; matching
// blobs are decompressed into a temp file tracked by tmp (whose lifetime
// therefore bounds the returned reader), everything else is passed through.
func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) {
	if org.Size() < 4 {
		// Too short to contain any supported magic number; treat as plain.
		return org, nil
	}
	src := make([]byte, 4)
	if _, err := org.Read(src); err != nil && err != io.EOF {
		return nil, err
	}
	var dR io.Reader
	if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) {
		// gzip
		// Fresh SectionReader: the sniffing Read above advanced org's offset.
		dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size()))
		if err != nil {
			return nil, err
		}
		defer dgR.Close()
		dR = io.Reader(dgR)
	} else if bytes.Equal([]byte{0x28, 0xb5, 0x2f, 0xfd}, src[:4]) {
		// zstd
		dzR, err := zstd.NewReader(io.NewSectionReader(org, 0, org.Size()))
		if err != nil {
			return nil, err
		}
		defer dzR.Close()
		dR = io.Reader(dzR)
	} else {
		// uncompressed
		return io.NewSectionReader(org, 0, org.Size()), nil
	}
	b, err := tmp.TempFile("", "uncompresseddata")
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(b, dR); err != nil {
		return nil, err
	}
	return fileSectionReader(b)
}
|
||||
40
storage/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go
generated
vendored
Normal file
40
storage/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go
generated
vendored
Normal file
|
|
@ -0,0 +1,40 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package errorutil
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Aggregate combines a list of errors into a single new error.
|
||||
func Aggregate(errs []error) error {
|
||||
switch len(errs) {
|
||||
case 0:
|
||||
return nil
|
||||
case 1:
|
||||
return errs[0]
|
||||
default:
|
||||
points := make([]string, len(errs)+1)
|
||||
points[0] = fmt.Sprintf("%d error(s) occurred:", len(errs))
|
||||
for i, err := range errs {
|
||||
points[i+1] = fmt.Sprintf("* %s", err)
|
||||
}
|
||||
return errors.New(strings.Join(points, "\n\t"))
|
||||
}
|
||||
}
|
||||
849
storage/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
generated
vendored
Normal file
849
storage/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
generated
vendored
Normal file
|
|
@ -0,0 +1,849 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Copyright 2019 The Go Authors. All rights reserved.
|
||||
Use of this source code is governed by a BSD-style
|
||||
license that can be found in the LICENSE file.
|
||||
*/
|
||||
|
||||
package estargz
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/stargz-snapshotter/estargz/errorutil"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// A Reader permits random access reads from a stargz file.
type Reader struct {
	sr        *io.SectionReader
	toc       *jtoc
	tocDigest digest.Digest // digest of the raw TOC JSON, checked by VerifyTOC

	// m stores all non-chunk entries, keyed by name.
	m map[string]*TOCEntry

	// chunks stores all TOCEntry values for regular files that
	// are split up. For a file with a single chunk, it's only
	// stored in m.
	chunks map[string][]*TOCEntry
}
|
||||
|
||||
// Open opens a stargz file for reading.
//
// Note that each entry name is normalized as the path that is relative to root.
func Open(sr *io.SectionReader) (*Reader, error) {
	tocOff, footerSize, err := OpenFooter(sr)
	if err != nil {
		return nil, errors.Wrapf(err, "error parsing footer")
	}
	// The TOC is a gzip-compressed tar stream sitting between tocOff and the
	// footer at the very end of the blob.
	tocTargz := make([]byte, sr.Size()-tocOff-footerSize)
	if _, err := sr.ReadAt(tocTargz, tocOff); err != nil {
		return nil, fmt.Errorf("error reading %d byte TOC targz: %v", len(tocTargz), err)
	}
	zr, err := gzip.NewReader(bytes.NewReader(tocTargz))
	if err != nil {
		return nil, fmt.Errorf("malformed TOC gzip header: %v", err)
	}
	// The TOC is its own gzip stream; do not read past it.
	zr.Multistream(false)
	tr := tar.NewReader(zr)
	h, err := tr.Next()
	if err != nil {
		return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err)
	}
	if h.Name != TOCTarName {
		return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName)
	}
	// Hash the TOC JSON while decoding so the digest can be verified later.
	dgstr := digest.Canonical.Digester()
	toc := new(jtoc)
	if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil {
		return nil, fmt.Errorf("error decoding TOC JSON: %v", err)
	}
	r := &Reader{sr: sr, toc: toc, tocDigest: dgstr.Digest()}
	if err := r.initFields(); err != nil {
		return nil, fmt.Errorf("failed to initialize fields of entries: %v", err)
	}
	return r, nil
}
|
||||
|
||||
// OpenFooter extracts and parses footer from the given blob.
// It returns the offset where the TOC starts and the size of the recognized
// footer (eStargz or legacy stargz).
func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr error) {
	if sr.Size() < FooterSize && sr.Size() < legacyFooterSize {
		return 0, 0, fmt.Errorf("blob size %d is smaller than the footer size", sr.Size())
	}
	// TODO: read a bigger chunk (1MB?) at once here to hopefully
	// get the TOC + footer in one go.
	var footer [FooterSize]byte
	if _, err := sr.ReadAt(footer[:], sr.Size()-FooterSize); err != nil {
		return 0, 0, fmt.Errorf("error reading footer: %v", err)
	}
	return parseFooter(footer[:])
}
|
||||
|
||||
// initFields populates the Reader from r.toc after decoding it from
|
||||
// JSON.
|
||||
//
|
||||
// Unexported fields are populated and TOCEntry fields that were
|
||||
// implicit in the JSON are populated.
|
||||
func (r *Reader) initFields() error {
|
||||
r.m = make(map[string]*TOCEntry, len(r.toc.Entries))
|
||||
r.chunks = make(map[string][]*TOCEntry)
|
||||
var lastPath string
|
||||
uname := map[int]string{}
|
||||
gname := map[int]string{}
|
||||
var lastRegEnt *TOCEntry
|
||||
for _, ent := range r.toc.Entries {
|
||||
ent.Name = cleanEntryName(ent.Name)
|
||||
if ent.Type == "reg" {
|
||||
lastRegEnt = ent
|
||||
}
|
||||
if ent.Type == "chunk" {
|
||||
ent.Name = lastPath
|
||||
r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
|
||||
if ent.ChunkSize == 0 && lastRegEnt != nil {
|
||||
ent.ChunkSize = lastRegEnt.Size - ent.ChunkOffset
|
||||
}
|
||||
} else {
|
||||
lastPath = ent.Name
|
||||
|
||||
if ent.Uname != "" {
|
||||
uname[ent.UID] = ent.Uname
|
||||
} else {
|
||||
ent.Uname = uname[ent.UID]
|
||||
}
|
||||
if ent.Gname != "" {
|
||||
gname[ent.GID] = ent.Gname
|
||||
} else {
|
||||
ent.Gname = uname[ent.GID]
|
||||
}
|
||||
|
||||
ent.modTime, _ = time.Parse(time.RFC3339, ent.ModTime3339)
|
||||
|
||||
if ent.Type == "dir" {
|
||||
ent.NumLink++ // Parent dir links to this directory
|
||||
}
|
||||
r.m[ent.Name] = ent
|
||||
}
|
||||
if ent.Type == "reg" && ent.ChunkSize > 0 && ent.ChunkSize < ent.Size {
|
||||
r.chunks[ent.Name] = make([]*TOCEntry, 0, ent.Size/ent.ChunkSize+1)
|
||||
r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
|
||||
}
|
||||
if ent.ChunkSize == 0 && ent.Size != 0 {
|
||||
ent.ChunkSize = ent.Size
|
||||
}
|
||||
}
|
||||
|
||||
// Populate children, add implicit directories:
|
||||
for _, ent := range r.toc.Entries {
|
||||
if ent.Type == "chunk" {
|
||||
continue
|
||||
}
|
||||
// add "foo/":
|
||||
// add "foo" child to "" (creating "" if necessary)
|
||||
//
|
||||
// add "foo/bar/":
|
||||
// add "bar" child to "foo" (creating "foo" if necessary)
|
||||
//
|
||||
// add "foo/bar.txt":
|
||||
// add "bar.txt" child to "foo" (creating "foo" if necessary)
|
||||
//
|
||||
// add "a/b/c/d/e/f.txt":
|
||||
// create "a/b/c/d/e" node
|
||||
// add "f.txt" child to "e"
|
||||
|
||||
name := ent.Name
|
||||
pdirName := parentDir(name)
|
||||
if name == pdirName {
|
||||
// This entry and its parent are the same.
|
||||
// Ignore this for avoiding infinite loop of the reference.
|
||||
// The example case where this can occur is when tar contains the root
|
||||
// directory itself (e.g. "./", "/").
|
||||
continue
|
||||
}
|
||||
pdir := r.getOrCreateDir(pdirName)
|
||||
ent.NumLink++ // at least one name(ent.Name) references this entry.
|
||||
if ent.Type == "hardlink" {
|
||||
if org, ok := r.m[cleanEntryName(ent.LinkName)]; ok {
|
||||
org.NumLink++ // original entry is referenced by this ent.Name.
|
||||
ent = org
|
||||
} else {
|
||||
return fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName)
|
||||
}
|
||||
}
|
||||
pdir.addChild(path.Base(name), ent)
|
||||
}
|
||||
|
||||
lastOffset := r.sr.Size()
|
||||
for i := len(r.toc.Entries) - 1; i >= 0; i-- {
|
||||
e := r.toc.Entries[i]
|
||||
if e.isDataType() {
|
||||
e.nextOffset = lastOffset
|
||||
}
|
||||
if e.Offset != 0 {
|
||||
lastOffset = e.Offset
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// parentDir returns the parent directory of p without a trailing slash,
// or "" when p has no directory component.
func parentDir(p string) string {
	if i := strings.LastIndex(p, "/"); i >= 0 {
		return p[:i]
	}
	return ""
}
|
||||
|
||||
// getOrCreateDir returns the TOCEntry for directory d, synthesizing it (and,
// recursively, any missing ancestors) when the tar stream had no explicit
// entry for it.
func (r *Reader) getOrCreateDir(d string) *TOCEntry {
	e, ok := r.m[d]
	if !ok {
		e = &TOCEntry{
			Name:    d,
			Type:    "dir",
			Mode:    0755,
			NumLink: 2, // The directory itself(.) and the parent link to this directory.
		}
		r.m[d] = e
		if d != "" {
			// Link the synthesized directory into its parent.
			pdir := r.getOrCreateDir(parentDir(d))
			pdir.addChild(path.Base(d), e)
		}
	}
	return e
}
|
||||
|
||||
// VerifyTOC checks that the TOC JSON in the passed blob matches the
// passed digests and that the TOC JSON contains digests for all chunks
// contained in the blob. If the verification succeeds, this function
// returns TOCEntryVerifier which holds all chunk digests in the stargz blob.
func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) {
	// Verify the digest of TOC JSON
	if r.tocDigest != tocDigest {
		return nil, fmt.Errorf("invalid TOC JSON %q; want %q", r.tocDigest, tocDigest)
	}
	digestMap := make(map[int64]digest.Digest) // map from chunk offset to the digest
	for _, e := range r.toc.Entries {
		if e.Type == "reg" || e.Type == "chunk" {
			if e.Type == "reg" && e.Size == 0 {
				continue // ignores empty file
			}

			// offset must be unique in stargz blob
			if _, ok := digestMap[e.Offset]; ok {
				return nil, fmt.Errorf("offset %d found twice", e.Offset)
			}

			// all chunk entries must contain digest
			if e.ChunkDigest == "" {
				return nil, fmt.Errorf("ChunkDigest of %q(off=%d) not found in TOC JSON",
					e.Name, e.Offset)
			}

			d, err := digest.Parse(e.ChunkDigest)
			if err != nil {
				return nil, errors.Wrapf(err, "failed to parse digest %q", e.ChunkDigest)
			}
			digestMap[e.Offset] = d
		}
	}

	return &verifier{digestMap: digestMap}, nil
}
|
||||
|
||||
// verifier is an implementation of TOCEntryVerifier which holds verifiers keyed by
// offset of the chunk.
type verifier struct {
	digestMap   map[int64]digest.Digest
	digestMapMu sync.Mutex
}

// Verifier returns a content verifier specified by TOCEntry.
// The entry is identified by its blob offset, which VerifyTOC guaranteed to
// be unique across the blob.
func (v *verifier) Verifier(ce *TOCEntry) (digest.Verifier, error) {
	v.digestMapMu.Lock()
	defer v.digestMapMu.Unlock()
	d, ok := v.digestMap[ce.Offset]
	if !ok {
		return nil, fmt.Errorf("verifier for offset=%d,size=%d hasn't been registered",
			ce.Offset, ce.ChunkSize)
	}
	return d.Verifier(), nil
}
|
||||
|
||||
// ChunkEntryForOffset returns the TOCEntry containing the byte of the
// named file at the given offset within the file.
// Name must be absolute path or one that is relative to root.
func (r *Reader) ChunkEntryForOffset(name string, offset int64) (e *TOCEntry, ok bool) {
	name = cleanEntryName(name)
	e, ok = r.Lookup(name)
	if !ok || !e.isDataType() {
		return nil, false
	}
	ents := r.chunks[name]
	if len(ents) < 2 {
		// Single-chunk file: the entry itself covers [0, ChunkSize).
		if offset >= e.ChunkSize {
			return nil, false
		}
		return e, true
	}
	// Chunks are ordered by ChunkOffset; binary-search for the chunk whose
	// range contains offset.
	i := sort.Search(len(ents), func(i int) bool {
		e := ents[i]
		return e.ChunkOffset >= offset || (offset > e.ChunkOffset && offset < e.ChunkOffset+e.ChunkSize)
	})
	if i == len(ents) {
		return nil, false
	}
	return ents[i], true
}
|
||||
|
||||
// Lookup returns the Table of Contents entry for the given path.
//
// To get the root directory, use the empty string.
// Path must be absolute path or one that is relative to root.
// A hardlink entry resolves (one level) to the entry it links to.
func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) {
	path = cleanEntryName(path)
	if r == nil {
		return
	}
	e, ok = r.m[path]
	if ok && e.Type == "hardlink" {
		e, ok = r.m[e.LinkName]
	}
	return
}
|
||||
|
||||
// OpenFile returns the reader of the specified file payload.
//
// Name must be absolute path or one that is relative to root.
// The returned reader decompresses lazily on each ReadAt; only regular files
// can be opened.
func (r *Reader) OpenFile(name string) (*io.SectionReader, error) {
	name = cleanEntryName(name)
	ent, ok := r.Lookup(name)
	if !ok {
		// TODO: come up with some error plan. This is lazy:
		return nil, &os.PathError{
			Path: name,
			Op:   "OpenFile",
			Err:  os.ErrNotExist,
		}
	}
	if ent.Type != "reg" {
		return nil, &os.PathError{
			Path: name,
			Op:   "OpenFile",
			Err:  errors.New("not a regular file"),
		}
	}
	fr := &fileReader{
		r:    r,
		size: ent.Size,
		ents: r.getChunks(ent),
	}
	return io.NewSectionReader(fr, 0, fr.size), nil
}
|
||||
|
||||
// getChunks returns all chunk entries for ent, falling back to the entry
// itself for single-chunk files (which are not recorded in r.chunks).
func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry {
	if ents, ok := r.chunks[ent.Name]; ok {
		return ents
	}
	return []*TOCEntry{ent}
}

// fileReader serves ReadAt over one regular file's uncompressed content by
// decompressing the relevant gzip chunk(s) of the underlying blob on demand.
type fileReader struct {
	r    *Reader
	size int64
	ents []*TOCEntry // 1 or more reg/chunk entries
}
|
||||
|
||||
// ReadAt reads len(p) bytes of the file's uncompressed content starting at
// off. It locates the chunk containing off, opens a gzip stream at that
// chunk's blob offset, discards up to off within the chunk, and then reads.
func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) {
	if off >= fr.size {
		return 0, io.EOF
	}
	if off < 0 {
		return 0, errors.New("invalid offset")
	}
	var i int
	if len(fr.ents) > 1 {
		// Binary-search for the first chunk at or after off; step back one
		// below if off falls inside the previous chunk.
		i = sort.Search(len(fr.ents), func(i int) bool {
			return fr.ents[i].ChunkOffset >= off
		})
		if i == len(fr.ents) {
			i = len(fr.ents) - 1
		}
	}
	ent := fr.ents[i]
	if ent.ChunkOffset > off {
		if i == 0 {
			return 0, errors.New("internal error; first chunk offset is non-zero")
		}
		ent = fr.ents[i-1]
	}

	// If ent is a chunk of a large file, adjust the ReadAt
	// offset by the chunk's offset.
	off -= ent.ChunkOffset

	finalEnt := fr.ents[len(fr.ents)-1]
	gzOff := ent.Offset
	// gzBytesRemain is the number of compressed gzip bytes in this
	// file remaining, over 1+ gzip chunks.
	gzBytesRemain := finalEnt.NextOffset() - gzOff

	sr := io.NewSectionReader(fr.r.sr, gzOff, gzBytesRemain)

	// Cap the read-ahead buffer at 2 MiB.
	const maxGZread = 2 << 20
	var bufSize = maxGZread
	if gzBytesRemain < maxGZread {
		bufSize = int(gzBytesRemain)
	}

	br := bufio.NewReaderSize(sr, bufSize)
	// Prefetch the whole buffer in one go before decompressing.
	if _, err := br.Peek(bufSize); err != nil {
		return 0, fmt.Errorf("fileReader.ReadAt.peek: %v", err)
	}

	gz, err := gzip.NewReader(br)
	if err != nil {
		return 0, fmt.Errorf("fileReader.ReadAt.gzipNewReader: %v", err)
	}
	// Skip the in-chunk prefix before off.
	if n, err := io.CopyN(ioutil.Discard, gz, off); n != off || err != nil {
		return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err)
	}
	return io.ReadFull(gz, p)
}
|
||||
|
||||
// A Writer writes stargz files.
//
// Use NewWriter to create a new Writer.
type Writer struct {
	bw       *bufio.Writer
	cw       *countWriter // counts compressed bytes, giving chunk offsets
	toc      *jtoc
	diffHash hash.Hash // SHA-256 of uncompressed tar

	closed           bool
	gz               *gzip.Writer // current gzip stream; nil between chunks
	lastUsername     map[int]string
	lastGroupname    map[int]string
	compressionLevel int

	// ChunkSize optionally controls the maximum number of bytes
	// of data of a regular file that can be written in one gzip
	// stream before a new gzip stream is started.
	// Zero means to use a default, currently 4 MiB.
	ChunkSize int
}
|
||||
|
||||
// currentGzipWriter writes to the current w.gz field, which can
// change throughout writing a tar entry.
//
// Additionally, it updates w's SHA-256 of the uncompressed bytes
// of the tar file.
type currentGzipWriter struct{ w *Writer }

// Write feeds p into the diff-ID hash and then into the active gzip stream.
func (cgw currentGzipWriter) Write(p []byte) (int, error) {
	cgw.w.diffHash.Write(p)
	return cgw.w.gz.Write(p)
}
|
||||
|
||||
func (w *Writer) chunkSize() int {
|
||||
if w.ChunkSize <= 0 {
|
||||
return 4 << 20
|
||||
}
|
||||
return w.ChunkSize
|
||||
}
|
||||
|
||||
// NewWriter returns a new stargz writer writing to w.
//
// The writer must be closed to write its trailing table of contents.
func NewWriter(w io.Writer) *Writer {
	return NewWriterLevel(w, gzip.BestCompression)
}

// NewWriterLevel returns a new stargz writer writing to w.
// The compression level is configurable.
//
// The writer must be closed to write its trailing table of contents.
func NewWriterLevel(w io.Writer, compressionLevel int) *Writer {
	bw := bufio.NewWriter(w)
	cw := &countWriter{w: bw}
	return &Writer{
		bw:               bw,
		cw:               cw,
		toc:              &jtoc{Version: 1},
		diffHash:         sha256.New(),
		compressionLevel: compressionLevel,
	}
}
|
||||
|
||||
// Close writes the stargz's table of contents and flushes all the
// buffers, returning any error. It returns the digest of the TOC JSON,
// which callers embed in the image annotations for later verification.
// Closing an already-closed Writer is a no-op returning ("", nil).
func (w *Writer) Close() (digest.Digest, error) {
	if w.closed {
		return "", nil
	}
	defer func() { w.closed = true }()

	// Terminate any gzip stream still open from AppendTar.
	if err := w.closeGz(); err != nil {
		return "", err
	}

	// Write the TOC index.
	tocOff := w.cw.n
	w.gz, _ = gzip.NewWriterLevel(w.cw, w.compressionLevel)
	tw := tar.NewWriter(currentGzipWriter{w})
	tocJSON, err := json.MarshalIndent(w.toc, "", "\t")
	if err != nil {
		return "", err
	}
	if err := tw.WriteHeader(&tar.Header{
		Typeflag: tar.TypeReg,
		Name:     TOCTarName,
		Size:     int64(len(tocJSON)),
	}); err != nil {
		return "", err
	}
	if _, err := tw.Write(tocJSON); err != nil {
		return "", err
	}

	if err := tw.Close(); err != nil {
		return "", err
	}
	if err := w.closeGz(); err != nil {
		return "", err
	}

	// And a little footer with pointer to the TOC gzip stream.
	if _, err := w.bw.Write(footerBytes(tocOff)); err != nil {
		return "", err
	}

	if err := w.bw.Flush(); err != nil {
		return "", err
	}

	return digest.FromBytes(tocJSON), nil
}
|
||||
|
||||
// closeGz terminates the current gzip stream, if any, so the next chunk
// starts a fresh, independently decompressible stream. It is an error to
// call this after the Writer has been closed.
func (w *Writer) closeGz() error {
	if w.closed {
		return errors.New("write on closed Writer")
	}
	if w.gz != nil {
		if err := w.gz.Close(); err != nil {
			return err
		}
		w.gz = nil
	}
	return nil
}
|
||||
|
||||
// nameIfChanged returns name, unless it was the already the value of (*mp)[id],
|
||||
// in which case it returns the empty string.
|
||||
func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
|
||||
if name == "" {
|
||||
return ""
|
||||
}
|
||||
if *mp == nil {
|
||||
*mp = make(map[int]string)
|
||||
}
|
||||
if (*mp)[id] == name {
|
||||
return ""
|
||||
}
|
||||
(*mp)[id] = name
|
||||
return name
|
||||
}
|
||||
|
||||
// condOpenGz starts a new gzip stream at the writer's current position if
// none is open. The error from NewWriterLevel is ignored because the level
// was validated when the Writer was constructed.
func (w *Writer) condOpenGz() {
	if w.gz == nil {
		w.gz, _ = gzip.NewWriterLevel(w.cw, w.compressionLevel)
	}
}
|
||||
|
||||
// AppendTar reads the tar or tar.gz file from r and appends
// each of its contents to w.
//
// The input r can optionally be gzip compressed but the output will
// always be gzip compressed.
func (w *Writer) AppendTar(r io.Reader) error {
	br := bufio.NewReader(r)
	var tr *tar.Reader
	if isGzip(br) {
		// NewReader can't fail if isGzip returned true.
		zr, _ := gzip.NewReader(br)
		tr = tar.NewReader(zr)
	} else {
		tr = tar.NewReader(br)
	}
	for {
		h, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err)
		}
		if h.Name == TOCTarName {
			// It is possible for a layer to be "stargzified" twice during the
			// distribution lifecycle. So we reserve "TOCTarName" here to avoid
			// duplicated entries in the resulting layer.
			continue
		}

		// Extract extended attributes from PAX records.
		xattrs := make(map[string][]byte)
		const xattrPAXRecordsPrefix = "SCHILY.xattr."
		if h.PAXRecords != nil {
			for k, v := range h.PAXRecords {
				if strings.HasPrefix(k, xattrPAXRecordsPrefix) {
					xattrs[k[len(xattrPAXRecordsPrefix):]] = []byte(v)
				}
			}
		}
		ent := &TOCEntry{
			Name:        h.Name,
			Mode:        h.Mode,
			UID:         h.Uid,
			GID:         h.Gid,
			Uname:       w.nameIfChanged(&w.lastUsername, h.Uid, h.Uname),
			Gname:       w.nameIfChanged(&w.lastGroupname, h.Gid, h.Gname),
			ModTime3339: formatModtime(h.ModTime),
			Xattrs:      xattrs,
		}
		w.condOpenGz()
		tw := tar.NewWriter(currentGzipWriter{w})
		if err := tw.WriteHeader(h); err != nil {
			return err
		}
		switch h.Typeflag {
		case tar.TypeLink:
			ent.Type = "hardlink"
			ent.LinkName = h.Linkname
		case tar.TypeSymlink:
			ent.Type = "symlink"
			ent.LinkName = h.Linkname
		case tar.TypeDir:
			ent.Type = "dir"
		case tar.TypeReg:
			ent.Type = "reg"
			ent.Size = h.Size
		case tar.TypeChar:
			ent.Type = "char"
			ent.DevMajor = int(h.Devmajor)
			ent.DevMinor = int(h.Devminor)
		case tar.TypeBlock:
			ent.Type = "block"
			ent.DevMajor = int(h.Devmajor)
			ent.DevMinor = int(h.Devminor)
		case tar.TypeFifo:
			ent.Type = "fifo"
		default:
			return fmt.Errorf("unsupported input tar entry %q", h.Typeflag)
		}

		// We need to keep a reference to the TOC entry for regular files, so that we
		// can fill the digest later.
		var regFileEntry *TOCEntry
		var payloadDigest digest.Digester
		if h.Typeflag == tar.TypeReg {
			regFileEntry = ent
			payloadDigest = digest.Canonical.Digester()
		}

		if h.Typeflag == tar.TypeReg && ent.Size > 0 {
			// Split the payload into chunks, each in its own gzip stream so
			// a reader can decompress any chunk independently.
			var written int64
			totalSize := ent.Size // save it before we destroy ent
			tee := io.TeeReader(tr, payloadDigest.Hash())
			for written < totalSize {
				if err := w.closeGz(); err != nil {
					return err
				}

				chunkSize := int64(w.chunkSize())
				remain := totalSize - written
				if remain < chunkSize {
					chunkSize = remain
				} else {
					ent.ChunkSize = chunkSize
				}
				// Offset is the compressed-blob position where this chunk's
				// gzip stream starts; ChunkOffset is within the file.
				ent.Offset = w.cw.n
				ent.ChunkOffset = written
				chunkDigest := digest.Canonical.Digester()

				w.condOpenGz()

				teeChunk := io.TeeReader(tee, chunkDigest.Hash())
				if _, err := io.CopyN(tw, teeChunk, chunkSize); err != nil {
					return fmt.Errorf("error copying %q: %v", h.Name, err)
				}
				ent.ChunkDigest = chunkDigest.Digest().String()
				w.toc.Entries = append(w.toc.Entries, ent)
				written += chunkSize
				// Subsequent chunks of the same file use "chunk" entries.
				ent = &TOCEntry{
					Name: h.Name,
					Type: "chunk",
				}
			}
		} else {
			w.toc.Entries = append(w.toc.Entries, ent)
		}
		if payloadDigest != nil {
			regFileEntry.Digest = payloadDigest.Digest().String()
		}
		if err := tw.Flush(); err != nil {
			return err
		}
	}
	return nil
}
|
||||
|
||||
// DiffID returns the SHA-256 of the uncompressed tar bytes.
// It is only valid to call DiffID after Close.
func (w *Writer) DiffID() string {
	return fmt.Sprintf("sha256:%x", w.diffHash.Sum(nil))
}
|
||||
|
||||
// footerBytes returns the 51 bytes footer.
// The footer is itself an empty, uncompressed gzip stream whose extra header
// field carries the TOC offset, so old gzip tooling skips it transparently.
func footerBytes(tocOff int64) []byte {
	buf := bytes.NewBuffer(make([]byte, 0, FooterSize))
	gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep 51 bytes

	// Extra header indicating the offset of TOCJSON
	// https://tools.ietf.org/html/rfc1952#section-2.3.1.1
	header := make([]byte, 4)
	header[0], header[1] = 'S', 'G'
	subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
	binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952
	gz.Header.Extra = append(header, []byte(subfield)...)
	gz.Close()
	if buf.Len() != FooterSize {
		panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
	}
	return buf.Bytes()
}
|
||||
|
||||
// parseFooter locates the TOC by trying the eStargz footer format first and
// falling back to the legacy stargz footer. It returns the TOC offset and
// the size of whichever footer matched; if neither does, all parse errors
// are aggregated into the returned error.
func parseFooter(p []byte) (tocOffset int64, footerSize int64, rErr error) {
	var allErr []error

	tocOffset, err := parseEStargzFooter(p)
	if err == nil {
		return tocOffset, FooterSize, nil
	}
	allErr = append(allErr, err)

	// The legacy footer is shorter; parse only the tail of p.
	pad := len(p) - legacyFooterSize
	if pad < 0 {
		pad = 0
	}
	tocOffset, err = parseLegacyFooter(p[pad:])
	if err == nil {
		return tocOffset, legacyFooterSize, nil
	}
	return 0, 0, errorutil.Aggregate(append(allErr, err))
}
|
||||
|
||||
func parseEStargzFooter(p []byte) (tocOffset int64, err error) {
|
||||
if len(p) != FooterSize {
|
||||
return 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
|
||||
}
|
||||
zr, err := gzip.NewReader(bytes.NewReader(p))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
extra := zr.Header.Extra
|
||||
si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
|
||||
if si1 != 'S' || si2 != 'G' {
|
||||
return 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2)
|
||||
}
|
||||
if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
|
||||
return 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
|
||||
}
|
||||
if string(subfield[16:]) != "STARGZ" {
|
||||
return 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
|
||||
}
|
||||
return strconv.ParseInt(string(subfield[:16]), 16, 64)
|
||||
}
|
||||
|
||||
func parseLegacyFooter(p []byte) (tocOffset int64, err error) {
|
||||
if len(p) != legacyFooterSize {
|
||||
return 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
|
||||
}
|
||||
zr, err := gzip.NewReader(bytes.NewReader(p))
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "legacy: failed to get footer gzip reader")
|
||||
}
|
||||
extra := zr.Header.Extra
|
||||
if len(extra) != 16+len("STARGZ") {
|
||||
return 0, fmt.Errorf("legacy: invalid stargz's extra field size")
|
||||
}
|
||||
if string(extra[16:]) != "STARGZ" {
|
||||
return 0, fmt.Errorf("legacy: magic string STARGZ not found")
|
||||
}
|
||||
return strconv.ParseInt(string(extra[:16]), 16, 64)
|
||||
}
|
||||
|
||||
func formatModtime(t time.Time) string {
|
||||
if t.IsZero() || t.Unix() == 0 {
|
||||
return ""
|
||||
}
|
||||
return t.UTC().Round(time.Second).Format(time.RFC3339)
|
||||
}
|
||||
|
||||
// cleanEntryName normalizes a tar entry name to a clean, slash-separated,
// rootless path (e.g. "./a//b" becomes "a/b").
func cleanEntryName(name string) string {
	// Prepending "/" before Clean keeps ".." components from escaping
	// the root; the added slash is then stripped off again. path (not
	// filepath) is used so separators are consistent across platforms.
	rooted := path.Clean("/" + name)
	return strings.TrimPrefix(rooted, "/")
}
|
||||
|
||||
// countWriter counts how many bytes have been written to its wrapped
// io.Writer.
type countWriter struct {
	w io.Writer // destination every Write is forwarded to
	n int64     // running total of bytes successfully written to w
}
|
||||
|
||||
func (cw *countWriter) Write(p []byte) (n int, err error) {
|
||||
n, err = cw.w.Write(p)
|
||||
cw.n += int64(n)
|
||||
return
|
||||
}
|
||||
|
||||
// isGzip reports whether br is positioned right before an upcoming gzip stream.
|
||||
// It does not consume any bytes from br.
|
||||
func isGzip(br *bufio.Reader) bool {
|
||||
const (
|
||||
gzipID1 = 0x1f
|
||||
gzipID2 = 0x8b
|
||||
gzipDeflate = 8
|
||||
)
|
||||
peek, _ := br.Peek(3)
|
||||
return len(peek) >= 3 && peek[0] == gzipID1 && peek[1] == gzipID2 && peek[2] == gzipDeflate
|
||||
}
|
||||
10
storage/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
generated
vendored
Normal file
10
storage/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
generated
vendored
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
module github.com/containerd/stargz-snapshotter/estargz
|
||||
|
||||
go 1.16
|
||||
|
||||
require (
|
||||
github.com/klauspost/compress v1.12.3
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
|
||||
)
|
||||
10
storage/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
generated
vendored
Normal file
10
storage/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
generated
vendored
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
|
||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU=
|
||||
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
264
storage/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go
generated
vendored
Normal file
264
storage/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go
generated
vendored
Normal file
|
|
@ -0,0 +1,264 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Copyright 2019 The Go Authors. All rights reserved.
|
||||
Use of this source code is governed by a BSD-style
|
||||
license that can be found in the LICENSE file.
|
||||
*/
|
||||
|
||||
package estargz
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
const (
	// TOCTarName is the name of the JSON file in the tar archive in the
	// table of contents gzip stream.
	TOCTarName = "stargz.index.json"

	// FooterSize is the number of bytes in the footer
	//
	// The footer is an empty gzip stream with no compression and an Extra
	// header of the form "%016xSTARGZ", where the 64 bit hex-encoded
	// number is the offset to the gzip stream of JSON TOC.
	//
	// 51 comes from:
	//
	// 10 bytes gzip header
	// 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ"))
	// 2 bytes Extra: SI1 = 'S', SI2 = 'G'
	// 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ"))
	// 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC)
	// 5 bytes flate header
	// 8 bytes gzip footer
	// (End of the eStargz blob)
	//
	// NOTE: For Extra fields, subfield IDs SI1='S' SI2='G' is used for eStargz.
	FooterSize = 51

	// legacyFooterSize is the number of bytes in the legacy stargz footer.
	//
	// 47 comes from:
	//
	// 10 byte gzip header +
	// 2 byte (LE16) length of extra, encoding 22 (16 hex digits + len("STARGZ")) == "\x16\x00" +
	// 22 bytes of extra (fmt.Sprintf("%016xSTARGZ", tocGzipOffset))
	// 5 byte flate header
	// 8 byte gzip footer (two little endian uint32s: digest, size)
	legacyFooterSize = 47

	// TOCJSONDigestAnnotation is an annotation for an image layer. This stores the
	// digest of the TOC JSON.
	// This annotation is valid only when it is specified in `.[]layers.annotations`
	// of an image manifest.
	TOCJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"

	// StoreUncompressedSizeAnnotation is an additional annotation key for eStargz to enable lazy
	// pulling on containers/storage. Stargz Store is required to expose the layer's uncompressed size
	// to the runtime but current OCI image doesn't ship this information by default. So we store this
	// to the special annotation.
	StoreUncompressedSizeAnnotation = "io.containers.estargz.uncompressed-size"

	// PrefetchLandmark is a file entry which indicates the end position of
	// prefetch in the stargz file.
	PrefetchLandmark = ".prefetch.landmark"

	// NoPrefetchLandmark is a file entry which indicates that no prefetch should
	// occur in the stargz file.
	NoPrefetchLandmark = ".no.prefetch.landmark"

	// landmarkContents is presumably the byte value stored as the content
	// of landmark files — the writer is not visible here; confirm before
	// relying on this.
	landmarkContents = 0xf
)
|
||||
|
||||
// jtoc is the JSON-serialized table of contents index of the files in the stargz file.
type jtoc struct {
	// Version is the format version of this TOC.
	Version int `json:"version"`
	// Entries lists every file, directory, link, and chunk in the layer.
	Entries []*TOCEntry `json:"entries"`
}
|
||||
|
||||
// TOCEntry is an entry in the stargz file's TOC (Table of Contents).
type TOCEntry struct {
	// Name is the tar entry's name. It is the complete path
	// stored in the tar file, not just the base name.
	Name string `json:"name"`

	// Type is one of "dir", "reg", "symlink", "hardlink", "char",
	// "block", "fifo", or "chunk".
	// The "chunk" type is used for regular file data chunks past the first
	// TOCEntry; the 2nd chunk and on have only Type ("chunk"), Offset,
	// ChunkOffset, and ChunkSize populated.
	Type string `json:"type"`

	// Size, for regular files, is the logical size of the file.
	Size int64 `json:"size,omitempty"`

	// ModTime3339 is the modification time of the tar entry. Empty
	// means zero or unknown. Otherwise it's in UTC RFC3339
	// format. Use the ModTime method to access the time.Time value.
	ModTime3339 string `json:"modtime,omitempty"`
	// modTime is the parsed form of ModTime3339; not serialized.
	modTime time.Time

	// LinkName, for symlinks and hardlinks, is the link target.
	LinkName string `json:"linkName,omitempty"`

	// Mode is the permission and mode bits.
	Mode int64 `json:"mode,omitempty"`

	// UID is the user ID of the owner.
	UID int `json:"uid,omitempty"`

	// GID is the group ID of the owner.
	GID int `json:"gid,omitempty"`

	// Uname is the username of the owner.
	//
	// In the serialized JSON, this field may only be present for
	// the first entry with the same UID.
	Uname string `json:"userName,omitempty"`

	// Gname is the group name of the owner.
	//
	// In the serialized JSON, this field may only be present for
	// the first entry with the same GID.
	Gname string `json:"groupName,omitempty"`

	// Offset, for regular files, provides the offset in the
	// stargz file to the file's data bytes. See ChunkOffset and
	// ChunkSize.
	Offset int64 `json:"offset,omitempty"`

	// nextOffset is the Offset of the next entry with a non-zero Offset;
	// exposed via NextOffset. Not serialized.
	nextOffset int64

	// DevMajor is the major device number for "char" and "block" types.
	DevMajor int `json:"devMajor,omitempty"`

	// DevMinor is the major device number for "char" and "block" types.
	DevMinor int `json:"devMinor,omitempty"`

	// NumLink is the number of entry names pointing to this entry.
	// Zero means one name references this entry.
	// NOTE(review): no json tag here, so this serializes as "NumLink"
	// with no omitempty — confirm that is intentional.
	NumLink int

	// Xattrs are the extended attribute for the entry.
	Xattrs map[string][]byte `json:"xattrs,omitempty"`

	// Digest stores the OCI checksum for regular files payload.
	// It has the form "sha256:abcdef01234....".
	Digest string `json:"digest,omitempty"`

	// ChunkOffset is non-zero if this is a chunk of a large,
	// regular file. If so, the Offset is where the gzip header of
	// ChunkSize bytes at ChunkOffset in Name begin.
	//
	// In serialized form, a "chunkSize" JSON field of zero means
	// that the chunk goes to the end of the file. After reading
	// from the stargz TOC, though, the ChunkSize is initialized
	// to a non-zero file for when Type is either "reg" or
	// "chunk".
	ChunkOffset int64 `json:"chunkOffset,omitempty"`
	ChunkSize   int64 `json:"chunkSize,omitempty"`

	// ChunkDigest stores an OCI digest of the chunk. This must be formed
	// as "sha256:0123abcd...".
	ChunkDigest string `json:"chunkDigest,omitempty"`

	// children maps a child's base name to its entry, for "dir" entries;
	// populated via addChild. Not serialized.
	children map[string]*TOCEntry
}
|
||||
|
||||
// ModTime returns the entry's modification time.
func (e *TOCEntry) ModTime() time.Time { return e.modTime }

// NextOffset returns the position (relative to the start of the
// stargz file) of the next gzip boundary after e.Offset.
func (e *TOCEntry) NextOffset() int64 { return e.nextOffset }
|
||||
|
||||
func (e *TOCEntry) addChild(baseName string, child *TOCEntry) {
|
||||
if e.children == nil {
|
||||
e.children = make(map[string]*TOCEntry)
|
||||
}
|
||||
if child.Type == "dir" {
|
||||
e.NumLink++ // Entry ".." in the subdirectory links to this directory
|
||||
}
|
||||
e.children[baseName] = child
|
||||
}
|
||||
|
||||
// isDataType reports whether TOCEntry is a regular file or chunk (something that
// contains regular file data).
func (e *TOCEntry) isDataType() bool { return e.Type == "reg" || e.Type == "chunk" }

// Stat returns a FileInfo value representing e.
func (e *TOCEntry) Stat() os.FileInfo { return fileInfo{e} }
|
||||
|
||||
// ForeachChild calls f for each child item. If f returns false, iteration ends.
|
||||
// If e is not a directory, f is not called.
|
||||
func (e *TOCEntry) ForeachChild(f func(baseName string, ent *TOCEntry) bool) {
|
||||
for name, ent := range e.children {
|
||||
if !f(name, ent) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// LookupChild returns the directory e's child by its base name.
|
||||
func (e *TOCEntry) LookupChild(baseName string) (child *TOCEntry, ok bool) {
|
||||
child, ok = e.children[baseName]
|
||||
return
|
||||
}
|
||||
|
||||
// fileInfo implements os.FileInfo using the wrapped *TOCEntry.
type fileInfo struct{ e *TOCEntry }

// Compile-time check that fileInfo satisfies os.FileInfo.
var _ os.FileInfo = fileInfo{}
|
||||
|
||||
// Name returns the base name of the entry's path.
func (fi fileInfo) Name() string { return path.Base(fi.e.Name) }

// IsDir reports whether the entry is a directory.
func (fi fileInfo) IsDir() bool { return fi.e.Type == "dir" }

// Size returns the logical file size of the entry.
func (fi fileInfo) Size() int64 { return fi.e.Size }

// ModTime returns the entry's modification time.
func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() }

// Sys exposes the underlying *TOCEntry.
func (fi fileInfo) Sys() interface{} { return fi.e }
|
||||
func (fi fileInfo) Mode() (m os.FileMode) {
|
||||
// TOCEntry.Mode is tar.Header.Mode so we can understand the these bits using `tar` pkg.
|
||||
m = (&tar.Header{Mode: fi.e.Mode}).FileInfo().Mode() &
|
||||
(os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky)
|
||||
switch fi.e.Type {
|
||||
case "dir":
|
||||
m |= os.ModeDir
|
||||
case "symlink":
|
||||
m |= os.ModeSymlink
|
||||
case "char":
|
||||
m |= os.ModeDevice | os.ModeCharDevice
|
||||
case "block":
|
||||
m |= os.ModeDevice
|
||||
case "fifo":
|
||||
m |= os.ModeNamedPipe
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// TOCEntryVerifier holds verifiers that are usable for verifying chunks contained
// in a eStargz blob.
type TOCEntryVerifier interface {

	// Verifier provides a content verifier that can be used for verifying the
	// contents of the specified TOCEntry.
	Verifier(ce *TOCEntry) (digest.Verifier, error)
}
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
# This source code refers to The Go Authors for copyright purposes.
|
||||
# The master list of authors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/AUTHORS.
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
# This source code was written by the Go contributors.
|
||||
# The master list of contributors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/CONTRIBUTORS.
|
||||
|
|
@ -0,0 +1,27 @@
|
|||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
Additional IP Rights Grant (Patents)
|
||||
|
||||
"This implementation" means the copyrightable works distributed by
|
||||
Google as part of the Go project.
|
||||
|
||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||
patent license to make, have made, use, offer to sell, sell, import,
|
||||
transfer and otherwise run, modify and propagate the contents of this
|
||||
implementation of Go, where such license applies only to those patent
|
||||
claims, both currently owned or controlled by Google and acquired in
|
||||
the future, licensable by Google that are necessarily infringed by this
|
||||
implementation of Go. This grant does not include claims that would be
|
||||
infringed only as a consequence of further modification of this
|
||||
implementation. If you or your agent or exclusive licensee institute or
|
||||
order or agree to the institution of patent litigation against any
|
||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||
that this implementation of Go or any code incorporated within this
|
||||
implementation of Go constitutes direct or contributory patent
|
||||
infringement, or inducement of patent infringement, then any patent
|
||||
rights granted to you under this License for this implementation of Go
|
||||
shall terminate as of the date such litigation is filed.
|
||||
|
|
@ -0,0 +1,66 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package errgroup provides synchronization, error propagation, and Context
|
||||
// cancelation for groups of goroutines working on subtasks of a common task.
|
||||
package errgroup
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// A Group is a collection of goroutines working on subtasks that are part of
// the same overall task.
//
// A zero Group is valid and does not cancel on error.
type Group struct {
	// cancel, when non-nil, cancels the Context returned by WithContext;
	// called on the first error and when Wait returns.
	cancel func()

	// wg tracks the goroutines started via Go.
	wg sync.WaitGroup

	// errOnce guards err so only the first non-nil error is recorded.
	errOnce sync.Once
	err     error
}
|
||||
|
||||
// WithContext returns a new Group and an associated Context derived from ctx.
|
||||
//
|
||||
// The derived Context is canceled the first time a function passed to Go
|
||||
// returns a non-nil error or the first time Wait returns, whichever occurs
|
||||
// first.
|
||||
func WithContext(ctx context.Context) (*Group, context.Context) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
return &Group{cancel: cancel}, ctx
|
||||
}
|
||||
|
||||
// Wait blocks until all function calls from the Go method have returned, then
|
||||
// returns the first non-nil error (if any) from them.
|
||||
func (g *Group) Wait() error {
|
||||
g.wg.Wait()
|
||||
if g.cancel != nil {
|
||||
g.cancel()
|
||||
}
|
||||
return g.err
|
||||
}
|
||||
|
||||
// Go calls the given function in a new goroutine.
|
||||
//
|
||||
// The first call to return a non-nil error cancels the group; its error will be
|
||||
// returned by Wait.
|
||||
func (g *Group) Go(f func() error) {
|
||||
g.wg.Add(1)
|
||||
|
||||
go func() {
|
||||
defer g.wg.Done()
|
||||
|
||||
if err := f(); err != nil {
|
||||
g.errOnce.Do(func() {
|
||||
g.err = err
|
||||
if g.cancel != nil {
|
||||
g.cancel()
|
||||
}
|
||||
})
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
|
@ -35,6 +35,10 @@ github.com/Microsoft/hcsshim/osversion
|
|||
github.com/bits-and-blooms/bitset
|
||||
# github.com/containerd/cgroups v1.0.1
|
||||
github.com/containerd/cgroups/stats/v1
|
||||
# github.com/containerd/stargz-snapshotter/estargz v0.7.0
|
||||
## explicit
|
||||
github.com/containerd/stargz-snapshotter/estargz
|
||||
github.com/containerd/stargz-snapshotter/estargz/errorutil
|
||||
# github.com/davecgh/go-spew v1.1.1
|
||||
github.com/davecgh/go-spew/spew
|
||||
# github.com/docker/go-units v0.4.0
|
||||
|
|
@ -145,6 +149,8 @@ go.opencensus.io/trace/tracestate
|
|||
# golang.org/x/net v0.0.0-20210226172049-e18ecbb05110
|
||||
## explicit
|
||||
golang.org/x/net/context
|
||||
# golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
|
||||
golang.org/x/sync/errgroup
|
||||
# golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55
|
||||
## explicit
|
||||
golang.org/x/sys/internal/unsafeheader
|
||||
|
|
|
|||
Loading…
Reference in New Issue