Merge pull request #1255 from rhatdan/VENDOR

Vendor in latest containers/(storage, image)
This commit is contained in:
OpenShift Merge Robot 2022-12-05 13:31:49 -05:00 committed by GitHub
commit 5d6d7d8418
56 changed files with 4957 additions and 3382 deletions

View File

@ -8,9 +8,9 @@ require (
github.com/containerd/containerd v1.6.10
github.com/containernetworking/cni v1.1.2
github.com/containernetworking/plugins v1.1.1
github.com/containers/image/v5 v5.23.1-0.20221109193300-0d85878d7a77
github.com/containers/image/v5 v5.23.1-0.20221130170538-333c50e3eac8
github.com/containers/ocicrypt v1.1.6
github.com/containers/storage v1.44.1-0.20221110192950-67e9778710f8
github.com/containers/storage v1.44.1-0.20221201083122-c5a80ad65f42
github.com/coreos/go-systemd/v22 v22.5.0
github.com/cyphar/filepath-securejoin v0.2.3
github.com/davecgh/go-spew v1.1.1
@ -56,7 +56,7 @@ require (
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/chzyer/readline v1.5.1 // indirect
github.com/containerd/cgroups v1.0.4 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.12.1 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.13.0 // indirect
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/go-connections v0.4.0 // indirect
@ -65,7 +65,7 @@ require (
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/go-containerregistry v0.12.0 // indirect
github.com/google/go-containerregistry v0.12.1 // indirect
github.com/google/go-intervals v0.0.2 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/mux v1.8.0 // indirect
@ -75,7 +75,7 @@ require (
github.com/klauspost/compress v1.15.12 // indirect
github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/letsencrypt/boulder v0.0.0-20220929215747-76583552c2be // indirect
github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf // indirect
github.com/manifoldco/promptui v0.9.0 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/mattn/go-shellwords v1.0.12 // indirect
@ -88,9 +88,9 @@ require (
github.com/pkg/errors v0.9.1 // indirect
github.com/proglottis/gpgme v0.1.3 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/sigstore/sigstore v1.4.5 // indirect
github.com/sigstore/sigstore v1.4.6 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
github.com/sylabs/sif/v2 v2.8.3 // indirect
github.com/sylabs/sif/v2 v2.9.0 // indirect
github.com/tchap/go-patricia v2.3.0+incompatible // indirect
github.com/theupdateframework/go-tuf v0.5.2-0.20220930112810-3890c1e7ace4 // indirect
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
@ -102,12 +102,12 @@ require (
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
go.opencensus.io v0.23.0 // indirect
go.opencensus.io v0.24.0 // indirect
golang.org/x/mod v0.6.0 // indirect
golang.org/x/net v0.2.0 // indirect
golang.org/x/text v0.4.0 // indirect
golang.org/x/tools v0.2.0 // indirect
google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a // indirect
google.golang.org/genproto v0.0.0-20221111202108-142d8a6fa32e // indirect
google.golang.org/grpc v1.50.1 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect

File diff suppressed because it is too large Load Diff

View File

@ -82,12 +82,12 @@ type InitConfig struct {
// Note: The networks are not loaded from disk until a method is called.
func NewCNINetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) {
// TODO: consider using a shared memory lock
lock, err := lockfile.GetLockfile(filepath.Join(conf.CNIConfigDir, "cni.lock"))
lock, err := lockfile.GetLockFile(filepath.Join(conf.CNIConfigDir, "cni.lock"))
if err != nil {
// If we're on a read-only filesystem, there is no risk of
// contention. Fall back to a local lockfile.
if errors.Is(err, unix.EROFS) {
lock, err = lockfile.GetLockfile(filepath.Join(conf.RunDir, "cni.lock"))
lock, err = lockfile.GetLockFile(filepath.Join(conf.RunDir, "cni.lock"))
if err != nil {
return nil, err
}

View File

@ -95,7 +95,7 @@ type InitConfig struct {
// Note: The networks are not loaded from disk until a method is called.
func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) {
// TODO: consider using a shared memory lock
lock, err := lockfile.GetLockfile(filepath.Join(conf.NetworkConfigDir, "netavark.lock"))
lock, err := lockfile.GetLockFile(filepath.Join(conf.NetworkConfigDir, "netavark.lock"))
if err != nil {
return nil, err
}

View File

@ -108,7 +108,7 @@ func NewManager(rootPath string) (*ConfigMapManager, error) {
return nil, err
}
lock, err := lockfile.GetLockfile(filepath.Join(rootPath, "configMaps.lock"))
lock, err := lockfile.GetLockFile(filepath.Join(rootPath, "configMaps.lock"))
if err != nil {
return nil, err
}

View File

@ -39,7 +39,7 @@ func NewDriver(rootPath string) (*Driver, error) {
return nil, err
}
lock, err := lockfile.GetLockfile(filepath.Join(rootPath, "configMapsdata.lock"))
lock, err := lockfile.GetLockFile(filepath.Join(rootPath, "configMapsdata.lock"))
if err != nil {
return nil, err
}

View File

@ -39,7 +39,7 @@ func NewDriver(rootPath string) (*Driver, error) {
return nil, err
}
lock, err := lockfile.GetLockfile(filepath.Join(rootPath, "secretsdata.lock"))
lock, err := lockfile.GetLockFile(filepath.Join(rootPath, "secretsdata.lock"))
if err != nil {
return nil, err
}

View File

@ -127,7 +127,7 @@ func NewManager(rootPath string) (*SecretsManager, error) {
return nil, err
}
lock, err := lockfile.GetLockfile(filepath.Join(rootPath, "secrets.lock"))
lock, err := lockfile.GetLockFile(filepath.Join(rootPath, "secrets.lock"))
if err != nil {
return nil, err
}

View File

@ -49,6 +49,7 @@ type options struct {
missedPrioritizedFiles *[]string
compression Compression
ctx context.Context
minChunkSize int
}
type Option func(o *options) error
@ -63,6 +64,7 @@ func WithChunkSize(chunkSize int) Option {
// WithCompressionLevel option specifies the gzip compression level.
// The default is gzip.BestCompression.
// This option will be ignored if WithCompression option is used.
// See also: https://godoc.org/compress/gzip#pkg-constants
func WithCompressionLevel(level int) Option {
return func(o *options) error {
@ -113,6 +115,18 @@ func WithContext(ctx context.Context) Option {
}
}
// WithMinChunkSize option specifies the minimal number of bytes of data
// must be written in one gzip stream.
// By increasing this number, one gzip stream can contain multiple files
// and it hopefully leads to smaller result blob.
// NOTE: This adds a TOC property that old reader doesn't understand.
func WithMinChunkSize(minChunkSize int) Option {
return func(o *options) error {
o.minChunkSize = minChunkSize
return nil
}
}
// Blob is an eStargz blob.
type Blob struct {
io.ReadCloser
@ -180,7 +194,14 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
if err != nil {
return nil, err
}
tarParts := divideEntries(entries, runtime.GOMAXPROCS(0))
var tarParts [][]*entry
if opts.minChunkSize > 0 {
// Each entry needs to know the size of the current gzip stream so they
// cannot be processed in parallel.
tarParts = [][]*entry{entries}
} else {
tarParts = divideEntries(entries, runtime.GOMAXPROCS(0))
}
writers := make([]*Writer, len(tarParts))
payloads := make([]*os.File, len(tarParts))
var mu sync.Mutex
@ -195,6 +216,13 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
}
sw := NewWriterWithCompressor(esgzFile, opts.compression)
sw.ChunkSize = opts.chunkSize
sw.MinChunkSize = opts.minChunkSize
if sw.needsOpenGzEntries == nil {
sw.needsOpenGzEntries = make(map[string]struct{})
}
for _, f := range []string{PrefetchLandmark, NoPrefetchLandmark} {
sw.needsOpenGzEntries[f] = struct{}{}
}
if err := sw.AppendTar(readerFromEntries(parts...)); err != nil {
return err
}
@ -209,7 +237,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
rErr = err
return nil, err
}
tocAndFooter, tocDgst, err := closeWithCombine(opts.compressionLevel, writers...)
tocAndFooter, tocDgst, err := closeWithCombine(writers...)
if err != nil {
rErr = err
return nil, err
@ -252,7 +280,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
// Writers doesn't write TOC and footer to the underlying writers so they can be
// combined into a single eStargz and tocAndFooter returned by this function can
// be appended at the tail of that combined blob.
func closeWithCombine(compressionLevel int, ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) {
func closeWithCombine(ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) {
if len(ws) == 0 {
return nil, "", fmt.Errorf("at least one writer must be passed")
}
@ -395,7 +423,7 @@ func readerFromEntries(entries ...*entry) io.Reader {
func importTar(in io.ReaderAt) (*tarFile, error) {
tf := &tarFile{}
pw, err := newCountReader(in)
pw, err := newCountReadSeeker(in)
if err != nil {
return nil, fmt.Errorf("failed to make position watcher: %w", err)
}
@ -571,19 +599,19 @@ func (tf *tempFiles) cleanupAll() error {
return errorutil.Aggregate(allErr)
}
func newCountReader(r io.ReaderAt) (*countReader, error) {
func newCountReadSeeker(r io.ReaderAt) (*countReadSeeker, error) {
pos := int64(0)
return &countReader{r: r, cPos: &pos}, nil
return &countReadSeeker{r: r, cPos: &pos}, nil
}
type countReader struct {
type countReadSeeker struct {
r io.ReaderAt
cPos *int64
mu sync.Mutex
}
func (cr *countReader) Read(p []byte) (int, error) {
func (cr *countReadSeeker) Read(p []byte) (int, error) {
cr.mu.Lock()
defer cr.mu.Unlock()
@ -594,7 +622,7 @@ func (cr *countReader) Read(p []byte) (int, error) {
return n, err
}
func (cr *countReader) Seek(offset int64, whence int) (int64, error) {
func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) {
cr.mu.Lock()
defer cr.mu.Unlock()
@ -615,7 +643,7 @@ func (cr *countReader) Seek(offset int64, whence int) (int64, error) {
return offset, nil
}
func (cr *countReader) currentPos() int64 {
func (cr *countReadSeeker) currentPos() int64 {
cr.mu.Lock()
defer cr.mu.Unlock()

View File

@ -150,10 +150,10 @@ func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) {
allErr = append(allErr, err)
continue
}
if tocSize <= 0 {
if tocOffset >= 0 && tocSize <= 0 {
tocSize = sr.Size() - tocOffset - fSize
}
if tocSize < int64(len(maybeTocBytes)) {
if tocOffset >= 0 && tocSize < int64(len(maybeTocBytes)) {
maybeTocBytes = maybeTocBytes[:tocSize]
}
r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts)
@ -207,8 +207,16 @@ func (r *Reader) initFields() error {
uname := map[int]string{}
gname := map[int]string{}
var lastRegEnt *TOCEntry
for _, ent := range r.toc.Entries {
var chunkTopIndex int
for i, ent := range r.toc.Entries {
ent.Name = cleanEntryName(ent.Name)
switch ent.Type {
case "reg", "chunk":
if ent.Offset != r.toc.Entries[chunkTopIndex].Offset {
chunkTopIndex = i
}
ent.chunkTopIndex = chunkTopIndex
}
if ent.Type == "reg" {
lastRegEnt = ent
}
@ -294,7 +302,7 @@ func (r *Reader) initFields() error {
if e.isDataType() {
e.nextOffset = lastOffset
}
if e.Offset != 0 {
if e.Offset != 0 && e.InnerOffset == 0 {
lastOffset = e.Offset
}
}
@ -488,6 +496,14 @@ func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) {
//
// Name must be absolute path or one that is relative to root.
func (r *Reader) OpenFile(name string) (*io.SectionReader, error) {
fr, err := r.newFileReader(name)
if err != nil {
return nil, err
}
return io.NewSectionReader(fr, 0, fr.size), nil
}
func (r *Reader) newFileReader(name string) (*fileReader, error) {
name = cleanEntryName(name)
ent, ok := r.Lookup(name)
if !ok {
@ -505,11 +521,19 @@ func (r *Reader) OpenFile(name string) (*io.SectionReader, error) {
Err: errors.New("not a regular file"),
}
}
fr := &fileReader{
return &fileReader{
r: r,
size: ent.Size,
ents: r.getChunks(ent),
}, nil
}
func (r *Reader) OpenFileWithPreReader(name string, preRead func(*TOCEntry, io.Reader) error) (*io.SectionReader, error) {
fr, err := r.newFileReader(name)
if err != nil {
return nil, err
}
fr.preRead = preRead
return io.NewSectionReader(fr, 0, fr.size), nil
}
@ -521,9 +545,10 @@ func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry {
}
type fileReader struct {
r *Reader
size int64
ents []*TOCEntry // 1 or more reg/chunk entries
r *Reader
size int64
ents []*TOCEntry // 1 or more reg/chunk entries
preRead func(*TOCEntry, io.Reader) error
}
func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) {
@ -578,10 +603,48 @@ func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) {
return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err)
}
defer dr.Close()
if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil {
return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err)
if fr.preRead == nil {
if n, err := io.CopyN(io.Discard, dr, ent.InnerOffset+off); n != ent.InnerOffset+off || err != nil {
return 0, fmt.Errorf("discard of %d bytes != %v, %v", ent.InnerOffset+off, n, err)
}
return io.ReadFull(dr, p)
}
return io.ReadFull(dr, p)
var retN int
var retErr error
var found bool
var nr int64
for _, e := range fr.r.toc.Entries[ent.chunkTopIndex:] {
if !e.isDataType() {
continue
}
if e.Offset != fr.r.toc.Entries[ent.chunkTopIndex].Offset {
break
}
if in, err := io.CopyN(io.Discard, dr, e.InnerOffset-nr); err != nil || in != e.InnerOffset-nr {
return 0, fmt.Errorf("discard of remaining %d bytes != %v, %v", e.InnerOffset-nr, in, err)
}
nr = e.InnerOffset
if e == ent {
found = true
if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil {
return 0, fmt.Errorf("discard of offset %d bytes != %v, %v", off, n, err)
}
retN, retErr = io.ReadFull(dr, p)
nr += off + int64(retN)
continue
}
cr := &countReader{r: io.LimitReader(dr, e.ChunkSize)}
if err := fr.preRead(e, cr); err != nil {
return 0, fmt.Errorf("failed to pre read: %w", err)
}
nr += cr.n
}
if !found {
return 0, fmt.Errorf("fileReader.ReadAt: target entry not found")
}
return retN, retErr
}
// A Writer writes stargz files.
@ -599,11 +662,20 @@ type Writer struct {
lastGroupname map[int]string
compressor Compressor
uncompressedCounter *countWriteFlusher
// ChunkSize optionally controls the maximum number of bytes
// of data of a regular file that can be written in one gzip
// stream before a new gzip stream is started.
// Zero means to use a default, currently 4 MiB.
ChunkSize int
// MinChunkSize optionally controls the minimum number of bytes
// of data must be written in one gzip stream before a new gzip
// NOTE: This adds a TOC property that stargz snapshotter < v0.13.0 doesn't understand.
MinChunkSize int
needsOpenGzEntries map[string]struct{}
}
// currentCompressionWriter writes to the current w.gz field, which can
@ -646,6 +718,9 @@ func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) {
if err != nil {
return nil, fmt.Errorf("failed to parse footer: %w", err)
}
if blobPayloadSize < 0 {
blobPayloadSize = sr.Size()
}
return c.Reader(io.LimitReader(sr, blobPayloadSize))
}
@ -672,11 +747,12 @@ func NewWriterWithCompressor(w io.Writer, c Compressor) *Writer {
bw := bufio.NewWriter(w)
cw := &countWriter{w: bw}
return &Writer{
bw: bw,
cw: cw,
toc: &JTOC{Version: 1},
diffHash: sha256.New(),
compressor: c,
bw: bw,
cw: cw,
toc: &JTOC{Version: 1},
diffHash: sha256.New(),
compressor: c,
uncompressedCounter: &countWriteFlusher{},
}
}
@ -717,6 +793,20 @@ func (w *Writer) closeGz() error {
return nil
}
func (w *Writer) flushGz() error {
if w.closed {
return errors.New("flush on closed Writer")
}
if w.gz != nil {
if f, ok := w.gz.(interface {
Flush() error
}); ok {
return f.Flush()
}
}
return nil
}
// nameIfChanged returns name, unless it was the already the value of (*mp)[id],
// in which case it returns the empty string.
func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
@ -736,6 +826,9 @@ func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
func (w *Writer) condOpenGz() (err error) {
if w.gz == nil {
w.gz, err = w.compressor.Writer(w.cw)
if w.gz != nil {
w.gz = w.uncompressedCounter.register(w.gz)
}
}
return
}
@ -784,6 +877,8 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error {
if lossless {
tr.RawAccounting = true
}
prevOffset := w.cw.n
var prevOffsetUncompressed int64
for {
h, err := tr.Next()
if err == io.EOF {
@ -883,10 +978,6 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error {
totalSize := ent.Size // save it before we destroy ent
tee := io.TeeReader(tr, payloadDigest.Hash())
for written < totalSize {
if err := w.closeGz(); err != nil {
return err
}
chunkSize := int64(w.chunkSize())
remain := totalSize - written
if remain < chunkSize {
@ -894,7 +985,23 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error {
} else {
ent.ChunkSize = chunkSize
}
ent.Offset = w.cw.n
// We flush the underlying compression writer here to correctly calculate "w.cw.n".
if err := w.flushGz(); err != nil {
return err
}
if w.needsOpenGz(ent) || w.cw.n-prevOffset >= int64(w.MinChunkSize) {
if err := w.closeGz(); err != nil {
return err
}
ent.Offset = w.cw.n
prevOffset = ent.Offset
prevOffsetUncompressed = w.uncompressedCounter.n
} else {
ent.Offset = prevOffset
ent.InnerOffset = w.uncompressedCounter.n - prevOffsetUncompressed
}
ent.ChunkOffset = written
chunkDigest := digest.Canonical.Digester()
@ -940,6 +1047,17 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error {
return err
}
func (w *Writer) needsOpenGz(ent *TOCEntry) bool {
if ent.Type != "reg" {
return false
}
if w.needsOpenGzEntries == nil {
return false
}
_, ok := w.needsOpenGzEntries[ent.Name]
return ok
}
// DiffID returns the SHA-256 of the uncompressed tar bytes.
// It is only valid to call DiffID after Close.
func (w *Writer) DiffID() string {
@ -956,6 +1074,28 @@ func maxFooterSize(blobSize int64, decompressors ...Decompressor) (res int64) {
}
func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) {
if tocOff < 0 {
// This means that TOC isn't contained in the blob.
// We pass nil reader to ParseTOC and expect that ParseTOC acquire TOC from
// the external location.
start := time.Now()
toc, tocDgst, err := d.ParseTOC(nil)
if err != nil {
return nil, err
}
if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil {
opts.telemetry.GetTocLatency(start)
}
if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil {
opts.telemetry.DeserializeTocLatency(start)
}
return &Reader{
sr: sr,
toc: toc,
tocDigest: tocDgst,
decompressor: d,
}, nil
}
if len(tocBytes) > 0 {
start := time.Now()
toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes))
@ -1021,6 +1161,37 @@ func (cw *countWriter) Write(p []byte) (n int, err error) {
return
}
type countWriteFlusher struct {
io.WriteCloser
n int64
}
func (wc *countWriteFlusher) register(w io.WriteCloser) io.WriteCloser {
wc.WriteCloser = w
return wc
}
func (wc *countWriteFlusher) Write(p []byte) (n int, err error) {
n, err = wc.WriteCloser.Write(p)
wc.n += int64(n)
return
}
func (wc *countWriteFlusher) Flush() error {
if f, ok := wc.WriteCloser.(interface {
Flush() error
}); ok {
return f.Flush()
}
return nil
}
func (wc *countWriteFlusher) Close() error {
err := wc.WriteCloser.Close()
wc.WriteCloser = nil
return err
}
// isGzip reports whether br is positioned right before an upcoming gzip stream.
// It does not consume any bytes from br.
func isGzip(br *bufio.Reader) bool {
@ -1039,3 +1210,14 @@ func positive(n int64) int64 {
}
return n
}
type countReader struct {
r io.Reader
n int64
}
func (cr *countReader) Read(p []byte) (n int, err error) {
n, err = cr.r.Read(p)
cr.n += int64(n)
return
}

View File

@ -60,7 +60,7 @@ type GzipCompressor struct {
compressionLevel int
}
func (gc *GzipCompressor) Writer(w io.Writer) (io.WriteCloser, error) {
func (gc *GzipCompressor) Writer(w io.Writer) (WriteFlushCloser, error) {
return gzip.NewWriterLevel(w, gc.compressionLevel)
}

View File

@ -31,6 +31,7 @@ import (
"errors"
"fmt"
"io"
"math/rand"
"os"
"path/filepath"
"reflect"
@ -44,21 +45,27 @@ import (
digest "github.com/opencontainers/go-digest"
)
func init() {
rand.Seed(time.Now().UnixNano())
}
// TestingController is Compression with some helper methods necessary for testing.
type TestingController interface {
Compression
CountStreams(*testing.T, []byte) int
TestStreams(t *testing.T, b []byte, streams []int64)
DiffIDOf(*testing.T, []byte) string
String() string
}
// CompressionTestSuite tests this pkg with controllers can build valid eStargz blobs and parse them.
func CompressionTestSuite(t *testing.T, controllers ...TestingController) {
func CompressionTestSuite(t *testing.T, controllers ...TestingControllerFactory) {
t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) })
t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) })
t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) })
}
type TestingControllerFactory func() TestingController
const (
uncompressedType int = iota
gzipType
@ -75,11 +82,12 @@ var allowedPrefix = [4]string{"", "./", "/", "../"}
// testBuild tests the resulting stargz blob built by this pkg has the same
// contents as the normal stargz blob.
func testBuild(t *testing.T, controllers ...TestingController) {
func testBuild(t *testing.T, controllers ...TestingControllerFactory) {
tests := []struct {
name string
chunkSize int
in []tarEntry
name string
chunkSize int
minChunkSize []int
in []tarEntry
}{
{
name: "regfiles and directories",
@ -108,11 +116,14 @@ func testBuild(t *testing.T, controllers ...TestingController) {
),
},
{
name: "various files",
chunkSize: 4,
name: "various files",
chunkSize: 4,
minChunkSize: []int{0, 64000},
in: tarOf(
file("baz.txt", "bazbazbazbazbazbazbaz"),
file("foo.txt", "a"),
file("foo1.txt", "a"),
file("bar/foo2.txt", "b"),
file("foo3.txt", "c"),
symlink("barlink", "test/bar.txt"),
dir("test/"),
dir("dev/"),
@ -144,99 +155,112 @@ func testBuild(t *testing.T, controllers ...TestingController) {
},
}
for _, tt := range tests {
if len(tt.minChunkSize) == 0 {
tt.minChunkSize = []int{0}
}
for _, srcCompression := range srcCompressions {
srcCompression := srcCompression
for _, cl := range controllers {
cl := cl
for _, newCL := range controllers {
newCL := newCL
for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
srcTarFormat := srcTarFormat
for _, prefix := range allowedPrefix {
prefix := prefix
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s", cl, prefix, srcCompression, srcTarFormat), func(t *testing.T) {
tarBlob := buildTar(t, tt.in, prefix, srcTarFormat)
// Test divideEntries()
entries, err := sortEntries(tarBlob, nil, nil) // identical order
if err != nil {
t.Fatalf("failed to parse tar: %v", err)
}
var merged []*entry
for _, part := range divideEntries(entries, 4) {
merged = append(merged, part...)
}
if !reflect.DeepEqual(entries, merged) {
for _, e := range entries {
t.Logf("Original: %v", e.header)
for _, minChunkSize := range tt.minChunkSize {
minChunkSize := minChunkSize
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *testing.T) {
tarBlob := buildTar(t, tt.in, prefix, srcTarFormat)
// Test divideEntries()
entries, err := sortEntries(tarBlob, nil, nil) // identical order
if err != nil {
t.Fatalf("failed to parse tar: %v", err)
}
for _, e := range merged {
t.Logf("Merged: %v", e.header)
var merged []*entry
for _, part := range divideEntries(entries, 4) {
merged = append(merged, part...)
}
if !reflect.DeepEqual(entries, merged) {
for _, e := range entries {
t.Logf("Original: %v", e.header)
}
for _, e := range merged {
t.Logf("Merged: %v", e.header)
}
t.Errorf("divided entries couldn't be merged")
return
}
t.Errorf("divided entries couldn't be merged")
return
}
// Prepare sample data
wantBuf := new(bytes.Buffer)
sw := NewWriterWithCompressor(wantBuf, cl)
sw.ChunkSize = tt.chunkSize
if err := sw.AppendTar(tarBlob); err != nil {
t.Fatalf("failed to append tar to want stargz: %v", err)
}
if _, err := sw.Close(); err != nil {
t.Fatalf("failed to prepare want stargz: %v", err)
}
wantData := wantBuf.Bytes()
want, err := Open(io.NewSectionReader(
bytes.NewReader(wantData), 0, int64(len(wantData))),
WithDecompressors(cl),
)
if err != nil {
t.Fatalf("failed to parse the want stargz: %v", err)
}
// Prepare sample data
cl1 := newCL()
wantBuf := new(bytes.Buffer)
sw := NewWriterWithCompressor(wantBuf, cl1)
sw.MinChunkSize = minChunkSize
sw.ChunkSize = tt.chunkSize
if err := sw.AppendTar(tarBlob); err != nil {
t.Fatalf("failed to append tar to want stargz: %v", err)
}
if _, err := sw.Close(); err != nil {
t.Fatalf("failed to prepare want stargz: %v", err)
}
wantData := wantBuf.Bytes()
want, err := Open(io.NewSectionReader(
bytes.NewReader(wantData), 0, int64(len(wantData))),
WithDecompressors(cl1),
)
if err != nil {
t.Fatalf("failed to parse the want stargz: %v", err)
}
// Prepare testing data
rc, err := Build(compressBlob(t, tarBlob, srcCompression),
WithChunkSize(tt.chunkSize), WithCompression(cl))
if err != nil {
t.Fatalf("failed to build stargz: %v", err)
}
defer rc.Close()
gotBuf := new(bytes.Buffer)
if _, err := io.Copy(gotBuf, rc); err != nil {
t.Fatalf("failed to copy built stargz blob: %v", err)
}
gotData := gotBuf.Bytes()
got, err := Open(io.NewSectionReader(
bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))),
WithDecompressors(cl),
)
if err != nil {
t.Fatalf("failed to parse the got stargz: %v", err)
}
// Prepare testing data
var opts []Option
if minChunkSize > 0 {
opts = append(opts, WithMinChunkSize(minChunkSize))
}
cl2 := newCL()
rc, err := Build(compressBlob(t, tarBlob, srcCompression),
append(opts, WithChunkSize(tt.chunkSize), WithCompression(cl2))...)
if err != nil {
t.Fatalf("failed to build stargz: %v", err)
}
defer rc.Close()
gotBuf := new(bytes.Buffer)
if _, err := io.Copy(gotBuf, rc); err != nil {
t.Fatalf("failed to copy built stargz blob: %v", err)
}
gotData := gotBuf.Bytes()
got, err := Open(io.NewSectionReader(
bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))),
WithDecompressors(cl2),
)
if err != nil {
t.Fatalf("failed to parse the got stargz: %v", err)
}
// Check DiffID is properly calculated
rc.Close()
diffID := rc.DiffID()
wantDiffID := cl.DiffIDOf(t, gotData)
if diffID.String() != wantDiffID {
t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
}
// Check DiffID is properly calculated
rc.Close()
diffID := rc.DiffID()
wantDiffID := cl2.DiffIDOf(t, gotData)
if diffID.String() != wantDiffID {
t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
}
// Compare as stargz
if !isSameVersion(t, cl, wantData, gotData) {
t.Errorf("built stargz hasn't same json")
return
}
if !isSameEntries(t, want, got) {
t.Errorf("built stargz isn't same as the original")
return
}
// Compare as stargz
if !isSameVersion(t, cl1, wantData, cl2, gotData) {
t.Errorf("built stargz hasn't same json")
return
}
if !isSameEntries(t, want, got) {
t.Errorf("built stargz isn't same as the original")
return
}
// Compare as tar.gz
if !isSameTarGz(t, cl, wantData, gotData) {
t.Errorf("built stargz isn't same tar.gz")
return
}
})
// Compare as tar.gz
if !isSameTarGz(t, cl1, wantData, cl2, gotData) {
t.Errorf("built stargz isn't same tar.gz")
return
}
})
}
}
}
}
@ -244,13 +268,13 @@ func testBuild(t *testing.T, controllers ...TestingController) {
}
}
func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool {
aGz, err := controller.Reader(bytes.NewReader(a))
func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
aGz, err := cla.Reader(bytes.NewReader(a))
if err != nil {
t.Fatalf("failed to read A")
}
defer aGz.Close()
bGz, err := controller.Reader(bytes.NewReader(b))
bGz, err := clb.Reader(bytes.NewReader(b))
if err != nil {
t.Fatalf("failed to read B")
}
@ -304,12 +328,12 @@ func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool {
return true
}
func isSameVersion(t *testing.T, controller TestingController, a, b []byte) bool {
aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), controller)
func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla)
if err != nil {
t.Fatalf("failed to parse A: %v", err)
}
bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), controller)
bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), clb)
if err != nil {
t.Fatalf("failed to parse B: %v", err)
}
@ -463,7 +487,7 @@ func equalEntry(a, b *TOCEntry) bool {
a.GID == b.GID &&
a.Uname == b.Uname &&
a.Gname == b.Gname &&
(a.Offset > 0) == (b.Offset > 0) &&
(a.Offset >= 0) == (b.Offset >= 0) &&
(a.NextOffset() > 0) == (b.NextOffset() > 0) &&
a.DevMajor == b.DevMajor &&
a.DevMinor == b.DevMinor &&
@ -510,14 +534,15 @@ func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string {
const chunkSize = 3
// type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int)
type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController)
type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory)
// testDigestAndVerify runs specified checks against sample stargz blobs.
func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) {
tests := []struct {
name string
tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry)
checks []check
name string
tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry)
checks []check
minChunkSize []int
}{
{
name: "no-regfile",
@ -544,6 +569,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
regDigest(t, "test/bar.txt", "bbb", dgstMap),
)
},
minChunkSize: []int{0, 64000},
checks: []check{
checkStargzTOC,
checkVerifyTOC,
@ -581,11 +607,14 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
},
},
{
name: "with-non-regfiles",
name: "with-non-regfiles",
minChunkSize: []int{0, 64000},
tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
return tarOf(
regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap),
regDigest(t, "foo.txt", "a", dgstMap),
regDigest(t, "bar/foo2.txt", "b", dgstMap),
regDigest(t, "foo3.txt", "c", dgstMap),
symlink("barlink", "test/bar.txt"),
dir("test/"),
regDigest(t, "test/bar.txt", "testbartestbar", dgstMap),
@ -599,6 +628,8 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
checkVerifyInvalidStargzFail(buildTar(t, tarOf(
file("baz.txt", "bazbazbazbazbazbazbaz"),
file("foo.txt", "a"),
file("bar/foo2.txt", "b"),
file("foo3.txt", "c"),
symlink("barlink", "test/bar.txt"),
dir("test/"),
file("test/bar.txt", "testbartestbar"),
@ -612,38 +643,45 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
}
for _, tt := range tests {
if len(tt.minChunkSize) == 0 {
tt.minChunkSize = []int{0}
}
for _, srcCompression := range srcCompressions {
srcCompression := srcCompression
for _, cl := range controllers {
cl := cl
for _, newCL := range controllers {
newCL := newCL
for _, prefix := range allowedPrefix {
prefix := prefix
for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
srcTarFormat := srcTarFormat
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s", cl, prefix, srcTarFormat), func(t *testing.T) {
// Get original tar file and chunk digests
dgstMap := make(map[string]digest.Digest)
tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat)
for _, minChunkSize := range tt.minChunkSize {
minChunkSize := minChunkSize
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *testing.T) {
// Get original tar file and chunk digests
dgstMap := make(map[string]digest.Digest)
tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat)
rc, err := Build(compressBlob(t, tarBlob, srcCompression),
WithChunkSize(chunkSize), WithCompression(cl))
if err != nil {
t.Fatalf("failed to convert stargz: %v", err)
}
tocDigest := rc.TOCDigest()
defer rc.Close()
buf := new(bytes.Buffer)
if _, err := io.Copy(buf, rc); err != nil {
t.Fatalf("failed to copy built stargz blob: %v", err)
}
newStargz := buf.Bytes()
// NoPrefetchLandmark is added during `Bulid`, which is expected behaviour.
dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents})
cl := newCL()
rc, err := Build(compressBlob(t, tarBlob, srcCompression),
WithChunkSize(chunkSize), WithCompression(cl))
if err != nil {
t.Fatalf("failed to convert stargz: %v", err)
}
tocDigest := rc.TOCDigest()
defer rc.Close()
buf := new(bytes.Buffer)
if _, err := io.Copy(buf, rc); err != nil {
t.Fatalf("failed to copy built stargz blob: %v", err)
}
newStargz := buf.Bytes()
// NoPrefetchLandmark is added during `Bulid`, which is expected behaviour.
dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents})
for _, check := range tt.checks {
check(t, newStargz, tocDigest, dgstMap, cl)
}
})
for _, check := range tt.checks {
check(t, newStargz, tocDigest, dgstMap, cl, newCL)
}
})
}
}
}
}
@ -654,7 +692,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
// checkStargzTOC checks the TOC JSON of the passed stargz has the expected
// digest and contains valid chunks. It walks all entries in the stargz and
// checks all chunk digests stored to the TOC JSON match the actual contents.
func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
sgz, err := Open(
io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
WithDecompressors(controller),
@ -765,7 +803,7 @@ func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM
// checkVerifyTOC checks the verification works for the TOC JSON of the passed
// stargz. It walks all entries in the stargz and checks the verifications for
// all chunks work.
func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
sgz, err := Open(
io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
WithDecompressors(controller),
@ -846,7 +884,7 @@ func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM
// checkVerifyInvalidTOCEntryFail checks if misconfigured TOC JSON can be
// detected during the verification and the verification returns an error.
func checkVerifyInvalidTOCEntryFail(filename string) check {
return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
funcs := map[string]rewriteFunc{
"lost digest in a entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
var found bool
@ -920,8 +958,9 @@ func checkVerifyInvalidTOCEntryFail(filename string) check {
// checkVerifyInvalidStargzFail checks if the verification detects that the
// given stargz file doesn't match to the expected digest and returns error.
func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(controller))
return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
cl := newController()
rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl))
if err != nil {
t.Fatalf("failed to convert stargz: %v", err)
}
@ -934,7 +973,7 @@ func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
sgz, err := Open(
io.NewSectionReader(bytes.NewReader(mStargz), 0, int64(len(mStargz))),
WithDecompressors(controller),
WithDecompressors(cl),
)
if err != nil {
t.Fatalf("failed to parse converted stargz: %v", err)
@ -951,7 +990,7 @@ func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
// checkVerifyBrokenContentFail checks if the verifier detects broken contents
// that doesn't match to the expected digest and returns error.
func checkVerifyBrokenContentFail(filename string) check {
return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
// Parse stargz file
sgz, err := Open(
io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
@ -1070,7 +1109,10 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT
}
// Decode the TOC JSON
tocReader := io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize)
var tocReader io.Reader
if tocOffset >= 0 {
tocReader = io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize)
}
decodedJTOC, _, err = controller.ParseTOC(tocReader)
if err != nil {
return nil, 0, fmt.Errorf("failed to parse TOC: %w", err)
@ -1078,28 +1120,31 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT
return decodedJTOC, tocOffset, nil
}
func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) {
const content = "Some contents"
invalidUtf8 := "\xff\xfe\xfd"
xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8}
sampleOwner := owner{uid: 50, gid: 100}
data64KB := randomContents(64000)
tests := []struct {
name string
chunkSize int
in []tarEntry
want []stargzCheck
wantNumGz int // expected number of streams
name string
chunkSize int
minChunkSize int
in []tarEntry
want []stargzCheck
wantNumGz int // expected number of streams
wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz
wantFailOnLossLess bool
wantTOCVersion int // default = 1
}{
{
name: "empty",
in: tarOf(),
wantNumGz: 2, // empty tar + TOC + footer
wantNumGzLossLess: 3, // empty tar + TOC + footer
name: "empty",
in: tarOf(),
wantNumGz: 2, // (empty tar) + TOC + footer
want: checks(
numTOCEntries(0),
),
@ -1195,7 +1240,7 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
dir("foo/"),
file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"),
),
wantNumGz: 9,
wantNumGz: 9, // dir + big.txt(6 chunks) + TOC + footer
want: checks(
numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file
hasDir("foo/"),
@ -1326,23 +1371,108 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
mustSameEntry("foo/foo1", "foolink"),
),
},
{
name: "several_files_in_chunk",
minChunkSize: 8000,
in: tarOf(
dir("foo/"),
file("foo/foo1", data64KB),
file("foo2", "bb"),
file("foo22", "ccc"),
dir("bar/"),
file("bar/bar.txt", "aaa"),
file("foo3", data64KB),
),
// NOTE: we assume that the compressed "data64KB" is still larger than 8KB
wantNumGz: 4, // dir+foo1, foo2+foo22+dir+bar.txt+foo3, TOC, footer
want: checks(
numTOCEntries(7), // dir, foo1, foo2, foo22, dir, bar.txt, foo3
hasDir("foo/"),
hasDir("bar/"),
hasFileLen("foo/foo1", len(data64KB)),
hasFileLen("foo2", len("bb")),
hasFileLen("foo22", len("ccc")),
hasFileLen("bar/bar.txt", len("aaa")),
hasFileLen("foo3", len(data64KB)),
hasFileDigest("foo/foo1", digestFor(data64KB)),
hasFileDigest("foo2", digestFor("bb")),
hasFileDigest("foo22", digestFor("ccc")),
hasFileDigest("bar/bar.txt", digestFor("aaa")),
hasFileDigest("foo3", digestFor(data64KB)),
hasFileContentsWithPreRead("foo22", 0, "ccc", chunkInfo{"foo2", "bb"}, chunkInfo{"bar/bar.txt", "aaa"}, chunkInfo{"foo3", data64KB}),
hasFileContentsRange("foo/foo1", 0, data64KB),
hasFileContentsRange("foo2", 0, "bb"),
hasFileContentsRange("foo2", 1, "b"),
hasFileContentsRange("foo22", 0, "ccc"),
hasFileContentsRange("foo22", 1, "cc"),
hasFileContentsRange("foo22", 2, "c"),
hasFileContentsRange("bar/bar.txt", 0, "aaa"),
hasFileContentsRange("bar/bar.txt", 1, "aa"),
hasFileContentsRange("bar/bar.txt", 2, "a"),
hasFileContentsRange("foo3", 0, data64KB),
hasFileContentsRange("foo3", 1, data64KB[1:]),
hasFileContentsRange("foo3", 2, data64KB[2:]),
hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]),
hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]),
),
},
{
name: "several_files_in_chunk_chunked",
minChunkSize: 8000,
chunkSize: 32000,
in: tarOf(
dir("foo/"),
file("foo/foo1", data64KB),
file("foo2", "bb"),
dir("bar/"),
file("foo3", data64KB),
),
// NOTE: we assume that the compressed chunk of "data64KB" is still larger than 8KB
wantNumGz: 6, // dir+foo1(1), foo1(2), foo2+dir+foo3(1), foo3(2), TOC, footer
want: checks(
numTOCEntries(7), // dir, foo1(2 chunks), foo2, dir, foo3(2 chunks)
hasDir("foo/"),
hasDir("bar/"),
hasFileLen("foo/foo1", len(data64KB)),
hasFileLen("foo2", len("bb")),
hasFileLen("foo3", len(data64KB)),
hasFileDigest("foo/foo1", digestFor(data64KB)),
hasFileDigest("foo2", digestFor("bb")),
hasFileDigest("foo3", digestFor(data64KB)),
hasFileContentsWithPreRead("foo2", 0, "bb", chunkInfo{"foo3", data64KB[:32000]}),
hasFileContentsRange("foo/foo1", 0, data64KB),
hasFileContentsRange("foo/foo1", 1, data64KB[1:]),
hasFileContentsRange("foo/foo1", 2, data64KB[2:]),
hasFileContentsRange("foo/foo1", len(data64KB)/2, data64KB[len(data64KB)/2:]),
hasFileContentsRange("foo/foo1", len(data64KB)-1, data64KB[len(data64KB)-1:]),
hasFileContentsRange("foo2", 0, "bb"),
hasFileContentsRange("foo2", 1, "b"),
hasFileContentsRange("foo3", 0, data64KB),
hasFileContentsRange("foo3", 1, data64KB[1:]),
hasFileContentsRange("foo3", 2, data64KB[2:]),
hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]),
hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]),
),
},
}
for _, tt := range tests {
for _, cl := range controllers {
cl := cl
for _, newCL := range controllers {
newCL := newCL
for _, prefix := range allowedPrefix {
prefix := prefix
for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
srcTarFormat := srcTarFormat
for _, lossless := range []bool{true, false} {
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", cl, prefix, lossless, srcTarFormat), func(t *testing.T) {
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *testing.T) {
var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat)
origTarDgstr := digest.Canonical.Digester()
tr = io.TeeReader(tr, origTarDgstr.Hash())
var stargzBuf bytes.Buffer
w := NewWriterWithCompressor(&stargzBuf, cl)
cl1 := newCL()
w := NewWriterWithCompressor(&stargzBuf, cl1)
w.ChunkSize = tt.chunkSize
w.MinChunkSize = tt.minChunkSize
if lossless {
err := w.AppendTarLossLess(tr)
if tt.wantFailOnLossLess {
@ -1366,7 +1496,7 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
if lossless {
// Check if the result blob reserves original tar metadata
rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl)
rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl1)
if err != nil {
t.Errorf("failed to decompress blob: %v", err)
return
@ -1385,32 +1515,71 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
}
diffID := w.DiffID()
wantDiffID := cl.DiffIDOf(t, b)
wantDiffID := cl1.DiffIDOf(t, b)
if diffID != wantDiffID {
t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
}
got := cl.CountStreams(t, b)
wantNumGz := tt.wantNumGz
if lossless && tt.wantNumGzLossLess > 0 {
wantNumGz = tt.wantNumGzLossLess
}
if got != wantNumGz {
t.Errorf("number of streams = %d; want %d", got, wantNumGz)
}
telemetry, checkCalled := newCalledTelemetry()
sr := io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b)))
r, err := Open(
io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))),
WithDecompressors(cl),
sr,
WithDecompressors(cl1),
WithTelemetry(telemetry),
)
if err != nil {
t.Fatalf("stargz.Open: %v", err)
}
if err := checkCalled(); err != nil {
wantTOCVersion := 1
if tt.wantTOCVersion > 0 {
wantTOCVersion = tt.wantTOCVersion
}
if r.toc.Version != wantTOCVersion {
t.Fatalf("invalid TOC Version %d; wanted %d", r.toc.Version, wantTOCVersion)
}
footerSize := cl1.FooterSize()
footerOffset := sr.Size() - footerSize
footer := make([]byte, footerSize)
if _, err := sr.ReadAt(footer, footerOffset); err != nil {
t.Errorf("failed to read footer: %v", err)
}
_, tocOffset, _, err := cl1.ParseFooter(footer)
if err != nil {
t.Errorf("failed to parse footer: %v", err)
}
if err := checkCalled(tocOffset >= 0); err != nil {
t.Errorf("telemetry failure: %v", err)
}
wantNumGz := tt.wantNumGz
if lossless && tt.wantNumGzLossLess > 0 {
wantNumGz = tt.wantNumGzLossLess
}
streamOffsets := []int64{0}
prevOffset := int64(-1)
streams := 0
for _, e := range r.toc.Entries {
if e.Offset > prevOffset {
streamOffsets = append(streamOffsets, e.Offset)
prevOffset = e.Offset
streams++
}
}
streams++ // TOC
if tocOffset >= 0 {
// toc is in the blob
streamOffsets = append(streamOffsets, tocOffset)
}
streams++ // footer
streamOffsets = append(streamOffsets, footerOffset)
if streams != wantNumGz {
t.Errorf("number of streams in TOC = %d; want %d", streams, wantNumGz)
}
t.Logf("testing streams: %+v", streamOffsets)
cl1.TestStreams(t, b, streamOffsets)
for _, want := range tt.want {
want.check(t, r)
}
@ -1422,7 +1591,12 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
}
}
func newCalledTelemetry() (telemetry *Telemetry, check func() error) {
type chunkInfo struct {
name string
data string
}
func newCalledTelemetry() (telemetry *Telemetry, check func(needsGetTOC bool) error) {
var getFooterLatencyCalled bool
var getTocLatencyCalled bool
var deserializeTocLatencyCalled bool
@ -1430,13 +1604,15 @@ func newCalledTelemetry() (telemetry *Telemetry, check func() error) {
func(time.Time) { getFooterLatencyCalled = true },
func(time.Time) { getTocLatencyCalled = true },
func(time.Time) { deserializeTocLatencyCalled = true },
}, func() error {
}, func(needsGetTOC bool) error {
var allErr []error
if !getFooterLatencyCalled {
allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called"))
}
if !getTocLatencyCalled {
allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called"))
if needsGetTOC {
if !getTocLatencyCalled {
allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called"))
}
}
if !deserializeTocLatencyCalled {
allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called"))
@ -1573,6 +1749,53 @@ func hasFileDigest(file string, digest string) stargzCheck {
})
}
func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck {
return stargzCheckFn(func(t *testing.T, r *Reader) {
extraMap := make(map[string]chunkInfo)
for _, e := range extra {
extraMap[e.name] = e
}
var extraNames []string
for n := range extraMap {
extraNames = append(extraNames, n)
}
f, err := r.OpenFileWithPreReader(file, func(e *TOCEntry, cr io.Reader) error {
t.Logf("On %q: got preread of %q", file, e.Name)
ex, ok := extraMap[e.Name]
if !ok {
t.Fatalf("fail on %q: unexpected entry %q: %+v, %+v", file, e.Name, e, extraNames)
}
got, err := io.ReadAll(cr)
if err != nil {
t.Fatalf("fail on %q: failed to read %q: %v", file, e.Name, err)
}
if ex.data != string(got) {
t.Fatalf("fail on %q: unexpected contents of %q: len=%d; want=%d", file, e.Name, len(got), len(ex.data))
}
delete(extraMap, e.Name)
return nil
})
if err != nil {
t.Fatal(err)
}
got := make([]byte, len(want))
n, err := f.ReadAt(got, int64(offset))
if err != nil {
t.Fatalf("ReadAt(len %d, offset %d, size %d) = %v, %v", len(got), offset, f.Size(), n, err)
}
if string(got) != want {
t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want)))
}
if len(extraMap) != 0 {
var exNames []string
for _, ex := range extraMap {
exNames = append(exNames, ex.name)
}
t.Fatalf("fail on %q: some entries aren't read: %+v", file, exNames)
}
})
}
func hasFileContentsRange(file string, offset int, want string) stargzCheck {
return stargzCheckFn(func(t *testing.T, r *Reader) {
f, err := r.OpenFile(file)
@ -1585,7 +1808,7 @@ func hasFileContentsRange(file string, offset int, want string) stargzCheck {
t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err)
}
if string(got) != want {
t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, got, want)
t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want)))
}
})
}
@ -1797,6 +2020,13 @@ func mustSameEntry(files ...string) stargzCheck {
})
}
func viewContent(c []byte) string {
if len(c) < 100 {
return string(c)
}
return string(c[:50]) + "...(omit)..." + string(c[50:100])
}
func tarOf(s ...tarEntry) []tarEntry { return s }
type tarEntry interface {
@ -2056,6 +2286,16 @@ func regDigest(t *testing.T, name string, contentStr string, digestMap map[strin
})
}
var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
func randomContents(n int) string {
b := make([]rune, n)
for i := range b {
b[i] = runes[rand.Intn(len(runes))]
}
return string(b)
}
func fileModeToTarMode(mode os.FileMode) (int64, error) {
h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "")
if err != nil {
@ -2073,3 +2313,54 @@ func (f fileInfoOnlyMode) Mode() os.FileMode { return os.FileMode(f) }
func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() }
func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() }
func (f fileInfoOnlyMode) Sys() interface{} { return nil }
func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) {
if len(streams) == 0 {
return // nop
}
wants := map[int64]struct{}{}
for _, s := range streams {
wants[s] = struct{}{}
}
len0 := len(b)
br := bytes.NewReader(b)
zr := new(gzip.Reader)
t.Logf("got gzip streams:")
numStreams := 0
for {
zoff := len0 - br.Len()
if err := zr.Reset(br); err != nil {
if err == io.EOF {
return
}
t.Fatalf("countStreams(gzip), Reset: %v", err)
}
zr.Multistream(false)
n, err := io.Copy(io.Discard, zr)
if err != nil {
t.Fatalf("countStreams(gzip), Copy: %v", err)
}
var extra string
if len(zr.Header.Extra) > 0 {
extra = fmt.Sprintf("; extra=%q", zr.Header.Extra)
}
t.Logf(" [%d] at %d in stargz, uncompressed length %d%s", numStreams, zoff, n, extra)
delete(wants, int64(zoff))
numStreams++
}
}
func GzipDiffIDOf(t *testing.T, b []byte) string {
h := sha256.New()
zr, err := gzip.NewReader(bytes.NewReader(b))
if err != nil {
t.Fatalf("diffIDOf(gzip): %v", err)
}
defer zr.Close()
if _, err := io.Copy(h, zr); err != nil {
t.Fatalf("diffIDOf(gzip).Copy: %v", err)
}
return fmt.Sprintf("sha256:%x", h.Sum(nil))
}

View File

@ -149,6 +149,12 @@ type TOCEntry struct {
// ChunkSize.
Offset int64 `json:"offset,omitempty"`
// InnerOffset is an optional field indicates uncompressed offset
// of this "reg" or "chunk" payload in a stream starts from Offset.
// This field enables to put multiple "reg" or "chunk" payloads
// in one chunk with having the same Offset but different InnerOffset.
InnerOffset int64 `json:"innerOffset,omitempty"`
nextOffset int64 // the Offset of the next entry with a non-zero Offset
// DevMajor is the major device number for "char" and "block" types.
@ -186,6 +192,9 @@ type TOCEntry struct {
ChunkDigest string `json:"chunkDigest,omitempty"`
children map[string]*TOCEntry
// chunkTopIndex is index of the entry where Offset starts in the blob.
chunkTopIndex int
}
// ModTime returns the entry's modification time.
@ -279,7 +288,10 @@ type Compressor interface {
// Writer returns WriteCloser to be used for writing a chunk to eStargz.
// Everytime a chunk is written, the WriteCloser is closed and Writer is
// called again for writing the next chunk.
Writer(w io.Writer) (io.WriteCloser, error)
//
// The returned writer should implement "Flush() error" function that flushes
// any pending compressed data to the underlying writer.
Writer(w io.Writer) (WriteFlushCloser, error)
// WriteTOCAndFooter is called to write JTOC to the passed Writer.
// diffHash calculates the DiffID (uncompressed sha256 hash) of the blob
@ -303,8 +315,12 @@ type Decompressor interface {
// payloadBlobSize is the (compressed) size of the blob payload (i.e. the size between
// the top until the TOC JSON).
//
// Here, tocSize is optional. If tocSize <= 0, it's by default the size of the range
// from tocOffset until the beginning of the footer (blob size - tocOff - FooterSize).
// If tocOffset < 0, we assume that TOC isn't contained in the blob and pass nil reader
// to ParseTOC. We expect that ParseTOC acquire TOC from the external location and return it.
//
// tocSize is optional. If tocSize <= 0, it's by default the size of the range from tocOffset until the beginning of the
// footer (blob size - tocOff - FooterSize).
// If blobPayloadSize < 0, blobPayloadSize become the blob size.
ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)
// ParseTOC parses TOC from the passed reader. The reader provides the partial contents
@ -313,5 +329,14 @@ type Decompressor interface {
// This function returns tocDgst that represents the digest of TOC that will be used
// to verify this blob. This must match to the value returned from
// Compressor.WriteTOCAndFooter that is used when creating this blob.
//
// If tocOffset returned by ParseFooter is < 0, we assume that TOC isn't contained in the blob.
// Pass nil reader to ParseTOC then we expect that ParseTOC acquire TOC from the external location
// and return it.
ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error)
}
type WriteFlushCloser interface {
io.WriteCloser
Flush() error
}

View File

@ -1,143 +1,160 @@
package reference
import "regexp"
import (
"regexp"
"strings"
)
var (
// alphaNumericRegexp defines the alpha numeric atom, typically a
const (
// alphaNumeric defines the alpha numeric atom, typically a
// component of names. This only allows lower case characters and digits.
alphaNumericRegexp = match(`[a-z0-9]+`)
alphaNumeric = `[a-z0-9]+`
// separatorRegexp defines the separators allowed to be embedded in name
// separator defines the separators allowed to be embedded in name
// components. This allow one period, one or two underscore and multiple
// dashes.
separatorRegexp = match(`(?:[._]|__|[-]*)`)
// dashes. Repeated dashes and underscores are intentionally treated
// differently. In order to support valid hostnames as name components,
// supporting repeated dash was added. Additionally double underscore is
// now allowed as a separator to loosen the restriction for previously
// supported names.
separator = `(?:[._]|__|[-]*)`
// nameComponentRegexp restricts registry path component names to start
// with at least one letter or number, with following parts able to be
// separated by one period, one or two underscore and multiple dashes.
nameComponentRegexp = expression(
alphaNumericRegexp,
optional(repeated(separatorRegexp, alphaNumericRegexp)))
// domainComponentRegexp restricts the registry domain component of a
// repository name to start with a component as defined by DomainRegexp
// and followed by an optional port.
domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
domainComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`
// The string counterpart for TagRegexp.
tag = `[\w][\w.-]{0,127}`
// The string counterpart for DigestRegexp.
digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`
// The string counterpart for IdentifierRegexp.
identifier = `([a-f0-9]{64})`
// The string counterpart for ShortIdentifierRegexp.
shortIdentifier = `([a-f0-9]{6,64})`
)
var (
// nameComponent restricts registry path component names to start
// with at least one letter or number, with following parts able to be
// separated by one period, one or two underscore and multiple dashes.
nameComponent = expression(
alphaNumeric,
optional(repeated(separator, alphaNumeric)))
domain = expression(
domainComponent,
optional(repeated(literal(`.`), domainComponent)),
optional(literal(`:`), `[0-9]+`))
// DomainRegexp defines the structure of potential domain components
// that may be part of image names. This is purposely a subset of what is
// allowed by DNS to ensure backwards compatibility with Docker image
// names.
DomainRegexp = expression(
domainComponentRegexp,
optional(repeated(literal(`.`), domainComponentRegexp)),
optional(literal(`:`), match(`[0-9]+`)))
DomainRegexp = re(domain)
// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
TagRegexp = match(`[\w][\w.-]{0,127}`)
TagRegexp = re(tag)
anchoredTag = anchored(tag)
// anchoredTagRegexp matches valid tag names, anchored at the start and
// end of the matched string.
anchoredTagRegexp = anchored(TagRegexp)
anchoredTagRegexp = re(anchoredTag)
// DigestRegexp matches valid digests.
DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
DigestRegexp = re(digestPat)
anchoredDigest = anchored(digestPat)
// anchoredDigestRegexp matches valid digests, anchored at the start and
// end of the matched string.
anchoredDigestRegexp = anchored(DigestRegexp)
anchoredDigestRegexp = re(anchoredDigest)
namePat = expression(
optional(domain, literal(`/`)),
nameComponent,
optional(repeated(literal(`/`), nameComponent)))
// NameRegexp is the format for the name component of references. The
// regexp has capturing groups for the domain and name part omitting
// the separating forward slash from either.
NameRegexp = expression(
optional(DomainRegexp, literal(`/`)),
nameComponentRegexp,
optional(repeated(literal(`/`), nameComponentRegexp)))
NameRegexp = re(namePat)
anchoredName = anchored(
optional(capture(domain), literal(`/`)),
capture(nameComponent,
optional(repeated(literal(`/`), nameComponent))))
// anchoredNameRegexp is used to parse a name value, capturing the
// domain and trailing components.
anchoredNameRegexp = anchored(
optional(capture(DomainRegexp), literal(`/`)),
capture(nameComponentRegexp,
optional(repeated(literal(`/`), nameComponentRegexp))))
anchoredNameRegexp = re(anchoredName)
referencePat = anchored(capture(namePat),
optional(literal(":"), capture(tag)),
optional(literal("@"), capture(digestPat)))
// ReferenceRegexp is the full supported format of a reference. The regexp
// is anchored and has capturing groups for name, tag, and digest
// components.
ReferenceRegexp = anchored(capture(NameRegexp),
optional(literal(":"), capture(TagRegexp)),
optional(literal("@"), capture(DigestRegexp)))
ReferenceRegexp = re(referencePat)
// IdentifierRegexp is the format for string identifier used as a
// content addressable identifier using sha256. These identifiers
// are like digests without the algorithm, since sha256 is used.
IdentifierRegexp = match(`([a-f0-9]{64})`)
IdentifierRegexp = re(identifier)
// ShortIdentifierRegexp is the format used to represent a prefix
// of an identifier. A prefix may be used to match a sha256 identifier
// within a list of trusted identifiers.
ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
ShortIdentifierRegexp = re(shortIdentifier)
anchoredIdentifier = anchored(identifier)
// anchoredIdentifierRegexp is used to check or match an
// identifier value, anchored at start and end of string.
anchoredIdentifierRegexp = anchored(IdentifierRegexp)
anchoredIdentifierRegexp = re(anchoredIdentifier)
anchoredShortIdentifier = anchored(shortIdentifier)
// anchoredShortIdentifierRegexp is used to check if a value
// is a possible identifier prefix, anchored at start and end
// of string.
anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
anchoredShortIdentifierRegexp = re(anchoredShortIdentifier)
)
// match compiles the string to a regular expression.
var match = regexp.MustCompile
// re compiles the string to a regular expression.
var re = regexp.MustCompile
// literal compiles s into a literal regular expression, escaping any regexp
// reserved characters.
func literal(s string) *regexp.Regexp {
re := match(regexp.QuoteMeta(s))
if _, complete := re.LiteralPrefix(); !complete {
panic("must be a literal")
}
return re
func literal(s string) string {
return regexp.QuoteMeta(s)
}
// expression defines a full expression, where each regular expression must
// follow the previous.
func expression(res ...*regexp.Regexp) *regexp.Regexp {
var s string
for _, re := range res {
s += re.String()
}
return match(s)
func expression(res ...string) string {
return strings.Join(res, "")
}
// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...*regexp.Regexp) *regexp.Regexp {
return match(group(expression(res...)).String() + `?`)
func optional(res ...string) string {
return group(expression(res...)) + `?`
}
// repeated wraps the regexp in a non-capturing group to get one or more
// matches.
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
return match(group(expression(res...)).String() + `+`)
func repeated(res ...string) string {
return group(expression(res...)) + `+`
}
// group wraps the regexp in a non-capturing group.
func group(res ...*regexp.Regexp) *regexp.Regexp {
return match(`(?:` + expression(res...).String() + `)`)
func group(res ...string) string {
return `(?:` + expression(res...) + `)`
}
// capture wraps the expression in a capturing group.
func capture(res ...*regexp.Regexp) *regexp.Regexp {
return match(`(` + expression(res...).String() + `)`)
func capture(res ...string) string {
return `(` + expression(res...) + `)`
}
// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
return match(`^` + expression(res...).String() + `$`)
func anchored(res ...string) string {
return `^` + expression(res...) + `$`
}

View File

@ -72,6 +72,8 @@ fedora_testing_task: &fedora_testing
TEST_DRIVER: "vfs"
- env:
TEST_DRIVER: "overlay"
- env:
TEST_DRIVER: "overlay-transient"
- env:
TEST_DRIVER: "fuse-overlay"
- env:

View File

@ -60,7 +60,7 @@ local-gccgo: ## build using gccgo on the host
GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build $(MOD_VENDOR) -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage
local-cross: ## cross build the binaries for arm, darwin, and freebsd
@for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \
@for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le linux/s390x linux/mips linux/mipsle linux/mips64 linux/mips64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \
os=`echo $${target} | cut -f1 -d/` ; \
arch=`echo $${target} | cut -f2 -d/` ; \
suffix=$${os}.$${arch} ; \
@ -117,7 +117,7 @@ help: ## this help
@awk 'BEGIN {FS = ":.*?## "} /^[a-z A-Z_-]+:.*?## / {gsub(" ",",",$$1);gsub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-21s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
vendor-in-container:
podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src golang make vendor
podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src golang:1.17 make vendor
vendor:
$(GO) mod tidy -compat=1.17

View File

@ -10,11 +10,28 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex"
digest "github.com/opencontainers/go-digest"
)
type containerLocations uint8
// The backing store is split in two json files, one (the volatile)
// that is written without fsync() meaning it isn't as robust to
// unclean shutdown
const (
stableContainerLocation containerLocations = 1 << iota
volatileContainerLocation
numContainerLocationIndex = iota
)
// containerLocationFromIndex converts a json-file index
// (0 .. numContainerLocationIndex-1) into the corresponding
// containerLocations bit flag.
func containerLocationFromIndex(index int) containerLocations {
	return 1 << index
}
// A Container is a reference to a read-write layer with metadata.
type Container struct {
// ID is either one which was specified at create-time, or a random
@ -64,6 +81,9 @@ type Container struct {
GIDMap []idtools.IDMap `json:"gidmap,omitempty"`
Flags map[string]interface{} `json:"flags,omitempty"`
// volatileStore is true if the container is from the volatile json file
volatileStore bool `json:"-"`
}
// rwContainerStore provides bookkeeping for information about Containers.
@ -115,11 +135,16 @@ type rwContainerStore interface {
// Containers returns a slice enumerating the known containers.
Containers() ([]Container, error)
// Clean up unreferenced datadirs
GarbageCollect() error
}
type containerStore struct {
lockfile Locker
lockfile *lockfile.LockFile
dir string
jsonPath [numContainerLocationIndex]string
lastWrite lockfile.LastWrite
containers []*Container
idindex *truncindex.TruncIndex
byid map[string]*Container
@ -142,6 +167,7 @@ func copyContainer(c *Container) *Container {
UIDMap: copyIDMap(c.UIDMap),
GIDMap: copyIDMap(c.GIDMap),
Flags: copyStringInterfaceMap(c.Flags),
volatileStore: c.volatileStore,
}
}
@ -176,6 +202,13 @@ func (c *Container) MountOpts() []string {
}
}
// containerLocation returns which of the two backing json files (stable or
// volatile) the given container record belongs to.
func containerLocation(c *Container) containerLocations {
	if c.volatileStore {
		return volatileContainerLocation
	}
	return stableContainerLocation
}
// startWritingWithReload makes sure the store is fresh if canReload, and locks it for writing.
// If this succeeds, the caller MUST call stopWriting().
//
@ -231,7 +264,7 @@ func (r *containerStore) startReading() error {
r.lockfile.Lock()
unlockFn = r.lockfile.Unlock
if _, err := r.load(true); err != nil {
if _, err := r.reloadIfChanged(true); err != nil {
return err
}
unlockFn()
@ -264,19 +297,20 @@ func (r *containerStore) stopReading() {
// if it is held for writing.
//
// If !lockedForWriting and this function fails, the return value indicates whether
// load() with lockedForWriting could succeed. In that case the caller MUST
// call load(), not reloadIfChanged() (because the “if changed” state will not
// be detected again).
// reloadIfChanged() with lockedForWriting could succeed.
func (r *containerStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
r.loadMut.Lock()
defer r.loadMut.Unlock()
modified, err := r.lockfile.Modified()
lastWrite, modified, err := r.lockfile.ModifiedSince(r.lastWrite)
if err != nil {
return false, err
}
if modified {
return r.load(lockedForWriting)
if tryLockedForWriting, err := r.load(lockedForWriting); err != nil {
return tryLockedForWriting, err // r.lastWrite is unchanged, so we will load the next time again.
}
r.lastWrite = lastWrite
}
return false, nil
}
@ -289,8 +323,37 @@ func (r *containerStore) Containers() ([]Container, error) {
return containers, nil
}
func (r *containerStore) containerspath() string {
return filepath.Join(r.dir, "containers.json")
// This looks for datadirs in the store directory that are not referenced
// by the json file and removes them. These can happen in the case of unclean
// shutdowns or regular restarts in transient store mode.
// GarbageCollect removes the data directories of containers that are no
// longer referenced by the store's json file(s). Such orphans can be left
// behind by unclean shutdowns, or by regular restarts in transient store
// mode. Removal is best-effort: it keeps going past failures and returns
// the first error encountered.
func (r *containerStore) GarbageCollect() error {
	entries, err := os.ReadDir(r.dir)
	if err != nil {
		// Unexpected, don't try any GC
		return err
	}
	// err is nil from here on; it is reused below to remember the first
	// removal failure.
	for _, entry := range entries {
		id := entry.Name()
		// Does it look like a datadir directory (a 64-hex-digit name)?
		if !entry.IsDir() || !nameLooksLikeID(id) {
			continue
		}
		// Is the id still referenced by the store?
		if r.byid[id] != nil {
			continue
		}
		// Otherwise remove the orphaned datadir
		moreErr := os.RemoveAll(filepath.Join(r.dir, id))
		// Propagate only the first error; continue with the other entries
		if moreErr != nil && err == nil {
			err = moreErr
		}
	}
	return err
}
func (r *containerStore) datadir(id string) string {
@ -303,37 +366,62 @@ func (r *containerStore) datapath(id, key string) string {
// load reloads the contents of the store from disk.
//
// Most callers should call reloadIfChanged() instead, to avoid overhead and to correctly
// manage r.lastWrite.
//
// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
// if it is held for writing.
//
// If !lockedForWriting and this function fails, the return value indicates whether
// retrying with lockedForWriting could succeed.
func (r *containerStore) load(lockedForWriting bool) (bool, error) {
rpath := r.containerspath()
data, err := os.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
return false, err
}
var modifiedLocations containerLocations
containers := []*Container{}
if len(data) != 0 {
if err := json.Unmarshal(data, &containers); err != nil {
return false, fmt.Errorf("loading %q: %w", rpath, err)
ids := make(map[string]*Container)
for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ {
location := containerLocationFromIndex(locationIndex)
rpath := r.jsonPath[locationIndex]
data, err := os.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
return false, err
}
locationContainers := []*Container{}
if len(data) != 0 {
if err := json.Unmarshal(data, &locationContainers); err != nil {
return false, fmt.Errorf("loading %q: %w", rpath, err)
}
}
for _, container := range locationContainers {
// There should be no duplicated ids between json files, but lets check to be sure
if ids[container.ID] != nil {
continue // skip invalid duplicated container
}
// Remember where the container came from
if location == volatileContainerLocation {
container.volatileStore = true
}
containers = append(containers, container)
ids[container.ID] = container
}
}
idlist := make([]string, 0, len(containers))
layers := make(map[string]*Container)
ids := make(map[string]*Container)
names := make(map[string]*Container)
var errorToResolveBySaving error // == nil
for n, container := range containers {
idlist = append(idlist, container.ID)
ids[container.ID] = containers[n]
layers[container.LayerID] = containers[n]
for _, name := range container.Names {
if conflict, ok := names[name]; ok {
r.removeName(conflict, name)
errorToResolveBySaving = errors.New("container store is inconsistent and the current caller does not hold a write lock")
modifiedLocations |= containerLocation(container)
}
names[name] = containers[n]
}
@ -348,34 +436,69 @@ func (r *containerStore) load(lockedForWriting bool) (bool, error) {
if !lockedForWriting {
return true, errorToResolveBySaving
}
return false, r.Save()
return false, r.save(modifiedLocations)
}
return false, nil
}
// Save saves the contents of the store to disk. It should be called with
// the lock held, locked for writing.
func (r *containerStore) Save() error {
func (r *containerStore) save(saveLocations containerLocations) error {
r.lockfile.AssertLockedForWriting()
rpath := r.containerspath()
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
return err
for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ {
location := containerLocationFromIndex(locationIndex)
if location&saveLocations == 0 {
continue
}
rpath := r.jsonPath[locationIndex]
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
return err
}
subsetContainers := make([]*Container, 0, len(r.containers))
for _, container := range r.containers {
if containerLocation(container) == location {
subsetContainers = append(subsetContainers, container)
}
}
jdata, err := json.Marshal(&subsetContainers)
if err != nil {
return err
}
var opts *ioutils.AtomicFileWriterOptions
if location == volatileContainerLocation {
opts = &ioutils.AtomicFileWriterOptions{
NoSync: true,
}
}
if err := ioutils.AtomicWriteFileWithOpts(rpath, jdata, 0600, opts); err != nil {
return err
}
}
jdata, err := json.Marshal(&r.containers)
lw, err := r.lockfile.RecordWrite()
if err != nil {
return err
}
if err := ioutils.AtomicWriteFile(rpath, jdata, 0600); err != nil {
return err
}
return r.lockfile.Touch()
r.lastWrite = lw
return nil
}
func newContainerStore(dir string) (rwContainerStore, error) {
// saveFor persists only the json file (stable or volatile) that backs the
// given modified container.
func (r *containerStore) saveFor(modifiedContainer *Container) error {
	return r.save(containerLocation(modifiedContainer))
}
func newContainerStore(dir string, runDir string, transient bool) (rwContainerStore, error) {
if err := os.MkdirAll(dir, 0700); err != nil {
return nil, err
}
lockfile, err := GetLockfile(filepath.Join(dir, "containers.lock"))
volatileDir := dir
if transient {
if err := os.MkdirAll(runDir, 0700); err != nil {
return nil, err
}
volatileDir = runDir
}
lockfile, err := lockfile.GetLockFile(filepath.Join(volatileDir, "containers.lock"))
if err != nil {
return nil, err
}
@ -386,10 +509,19 @@ func newContainerStore(dir string) (rwContainerStore, error) {
byid: make(map[string]*Container),
bylayer: make(map[string]*Container),
byname: make(map[string]*Container),
jsonPath: [numContainerLocationIndex]string{
filepath.Join(dir, "containers.json"),
filepath.Join(volatileDir, "volatile-containers.json"),
},
}
if err := cstore.startWritingWithReload(false); err != nil {
return nil, err
}
cstore.lastWrite, err = cstore.lockfile.GetLastWrite()
if err != nil {
return nil, err
}
defer cstore.stopWriting()
if _, err := cstore.load(true); err != nil {
return nil, err
@ -418,7 +550,7 @@ func (r *containerStore) ClearFlag(id string, flag string) error {
return ErrContainerUnknown
}
delete(container.Flags, flag)
return r.Save()
return r.saveFor(container)
}
func (r *containerStore) SetFlag(id string, flag string, value interface{}) error {
@ -430,7 +562,7 @@ func (r *containerStore) SetFlag(id string, flag string, value interface{}) erro
container.Flags = make(map[string]interface{})
}
container.Flags[flag] = value
return r.Save()
return r.saveFor(container)
}
func (r *containerStore) Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (container *Container, err error) {
@ -476,6 +608,7 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
Flags: copyStringInterfaceMap(options.Flags),
UIDMap: copyIDMap(options.UIDMap),
GIDMap: copyIDMap(options.GIDMap),
volatileStore: options.Volatile,
}
r.containers = append(r.containers, container)
r.byid[id] = container
@ -486,7 +619,7 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
for _, name := range names {
r.byname[name] = container
}
err = r.Save()
err = r.saveFor(container)
container = copyContainer(container)
return container, err
}
@ -501,7 +634,7 @@ func (r *containerStore) Metadata(id string) (string, error) {
func (r *containerStore) SetMetadata(id, metadata string) error {
if container, ok := r.lookup(id); ok {
container.Metadata = metadata
return r.Save()
return r.saveFor(container)
}
return ErrContainerUnknown
}
@ -530,7 +663,7 @@ func (r *containerStore) updateNames(id string, names []string, op updateNameOpe
r.byname[name] = container
}
container.Names = names
return r.Save()
return r.saveFor(container)
}
func (r *containerStore) Delete(id string) error {
@ -562,7 +695,7 @@ func (r *containerStore) Delete(id string) error {
r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...)
}
}
if err := r.Save(); err != nil {
if err := r.saveFor(container); err != nil {
return err
}
if err := os.RemoveAll(r.datadir(id)); err != nil {
@ -601,6 +734,7 @@ func (r *containerStore) BigData(id, key string) ([]byte, error) {
return os.ReadFile(r.datapath(c.ID, key))
}
// Requires startWriting. Yes, really, WRITING (see SetBigData).
func (r *containerStore) BigDataSize(id, key string) (int64, error) {
if key == "" {
return -1, fmt.Errorf("can't retrieve size of container big data with empty name: %w", ErrInvalidBigDataName)
@ -609,10 +743,7 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) {
if !ok {
return -1, ErrContainerUnknown
}
if c.BigDataSizes == nil {
c.BigDataSizes = make(map[string]int64)
}
if size, ok := c.BigDataSizes[key]; ok {
if size, ok := c.BigDataSizes[key]; ok { // This is valid, and returns ok == false, for BigDataSizes == nil.
return size, nil
}
if data, err := r.BigData(id, key); err == nil && data != nil {
@ -631,6 +762,7 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) {
return -1, ErrSizeUnknown
}
// Requires startWriting. Yes, really, WRITING (see SetBigData).
func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
if key == "" {
return "", fmt.Errorf("can't retrieve digest of container big data value with empty name: %w", ErrInvalidBigDataName)
@ -639,10 +771,7 @@ func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
if !ok {
return "", ErrContainerUnknown
}
if c.BigDataDigests == nil {
c.BigDataDigests = make(map[string]digest.Digest)
}
if d, ok := c.BigDataDigests[key]; ok {
	if d, ok := c.BigDataDigests[key]; ok { // This is valid, and returns ok == false, for BigDataDigests == nil.
return d, nil
}
if data, err := r.BigData(id, key); err == nil && data != nil {
@ -709,7 +838,7 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error {
save = true
}
if save {
err = r.Save()
err = r.saveFor(c)
}
}
return err

View File

@ -251,6 +251,11 @@ func (a *Driver) Exists(id string) bool {
return true
}
// ListLayers lists layers (not including additional image stores). It is
// not implemented for this driver and always returns
// graphdriver.ErrNotSupported.
func (a *Driver) ListLayers() ([]string, error) {
	return nil, graphdriver.ErrNotSupported
}
// AdditionalImageStores returns additional image stores supported by the driver
func (a *Driver) AdditionalImageStores() []string {
return nil

View File

@ -676,6 +676,11 @@ func (d *Driver) Exists(id string) bool {
return err == nil
}
// ListLayers lists layers (not including additional image stores). It is
// not implemented for this driver and always returns
// graphdriver.ErrNotSupported.
func (d *Driver) ListLayers() ([]string, error) {
	return nil, graphdriver.ErrNotSupported
}
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
return nil

View File

@ -267,6 +267,11 @@ func (d *Driver) Exists(id string) bool {
return d.DeviceSet.HasDevice(id)
}
// ListLayers lists layers (not including additional image stores). It is
// not implemented for this driver and always returns
// graphdriver.ErrNotSupported.
func (d *Driver) ListLayers() ([]string, error) {
	return nil, graphdriver.ErrNotSupported
}
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
return nil

View File

@ -109,6 +109,9 @@ type ProtoDriver interface {
// Exists returns whether a filesystem layer with the specified
// ID exists on this driver.
Exists(id string) bool
// Returns a list of layer ids that exist on this driver (does not include
// additional storage layers). Not supported by all backends.
ListLayers() ([]string, error)
// Status returns a set of key-value pairs which give low
// level diagnostic status about this driver.
Status() [][2]string

View File

@ -17,6 +17,7 @@ import (
"strings"
"sync"
"syscall"
"unicode"
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/drivers/overlayutils"
@ -356,9 +357,9 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
if opts.forceMask != nil {
return nil, errors.New("'force_mask' is supported only with 'mount_program'")
}
// check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs
// check if they are running over btrfs, aufs, overlay, or ecryptfs
switch fsMagic {
case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
case graphdriver.FsMagicAufs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
return nil, fmt.Errorf("'overlay' is not supported over %s, a mount_program is required: %w", backingFs, graphdriver.ErrIncompatibleFS)
}
if unshare.IsRootless() && isNetworkFileSystem(fsMagic) {
@ -1697,6 +1698,40 @@ func (d *Driver) Exists(id string) bool {
return err == nil
}
// nameLooksLikeID reports whether name has the shape of a layer ID:
// exactly 64 characters, all of them ASCII hexadecimal digits.
func nameLooksLikeID(name string) bool {
	if len(name) != 64 {
		return false
	}
	for i := 0; i < len(name); i++ {
		if !unicode.Is(unicode.ASCII_Hex_Digit, rune(name[i])) {
			return false
		}
	}
	return true
}
// ListLayers returns the IDs of the layers found under the driver's home
// directory (additional image stores are not included).
func (d *Driver) ListLayers() ([]string, error) {
	entries, err := os.ReadDir(d.home)
	if err != nil {
		return nil, err
	}
	layers := make([]string, 0)
	for _, entry := range entries {
		name := entry.Name()
		// Only directories whose names look like layer IDs count.
		if !entry.IsDir() || !nameLooksLikeID(name) {
			continue
		}
		layers = append(layers, name)
	}
	return layers, nil
}
// isParent returns if the passed in parent is the direct parent of the passed in layer
func (d *Driver) isParent(id, parent string) bool {
lowers, err := d.getLowerDirs(id)

View File

@ -8,6 +8,7 @@ import (
"runtime"
"strconv"
"strings"
"unicode"
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
@ -265,6 +266,40 @@ func (d *Driver) Exists(id string) bool {
return err == nil
}
// nameLooksLikeID reports whether name has the shape of a layer ID:
// exactly 64 characters, all of them ASCII hexadecimal digits.
func nameLooksLikeID(name string) bool {
	if len(name) != 64 {
		return false
	}
	for i := 0; i < len(name); i++ {
		if !unicode.Is(unicode.ASCII_Hex_Digit, rune(name[i])) {
			return false
		}
	}
	return true
}
// ListLayers returns the IDs of the layers found under the driver's primary
// home directory (additional image stores are not included).
func (d *Driver) ListLayers() ([]string, error) {
	entries, err := os.ReadDir(d.homes[0])
	if err != nil {
		return nil, err
	}
	layers := make([]string, 0)
	for _, entry := range entries {
		name := entry.Name()
		// Only directories whose names look like layer IDs count.
		if !entry.IsDir() || !nameLooksLikeID(name) {
			continue
		}
		layers = append(layers, name)
	}
	return layers, nil
}
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
if len(d.homes) > 1 {

View File

@ -185,6 +185,11 @@ func (d *Driver) Exists(id string) bool {
return result
}
// ListLayers lists layers (not including additional image stores). It is
// not implemented for this driver and always returns
// graphdriver.ErrNotSupported.
func (d *Driver) ListLayers() ([]string, error) {
	return nil, graphdriver.ErrNotSupported
}
// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
return graphdriver.NaiveCreateFromTemplate(d, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite)

View File

@ -506,6 +506,11 @@ func (d *Driver) Exists(id string) bool {
return d.filesystemsCache[d.zfsPath(id)]
}
// ListLayers lists layers (not including additional image stores). It is
// not implemented for this driver and always returns
// graphdriver.ErrNotSupported.
func (d *Driver) ListLayers() ([]string, error) {
	return nil, graphdriver.ErrNotSupported
}
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
return nil

View File

@ -9,6 +9,7 @@ import (
"time"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/stringutils"
"github.com/containers/storage/pkg/truncindex"
@ -148,19 +149,23 @@ type rwImageStore interface {
// Delete removes the record of the image.
Delete(id string) error
addMappedTopLayer(id, layer string) error
removeMappedTopLayer(id, layer string) error
// Wipe removes records of all images.
Wipe() error
}
type imageStore struct {
lockfile Locker // lockfile.IsReadWrite can be used to distinguish between read-write and read-only image stores.
dir string
images []*Image
idindex *truncindex.TruncIndex
byid map[string]*Image
byname map[string]*Image
bydigest map[digest.Digest][]*Image
loadMut sync.Mutex
lockfile *lockfile.LockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only image stores.
dir string
lastWrite lockfile.LastWrite
images []*Image
idindex *truncindex.TruncIndex
byid map[string]*Image
byname map[string]*Image
bydigest map[digest.Digest][]*Image
loadMut sync.Mutex
}
func copyImage(i *Image) *Image {
@ -252,7 +257,7 @@ func (r *imageStore) startReadingWithReload(canReload bool) error {
r.lockfile.Lock()
unlockFn = r.lockfile.Unlock
if _, err := r.load(true); err != nil {
if _, err := r.reloadIfChanged(true); err != nil {
return err
}
unlockFn()
@ -292,19 +297,20 @@ func (r *imageStore) stopReading() {
// if it is held for writing.
//
// If !lockedForWriting and this function fails, the return value indicates whether
// retrying with lockedForWriting could succeed. In that case the caller MUST
// call load(), not reloadIfChanged() (because the “if changed” state will not
// be detected again).
// reloadIfChanged() with lockedForWriting could succeed.
func (r *imageStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
r.loadMut.Lock()
defer r.loadMut.Unlock()
modified, err := r.lockfile.Modified()
lastWrite, modified, err := r.lockfile.ModifiedSince(r.lastWrite)
if err != nil {
return false, err
}
if modified {
return r.load(lockedForWriting)
if tryLockedForWriting, err := r.load(lockedForWriting); err != nil {
return tryLockedForWriting, err // r.lastWrite is unchanged, so we will load the next time again.
}
r.lastWrite = lastWrite
}
return false, nil
}
@ -371,6 +377,9 @@ func (i *Image) recomputeDigests() error {
// load reloads the contents of the store from disk.
//
// Most callers should call reloadIfChanged() instead, to avoid overhead and to correctly
// manage r.lastWrite.
//
// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
// if it is held for writing.
//
@ -454,14 +463,19 @@ func (r *imageStore) Save() error {
if err := ioutils.AtomicWriteFile(rpath, jdata, 0600); err != nil {
return err
}
return r.lockfile.Touch()
lw, err := r.lockfile.RecordWrite()
if err != nil {
return err
}
r.lastWrite = lw
return nil
}
func newImageStore(dir string) (rwImageStore, error) {
if err := os.MkdirAll(dir, 0700); err != nil {
return nil, err
}
lockfile, err := GetLockfile(filepath.Join(dir, "images.lock"))
lockfile, err := lockfile.GetLockFile(filepath.Join(dir, "images.lock"))
if err != nil {
return nil, err
}
@ -477,6 +491,10 @@ func newImageStore(dir string) (rwImageStore, error) {
return nil, err
}
defer istore.stopWriting()
istore.lastWrite, err = istore.lockfile.GetLastWrite()
if err != nil {
return nil, err
}
if _, err := istore.load(true); err != nil {
return nil, err
}
@ -484,7 +502,7 @@ func newImageStore(dir string) (rwImageStore, error) {
}
func newROImageStore(dir string) (roImageStore, error) {
lockfile, err := GetROLockfile(filepath.Join(dir, "images.lock"))
lockfile, err := lockfile.GetROLockFile(filepath.Join(dir, "images.lock"))
if err != nil {
return nil, err
}
@ -500,6 +518,10 @@ func newROImageStore(dir string) (roImageStore, error) {
return nil, err
}
defer istore.stopReading()
istore.lastWrite, err = istore.lockfile.GetLastWrite()
if err != nil {
return nil, err
}
if _, err := istore.load(false); err != nil {
return nil, err
}
@ -763,10 +785,7 @@ func (r *imageStore) BigDataSize(id, key string) (int64, error) {
if !ok {
return -1, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
if image.BigDataSizes == nil {
image.BigDataSizes = make(map[string]int64)
}
if size, ok := image.BigDataSizes[key]; ok {
if size, ok := image.BigDataSizes[key]; ok { // This is valid, and returns ok == false, for BigDataSizes == nil.
return size, nil
}
if data, err := r.BigData(id, key); err == nil && data != nil {
@ -783,10 +802,7 @@ func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) {
if !ok {
return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
if image.BigDataDigests == nil {
image.BigDataDigests = make(map[string]digest.Digest)
}
if d, ok := image.BigDataDigests[key]; ok {
if d, ok := image.BigDataDigests[key]; ok { // This is valid, and returns ok == false, for BigDataDigests == nil.
return d, nil
}
return "", ErrDigestUnknown

View File

@ -18,6 +18,7 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/system"
@ -42,6 +43,22 @@ const (
maxLayerStoreCleanupIterations = 3
)
type layerLocations uint8
// The backing store is split in two json files, one (the volatile)
// that is written without fsync() meaning it isn't as robust to
// unclean shutdown
const (
stableLayerLocation layerLocations = 1 << iota
volatileLayerLocation
numLayerLocationIndex = iota
)
func layerLocationFromIndex(index int) layerLocations {
return 1 << index
}
// A Layer is a record of a copy-on-write layer that's stored by the lower
// level graph driver.
type Layer struct {
@ -123,6 +140,9 @@ type Layer struct {
// ReadOnly is true if this layer resides in a read-only layer store.
ReadOnly bool `json:"-"`
	// volatileStore is true if the layer is from the volatile json file
volatileStore bool `json:"-"`
// BigDataNames is a list of names of data items that we keep for the
// convenience of the caller. They can be large, and are only in
// memory when being read from or written to disk.
@ -276,23 +296,36 @@ type rwLayerStore interface {
// store.
// This API is experimental and can be changed without bumping the major version number.
PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error)
// Clean up unreferenced layers
GarbageCollect() error
}
type layerStore struct {
lockfile Locker
mountsLockfile Locker
rundir string
driver drivers.Driver
layerdir string
layers []*Layer
idindex *truncindex.TruncIndex
byid map[string]*Layer
byname map[string]*Layer
bymount map[string]*Layer
bycompressedsum map[digest.Digest][]string
byuncompressedsum map[digest.Digest][]string
loadMut sync.Mutex
layerspathModified time.Time
lockfile *lockfile.LockFile
mountsLockfile *lockfile.LockFile
rundir string
jsonPath [numLayerLocationIndex]string
driver drivers.Driver
layerdir string
lastWrite lockfile.LastWrite
mountsLastWrite lockfile.LastWrite // Only valid if lockfile.IsReadWrite()
layers []*Layer
idindex *truncindex.TruncIndex
byid map[string]*Layer
byname map[string]*Layer
bymount map[string]*Layer
bycompressedsum map[digest.Digest][]string
byuncompressedsum map[digest.Digest][]string
loadMut sync.Mutex
layerspathsModified [numLayerLocationIndex]time.Time
}
// layerLocation returns which of the two backing json files (stable or
// volatile) the given layer record belongs to.
func layerLocation(l *Layer) layerLocations {
	if l.volatileStore {
		return volatileLayerLocation
	}
	return stableLayerLocation
}
func copyLayer(l *Layer) *Layer {
@ -311,6 +344,7 @@ func copyLayer(l *Layer) *Layer {
UncompressedSize: l.UncompressedSize,
CompressionType: l.CompressionType,
ReadOnly: l.ReadOnly,
volatileStore: l.volatileStore,
BigDataNames: copyStringSlice(l.BigDataNames),
Flags: copyStringInterfaceMap(l.Flags),
UIDMap: copyIDMap(l.UIDMap),
@ -387,7 +421,7 @@ func (r *layerStore) startReadingWithReload(canReload bool) error {
r.lockfile.Lock()
unlockFn = r.lockfile.Unlock
if _, err := r.load(true); err != nil {
if _, err := r.reloadIfChanged(true); err != nil {
return err
}
unlockFn()
@ -416,38 +450,34 @@ func (r *layerStore) stopReading() {
r.lockfile.Unlock()
}
// Modified() checks if the most recent writer was a party other than the
// last recorded writer. It should only be called with the lock held.
func (r *layerStore) Modified() (bool, error) {
var mmodified, tmodified bool
lmodified, err := r.lockfile.Modified()
// layersModified() checks if the most recent writer to r.jsonPath[] was a party other than the
// last recorded writer. If so, it returns a lockfile.LastWrite value to record on a successful
// reload.
// It should only be called with the lock held.
func (r *layerStore) layersModified() (lockfile.LastWrite, bool, error) {
lastWrite, modified, err := r.lockfile.ModifiedSince(r.lastWrite)
if err != nil {
return lmodified, err
return lockfile.LastWrite{}, modified, err
}
if r.lockfile.IsReadWrite() {
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
mmodified, err = r.mountsLockfile.Modified()
if err != nil {
return lmodified, err
if modified {
return lastWrite, true, nil
}
// If the layers.json file or container-layers.json has been
// modified manually, then we have to reload the storage in
// any case.
for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
info, err := os.Stat(r.jsonPath[locationIndex])
if err != nil && !os.IsNotExist(err) {
return lockfile.LastWrite{}, false, fmt.Errorf("stat layers file: %w", err)
}
if info != nil && info.ModTime() != r.layerspathsModified[locationIndex] {
// In this case the LastWrite value is equal to r.lastWrite; writing it back doesnt hurt.
return lastWrite, true, nil
}
}
if lmodified || mmodified {
return true, nil
}
// If the layers.json file has been modified manually, then we have to
// reload the storage in any case.
info, err := os.Stat(r.layerspath())
if err != nil && !os.IsNotExist(err) {
return false, fmt.Errorf("stat layers file: %w", err)
}
if info != nil {
tmodified = info.ModTime() != r.layerspathModified
}
return tmodified, nil
return lockfile.LastWrite{}, false, nil
}
// reloadIfChanged reloads the contents of the store from disk if it is changed.
@ -456,23 +486,50 @@ func (r *layerStore) Modified() (bool, error) {
// if it is held for writing.
//
// If !lockedForWriting and this function fails, the return value indicates whether
// retrying with lockedForWriting could succeed. In that case the caller MUST
// call load(), not reloadIfChanged() (because the “if changed” state will not
// be detected again).
// reloadIfChanged() with lockedForWriting could succeed.
func (r *layerStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
r.loadMut.Lock()
defer r.loadMut.Unlock()
modified, err := r.Modified()
lastWrite, layersModified, err := r.layersModified()
if err != nil {
return false, err
}
if modified {
return r.load(lockedForWriting)
if layersModified {
// r.load also reloads mounts data; so, on this path, we dont need to call reloadMountsIfChanged.
if tryLockedForWriting, err := r.load(lockedForWriting); err != nil {
return tryLockedForWriting, err // r.lastWrite is unchanged, so we will load the next time again.
}
r.lastWrite = lastWrite
return false, nil
}
if r.lockfile.IsReadWrite() {
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
if err := r.reloadMountsIfChanged(); err != nil {
return false, err
}
}
return false, nil
}
// reloadMountsIfChanged reloads the contents of mountsPath from disk if it is changed.
//
// The caller must hold r.mountsLockFile for reading or writing.
func (r *layerStore) reloadMountsIfChanged() error {
lastWrite, modified, err := r.mountsLockfile.ModifiedSince(r.mountsLastWrite)
if err != nil {
return err
}
if modified {
if err = r.loadMounts(); err != nil {
return err
}
r.mountsLastWrite = lastWrite
}
return nil
}
func (r *layerStore) Layers() ([]Layer, error) {
layers := make([]Layer, len(r.layers))
for i := range r.layers {
@ -481,44 +538,93 @@ func (r *layerStore) Layers() ([]Layer, error) {
return layers, nil
}
func (r *layerStore) GarbageCollect() error {
layers, err := r.driver.ListLayers()
if err != nil {
if errors.Is(err, drivers.ErrNotSupported) {
return nil
}
return err
}
for _, id := range layers {
// Is the id still referenced
if r.byid[id] != nil {
continue
}
// Remove layer and any related data of unreferenced id
if err := r.driver.Remove(id); err != nil {
return err
}
os.Remove(r.tspath(id))
os.RemoveAll(r.datadir(id))
}
return nil
}
func (r *layerStore) mountspath() string {
return filepath.Join(r.rundir, "mountpoints.json")
}
func (r *layerStore) layerspath() string {
return filepath.Join(r.layerdir, "layers.json")
}
// load reloads the contents of the store from disk.
//
// Most callers should call reloadIfChanged() instead, to avoid overhead and to correctly
// manage r.lastWrite.
//
// As a side effect, this sets r.mountsLastWrite.
//
// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
// if it is held for writing.
//
// If !lockedForWriting and this function fails, the return value indicates whether
// retrying with lockedForWriting could succeed.
func (r *layerStore) load(lockedForWriting bool) (bool, error) {
rpath := r.layerspath()
info, err := os.Stat(rpath)
if err != nil {
if !os.IsNotExist(err) {
return false, err
}
} else {
r.layerspathModified = info.ModTime()
}
data, err := os.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
return false, err
}
var modifiedLocations layerLocations
layers := []*Layer{}
if len(data) != 0 {
if err := json.Unmarshal(data, &layers); err != nil {
return false, fmt.Errorf("loading %q: %w", rpath, err)
ids := make(map[string]*Layer)
for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
location := layerLocationFromIndex(locationIndex)
rpath := r.jsonPath[locationIndex]
info, err := os.Stat(rpath)
if err != nil {
if !os.IsNotExist(err) {
return false, err
}
} else {
r.layerspathsModified[locationIndex] = info.ModTime()
}
data, err := os.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
return false, err
}
locationLayers := []*Layer{}
if len(data) != 0 {
if err := json.Unmarshal(data, &locationLayers); err != nil {
return false, fmt.Errorf("loading %q: %w", rpath, err)
}
}
for _, layer := range locationLayers {
// There should be no duplicated ids between json files, but lets check to be sure
if ids[layer.ID] != nil {
continue // skip invalid duplicated layer
}
// Remember where the layer came from
if location == volatileLayerLocation {
layer.volatileStore = true
}
layers = append(layers, layer)
ids[layer.ID] = layer
}
}
idlist := make([]string, 0, len(layers))
ids := make(map[string]*Layer)
names := make(map[string]*Layer)
compressedsums := make(map[digest.Digest][]string)
uncompressedsums := make(map[digest.Digest][]string)
@ -527,12 +633,12 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
selinux.ClearLabels()
}
for n, layer := range layers {
ids[layer.ID] = layers[n]
idlist = append(idlist, layer.ID)
for _, name := range layer.Names {
if conflict, ok := names[name]; ok {
r.removeName(conflict, name)
errorToResolveBySaving = ErrDuplicateLayerNames
modifiedLocations |= layerLocation(conflict)
}
names[name] = layers[n]
}
@ -574,9 +680,17 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
if r.lockfile.IsReadWrite() {
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
// We need to reload mounts unconditionally, becuause by creating r.layers from scratch, we have discarded the previous
// information, if any. So, obtain a fresh mountsLastWrite value so that we dont unnecessarily reload the data
// afterwards.
mountsLastWrite, err := r.mountsLockfile.GetLastWrite()
if err != nil {
return false, err
}
if err := r.loadMounts(); err != nil {
return false, err
}
r.mountsLastWrite = mountsLastWrite
}
if errorToResolveBySaving != nil {
@ -593,7 +707,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
}
if layerHasIncompleteFlag(layer) {
logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID)
err = r.deleteInternal(layer.ID)
err := r.deleteInternal(layer.ID)
if err != nil {
// Don't return the error immediately, because deleteInternal does not saveLayers();
// Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully
@ -601,9 +715,10 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
}
modifiedLocations |= layerLocation(layer)
}
}
if err := r.saveLayers(); err != nil {
if err := r.saveLayers(modifiedLocations); err != nil {
return false, err
}
if incompleteDeletionErrors != nil {
@ -652,37 +767,66 @@ func (r *layerStore) loadMounts() error {
// Save saves the contents of the store to disk. It should be called with
// the lock held, locked for writing.
func (r *layerStore) Save() error {
func (r *layerStore) save(saveLocations layerLocations) error {
r.mountsLockfile.Lock()
defer r.mountsLockfile.Unlock()
if err := r.saveLayers(); err != nil {
if err := r.saveLayers(saveLocations); err != nil {
return err
}
return r.saveMounts()
}
func (r *layerStore) saveLayers() error {
func (r *layerStore) saveFor(modifiedLayer *Layer) error {
return r.save(layerLocation(modifiedLayer))
}
func (r *layerStore) saveLayers(saveLocations layerLocations) error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
r.lockfile.AssertLockedForWriting()
rpath := r.layerspath()
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
return err
for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
location := layerLocationFromIndex(locationIndex)
if location&saveLocations == 0 {
continue
}
rpath := r.jsonPath[locationIndex]
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
return err
}
subsetLayers := make([]*Layer, 0, len(r.layers))
for _, layer := range r.layers {
if layerLocation(layer) == location {
subsetLayers = append(subsetLayers, layer)
}
}
jldata, err := json.Marshal(&subsetLayers)
if err != nil {
return err
}
var opts *ioutils.AtomicFileWriterOptions
if location == volatileLayerLocation {
opts = &ioutils.AtomicFileWriterOptions{
NoSync: true,
}
}
if err := ioutils.AtomicWriteFileWithOpts(rpath, jldata, 0600, opts); err != nil {
return err
}
}
jldata, err := json.Marshal(&r.layers)
lw, err := r.lockfile.RecordWrite()
if err != nil {
return err
}
if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil {
return err
}
return r.lockfile.Touch()
r.lastWrite = lw
return nil
}
func (r *layerStore) saveMounts() error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
r.mountsLockfile.AssertLockedForWriting()
mpath := r.mountspath()
@ -706,29 +850,40 @@ func (r *layerStore) saveMounts() error {
if err = ioutils.AtomicWriteFile(mpath, jmdata, 0600); err != nil {
return err
}
if err := r.mountsLockfile.Touch(); err != nil {
lw, err := r.mountsLockfile.RecordWrite()
if err != nil {
return err
}
r.mountsLastWrite = lw
return r.loadMounts()
}
func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver) (rwLayerStore, error) {
func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver, transient bool) (rwLayerStore, error) {
if err := os.MkdirAll(rundir, 0700); err != nil {
return nil, err
}
if err := os.MkdirAll(layerdir, 0700); err != nil {
return nil, err
}
lockfile, err := GetLockfile(filepath.Join(layerdir, "layers.lock"))
// Note: While the containers.lock file is in rundir for transient stores
// we don't want to do this here, because the non-transient layers in
// layers.json might be used externally as a read-only layer (using e.g.
// additionalimagestores), and that would look for the lockfile in the
// same directory
lockFile, err := lockfile.GetLockFile(filepath.Join(layerdir, "layers.lock"))
if err != nil {
return nil, err
}
mountsLockfile, err := GetLockfile(filepath.Join(rundir, "mountpoints.lock"))
mountsLockfile, err := lockfile.GetLockFile(filepath.Join(rundir, "mountpoints.lock"))
if err != nil {
return nil, err
}
volatileDir := layerdir
if transient {
volatileDir = rundir
}
rlstore := layerStore{
lockfile: lockfile,
lockfile: lockFile,
mountsLockfile: mountsLockfile,
driver: driver,
rundir: rundir,
@ -736,11 +891,21 @@ func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Dri
byid: make(map[string]*Layer),
bymount: make(map[string]*Layer),
byname: make(map[string]*Layer),
jsonPath: [numLayerLocationIndex]string{
filepath.Join(layerdir, "layers.json"),
filepath.Join(volatileDir, "volatile-layers.json"),
},
}
if err := rlstore.startWritingWithReload(false); err != nil {
return nil, err
}
defer rlstore.stopWriting()
lw, err := rlstore.lockfile.GetLastWrite()
if err != nil {
return nil, err
}
rlstore.lastWrite = lw
// rlstore.mountsLastWrite is initialized inside rlstore.load().
if _, err := rlstore.load(true); err != nil {
return nil, err
}
@ -748,7 +913,7 @@ func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Dri
}
func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roLayerStore, error) {
lockfile, err := GetROLockfile(filepath.Join(layerdir, "layers.lock"))
lockfile, err := lockfile.GetROLockFile(filepath.Join(layerdir, "layers.lock"))
if err != nil {
return nil, err
}
@ -761,11 +926,20 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roL
byid: make(map[string]*Layer),
bymount: make(map[string]*Layer),
byname: make(map[string]*Layer),
jsonPath: [numLayerLocationIndex]string{
filepath.Join(layerdir, "layers.json"),
filepath.Join(layerdir, "volatile-layers.json"),
},
}
if err := rlstore.startReadingWithReload(false); err != nil {
return nil, err
}
defer rlstore.stopReading()
lw, err := rlstore.lockfile.GetLastWrite()
if err != nil {
return nil, err
}
rlstore.lastWrite = lw
if _, err := rlstore.load(false); err != nil {
return nil, err
}
@ -800,19 +974,19 @@ func (r *layerStore) Size(name string) (int64, error) {
func (r *layerStore) ClearFlag(id string, flag string) error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to clear flags on layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
return fmt.Errorf("not allowed to clear flags on layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
layer, ok := r.lookup(id)
if !ok {
return ErrLayerUnknown
}
delete(layer.Flags, flag)
return r.Save()
return r.saveFor(layer)
}
func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to set flags on layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
return fmt.Errorf("not allowed to set flags on layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
layer, ok := r.lookup(id)
if !ok {
@ -822,7 +996,7 @@ func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
layer.Flags = make(map[string]interface{})
}
layer.Flags[flag] = value
return r.Save()
return r.saveFor(layer)
}
func (r *layerStore) Status() ([][2]string, error) {
@ -876,7 +1050,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
if layer.UncompressedDigest != "" {
r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
}
if err := r.Save(); err != nil {
if err := r.saveFor(layer); err != nil {
if err2 := r.driver.Remove(id); err2 != nil {
logrus.Errorf("While recovering from a failure to save layers, error deleting layer %#v: %v", id, err2)
}
@ -887,7 +1061,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) {
if !r.lockfile.IsReadWrite() {
return nil, -1, fmt.Errorf("not allowed to create new layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
return nil, -1, fmt.Errorf("not allowed to create new layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
if err := os.MkdirAll(r.rundir, 0700); err != nil {
return nil, -1, err
@ -975,6 +1149,7 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
UIDMap: copyIDMap(moreOptions.UIDMap),
GIDMap: copyIDMap(moreOptions.GIDMap),
BigDataNames: []string{},
volatileStore: moreOptions.Volatile,
}
r.layers = append(r.layers, layer)
// This can only fail if the ID is already missing, which shouldnt happen — and in that case the index is already in the desired state anyway.
@ -1004,7 +1179,7 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
}
}()
err := r.Save()
err := r.saveFor(layer)
if err != nil {
cleanupFailureContext = "saving incomplete layer metadata"
return nil, -1, err
@ -1070,7 +1245,7 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
}
}
delete(layer.Flags, incompleteFlag)
err = r.Save()
err = r.saveFor(layer)
if err != nil {
cleanupFailureContext = "saving finished layer metadata"
return nil, -1, err
@ -1096,10 +1271,8 @@ func (r *layerStore) Mounted(id string) (int, error) {
}
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
if err = r.loadMounts(); err != nil {
return 0, err
}
if err := r.reloadMountsIfChanged(); err != nil {
return 0, err
}
layer, ok := r.lookup(id)
if !ok {
@ -1126,10 +1299,8 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
}
r.mountsLockfile.Lock()
defer r.mountsLockfile.Unlock()
if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
if err = r.loadMounts(); err != nil {
return "", err
}
if err := r.reloadMountsIfChanged(); err != nil {
return "", err
}
layer, ok := r.lookup(id)
if !ok {
@ -1176,10 +1347,8 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
}
r.mountsLockfile.Lock()
defer r.mountsLockfile.Unlock()
if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
if err = r.loadMounts(); err != nil {
return false, err
}
if err := r.reloadMountsIfChanged(); err != nil {
return false, err
}
layer, ok := r.lookup(id)
if !ok {
@ -1214,10 +1383,8 @@ func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
}
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
if err = r.loadMounts(); err != nil {
return nil, nil, err
}
if err := r.reloadMountsIfChanged(); err != nil {
return nil, nil, err
}
layer, ok := r.lookup(id)
if !ok {
@ -1285,7 +1452,7 @@ func (r *layerStore) removeName(layer *Layer, name string) {
func (r *layerStore) updateNames(id string, names []string, op updateNameOperation) error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to change layer name assignments at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
return fmt.Errorf("not allowed to change layer name assignments at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
layer, ok := r.lookup(id)
if !ok {
@ -1306,7 +1473,7 @@ func (r *layerStore) updateNames(id string, names []string, op updateNameOperati
r.byname[name] = layer
}
layer.Names = names
return r.Save()
return r.saveFor(layer)
}
func (r *layerStore) datadir(id string) string {
@ -1333,7 +1500,7 @@ func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
return fmt.Errorf("can't set empty name for layer big data item: %w", ErrInvalidBigDataName)
}
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to save data items associated with layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
return fmt.Errorf("not allowed to save data items associated with layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
layer, ok := r.lookup(id)
if !ok {
@ -1370,7 +1537,7 @@ func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
}
if addName {
layer.BigDataNames = append(layer.BigDataNames, key)
return r.Save()
return r.saveFor(layer)
}
return nil
}
@ -1392,11 +1559,11 @@ func (r *layerStore) Metadata(id string) (string, error) {
func (r *layerStore) SetMetadata(id, metadata string) error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to modify layer metadata at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
return fmt.Errorf("not allowed to modify layer metadata at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
if layer, ok := r.lookup(id); ok {
layer.Metadata = metadata
return r.Save()
return r.saveFor(layer)
}
return ErrLayerUnknown
}
@ -1418,7 +1585,7 @@ func layerHasIncompleteFlag(layer *Layer) bool {
func (r *layerStore) deleteInternal(id string) error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
layer, ok := r.lookup(id)
if !ok {
@ -1430,7 +1597,7 @@ func (r *layerStore) deleteInternal(id string) error {
layer.Flags = make(map[string]interface{})
}
layer.Flags[incompleteFlag] = true
if err := r.Save(); err != nil {
if err := r.saveFor(layer); err != nil {
return err
}
}
@ -1532,7 +1699,7 @@ func (r *layerStore) Delete(id string) error {
if err := r.deleteInternal(id); err != nil {
return err
}
return r.Save()
return r.saveFor(layer)
}
func (r *layerStore) Exists(id string) bool {
@ -1549,7 +1716,7 @@ func (r *layerStore) Get(id string) (*Layer, error) {
func (r *layerStore) Wipe() error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
ids := make([]string, 0, len(r.byid))
for id := range r.byid {
@ -1814,7 +1981,7 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, diff io.Reader) (size int64, err error) {
if !r.lockfile.IsReadWrite() {
return -1, fmt.Errorf("not allowed to modify layer contents at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
return -1, fmt.Errorf("not allowed to modify layer contents at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
layer, ok := r.lookup(to)
@ -1953,7 +2120,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
return layer.GIDs[i] < layer.GIDs[j]
})
err = r.Save()
err = r.saveFor(layer)
return size, err
}
@ -1994,7 +2161,7 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
layer.UncompressedDigest = diffOutput.UncompressedDigest
layer.UncompressedSize = diffOutput.Size
layer.Metadata = diffOutput.Metadata
if err = r.Save(); err != nil {
if err = r.saveFor(layer); err != nil {
return err
}
for k, v := range diffOutput.BigData {
@ -2035,7 +2202,7 @@ func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOp
}
layer.UIDs = output.UIDs
layer.GIDs = output.GIDs
err = r.Save()
err = r.saveFor(layer)
return &output, err
}

View File

@ -4,12 +4,15 @@ import (
"github.com/containers/storage/pkg/lockfile"
)
type Locker = lockfile.Locker
// Deprecated: Use lockfile.*LockFile.
type Locker = lockfile.Locker //lint:ignore SA1019 // lockfile.Locker is deprecated
// Deprecated: Use lockfile.GetLockFile.
func GetLockfile(path string) (lockfile.Locker, error) {
return lockfile.GetLockfile(path)
}
// Deprecated: Use lockfile.GetROLockFile.
func GetROLockfile(path string) (lockfile.Locker, error) {
return lockfile.GetROLockfile(path)
}

View File

@ -43,7 +43,12 @@ func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtool
func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInfo, error) {
root := newRootFileInfo(idMappings)
err := filepath.WalkDir(sourceDir, func(path string, d fs.DirEntry, err error) error {
sourceStat, err := system.Lstat(sourceDir)
if err != nil {
return nil, err
}
err = filepath.WalkDir(sourceDir, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
@ -86,8 +91,12 @@ func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInf
if err != nil {
return err
}
info.stat = s
if s.Dev() != sourceStat.Dev() {
return filepath.SkipDir
}
info.stat = s
info.capability, _ = system.Lgetxattr(path, "security.capability")
parent.children[info.name] = info

View File

@ -61,8 +61,8 @@ func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, err
}
// AtomicWriteFile atomically writes data to a file named by filename.
func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
f, err := newAtomicFileWriter(filename, perm, nil)
func AtomicWriteFileWithOpts(filename string, data []byte, perm os.FileMode, opts *AtomicFileWriterOptions) error {
f, err := newAtomicFileWriter(filename, perm, opts)
if err != nil {
return err
}
@ -77,6 +77,10 @@ func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
return err
}
func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
return AtomicWriteFileWithOpts(filename, data, perm, nil)
}
type atomicFileWriter struct {
f *os.File
fn string

View File

@ -10,6 +10,8 @@ import (
// A Locker represents a file lock where the file is used to cache an
// identifier of the last party that made changes to whatever's being protected
// by the lock.
//
// Deprecated: Refer directly to *LockFile, the provided implementation, instead.
type Locker interface {
// Acquire a writer lock.
// The default unix implementation panics if:
@ -28,10 +30,13 @@ type Locker interface {
// Touch records, for others sharing the lock, that the caller was the
// last writer. It should only be called with the lock held.
//
// Deprecated: Use *LockFile.RecordWrite.
Touch() error
// Modified() checks if the most recent writer was a party other than the
// last recorded writer. It should only be called with the lock held.
// Deprecated: Use *LockFile.ModifiedSince.
Modified() (bool, error)
// TouchedSince() checks if the most recent writer modified the file (likely using Touch()) after the specified time.
@ -50,58 +55,76 @@ type Locker interface {
}
var (
lockfiles map[string]Locker
lockfilesLock sync.Mutex
lockFiles map[string]*LockFile
lockFilesLock sync.Mutex
)
// GetLockFile opens a read-write lock file, creating it if necessary. The
// *LockFile object may already be locked if the path has already been requested
// by the current process.
func GetLockFile(path string) (*LockFile, error) {
return getLockfile(path, false)
}
// GetLockfile opens a read-write lock file, creating it if necessary. The
// Locker object may already be locked if the path has already been requested
// by the current process.
//
// Deprecated: Use GetLockFile
func GetLockfile(path string) (Locker, error) {
return getLockfile(path, false)
return GetLockFile(path)
}
// GetROLockFile opens a read-only lock file, creating it if necessary. The
// *LockFile object may already be locked if the path has already been requested
// by the current process.
func GetROLockFile(path string) (*LockFile, error) {
return getLockfile(path, true)
}
// GetROLockfile opens a read-only lock file, creating it if necessary. The
// Locker object may already be locked if the path has already been requested
// by the current process.
//
// Deprecated: Use GetROLockFile
func GetROLockfile(path string) (Locker, error) {
return getLockfile(path, true)
return GetROLockFile(path)
}
// getLockfile returns a Locker object, possibly (depending on the platform)
// getLockFile returns a *LockFile object, possibly (depending on the platform)
// working inter-process, and associated with the specified path.
//
// If ro, the lock is a read-write lock and the returned Locker should correspond to the
// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the
// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation.
// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation.
//
// WARNING:
// - The lock may or MAY NOT be inter-process.
// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
// - Even if ro, the lock MAY be exclusive.
func getLockfile(path string, ro bool) (Locker, error) {
lockfilesLock.Lock()
defer lockfilesLock.Unlock()
if lockfiles == nil {
lockfiles = make(map[string]Locker)
func getLockfile(path string, ro bool) (*LockFile, error) {
lockFilesLock.Lock()
defer lockFilesLock.Unlock()
if lockFiles == nil {
lockFiles = make(map[string]*LockFile)
}
cleanPath, err := filepath.Abs(path)
if err != nil {
return nil, fmt.Errorf("ensuring that path %q is an absolute path: %w", path, err)
}
if locker, ok := lockfiles[cleanPath]; ok {
if ro && locker.IsReadWrite() {
if lockFile, ok := lockFiles[cleanPath]; ok {
if ro && lockFile.IsReadWrite() {
return nil, fmt.Errorf("lock %q is not a read-only lock", cleanPath)
}
if !ro && !locker.IsReadWrite() {
if !ro && !lockFile.IsReadWrite() {
return nil, fmt.Errorf("lock %q is not a read-write lock", cleanPath)
}
return locker, nil
return lockFile, nil
}
locker, err := createLockerForPath(cleanPath, ro) // platform-dependent locker
lockFile, err := createLockFileForPath(cleanPath, ro) // platform-dependent LockFile
if err != nil {
return nil, err
}
lockfiles[cleanPath] = locker
return locker, nil
lockFiles[cleanPath] = lockFile
return lockFile, nil
}

View File

@ -18,27 +18,48 @@ import (
"golang.org/x/sys/unix"
)
type lockfile struct {
// *LockFile represents a file lock where the file is used to cache an
// identifier of the last party that made changes to whatever's being protected
// by the lock.
//
// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead.
type LockFile struct {
// The following fields are only set when constructing *LockFile, and must never be modified afterwards.
// They are safe to access without any other locking.
file string
ro bool
// rwMutex serializes concurrent reader-writer acquisitions in the same process space
rwMutex *sync.RWMutex
// stateMutex is used to synchronize concurrent accesses to the state below
stateMutex *sync.Mutex
counter int64
file string
fd uintptr
lw []byte // "last writer"-unique value valid as of the last .Touch() or .Modified(), generated by newLastWriterID()
lw LastWrite // A global value valid as of the last .Touch() or .Modified()
locktype int16
locked bool
ro bool
// The following fields are only modified on transitions between counter == 0 / counter != 0.
// Thus, they can be safely accessed by users _that currently hold the LockFile_ without locking.
// In other cases, they need to be protected using stateMutex.
fd uintptr
}
// LastWrite is an opaque identifier of the last write to some *LockFile.
// It can be used by users of a *LockFile to determine if the lock indicates changes
// since the last check.
//
// Never construct a LastWrite manually; only accept it from *LockFile methods, and pass it back.
type LastWrite struct {
// Never modify fields of a LastWrite object; it has value semantics.
state []byte // Contents of the lock file.
}
const lastWriterIDSize = 64 // This must be the same as len(stringid.GenerateRandomID)
var lastWriterIDCounter uint64 // Private state for newLastWriterID
// newLastWriterID returns a new "last writer" ID.
// newLastWrite returns a new "last write" ID.
// The value must be different on every call, and also differ from values
// generated by other processes.
func newLastWriterID() []byte {
func newLastWrite() LastWrite {
// The ID is (PID, time, per-process counter, random)
// PID + time represents both a unique process across reboots,
// and a specific time within the process; the per-process counter
@ -60,7 +81,38 @@ func newLastWriterID() []byte {
panic(err) // This shouldn't happen
}
return res
return LastWrite{
state: res,
}
}
// newLastWriteFromData returns a LastWrite corresponding to data that came from a previous LastWrite.serialize
func newLastWriteFromData(serialized []byte) LastWrite {
if serialized == nil {
panic("newLastWriteFromData with nil data")
}
return LastWrite{
state: serialized,
}
}
// serialize returns bytes to write to the lock file to represent the specified write.
func (lw LastWrite) serialize() []byte {
if lw.state == nil {
panic("LastWrite.serialize on an uninitialized object")
}
return lw.state
}
// Equals returns true if lw matches other
func (lw LastWrite) equals(other LastWrite) bool {
if lw.state == nil {
panic("LastWrite.equals on an uninitialized object")
}
if other.state == nil {
panic("LastWrite.equals with an uninitialized counterparty")
}
return bytes.Equal(lw.state, other.state)
}
// openLock opens the file at path and returns the corresponding file
@ -84,7 +136,7 @@ func openLock(path string, ro bool) (fd int, err error) {
// the directory of the lockfile seems to be removed, try to create it
if os.IsNotExist(err) {
if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
return fd, fmt.Errorf("creating locker directory: %w", err)
return fd, fmt.Errorf("creating lock file directory: %w", err)
}
return openLock(path, ro)
@ -93,20 +145,20 @@ func openLock(path string, ro bool) (fd int, err error) {
return fd, &os.PathError{Op: "open", Path: path, Err: err}
}
// createLockerForPath returns a Locker object, possibly (depending on the platform)
// createLockFileForPath returns new *LockFile object, possibly (depending on the platform)
// working inter-process and associated with the specified path.
//
// This function will be called at most once for each path value within a single process.
//
// If ro, the lock is a read-write lock and the returned Locker should correspond to the
// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the
// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation.
// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation.
//
// WARNING:
// - The lock may or MAY NOT be inter-process.
// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
// - Even if ro, the lock MAY be exclusive.
func createLockerForPath(path string, ro bool) (Locker, error) {
func createLockFileForPath(path string, ro bool) (*LockFile, error) {
// Check if we can open the lock.
fd, err := openLock(path, ro)
if err != nil {
@ -118,19 +170,21 @@ func createLockerForPath(path string, ro bool) (Locker, error) {
if ro {
locktype = unix.F_RDLCK
}
return &lockfile{
stateMutex: &sync.Mutex{},
return &LockFile{
file: path,
ro: ro,
rwMutex: &sync.RWMutex{},
file: path,
lw: newLastWriterID(),
stateMutex: &sync.Mutex{},
lw: newLastWrite(), // For compatibility, the first call of .Modified() will always report a change.
locktype: int16(locktype),
locked: false,
ro: ro}, nil
}, nil
}
// lock locks the lockfile via FCTNL(2) based on the specified type and
// command.
func (l *lockfile) lock(lType int16) {
func (l *LockFile) lock(lType int16) {
lk := unix.Flock_t{
Type: lType,
Whence: int16(unix.SEEK_SET),
@ -168,7 +222,7 @@ func (l *lockfile) lock(lType int16) {
}
// Lock locks the lockfile as a writer. Panic if the lock is a read-only one.
func (l *lockfile) Lock() {
func (l *LockFile) Lock() {
if l.ro {
panic("can't take write lock on read-only lock file")
} else {
@ -177,12 +231,12 @@ func (l *lockfile) Lock() {
}
// LockRead locks the lockfile as a reader.
func (l *lockfile) RLock() {
func (l *LockFile) RLock() {
l.lock(unix.F_RDLCK)
}
// Unlock unlocks the lockfile.
func (l *lockfile) Unlock() {
func (l *LockFile) Unlock() {
l.stateMutex.Lock()
if !l.locked {
// Panic when unlocking an unlocked lock. That's a violation
@ -213,7 +267,7 @@ func (l *lockfile) Unlock() {
l.stateMutex.Unlock()
}
func (l *lockfile) AssertLocked() {
func (l *LockFile) AssertLocked() {
// DO NOT provide a variant that returns the value of l.locked.
//
// If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and
@ -230,7 +284,7 @@ func (l *lockfile) AssertLocked() {
}
}
func (l *lockfile) AssertLockedForWriting() {
func (l *LockFile) AssertLockedForWriting() {
// DO NOT provide a variant that returns the current lock state.
//
// The same caveats as for AssertLocked apply equally.
@ -242,53 +296,128 @@ func (l *lockfile) AssertLockedForWriting() {
}
}
// Touch updates the lock file with the UID of the user.
func (l *lockfile) Touch() error {
// GetLastWrite returns a LastWrite value corresponding to the current state of the lock.
// This is typically called before (_not after_) loading the state when initializing a consumer
// of the data protected by the lock.
// During the lifetime of the consumer, the consumer should usually call ModifiedSince instead.
//
// The caller must hold the lock (for reading or writing).
func (l *LockFile) GetLastWrite() (LastWrite, error) {
	l.AssertLocked()
	buf := make([]byte, lastWriterIDSize)
	n, err := unix.Pread(int(l.fd), buf, 0)
	if err != nil {
		return LastWrite{}, err
	}
	// A short (even zero-byte) read is a valid state: a freshly-created lock
	// file is empty, meaning no writes have been recorded yet. Only the bytes
	// actually read belong to the identifier.
	return newLastWriteFromData(buf[:n]), nil
}
// RecordWrite updates the lock with a new LastWrite value, and returns the new value.
//
// If this function fails, the LastWrite value of the lock is indeterminate;
// the caller should keep using the previously-recorded LastWrite value,
// and possibly detecting its own modification as an external one:
//
//	lw, err := state.lock.RecordWrite()
//	if err != nil { /* fail */ }
//	state.lastWrite = lw
//
// The caller must hold the lock for writing.
func (l *LockFile) RecordWrite() (LastWrite, error) {
	l.AssertLockedForWriting()
	next := newLastWrite()
	data := next.serialize()
	written, err := unix.Pwrite(int(l.fd), data, 0)
	if err != nil {
		return LastWrite{}, err
	}
	if written != len(data) {
		// A short write leaves the on-disk identifier truncated; surface it
		// as an out-of-space condition.
		return LastWrite{}, unix.ENOSPC
	}
	return next, nil
}
// ModifiedSince checks whether the lock has been changed since a provided
// LastWrite value, and returns the value to record in its place.
//
// If ModifiedSince reports no modification, the previous LastWrite value
// is still valid and can continue to be used.
//
// If this function fails, the LastWrite value of the lock is indeterminate;
// the caller should fail and keep using the previously-recorded LastWrite value,
// so that it continues failing until the situation is resolved. Similarly,
// it should only update the recorded LastWrite value after processing the update:
//
//	lw2, modified, err := state.lock.ModifiedSince(state.lastWrite)
//	if err != nil { /* fail */ }
//	state.lastWrite = lw2
//	if modified {
//		if err := reload(); err != nil { /* fail */ }
//		state.lastWrite = lw2
//	}
//
// The caller must hold the lock (for reading or writing).
func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) {
	l.AssertLocked()
	current, err := l.GetLastWrite()
	if err != nil {
		return LastWrite{}, false, err
	}
	return current, !previous.equals(current), nil
}
// Touch updates the lock file to record that the current lock holder has modified the lock-protected data.
//
// Deprecated: Use *LockFile.RecordWrite.
func (l *LockFile) Touch() error {
lw, err := l.RecordWrite()
if err != nil {
return err
}
l.stateMutex.Lock()
if !l.locked || (l.locktype != unix.F_WRLCK) {
panic("attempted to update last-writer in lockfile without the write lock")
}
defer l.stateMutex.Unlock()
l.lw = newLastWriterID()
n, err := unix.Pwrite(int(l.fd), l.lw, 0)
if err != nil {
return err
}
if n != len(l.lw) {
return unix.ENOSPC
}
l.lw = lw
return nil
}
// Modified indicates if the lockfile has been updated since the last time it
// was loaded.
func (l *lockfile) Modified() (bool, error) {
// NOTE: Unlike ModifiedSince, this returns true the first time it is called on a *LockFile.
// Callers cannot, in general, rely on this, because that might have happened for some other
// owner of the same *LockFile who created it previously.
//
// Deprecated: Use *LockFile.ModifiedSince.
func (l *LockFile) Modified() (bool, error) {
l.stateMutex.Lock()
if !l.locked {
panic("attempted to check last-writer in lockfile without locking it first")
}
defer l.stateMutex.Unlock()
currentLW := make([]byte, lastWriterIDSize)
n, err := unix.Pread(int(l.fd), currentLW, 0)
oldLW := l.lw
// Note that this is called with stateMutex held; that's fine because ModifiedSince doesn't need to lock it.
currentLW, modified, err := l.ModifiedSince(oldLW)
if err != nil {
return true, err
}
// It is important to handle the partial read case, because
// the initial size of the lock file is zero, which is a valid
// state (no writes yet)
currentLW = currentLW[:n]
oldLW := l.lw
l.lw = currentLW
return !bytes.Equal(currentLW, oldLW), nil
return modified, nil
}
// IsReadWriteLock indicates if the lock file is a read-write lock.
func (l *lockfile) IsReadWrite() bool {
func (l *LockFile) IsReadWrite() bool {
return !l.ro
}
// TouchedSince indicates if the lock file has been touched since the specified time
func (l *lockfile) TouchedSince(when time.Time) bool {
func (l *LockFile) TouchedSince(when time.Time) bool {
st, err := system.Fstat(int(l.fd))
if err != nil {
return true

View File

@ -9,45 +9,58 @@ import (
"time"
)
// createLockerForPath returns a Locker object, possibly (depending on the platform)
// createLockFileForPath returns a *LockFile object, possibly (depending on the platform)
// working inter-process and associated with the specified path.
//
// This function will be called at most once for each path value within a single process.
//
// If ro, the lock is a read-write lock and the returned Locker should correspond to the
// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the
// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation.
// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation.
//
// WARNING:
// - The lock may or MAY NOT be inter-process.
// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
// - Even if ro, the lock MAY be exclusive.
func createLockerForPath(path string, ro bool) (Locker, error) {
return &lockfile{locked: false}, nil
func createLockFileForPath(path string, ro bool) (*LockFile, error) {
return &LockFile{locked: false}, nil
}
type lockfile struct {
// *LockFile represents a file lock where the file is used to cache an
// identifier of the last party that made changes to whatever's being protected
// by the lock.
//
// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead.
type LockFile struct {
mu sync.Mutex
file string
locked bool
}
func (l *lockfile) Lock() {
// LastWrite is an opaque identifier of the last write to some *LockFile.
// It can be used by users of a *LockFile to determine if the lock indicates changes
// since the last check.
// A default-initialized LastWrite never matches any last write, i.e. it always indicates changes.
type LastWrite struct {
	// Nothing: The Windows “implementation” does not actually track writes,
	// so there is no state to carry.
}
// Lock locks the lockfile as a writer.
func (l *LockFile) Lock() {
	l.mu.Lock()
	l.locked = true
}
func (l *lockfile) RLock() {
func (l *LockFile) RLock() {
l.mu.Lock()
l.locked = true
}
func (l *lockfile) Unlock() {
func (l *LockFile) Unlock() {
l.locked = false
l.mu.Unlock()
}
func (l *lockfile) AssertLocked() {
func (l *LockFile) AssertLocked() {
// DO NOT provide a variant that returns the value of l.locked.
//
// If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and
@ -59,24 +72,77 @@ func (l *lockfile) AssertLocked() {
}
}
func (l *lockfile) AssertLockedForWriting() {
func (l *LockFile) AssertLockedForWriting() {
// DO NOT provide a variant that returns the current lock state.
//
// The same caveats as for AssertLocked apply equally.
l.AssertLocked() // The current implementation does not distinguish between read and write locks.
}
func (l *lockfile) Modified() (bool, error) {
// GetLastWrite returns a LastWrite value corresponding to the current state of the lock.
// This is typically called before (_not after_) loading the state when initializing a consumer
// of the data protected by the lock.
// During the lifetime of the consumer, the consumer should usually call ModifiedSince instead.
//
// The caller must hold the lock (for reading or writing) before this function is called.
func (l *LockFile) GetLastWrite() (LastWrite, error) {
	l.AssertLocked()
	// Writes are not tracked on this platform; the zero LastWrite is the only state.
	return LastWrite{}, nil
}
// RecordWrite updates the lock with a new LastWrite value, and returns the new value.
//
// If this function fails, the LastWrite value of the lock is indeterminate;
// the caller should keep using the previously-recorded LastWrite value,
// and possibly detecting its own modification as an external one:
//
//	lw, err := state.lock.RecordWrite()
//	if err != nil { /* fail */ }
//	state.lastWrite = lw
//
// The caller must hold the lock for writing.
func (l *LockFile) RecordWrite() (LastWrite, error) {
	// No-op on this platform: writes are not tracked, so the zero value is returned.
	return LastWrite{}, nil
}
// ModifiedSince checks if the lock has been changed since a provided LastWrite value,
// and returns the one to record instead.
//
// If ModifiedSince reports no modification, the previous LastWrite value
// is still valid and can continue to be used.
//
// If this function fails, the LastWrite value of the lock is indeterminate;
// the caller should fail and keep using the previously-recorded LastWrite value,
// so that it continues failing until the situation is resolved. Similarly,
// it should only update the recorded LastWrite value after processing the update:
//
//	lw2, modified, err := state.lock.ModifiedSince(state.lastWrite)
//	if err != nil { /* fail */ }
//	state.lastWrite = lw2
//	if modified {
//		if err := reload(); err != nil { /* fail */ }
//		state.lastWrite = lw2
//	}
//
// The caller must hold the lock (for reading or writing).
func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) {
	// Writes are not tracked on this platform, so no modification is ever reported.
	return LastWrite{}, false, nil
}
// Modified indicates if the lockfile has been updated since the last check;
// on this platform writes are not tracked, so it always reports false.
//
// Deprecated: Use *LockFile.ModifiedSince.
func (l *LockFile) Modified() (bool, error) {
	return false, nil
}
func (l *lockfile) Touch() error {
// Touch records that the current lock holder has modified the lock-protected data;
// it is a no-op on this platform.
//
// Deprecated: Use *LockFile.RecordWrite.
func (l *LockFile) Touch() error {
	return nil
}
func (l *lockfile) IsReadWrite() bool {
func (l *LockFile) IsReadWrite() bool {
return false
}
func (l *lockfile) TouchedSince(when time.Time) bool {
func (l *LockFile) TouchedSince(when time.Time) bool {
stat, err := os.Stat(l.file)
if err != nil {
return true

View File

@ -18,7 +18,9 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) {
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
mtim: s.Mtimespec}
mtim: s.Mtimespec,
dev: s.Dev}
st.flags = s.Flags
st.dev = s.Dev
return st, nil
}

View File

@ -9,7 +9,8 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) {
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
mtim: s.Mtim}, nil
mtim: s.Mtim,
dev: uint64(s.Dev)}, nil
}
// FromStatT converts a syscall.Stat_t type to a system.Stat_t type

View File

@ -18,6 +18,7 @@ type StatT struct {
rdev uint64
size int64
mtim syscall.Timespec
dev uint64
platformStatT
}
@ -51,6 +52,11 @@ func (s StatT) Mtim() syscall.Timespec {
return s.mtim
}
// Dev returns a unique identifier for the owning filesystem
// (the device number reported by stat for the file).
func (s StatT) Dev() uint64 {
	return s.dev
}
// Stat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//

View File

@ -43,6 +43,11 @@ func (s StatT) GID() uint32 {
return 0
}
// Dev returns a unique identifier for the owning filesystem.
// This stub always returns 0 on this platform.
func (s StatT) Dev() uint64 {
	return 0
}
// Stat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//

View File

@ -32,6 +32,10 @@ graphroot = "/var/lib/containers/storage"
#
# rootless_storage_path = "$HOME/.local/share/containers/storage"
# Transient store mode causes all container metadata to be saved in temporary storage
# (i.e. runroot above). This is faster, but doesn't persist across reboots.
# transient_store = true
[storage.options]
# Storage options to be passed to underlying storage drivers

View File

@ -20,6 +20,7 @@ import (
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/stringutils"
"github.com/containers/storage/pkg/system"
@ -141,6 +142,7 @@ type Store interface {
// settings that were passed to GetStore() when the object was created.
RunRoot() string
GraphRoot() string
TransientStore() bool
GraphDriverName() string
GraphOptions() []string
PullOptions() map[string]string
@ -502,6 +504,11 @@ type Store interface {
// Releasing AdditionalLayer handler is caller's responsibility.
// This API is experimental and can be changed without bumping the major version number.
LookupAdditionalLayer(d digest.Digest, imageref string) (AdditionalLayer, error)
// Tries to clean up remainders of previous containers or layers that are not
// referenced in the json files. These can happen in the case of unclean
// shutdowns or regular restarts in transient store mode.
GarbageCollect() error
}
// AdditionalLayer represents a layer that is contained in the additional layer store
@ -545,6 +552,8 @@ type LayerOptions struct {
// and reliably known by the caller.
// Use the default "" if this fields is not applicable or the value is not known.
UncompressedDigest digest.Digest
// True if the layer info can be treated as volatile
Volatile bool
}
// ImageOptions is used for passing options to a Store's CreateImage() method.
@ -571,29 +580,31 @@ type ContainerOptions struct {
}
type store struct {
lastLoaded time.Time
runRoot string
graphLock Locker
usernsLock Locker
graphRoot string
graphDriverName string
graphOptions []string
pullOptions map[string]string
uidMap []idtools.IDMap
gidMap []idtools.IDMap
autoUsernsUser string
additionalUIDs *idSet // Set by getAvailableIDs()
additionalGIDs *idSet // Set by getAvailableIDs()
autoNsMinSize uint32
autoNsMaxSize uint32
graphDriver drivers.Driver
layerStore rwLayerStore
roLayerStores []roLayerStore
imageStore rwImageStore
roImageStores []roImageStore
containerStore rwContainerStore
digestLockRoot string
disableVolatile bool
lastLoaded time.Time
runRoot string
graphLock *lockfile.LockFile
usernsLock *lockfile.LockFile
graphRoot string
graphDriverName string
graphOptions []string
pullOptions map[string]string
uidMap []idtools.IDMap
gidMap []idtools.IDMap
autoUsernsUser string
additionalUIDs *idSet // Set by getAvailableIDs()
additionalGIDs *idSet // Set by getAvailableIDs()
autoNsMinSize uint32
autoNsMaxSize uint32
graphLockLastWrite lockfile.LastWrite
graphDriver drivers.Driver
layerStore rwLayerStore
roLayerStores []roLayerStore
imageStore rwImageStore
roImageStores []roImageStore
containerStore rwContainerStore
digestLockRoot string
disableVolatile bool
transientStore bool
}
// GetStore attempts to find an already-created Store object matching the
@ -668,12 +679,12 @@ func GetStore(options types.StoreOptions) (Store, error) {
return nil, err
}
graphLock, err := GetLockfile(filepath.Join(options.GraphRoot, "storage.lock"))
graphLock, err := lockfile.GetLockFile(filepath.Join(options.GraphRoot, "storage.lock"))
if err != nil {
return nil, err
}
usernsLock, err := GetLockfile(filepath.Join(options.GraphRoot, "userns.lock"))
usernsLock, err := lockfile.GetLockFile(filepath.Join(options.GraphRoot, "userns.lock"))
if err != nil {
return nil, err
}
@ -701,8 +712,21 @@ func GetStore(options types.StoreOptions) (Store, error) {
additionalGIDs: nil,
usernsLock: usernsLock,
disableVolatile: options.DisableVolatile,
transientStore: options.TransientStore,
pullOptions: options.PullOptions,
}
if err := func() error { // A scope for defer
s.graphLock.Lock()
defer s.graphLock.Unlock()
lastWrite, err := s.graphLock.GetLastWrite()
if err != nil {
return err
}
s.graphLockLastWrite = lastWrite
return nil
}(); err != nil {
return nil, err
}
if err := s.load(); err != nil {
return nil, err
}
@ -748,6 +772,10 @@ func (s *store) GraphRoot() string {
return s.graphRoot
}
// TransientStore reports whether the store was created with the
// transient-store option enabled (container metadata kept in the run root
// rather than persisted across reboots).
func (s *store) TransientStore() bool {
	return s.transientStore
}
func (s *store) GraphOptions() []string {
return s.graphOptions
}
@ -794,14 +822,16 @@ func (s *store) load() error {
if err := os.MkdirAll(gcpath, 0700); err != nil {
return err
}
rcs, err := newContainerStore(gcpath)
if err != nil {
return err
}
rcpath := filepath.Join(s.runRoot, driverPrefix+"containers")
if err := os.MkdirAll(rcpath, 0700); err != nil {
return err
}
rcs, err := newContainerStore(gcpath, rcpath, s.transientStore)
if err != nil {
return err
}
s.containerStore = rcs
for _, store := range driver.AdditionalImageStores() {
@ -823,7 +853,7 @@ func (s *store) load() error {
// GetDigestLock returns a digest-specific Locker.
func (s *store) GetDigestLock(d digest.Digest) (Locker, error) {
return GetLockfile(filepath.Join(s.digestLockRoot, d.String()))
return lockfile.GetLockFile(filepath.Join(s.digestLockRoot, d.String()))
}
func (s *store) getGraphDriver() (drivers.Driver, error) {
@ -883,7 +913,7 @@ func (s *store) getLayerStore() (rwLayerStore, error) {
if err := os.MkdirAll(glpath, 0700); err != nil {
return nil, err
}
rls, err := s.newLayerStore(rlpath, glpath, driver)
rls, err := s.newLayerStore(rlpath, glpath, driver, s.transientStore)
if err != nil {
return nil, err
}
@ -1282,9 +1312,10 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, o
// imageTopLayerForMapping does ???
// On entry:
// - ristore must be locked EITHER for reading or writing
// - primaryImageStore must be locked for writing; it might be identical to ristore.
// - rlstore must be locked for writing
// - lstores must all be locked for reading
func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, createMappedLayer bool, rlstore rwLayerStore, lstores []roLayerStore, options types.IDMappingOptions) (*Layer, error) {
func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, primaryImageStore rwImageStore, rlstore rwLayerStore, lstores []roLayerStore, options types.IDMappingOptions) (*Layer, error) {
layerMatchesMappingOptions := func(layer *Layer, options types.IDMappingOptions) bool {
// If the driver supports shifting and the layer has no mappings, we can use it.
if s.canUseShifting(options.UIDMap, options.GIDMap) && len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 {
@ -1303,6 +1334,7 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, crea
var layer, parentLayer *Layer
allStores := append([]roLayerStore{rlstore}, lstores...)
// Locate the image's top layer and its parent, if it has one.
createMappedLayer := ristore == primaryImageStore
for _, s := range allStores {
store := s
// Walk the top layer list.
@ -1350,44 +1382,41 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, crea
return layer, nil
}
// The top layer's mappings don't match the ones we want, and it's in an image store
// that lets us edit image metadata...
if istore, ok := ristore.(*imageStore); ok {
// ... so create a duplicate of the layer with the desired mappings, and
// register it as an alternate top layer in the image.
var layerOptions LayerOptions
if s.canUseShifting(options.UIDMap, options.GIDMap) {
layerOptions = LayerOptions{
IDMappingOptions: types.IDMappingOptions{
HostUIDMapping: true,
HostGIDMapping: true,
UIDMap: nil,
GIDMap: nil,
},
}
} else {
layerOptions = LayerOptions{
IDMappingOptions: types.IDMappingOptions{
HostUIDMapping: options.HostUIDMapping,
HostGIDMapping: options.HostGIDMapping,
UIDMap: copyIDMap(options.UIDMap),
GIDMap: copyIDMap(options.GIDMap),
},
}
// that lets us edit image metadata, so create a duplicate of the layer with the desired
// mappings, and register it as an alternate top layer in the image.
var layerOptions LayerOptions
if s.canUseShifting(options.UIDMap, options.GIDMap) {
layerOptions = LayerOptions{
IDMappingOptions: types.IDMappingOptions{
HostUIDMapping: true,
HostGIDMapping: true,
UIDMap: nil,
GIDMap: nil,
},
}
layerOptions.TemplateLayer = layer.ID
mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil)
if err != nil {
return nil, fmt.Errorf("creating an ID-mapped copy of layer %q: %w", layer.ID, err)
} else {
layerOptions = LayerOptions{
IDMappingOptions: types.IDMappingOptions{
HostUIDMapping: options.HostUIDMapping,
HostGIDMapping: options.HostGIDMapping,
UIDMap: copyIDMap(options.UIDMap),
GIDMap: copyIDMap(options.GIDMap),
},
}
if err = istore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil {
if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil {
err = fmt.Errorf("deleting layer %q: %v: %w", mappedLayer.ID, err2, err)
}
return nil, fmt.Errorf("registering ID-mapped layer with image %q: %w", image.ID, err)
}
layer = mappedLayer
}
return layer, nil
layerOptions.TemplateLayer = layer.ID
mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil)
if err != nil {
return nil, fmt.Errorf("creating an ID-mapped copy of layer %q: %w", layer.ID, err)
}
// By construction, createMappedLayer can only be true if ristore == primaryImageStore.
if err = primaryImageStore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil {
if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil {
err = fmt.Errorf("deleting layer %q: %v: %w", mappedLayer.ID, err2, err)
}
return nil, fmt.Errorf("registering ID-mapped layer with image %q: %w", image.ID, err)
}
return mappedLayer, nil
}
func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) {
@ -1488,8 +1517,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
idMappingsOptions := options.IDMappingOptions
if image != "" {
if cimage.TopLayer != "" {
createMappedLayer := imageHomeStore == istore
ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, createMappedLayer, rlstore, lstores, idMappingsOptions)
ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, istore, rlstore, lstores, idMappingsOptions)
if err != nil {
return nil, err
}
@ -1514,25 +1542,28 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
gidMap = s.gidMap
}
}
var layerOptions *LayerOptions
layerOptions := &LayerOptions{
// Normally layers for containers are volatile only if the container is.
// But in transient store mode, all container layers are volatile.
Volatile: options.Volatile || s.transientStore,
}
if s.canUseShifting(uidMap, gidMap) {
layerOptions = &LayerOptions{
IDMappingOptions: types.IDMappingOptions{
layerOptions.IDMappingOptions =
types.IDMappingOptions{
HostUIDMapping: true,
HostGIDMapping: true,
UIDMap: nil,
GIDMap: nil,
},
}
}
} else {
layerOptions = &LayerOptions{
IDMappingOptions: types.IDMappingOptions{
layerOptions.IDMappingOptions =
types.IDMappingOptions{
HostUIDMapping: idMappingsOptions.HostUIDMapping,
HostGIDMapping: idMappingsOptions.HostGIDMapping,
UIDMap: copyIDMap(uidMap),
GIDMap: copyIDMap(gidMap),
},
}
}
}
if options.Flags == nil {
options.Flags = make(map[string]interface{})
@ -1559,6 +1590,11 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
}
layer = clayer.ID
// Normally only `--rm` containers are volatile, but in transient store mode all containers are volatile
if s.transientStore {
options.Volatile = true
}
var container *Container
err = s.writeToContainerStore(func(rcstore rwContainerStore) error {
options.IDMappingOptions = types.IDMappingOptions{
@ -1888,63 +1924,59 @@ func (s *store) ContainerSize(id string) (int64, error) {
return -1, err
}
rcstore, err := s.getContainerStore()
if err != nil {
return -1, err
}
if err := rcstore.startReading(); err != nil {
return -1, err
}
defer rcstore.stopReading()
// Read the container record.
container, err := rcstore.Get(id)
if err != nil {
return -1, err
}
// Read the container's layer's size.
var layer *Layer
var size int64
for _, store := range layerStores {
if layer, err = store.Get(container.LayerID); err == nil {
size, err = store.DiffSize("", layer.ID)
if err != nil {
return -1, fmt.Errorf("determining size of layer with ID %q: %w", layer.ID, err)
}
break
}
}
if layer == nil {
return -1, fmt.Errorf("locating layer with ID %q: %w", container.LayerID, ErrLayerUnknown)
}
// Count big data items.
names, err := rcstore.BigDataNames(id)
if err != nil {
return -1, fmt.Errorf("reading list of big data items for container %q: %w", container.ID, err)
}
for _, name := range names {
n, err := rcstore.BigDataSize(id, name)
var res int64 = -1
err = s.writeToContainerStore(func(rcstore rwContainerStore) error { // Yes, rcstore.BigDataSize requires a write lock.
// Read the container record.
container, err := rcstore.Get(id)
if err != nil {
return -1, fmt.Errorf("reading size of big data item %q for container %q: %w", name, id, err)
return err
}
// Read the container's layer's size.
var layer *Layer
var size int64
for _, store := range layerStores {
if layer, err = store.Get(container.LayerID); err == nil {
size, err = store.DiffSize("", layer.ID)
if err != nil {
return fmt.Errorf("determining size of layer with ID %q: %w", layer.ID, err)
}
break
}
}
if layer == nil {
return fmt.Errorf("locating layer with ID %q: %w", container.LayerID, ErrLayerUnknown)
}
// Count big data items.
names, err := rcstore.BigDataNames(id)
if err != nil {
return fmt.Errorf("reading list of big data items for container %q: %w", container.ID, err)
}
for _, name := range names {
n, err := rcstore.BigDataSize(id, name)
if err != nil {
return fmt.Errorf("reading size of big data item %q for container %q: %w", name, id, err)
}
size += n
}
// Count the size of our container directory and container run directory.
n, err := directory.Size(cdir)
if err != nil {
return err
}
size += n
n, err = directory.Size(rdir)
if err != nil {
return err
}
size += n
}
// Count the size of our container directory and container run directory.
n, err := directory.Size(cdir)
if err != nil {
return -1, err
}
size += n
n, err = directory.Size(rdir)
if err != nil {
return -1, err
}
size += n
return size, nil
res = size
return nil
})
return res, err
}
func (s *store) ListContainerBigData(id string) ([]string, error) {
@ -1962,27 +1994,23 @@ func (s *store) ListContainerBigData(id string) ([]string, error) {
}
func (s *store) ContainerBigDataSize(id, key string) (int64, error) {
rcstore, err := s.getContainerStore()
if err != nil {
return -1, err
}
if err := rcstore.startReading(); err != nil {
return -1, err
}
defer rcstore.stopReading()
return rcstore.BigDataSize(id, key)
var res int64 = -1
err := s.writeToContainerStore(func(store rwContainerStore) error { // Yes, BigDataSize requires a write lock.
var err error
res, err = store.BigDataSize(id, key)
return err
})
return res, err
}
func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) {
rcstore, err := s.getContainerStore()
if err != nil {
return "", err
}
if err := rcstore.startReading(); err != nil {
return "", err
}
defer rcstore.stopReading()
return rcstore.BigDataDigest(id, key)
var res digest.Digest
err := s.writeToContainerStore(func(store rwContainerStore) error { // Yes, BigDataDigest requires a write lock.
var err error
res, err = store.BigDataDigest(id, key)
return err
})
return res, err
}
func (s *store) ContainerBigData(id, key string) ([]byte, error) {
@ -2222,12 +2250,6 @@ func (s *store) DeleteLayer(id string) error {
if image.TopLayer == id {
return fmt.Errorf("layer %v used by image %v: %w", id, image.ID, ErrLayerUsedByImage)
}
if stringutils.InSlice(image.MappedTopLayers, id) {
// No write access to the image store, fail before the layer is deleted
if _, ok := ristore.(*imageStore); !ok {
return fmt.Errorf("layer %v used by image %v: %w", id, image.ID, ErrLayerUsedByImage)
}
}
}
containers, err := rcstore.Containers()
if err != nil {
@ -2242,14 +2264,10 @@ func (s *store) DeleteLayer(id string) error {
return fmt.Errorf("delete layer %v: %w", id, err)
}
// The check here is used to avoid iterating the images if we don't need to.
// There is already a check above for the imageStore to be writeable when the layer is part of MappedTopLayers.
if istore, ok := ristore.(*imageStore); ok {
for _, image := range images {
if stringutils.InSlice(image.MappedTopLayers, id) {
if err = istore.removeMappedTopLayer(image.ID, id); err != nil {
return fmt.Errorf("remove mapped top layer %v from image %v: %w", id, image.ID, err)
}
for _, image := range images {
if stringutils.InSlice(image.MappedTopLayers, id) {
if err = ristore.removeMappedTopLayer(image.ID, id); err != nil {
return fmt.Errorf("remove mapped top layer %v from image %v: %w", id, image.ID, err)
}
}
}
@ -2506,10 +2524,11 @@ func (s *store) mount(id string, options drivers.MountOpts) (string, error) {
}
defer rlstore.stopWriting()
modified, err := s.graphLock.Modified()
lastWrite, modified, err := s.graphLock.ModifiedSince(s.graphLockLastWrite)
if err != nil {
return "", err
}
s.graphLockLastWrite = lastWrite
/* We need to make sure the home mount is present when the Mount is done. */
if modified {
@ -2653,10 +2672,11 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro
s.graphLock.Lock()
defer s.graphLock.Unlock()
modified, err := s.graphLock.Modified()
lastWrite, modified, err := s.graphLock.ModifiedSince(s.graphLockLastWrite)
if err != nil {
return nil, err
}
s.graphLockLastWrite = lastWrite
// We need to make sure the home mount is present when the Mount is done.
if modified {
@ -3215,11 +3235,14 @@ func (s *store) Shutdown(force bool) ([]string, error) {
}
if err == nil {
err = s.graphDriver.Cleanup()
if err2 := s.graphLock.Touch(); err2 != nil {
// We dont retain the lastWrite value, and treat this update as if someone else did the .Cleanup(),
// so that we reload after a .Shutdown() the same way other processes would.
// Shutdown() is basically an error path, so reliability is more important than performance.
if _, err2 := s.graphLock.RecordWrite(); err2 != nil {
if err == nil {
err = err2
} else {
err = fmt.Errorf("(graphLock.Touch failed: %v) %w", err2, err)
err = fmt.Errorf("(graphLock.RecordWrite failed: %v) %w", err2, err)
}
}
}
@ -3366,3 +3389,20 @@ func (s *store) Free() {
}
}
}
// Tries to clean up old unreferenced container leftovers. returns the first error
// but continues as far as it can
func (s *store) GarbageCollect() error {
firstErr := s.writeToContainerStore(func(rcstore rwContainerStore) error {
return rcstore.GarbageCollect()
})
moreErr := s.writeToLayerStore(func(rlstore rwLayerStore) error {
return rlstore.GarbageCollect()
})
if firstErr == nil {
firstErr = moreErr
}
return firstErr
}

View File

@ -22,6 +22,7 @@ type TomlConfig struct {
RunRoot string `toml:"runroot,omitempty"`
GraphRoot string `toml:"graphroot,omitempty"`
RootlessStoragePath string `toml:"rootless_storage_path,omitempty"`
TransientStore bool `toml:"transient_store,omitempty"`
Options cfg.OptionsConfig `toml:"options,omitempty"`
} `toml:"storage"`
}
@ -234,6 +235,8 @@ type StoreOptions struct {
PullOptions map[string]string `toml:"pull_options"`
// DisableVolatile doesn't allow volatile mounts when it is set.
DisableVolatile bool `json:"disable-volatile,omitempty"`
// If transient, don't persist containers over boot (stores db in runroot)
TransientStore bool `json:"transient_store,omitempty"`
}
// isRootlessDriver returns true if the given storage driver is valid for containers running as non root
@ -452,6 +455,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro
}
storeOptions.DisableVolatile = config.Storage.Options.DisableVolatile
storeOptions.TransientStore = config.Storage.TransientStore
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, cfg.GetGraphDriverOptions(storeOptions.GraphDriverName, config.Storage.Options)...)

View File

@ -2,6 +2,7 @@ package storage
import (
"fmt"
"unicode"
"github.com/containers/storage/types"
)
@ -72,3 +73,15 @@ func applyNameOperation(oldNames []string, opParameters []string, op updateNameO
}
return dedupeNames(result), nil
}
func nameLooksLikeID(name string) bool {
if len(name) != 64 {
return false
}
for _, c := range name {
if !unicode.Is(unicode.ASCII_Hex_Digit, c) {
return false
}
}
return true
}

View File

@ -175,11 +175,6 @@ type ValidationRecord struct {
// ...
// }
AddressesTried []net.IP `json:"addressesTried,omitempty"`
// OldTLS is true if any request in the validation chain used HTTPS and negotiated
// a TLS version lower than 1.2.
// TODO(#6011): Remove once TLS 1.0 and 1.1 support is gone.
OldTLS bool `json:"oldTLS,omitempty"`
}
func looksLikeKeyAuthorization(str string) error {

View File

@ -26,6 +26,8 @@ import (
jose "gopkg.in/square/go-jose.v2"
)
const Unspecified = "Unspecified"
// Package Variables Variables
// BuildID is set by the compiler (using -ldflags "-X core.BuildID $(git rev-parse --short HEAD)")
@ -182,7 +184,7 @@ func ValidSerial(serial string) bool {
func GetBuildID() (retID string) {
retID = BuildID
if retID == "" {
retID = "Unspecified"
retID = Unspecified
}
return
}
@ -191,7 +193,7 @@ func GetBuildID() (retID string) {
func GetBuildTime() (retID string) {
retID = BuildTime
if retID == "" {
retID = "Unspecified"
retID = Unspecified
}
return
}
@ -200,7 +202,7 @@ func GetBuildTime() (retID string) {
func GetBuildHost() (retID string) {
retID = BuildHost
if retID == "" {
retID = "Unspecified"
retID = Unspecified
}
return
}

View File

@ -12,6 +12,7 @@ package errors
import (
"fmt"
"time"
"github.com/letsencrypt/boulder/identifier"
)
@ -56,6 +57,10 @@ type BoulderError struct {
Type ErrorType
Detail string
SubErrors []SubBoulderError
// RetryAfter the duration a client should wait before retrying the request
// which resulted in this error.
RetryAfter time.Duration
}
// SubBoulderError represents sub-errors specific to an identifier that are
@ -77,9 +82,10 @@ func (be *BoulderError) Unwrap() error {
// provided subErrs to the existing BoulderError.
func (be *BoulderError) WithSubErrors(subErrs []SubBoulderError) *BoulderError {
return &BoulderError{
Type: be.Type,
Detail: be.Detail,
SubErrors: append(be.SubErrors, subErrs...),
Type: be.Type,
Detail: be.Detail,
SubErrors: append(be.SubErrors, subErrs...),
RetryAfter: be.RetryAfter,
}
}
@ -107,31 +113,35 @@ func NotFoundError(msg string, args ...interface{}) error {
return New(NotFound, msg, args...)
}
func RateLimitError(msg string, args ...interface{}) error {
func RateLimitError(retryAfter time.Duration, msg string, args ...interface{}) error {
return &BoulderError{
Type: RateLimit,
Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/rate-limits/", args...),
Type: RateLimit,
Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/rate-limits/", args...),
RetryAfter: retryAfter,
}
}
func DuplicateCertificateError(msg string, args ...interface{}) error {
func DuplicateCertificateError(retryAfter time.Duration, msg string, args ...interface{}) error {
return &BoulderError{
Type: RateLimit,
Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/duplicate-certificate-limit/", args...),
Type: RateLimit,
Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/duplicate-certificate-limit/", args...),
RetryAfter: retryAfter,
}
}
func FailedValidationError(msg string, args ...interface{}) error {
func FailedValidationError(retryAfter time.Duration, msg string, args ...interface{}) error {
return &BoulderError{
Type: RateLimit,
Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/failed-validation-limit/", args...),
Type: RateLimit,
Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/failed-validation-limit/", args...),
RetryAfter: retryAfter,
}
}
func RegistrationsPerIPError(msg string, args ...interface{}) error {
func RegistrationsPerIPError(retryAfter time.Duration, msg string, args ...interface{}) error {
return &BoulderError{
Type: RateLimit,
Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/too-many-registrations-for-this-ip/", args...),
Type: RateLimit,
Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/too-many-registrations-for-this-ip/", args...),
RetryAfter: retryAfter,
}
}

View File

@ -16,36 +16,37 @@ func _() {
_ = x[StreamlineOrderAndAuthzs-5]
_ = x[V1DisableNewValidations-6]
_ = x[ExpirationMailerDontLookTwice-7]
_ = x[CAAValidationMethods-8]
_ = x[CAAAccountURI-9]
_ = x[EnforceMultiVA-10]
_ = x[MultiVAFullResults-11]
_ = x[MandatoryPOSTAsGET-12]
_ = x[AllowV1Registration-13]
_ = x[StoreRevokerInfo-14]
_ = x[RestrictRSAKeySizes-15]
_ = x[FasterNewOrdersRateLimit-16]
_ = x[ECDSAForAll-17]
_ = x[ServeRenewalInfo-18]
_ = x[GetAuthzReadOnly-19]
_ = x[GetAuthzUseIndex-20]
_ = x[CheckFailedAuthorizationsFirst-21]
_ = x[AllowReRevocation-22]
_ = x[MozRevocationReasons-23]
_ = x[OldTLSOutbound-24]
_ = x[OldTLSInbound-25]
_ = x[SHA1CSRs-26]
_ = x[AllowUnrecognizedFeatures-27]
_ = x[RejectDuplicateCSRExtensions-28]
_ = x[ROCSPStage1-29]
_ = x[ROCSPStage2-30]
_ = x[ROCSPStage3-31]
_ = x[OldTLSInbound-8]
_ = x[OldTLSOutbound-9]
_ = x[ROCSPStage1-10]
_ = x[ROCSPStage2-11]
_ = x[ROCSPStage3-12]
_ = x[CAAValidationMethods-13]
_ = x[CAAAccountURI-14]
_ = x[EnforceMultiVA-15]
_ = x[MultiVAFullResults-16]
_ = x[MandatoryPOSTAsGET-17]
_ = x[AllowV1Registration-18]
_ = x[StoreRevokerInfo-19]
_ = x[RestrictRSAKeySizes-20]
_ = x[FasterNewOrdersRateLimit-21]
_ = x[ECDSAForAll-22]
_ = x[ServeRenewalInfo-23]
_ = x[GetAuthzReadOnly-24]
_ = x[GetAuthzUseIndex-25]
_ = x[CheckFailedAuthorizationsFirst-26]
_ = x[AllowReRevocation-27]
_ = x[MozRevocationReasons-28]
_ = x[SHA1CSRs-29]
_ = x[AllowUnrecognizedFeatures-30]
_ = x[RejectDuplicateCSRExtensions-31]
_ = x[ROCSPStage6-32]
_ = x[ROCSPStage7-33]
}
const _FeatureFlag_name = "unusedPrecertificateRevocationStripDefaultSchemePortNonCFSSLSignerStoreIssuerInfoStreamlineOrderAndAuthzsV1DisableNewValidationsExpirationMailerDontLookTwiceCAAValidationMethodsCAAAccountURIEnforceMultiVAMultiVAFullResultsMandatoryPOSTAsGETAllowV1RegistrationStoreRevokerInfoRestrictRSAKeySizesFasterNewOrdersRateLimitECDSAForAllServeRenewalInfoGetAuthzReadOnlyGetAuthzUseIndexCheckFailedAuthorizationsFirstAllowReRevocationMozRevocationReasonsOldTLSOutboundOldTLSInboundSHA1CSRsAllowUnrecognizedFeaturesRejectDuplicateCSRExtensionsROCSPStage1ROCSPStage2ROCSPStage3ROCSPStage6"
const _FeatureFlag_name = "unusedPrecertificateRevocationStripDefaultSchemePortNonCFSSLSignerStoreIssuerInfoStreamlineOrderAndAuthzsV1DisableNewValidationsExpirationMailerDontLookTwiceOldTLSInboundOldTLSOutboundROCSPStage1ROCSPStage2ROCSPStage3CAAValidationMethodsCAAAccountURIEnforceMultiVAMultiVAFullResultsMandatoryPOSTAsGETAllowV1RegistrationStoreRevokerInfoRestrictRSAKeySizesFasterNewOrdersRateLimitECDSAForAllServeRenewalInfoGetAuthzReadOnlyGetAuthzUseIndexCheckFailedAuthorizationsFirstAllowReRevocationMozRevocationReasonsSHA1CSRsAllowUnrecognizedFeaturesRejectDuplicateCSRExtensionsROCSPStage6ROCSPStage7"
var _FeatureFlag_index = [...]uint16{0, 6, 30, 52, 66, 81, 105, 128, 157, 177, 190, 204, 222, 240, 259, 275, 294, 318, 329, 345, 361, 377, 407, 424, 444, 458, 471, 479, 504, 532, 543, 554, 565, 576}
var _FeatureFlag_index = [...]uint16{0, 6, 30, 52, 66, 81, 105, 128, 157, 170, 184, 195, 206, 217, 237, 250, 264, 282, 300, 319, 335, 354, 378, 389, 405, 421, 437, 467, 484, 504, 512, 537, 565, 576, 587}
func (i FeatureFlag) String() string {
if i < 0 || i >= FeatureFlag(len(_FeatureFlag_index)-1) {

View File

@ -20,6 +20,11 @@ const (
StreamlineOrderAndAuthzs
V1DisableNewValidations
ExpirationMailerDontLookTwice
OldTLSInbound
OldTLSOutbound
ROCSPStage1
ROCSPStage2
ROCSPStage3
// Currently in-use features
// Check CAA and respect validationmethods parameter.
@ -79,14 +84,6 @@ const (
// with the certificate's keypair, the cert will be revoked with reason
// keyCompromise, regardless of what revocation reason they request.
MozRevocationReasons
// OldTLSOutbound allows the VA to negotiate TLS 1.0 and TLS 1.1 during
// HTTPS redirects. When it is set to false, the VA will only connect to
// HTTPS servers that support TLS 1.2 or above.
OldTLSOutbound
// OldTLSInbound controls whether the WFE rejects inbound requests using
// TLS 1.0 and TLS 1.1. Because WFE does not terminate TLS in production,
// we rely on the TLS-Version header (set by our reverse proxy).
OldTLSInbound
// SHA1CSRs controls whether the /acme/finalize endpoint rejects CSRs that
// are self-signed using SHA1.
SHA1CSRs
@ -98,25 +95,15 @@ const (
// go1.19.
RejectDuplicateCSRExtensions
// ROCSPStage1 enables querying Redis, live-signing response, and storing
// to Redis, but doesn't serve responses from Redis.
ROCSPStage1
// ROCSPStage2 enables querying Redis, live-signing a response, and storing
// to Redis, and does serve responses from Redis when appropriate (when
// they are fresh, and agree with MariaDB's status for the certificate).
ROCSPStage2
// ROCSPStage3 enables querying Redis, live-signing a response, and serving
// from Redis, without any fallback to serving bytes from MariaDB. In this
// mode we still make a parallel request to MariaDB to cross-check the
// _status_ of the response. If that request indicates a different status
// than what's stored in Redis, we'll trigger a fresh signing and serve and
// store the result.
ROCSPStage3
// ROCSPStage6 disables writing full OCSP Responses to MariaDB during
// (pre)certificate issuance and during revocation. Because Stage 4 involved
// disabling ocsp-updater, this means that no ocsp response bytes will be
// written to the database anymore.
ROCSPStage6
// ROCSPStage7 disables generating OCSP responses during issuance and
// revocation. This affects codepaths in both the RA (revocation) and the CA
// (precert "birth certificates").
ROCSPStage7
)
// List of features and their default value, protected by fMu
@ -154,6 +141,7 @@ var features = map[FeatureFlag]bool{
ROCSPStage2: false,
ROCSPStage3: false,
ROCSPStage6: false,
ROCSPStage7: false,
}
var fMu = new(sync.RWMutex)

File diff suppressed because it is too large Load Diff

View File

@ -7,56 +7,89 @@ import "core/proto/core.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";
service StorageAuthority {
// Getters
rpc GetRegistration(RegistrationID) returns (core.Registration) {}
rpc GetRegistrationByKey(JSONWebKey) returns (core.Registration) {}
rpc GetSerialMetadata(Serial) returns (SerialMetadata) {}
rpc GetCertificate(Serial) returns (core.Certificate) {}
rpc GetPrecertificate(Serial) returns (core.Certificate) {}
rpc GetCertificateStatus(Serial) returns (core.CertificateStatus) {}
rpc GetRevocationStatus(Serial) returns (RevocationStatus) {}
// StorageAuthorityReadOnly exposes only those SA methods which are read-only.
service StorageAuthorityReadOnly {
rpc CountCertificatesByNames(CountCertificatesByNamesRequest) returns (CountByNames) {}
rpc CountFQDNSets(CountFQDNSetsRequest) returns (Count) {}
rpc CountInvalidAuthorizations2(CountInvalidAuthorizationsRequest) returns (Count) {}
rpc CountOrders(CountOrdersRequest) returns (Count) {}
rpc CountPendingAuthorizations2(RegistrationID) returns (Count) {}
rpc CountRegistrationsByIP(CountRegistrationsByIPRequest) returns (Count) {}
rpc CountRegistrationsByIPRange(CountRegistrationsByIPRequest) returns (Count) {}
rpc CountOrders(CountOrdersRequest) returns (Count) {}
// Return a count of authorizations with status "invalid" that belong to
// a given registration ID and expire in the given time range.
rpc CountFQDNSets(CountFQDNSetsRequest) returns (Count) {}
rpc FQDNSetTimestampsForWindow(CountFQDNSetsRequest) returns (Timestamps) {}
rpc FQDNSetExists(FQDNSetExistsRequest) returns (Exists) {}
rpc PreviousCertificateExists(PreviousCertificateExistsRequest) returns (Exists) {}
rpc FQDNSetTimestampsForWindow(CountFQDNSetsRequest) returns (Timestamps) {}
rpc GetAuthorization2(AuthorizationID2) returns (core.Authorization) {}
rpc GetAuthorizations2(GetAuthorizationsRequest) returns (Authorizations) {}
rpc GetCertificate(Serial) returns (core.Certificate) {}
rpc GetCertificateStatus(Serial) returns (core.CertificateStatus) {}
rpc GetMaxExpiration(google.protobuf.Empty) returns (google.protobuf.Timestamp) {}
rpc GetOrder(OrderRequest) returns (core.Order) {}
rpc GetOrderForNames(GetOrderForNamesRequest) returns (core.Order) {}
rpc GetPendingAuthorization2(GetPendingAuthorizationRequest) returns (core.Authorization) {}
rpc CountPendingAuthorizations2(RegistrationID) returns (Count) {}
rpc GetValidOrderAuthorizations2(GetValidOrderAuthorizationsRequest) returns (Authorizations) {}
rpc CountInvalidAuthorizations2(CountInvalidAuthorizationsRequest) returns (Count) {}
rpc GetValidAuthorizations2(GetValidAuthorizationsRequest) returns (Authorizations) {}
rpc KeyBlocked(KeyBlockedRequest) returns (Exists) {}
rpc SerialsForIncident (SerialsForIncidentRequest) returns (stream IncidentSerial) {}
rpc GetPrecertificate(Serial) returns (core.Certificate) {}
rpc GetRegistration(RegistrationID) returns (core.Registration) {}
rpc GetRegistrationByKey(JSONWebKey) returns (core.Registration) {}
rpc GetRevocationStatus(Serial) returns (RevocationStatus) {}
rpc GetRevokedCerts(GetRevokedCertsRequest) returns (stream core.CRLEntry) {}
rpc GetSerialMetadata(Serial) returns (SerialMetadata) {}
rpc GetValidAuthorizations2(GetValidAuthorizationsRequest) returns (Authorizations) {}
rpc GetValidOrderAuthorizations2(GetValidOrderAuthorizationsRequest) returns (Authorizations) {}
rpc IncidentsForSerial(Serial) returns (Incidents) {}
rpc KeyBlocked(KeyBlockedRequest) returns (Exists) {}
rpc PreviousCertificateExists(PreviousCertificateExistsRequest) returns (Exists) {}
rpc SerialsForIncident (SerialsForIncidentRequest) returns (stream IncidentSerial) {}
}
// StorageAuthority provides full read/write access to the database.
service StorageAuthority {
// Getters: this list must be identical to the StorageAuthorityReadOnly rpcs.
rpc CountCertificatesByNames(CountCertificatesByNamesRequest) returns (CountByNames) {}
rpc CountFQDNSets(CountFQDNSetsRequest) returns (Count) {}
rpc CountInvalidAuthorizations2(CountInvalidAuthorizationsRequest) returns (Count) {}
rpc CountOrders(CountOrdersRequest) returns (Count) {}
rpc CountPendingAuthorizations2(RegistrationID) returns (Count) {}
rpc CountRegistrationsByIP(CountRegistrationsByIPRequest) returns (Count) {}
rpc CountRegistrationsByIPRange(CountRegistrationsByIPRequest) returns (Count) {}
rpc FQDNSetExists(FQDNSetExistsRequest) returns (Exists) {}
rpc FQDNSetTimestampsForWindow(CountFQDNSetsRequest) returns (Timestamps) {}
rpc GetAuthorization2(AuthorizationID2) returns (core.Authorization) {}
rpc GetAuthorizations2(GetAuthorizationsRequest) returns (Authorizations) {}
rpc GetCertificate(Serial) returns (core.Certificate) {}
rpc GetCertificateStatus(Serial) returns (core.CertificateStatus) {}
rpc GetMaxExpiration(google.protobuf.Empty) returns (google.protobuf.Timestamp) {}
rpc GetOrder(OrderRequest) returns (core.Order) {}
rpc GetOrderForNames(GetOrderForNamesRequest) returns (core.Order) {}
rpc GetPendingAuthorization2(GetPendingAuthorizationRequest) returns (core.Authorization) {}
rpc GetPrecertificate(Serial) returns (core.Certificate) {}
rpc GetRegistration(RegistrationID) returns (core.Registration) {}
rpc GetRegistrationByKey(JSONWebKey) returns (core.Registration) {}
rpc GetRevocationStatus(Serial) returns (RevocationStatus) {}
rpc GetRevokedCerts(GetRevokedCertsRequest) returns (stream core.CRLEntry) {}
rpc GetSerialMetadata(Serial) returns (SerialMetadata) {}
rpc GetValidAuthorizations2(GetValidAuthorizationsRequest) returns (Authorizations) {}
rpc GetValidOrderAuthorizations2(GetValidOrderAuthorizationsRequest) returns (Authorizations) {}
rpc IncidentsForSerial(Serial) returns (Incidents) {}
rpc KeyBlocked(KeyBlockedRequest) returns (Exists) {}
rpc PreviousCertificateExists(PreviousCertificateExistsRequest) returns (Exists) {}
rpc SerialsForIncident (SerialsForIncidentRequest) returns (stream IncidentSerial) {}
// Adders
rpc NewRegistration(core.Registration) returns (core.Registration) {}
rpc UpdateRegistration(core.Registration) returns (google.protobuf.Empty) {}
rpc AddBlockedKey(AddBlockedKeyRequest) returns (google.protobuf.Empty) {}
rpc AddCertificate(AddCertificateRequest) returns (AddCertificateResponse) {}
rpc AddPrecertificate(AddCertificateRequest) returns (google.protobuf.Empty) {}
rpc AddSerial(AddSerialRequest) returns (google.protobuf.Empty) {}
rpc DeactivateAuthorization2(AuthorizationID2) returns (google.protobuf.Empty) {}
rpc DeactivateRegistration(RegistrationID) returns (google.protobuf.Empty) {}
rpc FinalizeAuthorization2(FinalizeAuthorizationRequest) returns (google.protobuf.Empty) {}
rpc FinalizeOrder(FinalizeOrderRequest) returns (google.protobuf.Empty) {}
rpc NewAuthorizations2(AddPendingAuthorizationsRequest) returns (Authorization2IDs) {}
rpc NewOrder(NewOrderRequest) returns (core.Order) {}
rpc NewOrderAndAuthzs(NewOrderAndAuthzsRequest) returns (core.Order) {}
rpc SetOrderProcessing(OrderRequest) returns (google.protobuf.Empty) {}
rpc SetOrderError(SetOrderErrorRequest) returns (google.protobuf.Empty) {}
rpc FinalizeOrder(FinalizeOrderRequest) returns (google.protobuf.Empty) {}
rpc GetOrder(OrderRequest) returns (core.Order) {}
rpc GetOrderForNames(GetOrderForNamesRequest) returns (core.Order) {}
rpc NewRegistration(core.Registration) returns (core.Registration) {}
rpc RevokeCertificate(RevokeCertificateRequest) returns (google.protobuf.Empty) {}
rpc SetOrderError(SetOrderErrorRequest) returns (google.protobuf.Empty) {}
rpc SetOrderProcessing(OrderRequest) returns (google.protobuf.Empty) {}
rpc UpdateRegistration(core.Registration) returns (google.protobuf.Empty) {}
rpc UpdateRevokedCertificate(RevokeCertificateRequest) returns (google.protobuf.Empty) {}
rpc NewAuthorizations2(AddPendingAuthorizationsRequest) returns (Authorization2IDs) {}
rpc FinalizeAuthorization2(FinalizeAuthorizationRequest) returns (google.protobuf.Empty) {}
rpc DeactivateAuthorization2(AuthorizationID2) returns (google.protobuf.Empty) {}
rpc AddBlockedKey(AddBlockedKeyRequest) returns (google.protobuf.Empty) {}
}
message RegistrationID {
@ -124,6 +157,7 @@ message CountCertificatesByNamesRequest {
message CountByNames {
map<string, int64> counts = 1;
google.protobuf.Timestamp earliest = 2; // Unix timestamp (nanoseconds)
}
message CountRegistrationsByIPRequest {

File diff suppressed because it is too large Load Diff

View File

@ -91,7 +91,7 @@ embedmd:
.PHONY: install-tools
install-tools:
go get -u golang.org/x/lint/golint
go get -u golang.org/x/tools/cmd/cover
go get -u golang.org/x/tools/cmd/goimports
go get -u github.com/rakyll/embedmd
go install golang.org/x/lint/golint@latest
go install golang.org/x/tools/cmd/cover@latest
go install golang.org/x/tools/cmd/goimports@latest
go install github.com/rakyll/embedmd@latest

View File

@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io"
// Version is the current release version of OpenCensus in use.
func Version() string {
return "0.23.0"
return "0.24.0"
}

View File

@ -18,24 +18,23 @@ Package trace contains support for OpenCensus distributed tracing.
The following assumes a basic familiarity with OpenCensus concepts.
See http://opencensus.io
Exporting Traces
# Exporting Traces
To export collected tracing data, register at least one exporter. You can use
one of the provided exporters or write your own.
trace.RegisterExporter(exporter)
trace.RegisterExporter(exporter)
By default, traces will be sampled relatively rarely. To change the sampling
frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler
to sample a subset of traces, or use AlwaysSample to collect a trace on every run:
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
Be careful about using trace.AlwaysSample in a production application with
significant traffic: a new trace will be started and exported for every request.
Adding Spans to a Trace
# Adding Spans to a Trace
A trace consists of a tree of spans. In Go, the current span is carried in a
context.Context.
@ -44,8 +43,8 @@ It is common to want to capture all the activity of a function call in a span. F
this to work, the function must take a context.Context as a parameter. Add these two
lines to the top of the function:
ctx, span := trace.StartSpan(ctx, "example.com/Run")
defer span.End()
ctx, span := trace.StartSpan(ctx, "example.com/Run")
defer span.End()
StartSpan will create a new top-level span if the context
doesn't contain another span, otherwise it will create a child span.

View File

@ -44,7 +44,7 @@ func (lm lruMap) len() int {
}
func (lm lruMap) keys() []interface{} {
keys := make([]interface{}, len(lm.cacheKeys))
keys := make([]interface{}, 0, len(lm.cacheKeys))
for k := range lm.cacheKeys {
keys = append(keys, k)
}

View File

@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build go1.11
// +build go1.11
package trace

View File

@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !go1.11
// +build !go1.11
package trace

View File

@ -59,7 +59,7 @@ github.com/containerd/cgroups/stats/v1
github.com/containerd/containerd/errdefs
github.com/containerd/containerd/log
github.com/containerd/containerd/platforms
# github.com/containerd/stargz-snapshotter/estargz v0.12.1
# github.com/containerd/stargz-snapshotter/estargz v0.13.0
## explicit; go 1.16
github.com/containerd/stargz-snapshotter/estargz
github.com/containerd/stargz-snapshotter/estargz/errorutil
@ -78,7 +78,7 @@ github.com/containernetworking/cni/pkg/version
# github.com/containernetworking/plugins v1.1.1
## explicit; go 1.17
github.com/containernetworking/plugins/pkg/ns
# github.com/containers/image/v5 v5.23.1-0.20221109193300-0d85878d7a77
# github.com/containers/image/v5 v5.23.1-0.20221130170538-333c50e3eac8
## explicit; go 1.17
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
@ -157,7 +157,7 @@ github.com/containers/ocicrypt/keywrap/pkcs7
github.com/containers/ocicrypt/spec
github.com/containers/ocicrypt/utils
github.com/containers/ocicrypt/utils/keyprovider
# github.com/containers/storage v1.44.1-0.20221110192950-67e9778710f8
# github.com/containers/storage v1.44.1-0.20221201083122-c5a80ad65f42
## explicit; go 1.17
github.com/containers/storage
github.com/containers/storage/drivers
@ -291,7 +291,7 @@ github.com/google/go-cmp/cmp/internal/diff
github.com/google/go-cmp/cmp/internal/flags
github.com/google/go-cmp/cmp/internal/function
github.com/google/go-cmp/cmp/internal/value
# github.com/google/go-containerregistry v0.12.0
# github.com/google/go-containerregistry v0.12.1
## explicit; go 1.17
github.com/google/go-containerregistry/pkg/name
# github.com/google/go-intervals v0.0.2
@ -337,7 +337,7 @@ github.com/klauspost/pgzip
# github.com/kr/fs v0.1.0
## explicit
github.com/kr/fs
# github.com/letsencrypt/boulder v0.0.0-20220929215747-76583552c2be
# github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf
## explicit; go 1.18
github.com/letsencrypt/boulder/core
github.com/letsencrypt/boulder/core/proto
@ -457,7 +457,7 @@ github.com/rivo/uniseg
# github.com/seccomp/libseccomp-golang v0.10.0
## explicit; go 1.14
github.com/seccomp/libseccomp-golang
# github.com/sigstore/sigstore v1.4.5
# github.com/sigstore/sigstore v1.4.6
## explicit; go 1.18
github.com/sigstore/sigstore/pkg/cryptoutils
github.com/sigstore/sigstore/pkg/signature
@ -479,7 +479,7 @@ github.com/stefanberger/go-pkcs11uri
## explicit; go 1.13
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
# github.com/sylabs/sif/v2 v2.8.3
# github.com/sylabs/sif/v2 v2.9.0
## explicit; go 1.18
github.com/sylabs/sif/v2/pkg/sif
# github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
@ -533,7 +533,7 @@ go.etcd.io/bbolt
# go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352
## explicit; go 1.11
go.mozilla.org/pkcs7
# go.opencensus.io v0.23.0
# go.opencensus.io v0.24.0
## explicit; go 1.13
go.opencensus.io
go.opencensus.io/internal
@ -636,7 +636,7 @@ golang.org/x/tools/internal/gocommand
golang.org/x/tools/internal/packagesinternal
golang.org/x/tools/internal/typeparams
golang.org/x/tools/internal/typesinternal
# google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a
# google.golang.org/genproto v0.0.0-20221111202108-142d8a6fa32e
## explicit; go 1.19
google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.50.1