chunked: reuse cache
try to reuse an existing cache object instead of creating a new one for every layer. Set a time limit on how long it can be reused, so that stale references are cleaned up.

Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>
parent be4e8f622d
commit 526c57d8b0
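The lifecycle this commit introduces, sketched as a hypothetical caller (assumed to live in the same package as the code in the diff below; only getLayersCache, release and findFileInOtherLayers come from the commit itself, the applyLayer helper and its signature are made up for this sketch):

// Hypothetical caller illustrating the acquire/use/release pattern.
func applyLayer(store storage.Store, file *internal.FileMetadata, visitor findFileVisitor) error {
	// Reuse the process-wide cache if it belongs to this store and is less
	// than 10 minutes old; otherwise a fresh cache is built and loaded.
	c, err := getLayersCache(store)
	if err != nil {
		return err
	}
	// Drop the reference when done so a stale cache can be discarded.
	defer c.release()

	return c.findFileInOtherLayers(file, visitor)
}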
@@ -4,57 +4,121 @@ import (
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
+	"sync"
+	"time"
 
 	storage "github.com/containers/storage"
 	"github.com/containers/storage/pkg/chunked/internal"
 )
 
 type layer struct {
+	id       string
 	metadata map[string][]*internal.FileMetadata
 	target   string
 }
 
 type layersCache struct {
-	layers []layer
+	layers  []layer
+	refs    int
+	store   storage.Store
+	mutex   sync.Mutex
+	created time.Time
 }
 
 type findFileVisitor interface {
 	VisitFile(file *internal.FileMetadata, target string) (bool, error)
 }
 
+var cacheMutex sync.Mutex
+var cache *layersCache
+
+func (c *layersCache) release() {
+	cacheMutex.Lock()
+	defer cacheMutex.Unlock()
+
+	c.refs--
+	if c.refs == 0 {
+		cache = nil
+	}
+}
+
+func getLayersCacheRef(store storage.Store) *layersCache {
+	cacheMutex.Lock()
+	defer cacheMutex.Unlock()
+	if cache != nil && cache.store == store && time.Since(cache.created).Minutes() < 10 {
+		cache.refs++
+		return cache
+	}
+	cache := &layersCache{
+		store:   store,
+		refs:    1,
+		created: time.Now(),
+	}
+	return cache
+}
+
 func getLayersCache(store storage.Store) (*layersCache, error) {
-	allLayers, err := store.Layers()
-	if err != nil {
+	c := getLayersCacheRef(store)
+
+	if err := c.load(); err != nil {
+		c.release()
 		return nil, err
 	}
+	return c, nil
+}
 
-	cache := layersCache{}
+func (c *layersCache) load() error {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	allLayers, err := c.store.Layers()
+	if err != nil {
+		return err
+	}
+	existingLayers := make(map[string]string)
+	for _, r := range c.layers {
+		existingLayers[r.id] = r.target
+	}
+
+	currentLayers := make(map[string]string)
 	for _, r := range allLayers {
-		manifestReader, err := store.LayerBigData(r.ID, bigDataKey)
+		currentLayers[r.ID] = r.ID
+		if _, found := existingLayers[r.ID]; found {
+			continue
+		}
+		manifestReader, err := c.store.LayerBigData(r.ID, bigDataKey)
 		if err != nil {
 			continue
 		}
 		defer manifestReader.Close()
 		manifest, err := ioutil.ReadAll(manifestReader)
 		if err != nil {
-			return nil, fmt.Errorf("open manifest file for layer %q: %w", r.ID, err)
+			return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err)
 		}
 		var toc internal.TOC
 		if err := json.Unmarshal(manifest, &toc); err != nil {
 			continue
 		}
-		target, err := store.DifferTarget(r.ID)
+		target, err := c.store.DifferTarget(r.ID)
 		if err != nil {
-			return nil, fmt.Errorf("get checkout directory layer %q: %w", r.ID, err)
+			return fmt.Errorf("get checkout directory layer %q: %w", r.ID, err)
 		}
 
-		cache.addLayer(toc.Entries, target)
+		c.addLayer(r.ID, toc.Entries, target)
 	}
-	return &cache, nil
+
+	var newLayers []layer
+	for _, l := range c.layers {
+		if _, found := currentLayers[l.id]; found {
+			newLayers = append(newLayers, l)
+		}
+	}
+	c.layers = newLayers
+
+	return nil
 }
 
-func (c *layersCache) addLayer(entries []internal.FileMetadata, target string) {
+func (c *layersCache) addLayer(id string, entries []internal.FileMetadata, target string) {
 	r := make(map[string][]*internal.FileMetadata)
 	for i := range entries {
 		if entries[i].Digest != "" {
@@ -67,6 +131,7 @@ func (c *layersCache) addLayer(entries []internal.FileMetadata, target string) {
 		}
 	}
 	l := layer{
+		id:       id,
 		metadata: r,
 		target:   target,
 	}
@@ -77,6 +142,9 @@ func (c *layersCache) addLayer(entries []internal.FileMetadata, target string) {
 // file is the file to look for.
 // visitor is the findFileVisitor to notify for each candidate found.
 func (c *layersCache) findFileInOtherLayers(file *internal.FileMetadata, visitor findFileVisitor) error {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
 	for _, layer := range c.layers {
 		files, found := layer.metadata[file.Digest]
 		if !found {
@@ -98,6 +166,9 @@ func (c *layersCache) findFileInOtherLayers(file *internal.FileMetadata, visitor
 }
 
 func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (string, string, int64) {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
 	for _, layer := range c.layers {
 		entries, found := layer.metadata[chunk.ChunkDigest]
 		if !found {

@@ -1184,6 +1184,8 @@ func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bo
 }
 
 func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) {
+	defer c.layersCache.release()
+
 	bigData := map[string][]byte{
 		bigDataKey: c.manifest,
 	}
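The acquisition side of c.layersCache is outside this diff; presumably the differ obtains the cache reference when it is constructed and holds it until ApplyDiff releases it above. A hypothetical sketch of that construction (constructor name and parameters are assumptions; only the layersCache and manifest fields appear in the diff):

// Hypothetical constructor, not part of this commit: acquire the shared
// layers cache up front and keep it until ApplyDiff calls release().
func newChunkedDiffer(store storage.Store, manifest []byte) (*chunkedDiffer, error) {
	c, err := getLayersCache(store)
	if err != nil {
		return nil, err
	}
	return &chunkedDiffer{
		layersCache: c,
		manifest:    manifest,
	}, nil
}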