Reorganize the "defragmented" reader construction a bit.

Have one section deal with detecting compression and re-assembling
the original stream, and another with computing the length and digest
of the original stream.

Should not change behavior.

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
This commit is contained in:
Miloslav Trmač 2021-08-20 19:17:47 +02:00
parent bfe784b920
commit 12ac28b3d7
1 changed file with 3 additions and 2 deletions

View File

@ -1519,11 +1519,12 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
if err != nil && err != io.EOF {
return -1, err
}
compression := archive.DetectCompression(header[:n])
defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), diff)
compressedDigester := digest.Canonical.Digester()
compressedCounter := ioutils.NewWriteCounter(compressedDigester.Hash())
defragmented := io.TeeReader(io.MultiReader(bytes.NewBuffer(header[:n]), diff), compressedCounter)
defragmented = io.TeeReader(defragmented, compressedCounter)
tsdata := bytes.Buffer{}
compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)