Merge pull request #592 from containers/dependabot/go_modules/github.com/klauspost/compress-1.10.4

build(deps): bump github.com/klauspost/compress from 1.10.3 to 1.10.4
Authored by Daniel J Walsh on 2020-04-08 09:18:10 -04:00; committed by GitHub.
commit 6f608e1bcf
16 changed files with 145 additions and 103 deletions

go.mod

@@ -6,7 +6,7 @@ require (
 	github.com/Microsoft/hcsshim v0.8.7
 	github.com/docker/go-units v0.4.0
 	github.com/hashicorp/go-multierror v1.0.0
-	github.com/klauspost/compress v1.10.3
+	github.com/klauspost/compress v1.10.4
 	github.com/klauspost/pgzip v1.2.3
 	github.com/mattn/go-shellwords v1.0.10
 	github.com/mistifyio/go-zfs v2.1.1+incompatible

go.sum

@@ -55,6 +55,8 @@ github.com/klauspost/compress v1.10.2 h1:Znfn6hXZAHaLPNnlqUYRrBSReFHYybslgv4PTiy
 github.com/klauspost/compress v1.10.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.10.3 h1:OP96hzwJVBIHYU52pVTI6CczrxPvrGfgqF9N5eTO0Q8=
 github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.10.4 h1:jFzIFaf586tquEB5EhzQG0HwGNSlgAJpG53G6Ss11wc=
+github.com/klauspost/compress v1.10.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=
 github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
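(These sum entries are what a `go get github.com/klauspost/compress@v1.10.4` followed by `go mod vendor` would produce; the superseded v1.10.2/v1.10.3 lines stay in go.sum until a later `go mod tidy` prunes them. The hunks below are the vendored upstream changes between the two releases.)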

vendor/github.com/klauspost/compress/flate/deflate.go

@@ -80,9 +80,7 @@ type advancedState struct {
 	// deflate state
 	length         int
 	offset         int
-	hash           uint32
 	maxInsertIndex int
-	ii             uint16 // position of last match, intended to overflow to reset.

 	// Input hash chains
 	// hashHead[hashValue] contains the largest inputIndex with the specified hash value
@@ -97,6 +95,9 @@ type advancedState struct {
 	// input window: unprocessed data is window[index:windowEnd]
 	index     int
 	hashMatch [maxMatchLength + minMatchLength]uint32
+
+	hash uint32
+	ii   uint16 // position of last match, intended to overflow to reset.
 }

 type compressor struct {
@@ -107,18 +108,19 @@ type compressor struct {
 	// compression algorithm
 	fill func(*compressor, []byte) int // copy data to window
 	step func(*compressor)             // process window
-	sync bool                          // requesting flush

 	window     []byte
 	windowEnd  int
 	blockStart int // window index where current tokens start
-	byteAvailable bool // if true, still need to process window[index-1].
 	err        error

 	// queued output tokens
 	tokens tokens
 	fast   fastEnc
 	state  *advancedState
+
+	sync          bool // requesting flush
+	byteAvailable bool // if true, still need to process window[index-1].
 }

 func (d *compressor) fillDeflate(b []byte) int {
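Most of the struct edits in this release, here and in several hunks below, group same-sized fields together. A minimal, self-contained sketch (illustrative only, not library code) of the padding such reordering removes:

    package main

    import (
        "fmt"
        "unsafe"
    )

    // bools interleaved with 8-byte fields cost 7 bytes of padding after each.
    type scattered struct {
        a bool
        b int64
        c bool
        d int64
    }

    // large fields first, small fields grouped at the end: less padding.
    type grouped struct {
        b int64
        d int64
        a bool
        c bool
    }

    func main() {
        fmt.Println(unsafe.Sizeof(scattered{})) // 32 on 64-bit platforms
        fmt.Println(unsafe.Sizeof(grouped{}))   // 24 on 64-bit platforms
    }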

vendor/github.com/klauspost/compress/flate/inflate.go

@@ -295,10 +295,6 @@ type decompressor struct {
 	r       Reader
 	roffset int64

-	// Input bits, in top of b.
-	b  uint32
-	nb uint

 	// Huffman decoders for literal/length, distance.
 	h1, h2 huffmanDecoder
@@ -309,19 +305,24 @@ type decompressor struct {
 	// Output history, buffer.
 	dict dictDecoder

-	// Temporary buffer (avoids repeated allocation).
-	buf [4]byte

 	// Next step in the decompression,
 	// and decompression state.
 	step      func(*decompressor)
 	stepState int
-	final     bool
 	err       error
 	toRead    []byte
 	hl, hd    *huffmanDecoder
 	copyLen   int
 	copyDist  int
+
+	// Temporary buffer (avoids repeated allocation).
+	buf [4]byte
+
+	// Input bits, in top of b.
+	b     uint32
+	nb    uint
+	final bool
 }

 func (f *decompressor) nextBlock() {

vendor/github.com/klauspost/compress/fse/fse.go

@@ -44,18 +44,14 @@ var (
 // Scratch provides temporary storage for compression and decompression.
 type Scratch struct {
 	// Private
 	count    [maxSymbolValue + 1]uint32
 	norm     [maxSymbolValue + 1]int16
-	symbolLen      uint16 // Length of active part of the symbol table.
-	actualTableLog uint8  // Selected tablelog.
 	br       byteReader
 	bits     bitReader
 	bw       bitWriter
 	ct       cTable      // Compression tables.
 	decTable []decSymbol // Decompression table.
-	zeroBits   bool // no bits has prob > 50%.
-	clearCount bool // clear count
 	maxCount int // count of the most probable symbol

 	// Per block parameters.
 	// These can be used to override compression parameters of the block.
@@ -68,17 +64,22 @@ type Scratch struct {
 	// and allocation will be avoided.
 	Out []byte

-	// MaxSymbolValue will override the maximum symbol value of the next block.
-	MaxSymbolValue uint8

-	// TableLog will attempt to override the tablelog for the next block.
-	TableLog uint8

 	// DecompressLimit limits the maximum decoded size acceptable.
 	// If > 0 decompression will stop when approximately this many bytes
 	// has been decoded.
 	// If 0, maximum size will be 2GB.
 	DecompressLimit int
+
+	symbolLen      uint16 // Length of active part of the symbol table.
+	actualTableLog uint8  // Selected tablelog.
+	zeroBits       bool   // no bits has prob > 50%.
+	clearCount     bool   // clear count
+
+	// MaxSymbolValue will override the maximum symbol value of the next block.
+	MaxSymbolValue uint8
+
+	// TableLog will attempt to override the tablelog for the next block.
+	TableLog uint8
 }

 // Histogram allows to populate the histogram and skip that step in the compression,

vendor/github.com/klauspost/compress/huff0/huff0.go

@@ -79,6 +79,13 @@ type Scratch struct {
 	// Slice of the returned data.
 	OutData []byte

+	// MaxDecodedSize will set the maximum allowed output size.
+	// This value will automatically be set to BlockSizeMax if not set.
+	// Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded.
+	MaxDecodedSize int
+
+	br byteReader
+
 	// MaxSymbolValue will override the maximum symbol value of the next block.
 	MaxSymbolValue uint8
@@ -95,12 +102,6 @@ type Scratch struct {
 	// If WantLogLess == 0 any improvement will do.
 	WantLogLess uint8

-	// MaxDecodedSize will set the maximum allowed output size.
-	// This value will automatically be set to BlockSizeMax if not set.
-	// Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded.
-	MaxDecodedSize int
-
-	br             byteReader
 	symbolLen      uint16 // Length of active part of the symbol table.
 	maxCount       int    // count of the most probable symbol
 	clearCount     bool   // clear count

vendor/github.com/klauspost/compress/zstd/blockdec.go

@@ -75,21 +75,25 @@ type blockDec struct {
 	// Window size of the block.
 	WindowSize uint64
-	Type       blockType
-	RLESize    uint32
+
+	history     chan *history
+	input       chan struct{}
+	result      chan decodeOutput
+	sequenceBuf []seq
+	err         error
+	decWG       sync.WaitGroup
+
+	// Block is RLE, this is the size.
+	RLESize uint32
+	tmp     [4]byte
+
+	Type blockType

 	// Is this the last block of a frame?
 	Last bool

 	// Use less memory
 	lowMem bool
-	history     chan *history
-	input       chan struct{}
-	result      chan decodeOutput
-	sequenceBuf []seq
-	tmp         [4]byte
-	err         error
-	decWG       sync.WaitGroup
 }

 func (b *blockDec) String() string {

vendor/github.com/klauspost/compress/zstd/decoder.go

@@ -169,7 +169,12 @@ func (d *Decoder) Reset(r io.Reader) error {
 			println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
 		}
 		b := bb.Bytes()
-		dst, err := d.DecodeAll(b, nil)
+		var dst []byte
+		if cap(d.current.b) > 0 {
+			dst = d.current.b
+		}
+
+		dst, err := d.DecodeAll(b, dst[:0])
 		if err == nil {
 			err = io.EOF
 		}
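The Reset change above reuses the decoder's previous output buffer rather than passing nil to DecodeAll, so repeated Reset-and-decode cycles on *bytes.Buffer inputs stop allocating a fresh destination each time. Callers can apply the same pattern through the public API; a sketch (function and variable names are illustrative):

    package main

    import "github.com/klauspost/compress/zstd"

    // decompressAll decodes each zstd frame in blobs, reusing one output
    // buffer across iterations, just as Reset now reuses d.current.b.
    func decompressAll(blobs [][]byte) error {
        dec, err := zstd.NewReader(nil)
        if err != nil {
            return err
        }
        defer dec.Close()

        var scratch []byte
        for _, blob := range blobs {
            scratch, err = dec.DecodeAll(blob, scratch[:0])
            if err != nil {
                return err
            }
            // ... use scratch before the next iteration overwrites it ...
        }
        return nil
    }

    func main() {
        _ = decompressAll(nil)
    }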

vendor/github.com/klauspost/compress/zstd/enc_better.go

@@ -104,10 +104,7 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
 	sLimit := int32(len(src)) - inputMargin
 	// stepSize is the number of bytes to skip on every main loop iteration.
 	// It should be >= 1.
-	stepSize := int32(e.o.targetLength)
-	if stepSize == 0 {
-		stepSize++
-	}
+	const stepSize = 1

 	const kSearchStrength = 9
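The deleted lines derived the step from e.o.targetLength, but as the enc_params.go hunk below notes, those encoder parameters were never actually populated, so the computed value was always 1. Replacing the dead computation with a constant lets the compiler fold it into the match-search loop; the same simplification repeats in enc_dfast.go and enc_fast.go below.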

vendor/github.com/klauspost/compress/zstd/enc_dfast.go

@@ -80,10 +80,7 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
 	sLimit := int32(len(src)) - inputMargin
 	// stepSize is the number of bytes to skip on every main loop iteration.
 	// It should be >= 1.
-	stepSize := int32(e.o.targetLength)
-	if stepSize == 0 {
-		stepSize++
-	}
+	const stepSize = 1

 	const kSearchStrength = 8
@@ -401,10 +398,7 @@ func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
 	sLimit := int32(len(src)) - inputMargin
 	// stepSize is the number of bytes to skip on every main loop iteration.
 	// It should be >= 1.
-	stepSize := int32(e.o.targetLength)
-	if stepSize == 0 {
-		stepSize++
-	}
+	const stepSize = 1

 	const kSearchStrength = 8

vendor/github.com/klauspost/compress/zstd/enc_fast.go

@@ -25,7 +25,6 @@ type tableEntry struct {
 }

 type fastBase struct {
-	o encParams
 	// cur is the offset at the start of hist
 	cur int32
 	// maximum offset. Should be at least 2x block size.
@@ -117,11 +116,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
 	sLimit := int32(len(src)) - inputMargin
 	// stepSize is the number of bytes to skip on every main loop iteration.
 	// It should be >= 2.
-	stepSize := int32(e.o.targetLength)
-	if stepSize == 0 {
-		stepSize++
-	}
-	stepSize++
+	const stepSize = 2

 	// TEMPLATE
 	const hashLog = tableBits

vendor/github.com/klauspost/compress/zstd/enc_params.go

@@ -4,6 +4,8 @@
 package zstd

+/*
+// encParams are not really used, just here for reference.
 type encParams struct {
 	// largest match distance : larger == more compression, more memory needed during decompression
 	windowLog uint8
@@ -152,3 +154,4 @@ var defEncParams = [4][]encParams{
 		{14, 15, 15, 10, 3, 999, strategyBtultra2}, // level 22.
 	},
 }
+*/

vendor/github.com/klauspost/compress/zstd/encoder.go

@@ -39,17 +39,18 @@ type encoder interface {
 }

 type encoderState struct {
 	w                io.Writer
 	filling          []byte
 	current          []byte
 	previous         []byte
 	encoder          encoder
 	writing          *blockEnc
 	err              error
 	writeErr         error
 	nWritten         int64
 	headerWritten    bool
 	eofWritten       bool
+	fullFrameWritten bool

 	// This waitgroup indicates an encode is running.
 	wg sync.WaitGroup
@@ -114,6 +115,7 @@ func (e *Encoder) Reset(w io.Writer) {
 	s.encoder.Reset()
 	s.headerWritten = false
 	s.eofWritten = false
+	s.fullFrameWritten = false
 	s.w = w
 	s.err = nil
 	s.nWritten = 0
@@ -172,6 +174,22 @@ func (e *Encoder) nextBlock(final bool) error {
 		return fmt.Errorf("block > maxStoreBlockSize")
 	}
 	if !s.headerWritten {
+		// If we have a single block encode, do a sync compression.
+		if final && len(s.filling) > 0 {
+			s.current = e.EncodeAll(s.filling, s.current[:0])
+			var n2 int
+			n2, s.err = s.w.Write(s.current)
+			if s.err != nil {
+				return s.err
+			}
+			s.nWritten += int64(n2)
+			s.current = s.current[:0]
+			s.filling = s.filling[:0]
+			s.headerWritten = true
+			s.fullFrameWritten = true
+			return nil
+		}
+
 		var tmp [maxHeaderSize]byte
 		fh := frameHeader{
 			ContentSize: 0,
@@ -294,7 +312,9 @@ func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) {
 	src := e.state.filling
 	for {
 		n2, err := r.Read(src)
-		_, _ = e.state.encoder.CRC().Write(src[:n2])
+		if e.o.crc {
+			_, _ = e.state.encoder.CRC().Write(src[:n2])
+		}
 		// src is now the unfilled part...
 		src = src[n2:]
 		n += int64(n2)
@@ -359,6 +379,9 @@ func (e *Encoder) Close() error {
 	if err != nil {
 		return err
 	}
+	if e.state.fullFrameWritten {
+		return s.err
+	}
 	s.wg.Wait()
 	s.wWg.Wait()
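The new fullFrameWritten path means a stream that receives one small write and is then Closed gets compressed synchronously through EncodeAll instead of the background-goroutine pipeline. Callers who know a payload is small can reach for EncodeAll directly; a sketch:

    package main

    import (
        "fmt"

        "github.com/klauspost/compress/zstd"
    )

    func main() {
        // A nil writer is fine when the encoder is only used via EncodeAll.
        enc, err := zstd.NewWriter(nil)
        if err != nil {
            panic(err)
        }
        defer enc.Close()

        // One frame in one call: no streaming handoff for small payloads.
        compressed := enc.EncodeAll([]byte("a short payload"), nil)
        fmt.Println(len(compressed))
    }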

vendor/github.com/klauspost/compress/zstd/encoder_options.go

@@ -12,15 +12,16 @@ type EOption func(*encoderOptions) error

 // options retains accumulated state of multiple options.
 type encoderOptions struct {
 	concurrent int
-	crc        bool
+	level      EncoderLevel
 	single     *bool
 	pad        int
 	blockSize  int
 	windowSize int
-	level      EncoderLevel
+	crc        bool
 	fullZero   bool
 	noEntropy  bool
+	customWindow bool
 }

 func (o *encoderOptions) setDefault() {
@@ -30,7 +31,7 @@ func (o *encoderOptions) setDefault() {
 		crc:        true,
 		single:     nil,
 		blockSize:  1 << 16,
-		windowSize: 1 << 22,
+		windowSize: 8 << 20,
 		level:      SpeedDefault,
 	}
 }
@@ -85,6 +86,7 @@ func WithWindowSize(n int) EOption {
 		}
 		o.windowSize = n
+		o.customWindow = true
 		if o.blockSize > o.windowSize {
 			o.blockSize = o.windowSize
 		}
@@ -195,6 +197,16 @@ func WithEncoderLevel(l EncoderLevel) EOption {
 			return fmt.Errorf("unknown encoder level")
 		}
 		o.level = l
+		if !o.customWindow {
+			switch o.level {
+			case SpeedFastest:
+				o.windowSize = 4 << 20
+			case SpeedDefault:
+				o.windowSize = 8 << 20
+			case SpeedBetterCompression:
+				o.windowSize = 16 << 20
+			}
+		}
 		return nil
 	}
 }
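With the customWindow flag, the default window now tracks the compression level (4 MB, 8 MB, or 16 MB) unless the caller pinned it, and an explicit WithWindowSize is honored regardless of option order. A usage sketch:

    package main

    import (
        "bytes"

        "github.com/klauspost/compress/zstd"
    )

    func main() {
        var buf bytes.Buffer

        // The level now also selects its default window: 4<<20 for SpeedFastest.
        fast, err := zstd.NewWriter(&buf, zstd.WithEncoderLevel(zstd.SpeedFastest))
        if err != nil {
            panic(err)
        }
        fast.Close()

        // An explicit window survives a later level option, because
        // WithWindowSize sets customWindow.
        pinned, err := zstd.NewWriter(&buf,
            zstd.WithWindowSize(1<<20),
            zstd.WithEncoderLevel(zstd.SpeedFastest),
        )
        if err != nil {
            panic(err)
        }
        pinned.Close()
    }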

vendor/github.com/klauspost/compress/zstd/framedec.go

@@ -16,16 +16,11 @@ import (
 )

 type frameDec struct {
 	o      decoderOptions
 	crc    hash.Hash64
-	frameDone sync.WaitGroup
 	offset int64

 	WindowSize uint64
-	DictionaryID     uint32
-	FrameContentSize uint64
-	HasCheckSum      bool
-	SingleSegment    bool

 	// maxWindowSize is the maximum windows size to support.
 	// should never be bigger than max-int.
@@ -42,9 +37,16 @@ type frameDec struct {
 	// Byte buffer that can be reused for small input blocks.
 	bBuf byteBuf

+	FrameContentSize uint64
+	frameDone        sync.WaitGroup
+
+	DictionaryID  uint32
+	HasCheckSum   bool
+	SingleSegment bool
+
 	// asyncRunning indicates whether the async routine processes input on 'decoding'.
-	asyncRunning   bool
 	asyncRunningMu sync.Mutex
+	asyncRunning   bool
 }

 const (

vendor/modules.txt

@@ -46,7 +46,7 @@ github.com/hashicorp/errwrap
 github.com/hashicorp/go-multierror
 # github.com/hashicorp/golang-lru v0.5.1
 github.com/hashicorp/golang-lru/simplelru
-# github.com/klauspost/compress v1.10.3
+# github.com/klauspost/compress v1.10.4
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0