Bump github.com/klauspost/compress from 1.9.1 to 1.9.2

Bumps [github.com/klauspost/compress](https://github.com/klauspost/compress) from 1.9.1 to 1.9.2.
- [Release notes](https://github.com/klauspost/compress/releases)
- [Changelog](https://github.com/klauspost/compress/blob/master/.goreleaser.yml)
- [Commits](https://github.com/klauspost/compress/compare/v1.9.1...v1.9.2)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
dependabot-preview[bot] 2019-11-13 09:17:11 +00:00 committed by Daniel J Walsh
parent 01ea27db51
commit b10bb250c6
10 changed files with 288 additions and 32 deletions

go.mod

@@ -7,7 +7,7 @@ require (
 	github.com/Microsoft/hcsshim v0.8.6
 	github.com/docker/docker v0.0.0-20171019062838-86f080cff091 // indirect
 	github.com/docker/go-units v0.4.0
-	github.com/klauspost/compress v1.9.1
+	github.com/klauspost/compress v1.9.2
 	github.com/klauspost/cpuid v1.2.1 // indirect
 	github.com/klauspost/pgzip v1.2.1
 	github.com/mattn/go-shellwords v1.0.6

go.sum

@@ -35,6 +35,8 @@ github.com/klauspost/compress v1.7.2 h1:liMOoeIvFpr9kEvalrZ7VVBA4wGf7zfOgwBjzz/5
 github.com/klauspost/compress v1.7.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.9.1 h1:TWy0o9J9c6LK9C8t7Msh6IAJNXbsU/nvKLTQUU5HdaY=
 github.com/klauspost/compress v1.9.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.2 h1:LfVyl+ZlLlLDeQ/d2AqfGIIH4qEDu0Ed2S5GyhCWIWY=
+github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
 github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=

vendor/github.com/klauspost/compress/flate/deflate.go

@@ -322,8 +322,6 @@ func (d *compressor) writeStoredBlock(buf []byte) error {
 	return d.w.err
 }
 
-const hashmul = 0x1e35a7bd
-
 // hash4 returns a hash representation of the first 4 bytes
 // of the supplied slice.
 // The caller must ensure that len(b) >= 4.

vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go

@@ -87,9 +87,6 @@ type huffmanBitWriter struct {
 	bits            uint64
 	nbits           uint16
 	nbytes          uint8
-	literalFreq     []uint16
-	offsetFreq      []uint16
-	codegen         []uint8
 	literalEncoding *huffmanEncoder
 	offsetEncoding  *huffmanEncoder
 	codegenEncoding *huffmanEncoder
@@ -99,7 +96,12 @@ type huffmanBitWriter struct {
 	logReusePenalty uint
 	lastHuffMan     bool
 	bytes           [256]byte
+	literalFreq     [lengthCodesStart + 32]uint16
+	offsetFreq      [32]uint16
 	codegenFreq     [codegenCodeCount]uint16
+
+	// codegen must have an extra space for the final symbol.
+	codegen [literalCount + offsetCodeCount + 1]uint8
 }
 
 // Huffman reuse.
@@ -124,10 +126,6 @@ type huffmanBitWriter struct {
 func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
 	return &huffmanBitWriter{
 		writer:          w,
-		literalFreq:     make([]uint16, lengthCodesStart+32),
-		offsetFreq:      make([]uint16, 32),
-		// codegen must have an extra space for the final symbol.
-		codegen:         make([]uint8, literalCount+offsetCodeCount+1),
 		literalEncoding: newHuffmanEncoder(literalCount),
 		codegenEncoding: newHuffmanEncoder(codegenCodeCount),
 		offsetEncoding:  newHuffmanEncoder(offsetCodeCount),
@@ -253,7 +251,7 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE
 	// a copy of the frequencies, and as the place where we put the result.
 	// This is fine because the output is always shorter than the input used
 	// so far.
-	codegen := w.codegen // cache
+	codegen := w.codegen[:] // cache
 	// Copy the concatenated code sizes to codegen. Put a marker at the end.
 	cgnl := codegen[:numLiterals]
 	for i := range cgnl {
@@ -356,8 +354,8 @@ func (w *huffmanBitWriter) headerSize() (size, numCodegens int) {
 func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
 	header, numCodegens := w.headerSize()
 	size = header +
-		litEnc.bitLength(w.literalFreq) +
-		offEnc.bitLength(w.offsetFreq) +
+		litEnc.bitLength(w.literalFreq[:]) +
+		offEnc.bitLength(w.offsetFreq[:]) +
 		extraBits
 	return size, numCodegens
 }
@@ -378,8 +376,8 @@ func (w *huffmanBitWriter) extraBitSize() int {
 // fixedSize returns the size of dynamically encoded data in bits.
 func (w *huffmanBitWriter) fixedSize(extraBits int) int {
 	return 3 +
-		fixedLiteralEncoding.bitLength(w.literalFreq) +
-		fixedOffsetEncoding.bitLength(w.offsetFreq) +
+		fixedLiteralEncoding.bitLength(w.literalFreq[:]) +
+		fixedOffsetEncoding.bitLength(w.offsetFreq[:]) +
 		extraBits
 }
@@ -670,9 +668,9 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
 // and offsetEncoding.
 // The number of literal and offset tokens is returned.
 func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) {
-	copy(w.literalFreq, t.litHist[:])
+	copy(w.literalFreq[:], t.litHist[:])
 	copy(w.literalFreq[256:], t.extraHist[:])
-	copy(w.offsetFreq, t.offHist[:offsetCodeCount])
+	copy(w.offsetFreq[:], t.offHist[:offsetCodeCount])
 
 	if t.n == 0 {
 		return
@@ -797,17 +795,17 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
 	}
 
 	// Clear histogram
-	for i := range w.literalFreq {
+	for i := range w.literalFreq[:] {
 		w.literalFreq[i] = 0
 	}
 	if !w.lastHuffMan {
-		for i := range w.offsetFreq {
+		for i := range w.offsetFreq[:] {
 			w.offsetFreq[i] = 0
 		}
 	}
 
 	// Add everything as literals
-	estBits := histogramSize(input, w.literalFreq, !eof && !sync) + 15
+	estBits := histogramSize(input, w.literalFreq[:], !eof && !sync) + 15
 
 	// Store bytes, if we don't get a reasonable improvement.
 	ssize, storable := w.storedSize(input)
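
Note: the change above moves literalFreq, offsetFreq, and codegen from heap-allocated slices to fixed-size arrays embedded in the struct, removing three make() calls per writer; the new [:] conversions are needed because bitLength, copy, and histogramSize still take slices. A minimal sketch of the pattern, with hypothetical names:

package main

import "fmt"

type writer struct {
	// A fixed-size array embedded in the struct: allocated together
	// with the struct instead of via a separate make() call.
	freq [32]uint16
}

// sum takes a slice, like bitLength and histogramSize above.
func sum(s []uint16) (n int) {
	for _, v := range s {
		n += int(v)
	}
	return n
}

func main() {
	var w writer
	w.freq[3] = 7
	// An array must be converted with [:] before being passed
	// where a slice is expected.
	fmt.Println(sum(w.freq[:])) // 7
}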

vendor/github.com/klauspost/compress/flate/inflate.go

@@ -107,8 +107,8 @@ const (
 type huffmanDecoder struct {
 	min      int                       // the minimum code length
-	chunks   *[huffmanNumChunks]uint32 // chunks as described above
-	links    [][]uint32                // overflow links
+	chunks   *[huffmanNumChunks]uint16 // chunks as described above
+	links    [][]uint16                // overflow links
 	linkMask uint32                    // mask the width of the link table
 }
@@ -124,7 +124,7 @@ func (h *huffmanDecoder) init(lengths []int) bool {
 	const sanity = false
 	if h.chunks == nil {
-		h.chunks = &[huffmanNumChunks]uint32{}
+		h.chunks = &[huffmanNumChunks]uint16{}
 	}
 	if h.min != 0 {
 		*h = huffmanDecoder{chunks: h.chunks, links: h.links}
@@ -191,7 +191,7 @@ func (h *huffmanDecoder) init(lengths []int) bool {
 	// create link tables
 	link := nextcode[huffmanChunkBits+1] >> 1
 	if cap(h.links) < huffmanNumChunks-link {
-		h.links = make([][]uint32, huffmanNumChunks-link)
+		h.links = make([][]uint16, huffmanNumChunks-link)
 	} else {
 		h.links = h.links[:huffmanNumChunks-link]
 	}
@@ -202,9 +202,9 @@ func (h *huffmanDecoder) init(lengths []int) bool {
 		if sanity && h.chunks[reverse] != 0 {
 			panic("impossible: overwriting existing chunk")
 		}
-		h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
+		h.chunks[reverse] = uint16(off<<huffmanValueShift | (huffmanChunkBits + 1))
 		if cap(h.links[off]) < numLinks {
-			h.links[off] = make([]uint32, numLinks)
+			h.links[off] = make([]uint16, numLinks)
 		} else {
 			links := h.links[off][:0]
 			h.links[off] = links[:numLinks]
@@ -220,7 +220,7 @@ func (h *huffmanDecoder) init(lengths []int) bool {
 	}
 	code := nextcode[n]
 	nextcode[n]++
-	chunk := uint16(i<<huffmanValueShift | n)
+	chunk := uint16(i<<huffmanValueShift | n)
 	reverse := int(bits.Reverse16(uint16(code)))
 	reverse >>= uint(16 - n)
 	if n <= huffmanChunkBits {
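
Note: each chunk entry packs a symbol value and its code length, so narrowing entries from uint32 to uint16 roughly halves the decoder's chunk and link tables. A sketch of the packing, assuming the constant values inherited from the standard library (4-bit length field, value in the high bits):

package main

import "fmt"

// Assumed to mirror the constants in inflate.go: the low 4 bits of a
// chunk hold the code length, the remaining bits hold the value.
const (
	huffmanCountMask  = 15
	huffmanValueShift = 4
)

func main() {
	value, length := uint16(300), uint16(9)

	// After this change one table entry fits in a uint16.
	chunk := value<<huffmanValueShift | length

	fmt.Println(chunk>>huffmanValueShift, chunk&huffmanCountMask) // 300 9
}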

vendor/github.com/klauspost/compress/flate/stateless.go (new vendored file)

@@ -0,0 +1,252 @@
package flate
import (
"io"
"math"
)
const (
maxStatelessBlock = math.MaxInt16
slTableBits = 13
slTableSize = 1 << slTableBits
slTableShift = 32 - slTableBits
)
type statelessWriter struct {
dst io.Writer
closed bool
}
func (s *statelessWriter) Close() error {
if s.closed {
return nil
}
s.closed = true
// Emit EOF block
return StatelessDeflate(s.dst, nil, true)
}
func (s *statelessWriter) Write(p []byte) (n int, err error) {
err = StatelessDeflate(s.dst, p, false)
if err != nil {
return 0, err
}
return len(p), nil
}
func (s *statelessWriter) Reset(w io.Writer) {
s.dst = w
s.closed = false
}
// NewStatelessWriter will do compression but without maintaining any state
// between Write calls.
// There will be no memory kept between Write calls,
// but compression and speed will be suboptimal.
// Because of this, the size of actual Write calls will affect output size.
func NewStatelessWriter(dst io.Writer) io.WriteCloser {
return &statelessWriter{dst: dst}
}
// StatelessDeflate allows to compress directly to a Writer without retaining state.
// When returning everything will be flushed.
func StatelessDeflate(out io.Writer, in []byte, eof bool) error {
var dst tokens
bw := newHuffmanBitWriter(out)
if eof && len(in) == 0 {
// Just write an EOF block.
// Could be faster...
bw.writeStoredHeader(0, true)
bw.flush()
return bw.err
}
for len(in) > 0 {
todo := in
if len(todo) > maxStatelessBlock {
todo = todo[:maxStatelessBlock]
}
in = in[len(todo):]
// Compress
statelessEnc(&dst, todo)
isEof := eof && len(in) == 0
if dst.n == 0 {
bw.writeStoredHeader(len(todo), isEof)
if bw.err != nil {
return bw.err
}
bw.writeBytes(todo)
} else if int(dst.n) > len(todo)-len(todo)>>4 {
// If we removed less than 1/16th, huffman compress the block.
bw.writeBlockHuff(isEof, todo, false)
} else {
bw.writeBlockDynamic(&dst, isEof, todo, false)
}
if bw.err != nil {
return bw.err
}
dst.Reset()
}
if !eof {
// Align.
bw.writeStoredHeader(0, false)
}
bw.flush()
return bw.err
}
func hashSL(u uint32) uint32 {
return (u * 0x1e35a7bd) >> slTableShift
}
func load3216(b []byte, i int16) uint32 {
// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
b = b[i:]
b = b[:4]
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
func load6416(b []byte, i int16) uint64 {
// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
b = b[i:]
b = b[:8]
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}
func statelessEnc(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
type tableEntry struct {
offset int16
}
var table [slTableSize]tableEntry
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
if len(src) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
dst.n = uint16(len(src))
return
}
s := int16(1)
nextEmit := int16(0)
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := int16(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
cv := load3216(src, s)
for {
const skipLog = 5
const doEvery = 2
nextS := s
var candidate tableEntry
for {
nextHash := hashSL(cv)
candidate = table[nextHash]
nextS = s + doEvery + (s-nextEmit)>>skipLog
if nextS > sLimit || nextS <= 0 {
goto emitRemainder
}
now := load6416(src, nextS)
table[nextHash] = tableEntry{offset: s}
nextHash = hashSL(uint32(now))
if cv == load3216(src, candidate.offset) {
table[nextHash] = tableEntry{offset: nextS}
break
}
// Do one right away...
cv = uint32(now)
s = nextS
nextS++
candidate = table[nextHash]
now >>= 8
table[nextHash] = tableEntry{offset: s}
if cv == load3216(src, candidate.offset) {
table[nextHash] = tableEntry{offset: nextS}
break
}
cv = uint32(now)
s = nextS
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
// Extend the 4-byte match as long as possible.
t := candidate.offset
l := int16(matchLen(src[s+4:], src[t+4:]) + 4)
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
l++
}
if nextEmit < s {
emitLiteral(dst, src[nextEmit:s])
}
// Save the match found
dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset))
s += l
nextEmit = s
if nextS >= s {
s = nextS + 1
}
if s >= sLimit {
goto emitRemainder
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-2 and at s. If
// another emitCopy is not our next move, also calculate nextHash
// at s+1. At least on GOARCH=amd64, these three hash calculations
// are faster as one load64 call (with some shifts) instead of
// three load32 calls.
x := load6416(src, s-2)
o := s - 2
prevHash := hashSL(uint32(x))
table[prevHash] = tableEntry{offset: o}
x >>= 16
currHash := hashSL(uint32(x))
candidate = table[currHash]
table[currHash] = tableEntry{offset: o + 2}
if uint32(x) != load3216(src, candidate.offset) {
cv = uint32(x >> 8)
s++
break
}
}
}
emitRemainder:
if int(nextEmit) < len(src) {
// If nothing was added, don't encode literals.
if dst.n == 0 {
return
}
emitLiteral(dst, src[nextEmit:])
}
}
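
Note: for reference, a small sketch of how the new stateless API might be used from outside the package. It assumes the package's ordinary flate.NewReader (mirroring the standard library) on the read side, since StatelessDeflate emits a normal deflate stream; error handling is elided.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/flate"
)

func main() {
	var compressed bytes.Buffer

	// NewStatelessWriter keeps no state between Write calls, so each
	// Write is compressed as an independent deflate block.
	w := flate.NewStatelessWriter(&compressed)
	w.Write([]byte("hello, "))
	w.Write([]byte("stateless world"))
	w.Close() // emits the final EOF block

	// The output is a normal deflate stream.
	r := flate.NewReader(&compressed)
	out, _ := io.ReadAll(r)
	r.Close()
	fmt.Println(string(out)) // hello, stateless world
}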

vendor/github.com/klauspost/compress/zstd/blockdec.go

@@ -161,7 +161,8 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
 		b.data, err = br.readBig(cSize, b.dataStorage)
 		if err != nil {
 			if debug {
-				println("Reading block:", err)
+				println("Reading block:", err, "(", cSize, ")", len(b.data))
+				printf("%T", br)
 			}
 			return err
 		}

vendor/github.com/klauspost/compress/zstd/bytebuf.go

@@ -101,6 +101,9 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
 		dst = make([]byte, n)
 	}
 	n2, err := io.ReadFull(r.r, dst[:n])
+	if err == io.EOF && n > 0 {
+		err = io.ErrUnexpectedEOF
+	}
 	return dst[:n2], err
 }
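
Note: context for the fix above. io.ReadFull only returns io.EOF when zero bytes were read, so a block that was missing entirely used to surface as a plain EOF; the added check upgrades it to io.ErrUnexpectedEOF whenever bytes were expected. A small demonstration of the standard-library behavior:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	buf := make([]byte, 4)

	// Nothing available at all: io.ReadFull reports plain io.EOF.
	_, err := io.ReadFull(bytes.NewReader(nil), buf)
	fmt.Println(err) // EOF

	// A truncated read already reports io.ErrUnexpectedEOF.
	_, err = io.ReadFull(bytes.NewReader([]byte{1, 2}), buf)
	fmt.Println(err) // unexpected EOF
}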

vendor/github.com/klauspost/compress/zstd/framedec.go

@@ -194,14 +194,14 @@ func (d *frameDec) reset(br byteBuffer) error {
 			// When FCS_Field_Size is 2, the offset of 256 is added.
 			d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256
 		case 4:
-			d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3] << 24))
+			d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24)
 		case 8:
 			d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
 			d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24)
 			d.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
 		}
 		if debug {
-			println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]))
+			println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize)
 		}
 	}
 	// Move this to shared.
@@ -414,6 +414,7 @@ func (d *frameDec) startDecoder(output chan decodeOutput) {
 		}
 		written += int64(len(r.b))
 		if d.SingleSegment && uint64(written) > d.FrameContentSize {
+			println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize)
 			r.err = ErrFrameSizeExceeded
 			output <- r
 			return
@@ -464,6 +465,7 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 			break
 		}
 		if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize {
+			println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize)
 			err = ErrFrameSizeExceeded
 			break
 		}
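
Note: the case 4 change fixes a conversion-order bug. In uint64(b[3] << 24) the shift is applied to the byte before the widening conversion, so it always evaluates to 0 and the top byte of the frame content size was lost. A minimal illustration:

package main

import "fmt"

func main() {
	b := []byte{0, 0, 0, 1} // little-endian 1<<24

	// Buggy: b[3]<<24 shifts a uint8, which zeroes it before the
	// widening conversion to uint64.
	buggy := uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3] << 24))

	// Fixed: convert to uint64 first, then shift.
	fixed := uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24)

	fmt.Println(buggy, fixed) // 0 16777216
}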

vendor/modules.txt

@@ -30,7 +30,7 @@ github.com/google/go-cmp/cmp
 github.com/google/go-cmp/cmp/internal/diff
 github.com/google/go-cmp/cmp/internal/function
 github.com/google/go-cmp/cmp/internal/value
-# github.com/klauspost/compress v1.9.1
+# github.com/klauspost/compress v1.9.2
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0