mirror of https://github.com/docker/docs.git
archive: use pooled bufio readers and writers
Docker-DCO-1.1-Signed-off-by: Cristian Staretu <cristian.staretu@gmail.com> (github: unclejack)
parent 2d2016b81b
commit 84d76e556b
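The change below replaces per-call bufio.NewReader/NewWriter allocations (and the fixed twBufSize/trBufSize constants deleted further down) with shared 32K buffer pools from a pkg/pools package. That package is not part of this diff; the following is a rough sketch, under stated assumptions, of how such a pool can be built on sync.Pool. Only BufioReader32KPool, Get, Put and NewReadCloserWrapper are names taken from the diff itself; everything else here is illustrative and is not the actual pkg/pools implementation.

// Rough sketch only, NOT the pkg/pools source: one way a pooled 32K bufio
// reader could be built on sync.Pool. Names other than BufioReader32KPool,
// Get, Put and NewReadCloserWrapper (which the diff uses) are assumptions.
package pools

import (
	"bufio"
	"io"
	"sync"
)

const buffer32K = 32 * 1024

// BufioReaderPool hands out *bufio.Reader values with a fixed buffer size,
// reusing them through sync.Pool instead of allocating 32KiB on every call.
type BufioReaderPool struct {
	pool sync.Pool
}

// BufioReader32KPool is the shared 32KiB reader pool the diff refers to;
// a BufioWriter32KPool would mirror it with *bufio.Writer plus a Flush on Put.
var BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)

func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
	return &BufioReaderPool{
		pool: sync.Pool{
			New: func() interface{} { return bufio.NewReaderSize(nil, size) },
		},
	}
}

// Get fetches a pooled reader and points it at r. r may be nil; callers
// that pass nil rebind the buffer later with Reset.
func (bp *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
	buf := bp.pool.Get().(*bufio.Reader)
	buf.Reset(r)
	return buf
}

// Put drops the buffer's reference to its source and returns it to the pool.
func (bp *BufioReaderPool) Put(b *bufio.Reader) {
	b.Reset(nil)
	bp.pool.Put(b)
}

// NewReadCloserWrapper returns an io.ReadCloser that reads from r and, on
// Close, closes r (if it is a Closer) and puts buf back into the pool.
func (bp *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
	return &readCloserWrapper{pool: bp, buf: buf, r: r}
}

type readCloserWrapper struct {
	pool *BufioReaderPool
	buf  *bufio.Reader
	r    io.Reader
}

func (w *readCloserWrapper) Read(p []byte) (int, error) { return w.r.Read(p) }

func (w *readCloserWrapper) Close() error {
	if c, ok := w.r.(io.Closer); ok {
		c.Close()
	}
	w.pool.Put(w.buf)
	return nil
}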
@@ -18,8 +18,8 @@ import (
     "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"

-    "github.com/docker/docker/pkg/ioutils"
     "github.com/docker/docker/pkg/log"
+    "github.com/docker/docker/pkg/pools"
     "github.com/docker/docker/pkg/system"
     "github.com/docker/docker/utils"
 )

@@ -81,7 +81,8 @@ func xzDecompress(archive io.Reader) (io.ReadCloser, error) {
 }

 func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
-    buf := bufio.NewReader(archive)
+    p := pools.BufioReader32KPool
+    buf := p.Get(archive)
     bs, err := buf.Peek(10)
     if err != nil {
         return nil, err

@@ -89,28 +90,44 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
     log.Debugf("[tar autodetect] n: %v", bs)

     compression := DetectCompression(bs)

     switch compression {
     case Uncompressed:
-        return ioutil.NopCloser(buf), nil
+        readBufWrapper := p.NewReadCloserWrapper(buf, buf)
+        return readBufWrapper, nil
     case Gzip:
-        return gzip.NewReader(buf)
+        gzReader, err := gzip.NewReader(buf)
+        if err != nil {
+            return nil, err
+        }
+        readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
+        return readBufWrapper, nil
     case Bzip2:
-        return ioutil.NopCloser(bzip2.NewReader(buf)), nil
+        bz2Reader := bzip2.NewReader(buf)
+        readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
+        return readBufWrapper, nil
     case Xz:
-        return xzDecompress(buf)
+        xzReader, err := xzDecompress(buf)
+        if err != nil {
+            return nil, err
+        }
+        readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
+        return readBufWrapper, nil
     default:
         return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
     }
 }

 func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
+    p := pools.BufioWriter32KPool
+    buf := p.Get(dest)
     switch compression {
     case Uncompressed:
-        return ioutils.NopWriteCloser(dest), nil
+        writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
+        return writeBufWrapper, nil
     case Gzip:
-        return gzip.NewWriter(dest), nil
+        gzWriter := gzip.NewWriter(dest)
+        writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
+        return writeBufWrapper, nil
     case Bzip2, Xz:
         // archive/bzip2 does not support writing, and there is no xz support at all
         // However, this is not a problem as docker only currently generates gzipped tars
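With these hunks, DecompressStream and CompressStream no longer hand back the bare stream; they return pool-aware wrappers, so callers must close what they receive: Close is presumably what releases the pooled 32K buffer (and closes or flushes the underlying gzip/xz stream). A hypothetical caller, assuming the top-level archive package path used in this tree:

package example

import (
	"io"
	"io/ioutil"
	"os"

	"github.com/docker/docker/archive"
)

// readArchive decompresses an arbitrary supported stream; deferring Close on
// the wrapper is presumably what returns the pooled bufio.Reader.
func readArchive(f *os.File) error {
	rc, err := archive.DecompressStream(f)
	if err != nil {
		return err
	}
	defer rc.Close() // releases the pooled 32K reader

	_, err = io.Copy(ioutil.Discard, rc)
	return err
}

// writeArchive compresses src into dst; Close presumably flushes the pooled
// bufio.Writer and hands it back to BufioWriter32KPool.
func writeArchive(dst *os.File, src io.Reader) error {
	wc, err := archive.CompressStream(dst, archive.Gzip)
	if err != nil {
		return err
	}
	defer wc.Close() // flushes and releases the pooled 32K writer

	_, err = io.Copy(wc, src)
	return err
}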
@@ -338,7 +355,8 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
         options.Includes = []string{"."}
     }

-    twBuf := bufio.NewWriterSize(nil, twBufSize)
+    twBuf := pools.BufioWriter32KPool.Get(nil)
+    defer pools.BufioWriter32KPool.Put(twBuf)

     for _, include := range options.Includes {
         filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error {

@@ -412,7 +430,8 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error {
     defer decompressedArchive.Close()

     tr := tar.NewReader(decompressedArchive)
-    trBuf := bufio.NewReaderSize(nil, trBufSize)
+    trBuf := pools.BufioReader32KPool.Get(nil)
+    defer pools.BufioReader32KPool.Put(trBuf)

     var dirs []*tar.Header
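TarWithOptions and Untar (and ExportChanges in the hunks below) take a buffer from the pool with Get(nil), hand it back with a single deferred Put, and presumably rebind it to the stream of the moment with Reset inside their per-entry loops; that rebinding happens outside the context lines shown here. A minimal sketch of that lifecycle, with copyAll and sources as illustrative placeholders:

package example

import (
	"io"

	"github.com/docker/docker/pkg/pools"
)

// copyAll illustrates the Get(nil) / Reset / Put lifecycle: one pooled
// 32K reader serves many streams and is returned to the pool exactly once.
func copyAll(dst io.Writer, sources []io.Reader) error {
	buf := pools.BufioReader32KPool.Get(nil) // unbound reader from the pool
	defer pools.BufioReader32KPool.Put(buf)  // always hand it back for reuse

	for _, src := range sources {
		buf.Reset(src) // point the pooled buffer at the next stream
		if _, err := io.Copy(dst, buf); err != nil {
			return err
		}
	}
	return nil
}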
@@ -1,7 +1,6 @@
 package archive

 import (
-    "bufio"
     "bytes"
     "fmt"
     "io"

@@ -14,6 +13,7 @@ import (
     "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"

     "github.com/docker/docker/pkg/log"
+    "github.com/docker/docker/pkg/pools"
     "github.com/docker/docker/pkg/system"
 )

@@ -345,7 +345,8 @@ func ExportChanges(dir string, changes []Change) (Archive, error) {
     tw := tar.NewWriter(writer)

     go func() {
-        twBuf := bufio.NewWriterSize(nil, twBufSize)
+        twBuf := pools.BufioWriter32KPool.Get(nil)
+        defer pools.BufioWriter32KPool.Put(twBuf)
         // In general we log errors here but ignore them because
         // during e.g. a diff operation the container can continue
         // mutating the filesystem and we can see transient errors

@@ -1,4 +0,0 @@
-package archive
-
-const twBufSize = 32 * 1024
-const trBufSize = 32 * 1024
@@ -1,7 +1,6 @@
 package archive

 import (
-    "bufio"
     "fmt"
     "io"
     "io/ioutil"

@@ -11,6 +10,8 @@ import (
     "syscall"

     "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+
+    "github.com/docker/docker/pkg/pools"
 )

 // Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.

@@ -33,7 +34,8 @@ func ApplyLayer(dest string, layer ArchiveReader) error {
     }

     tr := tar.NewReader(layer)
-    trBuf := bufio.NewReaderSize(nil, trBufSize)
+    trBuf := pools.BufioReader32KPool.Get(tr)
+    defer pools.BufioReader32KPool.Put(trBuf)

     var dirs []*tar.Header
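ApplyLayer only ever reads from the single tar stream it is given, so unlike Untar it binds the pooled reader at Get time (Get(tr) rather than Get(nil)) and relies on the deferred Put rather than a Close wrapper. A rough sketch of that shape, where draining each entry stands in for the real on-disk unpacking:

package example

import (
	"archive/tar"
	"io"
	"io/ioutil"

	"github.com/docker/docker/pkg/pools"
)

// walkLayer mirrors the ApplyLayer pattern above: the pooled reader is bound
// to the tar stream once and the deferred Put releases it when the walk ends.
// Here each entry's contents are simply drained through the pooled buffer;
// the real code creates files, directories and device nodes on disk instead.
func walkLayer(layer io.Reader) error {
	tr := tar.NewReader(layer)
	trBuf := pools.BufioReader32KPool.Get(tr) // bound to tr for the whole walk
	defer pools.BufioReader32KPool.Put(trBuf) // returned once, on the way out

	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil // end of archive
		}
		if err != nil {
			return err
		}
		_ = hdr // the real code switches on hdr.Typeflag here
		if _, err := io.Copy(ioutil.Discard, trBuf); err != nil {
			return err
		}
	}
}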