From bb69f10df91fe5ed044e7f0c81a118a2b666757d Mon Sep 17 00:00:00 2001
From: Aaron Lehmann
Date: Thu, 22 Oct 2015 19:37:58 -0700
Subject: [PATCH] Add a buffered Writer between layer compression and layer
 upload

Without this buffering, the compressor was outputting 64 bytes at a time
to the HTTP stream, which was resulting in absurdly small chunk sizes and
a lot of extra overhead. The buffering restores the chunk size to 32768
bytes, which matches the behavior with 1.8.2.

Times pushing to a local registry:

1.8.2:              0m18.934s
master:             0m20.564s
master+this commit: 0m17.593s

Fixes: #17038

Signed-off-by: Aaron Lehmann
---
 graph/push_v2.go | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/graph/push_v2.go b/graph/push_v2.go
index fda531d9ca..088a6c62e0 100644
--- a/graph/push_v2.go
+++ b/graph/push_v2.go
@@ -1,6 +1,7 @@
 package graph
 
 import (
+	"bufio"
 	"compress/gzip"
 	"fmt"
 	"io"
@@ -20,6 +21,8 @@ import (
 	"golang.org/x/net/context"
 )
 
+const compressionBufSize = 32768
+
 type v2Pusher struct {
 	*TagStore
 	endpoint registry.APIEndpoint
@@ -259,13 +262,18 @@ func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (d
 	// we must make sure the ReadFrom is used, not Write. Using Write would
 	// send a PATCH request for every Write call.
 	pipeReader, pipeWriter := io.Pipe()
-	compressor := gzip.NewWriter(io.MultiWriter(pipeWriter, digester.Hash()))
+	// Use a bufio.Writer to avoid excessive chunking in HTTP request.
+	bufWriter := bufio.NewWriterSize(io.MultiWriter(pipeWriter, digester.Hash()), compressionBufSize)
+	compressor := gzip.NewWriter(bufWriter)
 
 	go func() {
 		_, err := io.Copy(compressor, reader)
 		if err == nil {
 			err = compressor.Close()
 		}
+		if err == nil {
+			err = bufWriter.Flush()
+		}
 		if err != nil {
 			pipeWriter.CloseWithError(err)
 		} else {
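
A note beyond the diff itself: the fix works because the bufio.Writer sits
between the gzip stream and the io.Pipe feeding the HTTP upload, so the pipe
(and therefore each HTTP chunk) sees one write of up to 32768 bytes instead
of many tiny ones from the compressor. Below is a minimal, self-contained Go
sketch of that same wiring, not the Docker code: the payload, the read loop,
and the chunk-size printout are stand-ins added for illustration, while
compressionBufSize and the Close-then-Flush ordering follow the patch.

package main

import (
	"bufio"
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

// Same value the patch introduces: the buffer coalesces compressed
// output into 32768-byte writes before it reaches the pipe.
const compressionBufSize = 32768

func main() {
	pipeReader, pipeWriter := io.Pipe()

	// The buffer sits between the compressor and the pipe, as in the
	// patch (the digest MultiWriter is omitted here for brevity).
	bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize)
	compressor := gzip.NewWriter(bufWriter)

	go func() {
		// Stand-in payload; the real code copies the image layer here.
		payload := strings.NewReader(strings.Repeat("layer data ", 200000))
		_, err := io.Copy(compressor, payload)
		if err == nil {
			// Close the gzip stream first so its trailing bytes go
			// through the buffer too...
			err = compressor.Close()
		}
		if err == nil {
			// ...then flush, so every buffered byte reaches the pipe.
			err = bufWriter.Flush()
		}
		if err != nil {
			pipeWriter.CloseWithError(err)
		} else {
			pipeWriter.Close()
		}
	}()

	// Stand-in for the registry upload: a Read from an io.Pipe returns
	// data from a single Write, so the printed sizes show the chunking
	// the HTTP layer would see.
	buf := make([]byte, 64*1024)
	for {
		n, err := pipeReader.Read(buf)
		if n > 0 {
			fmt.Printf("read chunk: %d bytes\n", n)
		}
		if err != nil {
			return
		}
	}
}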