Handle Linux Capabilities from command line
Had to revendor in docker/docker again, which dropped a bunch of packages Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
This commit is contained in:
parent
098389dc3e
commit
619637a919
|
@ -6,6 +6,7 @@ import (
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/docker/daemon/caps"
|
||||||
spec "github.com/opencontainers/runtime-spec/specs-go"
|
spec "github.com/opencontainers/runtime-spec/specs-go"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/projectatomic/libpod/libpod"
|
"github.com/projectatomic/libpod/libpod"
|
||||||
|
@ -15,6 +16,25 @@ import (
|
||||||
"golang.org/x/sys/unix"
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func setupCapabilities(config *createConfig, configSpec *spec.Spec) error {
|
||||||
|
var err error
|
||||||
|
var caplist []string
|
||||||
|
if config.privileged {
|
||||||
|
caplist = caps.GetAllCapabilities()
|
||||||
|
} else {
|
||||||
|
caplist, err = caps.TweakCapabilities(defaultCapabilities(), config.capAdd, config.capDrop)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
configSpec.Process.Capabilities.Bounding = caplist
|
||||||
|
configSpec.Process.Capabilities.Permitted = caplist
|
||||||
|
configSpec.Process.Capabilities.Inheritable = caplist
|
||||||
|
configSpec.Process.Capabilities.Effective = caplist
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// Parses information needed to create a container into an OCI runtime spec
|
// Parses information needed to create a container into an OCI runtime spec
|
||||||
func createConfigToOCISpec(config *createConfig) (*spec.Spec, error) {
|
func createConfigToOCISpec(config *createConfig) (*spec.Spec, error) {
|
||||||
configSpec := config.GetDefaultLinuxSpec()
|
configSpec := config.GetDefaultLinuxSpec()
|
||||||
|
@ -30,9 +50,6 @@ func createConfigToOCISpec(config *createConfig) (*spec.Spec, error) {
|
||||||
|
|
||||||
configSpec.Process.Env = config.env
|
configSpec.Process.Env = config.env
|
||||||
|
|
||||||
//TODO
|
|
||||||
// Need examples of capacity additions so I can load that properly
|
|
||||||
|
|
||||||
configSpec.Root.Readonly = config.readOnlyRootfs
|
configSpec.Root.Readonly = config.readOnlyRootfs
|
||||||
configSpec.Hostname = config.hostname
|
configSpec.Hostname = config.hostname
|
||||||
|
|
||||||
|
@ -110,8 +127,12 @@ func createConfigToOCISpec(config *createConfig) (*spec.Spec, error) {
|
||||||
configSpec.Linux.Seccomp = &seccompConfig
|
configSpec.Linux.Seccomp = &seccompConfig
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// HANDLE CAPABILITIES
|
||||||
|
if err := setupCapabilities(config, &configSpec); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Capabilities: &configSpec.LinuxCapabilities{
|
|
||||||
// Rlimits []PosixRlimit // Where does this come from
|
// Rlimits []PosixRlimit // Where does this come from
|
||||||
// Type string
|
// Type string
|
||||||
// Hard uint64
|
// Hard uint64
|
||||||
|
|
|
@ -36,3 +36,23 @@ ALPINE="docker.io/library/alpine:latest"
|
||||||
[ "$status" -eq 0 ]
|
[ "$status" -eq 0 ]
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@test "run selinux test" {
|
||||||
|
|
||||||
|
run ${KPOD_BINARY} ${KPOD_OPTIONS} run --cap-add all ${ALPINE} cat /proc/self/status
|
||||||
|
echo "$output"
|
||||||
|
[ "$status" -eq 0 ]
|
||||||
|
|
||||||
|
run ${KPOD_BINARY} ${KPOD_OPTIONS} run --cap-add sys_admin ${ALPINE} cat /proc/self/status
|
||||||
|
echo "$output"
|
||||||
|
[ "$status" -eq 0 ]
|
||||||
|
|
||||||
|
run ${KPOD_BINARY} ${KPOD_OPTIONS} run --cap-drop all ${ALPINE} cat /proc/self/status
|
||||||
|
echo "$output"
|
||||||
|
[ "$status" -eq 0 ]
|
||||||
|
|
||||||
|
run ${KPOD_BINARY} ${KPOD_OPTIONS} run --cap-drop setuid ${ALPINE} cat /proc/self/status
|
||||||
|
echo "$output"
|
||||||
|
[ "$status" -eq 0 ]
|
||||||
|
|
||||||
|
}
|
||||||
|
|
|
@ -0,0 +1,131 @@
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package caps
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/docker/pkg/stringutils"
|
||||||
|
"github.com/syndtr/gocapability/capability"
|
||||||
|
)
|
||||||
|
|
||||||
|
var capabilityList Capabilities
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
last := capability.CAP_LAST_CAP
|
||||||
|
// hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap
|
||||||
|
if last == capability.Cap(63) {
|
||||||
|
last = capability.CAP_BLOCK_SUSPEND
|
||||||
|
}
|
||||||
|
for _, cap := range capability.List() {
|
||||||
|
if cap > last {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
capabilityList = append(capabilityList,
|
||||||
|
&CapabilityMapping{
|
||||||
|
Key: "CAP_" + strings.ToUpper(cap.String()),
|
||||||
|
Value: cap,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type (
|
||||||
|
// CapabilityMapping maps linux capability name to its value of capability.Cap type
|
||||||
|
// Capabilities is one of the security systems in Linux Security Module (LSM)
|
||||||
|
// framework provided by the kernel.
|
||||||
|
// For more details on capabilities, see http://man7.org/linux/man-pages/man7/capabilities.7.html
|
||||||
|
CapabilityMapping struct {
|
||||||
|
Key string `json:"key,omitempty"`
|
||||||
|
Value capability.Cap `json:"value,omitempty"`
|
||||||
|
}
|
||||||
|
// Capabilities contains all CapabilityMapping
|
||||||
|
Capabilities []*CapabilityMapping
|
||||||
|
)
|
||||||
|
|
||||||
|
// String returns <key> of CapabilityMapping
|
||||||
|
func (c *CapabilityMapping) String() string {
|
||||||
|
return c.Key
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCapability returns CapabilityMapping which contains specific key
|
||||||
|
func GetCapability(key string) *CapabilityMapping {
|
||||||
|
for _, capp := range capabilityList {
|
||||||
|
if capp.Key == key {
|
||||||
|
cpy := *capp
|
||||||
|
return &cpy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAllCapabilities returns all of the capabilities
|
||||||
|
func GetAllCapabilities() []string {
|
||||||
|
output := make([]string, len(capabilityList))
|
||||||
|
for i, capability := range capabilityList {
|
||||||
|
output[i] = capability.String()
|
||||||
|
}
|
||||||
|
return output
|
||||||
|
}
|
||||||
|
|
||||||
|
// TweakCapabilities can tweak capabilities by adding or dropping capabilities
|
||||||
|
// based on the basics capabilities.
|
||||||
|
func TweakCapabilities(basics, adds, drops []string) ([]string, error) {
|
||||||
|
var (
|
||||||
|
newCaps []string
|
||||||
|
allCaps = GetAllCapabilities()
|
||||||
|
)
|
||||||
|
|
||||||
|
// FIXME(tonistiigi): docker format is without CAP_ prefix, oci is with prefix
|
||||||
|
// Currently they are mixed in here. We should do conversion in one place.
|
||||||
|
|
||||||
|
// look for invalid cap in the drop list
|
||||||
|
for _, cap := range drops {
|
||||||
|
if strings.ToLower(cap) == "all" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if !stringutils.InSlice(allCaps, "CAP_"+cap) {
|
||||||
|
return nil, fmt.Errorf("Unknown capability drop: %q", cap)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// handle --cap-add=all
|
||||||
|
if stringutils.InSlice(adds, "all") {
|
||||||
|
basics = allCaps
|
||||||
|
}
|
||||||
|
|
||||||
|
if !stringutils.InSlice(drops, "all") {
|
||||||
|
for _, cap := range basics {
|
||||||
|
// skip `all` already handled above
|
||||||
|
if strings.ToLower(cap) == "all" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// if we don't drop `all`, add back all the non-dropped caps
|
||||||
|
if !stringutils.InSlice(drops, cap[4:]) {
|
||||||
|
newCaps = append(newCaps, strings.ToUpper(cap))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, cap := range adds {
|
||||||
|
// skip `all` already handled above
|
||||||
|
if strings.ToLower(cap) == "all" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
cap = "CAP_" + cap
|
||||||
|
|
||||||
|
if !stringutils.InSlice(allCaps, cap) {
|
||||||
|
return nil, fmt.Errorf("Unknown capability to add: %q", cap)
|
||||||
|
}
|
||||||
|
|
||||||
|
// add cap if not already in the list
|
||||||
|
if !stringutils.InSlice(newCaps, cap) {
|
||||||
|
newCaps = append(newCaps, strings.ToUpper(cap))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return newCaps, nil
|
||||||
|
}
|
|
@ -0,0 +1,60 @@
|
||||||
|
## About
|
||||||
|
|
||||||
|
This directory contains a collection of scripts used to build and manage this
|
||||||
|
repository. If there are any issues regarding the intention of a particular
|
||||||
|
script (or even part of a certain script), please reach out to us.
|
||||||
|
It may help us either refine our current scripts, or add on new ones
|
||||||
|
that are appropriate for a given use case.
|
||||||
|
|
||||||
|
## DinD (dind.sh)
|
||||||
|
|
||||||
|
DinD is a wrapper script which allows Docker to be run inside a Docker
|
||||||
|
container. DinD requires the container to
|
||||||
|
be run with privileged mode enabled.
|
||||||
|
|
||||||
|
## Generate Authors (generate-authors.sh)
|
||||||
|
|
||||||
|
Generates AUTHORS; a file with all the names and corresponding emails of
|
||||||
|
individual contributors. AUTHORS can be found in the home directory of
|
||||||
|
this repository.
|
||||||
|
|
||||||
|
## Make
|
||||||
|
|
||||||
|
There are two make files, each with different extensions. Neither are supposed
|
||||||
|
to be called directly; only invoke `make`. Both scripts run inside a Docker
|
||||||
|
container.
|
||||||
|
|
||||||
|
### make.ps1
|
||||||
|
|
||||||
|
- The Windows native build script that uses PowerShell semantics; it is limited
|
||||||
|
unlike `hack\make.sh` since it does not provide support for the full set of
|
||||||
|
operations provided by the Linux counterpart, `make.sh`. However, `make.ps1`
|
||||||
|
does provide support for local Windows development and Windows to Windows CI.
|
||||||
|
More information is found within `make.ps1` by the author, @jhowardmsft
|
||||||
|
|
||||||
|
### make.sh
|
||||||
|
|
||||||
|
- Referenced via `make test` when running tests on a local machine,
|
||||||
|
or directly referenced when running tests inside a Docker development container.
|
||||||
|
- When running on a local machine, run `make test` to run all tests found in
|
||||||
|
`test`, `test-unit`, `test-integration-cli`, and `test-docker-py` on
|
||||||
|
your local machine. The default timeout is set in `make.sh` to 60 minutes
|
||||||
|
(`${TIMEOUT:=60m}`), since it currently takes up to an hour to run
|
||||||
|
all of the tests.
|
||||||
|
- When running inside a Docker development container, `hack/make.sh` does
|
||||||
|
not have a single target that runs all the tests. You need to provide a
|
||||||
|
single command line with multiple targets that performs the same thing.
|
||||||
|
An example referenced from [Run targets inside a development container](https://docs.docker.com/opensource/project/test-and-docs/#run-targets-inside-a-development-container): `root@5f8630b873fe:/go/src/github.com/moby/moby# hack/make.sh dynbinary binary cross test-unit test-integration-cli test-docker-py`
|
||||||
|
- For more information related to testing outside the scope of this README,
|
||||||
|
refer to
|
||||||
|
[Run tests and test documentation](https://docs.docker.com/opensource/project/test-and-docs/)
|
||||||
|
|
||||||
|
## Release (release.sh)
|
||||||
|
|
||||||
|
Releases any bundles built by `make` on a public AWS S3 bucket.
|
||||||
|
For information regarding configuration, please view `release.sh`.
|
||||||
|
|
||||||
|
## Vendor (vendor.sh)
|
||||||
|
|
||||||
|
A shell script that is a wrapper around Vndr. For information on how to use
|
||||||
|
this, please refer to [vndr's README](https://github.com/LK4D4/vndr/blob/master/README.md)
|
69
vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md
generated
vendored
Normal file
69
vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md
generated
vendored
Normal file
|
@ -0,0 +1,69 @@
|
||||||
|
# Integration Testing on Swarm
|
||||||
|
|
||||||
|
IT on Swarm allows you to execute integration tests in parallel across a Docker Swarm cluster
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
### Master service
|
||||||
|
|
||||||
|
- Works as a funker caller
|
||||||
|
- Calls a worker funker (`-worker-service`) with a chunk of `-check.f` filter strings (passed as a file via `-input` flag, typically `/mnt/input`)
|
||||||
|
|
||||||
|
### Worker service
|
||||||
|
|
||||||
|
- Works as a funker callee
|
||||||
|
- Executes an equivalent of `TESTFLAGS=-check.f TestFoo|TestBar|TestBaz ... make test-integration-cli` using the bind-mounted API socket (`docker.sock`)
|
||||||
|
|
||||||
|
### Client
|
||||||
|
|
||||||
|
- Controls master and workers via `docker stack`
|
||||||
|
- No need to have a local daemon
|
||||||
|
|
||||||
|
Typically, the master and workers are supposed to be running on a cloud environment,
|
||||||
|
while the client is supposed to be running on a laptop, e.g. Docker for Mac/Windows.
|
||||||
|
|
||||||
|
## Requirement
|
||||||
|
|
||||||
|
- Docker daemon 1.13 or later
|
||||||
|
- Private registry for distributed execution with multiple nodes
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Step 1: Prepare images
|
||||||
|
|
||||||
|
$ make build-integration-cli-on-swarm
|
||||||
|
|
||||||
|
Following environment variables are known to work in this step:
|
||||||
|
|
||||||
|
- `BUILDFLAGS`
|
||||||
|
- `DOCKER_INCREMENTAL_BINARY`
|
||||||
|
|
||||||
|
Note: during the transition into Moby Project, you might need to create a symbolic link `$GOPATH/src/github.com/docker/docker` to `$GOPATH/src/github.com/moby/moby`.
|
||||||
|
|
||||||
|
### Step 2: Execute tests
|
||||||
|
|
||||||
|
$ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest
|
||||||
|
|
||||||
|
Following environment variables are known to work in this step:
|
||||||
|
|
||||||
|
- `DOCKER_GRAPHDRIVER`
|
||||||
|
- `DOCKER_EXPERIMENTAL`
|
||||||
|
|
||||||
|
#### Flags
|
||||||
|
|
||||||
|
Basic flags:
|
||||||
|
|
||||||
|
- `-replicas N`: the number of worker service replicas. i.e. degree of parallelism.
|
||||||
|
- `-chunks N`: the number of chunks. By default, `chunks` == `replicas`.
|
||||||
|
- `-push-worker-image REGISTRY/IMAGE:TAG`: push the worker image to the registry. Note that if you have only single node and hence you do not need a private registry, you do not need to specify `-push-worker-image`.
|
||||||
|
|
||||||
|
Experimental flags for mitigating makespan nonuniformity:
|
||||||
|
|
||||||
|
- `-shuffle`: Shuffle the test filter strings
|
||||||
|
|
||||||
|
Flags for debugging IT on Swarm itself:
|
||||||
|
|
||||||
|
- `-rand-seed N`: the random seed. This flag is useful for deterministic replaying. By default(0), the timestamp is used.
|
||||||
|
- `-filters-file FILE`: the file contains `-check.f` strings. By default, the file is automatically generated.
|
||||||
|
- `-dry-run`: skip the actual workload
|
||||||
|
- `-keep-executor`: do not auto-remove executor containers, which is used for running privileged programs on Swarm
|
2
vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf
generated
vendored
Normal file
2
vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf
generated
vendored
Normal file
|
@ -0,0 +1,2 @@
|
||||||
|
# dependencies specific to worker (i.e. github.com/docker/docker/...) are not vendored here
|
||||||
|
github.com/bfirsh/funker-go eaa0a2e06f30e72c9a0b7f858951e581e26ef773
|
|
@ -1,137 +0,0 @@
|
||||||
// Package pools provides a collection of pools which provide various
|
|
||||||
// data types with buffers. These can be used to lower the number of
|
|
||||||
// memory allocations and reuse buffers.
|
|
||||||
//
|
|
||||||
// New pools should be added to this package to allow them to be
|
|
||||||
// shared across packages.
|
|
||||||
//
|
|
||||||
// Utility functions which operate on pools should be added to this
|
|
||||||
// package to allow them to be reused.
|
|
||||||
package pools
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"io"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/docker/docker/pkg/ioutils"
|
|
||||||
)
|
|
||||||
|
|
||||||
const buffer32K = 32 * 1024
|
|
||||||
|
|
||||||
var (
|
|
||||||
// BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
|
|
||||||
BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
|
|
||||||
// BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
|
|
||||||
BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
|
|
||||||
buffer32KPool = newBufferPoolWithSize(buffer32K)
|
|
||||||
)
|
|
||||||
|
|
||||||
// BufioReaderPool is a bufio reader that uses sync.Pool.
type BufioReaderPool struct {
	pool sync.Pool
}

// newBufioReaderPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
	return &BufioReaderPool{
		pool: sync.Pool{
			New: func() interface{} { return bufio.NewReaderSize(nil, size) },
		},
	}
}

// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
	buf := bufPool.pool.Get().(*bufio.Reader)
	buf.Reset(r)
	return buf
}

// Put puts the bufio.Reader back into the pool.
func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
	// Detach the underlying reader so pooled readers don't pin it alive.
	b.Reset(nil)
	bufPool.pool.Put(b)
}
|
|
||||||
|
|
||||||
// bufferPool is a pool of fixed-size []byte scratch buffers.
type bufferPool struct {
	pool sync.Pool
}

// newBufferPoolWithSize returns a pool whose Get yields []byte slices of
// exactly size bytes.
func newBufferPoolWithSize(size int) *bufferPool {
	return &bufferPool{
		pool: sync.Pool{
			New: func() interface{} { return make([]byte, size) },
		},
	}
}

// Get returns a buffer from the pool, allocating a fresh one when empty.
func (bp *bufferPool) Get() []byte {
	return bp.pool.Get().([]byte)
}

// Put returns a buffer to the pool for reuse.
func (bp *bufferPool) Put(b []byte) {
	bp.pool.Put(b)
}
|
|
||||||
|
|
||||||
// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
|
|
||||||
func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
|
|
||||||
buf := buffer32KPool.Get()
|
|
||||||
written, err = io.CopyBuffer(dst, src, buf)
|
|
||||||
buffer32KPool.Put(buf)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
|
|
||||||
// into the pool and closes the reader if it's an io.ReadCloser.
|
|
||||||
func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
|
|
||||||
return ioutils.NewReadCloserWrapper(r, func() error {
|
|
||||||
if readCloser, ok := r.(io.ReadCloser); ok {
|
|
||||||
readCloser.Close()
|
|
||||||
}
|
|
||||||
bufPool.Put(buf)
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// BufioWriterPool is a bufio writer that uses sync.Pool.
type BufioWriterPool struct {
	pool sync.Pool
}

// newBufioWriterPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
	return &BufioWriterPool{
		pool: sync.Pool{
			New: func() interface{} { return bufio.NewWriterSize(nil, size) },
		},
	}
}

// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
	buf := bufPool.pool.Get().(*bufio.Writer)
	buf.Reset(w)
	return buf
}

// Put puts the bufio.Writer back into the pool.
func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
	// Detach the underlying writer so pooled writers don't pin it alive.
	b.Reset(nil)
	bufPool.pool.Put(b)
}
|
|
||||||
|
|
||||||
// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
|
|
||||||
// into the pool and closes the writer if it's an io.Writecloser.
|
|
||||||
func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
|
|
||||||
return ioutils.NewWriteCloserWrapper(w, func() error {
|
|
||||||
buf.Flush()
|
|
||||||
if writeCloser, ok := w.(io.WriteCloser); ok {
|
|
||||||
writeCloser.Close()
|
|
||||||
}
|
|
||||||
bufPool.Put(buf)
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
Loading…
Reference in New Issue