Merge pull request #1843 from dongluochen/ConnectionLeak-fix

Do not recycle connections. Let net/http handle connections.
Victor Vieux 2016-02-22 16:53:26 -08:00
commit bc440060f1
4 changed files with 48 additions and 10 deletions
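
For background, here is a minimal sketch (not Swarm code; the URL and helper names are placeholders) of the default net/http behavior this change relies on: a single shared http.Client with the default Transport keeps idle keep-alive connections in a pool and reuses them across requests, as long as response bodies are drained and closed. Calling Transport.CloseIdleConnections() after every request, as the removed helper did, discards that pool each time and forces new TCP/TLS handshakes.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

// A single shared client: its default Transport keeps idle
// keep-alive connections in a pool and reuses them.
var client = &http.Client{}

func get(url string) error {
	resp, err := client.Get(url)
	if err != nil {
		return err
	}
	// Draining and closing the body returns the connection
	// to the Transport's idle pool so it can be reused.
	defer resp.Body.Close()
	_, err = io.Copy(ioutil.Discard, resp.Body)
	return err
}

func main() {
	// Repeated requests to the same host reuse pooled connections
	// instead of opening a new TCP/TLS connection every time.
	for i := 0; i < 5; i++ {
		if err := get("https://example.com/"); err != nil {
			fmt.Println("request failed:", err)
		}
	}
}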


@@ -477,7 +477,6 @@ func getContainerJSON(c *context, w http.ResponseWriter, r *http.Request) {
 	// cleanup
 	defer resp.Body.Close()
-	defer closeIdleConnections(client)
 	data, err := ioutil.ReadAll(resp.Body)
 	if err != nil {
@@ -786,7 +785,6 @@ func postContainersExec(c *context, w http.ResponseWriter, r *http.Request) {
 	// cleanup
 	defer resp.Body.Close()
-	defer closeIdleConnections(client)
 	// check status code
 	if resp.StatusCode < 200 || resp.StatusCode >= 400 {


@@ -89,13 +89,6 @@ func copyHeader(dst, src http.Header) {
 	}
 }
-// prevents leak with https
-func closeIdleConnections(client *http.Client) {
-	if tr, ok := client.Transport.(*http.Transport); ok {
-		tr.CloseIdleConnections()
-	}
-}
 func proxyAsync(engine *cluster.Engine, w http.ResponseWriter, r *http.Request, callback func(*http.Response)) error {
 	// RequestURI may not be sent to client
 	r.RequestURI = ""
@@ -121,7 +114,6 @@ func proxyAsync(engine *cluster.Engine, w http.ResponseWriter, r *http.Request,
 	// cleanup
 	resp.Body.Close()
-	closeIdleConnections(client)
 	return nil
 }


@@ -0,0 +1,41 @@
+#!/usr/bin/env bats
+
+load ../helpers
+
+function teardown() {
+	swarm_manage_cleanup
+	stop_docker
+}
+
+@test "Swarm does not leak tcp connections" {
+	# Start engines with the busybox image
+	start_docker_with_busybox 2
+
+	# Start swarm and check it can reach the nodes
+	swarm_manage --engine-refresh-min-interval "20s" --engine-refresh-max-interval "20s" --engine-failure-retry 20 "${HOSTS[0]},${HOSTS[1]}"
+	eval "docker_swarm info | grep -q -i 'Nodes: 2'"
+
+	# Create busybox containers with host networking so that we can run netstat
+	run docker_swarm run -itd --name=busybox0 --net=host -e constraint:node==node-0 busybox sh
+	[ "$status" -eq 0 ]
+	run docker_swarm run -itd --name=busybox1 --net=host -e constraint:node==node-1 busybox sh
+	[ "$status" -eq 0 ]
+
+	# Run the most common container operations
+	for ((i = 0; i < 30; i++)); do
+		# test postContainerCreate
+		docker_swarm run --name="hello$i" hello-world
+		# test getContainerJSON
+		docker_swarm inspect "hello$i"
+		# test proxyContainer
+		docker_swarm logs "hello$i"
+		# test proxyContainerAndForceRefresh
+		docker_swarm stop "hello$i"
+	done
+
+	# Get the established connection count on each engine
+	count0=$(docker_swarm exec busybox0 netstat -an | grep "${HOSTS[0]}" | grep -i "ESTABLISHED" | wc -l)
+	count1=$(docker_swarm exec busybox1 netstat -an | grep "${HOSTS[1]}" | grep -i "ESTABLISHED" | wc -l)
+	[[ "$count0" -le 10 ]]
+	[[ "$count1" -le 10 ]]
+}


@@ -8,6 +8,13 @@ function teardown() {
 }
 
 @test "scheduler avoids failing node" {
+	# Docker issue #14203 in runC causes this test to fail.
+	# The issue is fixed in Docker 1.10.
+	run docker --version
+	if [[ "${output}" == "Docker version 1.9"* ]]; then
+		skip
+	fi
+
 	# Start 1 engine and register it in the file.
 	start_docker 2
 	# Start swarm and check it can reach the node