Merge remote-tracking branch 'upstream/main' into HEAD

Commit c6c2bbdcda by Milas Bowman, 2022-07-29 15:56:01 -04:00
135 changed files with 3302 additions and 1417 deletions

.github/CODEOWNERS vendored Normal file

@ -0,0 +1,6 @@
# GitHub code owners
# See https://help.github.com/articles/about-codeowners/
#
# KEEP THIS FILE SORTED. Order is important. Last match takes precedence.
* @aiordache @ulyssessouza

.github/workflows/ci.yml vendored Normal file

@ -0,0 +1,51 @@
name: Python package
on: [push, pull_request]
jobs:
flake8:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.x'
- run: pip install -U flake8
- name: Run flake8
run: flake8 docker/ tests/
unit-tests:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.7", "3.8", "3.9", "3.10", "3.11.0-alpha - 3.11.0"]
steps:
- uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python3 -m pip install --upgrade pip
pip3 install -r test-requirements.txt -r requirements.txt
- name: Run unit tests
run: |
docker logout
rm -rf ~/.docker
py.test -v --cov=docker tests/unit
integration-tests:
runs-on: ubuntu-latest
strategy:
matrix:
variant: [ "integration-dind", "integration-dind-ssl", "integration-dind-ssh" ]
steps:
- uses: actions/checkout@v3
- name: make ${{ matrix.variant }}
run: |
docker logout
rm -rf ~/.docker
make ${{ matrix.variant }}

.readthedocs.yml

@ -3,8 +3,12 @@ version: 2
sphinx:
configuration: docs/conf.py
build:
os: ubuntu-20.04
tools:
python: '3.10'
python:
version: 3.5
install:
- requirements: docs-requirements.txt
- requirements: requirements.txt

.travis.yml

@ -1,20 +0,0 @@
sudo: false
language: python
matrix:
include:
- python: 2.7
env: TOXENV=py27
- python: 3.5
env: TOXENV=py35
- python: 3.6
env: TOXENV=py36
- python: 3.7
env: TOXENV=py37
dist: xenial
sudo: true
- env: TOXENV=flake8
install:
- pip install tox==2.9.1
script:
- tox

Dockerfile

@ -1,4 +1,4 @@
ARG PYTHON_VERSION=2.7
ARG PYTHON_VERSION=3.10
FROM python:${PYTHON_VERSION}
@ -6,10 +6,10 @@ RUN mkdir /src
WORKDIR /src
COPY requirements.txt /src/requirements.txt
RUN pip install -r requirements.txt
RUN pip install --no-cache-dir -r requirements.txt
COPY test-requirements.txt /src/test-requirements.txt
RUN pip install -r test-requirements.txt
RUN pip install --no-cache-dir -r test-requirements.txt
COPY . /src
RUN pip install .
RUN pip install --no-cache-dir .

Dockerfile-docs

@ -1,4 +1,4 @@
ARG PYTHON_VERSION=3.7
ARG PYTHON_VERSION=3.10
FROM python:${PYTHON_VERSION}
@ -10,6 +10,6 @@ RUN addgroup --gid $gid sphinx \
WORKDIR /src
COPY requirements.txt docs-requirements.txt ./
RUN pip install -r requirements.txt -r docs-requirements.txt
RUN pip install --no-cache-dir -r requirements.txt -r docs-requirements.txt
USER sphinx

Dockerfile-py3

@ -1,15 +0,0 @@
ARG PYTHON_VERSION=3.7
FROM python:${PYTHON_VERSION}
RUN mkdir /src
WORKDIR /src
COPY requirements.txt /src/requirements.txt
RUN pip install -r requirements.txt
COPY test-requirements.txt /src/test-requirements.txt
RUN pip install -r test-requirements.txt
COPY . /src
RUN pip install .

Jenkinsfile vendored

@ -1,8 +1,8 @@
#!groovy
def imageNameBase = "dockerbuildbot/docker-py"
def imageNamePy2
def imageNameBase = "dockerpinata/docker-py"
def imageNamePy3
def imageDindSSH
def images = [:]
def buildImage = { name, buildargs, pyTag ->
@ -13,26 +13,27 @@ def buildImage = { name, buildargs, pyTag ->
img = docker.build(name, buildargs)
img.push()
}
images[pyTag] = img.id
if (pyTag?.trim()) images[pyTag] = img.id
}
def buildImages = { ->
wrappedNode(label: "amd64 && ubuntu-1804 && overlay2", cleanWorkspace: true) {
wrappedNode(label: "amd64 && ubuntu-2004 && overlay2", cleanWorkspace: true) {
stage("build image") {
checkout(scm)
imageNamePy2 = "${imageNameBase}:py2-${gitCommit()}"
imageNamePy3 = "${imageNameBase}:py3-${gitCommit()}"
buildImage(imageNamePy2, "-f tests/Dockerfile --build-arg PYTHON_VERSION=2.7 .", "py2.7")
buildImage(imageNamePy3, "-f tests/Dockerfile --build-arg PYTHON_VERSION=3.7 .", "py3.7")
imageDindSSH = "${imageNameBase}:sshdind-${gitCommit()}"
withDockerRegistry(credentialsId:'dockerbuildbot-index.docker.io') {
buildImage(imageDindSSH, "-f tests/Dockerfile-ssh-dind .", "")
buildImage(imageNamePy3, "-f tests/Dockerfile --build-arg PYTHON_VERSION=3.10 .", "py3.10")
}
}
}
}
def getDockerVersions = { ->
def dockerVersions = ["19.03.5"]
wrappedNode(label: "amd64 && ubuntu-1804 && overlay2") {
def dockerVersions = ["19.03.12"]
wrappedNode(label: "amd64 && ubuntu-2004 && overlay2") {
def result = sh(script: """docker run --rm \\
--entrypoint=python \\
${imageNamePy3} \\
@ -66,39 +67,64 @@ def runTests = { Map settings ->
throw new Exception("Need test image object, e.g.: `runTests(testImage: img)`")
}
if (!dockerVersion) {
throw new Exception("Need Docker version to test, e.g.: `runTests(dockerVersion: '1.12.3')`")
throw new Exception("Need Docker version to test, e.g.: `runTests(dockerVersion: '19.03.12')`")
}
if (!pythonVersion) {
throw new Exception("Need Python version being tested, e.g.: `runTests(pythonVersion: 'py2.7')`")
throw new Exception("Need Python version being tested, e.g.: `runTests(pythonVersion: 'py3.x')`")
}
{ ->
wrappedNode(label: "amd64 && ubuntu-1804 && overlay2", cleanWorkspace: true) {
wrappedNode(label: "amd64 && ubuntu-2004 && overlay2", cleanWorkspace: true) {
stage("test python=${pythonVersion} / docker=${dockerVersion}") {
checkout(scm)
def dindContainerName = "dpy-dind-\$BUILD_NUMBER-\$EXECUTOR_NUMBER-${pythonVersion}-${dockerVersion}"
def testContainerName = "dpy-tests-\$BUILD_NUMBER-\$EXECUTOR_NUMBER-${pythonVersion}-${dockerVersion}"
def testNetwork = "dpy-testnet-\$BUILD_NUMBER-\$EXECUTOR_NUMBER-${pythonVersion}-${dockerVersion}"
try {
sh """docker network create ${testNetwork}"""
sh """docker run -d --name ${dindContainerName} -v /tmp --privileged --network ${testNetwork} \\
docker:${dockerVersion}-dind dockerd -H tcp://0.0.0.0:2375
"""
sh """docker run \\
--name ${testContainerName} \\
-e "DOCKER_HOST=tcp://${dindContainerName}:2375" \\
-e 'DOCKER_TEST_API_VERSION=${apiVersion}' \\
--network ${testNetwork} \\
--volumes-from ${dindContainerName} \\
${testImage} \\
py.test -v -rxs --cov=docker tests/
"""
} finally {
sh """
docker stop ${dindContainerName} ${testContainerName}
docker rm -vf ${dindContainerName} ${testContainerName}
docker network rm ${testNetwork}
"""
withDockerRegistry(credentialsId:'dockerbuildbot-index.docker.io') {
try {
// unit tests
sh """docker run --rm \\
-e 'DOCKER_TEST_API_VERSION=${apiVersion}' \\
${testImage} \\
py.test -v -rxs --cov=docker tests/unit
"""
// integration tests
sh """docker network create ${testNetwork}"""
sh """docker run --rm -d --name ${dindContainerName} -v /tmp --privileged --network ${testNetwork} \\
${imageDindSSH} dockerd -H tcp://0.0.0.0:2375
"""
sh """docker run --rm \\
--name ${testContainerName} \\
-e "DOCKER_HOST=tcp://${dindContainerName}:2375" \\
-e 'DOCKER_TEST_API_VERSION=${apiVersion}' \\
--network ${testNetwork} \\
--volumes-from ${dindContainerName} \\
-v $DOCKER_CONFIG/config.json:/root/.docker/config.json \\
${testImage} \\
py.test -v -rxs --cov=docker tests/integration
"""
sh """docker stop ${dindContainerName}"""
// start DIND container with SSH
sh """docker run --rm -d --name ${dindContainerName} -v /tmp --privileged --network ${testNetwork} \\
${imageDindSSH} dockerd --experimental"""
sh """docker exec ${dindContainerName} sh -c /usr/sbin/sshd """
// run SSH tests only
sh """docker run --rm \\
--name ${testContainerName} \\
-e "DOCKER_HOST=ssh://${dindContainerName}:22" \\
-e 'DOCKER_TEST_API_VERSION=${apiVersion}' \\
--network ${testNetwork} \\
--volumes-from ${dindContainerName} \\
-v $DOCKER_CONFIG/config.json:/root/.docker/config.json \\
${testImage} \\
py.test -v -rxs --cov=docker tests/ssh
"""
} finally {
sh """
docker stop ${dindContainerName}
docker network rm ${testNetwork}
"""
}
}
}
}

MAINTAINERS

@ -11,7 +11,8 @@
[Org]
[Org."Core maintainers"]
people = [
"shin-",
"aiordache",
"ulyssessouza",
]
[Org.Alumni]
people = [
@ -20,6 +21,7 @@
"dnephin",
"mnowster",
"mpetazzoni",
"shin-",
]
[people]
@ -35,6 +37,11 @@
Email = "aanand@docker.com"
GitHub = "aanand"
[people.aiordache]
Name = "Anca Iordache"
Email = "anca.iordache@docker.com"
GitHub = "aiordache"
[people.bfirsh]
Name = "Ben Firshman"
Email = "b@fir.sh"
@ -59,3 +66,8 @@
Name = "Joffrey F"
Email = "joffrey@docker.com"
GitHub = "shin-"
[people.ulyssessouza]
Name = "Ulysses Domiciano Souza"
Email = "ulysses.souza@docker.com"
GitHub = "ulyssessouza"

Makefile

@ -1,100 +1,176 @@
TEST_API_VERSION ?= 1.41
TEST_ENGINE_VERSION ?= 20.10
ifeq ($(OS),Windows_NT)
PLATFORM := Windows
else
PLATFORM := $(shell sh -c 'uname -s 2>/dev/null || echo Unknown')
endif
ifeq ($(PLATFORM),Linux)
uid_args := "--build-arg uid=$(shell id -u) --build-arg gid=$(shell id -g)"
endif
.PHONY: all
all: test
.PHONY: clean
clean:
-docker rm -f dpy-dind-py2 dpy-dind-py3 dpy-dind-certs dpy-dind-ssl
-docker rm -f dpy-dind-py3 dpy-dind-certs dpy-dind-ssl
find -name "__pycache__" | xargs rm -rf
.PHONY: build
build:
docker build -t docker-sdk-python -f tests/Dockerfile --build-arg PYTHON_VERSION=2.7 --build-arg APT_MIRROR .
.PHONY: build-dind-ssh
build-dind-ssh:
docker build \
--pull \
-t docker-dind-ssh \
-f tests/Dockerfile-ssh-dind \
--build-arg ENGINE_VERSION=${TEST_ENGINE_VERSION} \
--build-arg API_VERSION=${TEST_API_VERSION} \
--build-arg APT_MIRROR .
.PHONY: build-py3
build-py3:
docker build -t docker-sdk-python3 -f tests/Dockerfile --build-arg APT_MIRROR .
docker build \
--pull \
-t docker-sdk-python3 \
-f tests/Dockerfile \
--build-arg APT_MIRROR .
.PHONY: build-docs
build-docs:
docker build -t docker-sdk-python-docs -f Dockerfile-docs --build-arg uid=$(shell id -u) --build-arg gid=$(shell id -g) .
docker build -t docker-sdk-python-docs -f Dockerfile-docs $(uid_args) .
.PHONY: build-dind-certs
build-dind-certs:
docker build -t dpy-dind-certs -f tests/Dockerfile-dind-certs .
.PHONY: test
test: flake8 unit-test unit-test-py3 integration-dind integration-dind-ssl
.PHONY: unit-test
unit-test: build
docker run -t --rm docker-sdk-python py.test tests/unit
test: flake8 unit-test-py3 integration-dind integration-dind-ssl
.PHONY: unit-test-py3
unit-test-py3: build-py3
docker run -t --rm docker-sdk-python3 py.test tests/unit
.PHONY: integration-test
integration-test: build
docker run -t --rm -v /var/run/docker.sock:/var/run/docker.sock docker-sdk-python py.test -v tests/integration/${file}
.PHONY: integration-test-py3
integration-test-py3: build-py3
docker run -t --rm -v /var/run/docker.sock:/var/run/docker.sock docker-sdk-python3 py.test -v tests/integration/${file}
TEST_API_VERSION ?= 1.35
TEST_ENGINE_VERSION ?= 19.03.5
.PHONY: setup-network
setup-network:
docker network inspect dpy-tests || docker network create dpy-tests
.PHONY: integration-dind
integration-dind: integration-dind-py2 integration-dind-py3
.PHONY: integration-dind-py2
integration-dind-py2: build setup-network
docker rm -vf dpy-dind-py2 || :
docker run -d --network dpy-tests --name dpy-dind-py2 --privileged\
docker:${TEST_ENGINE_VERSION}-dind dockerd -H tcp://0.0.0.0:2375 --experimental
docker run -t --rm --env="DOCKER_HOST=tcp://dpy-dind-py2:2375" --env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}"\
--network dpy-tests docker-sdk-python py.test tests/integration
docker rm -vf dpy-dind-py2
integration-dind: integration-dind-py3
.PHONY: integration-dind-py3
integration-dind-py3: build-py3 setup-network
docker rm -vf dpy-dind-py3 || :
docker run -d --network dpy-tests --name dpy-dind-py3 --privileged\
docker:${TEST_ENGINE_VERSION}-dind dockerd -H tcp://0.0.0.0:2375 --experimental
docker run -t --rm --env="DOCKER_HOST=tcp://dpy-dind-py3:2375" --env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}"\
--network dpy-tests docker-sdk-python3 py.test tests/integration
docker run \
--detach \
--name dpy-dind-py3 \
--network dpy-tests \
--pull=always \
--privileged \
docker:${TEST_ENGINE_VERSION}-dind \
dockerd -H tcp://0.0.0.0:2375 --experimental
# Wait for Docker-in-Docker to come to life
docker run \
--network dpy-tests \
--rm \
--tty \
busybox \
sh -c 'while ! nc -z dpy-dind-py3 2375; do sleep 1; done'
docker run \
--env="DOCKER_HOST=tcp://dpy-dind-py3:2375" \
--env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}" \
--network dpy-tests \
--rm \
--tty \
docker-sdk-python3 \
py.test tests/integration/${file}
docker rm -vf dpy-dind-py3
.PHONY: integration-dind-ssh
integration-dind-ssh: build-dind-ssh build-py3 setup-network
docker rm -vf dpy-dind-ssh || :
docker run -d --network dpy-tests --name dpy-dind-ssh --privileged \
docker-dind-ssh dockerd --experimental
# start SSH daemon for known key
docker exec dpy-dind-ssh sh -c "/usr/sbin/sshd -h /etc/ssh/known_ed25519 -p 22"
docker exec dpy-dind-ssh sh -c "/usr/sbin/sshd -h /etc/ssh/unknown_ed25519 -p 2222"
docker run \
--tty \
--rm \
--env="DOCKER_HOST=ssh://dpy-dind-ssh" \
--env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}" \
--env="UNKNOWN_DOCKER_SSH_HOST=ssh://dpy-dind-ssh:2222" \
--network dpy-tests \
docker-sdk-python3 py.test tests/ssh/${file}
docker rm -vf dpy-dind-ssh
.PHONY: integration-dind-ssl
integration-dind-ssl: build-dind-certs build build-py3
integration-dind-ssl: build-dind-certs build-py3 setup-network
docker rm -vf dpy-dind-certs dpy-dind-ssl || :
docker run -d --name dpy-dind-certs dpy-dind-certs
docker run -d --env="DOCKER_HOST=tcp://localhost:2375" --env="DOCKER_TLS_VERIFY=1"\
--env="DOCKER_CERT_PATH=/certs" --volumes-from dpy-dind-certs --name dpy-dind-ssl\
--network dpy-tests --network-alias docker -v /tmp --privileged\
docker:${TEST_ENGINE_VERSION}-dind\
dockerd --tlsverify --tlscacert=/certs/ca.pem --tlscert=/certs/server-cert.pem\
--tlskey=/certs/server-key.pem -H tcp://0.0.0.0:2375 --experimental
docker run -t --rm --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375"\
--env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs" --env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}"\
--network dpy-tests docker-sdk-python py.test tests/integration
docker run -t --rm --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375"\
--env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs" --env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}"\
--network dpy-tests docker-sdk-python3 py.test tests/integration
docker run \
--detach \
--env="DOCKER_CERT_PATH=/certs" \
--env="DOCKER_HOST=tcp://localhost:2375" \
--env="DOCKER_TLS_VERIFY=1" \
--name dpy-dind-ssl \
--network dpy-tests \
--network-alias docker \
--pull=always \
--privileged \
--volume /tmp \
--volumes-from dpy-dind-certs \
docker:${TEST_ENGINE_VERSION}-dind \
dockerd \
--tlsverify \
--tlscacert=/certs/ca.pem \
--tlscert=/certs/server-cert.pem \
--tlskey=/certs/server-key.pem \
-H tcp://0.0.0.0:2375 \
--experimental
# Wait for Docker-in-Docker to come to life
docker run \
--network dpy-tests \
--rm \
--tty \
busybox \
sh -c 'while ! nc -z dpy-dind-ssl 2375; do sleep 1; done'
docker run \
--env="DOCKER_CERT_PATH=/certs" \
--env="DOCKER_HOST=tcp://docker:2375" \
--env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}" \
--env="DOCKER_TLS_VERIFY=1" \
--network dpy-tests \
--rm \
--volumes-from dpy-dind-ssl \
--tty \
docker-sdk-python3 \
py.test tests/integration/${file}
docker rm -vf dpy-dind-ssl dpy-dind-certs
.PHONY: flake8
flake8: build
docker run -t --rm docker-sdk-python flake8 docker tests
flake8: build-py3
docker run -t --rm docker-sdk-python3 flake8 docker tests
.PHONY: docs
docs: build-docs
docker run --rm -t -v `pwd`:/src docker-sdk-python-docs sphinx-build docs docs/_build
.PHONY: shell
shell: build
docker run -it -v /var/run/docker.sock:/var/run/docker.sock docker-sdk-python python
shell: build-py3
docker run -it -v /var/run/docker.sock:/var/run/docker.sock docker-sdk-python3 python

README.md

@ -1,6 +1,6 @@
# Docker SDK for Python
[![Build Status](https://travis-ci.org/docker/docker-py.svg?branch=master)](https://travis-ci.org/docker/docker-py)
[![Build Status](https://github.com/docker/docker-py/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/docker/docker-py/actions/workflows/ci.yml/)
A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps: run containers, manage containers, manage Swarms, etc.
@ -10,9 +10,8 @@ The latest stable version [is available on PyPI](https://pypi.python.org/pypi/do
pip install docker
If you are intending to connect to a docker host via TLS, add `docker[tls]` to your requirements instead, or install with pip:
pip install docker[tls]
> Older versions (< 6.0) required installing `docker[tls]` for SSL/TLS support.
> This is no longer necessary and is a no-op, but is supported for backwards compatibility.
## Usage
@ -58,7 +57,7 @@ You can stream logs:
```python
>>> for line in container.logs(stream=True):
... print line.strip()
... print(line.strip())
Reticulating spline 2...
Reticulating spline 3...
...

appveyor.yml

@ -1,13 +0,0 @@
version: '{branch}-{build}'
install:
- "SET PATH=C:\\Python37-x64;C:\\Python37-x64\\Scripts;%PATH%"
- "python --version"
- "python -m pip install --upgrade pip"
- "pip install tox==2.9.1"
# Build the binary after tests
build: false
test_script:
- "tox"

docker/api/build.py

@ -12,7 +12,7 @@ from .. import utils
log = logging.getLogger(__name__)
class BuildApiMixin(object):
class BuildApiMixin:
def build(self, path=None, tag=None, quiet=False, fileobj=None,
nocache=False, rm=False, timeout=None,
custom_context=False, encoding=None, pull=False,
@ -132,7 +132,7 @@ class BuildApiMixin(object):
for key in container_limits.keys():
if key not in constants.CONTAINER_LIMITS_KEYS:
raise errors.DockerException(
'Invalid container_limits key {0}'.format(key)
f'Invalid container_limits key {key}'
)
if custom_context:
@ -150,10 +150,10 @@ class BuildApiMixin(object):
dockerignore = os.path.join(path, '.dockerignore')
exclude = None
if os.path.exists(dockerignore):
with open(dockerignore, 'r') as f:
with open(dockerignore) as f:
exclude = list(filter(
lambda x: x != '' and x[0] != '#',
[l.strip() for l in f.read().splitlines()]
[line.strip() for line in f.read().splitlines()]
))
dockerfile = process_dockerfile(dockerfile, path)
context = utils.tar(
@ -313,7 +313,7 @@ class BuildApiMixin(object):
auth_data[auth.INDEX_URL] = auth_data.get(auth.INDEX_NAME, {})
log.debug(
'Sending auth config ({0})'.format(
'Sending auth config ({})'.format(
', '.join(repr(k) for k in auth_data.keys())
)
)
@ -344,9 +344,9 @@ def process_dockerfile(dockerfile, path):
if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or
os.path.relpath(abs_dockerfile, path).startswith('..')):
# Dockerfile not in context - read data to insert into tar later
with open(abs_dockerfile, 'r') as df:
with open(abs_dockerfile) as df:
return (
'.dockerfile.{0:x}'.format(random.getrandbits(160)),
f'.dockerfile.{random.getrandbits(160):x}',
df.read()
)

docker/api/client.py

@ -1,12 +1,25 @@
import json
import struct
import urllib
from functools import partial
import requests
import requests.exceptions
import six
import websocket
from .. import auth
from ..constants import (DEFAULT_NUM_POOLS, DEFAULT_NUM_POOLS_SSH,
DEFAULT_MAX_POOL_SIZE, DEFAULT_TIMEOUT_SECONDS,
DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM,
MINIMUM_DOCKER_API_VERSION, STREAM_HEADER_SIZE_BYTES)
from ..errors import (DockerException, InvalidVersion, TLSParameterError,
create_api_error_from_http_exception)
from ..tls import TLSConfig
from ..transport import SSLHTTPAdapter, UnixHTTPAdapter
from ..utils import check_resource, config, update_headers, utils
from ..utils.json_stream import json_stream
from ..utils.proxy import ProxyConfig
from ..utils.socket import consume_socket_output, demux_adaptor, frames_iter
from .build import BuildApiMixin
from .config import ConfigApiMixin
from .container import ContainerApiMixin
@ -19,22 +32,7 @@ from .secret import SecretApiMixin
from .service import ServiceApiMixin
from .swarm import SwarmApiMixin
from .volume import VolumeApiMixin
from .. import auth
from ..constants import (
DEFAULT_TIMEOUT_SECONDS, DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM,
DEFAULT_DOCKER_API_VERSION, MINIMUM_DOCKER_API_VERSION,
STREAM_HEADER_SIZE_BYTES, DEFAULT_NUM_POOLS_SSH, DEFAULT_NUM_POOLS
)
from ..errors import (
DockerException, InvalidVersion, TLSParameterError,
create_api_error_from_http_exception
)
from ..tls import TLSConfig
from ..transport import SSLHTTPAdapter, UnixHTTPAdapter
from ..utils import utils, check_resource, update_headers, config
from ..utils.socket import frames_iter, consume_socket_output, demux_adaptor
from ..utils.json_stream import json_stream
from ..utils.proxy import ProxyConfig
try:
from ..transport import NpipeHTTPAdapter
except ImportError:
@ -91,6 +89,11 @@ class APIClient(
user_agent (str): Set a custom user agent for requests to the server.
credstore_env (dict): Override environment variables when calling the
credential store process.
use_ssh_client (bool): If set to `True`, an ssh connection is made
via shelling out to the ssh client. Ensure the ssh client is
installed and configured on the host.
max_pool_size (int): The maximum number of connections
to save in the pool.
"""
__attrs__ = requests.Session.__attrs__ + ['_auth_configs',
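
For illustration, a minimal sketch of the two constructor arguments documented above (the SSH host below is hypothetical, and assumes an ssh binary is installed and configured):

import docker

client = docker.APIClient(
    base_url='ssh://user@docker-host',  # hypothetical host
    use_ssh_client=True,   # shell out to the ssh client instead of paramiko
    max_pool_size=10,      # cap on connections kept in the pool
)
print(client.version())
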
@ -102,8 +105,9 @@ class APIClient(
def __init__(self, base_url=None, version=None,
timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
user_agent=DEFAULT_USER_AGENT, num_pools=None,
credstore_env=None):
super(APIClient, self).__init__()
credstore_env=None, use_ssh_client=False,
max_pool_size=DEFAULT_MAX_POOL_SIZE):
super().__init__()
if tls and not base_url:
raise TLSParameterError(
@ -138,7 +142,8 @@ class APIClient(
if base_url.startswith('http+unix://'):
self._custom_adapter = UnixHTTPAdapter(
base_url, timeout, pool_connections=num_pools
base_url, timeout, pool_connections=num_pools,
max_pool_size=max_pool_size
)
self.mount('http+docker://', self._custom_adapter)
self._unmount('http://', 'https://')
@ -152,7 +157,8 @@ class APIClient(
)
try:
self._custom_adapter = NpipeHTTPAdapter(
base_url, timeout, pool_connections=num_pools
base_url, timeout, pool_connections=num_pools,
max_pool_size=max_pool_size
)
except NameError:
raise DockerException(
@ -163,7 +169,8 @@ class APIClient(
elif base_url.startswith('ssh://'):
try:
self._custom_adapter = SSHHTTPAdapter(
base_url, timeout, pool_connections=num_pools
base_url, timeout, pool_connections=num_pools,
max_pool_size=max_pool_size, shell_out=use_ssh_client
)
except NameError:
raise DockerException(
@ -183,16 +190,16 @@ class APIClient(
self.base_url = base_url
# version detection needs to be after unix adapter mounting
if version is None:
self._version = DEFAULT_DOCKER_API_VERSION
elif isinstance(version, six.string_types):
if version.lower() == 'auto':
self._version = self._retrieve_server_version()
else:
self._version = version
if version is None or (isinstance(
version,
str
) and version.lower() == 'auto'):
self._version = self._retrieve_server_version()
else:
self._version = version
if not isinstance(self._version, str):
raise DockerException(
'Version parameter must be a string or None. Found {0}'.format(
'Version parameter must be a string or None. Found {}'.format(
type(version).__name__
)
)
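
The practical effect of this change, as a hedged sketch: leaving ``version`` unset is now equivalent to passing ``'auto'``, so the client negotiates the API version with the daemon instead of assuming the library default.

import docker

# The client queries the daemon for its API version on first use.
client = docker.APIClient(base_url='unix://var/run/docker.sock')
print(client.version()['ApiVersion'])  # version reported by the daemon
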
@ -212,7 +219,7 @@ class APIClient(
)
except Exception as e:
raise DockerException(
'Error while fetching server API version: {0}'.format(e)
f'Error while fetching server API version: {e}'
)
def _set_request_timeout(self, kwargs):
@ -239,28 +246,28 @@ class APIClient(
def _url(self, pathfmt, *args, **kwargs):
for arg in args:
if not isinstance(arg, six.string_types):
if not isinstance(arg, str):
raise ValueError(
'Expected a string but found {0} ({1}) '
'Expected a string but found {} ({}) '
'instead'.format(arg, type(arg))
)
quote_f = partial(six.moves.urllib.parse.quote, safe="/:")
quote_f = partial(urllib.parse.quote, safe="/:")
args = map(quote_f, args)
if kwargs.get('versioned_api', True):
return '{0}/v{1}{2}'.format(
return '{}/v{}{}'.format(
self.base_url, self._version, pathfmt.format(*args)
)
else:
return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
return f'{self.base_url}{pathfmt.format(*args)}'
def _raise_for_status(self, response):
"""Raises stored :class:`APIError`, if one occurred."""
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e:
raise create_api_error_from_http_exception(e)
raise create_api_error_from_http_exception(e) from e
def _result(self, response, json=False, binary=False):
assert not (json and binary)
@ -277,7 +284,7 @@ class APIClient(
# so we do this disgusting thing here.
data2 = {}
if data is not None and isinstance(data, dict):
for k, v in six.iteritems(data):
for k, v in iter(data.items()):
if v is not None:
data2[k] = v
elif data is not None:
@ -313,12 +320,10 @@ class APIClient(
sock = response.raw._fp.fp.raw.sock
elif self.base_url.startswith('http+docker://ssh'):
sock = response.raw._fp.fp.channel
elif six.PY3:
else:
sock = response.raw._fp.fp.raw
if self.base_url.startswith("https://"):
sock = sock._sock
else:
sock = response.raw._fp.fp._sock
try:
# Keep a reference to the response to stop it being garbage
# collected. If the response is garbage collected, it will
@ -336,8 +341,7 @@ class APIClient(
if response.raw._fp.chunked:
if decode:
for chunk in json_stream(self._stream_helper(response, False)):
yield chunk
yield from json_stream(self._stream_helper(response, False))
else:
reader = response.raw
while not reader.closed:
@ -393,8 +397,13 @@ class APIClient(
def _stream_raw_result(self, response, chunk_size=1, decode=True):
''' Stream result for TTY-enabled container and raw binary data'''
self._raise_for_status(response)
for out in response.iter_content(chunk_size, decode):
yield out
# Disable timeout on the underlying socket to prevent
# Read timed out(s) for long running processes
socket = self._get_raw_response_socket(response)
self._disable_socket_timeout(socket)
yield from response.iter_content(chunk_size, decode)
def _read_from_socket(self, response, stream, tty=True, demux=False):
socket = self._get_raw_response_socket(response)
@ -458,7 +467,7 @@ class APIClient(
self._result(res, binary=True)
self._raise_for_status(res)
sep = six.binary_type()
sep = b''
if stream:
return self._multiplexed_response_stream_helper(res)
else:
@ -472,7 +481,7 @@ class APIClient(
def get_adapter(self, url):
try:
return super(APIClient, self).get_adapter(url)
return super().get_adapter(url)
except requests.exceptions.InvalidSchema as e:
if self._custom_adapter:
return self._custom_adapter
@ -490,7 +499,7 @@ class APIClient(
Args:
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
otherwise``$HOME/.dockercfg``)
otherwise ``$HOME/.dockercfg``)
Returns:
None

docker/api/config.py

@ -1,13 +1,11 @@
import base64
import six
from .. import utils
class ConfigApiMixin(object):
class ConfigApiMixin:
@utils.minimum_version('1.30')
def create_config(self, name, data, labels=None):
def create_config(self, name, data, labels=None, templating=None):
"""
Create a config
@ -15,6 +13,9 @@ class ConfigApiMixin(object):
name (string): Name of the config
data (bytes): Config data to be stored
labels (dict): A mapping of labels to assign to the config
templating (dict): dictionary containing the name of the
templating driver to be used, expressed as
``{ name: <templating_driver_name> }``
Returns (dict): ID of the newly created config
"""
@ -22,12 +23,12 @@ class ConfigApiMixin(object):
data = data.encode('utf-8')
data = base64.b64encode(data)
if six.PY3:
data = data.decode('ascii')
data = data.decode('ascii')
body = {
'Data': data,
'Name': name,
'Labels': labels
'Labels': labels,
'Templating': templating
}
url = self._url('/configs/create')

docker/api/container.py

@ -1,7 +1,5 @@
from datetime import datetime
import six
from .. import errors
from .. import utils
from ..constants import DEFAULT_DATA_CHUNK_SIZE
@ -12,7 +10,7 @@ from ..types import HostConfig
from ..types import NetworkingConfig
class ContainerApiMixin(object):
class ContainerApiMixin:
@utils.check_resource('container')
def attach(self, container, stdout=True, stderr=True,
stream=False, logs=False, demux=False):
@ -225,7 +223,7 @@ class ContainerApiMixin(object):
mac_address=None, labels=None, stop_signal=None,
networking_config=None, healthcheck=None,
stop_timeout=None, runtime=None,
use_config_proxy=True):
use_config_proxy=True, platform=None):
"""
Creates a container. Parameters are similar to those for the ``docker
run`` command except it doesn't support the attach options (``-a``).
@ -244,9 +242,9 @@ class ContainerApiMixin(object):
.. code-block:: python
container_id = cli.create_container(
container_id = client.api.create_container(
'busybox', 'ls', ports=[1111, 2222],
host_config=cli.create_host_config(port_bindings={
host_config=client.api.create_host_config(port_bindings={
1111: 4567,
2222: None
})
@ -258,22 +256,24 @@ class ContainerApiMixin(object):
.. code-block:: python
cli.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)})
client.api.create_host_config(
port_bindings={1111: ('127.0.0.1', 4567)}
)
Or without host port assignment:
.. code-block:: python
cli.create_host_config(port_bindings={1111: ('127.0.0.1',)})
client.api.create_host_config(port_bindings={1111: ('127.0.0.1',)})
If you wish to use UDP instead of TCP (default), you need to declare
ports as such in both the config and host config:
.. code-block:: python
container_id = cli.create_container(
container_id = client.api.create_container(
'busybox', 'ls', ports=[(1111, 'udp'), 2222],
host_config=cli.create_host_config(port_bindings={
host_config=client.api.create_host_config(port_bindings={
'1111/udp': 4567, 2222: None
})
)
@ -283,7 +283,7 @@ class ContainerApiMixin(object):
.. code-block:: python
cli.create_host_config(port_bindings={
client.api.create_host_config(port_bindings={
1111: [1234, 4567]
})
@ -291,7 +291,7 @@ class ContainerApiMixin(object):
.. code-block:: python
cli.create_host_config(port_bindings={
client.api.create_host_config(port_bindings={
1111: [
('192.168.0.100', 1234),
('192.168.0.101', 1234)
@ -307,9 +307,9 @@ class ContainerApiMixin(object):
.. code-block:: python
container_id = cli.create_container(
container_id = client.api.create_container(
'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
host_config=cli.create_host_config(binds={
host_config=client.api.create_host_config(binds={
'/home/user1/': {
'bind': '/mnt/vol2',
'mode': 'rw',
@ -326,9 +326,9 @@ class ContainerApiMixin(object):
.. code-block:: python
container_id = cli.create_container(
container_id = client.api.create_container(
'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
host_config=cli.create_host_config(binds=[
host_config=client.api.create_host_config(binds=[
'/home/user1/:/mnt/vol2',
'/var/www:/mnt/vol1:ro',
])
@ -346,15 +346,15 @@ class ContainerApiMixin(object):
.. code-block:: python
networking_config = docker_client.create_networking_config({
'network1': docker_client.create_endpoint_config(
networking_config = client.api.create_networking_config({
'network1': client.api.create_endpoint_config(
ipv4_address='172.28.0.124',
aliases=['foo', 'bar'],
links=['container2']
)
})
ctnr = docker_client.create_container(
ctnr = client.api.create_container(
img, command, networking_config=networking_config
)
@ -398,6 +398,7 @@ class ContainerApiMixin(object):
configuration file (``~/.docker/config.json`` by default)
contains a proxy configuration, the corresponding environment
variables will be set in the container being created.
platform (str): Platform in the format ``os[/arch[/variant]]``.
Returns:
A dictionary with an image 'Id' key and a 'Warnings' key.
@ -408,7 +409,7 @@ class ContainerApiMixin(object):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if isinstance(volumes, six.string_types):
if isinstance(volumes, str):
volumes = [volumes, ]
if isinstance(environment, dict):
@ -427,16 +428,22 @@ class ContainerApiMixin(object):
stop_signal, networking_config, healthcheck,
stop_timeout, runtime
)
return self.create_container_from_config(config, name)
return self.create_container_from_config(config, name, platform)
def create_container_config(self, *args, **kwargs):
return ContainerConfig(self._version, *args, **kwargs)
def create_container_from_config(self, config, name=None):
def create_container_from_config(self, config, name=None, platform=None):
u = self._url("/containers/create")
params = {
'name': name
}
if platform:
if utils.version_lt(self._version, '1.41'):
raise errors.InvalidVersion(
'platform is not supported for API version < 1.41'
)
params['platform'] = platform
res = self._post_json(u, data=config, params=params)
return self._result(res, True)
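
A hedged example of the new ``platform`` parameter; per the check above it raises ``InvalidVersion`` against daemons older than API 1.41 (image and platform below are illustrative):

import docker

client = docker.APIClient(version='auto')
container = client.create_container(
    'busybox', 'true',
    platform='linux/arm64',  # os[/arch[/variant]]
)
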
@ -480,6 +487,9 @@ class ContainerApiMixin(object):
For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
to have read-write access to the host's ``/dev/sda`` via a
node named ``/dev/xvda`` inside the container.
device_requests (:py:class:`list`): Expose host resources such as
GPUs to the container, as a list of
:py:class:`docker.types.DeviceRequest` instances.
dns (:py:class:`list`): Set custom DNS servers.
dns_opt (:py:class:`list`): Additional options to be added to the
container's ``resolv.conf`` file
@ -503,7 +513,7 @@ class ContainerApiMixin(object):
bytes) or a string with a units identification char
(``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
specified without a units character, bytes are assumed as an
mem_reservation (int or str): Memory soft limit.
mem_reservation (float or str): Memory soft limit.
mem_swappiness (int): Tune a container's memory swappiness
behavior. Accepts number between 0 and 100.
memswap_limit (str or int): Maximum amount of memory + swap a
@ -520,6 +530,8 @@ class ContainerApiMixin(object):
- ``container:<name|id>`` Reuse another container's network
stack.
- ``host`` Use the host network stack.
This mode is incompatible with ``port_bindings``.
oom_kill_disable (bool): Whether to disable OOM killer.
oom_score_adj (int): An integer value containing the score given
to the container in order to tune OOM killer preferences.
@ -528,7 +540,8 @@ class ContainerApiMixin(object):
pids_limit (int): Tune a container's pids limit. Set ``-1`` for
unlimited.
port_bindings (dict): See :py:meth:`create_container`
for more information.
for more information.
Incompatible with ``host`` in ``network_mode``.
privileged (bool): Give extended privileges to this container.
publish_all_ports (bool): Publish all ports to the host.
read_only (bool): Mount the container's root filesystem as read
@ -575,10 +588,13 @@ class ContainerApiMixin(object):
Example:
>>> cli.create_host_config(privileged=True, cap_drop=['MKNOD'],
volumes_from=['nostalgic_newton'])
>>> client.api.create_host_config(
... privileged=True,
... cap_drop=['MKNOD'],
... volumes_from=['nostalgic_newton'],
... )
{'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True,
'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False}
'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False}
"""
if not kwargs:
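
A sketch of the newly documented ``device_requests`` parameter, mirroring ``docker run --gpus all`` (assumes a host with the NVIDIA container runtime):

import docker
from docker.types import DeviceRequest

client = docker.APIClient(version='auto')
host_config = client.create_host_config(
    device_requests=[
        DeviceRequest(count=-1, capabilities=[['gpu']])  # -1 = all GPUs
    ]
)
container = client.create_container(
    'nvidia/cuda:11.0-base', 'nvidia-smi', host_config=host_config
)
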
@ -606,11 +622,11 @@ class ContainerApiMixin(object):
Example:
>>> docker_client.create_network('network1')
>>> networking_config = docker_client.create_networking_config({
'network1': docker_client.create_endpoint_config()
>>> client.api.create_network('network1')
>>> networking_config = client.api.create_networking_config({
'network1': client.api.create_endpoint_config()
})
>>> container = docker_client.create_container(
>>> container = client.api.create_container(
img, command, networking_config=networking_config
)
@ -636,13 +652,15 @@ class ContainerApiMixin(object):
network, using the IPv6 protocol. Defaults to ``None``.
link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
addresses.
driver_opt (dict): A dictionary of options to provide to the
network driver. Defaults to ``None``.
Returns:
(dict) An endpoint config.
Example:
>>> endpoint_config = client.create_endpoint_config(
>>> endpoint_config = client.api.create_endpoint_config(
aliases=['web', 'app'],
links={'app_db': 'db', 'another': None},
ipv4_address='132.65.0.123'
@ -694,7 +712,8 @@ class ContainerApiMixin(object):
return self._stream_raw_result(res, chunk_size, False)
@utils.check_resource('container')
def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE,
encode_stream=False):
"""
Retrieve a file or folder from a container in the form of a tar
archive.
@ -705,6 +724,8 @@ class ContainerApiMixin(object):
chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB
encode_stream (bool): Determines if data should be encoded
(gzip-compressed) during transmission. Default: False
Returns:
(tuple): First element is a raw tar data stream. Second element is
@ -718,7 +739,7 @@ class ContainerApiMixin(object):
>>> c = docker.APIClient()
>>> f = open('./sh_bin.tar', 'wb')
>>> bits, stat = c.get_archive(container, '/bin/sh')
>>> bits, stat = c.api.get_archive(container, '/bin/sh')
>>> print(stat)
{'name': 'sh', 'size': 1075464, 'mode': 493,
'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''}
@ -729,8 +750,13 @@ class ContainerApiMixin(object):
params = {
'path': path
}
headers = {
"Accept-Encoding": "gzip, deflate"
} if encode_stream else {
"Accept-Encoding": "identity"
}
url = self._url('/containers/{0}/archive', container)
res = self._get(url, params=params, stream=True)
res = self._get(url, params=params, stream=True, headers=headers)
self._raise_for_status(res)
encoded_stat = res.headers.get('x-docker-container-path-stat')
return (
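
A minimal sketch of ``encode_stream``, which only switches the ``Accept-Encoding`` header shown above so the daemon may gzip the archive in transit (container name is hypothetical):

import docker

client = docker.APIClient(version='auto')
bits, stat = client.get_archive('my-container', '/bin/sh',
                                encode_stream=True)
with open('./sh_bin.tar', 'wb') as f:
    for chunk in bits:
        f.write(chunk)
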
@ -774,7 +800,7 @@ class ContainerApiMixin(object):
url = self._url("/containers/{0}/kill", container)
params = {}
if signal is not None:
if not isinstance(signal, six.string_types):
if not isinstance(signal, str):
signal = int(signal)
params['signal'] = signal
res = self._post(url, params=params)
@ -900,7 +926,7 @@ class ContainerApiMixin(object):
.. code-block:: python
>>> cli.port('7174d6347063', 80)
>>> client.api.port('7174d6347063', 80)
[{'HostIp': '0.0.0.0', 'HostPort': '80'}]
"""
res = self._get(self._url("/containers/{0}/json", container))
@ -1079,10 +1105,10 @@ class ContainerApiMixin(object):
Example:
>>> container = cli.create_container(
>>> container = client.api.create_container(
... image='busybox:latest',
... command='/bin/sleep 30')
>>> cli.start(container=container.get('Id'))
>>> client.api.start(container=container.get('Id'))
"""
if args or kwargs:
raise errors.DeprecatedMethod(
@ -1120,7 +1146,7 @@ class ContainerApiMixin(object):
else:
if decode:
raise errors.InvalidArgument(
"decode is only available in conjuction with stream=True"
"decode is only available in conjunction with stream=True"
)
return self._result(self._get(url, params={'stream': False}),
json=True)
@ -1206,8 +1232,8 @@ class ContainerApiMixin(object):
cpu_shares (int): CPU shares (relative weight)
cpuset_cpus (str): CPUs in which to allow execution
cpuset_mems (str): MEMs in which to allow execution
mem_limit (int or str): Memory limit
mem_reservation (int or str): Memory soft limit
mem_limit (float or str): Memory limit
mem_reservation (float or str): Memory soft limit
memswap_limit (int or str): Total memory (memory + swap), -1 to
disable swap
kernel_memory (int or str): Kernel memory limit

docker/api/daemon.py

@ -4,7 +4,7 @@ from datetime import datetime
from .. import auth, types, utils
class DaemonApiMixin(object):
class DaemonApiMixin:
@utils.minimum_version('1.25')
def df(self):
"""
@ -109,7 +109,7 @@ class DaemonApiMixin(object):
the Docker server.
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
otherwise``$HOME/.dockercfg``)
otherwise ``$HOME/.dockercfg``)
Returns:
(dict): The response from the login request

docker/api/exec_api.py

@ -1,10 +1,8 @@
import six
from .. import errors
from .. import utils
class ExecApiMixin(object):
class ExecApiMixin:
@utils.check_resource('container')
def exec_create(self, container, cmd, stdout=True, stderr=True,
stdin=False, tty=False, privileged=False, user='',
@ -45,7 +43,7 @@ class ExecApiMixin(object):
'Setting environment for exec is not supported in API < 1.25'
)
if isinstance(cmd, six.string_types):
if isinstance(cmd, str):
cmd = utils.split_command(cmd)
if isinstance(environment, dict):

docker/api/image.py

@ -1,15 +1,13 @@
import logging
import os
import six
from .. import auth, errors, utils
from ..constants import DEFAULT_DATA_CHUNK_SIZE
log = logging.getLogger(__name__)
class ImageApiMixin(object):
class ImageApiMixin:
@utils.check_resource('image')
def get_image(self, image, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
@ -31,7 +29,7 @@ class ImageApiMixin(object):
Example:
>>> image = cli.get_image("busybox:latest")
>>> image = client.api.get_image("busybox:latest")
>>> f = open('/tmp/busybox-latest.tar', 'wb')
>>> for chunk in image:
>>> f.write(chunk)
@ -81,10 +79,18 @@ class ImageApiMixin(object):
If the server returns an error.
"""
params = {
'filter': name,
'only_ids': 1 if quiet else 0,
'all': 1 if all else 0,
}
if name:
if utils.version_lt(self._version, '1.25'):
# only use "filter" on API 1.24 and under, as it is deprecated
params['filter'] = name
else:
if filters:
filters['reference'] = name
else:
filters = {'reference': name}
if filters:
params['filters'] = utils.convert_filters(filters)
res = self._result(self._get(self._url("/images/json"), params=params),
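
In effect, on API 1.25 and later a ``name`` argument is now translated into a ``reference`` filter rather than the deprecated ``filter`` query parameter, so the two calls below should be equivalent (a sketch):

import docker

client = docker.APIClient(version='auto')
by_name = client.images(name='busybox')
by_filter = client.images(filters={'reference': 'busybox'})
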
@ -122,7 +128,7 @@ class ImageApiMixin(object):
params = _import_image_params(
repository, tag, image,
src=(src if isinstance(src, six.string_types) else None),
src=(src if isinstance(src, str) else None),
changes=changes
)
headers = {'Content-Type': 'application/tar'}
@ -131,7 +137,7 @@ class ImageApiMixin(object):
return self._result(
self._post(u, data=None, params=params)
)
elif isinstance(src, six.string_types): # from file path
elif isinstance(src, str): # from file path
with open(src, 'rb') as f:
return self._result(
self._post(
@ -343,13 +349,14 @@ class ImageApiMixin(object):
return self._result(self._post(url, params=params), True)
def pull(self, repository, tag=None, stream=False, auth_config=None,
decode=False, platform=None):
decode=False, platform=None, all_tags=False):
"""
Pulls an image. Similar to the ``docker pull`` command.
Args:
repository (str): The repository to pull
tag (str): The tag to pull
tag (str): The tag to pull. If ``tag`` is ``None`` or empty, it
is set to ``latest``.
stream (bool): Stream the output as a generator. Make sure to
consume the generator, otherwise pull might get cancelled.
auth_config (dict): Override the credentials that are found in the
@ -358,6 +365,8 @@ class ImageApiMixin(object):
decode (bool): Decode the JSON data from the server into dicts.
Only applies with ``stream=True``
platform (str): Platform in the format ``os[/arch[/variant]]``
all_tags (bool): Pull all image tags; the ``tag`` parameter is
ignored.
Returns:
(generator or str): The output
@ -368,7 +377,8 @@ class ImageApiMixin(object):
Example:
>>> for line in cli.pull('busybox', stream=True, decode=True):
>>> resp = client.api.pull('busybox', stream=True, decode=True)
... for line in resp:
... print(json.dumps(line, indent=4))
{
"status": "Pulling image (latest) from busybox",
@ -382,8 +392,12 @@ class ImageApiMixin(object):
}
"""
if not tag:
repository, tag = utils.parse_repository_tag(repository)
repository, image_tag = utils.parse_repository_tag(repository)
tag = tag or image_tag or 'latest'
if all_tags:
tag = None
registry, repo_name = auth.resolve_repository_name(repository)
params = {
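
A hedged sketch of the new ``all_tags`` flag, which clears the tag entirely so the daemon pulls every tag in the repository (mirrors ``docker pull --all-tags``):

import docker

client = docker.APIClient(version='auto')
client.pull('busybox')  # tag defaults to 'latest' when omitted
for line in client.pull('busybox', all_tags=True, stream=True, decode=True):
    print(line.get('status'))
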
@ -443,7 +457,12 @@ class ImageApiMixin(object):
If the server returns an error.
Example:
>>> for line in cli.push('yourname/app', stream=True, decode=True):
>>> resp = client.api.push(
... 'yourname/app',
... stream=True,
... decode=True,
... )
... for line in resp:
... print(line)
{'status': 'Pushing repository yourname/app (1 tags)'}
{'status': 'Pushing','progressDetail': {}, 'id': '511136ea3c5a'}
@ -494,13 +513,14 @@ class ImageApiMixin(object):
res = self._delete(self._url("/images/{0}", image), params=params)
return self._result(res, True)
def search(self, term):
def search(self, term, limit=None):
"""
Search for images on Docker Hub. Similar to the ``docker search``
command.
Args:
term (str): A term to search for.
limit (int): The maximum number of results to return.
Returns:
(list of dicts): The response of the search.
@ -509,8 +529,12 @@ class ImageApiMixin(object):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {'term': term}
if limit is not None:
params['limit'] = limit
return self._result(
self._get(self._url("/images/search"), params={'term': term}),
self._get(self._url("/images/search"), params=params),
True
)
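
For example (a sketch), capping Docker Hub search results at five entries:

import docker

client = docker.APIClient(version='auto')
for result in client.search('busybox', limit=5):
    print(result['name'], result['star_count'])
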
@ -534,7 +558,7 @@ class ImageApiMixin(object):
Example:
>>> client.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
>>> client.api.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
force=True)
"""
params = {
@ -551,7 +575,7 @@ class ImageApiMixin(object):
def is_file(src):
try:
return (
isinstance(src, six.string_types) and
isinstance(src, str) and
os.path.isfile(src)
)
except TypeError: # a data string will make isfile() raise a TypeError

docker/api/network.py

@ -4,7 +4,7 @@ from ..utils import version_lt
from .. import utils
class NetworkApiMixin(object):
class NetworkApiMixin:
def networks(self, names=None, ids=None, filters=None):
"""
List networks. Similar to the ``docker network ls`` command.
@ -75,7 +75,7 @@ class NetworkApiMixin(object):
Example:
A network using the bridge driver:
>>> client.create_network("network1", driver="bridge")
>>> client.api.create_network("network1", driver="bridge")
You can also create more advanced networks with custom IPAM
configurations. For example, setting the subnet to
@ -90,7 +90,7 @@ class NetworkApiMixin(object):
>>> ipam_config = docker.types.IPAMConfig(
pool_configs=[ipam_pool]
)
>>> docker_client.create_network("network1", driver="bridge",
>>> client.api.create_network("network1", driver="bridge",
ipam=ipam_config)
"""
if options is not None and not isinstance(options, dict):
@ -216,7 +216,7 @@ class NetworkApiMixin(object):
def connect_container_to_network(self, container, net_id,
ipv4_address=None, ipv6_address=None,
aliases=None, links=None,
link_local_ips=None):
link_local_ips=None, driver_opt=None):
"""
Connect a container to a network.
@ -240,7 +240,8 @@ class NetworkApiMixin(object):
"Container": container,
"EndpointConfig": self.create_endpoint_config(
aliases=aliases, links=links, ipv4_address=ipv4_address,
ipv6_address=ipv6_address, link_local_ips=link_local_ips
ipv6_address=ipv6_address, link_local_ips=link_local_ips,
driver_opt=driver_opt
),
}
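
A sketch of the new ``driver_opt`` pass-through; the container, network, and option key below are hypothetical and driver-specific:

import docker

client = docker.APIClient(version='auto')
client.connect_container_to_network(
    container='my-container',
    net_id='my-network',
    driver_opt={'com.example.bandwidth': '10m'},  # handed to the driver
)
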

docker/api/plugin.py

@ -1,9 +1,7 @@
import six
from .. import auth, utils
class PluginApiMixin(object):
class PluginApiMixin:
@utils.minimum_version('1.25')
@utils.check_resource('name')
def configure_plugin(self, name, options):
@ -21,7 +19,7 @@ class PluginApiMixin(object):
url = self._url('/plugins/{0}/set', name)
data = options
if isinstance(data, dict):
data = ['{0}={1}'.format(k, v) for k, v in six.iteritems(data)]
data = [f'{k}={v}' for k, v in data.items()]
res = self._post_json(url, data=data)
self._raise_for_status(res)
return True
@ -53,19 +51,20 @@ class PluginApiMixin(object):
return True
@utils.minimum_version('1.25')
def disable_plugin(self, name):
def disable_plugin(self, name, force=False):
"""
Disable an installed plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
force (bool): To enable the force query parameter.
Returns:
``True`` if successful
"""
url = self._url('/plugins/{0}/disable', name)
res = self._post(url)
res = self._post(url, params={'force': force})
self._raise_for_status(res)
return True
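
A minimal sketch of the new ``force`` flag, which maps to the ``force`` query parameter and disables a plugin even if it is in use (plugin name is illustrative):

import docker

client = docker.APIClient(version='auto')
client.disable_plugin('vieux/sshfs:latest', force=True)
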

docker/api/secret.py

@ -1,12 +1,10 @@
import base64
import six
from .. import errors
from .. import utils
class SecretApiMixin(object):
class SecretApiMixin:
@utils.minimum_version('1.25')
def create_secret(self, name, data, labels=None, driver=None):
"""
@ -25,8 +23,7 @@ class SecretApiMixin(object):
data = data.encode('utf-8')
data = base64.b64encode(data)
if six.PY3:
data = data.decode('ascii')
data = data.decode('ascii')
body = {
'Data': data,
'Name': name,

docker/api/service.py

@ -45,7 +45,7 @@ def _check_api_features(version, task_template, update_config, endpoint_spec,
if task_template is not None:
if 'ForceUpdate' in task_template and utils.version_lt(
version, '1.25'):
raise_version_error('force_update', '1.25')
raise_version_error('force_update', '1.25')
if task_template.get('Placement'):
if utils.version_lt(version, '1.30'):
@ -113,7 +113,7 @@ def _merge_task_template(current, override):
return merged
class ServiceApiMixin(object):
class ServiceApiMixin:
@utils.minimum_version('1.24')
def create_service(
self, task_template, name=None, labels=None, mode=None,

docker/api/swarm.py

@ -1,5 +1,5 @@
import logging
from six.moves import http_client
import http.client as http_client
from ..constants import DEFAULT_SWARM_ADDR_POOL, DEFAULT_SWARM_SUBNET_SIZE
from .. import errors
from .. import types
@ -8,7 +8,7 @@ from .. import utils
log = logging.getLogger(__name__)
class SwarmApiMixin(object):
class SwarmApiMixin:
def create_swarm_spec(self, *args, **kwargs):
"""
@ -58,10 +58,10 @@ class SwarmApiMixin(object):
Example:
>>> spec = client.create_swarm_spec(
>>> spec = client.api.create_swarm_spec(
snapshot_interval=5000, log_entries_for_slow_followers=1200
)
>>> client.init_swarm(
>>> client.api.init_swarm(
advertise_addr='eth0', listen_addr='0.0.0.0:5000',
force_new_cluster=False, swarm_spec=spec
)
@ -354,8 +354,8 @@ class SwarmApiMixin(object):
Example:
>>> key = client.get_unlock_key()
>>> client.unlock_node(key)
>>> key = client.api.get_unlock_key()
>>> client.unlock_swarm(key)
"""
if isinstance(key, dict):
@ -396,7 +396,7 @@ class SwarmApiMixin(object):
'Role': 'manager',
'Labels': {'foo': 'bar'}
}
>>> client.update_node(node_id='24ifsmvkjbyhk', version=8,
>>> client.api.update_node(node_id='24ifsmvkjbyhk', version=8,
node_spec=node_spec)
"""

docker/api/volume.py

@ -2,7 +2,7 @@ from .. import errors
from .. import utils
class VolumeApiMixin(object):
class VolumeApiMixin:
def volumes(self, filters=None):
"""
List volumes currently registered by the docker daemon. Similar to the
@ -21,7 +21,7 @@ class VolumeApiMixin(object):
Example:
>>> cli.volumes()
>>> client.api.volumes()
{u'Volumes': [{u'Driver': u'local',
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
u'Name': u'foobar'},
@ -56,15 +56,18 @@ class VolumeApiMixin(object):
Example:
>>> volume = cli.create_volume(name='foobar', driver='local',
driver_opts={'foo': 'bar', 'baz': 'false'},
labels={"key": "value"})
>>> print(volume)
>>> volume = client.api.create_volume(
... name='foobar',
... driver='local',
... driver_opts={'foo': 'bar', 'baz': 'false'},
... labels={"key": "value"},
... )
... print(volume)
{u'Driver': u'local',
u'Labels': {u'key': u'value'},
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
u'Name': u'foobar',
u'Scope': u'local'}
u'Labels': {u'key': u'value'},
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
u'Name': u'foobar',
u'Scope': u'local'}
"""
url = self._url('/volumes/create')
@ -104,7 +107,7 @@ class VolumeApiMixin(object):
Example:
>>> cli.inspect_volume('foobar')
>>> client.api.inspect_volume('foobar')
{u'Driver': u'local',
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
u'Name': u'foobar'}

docker/auth.py

@ -2,14 +2,12 @@ import base64
import json
import logging
import six
from . import credentials
from . import errors
from .utils import config
INDEX_NAME = 'docker.io'
INDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)
INDEX_URL = f'https://index.{INDEX_NAME}/v1/'
TOKEN_USERNAME = '<token>'
log = logging.getLogger(__name__)
@ -18,13 +16,13 @@ log = logging.getLogger(__name__)
def resolve_repository_name(repo_name):
if '://' in repo_name:
raise errors.InvalidRepository(
'Repository name cannot contain a scheme ({0})'.format(repo_name)
f'Repository name cannot contain a scheme ({repo_name})'
)
index_name, remote_name = split_repo_name(repo_name)
if index_name[0] == '-' or index_name[-1] == '-':
raise errors.InvalidRepository(
'Invalid index name ({0}). Cannot begin or end with a'
'Invalid index name ({}). Cannot begin or end with a'
' hyphen.'.format(index_name)
)
return resolve_index_name(index_name), remote_name
@ -98,10 +96,10 @@ class AuthConfig(dict):
"""
conf = {}
for registry, entry in six.iteritems(entries):
for registry, entry in entries.items():
if not isinstance(entry, dict):
log.debug(
'Config entry for key {0} is not auth config'.format(
'Config entry for key {} is not auth config'.format(
registry
)
)
@ -111,14 +109,14 @@ class AuthConfig(dict):
# keys is not formatted properly.
if raise_on_error:
raise errors.InvalidConfigFile(
'Invalid configuration for registry {0}'.format(
'Invalid configuration for registry {}'.format(
registry
)
)
return {}
if 'identitytoken' in entry:
log.debug(
'Found an IdentityToken entry for registry {0}'.format(
'Found an IdentityToken entry for registry {}'.format(
registry
)
)
@ -132,7 +130,7 @@ class AuthConfig(dict):
# a valid value in the auths config.
# https://github.com/docker/compose/issues/3265
log.debug(
'Auth data for {0} is absent. Client might be using a '
'Auth data for {} is absent. Client might be using a '
'credentials store instead.'.format(registry)
)
conf[registry] = {}
@ -140,7 +138,7 @@ class AuthConfig(dict):
username, password = decode_auth(entry['auth'])
log.debug(
'Found entry (registry={0}, username={1})'
'Found entry (registry={}, username={})'
.format(repr(registry), repr(username))
)
@ -170,7 +168,7 @@ class AuthConfig(dict):
try:
with open(config_file) as f:
config_dict = json.load(f)
except (IOError, KeyError, ValueError) as e:
except (OSError, KeyError, ValueError) as e:
# Likely missing new Docker config file or it's in an
# unknown format, continue to attempt to read old location
# and format.
@ -230,7 +228,7 @@ class AuthConfig(dict):
store_name = self.get_credential_store(registry)
if store_name is not None:
log.debug(
'Using credentials store "{0}"'.format(store_name)
f'Using credentials store "{store_name}"'
)
cfg = self._resolve_authconfig_credstore(registry, store_name)
if cfg is not None:
@ -239,15 +237,15 @@ class AuthConfig(dict):
# Default to the public index server
registry = resolve_index_name(registry) if registry else INDEX_NAME
log.debug("Looking for auth entry for {0}".format(repr(registry)))
log.debug(f"Looking for auth entry for {repr(registry)}")
if registry in self.auths:
log.debug("Found {0}".format(repr(registry)))
log.debug(f"Found {repr(registry)}")
return self.auths[registry]
for key, conf in six.iteritems(self.auths):
for key, conf in self.auths.items():
if resolve_index_name(key) == registry:
log.debug("Found {0}".format(repr(key)))
log.debug(f"Found {repr(key)}")
return conf
log.debug("No entry found")
@ -258,7 +256,7 @@ class AuthConfig(dict):
# The ecosystem is a little schizophrenic with index.docker.io VS
# docker.io - in that case, it seems the full URL is necessary.
registry = INDEX_URL
log.debug("Looking for auth entry for {0}".format(repr(registry)))
log.debug(f"Looking for auth entry for {repr(registry)}")
store = self._get_store_instance(credstore_name)
try:
data = store.get(registry)
@ -278,7 +276,7 @@ class AuthConfig(dict):
return None
except credentials.StoreError as e:
raise errors.DockerException(
'Credentials store error: {0}'.format(repr(e))
f'Credentials store error: {repr(e)}'
)
def _get_store_instance(self, name):
@ -329,7 +327,7 @@ def convert_to_hostname(url):
def decode_auth(auth):
if isinstance(auth, six.string_types):
if isinstance(auth, str):
auth = auth.encode('ascii')
s = base64.b64decode(auth)
login, pwd = s.split(b':', 1)
@ -385,7 +383,6 @@ def _load_legacy_config(config_file):
}}
except Exception as e:
log.debug(e)
pass
log.debug("All parsing attempts failed - returning empty config")
return {}

docker/client.py

@ -1,5 +1,5 @@
from .api.client import APIClient
from .constants import DEFAULT_TIMEOUT_SECONDS
from .constants import (DEFAULT_TIMEOUT_SECONDS, DEFAULT_MAX_POOL_SIZE)
from .models.configs import ConfigCollection
from .models.containers import ContainerCollection
from .models.images import ImageCollection
@ -13,7 +13,7 @@ from .models.volumes import VolumeCollection
from .utils import kwargs_from_env
class DockerClient(object):
class DockerClient:
"""
A client for communicating with a Docker server.
@ -35,6 +35,11 @@ class DockerClient(object):
user_agent (str): Set a custom user agent for requests to the server.
credstore_env (dict): Override environment variables when calling the
credential store process.
use_ssh_client (bool): If set to `True`, an ssh connection is made
via shelling out to the ssh client. Ensure the ssh client is
installed and configured on the host.
max_pool_size (int): The maximum number of connections
to save in the pool.
"""
def __init__(self, *args, **kwargs):
self.api = APIClient(*args, **kwargs)
@ -62,14 +67,19 @@ class DockerClient(object):
Args:
version (str): The version of the API to use. Set to ``auto`` to
automatically detect the server's version. Default: ``1.35``
automatically detect the server's version. Default: ``auto``
timeout (int): Default timeout for API calls, in seconds.
max_pool_size (int): The maximum number of connections
to save in the pool.
ssl_version (int): A valid `SSL version`_.
assert_hostname (bool): Verify the hostname of the server.
environment (dict): The environment to read environment variables
from. Default: the value of ``os.environ``
credstore_env (dict): Override environment variables when calling
the credential store process.
use_ssh_client (bool): If set to `True`, an ssh connection is
made via shelling out to the ssh client. Ensure the ssh
client is installed and configured on the host.
Example:
@ -80,9 +90,15 @@ class DockerClient(object):
https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
"""
timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT_SECONDS)
max_pool_size = kwargs.pop('max_pool_size', DEFAULT_MAX_POOL_SIZE)
version = kwargs.pop('version', None)
use_ssh_client = kwargs.pop('use_ssh_client', False)
return cls(
timeout=timeout, version=version, **kwargs_from_env(**kwargs)
timeout=timeout,
max_pool_size=max_pool_size,
version=version,
use_ssh_client=use_ssh_client,
**kwargs_from_env(**kwargs)
)
# Resources
@ -196,7 +212,7 @@ class DockerClient(object):
close.__doc__ = APIClient.close.__doc__
def __getattr__(self, name):
s = ["'DockerClient' object has no attribute '{}'".format(name)]
s = [f"'DockerClient' object has no attribute '{name}'"]
# If a user calls a method on APIClient, they
if hasattr(APIClient, name):
s.append("In Docker SDK for Python 2.0, this method is now on the "

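As a usage sketch of the two new keyword arguments (values here are illustrative; ``use_ssh_client`` only takes effect for ``ssh://`` hosts):

import docker

# max_pool_size caps the connections saved per pool; use_ssh_client makes
# ssh:// URLs shell out to the local ssh binary instead of using paramiko.
client = docker.from_env(max_pool_size=10, use_ssh_client=True)
print(client.version()['ApiVersion'])
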
View File

@ -1,7 +1,7 @@
import sys
from .version import version
DEFAULT_DOCKER_API_VERSION = '1.35'
DEFAULT_DOCKER_API_VERSION = '1.41'
MINIMUM_DOCKER_API_VERSION = '1.21'
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
@ -28,7 +28,7 @@ INSECURE_REGISTRY_DEPRECATION_WARNING = \
IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
WINDOWS_LONGPATH_PREFIX = '\\\\?\\'
DEFAULT_USER_AGENT = "docker-sdk-python/{0}".format(version)
DEFAULT_USER_AGENT = f"docker-sdk-python/{version}"
DEFAULT_NUM_POOLS = 25
# The OpenSSH server default value for MaxSessions is 10 which means we can
@ -36,6 +36,8 @@ DEFAULT_NUM_POOLS = 25
# For more details see: https://github.com/docker/docker-py/issues/2246
DEFAULT_NUM_POOLS_SSH = 9
DEFAULT_MAX_POOL_SIZE = 10
DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
DEFAULT_SWARM_ADDR_POOL = ['10.0.0.0/8']

View File

@ -9,7 +9,7 @@ from docker.context.config import write_context_name_to_docker_config
from docker.context import Context
class ContextAPI(object):
class ContextAPI:
"""Context API.
Contains methods for context management:
create, list, remove, get, inspect.
@ -109,7 +109,7 @@ class ContextAPI(object):
if filename == METAFILE:
try:
data = json.load(
open(os.path.join(dirname, filename), "r"))
open(os.path.join(dirname, filename)))
names.append(data["Name"])
except Exception as e:
raise errors.ContextException(
@ -138,7 +138,7 @@ class ContextAPI(object):
err = write_context_name_to_docker_config(name)
if err:
raise errors.ContextException(
'Failed to set current context: {}'.format(err))
f'Failed to set current context: {err}')
@classmethod
def remove_context(cls, name):

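A minimal sketch of the context API diffed above (the context name and host are hypothetical):

from docker.context import ContextAPI

ContextAPI.create_context('remote', host='tcp://10.0.0.5:2375')
ContextAPI.set_current_context('remote')
print([c.Name for c in ContextAPI.contexts()])
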
View File

@ -15,7 +15,7 @@ def get_current_context_name():
docker_cfg_path = find_config_file()
if docker_cfg_path:
try:
with open(docker_cfg_path, "r") as f:
with open(docker_cfg_path) as f:
name = json.load(f).get("currentContext", "default")
except Exception:
return "default"
@ -29,7 +29,7 @@ def write_context_name_to_docker_config(name=None):
config = {}
if docker_cfg_path:
try:
with open(docker_cfg_path, "r") as f:
with open(docker_cfg_path) as f:
config = json.load(f)
except Exception as e:
return e

View File

@ -11,35 +11,48 @@ from docker.context.config import get_context_host
class Context:
"""A context."""
def __init__(self, name, orchestrator=None, host=None, endpoints=None,
tls=False):
if not name:
raise Exception("Name not provided")
self.name = name
self.context_type = None
self.orchestrator = orchestrator
self.endpoints = {}
self.tls_cfg = {}
self.meta_path = "IN MEMORY"
self.tls_path = "IN MEMORY"
if not endpoints:
# set default docker endpoint if no endpoint is set
default_endpoint = "docker" if (
not orchestrator or orchestrator == "swarm"
) else orchestrator
self.endpoints = {
default_endpoint: {
"Host": get_context_host(host, tls),
"SkipTLSVerify": not tls
}
}
else:
for k, v in endpoints.items():
ekeys = v.keys()
for param in ["Host", "SkipTLSVerify"]:
if param not in ekeys:
raise ContextException(
"Missing parameter {} from endpoint {}".format(
param, k))
self.endpoints = endpoints
return
self.tls_cfg = {}
self.meta_path = "IN MEMORY"
self.tls_path = "IN MEMORY"
# check docker endpoints
for k, v in endpoints.items():
if not isinstance(v, dict):
# unknown format
raise ContextException("""Unknown endpoint format for
context {}: {}""".format(name, v))
self.endpoints[k] = v
if k != "docker":
continue
self.endpoints[k]["Host"] = v.get("Host", get_context_host(
host, tls))
self.endpoints[k]["SkipTLSVerify"] = bool(v.get(
"SkipTLSVerify", not tls))
def set_endpoint(
self, name="docker", host=None, tls_cfg=None,
@ -59,9 +72,13 @@ class Context:
@classmethod
def load_context(cls, name):
name, orchestrator, endpoints = Context._load_meta(name)
if name:
instance = cls(name, orchestrator, endpoints=endpoints)
meta = Context._load_meta(name)
if meta:
instance = cls(
meta["Name"],
orchestrator=meta["Metadata"].get("StackOrchestrator", None),
endpoints=meta.get("Endpoints", None))
instance.context_type = meta["Metadata"].get("Type", None)
instance._load_certs()
instance.meta_path = get_meta_dir(name)
return instance
@ -69,26 +86,30 @@ class Context:
@classmethod
def _load_meta(cls, name):
metadata = {}
meta_file = get_meta_file(name)
if os.path.isfile(meta_file):
with open(meta_file) as f:
try:
with open(meta_file) as f:
metadata = json.load(f)
for k, v in metadata["Endpoints"].items():
metadata["Endpoints"][k]["SkipTLSVerify"] = bool(
v["SkipTLSVerify"])
except (IOError, KeyError, ValueError) as e:
# unknown format
raise Exception("""Detected corrupted meta file for
context {} : {}""".format(name, e))
if not os.path.isfile(meta_file):
return None
return (
metadata["Name"],
metadata["Metadata"].get("StackOrchestrator", None),
metadata["Endpoints"])
return None, None, None
metadata = {}
try:
with open(meta_file) as f:
metadata = json.load(f)
except (OSError, KeyError, ValueError) as e:
# unknown format
raise Exception("""Detected corrupted meta file for
context {} : {}""".format(name, e))
# for docker endpoints, set defaults for
# Host and SkipTLSVerify fields
for k, v in metadata["Endpoints"].items():
if k != "docker":
continue
metadata["Endpoints"][k]["Host"] = v.get(
"Host", get_context_host(None, False))
metadata["Endpoints"][k]["SkipTLSVerify"] = bool(
v.get("SkipTLSVerify", True))
return metadata
def _load_certs(self):
certs = {}
@ -107,8 +128,12 @@ class Context:
elif filename.startswith("key"):
key = os.path.join(tls_dir, endpoint, filename)
if all([ca_cert, cert, key]):
verify = None
if endpoint == "docker" and not self.endpoints["docker"].get(
"SkipTLSVerify", False):
verify = True
certs[endpoint] = TLSConfig(
client_cert=(cert, key), ca_cert=ca_cert)
client_cert=(cert, key), ca_cert=ca_cert, verify=verify)
self.tls_cfg = certs
self.tls_path = tls_dir
@ -146,7 +171,7 @@ class Context:
rmtree(self.tls_path)
def __repr__(self):
return "<%s: '%s'>" % (self.__class__.__name__, self.name)
return f"<{self.__class__.__name__}: '{self.name}'>"
def __str__(self):
return json.dumps(self.__call__(), indent=2)
@ -157,6 +182,9 @@ class Context:
result.update(self.Storage)
return result
def is_docker_host(self):
return self.context_type is None
@property
def Name(self):
return self.name
@ -164,8 +192,12 @@ class Context:
@property
def Host(self):
if not self.orchestrator or self.orchestrator == "swarm":
return self.endpoints["docker"]["Host"]
return self.endpoints[self.orchestrator]["Host"]
endpoint = self.endpoints.get("docker", None)
if endpoint:
return endpoint.get("Host", None)
return None
return self.endpoints[self.orchestrator].get("Host", None)
@property
def Orchestrator(self):

View File
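A brief sketch of loading a context through the reworked metadata path (``load_context`` returns ``None`` when no meta file exists for the name):

from docker.context import Context

ctx = Context.load_context('remote')
if ctx and ctx.is_docker_host():
    print(ctx.Host)
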

@ -2,15 +2,13 @@ import errno
import json
import subprocess
import six
from . import constants
from . import errors
from .utils import create_environment_dict
from .utils import find_executable
class Store(object):
class Store:
def __init__(self, program, environment=None):
""" Create a store object that acts as an interface to
perform the basic operations for storing, retrieving
@ -30,7 +28,7 @@ class Store(object):
""" Retrieve credentials for `server`. If no credentials are found,
a `StoreError` will be raised.
"""
if not isinstance(server, six.binary_type):
if not isinstance(server, bytes):
server = server.encode('utf-8')
data = self._execute('get', server)
result = json.loads(data.decode('utf-8'))
@ -41,7 +39,7 @@ class Store(object):
# raise CredentialsNotFound
if result['Username'] == '' and result['Secret'] == '':
raise errors.CredentialsNotFound(
'No matching credentials in {}'.format(self.program)
f'No matching credentials in {self.program}'
)
return result
@ -61,7 +59,7 @@ class Store(object):
""" Erase credentials for `server`. Raises a `StoreError` if an error
occurs.
"""
if not isinstance(server, six.binary_type):
if not isinstance(server, bytes):
server = server.encode('utf-8')
self._execute('erase', server)
@ -75,20 +73,9 @@ class Store(object):
output = None
env = create_environment_dict(self.environment)
try:
if six.PY3:
output = subprocess.check_output(
[self.exe, subcmd], input=data_input, env=env,
)
else:
process = subprocess.Popen(
[self.exe, subcmd], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, env=env,
)
output, _ = process.communicate(data_input)
if process.returncode != 0:
raise subprocess.CalledProcessError(
returncode=process.returncode, cmd='', output=output
)
output = subprocess.check_output(
[self.exe, subcmd], input=data_input, env=env,
)
except subprocess.CalledProcessError as e:
raise errors.process_store_error(e, self.program)
except OSError as e:

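A hedged sketch of the simplified store path (assumes a ``docker-credential-desktop`` helper on ``PATH`` that holds an entry for the registry; the class prepends the ``docker-credential-`` prefix itself):

from docker.credentials import Store

store = Store('desktop')  # resolves to the docker-credential-desktop binary
creds = store.get('https://index.docker.io/v1/')
print(creds['Username'])
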
View File

@ -1,5 +1,14 @@
import requests
_image_not_found_explanation_fragments = frozenset(
fragment.lower() for fragment in [
'no such image',
'not found: does not exist or no pull access',
'repository does not exist',
'was found but does not match the specified platform',
]
)
class DockerException(Exception):
"""
@ -21,14 +30,13 @@ def create_api_error_from_http_exception(e):
explanation = (response.content or '').strip()
cls = APIError
if response.status_code == 404:
if explanation and ('No such image' in str(explanation) or
'not found: does not exist or no pull access'
in str(explanation) or
'repository does not exist' in str(explanation)):
explanation_msg = (explanation or '').lower()
if any(fragment in explanation_msg
for fragment in _image_not_found_explanation_fragments):
cls = ImageNotFound
else:
cls = NotFound
raise cls(e, response=response, explanation=explanation)
raise cls(e, response=response, explanation=explanation) from e
class APIError(requests.exceptions.HTTPError, DockerException):
@ -38,23 +46,25 @@ class APIError(requests.exceptions.HTTPError, DockerException):
def __init__(self, message, response=None, explanation=None):
# requests 1.2 supports response as a keyword argument, but
# requests 1.1 doesn't
super(APIError, self).__init__(message)
super().__init__(message)
self.response = response
self.explanation = explanation
def __str__(self):
message = super(APIError, self).__str__()
message = super().__str__()
if self.is_client_error():
message = '{0} Client Error: {1}'.format(
self.response.status_code, self.response.reason)
message = '{} Client Error for {}: {}'.format(
self.response.status_code, self.response.url,
self.response.reason)
elif self.is_server_error():
message = '{0} Server Error: {1}'.format(
self.response.status_code, self.response.reason)
message = '{} Server Error for {}: {}'.format(
self.response.status_code, self.response.url,
self.response.reason)
if self.explanation:
message = '{0} ("{1}")'.format(message, self.explanation)
message = f'{message} ("{self.explanation}")'
return message
@ -131,11 +141,11 @@ class ContainerError(DockerException):
self.image = image
self.stderr = stderr
err = ": {}".format(stderr) if stderr is not None else ""
err = f": {stderr}" if stderr is not None else ""
msg = ("Command '{}' in image '{}' returned non-zero exit "
"status {}{}").format(command, image, exit_status, err)
super(ContainerError, self).__init__(msg)
super().__init__(msg)
class StreamParseError(RuntimeError):
@ -145,7 +155,7 @@ class StreamParseError(RuntimeError):
class BuildError(DockerException):
def __init__(self, reason, build_log):
super(BuildError, self).__init__(reason)
super().__init__(reason)
self.msg = reason
self.build_log = build_log
@ -155,8 +165,8 @@ class ImageLoadError(DockerException):
def create_unexpected_kwargs_error(name, kwargs):
quoted_kwargs = ["'{}'".format(k) for k in sorted(kwargs)]
text = ["{}() ".format(name)]
quoted_kwargs = [f"'{k}'" for k in sorted(kwargs)]
text = [f"{name}() "]
if len(quoted_kwargs) == 1:
text.append("got an unexpected keyword argument ")
else:
@ -170,7 +180,7 @@ class MissingContextParameter(DockerException):
self.param = param
def __str__(self):
return ("missing parameter: {}".format(self.param))
return (f"missing parameter: {self.param}")
class ContextAlreadyExists(DockerException):
@ -178,7 +188,7 @@ class ContextAlreadyExists(DockerException):
self.name = name
def __str__(self):
return ("context {} already exists".format(self.name))
return (f"context {self.name} already exists")
class ContextException(DockerException):
@ -194,4 +204,4 @@ class ContextNotFound(DockerException):
self.name = name
def __str__(self):
return ("context '{}' not found".format(self.name))
return (f"context '{self.name}' not found")

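A short sketch of how the 404 classification above surfaces to callers (the image name is deliberately bogus):

import docker
from docker.errors import APIError, ImageNotFound

client = docker.from_env()
try:
    client.images.get('no-such/image:latest')
except ImageNotFound as e:
    # the explanation matched one of _image_not_found_explanation_fragments
    print(e.explanation)
except APIError as e:
    # error messages now include the request URL
    print(e)
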
View File

@ -7,7 +7,7 @@ class Config(Model):
id_attribute = 'ID'
def __repr__(self):
return "<%s: '%s'>" % (self.__class__.__name__, self.name)
return f"<{self.__class__.__name__}: '{self.name}'>"
@property
def name(self):

View File

@ -225,7 +225,8 @@ class Container(Model):
"""
return self.client.api.export(self.id, chunk_size)
def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE,
encode_stream=False):
"""
Retrieve a file or folder from the container in the form of a tar
archive.
@ -235,6 +236,8 @@ class Container(Model):
chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB
encode_stream (bool): Determines if data should be encoded
(gzip-compressed) during transmission. Default: False
Returns:
(tuple): First element is a raw tar data stream. Second element is
@ -255,7 +258,8 @@ class Container(Model):
... f.write(chunk)
>>> f.close()
"""
return self.client.api.get_archive(self.id, path, chunk_size)
return self.client.api.get_archive(self.id, path,
chunk_size, encode_stream)
def kill(self, signal=None):
"""
@ -549,6 +553,11 @@ class ContainerCollection(Collection):
``["SYS_ADMIN", "MKNOD"]``.
cap_drop (list of str): Drop kernel capabilities.
cgroup_parent (str): Override the default parent cgroup.
cgroupns (str): Override the default cgroup namespace mode for the
container. One of:
- ``private`` the container runs in its own private cgroup
namespace.
- ``host`` use the host system's cgroup namespace.
cpu_count (int): Number of usable CPUs (Windows only).
cpu_percent (int): Usable percentage of the available CPUs
(Windows only).
@ -579,6 +588,9 @@ class ContainerCollection(Collection):
For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
to have read-write access to the host's ``/dev/sda`` via a
node named ``/dev/xvda`` inside the container.
device_requests (:py:class:`list`): Expose host resources such as
GPUs to the container, as a list of
:py:class:`docker.types.DeviceRequest` instances.
dns (:py:class:`list`): Set custom DNS servers.
dns_opt (:py:class:`list`): Additional options to be added to the
container's ``resolv.conf`` file.
@ -662,6 +674,7 @@ class ContainerCollection(Collection):
- ``container:<name|id>`` Reuse another container's network
stack.
- ``host`` Use the host network stack.
This mode is incompatible with ``ports``.
Incompatible with ``network``.
oom_kill_disable (bool): Whether to disable OOM killer.
@ -695,6 +708,7 @@ class ContainerCollection(Collection):
to a single container port. For example,
``{'1111/tcp': [1234, 4567]}``.
Incompatible with ``host`` network mode.
privileged (bool): Give extended privileges to this container.
publish_all_ports (bool): Publish all ports to the host.
read_only (bool): Mount the container's root filesystem as read
@ -772,6 +786,15 @@ class ContainerCollection(Collection):
{'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'},
'/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}}
Or a list of strings, each of which specifies a mount
volume.
For example:
.. code-block:: python
['/home/user1/:/mnt/vol2','/var/www:/mnt/vol1']
volumes_from (:py:class:`list`): List of container names or IDs to
get volumes from.
working_dir (str): Path to the working directory.
@ -803,7 +826,7 @@ class ContainerCollection(Collection):
image = image.id
stream = kwargs.pop('stream', False)
detach = kwargs.pop('detach', False)
platform = kwargs.pop('platform', None)
platform = kwargs.get('platform', None)
if detach and remove:
if version_gte(self.client.api._version, '1.25'):
@ -987,6 +1010,7 @@ RUN_CREATE_KWARGS = [
'mac_address',
'name',
'network_disabled',
'platform',
'stdin_open',
'stop_signal',
'tty',
@ -1003,6 +1027,7 @@ RUN_HOST_CONFIG_KWARGS = [
'cap_add',
'cap_drop',
'cgroup_parent',
'cgroupns',
'cpu_count',
'cpu_percent',
'cpu_period',
@ -1018,6 +1043,7 @@ RUN_HOST_CONFIG_KWARGS = [
'device_write_bps',
'device_write_iops',
'devices',
'device_requests',
'dns_opt',
'dns_search',
'dns',

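A hedged sketch combining the new run options and archive flag (needs Engine API 1.40+; the GPU request additionally assumes the NVIDIA container runtime):

import docker
from docker.types import DeviceRequest

client = docker.from_env()
container = client.containers.run(
    'nvidia/cuda:11.0-base', 'nvidia-smi',
    device_requests=[DeviceRequest(count=-1, capabilities=[['gpu']])],
    cgroupns='private',
    detach=True,
)
# encode_stream asks the daemon to gzip-compress the tar stream
bits, stat = container.get_archive('/etc/hostname', encode_stream=True)
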
View File

@ -2,8 +2,6 @@ import itertools
import re
import warnings
import six
from ..api import APIClient
from ..constants import DEFAULT_DATA_CHUNK_SIZE
from ..errors import BuildError, ImageLoadError, InvalidArgument
@ -17,7 +15,10 @@ class Image(Model):
An image on the server.
"""
def __repr__(self):
return "<%s: '%s'>" % (self.__class__.__name__, "', '".join(self.tags))
return "<{}: '{}'>".format(
self.__class__.__name__,
"', '".join(self.tags),
)
@property
def labels(self):
@ -30,12 +31,12 @@ class Image(Model):
@property
def short_id(self):
"""
The ID of the image truncated to 10 characters, plus the ``sha256:``
The ID of the image truncated to 12 characters, plus the ``sha256:``
prefix.
"""
if self.id.startswith('sha256:'):
return self.id[:17]
return self.id[:10]
return self.id[:19]
return self.id[:12]
@property
def tags(self):
@ -60,6 +61,24 @@ class Image(Model):
"""
return self.client.api.history(self.id)
def remove(self, force=False, noprune=False):
"""
Remove this image.
Args:
force (bool): Force removal of the image
noprune (bool): Do not delete untagged parents
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.remove_image(
self.id,
force=force,
noprune=noprune,
)
def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE, named=False):
"""
Get a tarball of an image. Similar to the ``docker save`` command.
@ -84,19 +103,19 @@ class Image(Model):
Example:
>>> image = cli.get_image("busybox:latest")
>>> image = cli.images.get("busybox:latest")
>>> f = open('/tmp/busybox-latest.tar', 'wb')
>>> for chunk in image:
>>> for chunk in image.save():
>>> f.write(chunk)
>>> f.close()
"""
img = self.id
if named:
img = self.tags[0] if self.tags else img
if isinstance(named, six.string_types):
if isinstance(named, str):
if named not in self.tags:
raise InvalidArgument(
"{} is not a valid tag for this image".format(named)
f"{named} is not a valid tag for this image"
)
img = named
@ -127,7 +146,7 @@ class RegistryData(Model):
Image metadata stored on the registry, including available platforms.
"""
def __init__(self, image_name, *args, **kwargs):
super(RegistryData, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
self.image_name = image_name
@property
@ -140,10 +159,10 @@ class RegistryData(Model):
@property
def short_id(self):
"""
The ID of the image truncated to 10 characters, plus the ``sha256:``
The ID of the image truncated to 12 characters, plus the ``sha256:``
prefix.
"""
return self.id[:17]
return self.id[:19]
def pull(self, platform=None):
"""
@ -180,7 +199,7 @@ class RegistryData(Model):
parts = platform.split('/')
if len(parts) > 3 or len(parts) < 1:
raise InvalidArgument(
'"{0}" is not a valid platform descriptor'.format(platform)
f'"{platform}" is not a valid platform descriptor'
)
platform = {'os': parts[0]}
if len(parts) > 2:
@ -277,7 +296,7 @@ class ImageCollection(Collection):
If neither ``path`` nor ``fileobj`` is specified.
"""
resp = self.client.api.build(**kwargs)
if isinstance(resp, six.string_types):
if isinstance(resp, str):
return self.get(resp)
last_event = None
image_id = None
@ -395,12 +414,13 @@ class ImageCollection(Collection):
return [self.get(i) for i in images]
def pull(self, repository, tag=None, **kwargs):
def pull(self, repository, tag=None, all_tags=False, **kwargs):
"""
Pull an image of the given name and return it. Similar to the
``docker pull`` command.
If no tag is specified, all tags from that repository will be
pulled.
If ``tag`` is ``None`` or empty, it is set to ``latest``.
If ``all_tags`` is set, the ``tag`` parameter is ignored and all image
tags will be pulled.
If you want to get the raw pull output, use the
:py:meth:`~docker.api.image.ImageApiMixin.pull` method in the
@ -413,10 +433,11 @@ class ImageCollection(Collection):
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
platform (str): Platform in the format ``os[/arch[/variant]]``
all_tags (bool): Pull all image tags
Returns:
(:py:class:`Image` or list): The image that has been pulled.
If no ``tag`` was specified, the method will return a list
If ``all_tags`` is True, the method will return a list
of :py:class:`Image` objects belonging to this repository.
Raises:
@ -426,13 +447,13 @@ class ImageCollection(Collection):
Example:
>>> # Pull the image tagged `latest` in the busybox repo
>>> image = client.images.pull('busybox:latest')
>>> image = client.images.pull('busybox')
>>> # Pull all tags in the busybox repo
>>> images = client.images.pull('busybox')
>>> images = client.images.pull('busybox', all_tags=True)
"""
if not tag:
repository, tag = parse_repository_tag(repository)
repository, image_tag = parse_repository_tag(repository)
tag = tag or image_tag or 'latest'
if 'stream' in kwargs:
warnings.warn(
@ -442,14 +463,14 @@ class ImageCollection(Collection):
del kwargs['stream']
pull_log = self.client.api.pull(
repository, tag=tag, stream=True, **kwargs
repository, tag=tag, stream=True, all_tags=all_tags, **kwargs
)
for _ in pull_log:
# We don't do anything with the logs, but we need
# to keep the connection alive and wait for the image
# to be pulled.
pass
if tag:
if not all_tags:
return self.get('{0}{2}{1}'.format(
repository, tag, '@' if tag.startswith('sha256:') else ':'
))

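For reference, the revised pull semantics (``all_tags`` replaces the old "empty tag pulls everything" behaviour):

import docker

client = docker.from_env()
image = client.images.pull('busybox')  # now defaults to busybox:latest
print(image.short_id)  # 'sha256:' plus 12 hex characters
images = client.images.pull('busybox', all_tags=True)  # list of Image objects
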
View File

@ -46,6 +46,8 @@ class Network(Model):
network, using the IPv6 protocol. Defaults to ``None``.
link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
addresses.
driver_opt (dict): A dictionary of options to provide to the
network driver. Defaults to ``None``.
Raises:
:py:class:`docker.errors.APIError`

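A small sketch of the new ``driver_opt`` parameter on connect (the option key is hypothetical and driver-specific):

import docker

client = docker.from_env()
network = client.networks.create('example-net', driver='bridge')
container = client.containers.create('busybox', 'sleep 60')
network.connect(container, driver_opt={'com.example.mtu': '1450'})
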
View File

@ -7,7 +7,7 @@ class Plugin(Model):
A plugin on the server.
"""
def __repr__(self):
return "<%s: '%s'>" % (self.__class__.__name__, self.name)
return f"<{self.__class__.__name__}: '{self.name}'>"
@property
def name(self):
@ -44,16 +44,19 @@ class Plugin(Model):
self.client.api.configure_plugin(self.name, options)
self.reload()
def disable(self):
def disable(self, force=False):
"""
Disable the plugin.
Args:
force (bool): Force disable. Default: False
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
self.client.api.disable_plugin(self.name)
self.client.api.disable_plugin(self.name, force)
self.reload()
def enable(self, timeout=0):
@ -117,9 +120,12 @@ class Plugin(Model):
if remote is None:
remote = self.name
privileges = self.client.api.plugin_privileges(remote)
for d in self.client.api.upgrade_plugin(self.name, remote, privileges):
yield d
self._reload()
yield from self.client.api.upgrade_plugin(
self.name,
remote,
privileges,
)
self.reload()
class PluginCollection(Collection):

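A brief usage sketch for the forced disable (assumes the named plugin is already installed):

import docker

client = docker.from_env()
plugin = client.plugins.get('vieux/sshfs:latest')
plugin.disable(force=True)  # force is passed through to the disable endpoint
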
View File

@ -1,5 +1,4 @@
class Model(object):
class Model:
"""
A base class for representing a single object on the server.
"""
@ -18,13 +17,13 @@ class Model(object):
self.attrs = {}
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.short_id)
return f"<{self.__class__.__name__}: {self.short_id}>"
def __eq__(self, other):
return isinstance(other, self.__class__) and self.id == other.id
def __hash__(self):
return hash("%s:%s" % (self.__class__.__name__, self.id))
return hash(f"{self.__class__.__name__}:{self.id}")
@property
def id(self):
@ -36,9 +35,9 @@ class Model(object):
@property
def short_id(self):
"""
The ID of the object, truncated to 10 characters.
The ID of the object, truncated to 12 characters.
"""
return self.id[:10]
return self.id[:12]
def reload(self):
"""
@ -49,7 +48,7 @@ class Model(object):
self.attrs = new_model.attrs
class Collection(object):
class Collection:
"""
A base class for representing all objects of a particular type on the
server.

View File

@ -7,7 +7,7 @@ class Secret(Model):
id_attribute = 'ID'
def __repr__(self):
return "<%s: '%s'>" % (self.__class__.__name__, self.name)
return f"<{self.__class__.__name__}: '{self.name}'>"
@property
def name(self):
@ -30,6 +30,7 @@ class SecretCollection(Collection):
def create(self, **kwargs):
obj = self.client.api.create_secret(**kwargs)
obj.setdefault("Spec", {})["Name"] = kwargs.get("name")
return self.prepare_model(obj)
create.__doc__ = APIClient.create_secret.__doc__

View File

@ -157,6 +157,8 @@ class ServiceCollection(Collection):
constraints.
preferences (list of tuple): :py:class:`~docker.types.Placement`
preferences.
maxreplicas (int): Maximum number of replicas per node, applied
via :py:class:`~docker.types.Placement`.
platforms (list of tuple): A list of platform constraints
expressed as ``(arch, os)`` tuples.
container_labels (dict): Labels to apply to the container.
@ -211,6 +213,10 @@ class ServiceCollection(Collection):
to the service.
privileges (Privileges): Security options for the service's
containers.
cap_add (:py:class:`list`): A list of kernel capabilities to add to
the default set for the container.
cap_drop (:py:class:`list`): A list of kernel capabilities to drop
from the default set for the container.
Returns:
:py:class:`Service`: The created service.
@ -275,6 +281,8 @@ class ServiceCollection(Collection):
# kwargs to copy straight over to ContainerSpec
CONTAINER_SPEC_KWARGS = [
'args',
'cap_add',
'cap_drop',
'command',
'configs',
'dns_config',
@ -312,6 +320,7 @@ CREATE_SERVICE_KWARGS = [
'labels',
'mode',
'update_config',
'rollback_config',
'endpoint_spec',
]
@ -319,6 +328,7 @@ PLACEMENT_KWARGS = [
'constraints',
'preferences',
'platforms',
'maxreplicas',
]

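A hedged sketch of the new service options (requires a swarm-mode engine):

import docker

client = docker.from_env()
service = client.services.create(
    'nginx:alpine',
    name='web',
    maxreplicas=2,          # at most two replicas per node
    cap_add=['NET_ADMIN'],  # forwarded to ContainerSpec CapabilityAdd
    cap_drop=['CHOWN'],     # forwarded to ContainerSpec CapabilityDrop
)
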
View File

@ -11,7 +11,7 @@ class Swarm(Model):
id_attribute = 'ID'
def __init__(self, *args, **kwargs):
super(Swarm, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
if self.client:
try:
self.reload()

View File

@ -5,15 +5,16 @@ from . import errors
from .transport import SSLHTTPAdapter
class TLSConfig(object):
class TLSConfig:
"""
TLS configuration.
Args:
client_cert (tuple of str): Path to client cert, path to client key.
ca_cert (str): Path to CA cert file.
verify (bool or str): This can be ``False`` or a path to a CA cert
file.
verify (bool or str): This can be a bool or a path to a CA cert
file to verify against. If ``True``, verify using ca_cert;
if ``False`` or not specified, do not verify.
ssl_version (int): A valid `SSL version`_.
assert_hostname (bool): Verify the hostname of the server.
@ -32,37 +33,18 @@ class TLSConfig(object):
# https://docs.docker.com/engine/articles/https/
# This diverges from the Docker CLI in that users can specify 'tls'
# here, but also disable any public/default CA pool verification by
# leaving tls_verify=False
# leaving verify=False
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
# TODO(dperny): according to the python docs, PROTOCOL_TLSvWhatever is
# deprecated, and it's recommended to use OPT_NO_TLSvWhatever instead
# to exclude versions. But I think that might require a bigger
# architectural change, so I've opted not to pursue it at this time
# If the user provides an SSL version, we should use their preference
if ssl_version:
self.ssl_version = ssl_version
else:
# If the user provides no ssl version, we should default to
# TLSv1_2. This option is the most secure, and will work for the
# majority of users with reasonably up-to-date software. However,
# before doing so, detect openssl version to ensure we can support
# it.
if ssl.OPENSSL_VERSION_INFO[:3] >= (1, 0, 1) and hasattr(
ssl, 'PROTOCOL_TLSv1_2'):
# If the OpenSSL version is high enough to support TLSv1_2,
# then we should use it.
self.ssl_version = getattr(ssl, 'PROTOCOL_TLSv1_2')
else:
# Otherwise, TLS v1.0 seems to be the safest default;
# SSLv23 fails in mysterious ways:
# https://github.com/docker/docker-py/issues/963
self.ssl_version = ssl.PROTOCOL_TLSv1
self.ssl_version = ssl.PROTOCOL_TLS_CLIENT
# "tls" and "tls_verify" must have both or neither cert/key files In
# "client_cert" must have both or neither cert/key files. In
# either case, Alert the user when both are expected, but any are
# missing.
@ -71,7 +53,7 @@ class TLSConfig(object):
tls_cert, tls_key = client_cert
except ValueError:
raise errors.TLSParameterError(
'client_config must be a tuple of'
'client_cert must be a tuple of'
' (client certificate, key file)'
)
@ -79,7 +61,7 @@ class TLSConfig(object):
not os.path.isfile(tls_key)):
raise errors.TLSParameterError(
'Path to a certificate and key files must be provided'
' through the client_config param'
' through the client_cert param'
)
self.cert = (tls_cert, tls_key)
@ -88,7 +70,7 @@ class TLSConfig(object):
self.ca_cert = ca_cert
if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
raise errors.TLSParameterError(
'Invalid CA certificate provided for `tls_ca_cert`.'
'Invalid CA certificate provided for `ca_cert`.'
)
def configure_client(self, client):

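A minimal sketch of the clarified parameter names (certificate paths are placeholders):

import docker
from docker.tls import TLSConfig

tls = TLSConfig(
    client_cert=('/certs/cert.pem', '/certs/key.pem'),
    ca_cert='/certs/ca.pem',
    verify=True,  # verify against ca_cert; False disables verification
)
client = docker.DockerClient(base_url='tcp://docker-host:2376', tls=tls)
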
View File

@ -3,6 +3,6 @@ import requests.adapters
class BaseHTTPAdapter(requests.adapters.HTTPAdapter):
def close(self):
super(BaseHTTPAdapter, self).close()
super().close()
if hasattr(self, 'pools'):
self.pools.clear()

View File

@ -1,14 +1,11 @@
import six
import queue
import requests.adapters
from docker.transport.basehttpadapter import BaseHTTPAdapter
from .. import constants
from .npipesocket import NpipeSocket
if six.PY3:
import http.client as httplib
else:
import httplib
import http.client as httplib
try:
import requests.packages.urllib3 as urllib3
@ -18,9 +15,9 @@ except ImportError:
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class NpipeHTTPConnection(httplib.HTTPConnection, object):
class NpipeHTTPConnection(httplib.HTTPConnection):
def __init__(self, npipe_path, timeout=60):
super(NpipeHTTPConnection, self).__init__(
super().__init__(
'localhost', timeout=timeout
)
self.npipe_path = npipe_path
@ -35,7 +32,7 @@ class NpipeHTTPConnection(httplib.HTTPConnection, object):
class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, npipe_path, timeout=60, maxsize=10):
super(NpipeHTTPConnectionPool, self).__init__(
super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.npipe_path = npipe_path
@ -57,14 +54,14 @@ class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
except AttributeError: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
except six.moves.queue.Empty:
except queue.Empty:
if self.block:
raise urllib3.exceptions.EmptyPoolError(
self,
"Pool reached maximum size and no more "
"connections are allowed."
)
pass # Oh well, we'll create a new connection then
# Oh well, we'll create a new connection then
return conn or self._new_conn()
@ -73,16 +70,19 @@ class NpipeHTTPAdapter(BaseHTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['npipe_path',
'pools',
'timeout']
'timeout',
'max_pool_size']
def __init__(self, base_url, timeout=60,
pool_connections=constants.DEFAULT_NUM_POOLS):
pool_connections=constants.DEFAULT_NUM_POOLS,
max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
self.npipe_path = base_url.replace('npipe://', '')
self.timeout = timeout
self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
super(NpipeHTTPAdapter, self).__init__()
super().__init__()
def get_connection(self, url, proxies=None):
with self.pools.lock:
@ -91,7 +91,8 @@ class NpipeHTTPAdapter(BaseHTTPAdapter):
return pool
pool = NpipeHTTPConnectionPool(
self.npipe_path, self.timeout
self.npipe_path, self.timeout,
maxsize=self.max_pool_size
)
self.pools[url] = pool

View File

@ -2,7 +2,6 @@ import functools
import time
import io
import six
import win32file
import win32pipe
@ -24,7 +23,7 @@ def check_closed(f):
return wrapped
class NpipeSocket(object):
class NpipeSocket:
""" Partial implementation of the socket API over windows named pipes.
This implementation is only designed to be used as a client socket,
and server-specific methods (bind, listen, accept...) are not
@ -128,9 +127,6 @@ class NpipeSocket(object):
@check_closed
def recv_into(self, buf, nbytes=0):
if six.PY2:
return self._recv_into_py2(buf, nbytes)
readbuf = buf
if not isinstance(buf, memoryview):
readbuf = memoryview(buf)
@ -195,7 +191,7 @@ class NpipeFileIOBase(io.RawIOBase):
self.sock = npipe_socket
def close(self):
super(NpipeFileIOBase, self).close()
super().close()
self.sock = None
def fileno(self):

View File

@ -1,16 +1,17 @@
import paramiko
import queue
import urllib.parse
import requests.adapters
import six
import logging
import os
import signal
import socket
import subprocess
from docker.transport.basehttpadapter import BaseHTTPAdapter
from .. import constants
if six.PY3:
import http.client as httplib
else:
import httplib
import http.client as httplib
try:
import requests.packages.urllib3 as urllib3
@ -20,33 +21,121 @@ except ImportError:
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class SSHConnection(httplib.HTTPConnection, object):
def __init__(self, ssh_transport, timeout=60):
super(SSHConnection, self).__init__(
class SSHSocket(socket.socket):
def __init__(self, host):
super().__init__(
socket.AF_INET, socket.SOCK_STREAM)
self.host = host
self.port = None
self.user = None
if ':' in self.host:
self.host, self.port = self.host.split(':')
if '@' in self.host:
self.user, self.host = self.host.split('@')
self.proc = None
def connect(self, **kwargs):
args = ['ssh']
if self.user:
args = args + ['-l', self.user]
if self.port:
args = args + ['-p', self.port]
args = args + ['--', self.host, 'docker system dial-stdio']
preexec_func = None
if not constants.IS_WINDOWS_PLATFORM:
def f():
signal.signal(signal.SIGINT, signal.SIG_IGN)
preexec_func = f
env = dict(os.environ)
# drop LD_LIBRARY_PATH and SSL_CERT_FILE
env.pop('LD_LIBRARY_PATH', None)
env.pop('SSL_CERT_FILE', None)
self.proc = subprocess.Popen(
args,
env=env,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
preexec_fn=None if constants.IS_WINDOWS_PLATFORM else preexec_func)
def _write(self, data):
if not self.proc or self.proc.stdin.closed:
raise Exception('SSH subprocess not initiated. '
'connect() must be called first.')
written = self.proc.stdin.write(data)
self.proc.stdin.flush()
return written
def sendall(self, data):
self._write(data)
def send(self, data):
return self._write(data)
def recv(self, n):
if not self.proc:
raise Exception('SSH subprocess not initiated. '
'connect() must be called first.')
return self.proc.stdout.read(n)
def makefile(self, mode):
if not self.proc:
self.connect()
self.proc.stdout.channel = self
return self.proc.stdout
def close(self):
if not self.proc or self.proc.stdin.closed:
return
self.proc.stdin.write(b'\n\n')
self.proc.stdin.flush()
self.proc.terminate()
class SSHConnection(httplib.HTTPConnection):
def __init__(self, ssh_transport=None, timeout=60, host=None):
super().__init__(
'localhost', timeout=timeout
)
self.ssh_transport = ssh_transport
self.timeout = timeout
self.ssh_host = host
def connect(self):
sock = self.ssh_transport.open_session()
sock.settimeout(self.timeout)
sock.exec_command('docker system dial-stdio')
if self.ssh_transport:
sock = self.ssh_transport.open_session()
sock.settimeout(self.timeout)
sock.exec_command('docker system dial-stdio')
else:
sock = SSHSocket(self.ssh_host)
sock.settimeout(self.timeout)
sock.connect()
self.sock = sock
class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
scheme = 'ssh'
def __init__(self, ssh_client, timeout=60, maxsize=10):
super(SSHConnectionPool, self).__init__(
def __init__(self, ssh_client=None, timeout=60, maxsize=10, host=None):
super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.ssh_transport = ssh_client.get_transport()
self.ssh_transport = None
self.timeout = timeout
if ssh_client:
self.ssh_transport = ssh_client.get_transport()
self.ssh_host = host
def _new_conn(self):
return SSHConnection(self.ssh_transport, self.timeout)
return SSHConnection(self.ssh_transport, self.timeout, self.ssh_host)
# When re-using connections, urllib3 calls fileno() on our
# SSH channel instance, quickly overloading our fd limit. To avoid this,
@ -59,14 +148,14 @@ class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
except AttributeError: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
except six.moves.queue.Empty:
except queue.Empty:
if self.block:
raise urllib3.exceptions.EmptyPoolError(
self,
"Pool reached maximum size and no more "
"connections are allowed."
)
pass # Oh well, we'll create a new connection then
# Oh well, we'll create a new connection then
return conn or self._new_conn()
@ -74,14 +163,33 @@ class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
class SSHHTTPAdapter(BaseHTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + [
'pools', 'timeout', 'ssh_client', 'ssh_params'
'pools', 'timeout', 'ssh_client', 'ssh_params', 'max_pool_size'
]
def __init__(self, base_url, timeout=60,
pool_connections=constants.DEFAULT_NUM_POOLS):
pool_connections=constants.DEFAULT_NUM_POOLS,
max_pool_size=constants.DEFAULT_MAX_POOL_SIZE,
shell_out=False):
self.ssh_client = None
if not shell_out:
self._create_paramiko_client(base_url)
self._connect()
self.ssh_host = base_url
if base_url.startswith('ssh://'):
self.ssh_host = base_url[len('ssh://'):]
self.timeout = timeout
self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
super().__init__()
def _create_paramiko_client(self, base_url):
logging.getLogger("paramiko").setLevel(logging.WARNING)
self.ssh_client = paramiko.SSHClient()
base_url = six.moves.urllib_parse.urlparse(base_url)
base_url = urllib.parse.urlparse(base_url)
self.ssh_params = {
"hostname": base_url.hostname,
"port": base_url.port,
@ -93,48 +201,54 @@ class SSHHTTPAdapter(BaseHTTPAdapter):
with open(ssh_config_file) as f:
conf.parse(f)
host_config = conf.lookup(base_url.hostname)
self.ssh_conf = host_config
if 'proxycommand' in host_config:
self.ssh_params["sock"] = paramiko.ProxyCommand(
self.ssh_conf['proxycommand']
host_config['proxycommand']
)
if 'hostname' in host_config:
self.ssh_params['hostname'] = host_config['hostname']
if base_url.port is None and 'port' in host_config:
self.ssh_params['port'] = self.ssh_conf['port']
self.ssh_params['port'] = host_config['port']
if base_url.username is None and 'user' in host_config:
self.ssh_params['username'] = self.ssh_conf['user']
self.ssh_params['username'] = host_config['user']
if 'identityfile' in host_config:
self.ssh_params['key_filename'] = host_config['identityfile']
self.ssh_client.load_system_host_keys()
self.ssh_client.set_missing_host_key_policy(paramiko.WarningPolicy())
self._connect()
self.timeout = timeout
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
super(SSHHTTPAdapter, self).__init__()
self.ssh_client.set_missing_host_key_policy(paramiko.RejectPolicy())
def _connect(self):
self.ssh_client.connect(**self.ssh_params)
if self.ssh_client:
self.ssh_client.connect(**self.ssh_params)
def get_connection(self, url, proxies=None):
if not self.ssh_client:
return SSHConnectionPool(
ssh_client=self.ssh_client,
timeout=self.timeout,
maxsize=self.max_pool_size,
host=self.ssh_host
)
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
# Connection is closed try a reconnect
if not self.ssh_client.get_transport():
if self.ssh_client and not self.ssh_client.get_transport():
self._connect()
pool = SSHConnectionPool(
self.ssh_client, self.timeout
ssh_client=self.ssh_client,
timeout=self.timeout,
maxsize=self.max_pool_size,
host=self.ssh_host
)
self.pools[url] = pool
return pool
def close(self):
super(SSHHTTPAdapter, self).close()
self.ssh_client.close()
super().close()
if self.ssh_client:
self.ssh_client.close()

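A sketch of the shell-out transport added above (assumes a local ssh binary and an engine that supports ``docker system dial-stdio``; the host is hypothetical):

import docker

client = docker.DockerClient(
    base_url='ssh://user@docker-host',
    use_ssh_client=True,  # SSHSocket subprocess instead of paramiko
)
print(client.ping())
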
View File

@ -2,9 +2,7 @@
https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
https://github.com/kennethreitz/requests/pull/799
"""
import sys
from distutils.version import StrictVersion
from packaging.version import Version
from requests.adapters import HTTPAdapter
from docker.transport.basehttpadapter import BaseHTTPAdapter
@ -17,12 +15,6 @@ except ImportError:
PoolManager = urllib3.poolmanager.PoolManager
# Monkey-patching match_hostname with a version that supports
# IP-address checking. Not necessary for Python 3.5 and above
if sys.version_info[0] < 3 or sys.version_info[1] < 5:
from backports.ssl_match_hostname import match_hostname
urllib3.connection.match_hostname = match_hostname
class SSLHTTPAdapter(BaseHTTPAdapter):
'''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
@ -36,7 +28,7 @@ class SSLHTTPAdapter(BaseHTTPAdapter):
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
super(SSLHTTPAdapter, self).__init__(**kwargs)
super().__init__(**kwargs)
def init_poolmanager(self, connections, maxsize, block=False):
kwargs = {
@ -59,7 +51,7 @@ class SSLHTTPAdapter(BaseHTTPAdapter):
But we still need to take care of when there is a proxy poolmanager
"""
conn = super(SSLHTTPAdapter, self).get_connection(*args, **kwargs)
conn = super().get_connection(*args, **kwargs)
if conn.assert_hostname != self.assert_hostname:
conn.assert_hostname = self.assert_hostname
return conn
@ -70,4 +62,4 @@ class SSLHTTPAdapter(BaseHTTPAdapter):
return False
if urllib_ver == 'dev':
return True
return StrictVersion(urllib_ver) > StrictVersion('1.5')
return Version(urllib_ver) > Version('1.5')

View File

@ -1,7 +1,6 @@
import six
import requests.adapters
import socket
from six.moves import http_client as httplib
import http.client as httplib
from docker.transport.basehttpadapter import BaseHTTPAdapter
from .. import constants
@ -15,27 +14,15 @@ except ImportError:
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class UnixHTTPResponse(httplib.HTTPResponse, object):
def __init__(self, sock, *args, **kwargs):
disable_buffering = kwargs.pop('disable_buffering', False)
if six.PY2:
# FIXME: We may need to disable buffering on Py3 as well,
# but there's no clear way to do it at the moment. See:
# https://github.com/docker/docker-py/issues/1799
kwargs['buffering'] = not disable_buffering
super(UnixHTTPResponse, self).__init__(sock, *args, **kwargs)
class UnixHTTPConnection(httplib.HTTPConnection, object):
class UnixHTTPConnection(httplib.HTTPConnection):
def __init__(self, base_url, unix_socket, timeout=60):
super(UnixHTTPConnection, self).__init__(
super().__init__(
'localhost', timeout=timeout
)
self.base_url = base_url
self.unix_socket = unix_socket
self.timeout = timeout
self.disable_buffering = False
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
@ -44,20 +31,15 @@ class UnixHTTPConnection(httplib.HTTPConnection, object):
self.sock = sock
def putheader(self, header, *values):
super(UnixHTTPConnection, self).putheader(header, *values)
if header == 'Connection' and 'Upgrade' in values:
self.disable_buffering = True
super().putheader(header, *values)
def response_class(self, sock, *args, **kwargs):
if self.disable_buffering:
kwargs['disable_buffering'] = True
return UnixHTTPResponse(sock, *args, **kwargs)
return httplib.HTTPResponse(sock, *args, **kwargs)
class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
super(UnixHTTPConnectionPool, self).__init__(
super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.base_url = base_url
@ -74,19 +56,22 @@ class UnixHTTPAdapter(BaseHTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools',
'socket_path',
'timeout']
'timeout',
'max_pool_size']
def __init__(self, socket_url, timeout=60,
pool_connections=constants.DEFAULT_NUM_POOLS):
pool_connections=constants.DEFAULT_NUM_POOLS,
max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
socket_path = socket_url.replace('http+unix://', '')
if not socket_path.startswith('/'):
socket_path = '/' + socket_path
self.socket_path = socket_path
self.timeout = timeout
self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
super(UnixHTTPAdapter, self).__init__()
super().__init__()
def get_connection(self, url, proxies=None):
with self.pools.lock:
@ -95,7 +80,8 @@ class UnixHTTPAdapter(BaseHTTPAdapter):
return pool
pool = UnixHTTPConnectionPool(
url, self.socket_path, self.timeout
url, self.socket_path, self.timeout,
maxsize=self.max_pool_size
)
self.pools[url] = pool

View File

@ -1,5 +1,7 @@
# flake8: noqa
from .containers import ContainerConfig, HostConfig, LogConfig, Ulimit
from .containers import (
ContainerConfig, HostConfig, LogConfig, Ulimit, DeviceRequest
)
from .daemon import CancellableStream
from .healthcheck import Healthcheck
from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig

View File

@ -1,7 +1,4 @@
import six
class DictType(dict):
def __init__(self, init):
for k, v in six.iteritems(init):
for k, v in init.items():
self[k] = v

View File

@ -1,5 +1,3 @@
import six
from .. import errors
from ..utils.utils import (
convert_port_bindings, convert_tmpfs_mounts, convert_volume_binds,
@ -10,7 +8,7 @@ from .base import DictType
from .healthcheck import Healthcheck
class LogConfigTypesEnum(object):
class LogConfigTypesEnum:
_values = (
'json-file',
'syslog',
@ -61,7 +59,7 @@ class LogConfig(DictType):
if config and not isinstance(config, dict):
raise ValueError("LogConfig.config must be a dictionary")
super(LogConfig, self).__init__({
super().__init__({
'Type': log_driver_type,
'Config': config
})
@ -97,8 +95,8 @@ class Ulimit(DictType):
Args:
name (str): Which ulimit will this apply to. A list of valid names can
be found `here <http://tinyurl.me/ZWRkM2Ztwlykf>`_.
name (str): Which ulimit will this apply to. The valid names can be
found in '/etc/security/limits.conf' on a GNU/Linux system.
soft (int): The soft limit for this ulimit. Optional.
hard (int): The hard limit for this ulimit. Optional.
@ -117,13 +115,13 @@ class Ulimit(DictType):
name = kwargs.get('name', kwargs.get('Name'))
soft = kwargs.get('soft', kwargs.get('Soft'))
hard = kwargs.get('hard', kwargs.get('Hard'))
if not isinstance(name, six.string_types):
if not isinstance(name, str):
raise ValueError("Ulimit.name must be a string")
if soft and not isinstance(soft, int):
raise ValueError("Ulimit.soft must be an integer")
if hard and not isinstance(hard, int):
raise ValueError("Ulimit.hard must be an integer")
super(Ulimit, self).__init__({
super().__init__({
'Name': name,
'Soft': soft,
'Hard': hard
@ -154,6 +152,104 @@ class Ulimit(DictType):
self['Hard'] = value
class DeviceRequest(DictType):
"""
Create a device request to be used with
:py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
Args:
driver (str): Which driver to use for this device. Optional.
count (int): Number of devices to request. Optional.
Set to -1 to request all available devices.
device_ids (list): List of strings for device IDs. Optional.
Set either ``count`` or ``device_ids``.
capabilities (list): List of lists of strings to request
capabilities. Optional. The top-level list acts as an OR and
each sub-list as an AND; the driver will try to satisfy
one of the sub-lists.
Available capabilities for the ``nvidia`` driver can be found
`here <https://github.com/NVIDIA/nvidia-container-runtime>`_.
options (dict): Driver-specific options. Optional.
"""
def __init__(self, **kwargs):
driver = kwargs.get('driver', kwargs.get('Driver'))
count = kwargs.get('count', kwargs.get('Count'))
device_ids = kwargs.get('device_ids', kwargs.get('DeviceIDs'))
capabilities = kwargs.get('capabilities', kwargs.get('Capabilities'))
options = kwargs.get('options', kwargs.get('Options'))
if driver is None:
driver = ''
elif not isinstance(driver, str):
raise ValueError('DeviceRequest.driver must be a string')
if count is None:
count = 0
elif not isinstance(count, int):
raise ValueError('DeviceRequest.count must be an integer')
if device_ids is None:
device_ids = []
elif not isinstance(device_ids, list):
raise ValueError('DeviceRequest.device_ids must be a list')
if capabilities is None:
capabilities = []
elif not isinstance(capabilities, list):
raise ValueError('DeviceRequest.capabilities must be a list')
if options is None:
options = {}
elif not isinstance(options, dict):
raise ValueError('DeviceRequest.options must be a dict')
super().__init__({
'Driver': driver,
'Count': count,
'DeviceIDs': device_ids,
'Capabilities': capabilities,
'Options': options
})
@property
def driver(self):
return self['Driver']
@driver.setter
def driver(self, value):
self['Driver'] = value
@property
def count(self):
return self['Count']
@count.setter
def count(self, value):
self['Count'] = value
@property
def device_ids(self):
return self['DeviceIDs']
@device_ids.setter
def device_ids(self, value):
self['DeviceIDs'] = value
@property
def capabilities(self):
return self['Capabilities']
@capabilities.setter
def capabilities(self, value):
self['Capabilities'] = value
@property
def options(self):
return self['Options']
@options.setter
def options(self, value):
self['Options'] = value
class HostConfig(dict):
def __init__(self, version, binds=None, port_bindings=None,
lxc_conf=None, publish_all_ports=False, links=None,
@ -176,7 +272,8 @@ class HostConfig(dict):
volume_driver=None, cpu_count=None, cpu_percent=None,
nano_cpus=None, cpuset_mems=None, runtime=None, mounts=None,
cpu_rt_period=None, cpu_rt_runtime=None,
device_cgroup_rules=None):
device_cgroup_rules=None, device_requests=None,
cgroupns=None):
if mem_limit is not None:
self['Memory'] = parse_bytes(mem_limit)
@ -199,7 +296,7 @@ class HostConfig(dict):
self['MemorySwappiness'] = mem_swappiness
if shm_size is not None:
if isinstance(shm_size, six.string_types):
if isinstance(shm_size, str):
shm_size = parse_bytes(shm_size)
self['ShmSize'] = shm_size
@ -236,10 +333,11 @@ class HostConfig(dict):
if dns_search:
self['DnsSearch'] = dns_search
if network_mode:
self['NetworkMode'] = network_mode
elif network_mode is None:
self['NetworkMode'] = 'default'
if network_mode == 'host' and port_bindings:
raise host_config_incompatible_error(
'network_mode', 'host', 'port_bindings'
)
self['NetworkMode'] = network_mode or 'default'
if restart_policy:
if not isinstance(restart_policy, dict):
@ -259,7 +357,7 @@ class HostConfig(dict):
self['Devices'] = parse_devices(devices)
if group_add:
self['GroupAdd'] = [six.text_type(grp) for grp in group_add]
self['GroupAdd'] = [str(grp) for grp in group_add]
if dns is not None:
self['Dns'] = dns
@ -279,11 +377,11 @@ class HostConfig(dict):
if not isinstance(sysctls, dict):
raise host_config_type_error('sysctls', sysctls, 'dict')
self['Sysctls'] = {}
for k, v in six.iteritems(sysctls):
self['Sysctls'][k] = six.text_type(v)
for k, v in sysctls.items():
self['Sysctls'][k] = str(v)
if volumes_from is not None:
if isinstance(volumes_from, six.string_types):
if isinstance(volumes_from, str):
volumes_from = volumes_from.split(',')
self['VolumesFrom'] = volumes_from
@ -305,7 +403,7 @@ class HostConfig(dict):
if isinstance(lxc_conf, dict):
formatted = []
for k, v in six.iteritems(lxc_conf):
for k, v in lxc_conf.items():
formatted.append({'Key': k, 'Value': str(v)})
lxc_conf = formatted
@ -460,7 +558,7 @@ class HostConfig(dict):
self["PidsLimit"] = pids_limit
if isolation:
if not isinstance(isolation, six.string_types):
if not isinstance(isolation, str):
raise host_config_type_error('isolation', isolation, 'string')
if version_lt(version, '1.24'):
raise host_config_version_error('isolation', '1.24')
@ -510,7 +608,7 @@ class HostConfig(dict):
self['CpuPercent'] = cpu_percent
if nano_cpus:
if not isinstance(nano_cpus, six.integer_types):
if not isinstance(nano_cpus, int):
raise host_config_type_error('nano_cpus', nano_cpus, 'int')
if version_lt(version, '1.25'):
raise host_config_version_error('nano_cpus', '1.25')
@ -536,6 +634,22 @@ class HostConfig(dict):
)
self['DeviceCgroupRules'] = device_cgroup_rules
if device_requests is not None:
if version_lt(version, '1.40'):
raise host_config_version_error('device_requests', '1.40')
if not isinstance(device_requests, list):
raise host_config_type_error(
'device_requests', device_requests, 'list'
)
self['DeviceRequests'] = []
for req in device_requests:
if not isinstance(req, DeviceRequest):
req = DeviceRequest(**req)
self['DeviceRequests'].append(req)
if cgroupns:
self['CgroupnsMode'] = cgroupns
def host_config_type_error(param, param_value, expected):
error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
@ -553,6 +667,13 @@ def host_config_value_error(param, param_value):
return ValueError(error_msg.format(param, param_value))
def host_config_incompatible_error(param, param_value, incompatible_param):
error_msg = '\"{1}\" {0} is incompatible with {2}'
return errors.InvalidArgument(
error_msg.format(param, param_value, incompatible_param)
)
class ContainerConfig(dict):
def __init__(
self, version, image, command, hostname=None, user=None, detach=False,
@ -580,17 +701,17 @@ class ContainerConfig(dict):
'version 1.29'
)
if isinstance(command, six.string_types):
if isinstance(command, str):
command = split_command(command)
if isinstance(entrypoint, six.string_types):
if isinstance(entrypoint, str):
entrypoint = split_command(entrypoint)
if isinstance(environment, dict):
environment = format_environment(environment)
if isinstance(labels, list):
labels = dict((lbl, six.text_type('')) for lbl in labels)
labels = {lbl: '' for lbl in labels}
if isinstance(ports, list):
exposed_ports = {}
@ -601,10 +722,10 @@ class ContainerConfig(dict):
if len(port_definition) == 2:
proto = port_definition[1]
port = port_definition[0]
exposed_ports['{0}/{1}'.format(port, proto)] = {}
exposed_ports[f'{port}/{proto}'] = {}
ports = exposed_ports
if isinstance(volumes, six.string_types):
if isinstance(volumes, str):
volumes = [volumes, ]
if isinstance(volumes, list):
@ -633,7 +754,7 @@ class ContainerConfig(dict):
'Hostname': hostname,
'Domainname': domainname,
'ExposedPorts': ports,
'User': six.text_type(user) if user is not None else None,
'User': str(user) if user is not None else None,
'Tty': tty,
'OpenStdin': stdin_open,
'StdinOnce': stdin_once,

View File

@ -8,7 +8,7 @@ except ImportError:
from ..errors import DockerException
class CancellableStream(object):
class CancellableStream:
"""
Stream wrapper for real-time events, logs, etc. from the server.
@ -32,7 +32,7 @@ class CancellableStream(object):
return next(self._stream)
except urllib3.exceptions.ProtocolError:
raise StopIteration
except socket.error:
except OSError:
raise StopIteration
next = __next__


@ -1,7 +1,5 @@
from .base import DictType
import six
class Healthcheck(DictType):
"""
@ -31,7 +29,7 @@ class Healthcheck(DictType):
"""
def __init__(self, **kwargs):
test = kwargs.get('test', kwargs.get('Test'))
if isinstance(test, six.string_types):
if isinstance(test, str):
test = ["CMD-SHELL", test]
interval = kwargs.get('interval', kwargs.get('Interval'))
@ -39,7 +37,7 @@ class Healthcheck(DictType):
retries = kwargs.get('retries', kwargs.get('Retries'))
start_period = kwargs.get('start_period', kwargs.get('StartPeriod'))
super(Healthcheck, self).__init__({
super().__init__({
'Test': test,
'Interval': interval,
'Timeout': timeout,
@ -53,7 +51,7 @@ class Healthcheck(DictType):
@test.setter
def test(self, value):
if isinstance(value, six.string_types):
if isinstance(value, str):
value = ["CMD-SHELL", value]
self['Test'] = value
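A minimal construction sketch (string tests are wrapped as `CMD-SHELL`; durations are expressed in nanoseconds):

```python
from docker.types import Healthcheck

SECOND = 1_000_000_000  # Engine healthcheck durations are nanoseconds

hc = Healthcheck(
    test='curl -f http://localhost/ || exit 1',  # becomes ["CMD-SHELL", ...]
    interval=30 * SECOND,
    timeout=5 * SECOND,
    retries=3,
)
```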


@ -4,7 +4,7 @@ from ..utils import normalize_links, version_lt
class EndpointConfig(dict):
def __init__(self, version, aliases=None, links=None, ipv4_address=None,
ipv6_address=None, link_local_ips=None):
ipv6_address=None, link_local_ips=None, driver_opt=None):
if version_lt(version, '1.22'):
raise errors.InvalidVersion(
'Endpoint config is not supported for API version < 1.22'
@ -33,6 +33,15 @@ class EndpointConfig(dict):
if ipam_config:
self['IPAMConfig'] = ipam_config
if driver_opt:
if version_lt(version, '1.32'):
raise errors.InvalidVersion(
'DriverOpts is not supported for API version < 1.32'
)
if not isinstance(driver_opt, dict):
raise TypeError('driver_opt must be a dictionary')
self['DriverOpts'] = driver_opt
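Sketch of passing `driver_opt` through the low-level client (the option key mirrors the integration test added later in this commit and is otherwise illustrative; requires API 1.32+):

```python
import docker

cli = docker.APIClient()
networking_config = cli.create_networking_config({
    'bridge': cli.create_endpoint_config(
        driver_opt={'com.docker-py.setting': 'on'}
    )
})
```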
class NetworkingConfig(dict):
def __init__(self, endpoints_config=None):


@ -1,5 +1,3 @@
import six
from .. import errors
from ..constants import IS_WINDOWS_PLATFORM
from ..utils import (
@ -112,16 +110,21 @@ class ContainerSpec(dict):
containers. Only used for Windows containers.
init (boolean): Run an init inside the container that forwards signals
and reaps processes.
cap_add (:py:class:`list`): A list of kernel capabilities to add to the
default set for the container.
cap_drop (:py:class:`list`): A list of kernel capabilities to drop from
the default set for the container.
"""
def __init__(self, image, command=None, args=None, hostname=None, env=None,
workdir=None, user=None, labels=None, mounts=None,
stop_grace_period=None, secrets=None, tty=None, groups=None,
open_stdin=None, read_only=None, stop_signal=None,
healthcheck=None, hosts=None, dns_config=None, configs=None,
privileges=None, isolation=None, init=None):
privileges=None, isolation=None, init=None, cap_add=None,
cap_drop=None):
self['Image'] = image
if isinstance(command, six.string_types):
if isinstance(command, str):
command = split_command(command)
self['Command'] = command
self['Args'] = args
@ -151,7 +154,7 @@ class ContainerSpec(dict):
if mounts is not None:
parsed_mounts = []
for mount in mounts:
if isinstance(mount, six.string_types):
if isinstance(mount, str):
parsed_mounts.append(Mount.parse_mount_string(mount))
else:
# If mount already parsed
@ -188,6 +191,18 @@ class ContainerSpec(dict):
if init is not None:
self['Init'] = init
if cap_add is not None:
if not isinstance(cap_add, list):
raise TypeError('cap_add must be a list')
self['CapabilityAdd'] = cap_add
if cap_drop is not None:
if not isinstance(cap_drop, list):
raise TypeError('cap_drop must be a list')
self['CapabilityDrop'] = cap_drop
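Usage sketch for the new capability parameters (capability names are illustrative):

```python
from docker.types import ContainerSpec, TaskTemplate

spec = ContainerSpec(
    'busybox', command='top',
    cap_add=['CAP_SYS_ADMIN'],  # added to the default capability set
    cap_drop=['CAP_NET_RAW'],   # dropped from the default capability set
)
task_template = TaskTemplate(spec)
```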
class Mount(dict):
"""
@ -224,7 +239,7 @@ class Mount(dict):
self['Source'] = source
if type not in ('bind', 'volume', 'tmpfs', 'npipe'):
raise errors.InvalidArgument(
'Unsupported mount type: "{}"'.format(type)
f'Unsupported mount type: "{type}"'
)
self['Type'] = type
self['ReadOnly'] = read_only
@ -260,7 +275,7 @@ class Mount(dict):
elif type == 'tmpfs':
tmpfs_opts = {}
if tmpfs_mode:
if not isinstance(tmpfs_mode, six.integer_types):
if not isinstance(tmpfs_mode, int):
raise errors.InvalidArgument(
'tmpfs_mode must be an integer'
)
@ -280,7 +295,7 @@ class Mount(dict):
parts = string.split(':')
if len(parts) > 3:
raise errors.InvalidArgument(
'Invalid mount format "{0}"'.format(string)
f'Invalid mount format "{string}"'
)
if len(parts) == 1:
return cls(target=parts[0], source=None)
@ -347,7 +362,7 @@ def _convert_generic_resources_dict(generic_resources):
' (found {})'.format(type(generic_resources))
)
resources = []
for kind, value in six.iteritems(generic_resources):
for kind, value in generic_resources.items():
resource_type = None
if isinstance(value, int):
resource_type = 'DiscreteResourceSpec'
@ -421,7 +436,8 @@ class UpdateConfig(dict):
class RollbackConfig(UpdateConfig):
"""
Used to specify the way containe rollbacks should be performed by a service
Used to specify the way container rollbacks should be performed by a
service
Args:
parallelism (int): Maximum number of tasks to be rolled back in one
@ -443,7 +459,7 @@ class RollbackConfig(UpdateConfig):
pass
class RestartConditionTypesEnum(object):
class RestartConditionTypesEnum:
_values = (
'none',
'on-failure',
@ -474,7 +490,7 @@ class RestartPolicy(dict):
max_attempts=0, window=0):
if condition not in self.condition_types._values:
raise TypeError(
'Invalid RestartPolicy condition {0}'.format(condition)
f'Invalid RestartPolicy condition {condition}'
)
self['Condition'] = condition
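Quick sketch of the contract (valid conditions are `none`, `on-failure`, and `any`; anything else raises):

```python
from docker.types import RestartPolicy

policy = RestartPolicy(condition='on-failure', max_attempts=3)

try:
    RestartPolicy(condition='sometimes')
except TypeError as e:
    print(e)  # Invalid RestartPolicy condition sometimes
```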
@ -533,7 +549,7 @@ def convert_service_ports(ports):
)
result = []
for k, v in six.iteritems(ports):
for k, v in ports.items():
port_spec = {
'Protocol': 'tcp',
'PublishedPort': k
@ -659,10 +675,12 @@ class Placement(dict):
are provided in order from highest to lowest precedence and
are expressed as ``(strategy, descriptor)`` tuples. See
:py:class:`PlacementPreference` for details.
maxreplicas (int): Maximum number of replicas per node
platforms (:py:class:`list` of tuple): A list of platforms
expressed as ``(arch, os)`` tuples
"""
def __init__(self, constraints=None, preferences=None, platforms=None):
def __init__(self, constraints=None, preferences=None, platforms=None,
maxreplicas=None):
if constraints is not None:
self['Constraints'] = constraints
if preferences is not None:
@ -671,6 +689,8 @@ class Placement(dict):
if isinstance(pref, tuple):
pref = PlacementPreference(*pref)
self['Preferences'].append(pref)
if maxreplicas is not None:
self['MaxReplicas'] = maxreplicas
if platforms:
self['Platforms'] = []
for plat in platforms:
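Sketch of the new `maxreplicas` knob alongside the existing placement options (values are illustrative; the Engine enforces it from API 1.40):

```python
from docker.types import ContainerSpec, Placement, TaskTemplate

placement = Placement(
    constraints=['node.role == worker'],
    maxreplicas=2,  # at most two tasks of this service per node
)
task_template = TaskTemplate(ContainerSpec('nginx:alpine'), placement=placement)
```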


@ -4,8 +4,6 @@ import re
import tarfile
import tempfile
import six
from .fnmatch import fnmatch
from ..constants import IS_WINDOWS_PLATFORM
@ -69,7 +67,7 @@ def create_archive(root, files=None, fileobj=None, gzip=False,
t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
if files is None:
files = build_file_list(root)
extra_names = set(e[0] for e in extra_files)
extra_names = {e[0] for e in extra_files}
for path in files:
if path in extra_names:
# Extra files override context files with the same name
@ -95,9 +93,9 @@ def create_archive(root, files=None, fileobj=None, gzip=False,
try:
with open(full_path, 'rb') as f:
t.addfile(i, f)
except IOError:
raise IOError(
'Can not read file in context: {}'.format(full_path)
except OSError:
raise OSError(
f'Can not read file in context: {full_path}'
)
else:
# Directories, FIFOs, symlinks... don't need to be read.
@ -105,8 +103,9 @@ def create_archive(root, files=None, fileobj=None, gzip=False,
for name, contents in extra_files:
info = tarfile.TarInfo(name)
info.size = len(contents)
t.addfile(info, io.BytesIO(contents.encode('utf-8')))
contents_encoded = contents.encode('utf-8')
info.size = len(contents_encoded)
t.addfile(info, io.BytesIO(contents_encoded))
t.close()
fileobj.seek(0)
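A sketch of the `extra_files` path this hunk fixes (the size of each injected entry must come from the encoded bytes); `/tmp/app` stands in for any existing context directory:

```python
from docker.utils.build import create_archive

# Inject a generated Dockerfile into the build context without touching disk.
ctx = create_archive(
    root='/tmp/app',  # placeholder: must be an existing directory
    extra_files=[('Dockerfile', 'FROM busybox\nCMD ["true"]\n')],
)
```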
@ -118,12 +117,8 @@ def mkbuildcontext(dockerfile):
t = tarfile.open(mode='w', fileobj=f)
if isinstance(dockerfile, io.StringIO):
dfinfo = tarfile.TarInfo('Dockerfile')
if six.PY3:
raise TypeError('Please use io.BytesIO to create in-memory '
'Dockerfiles with Python 3')
else:
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
raise TypeError('Please use io.BytesIO to create in-memory '
'Dockerfiles with Python 3')
elif isinstance(dockerfile, io.BytesIO):
dfinfo = tarfile.TarInfo('Dockerfile')
dfinfo.size = len(dockerfile.getvalue())
@ -153,7 +148,7 @@ def walk(root, patterns, default=True):
# Heavily based on
# https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go
class PatternMatcher(object):
class PatternMatcher:
def __init__(self, patterns):
self.patterns = list(filter(
lambda p: p.dirs, [Pattern(p) for p in patterns]
@ -211,13 +206,12 @@ class PatternMatcher(object):
break
if skip:
continue
for sub in rec_walk(cur):
yield sub
yield from rec_walk(cur)
return rec_walk(root)
class Pattern(object):
class Pattern:
def __init__(self, pattern_str):
self.exclusion = False
if pattern_str.startswith('!'):
@ -230,6 +224,9 @@ class Pattern(object):
@classmethod
def normalize(cls, p):
# Remove trailing spaces
p = p.strip()
# Leading and trailing slashes are not relevant. Yes,
# "foo.py/" must exclude the "foo.py" regular file. "."
# components are not relevant either, even if the whole


@ -18,11 +18,11 @@ def find_config_file(config_path=None):
os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4
]))
log.debug("Trying paths: {0}".format(repr(paths)))
log.debug(f"Trying paths: {repr(paths)}")
for path in paths:
if os.path.exists(path):
log.debug("Found file at path: {0}".format(path))
log.debug(f"Found file at path: {path}")
return path
log.debug("No config file found")
@ -57,7 +57,7 @@ def load_general_config(config_path=None):
try:
with open(config_file) as f:
return json.load(f)
except (IOError, ValueError) as e:
except (OSError, ValueError) as e:
# In the case of a legacy `.dockercfg` file, we won't
# be able to load any JSON data.
log.debug(e)


@ -27,7 +27,7 @@ def minimum_version(version):
def wrapper(self, *args, **kwargs):
if utils.version_lt(self._version, version):
raise errors.InvalidVersion(
'{0} is not available for version < {1}'.format(
'{} is not available for version < {}'.format(
f.__name__, version
)
)
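The decorator reads `self._version` from the wrapped object; a self-contained sketch of the failure mode (`FakeAPI` is hypothetical):

```python
from docker import errors
from docker.utils.decorators import minimum_version

class FakeAPI:
    _version = '1.24'

    @minimum_version('1.30')
    def shiny_feature(self):
        return 'ok'

try:
    FakeAPI().shiny_feature()
except errors.InvalidVersion as e:
    print(e)  # shiny_feature is not available for version < 1.30
```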


@ -108,7 +108,7 @@ def translate(pat):
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
res = '%s[%s]' % (res, stuff)
res = f'{res}[{stuff}]'
else:
res = res + re.escape(c)


@ -1,11 +1,6 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import json
import json.decoder
import six
from ..errors import StreamParseError
@ -20,7 +15,7 @@ def stream_as_text(stream):
instead of byte streams.
"""
for data in stream:
if not isinstance(data, six.text_type):
if not isinstance(data, str):
data = data.decode('utf-8', 'replace')
yield data
@ -46,8 +41,8 @@ def json_stream(stream):
return split_buffer(stream, json_splitter, json_decoder.decode)
def line_splitter(buffer, separator=u'\n'):
index = buffer.find(six.text_type(separator))
def line_splitter(buffer, separator='\n'):
index = buffer.find(str(separator))
if index == -1:
return None
return buffer[:index + 1], buffer[index + 1:]
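Quick check of the splitter contract (it returns `None` until a complete line is buffered):

```python
from docker.utils.json_stream import line_splitter

assert line_splitter('one\ntwo') == ('one\n', 'two')
assert line_splitter('no newline yet') is None
```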
@ -61,7 +56,7 @@ def split_buffer(stream, splitter=None, decoder=lambda a: a):
of the input.
"""
splitter = splitter or line_splitter
buffered = six.text_type('')
buffered = ''
for data in stream_as_text(stream):
buffered += data


@ -3,7 +3,7 @@ import re
PORT_SPEC = re.compile(
"^" # Match full string
"(" # External part
r"((?P<host>[a-fA-F\d.:]+):)?" # Address
r"(\[?(?P<host>[a-fA-F\d.:]+)\]?:)?" # Address
r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
")?"
r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
@ -49,7 +49,7 @@ def port_range(start, end, proto, randomly_available_port=False):
if not end:
return [start + proto]
if randomly_available_port:
return ['{}-{}'.format(start, end) + proto]
return [f'{start}-{end}' + proto]
return [str(port) + proto for port in range(int(start), int(end) + 1)]
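With the bracket-aware regex, bracketed IPv6 bind addresses now parse; expected shapes shown as comments:

```python
from docker.utils.ports import split_port

internal, external = split_port('[::1]:8080:80/tcp')
print(internal)  # ['80/tcp']
print(external)  # [('::1', '8080')]
```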


@ -4,8 +4,6 @@ import select
import socket as pysocket
import struct
import six
try:
from ..transport import NpipeSocket
except ImportError:
@ -27,16 +25,16 @@ def read(socket, n=4096):
recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
if six.PY3 and not isinstance(socket, NpipeSocket):
if not isinstance(socket, NpipeSocket):
select.select([socket], [], [])
try:
if hasattr(socket, 'recv'):
return socket.recv(n)
if six.PY3 and isinstance(socket, getattr(pysocket, 'SocketIO')):
if isinstance(socket, getattr(pysocket, 'SocketIO')):
return socket.read(n)
return os.read(socket.fileno(), n)
except EnvironmentError as e:
except OSError as e:
if e.errno not in recoverable_errors:
raise
@ -46,7 +44,7 @@ def read_exactly(socket, n):
Reads exactly n bytes from socket
Raises SocketError if there isn't enough data
"""
data = six.binary_type()
data = bytes()
while len(data) < n:
next_data = read(socket, n - len(data))
if not next_data:
@ -134,7 +132,7 @@ def consume_socket_output(frames, demux=False):
if demux is False:
# If the streams are multiplexed, the generator returns strings, that
# we just need to concatenate.
return six.binary_type().join(frames)
return bytes().join(frames)
# If the streams are demultiplexed, the generator yields tuples
# (stdout, stderr)
@ -166,4 +164,4 @@ def demux_adaptor(stream_id, data):
elif stream_id == STDERR:
return (None, data)
else:
raise ValueError('{0} is not a valid stream'.format(stream_id))
raise ValueError(f'{stream_id} is not a valid stream')
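The routing contract as a quick check (`STDOUT` is frame id 1, `STDERR` is 2):

```python
from docker.utils.socket import demux_adaptor

assert demux_adaptor(1, b'out') == (b'out', None)  # STDOUT frame
assert demux_adaptor(2, b'err') == (None, b'err')  # STDERR frame
```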


@ -1,26 +1,27 @@
import base64
import collections
import json
import os
import os.path
import shlex
import string
from datetime import datetime
from distutils.version import StrictVersion
import six
from packaging.version import Version
from .. import errors
from .. import tls
from ..constants import DEFAULT_HTTP_HOST
from ..constants import DEFAULT_UNIX_SOCKET
from ..constants import DEFAULT_NPIPE
from ..constants import BYTE_UNITS
from ..tls import TLSConfig
if six.PY2:
from urllib import splitnport
from urlparse import urlparse
else:
from urllib.parse import splitnport, urlparse
from urllib.parse import urlparse, urlunparse
URLComponents = collections.namedtuple(
'URLComponents',
'scheme netloc url params query fragment',
)
def create_ipam_pool(*args, **kwargs):
@ -39,8 +40,7 @@ def create_ipam_config(*args, **kwargs):
def decode_json_header(header):
data = base64.b64decode(header)
if six.PY3:
data = data.decode('utf-8')
data = data.decode('utf-8')
return json.loads(data)
@ -56,8 +56,8 @@ def compare_version(v1, v2):
>>> compare_version(v2, v2)
0
"""
s1 = StrictVersion(v1)
s2 = StrictVersion(v2)
s1 = Version(v1)
s2 = Version(v2)
if s1 == s2:
return 0
elif s1 > s2:
@ -80,7 +80,7 @@ def _convert_port_binding(binding):
if len(binding) == 2:
result['HostPort'] = binding[1]
result['HostIp'] = binding[0]
elif isinstance(binding[0], six.string_types):
elif isinstance(binding[0], str):
result['HostIp'] = binding[0]
else:
result['HostPort'] = binding[0]
@ -104,7 +104,7 @@ def _convert_port_binding(binding):
def convert_port_bindings(port_bindings):
result = {}
for k, v in six.iteritems(port_bindings):
for k, v in iter(port_bindings.items()):
key = str(k)
if '/' not in key:
key += '/tcp'
@ -121,7 +121,7 @@ def convert_volume_binds(binds):
result = []
for k, v in binds.items():
if isinstance(k, six.binary_type):
if isinstance(k, bytes):
k = k.decode('utf-8')
if isinstance(v, dict):
@ -132,7 +132,7 @@ def convert_volume_binds(binds):
)
bind = v['bind']
if isinstance(bind, six.binary_type):
if isinstance(bind, bytes):
bind = bind.decode('utf-8')
if 'ro' in v:
@ -143,13 +143,13 @@ def convert_volume_binds(binds):
mode = 'rw'
result.append(
six.text_type('{0}:{1}:{2}').format(k, bind, mode)
f'{k}:{bind}:{mode}'
)
else:
if isinstance(v, six.binary_type):
if isinstance(v, bytes):
v = v.decode('utf-8')
result.append(
six.text_type('{0}:{1}:rw').format(k, v)
f'{k}:{v}:rw'
)
return result
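Conversion sketch, including the `bytes` decoding exercised above:

```python
from docker.utils import convert_volume_binds

binds = convert_volume_binds({
    b'/host/data': {'bind': '/data', 'mode': 'ro'},
    '/host/logs': '/logs',
})
print(sorted(binds))  # ['/host/data:/data:ro', '/host/logs:/logs:rw']
```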
@ -166,7 +166,7 @@ def convert_tmpfs_mounts(tmpfs):
result = {}
for mount in tmpfs:
if isinstance(mount, six.string_types):
if isinstance(mount, str):
if ":" in mount:
name, options = mount.split(":", 1)
else:
@ -191,7 +191,7 @@ def convert_service_networks(networks):
result = []
for n in networks:
if isinstance(n, six.string_types):
if isinstance(n, str):
n = {'Target': n}
result.append(n)
return result
@ -208,10 +208,6 @@ def parse_repository_tag(repo_name):
def parse_host(addr, is_win32=False, tls=False):
path = ''
port = None
host = None
# Sensible defaults
if not addr and is_win32:
return DEFAULT_NPIPE
@ -240,14 +236,14 @@ def parse_host(addr, is_win32=False, tls=False):
if proto not in ('tcp', 'unix', 'npipe', 'ssh'):
raise errors.DockerException(
"Invalid bind address protocol: {}".format(addr)
f"Invalid bind address protocol: {addr}"
)
if proto == 'tcp' and not parsed_url.netloc:
# "tcp://" is exceptionally disallowed by convention;
# omitting a hostname for other protocols is fine
raise errors.DockerException(
'Invalid bind address format: {}'.format(addr)
f'Invalid bind address format: {addr}'
)
if any([
@ -255,7 +251,7 @@ def parse_host(addr, is_win32=False, tls=False):
parsed_url.password
]):
raise errors.DockerException(
'Invalid bind address format: {}'.format(addr)
f'Invalid bind address format: {addr}'
)
if parsed_url.path and proto == 'ssh':
@ -270,20 +266,20 @@ def parse_host(addr, is_win32=False, tls=False):
# to be valid and equivalent to unix:///path
path = '/'.join((parsed_url.hostname, path))
netloc = parsed_url.netloc
if proto in ('tcp', 'ssh'):
# parsed_url.hostname strips brackets from IPv6 addresses,
# which can be problematic hence our use of splitnport() instead.
host, port = splitnport(parsed_url.netloc)
if port is None or port < 0:
port = parsed_url.port or 0
if port <= 0:
if proto != 'ssh':
raise errors.DockerException(
'Invalid bind address format: port is required:'
' {}'.format(addr)
)
port = 22
netloc = f'{parsed_url.netloc}:{port}'
if not host:
host = DEFAULT_HTTP_HOST
if not parsed_url.hostname:
netloc = f'{DEFAULT_HTTP_HOST}:{port}'
# Rewrite schemes to fit library internals (requests adapters)
if proto == 'tcp':
@ -292,8 +288,16 @@ def parse_host(addr, is_win32=False, tls=False):
proto = 'http+unix'
if proto in ('http+unix', 'npipe'):
return "{}://{}".format(proto, path).rstrip('/')
return '{0}://{1}:{2}{3}'.format(proto, host, port, path).rstrip('/')
return f"{proto}://{path}".rstrip('/')
return urlunparse(URLComponents(
scheme=proto,
netloc=netloc,
url=path,
params='',
query='',
fragment='',
)).rstrip('/')
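Behaviour sketch for the rewritten address handling (expected values assume the default `tls=False`):

```python
from docker.utils import parse_host

print(parse_host('tcp://127.0.0.1:2375'))  # http://127.0.0.1:2375
print(parse_host('ssh://user@remote'))     # ssh://user@remote:22 (port defaults to 22)
print(parse_host('tcp://[::1]:2375'))      # http://[::1]:2375 (brackets preserved)
```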
def parse_devices(devices):
@ -302,9 +306,9 @@ def parse_devices(devices):
if isinstance(device, dict):
device_list.append(device)
continue
if not isinstance(device, six.string_types):
if not isinstance(device, str):
raise errors.DockerException(
'Invalid device type {0}'.format(type(device))
f'Invalid device type {type(device)}'
)
device_mapping = device.split(':')
if device_mapping:
@ -358,7 +362,7 @@ def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
# so if it's not set already then set it to false.
assert_hostname = False
params['tls'] = tls.TLSConfig(
params['tls'] = TLSConfig(
client_cert=(os.path.join(cert_path, 'cert.pem'),
os.path.join(cert_path, 'key.pem')),
ca_cert=os.path.join(cert_path, 'ca.pem'),
@ -372,13 +376,13 @@ def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
def convert_filters(filters):
result = {}
for k, v in six.iteritems(filters):
for k, v in iter(filters.items()):
if isinstance(v, bool):
v = 'true' if v else 'false'
if not isinstance(v, list):
v = [v, ]
result[k] = [
str(item) if not isinstance(item, six.string_types) else item
str(item) if not isinstance(item, str) else item
for item in v
]
return json.dumps(result)
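Sketch of the JSON payload the daemon receives:

```python
from docker.utils import convert_filters

print(convert_filters({'dangling': True, 'label': ['stage=prod']}))
# {"dangling": ["true"], "label": ["stage=prod"]}
```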
@ -391,7 +395,7 @@ def datetime_to_timestamp(dt):
def parse_bytes(s):
if isinstance(s, six.integer_types + (float,)):
if isinstance(s, (int, float,)):
return s
if len(s) == 0:
return 0
@ -412,10 +416,10 @@ def parse_bytes(s):
if suffix in units.keys() or suffix.isdigit():
try:
digits = int(digits_part)
digits = float(digits_part)
except ValueError:
raise errors.DockerException(
'Failed converting the string value for memory ({0}) to'
'Failed converting the string value for memory ({}) to'
' an integer.'.format(digits_part)
)
@ -423,7 +427,7 @@ def parse_bytes(s):
s = int(digits * units[suffix])
else:
raise errors.DockerException(
'The specified value for memory ({0}) should specify the'
'The specified value for memory ({}) should specify the'
' units. The postfix should be one of the `b` `k` `m` `g`'
' characters'.format(s)
)
@ -433,9 +437,9 @@ def parse_bytes(s):
def normalize_links(links):
if isinstance(links, dict):
links = six.iteritems(links)
links = iter(links.items())
return ['{0}:{1}'.format(k, v) if v else k for k, v in sorted(links)]
return [f'{k}:{v}' if v else k for k, v in sorted(links)]
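Two quick checks of the helpers touched here (`parse_bytes` now accepts fractional values thanks to the `float()` change above):

```python
from docker.utils import normalize_links, parse_bytes

assert parse_bytes('1.5g') == 1610612736  # 1.5 * 1024**3, truncated to int
assert normalize_links({'db': 'database'}) == ['db:database']
```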
def parse_env_file(env_file):
@ -445,7 +449,7 @@ def parse_env_file(env_file):
"""
environment = {}
with open(env_file, 'r') as f:
with open(env_file) as f:
for line in f:
if line[0] == '#':
@ -461,15 +465,13 @@ def parse_env_file(env_file):
environment[k] = v
else:
raise errors.DockerException(
'Invalid line in environment file {0}:\n{1}'.format(
'Invalid line in environment file {}:\n{}'.format(
env_file, line))
return environment
def split_command(command):
if six.PY2 and not isinstance(command, six.binary_type):
command = command.encode('utf-8')
return shlex.split(command)
@ -477,22 +479,22 @@ def format_environment(environment):
def format_env(key, value):
if value is None:
return key
if isinstance(value, six.binary_type):
if isinstance(value, bytes):
value = value.decode('utf-8')
return u'{key}={value}'.format(key=key, value=value)
return [format_env(*var) for var in six.iteritems(environment)]
return f'{key}={value}'
return [format_env(*var) for var in iter(environment.items())]
def format_extra_hosts(extra_hosts, task=False):
# Use format dictated by Swarm API if container is part of a task
if task:
return [
'{} {}'.format(v, k) for k, v in sorted(six.iteritems(extra_hosts))
f'{v} {k}' for k, v in sorted(iter(extra_hosts.items()))
]
return [
'{}:{}'.format(k, v) for k, v in sorted(six.iteritems(extra_hosts))
f'{k}:{v}' for k, v in sorted(iter(extra_hosts.items()))
]
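Output sketch for the two formatters:

```python
from docker.utils import format_environment, format_extra_hosts

print(format_environment({'TOKEN': b'secret'}))  # ['TOKEN=secret']
print(format_extra_hosts({'db': '10.0.0.2'}))    # ['db:10.0.0.2']
print(format_extra_hosts({'db': '10.0.0.2'}, task=True))  # ['10.0.0.2 db']
```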


@ -1,2 +1,2 @@
version = "4.3.0-dev"
version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
version = "6.0.0-dev"
version_info = tuple(int(d) for d in version.split("-")[0].split("."))


@ -1,2 +1,2 @@
recommonmark==0.4.0
Sphinx==1.4.6
myst-parser==0.18.0
Sphinx==5.1.1


@ -1,3 +1,8 @@
dl.hide-signature > dt {
display: none;
}
dl.field-list > dt {
/* prevent code blocks from forcing wrapping on the "Parameters" header */
word-break: initial;
}


@ -1,6 +1,151 @@
Change log
==========
5.0.3
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/76?closed=1)
### Features
- Add `cap_add` and `cap_drop` parameters to service create and ContainerSpec
- Add `templating` parameter to config create
### Bugfixes
- Fix getting a read timeout for logs/attach with a tty and slow output
### Miscellaneous
- Fix documentation examples
5.0.2
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/75?closed=1)
### Bugfixes
- Fix `disable_buffering` regression
5.0.1
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/74?closed=1)
### Bugfixes
- Bring back support for ssh identity file
- Cleanup remaining python-2 dependencies
- Fix image save example in docs
### Miscellaneous
- Bump urllib3 to 1.26.5
- Bump requests to 2.26.0
5.0.0
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/70?closed=1)
### Breaking changes
- Remove support for Python 2.7
- Make Python 3.6 the minimum version supported
### Features
- Add `limit` parameter to image search endpoint
### Bugfixes
- Fix `KeyError` exception on secret create
- Verify TLS keys loaded from docker contexts
- Update PORT_SPEC regex to allow square brackets for IPv6 addresses
- Fix containers and images documentation examples
4.4.4
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/73?closed=1)
### Bugfixes
- Remove `LD_LIBRARY_PATH` and `SSL_CERT_FILE` environment variables when shelling out to the ssh client
4.4.3
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/72?closed=1)
### Features
- Add support for docker.types.Placement.MaxReplicas
### Bugfixes
- Fix SSH port parsing when shelling out to the ssh client
4.4.2
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/71?closed=1)
### Bugfixes
- Fix SSH connection bug where the hostname was incorrectly trimmed and the error was hidden
- Fix docs example
### Miscellaneous
- Add Python 3.8 and 3.9 to the setup.py classifier list
4.4.1
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/69?closed=1)
### Bugfixes
- Avoid setting an unsupported parameter for subprocess.Popen on Windows
- Replace use of the deprecated "filter" argument in "docker/api/image"
4.4.0
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/67?closed=1)
### Features
- Add an alternative SSH connection to the paramiko one, based on shelling out to the SSH client, similar to the behaviour of the Docker CLI
- Default image tag to `latest` on `pull`
### Bugfixes
- Fix plugin model upgrade
- Fix examples URL in ulimits
### Miscellaneous
- Improve exception messages for server and client errors
- Bump cryptography from 2.3 to 3.2
4.3.1
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/68?closed=1)
### Miscellaneous
- Set default API version to `auto`
- Fix conversion to bytes for `float`
- Support OpenSSH `identityfile` option
4.3.0
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/64?closed=1)
### Features
- Add `DeviceRequest` type to expose host resources such as GPUs
- Add support for `DriverOpts` in EndpointConfig
- Disable compression by default when using container.get_archive method
### Miscellaneous
- Update default API version to v1.39
- Update test engine version to 19.03.12
4.2.2
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/66?closed=1)
### Bugfixes
- Fix context load for non-docker endpoints
4.2.1
-----
@ -47,7 +192,6 @@ Change log
- Adjust `--platform` tests for changes in docker engine
- Update credentials-helpers to v0.6.3
4.0.2
-----
@ -61,7 +205,6 @@ Change log
- Bumped version of websocket-client
4.0.1
-----
@ -120,7 +263,7 @@ Change log
### Bugfixes
* Fix base_url to keep TCP protocol on utils.py by letting the responsability of changing the
* Fix base_url to keep TCP protocol on utils.py by letting the responsibility of changing the
protocol to `parse_host` afterwards, leaving `base_url` with the original value.
* XFAIL test_attach_stream_and_cancel on TLS
@ -1224,7 +1367,7 @@ like the others
(`Client.volumes`, `Client.create_volume`, `Client.inspect_volume`,
`Client.remove_volume`).
* Added support for the `group_add` parameter in `create_host_config`.
* Added support for the CPU CFS (`cpu_quota` and `cpu_period`) parameteres
* Added support for the CPU CFS (`cpu_quota` and `cpu_period`) parameters
in `create_host_config`.
* Added support for the archive API endpoint (`Client.get_archive`,
`Client.put_archive`).


@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# docker-sdk-python documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 14 15:48:58 2016.
@ -34,24 +33,19 @@ sys.path.insert(0, os.path.abspath('..'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'myst_parser'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
source_suffix = {
'.rst': 'restructuredtext',
'.txt': 'markdown',
'.md': 'markdown',
}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# source_suffix = '.md'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
@ -60,28 +54,28 @@ source_suffix = ['.rst', '.md']
master_doc = 'index'
# General information about the project.
project = u'Docker SDK for Python'
project = 'Docker SDK for Python'
year = datetime.datetime.now().year
copyright = u'%d Docker Inc' % year
author = u'Docker Inc'
copyright = '%d Docker Inc' % year
author = 'Docker Inc'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
with open('../docker/version.py', 'r') as vfile:
with open('../docker/version.py') as vfile:
exec(vfile.read())
# The full version, including alpha/beta/rc tags.
release = version
# The short X.Y version.
version = '{}.{}'.format(version_info[0], version_info[1])
version = f'{version_info[0]}.{version_info[1]}'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
@ -283,8 +277,8 @@ latex_elements = {
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'docker-sdk-python.tex', u'docker-sdk-python Documentation',
u'Docker Inc.', 'manual'),
(master_doc, 'docker-sdk-python.tex', 'docker-sdk-python Documentation',
'Docker Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
@ -325,7 +319,7 @@ latex_documents = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'docker-sdk-python', u'docker-sdk-python Documentation',
(master_doc, 'docker-sdk-python', 'docker-sdk-python Documentation',
[author], 1)
]
@ -340,7 +334,7 @@ man_pages = [
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'docker-sdk-python', u'docker-sdk-python Documentation',
(master_doc, 'docker-sdk-python', 'docker-sdk-python Documentation',
author, 'docker-sdk-python', 'One line description of project.',
'Miscellaneous'),
]


@ -58,7 +58,7 @@ You can stream logs:
.. code-block:: python
>>> for line in container.logs(stream=True):
... print line.strip()
... print(line.strip())
Reticulating spline 2...
Reticulating spline 3...
...


@ -15,7 +15,7 @@ For example, to check the server against a specific CA certificate:
.. code-block:: python
tls_config = docker.tls.TLSConfig(ca_cert='/path/to/ca.pem')
tls_config = docker.tls.TLSConfig(ca_cert='/path/to/ca.pem', verify=True)
client = docker.DockerClient(base_url='<https_url>', tls=tls_config)
This is the equivalent of ``docker --tlsverify --tlscacert /path/to/ca.pem ...``.


@ -1,19 +1,6 @@
appdirs==1.4.3
asn1crypto==0.22.0
backports.ssl-match-hostname==3.5.0.1
cffi==1.10.0
cryptography==2.3
enum34==1.1.6
idna==2.5
ipaddress==1.0.18
packaging==16.8
paramiko==2.4.2
pycparser==2.17
pyOpenSSL==18.0.0
pyparsing==2.2.0
pypiwin32==219; sys_platform == 'win32' and python_version < '3.6'
pypiwin32==223; sys_platform == 'win32' and python_version >= '3.6'
requests==2.20.0
six==1.10.0
urllib3==1.24.3
websocket-client==0.56.0
packaging==21.3
paramiko==2.11.0
pywin32==304; sys_platform == 'win32'
requests==2.28.1
urllib3==1.26.11
websocket-client==1.3.3


@ -52,8 +52,8 @@ class Version(namedtuple('_Version', 'major minor patch stage edition')):
return (int(self.major), int(self.minor), int(self.patch)) + stage
def __str__(self):
stage = '-{}'.format(self.stage) if self.stage else ''
edition = '-{}'.format(self.edition) if self.edition else ''
stage = f'-{self.stage}' if self.stage else ''
edition = f'-{self.edition}' if self.edition else ''
return '.'.join(map(str, self[:3])) + edition + stage


@ -1,6 +1,3 @@
[bdist_wheel]
universal = 1
[metadata]
description_file = README.rst
license = Apache License 2.0


@ -1,5 +1,4 @@
#!/usr/bin/env python
from __future__ import print_function
import codecs
import os
@ -11,37 +10,23 @@ ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
requirements = [
'six >= 1.4.0',
'packaging >= 14.0',
'requests >= 2.26.0',
'urllib3 >= 1.26.0',
'websocket-client >= 0.32.0',
'requests >= 2.14.2, != 2.18.0',
]
extras_require = {
':python_version < "3.5"': 'backports.ssl_match_hostname >= 3.5',
# While not imported explicitly, the ipaddress module is required for
# ssl_match_hostname to verify hosts match with certificates via
# ServerAltname: https://pypi.python.org/pypi/backports.ssl_match_hostname
':python_version < "3.3"': 'ipaddress >= 1.0.16',
# win32 APIs if on Windows (required for npipe support)
# Python 3.6 is only compatible with v220 ; Python < 3.5 is not supported
# on v220 ; ALL versions are broken for v222 (as of 2018-01-26)
':sys_platform == "win32" and python_version < "3.6"': 'pypiwin32==219',
':sys_platform == "win32" and python_version >= "3.6"': 'pypiwin32==223',
':sys_platform == "win32"': 'pywin32>=304',
# If using docker-py over TLS, highly recommend this option is
# pip-installed or pinned.
# TODO: if pip installing both "requests" and "requests[security]", the
# extra package from the "security" option are not installed (see
# https://github.com/pypa/pip/issues/4391). Once that's fixed, instead of
# installing the extra dependencies, install the following instead:
# 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'
'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=1.3.4', 'idna>=2.0.0'],
# This is now a no-op, as similarly the requests[security] extra is
# a no-op as of requests 2.26.0, this is always available/by default now
# see https://github.com/psf/requests/pull/5867
'tls': [],
# Only required when connecting using the ssh:// protocol
'ssh': ['paramiko>=2.4.2'],
'ssh': ['paramiko>=2.4.3'],
}
version = None
@ -72,7 +57,7 @@ setup(
install_requires=requirements,
tests_require=test_requirements,
extras_require=extras_require,
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
python_requires='>=3.7',
zip_safe=False,
test_suite='tests',
classifiers=[
@ -81,16 +66,15 @@ setup(
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Software Development',
'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License',
],
maintainer='Joffrey F',
maintainer_email='joffrey@docker.com',
maintainer='Ulysses Souza',
maintainer_email='ulysses.souza@docker.com',
)


@ -1,7 +1,6 @@
setuptools==44.0.0 # last version with python 2.7 support
coverage==4.5.2
flake8==3.6.0
mock==1.0.1
pytest==4.3.1
pytest-cov==2.6.1
pytest-timeout==1.3.3
setuptools==63.2.0
coverage==6.4.2
flake8==4.0.1
pytest==7.1.2
pytest-cov==3.0.0
pytest-timeout==2.1.0


@ -1,15 +1,20 @@
ARG PYTHON_VERSION=3.7
ARG PYTHON_VERSION=3.10
FROM python:${PYTHON_VERSION}
ARG APT_MIRROR
RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \
&& sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list
&& sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list
RUN apt-get update && apt-get -y install \
RUN apt-get update && apt-get -y install --no-install-recommends \
gnupg2 \
pass \
curl
pass
# Add SSH keys and set permissions
COPY tests/ssh/config/client /root/.ssh
COPY tests/ssh/config/server/known_ed25519.pub /root/.ssh/known_hosts
RUN sed -i '1s;^;dpy-dind-ssh ;' /root/.ssh/known_hosts
RUN chmod -R 600 /root/.ssh
COPY ./tests/gpg-keys /gpg-keys
RUN gpg2 --import gpg-keys/secret


@ -1,4 +1,4 @@
ARG PYTHON_VERSION=2.7
ARG PYTHON_VERSION=3.10
FROM python:${PYTHON_VERSION}
RUN mkdir /tmp/certs

tests/Dockerfile-ssh-dind Normal file

@ -0,0 +1,18 @@
ARG API_VERSION=1.41
ARG ENGINE_VERSION=20.10
FROM docker:${ENGINE_VERSION}-dind
RUN apk add --no-cache --upgrade \
openssh
COPY tests/ssh/config/server /etc/ssh/
RUN chmod -R 600 /etc/ssh
# Set authorized keys for passwordless client connections
COPY tests/ssh/config/client/id_rsa.pub /root/.ssh/authorized_keys
RUN chmod -R 600 /root/.ssh
# RUN echo "root:root" | chpasswd
RUN ln -s /usr/local/bin/docker /usr/bin/docker
EXPOSE 22


@ -11,7 +11,6 @@ import time
import docker
import paramiko
import pytest
import six
def make_tree(dirs, files):
@ -54,7 +53,7 @@ def requires_api_version(version):
return pytest.mark.skipif(
docker.utils.version_lt(test_version, version),
reason="API version is too low (< {0})".format(version)
reason=f"API version is too low (< {version})"
)
@ -86,7 +85,7 @@ def wait_on_condition(condition, delay=0.1, timeout=40):
def random_name():
return u'dockerpytest_{0:x}'.format(random.getrandbits(64))
return f'dockerpytest_{random.getrandbits(64):x}'
def force_leave_swarm(client):
@ -105,11 +104,11 @@ def force_leave_swarm(client):
def swarm_listen_addr():
return '0.0.0.0:{0}'.format(random.randrange(10000, 25000))
return f'0.0.0.0:{random.randrange(10000, 25000)}'
def assert_cat_socket_detached_with_keys(sock, inputs):
if six.PY3 and hasattr(sock, '_sock'):
if hasattr(sock, '_sock'):
sock = sock._sock
for i in inputs:
@ -128,7 +127,7 @@ def assert_cat_socket_detached_with_keys(sock, inputs):
# of the daemon no longer cause this to raise an error.
try:
sock.sendall(b'make sure the socket is closed\n')
except socket.error:
except OSError:
return
sock.sendall(b"make sure the socket is closed\n")


@ -7,7 +7,6 @@ from docker import errors
from docker.utils.proxy import ProxyConfig
import pytest
import six
from .base import BaseAPIIntegrationTest, TEST_IMG
from ..helpers import random_name, requires_api_version, requires_experimental
@ -71,9 +70,8 @@ class BuildTest(BaseAPIIntegrationTest):
assert len(logs) > 0
def test_build_from_stringio(self):
if six.PY3:
return
script = io.StringIO(six.text_type('\n').join([
return
script = io.StringIO('\n'.join([
'FROM busybox',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
@ -83,8 +81,7 @@ class BuildTest(BaseAPIIntegrationTest):
stream = self.client.build(fileobj=script)
logs = ''
for chunk in stream:
if six.PY3:
chunk = chunk.decode('utf-8')
chunk = chunk.decode('utf-8')
logs += chunk
assert logs != ''
@ -103,7 +100,9 @@ class BuildTest(BaseAPIIntegrationTest):
'ignored',
'Dockerfile',
'.dockerignore',
' ignored-with-spaces ', # check that spaces are trimmed
'!ignored/subdir/excepted-file',
'! ignored/subdir/excepted-with-spaces ',
'', # empty line,
'#*', # comment line
]))
@ -114,6 +113,9 @@ class BuildTest(BaseAPIIntegrationTest):
with open(os.path.join(base_dir, '#file.txt'), 'w') as f:
f.write('this file should not be ignored')
with open(os.path.join(base_dir, 'ignored-with-spaces'), 'w') as f:
f.write("this file should be ignored")
subdir = os.path.join(base_dir, 'ignored', 'subdir')
os.makedirs(subdir)
with open(os.path.join(subdir, 'file'), 'w') as f:
@ -122,6 +124,9 @@ class BuildTest(BaseAPIIntegrationTest):
with open(os.path.join(subdir, 'excepted-file'), 'w') as f:
f.write("this file should not be ignored")
with open(os.path.join(subdir, 'excepted-with-spaces'), 'w') as f:
f.write("this file should not be ignored")
tag = 'docker-py-test-build-with-dockerignore'
stream = self.client.build(
path=base_dir,
@ -135,11 +140,11 @@ class BuildTest(BaseAPIIntegrationTest):
self.client.wait(c)
logs = self.client.logs(c)
if six.PY3:
logs = logs.decode('utf-8')
logs = logs.decode('utf-8')
assert sorted(list(filter(None, logs.split('\n')))) == sorted([
'/test/#file.txt',
'/test/ignored/subdir/excepted-with-spaces',
'/test/ignored/subdir/excepted-file',
'/test/not-ignored'
])
@ -339,10 +344,8 @@ class BuildTest(BaseAPIIntegrationTest):
assert self.client.inspect_image(img_name)
ctnr = self.run_container(img_name, 'cat /hosts-file')
self.tmp_containers.append(ctnr)
logs = self.client.logs(ctnr)
if six.PY3:
logs = logs.decode('utf-8')
logs = logs.decode('utf-8')
assert '127.0.0.1\textrahost.local.test' in logs
assert '127.0.0.1\thello.world.test' in logs
@ -377,7 +380,7 @@ class BuildTest(BaseAPIIntegrationTest):
snippet = 'Ancient Temple (Mystic Oriental Dream ~ Ancient Temple)'
script = io.BytesIO(b'\n'.join([
b'FROM busybox',
'RUN sh -c ">&2 echo \'{0}\'"'.format(snippet).encode('utf-8')
f'RUN sh -c ">&2 echo \'{snippet}\'"'.encode('utf-8')
]))
stream = self.client.build(
@ -441,7 +444,7 @@ class BuildTest(BaseAPIIntegrationTest):
@requires_api_version('1.32')
@requires_experimental(until=None)
def test_build_invalid_platform(self):
script = io.BytesIO('FROM busybox\n'.encode('ascii'))
script = io.BytesIO(b'FROM busybox\n')
with pytest.raises(errors.APIError) as excinfo:
stream = self.client.build(fileobj=script, platform='foobar')


@ -72,6 +72,6 @@ class UnixconnTest(unittest.TestCase):
client.close()
del client
assert len(w) == 0, "No warnings produced: {0}".format(
assert len(w) == 0, "No warnings produced: {}".format(
w[0].message
)


@ -1,5 +1,3 @@
# -*- coding: utf-8 -*-
import docker
import pytest
@ -31,7 +29,7 @@ class ConfigAPITest(BaseAPIIntegrationTest):
def test_create_config_unicode_data(self):
config_id = self.client.create_config(
'favorite_character', u'いざよいさくや'
'favorite_character', 'いざよいさくや'
)
self.tmp_configs.append(config_id)
assert 'ID' in config_id
@ -70,3 +68,16 @@ class ConfigAPITest(BaseAPIIntegrationTest):
data = self.client.configs(filters={'name': ['favorite_character']})
assert len(data) == 1
assert data[0]['ID'] == config_id['ID']
@requires_api_version('1.37')
def test_create_config_with_templating(self):
config_id = self.client.create_config(
'favorite_character', 'sakuya izayoi',
templating={'name': 'golang'}
)
self.tmp_configs.append(config_id)
assert 'ID' in config_id
data = self.client.inspect_config(config_id)
assert data['Spec']['Name'] == 'favorite_character'
assert 'Templating' in data['Spec']
assert data['Spec']['Templating']['Name'] == 'golang'


@ -7,7 +7,6 @@ from datetime import datetime
import pytest
import requests
import six
import docker
from .. import helpers
@ -35,7 +34,7 @@ class ListContainersTest(BaseAPIIntegrationTest):
assert len(retrieved) == 1
retrieved = retrieved[0]
assert 'Command' in retrieved
assert retrieved['Command'] == six.text_type('true')
assert retrieved['Command'] == 'true'
assert 'Image' in retrieved
assert re.search(r'alpine:.*', retrieved['Image'])
assert 'Status' in retrieved
@ -104,13 +103,11 @@ class CreateContainerTest(BaseAPIIntegrationTest):
self.client.start(container3_id)
assert self.client.wait(container3_id)['StatusCode'] == 0
logs = self.client.logs(container3_id)
if six.PY3:
logs = logs.decode('utf-8')
assert '{0}_NAME='.format(link_env_prefix1) in logs
assert '{0}_ENV_FOO=1'.format(link_env_prefix1) in logs
assert '{0}_NAME='.format(link_env_prefix2) in logs
assert '{0}_ENV_FOO=1'.format(link_env_prefix2) in logs
logs = self.client.logs(container3_id).decode('utf-8')
assert f'{link_env_prefix1}_NAME=' in logs
assert f'{link_env_prefix1}_ENV_FOO=1' in logs
assert f'{link_env_prefix2}_NAME=' in logs
assert f'{link_env_prefix2}_ENV_FOO=1' in logs
def test_create_with_restart_policy(self):
container = self.client.create_container(
@ -227,9 +224,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
self.client.start(container)
self.client.wait(container)
logs = self.client.logs(container)
if six.PY3:
logs = logs.decode('utf-8')
logs = self.client.logs(container).decode('utf-8')
groups = logs.strip().split(' ')
assert '1000' in groups
assert '1001' in groups
@ -244,9 +239,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
self.client.start(container)
self.client.wait(container)
logs = self.client.logs(container)
if six.PY3:
logs = logs.decode('utf-8')
logs = self.client.logs(container).decode('utf-8')
groups = logs.strip().split(' ')
assert '1000' in groups
@ -279,7 +272,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
expected_msgs = [
"logger: no log driver named 'asdf' is registered",
"looking up logging plugin asdf: plugin \"asdf\" not found",
"error looking up logging plugin asdf: plugin \"asdf\" not found",
]
with pytest.raises(docker.errors.APIError) as excinfo:
# raises an internal server error 500
@ -467,16 +460,13 @@ class CreateContainerTest(BaseAPIIntegrationTest):
def test_create_with_device_cgroup_rules(self):
rule = 'c 7:128 rwm'
ctnr = self.client.create_container(
TEST_IMG, 'cat /sys/fs/cgroup/devices/devices.list',
host_config=self.client.create_host_config(
TEST_IMG, 'true', host_config=self.client.create_host_config(
device_cgroup_rules=[rule]
)
)
self.tmp_containers.append(ctnr)
config = self.client.inspect_container(ctnr)
assert config['HostConfig']['DeviceCgroupRules'] == [rule]
self.client.start(ctnr)
assert rule in self.client.logs(ctnr).decode('utf-8')
def test_create_with_uts_mode(self):
container = self.client.create_container(
@ -494,7 +484,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
)
class VolumeBindTest(BaseAPIIntegrationTest):
def setUp(self):
super(VolumeBindTest, self).setUp()
super().setUp()
self.mount_dest = '/mnt'
@ -515,10 +505,7 @@ class VolumeBindTest(BaseAPIIntegrationTest):
TEST_IMG,
['ls', self.mount_dest],
)
logs = self.client.logs(container)
if six.PY3:
logs = logs.decode('utf-8')
logs = self.client.logs(container).decode('utf-8')
assert self.filename in logs
inspect_data = self.client.inspect_container(container)
self.check_container_data(inspect_data, True)
@ -534,10 +521,8 @@ class VolumeBindTest(BaseAPIIntegrationTest):
TEST_IMG,
['ls', self.mount_dest],
)
logs = self.client.logs(container)
logs = self.client.logs(container).decode('utf-8')
if six.PY3:
logs = logs.decode('utf-8')
assert self.filename in logs
inspect_data = self.client.inspect_container(container)
@ -554,9 +539,7 @@ class VolumeBindTest(BaseAPIIntegrationTest):
host_config=host_config
)
assert container
logs = self.client.logs(container)
if six.PY3:
logs = logs.decode('utf-8')
logs = self.client.logs(container).decode('utf-8')
assert self.filename in logs
inspect_data = self.client.inspect_container(container)
self.check_container_data(inspect_data, True)
@ -573,9 +556,7 @@ class VolumeBindTest(BaseAPIIntegrationTest):
host_config=host_config
)
assert container
logs = self.client.logs(container)
if six.PY3:
logs = logs.decode('utf-8')
logs = self.client.logs(container).decode('utf-8')
assert self.filename in logs
inspect_data = self.client.inspect_container(container)
self.check_container_data(inspect_data, False)
@ -634,7 +615,7 @@ class ArchiveTest(BaseAPIIntegrationTest):
def test_get_file_archive_from_container(self):
data = 'The Maid and the Pocket Watch of Blood'
ctnr = self.client.create_container(
TEST_IMG, 'sh -c "echo {0} > /vol1/data.txt"'.format(data),
TEST_IMG, f'sh -c "echo {data} > /vol1/data.txt"',
volumes=['/vol1']
)
self.tmp_containers.append(ctnr)
@ -645,15 +626,14 @@ class ArchiveTest(BaseAPIIntegrationTest):
for d in strm:
destination.write(d)
destination.seek(0)
retrieved_data = helpers.untar_file(destination, 'data.txt')
if six.PY3:
retrieved_data = retrieved_data.decode('utf-8')
retrieved_data = helpers.untar_file(destination, 'data.txt')\
.decode('utf-8')
assert data == retrieved_data.strip()
def test_get_file_stat_from_container(self):
data = 'The Maid and the Pocket Watch of Blood'
ctnr = self.client.create_container(
TEST_IMG, 'sh -c "echo -n {0} > /vol1/data.txt"'.format(data),
TEST_IMG, f'sh -c "echo -n {data} > /vol1/data.txt"',
volumes=['/vol1']
)
self.tmp_containers.append(ctnr)
@ -672,7 +652,7 @@ class ArchiveTest(BaseAPIIntegrationTest):
test_file.seek(0)
ctnr = self.client.create_container(
TEST_IMG,
'cat {0}'.format(
'cat {}'.format(
os.path.join('/vol1/', os.path.basename(test_file.name))
),
volumes=['/vol1']
@ -683,9 +663,6 @@ class ArchiveTest(BaseAPIIntegrationTest):
self.client.start(ctnr)
self.client.wait(ctnr)
logs = self.client.logs(ctnr)
if six.PY3:
logs = logs.decode('utf-8')
data = data.decode('utf-8')
assert logs.strip() == data
def test_copy_directory_to_container(self):
@ -700,9 +677,7 @@ class ArchiveTest(BaseAPIIntegrationTest):
self.client.put_archive(ctnr, '/vol1', test_tar)
self.client.start(ctnr)
self.client.wait(ctnr)
logs = self.client.logs(ctnr)
if six.PY3:
logs = logs.decode('utf-8')
logs = self.client.logs(ctnr).decode('utf-8')
results = logs.strip().split()
assert 'a.py' in results
assert 'b.py' in results
@ -723,7 +698,7 @@ class RenameContainerTest(BaseAPIIntegrationTest):
if version == '1.5.0':
assert name == inspect['Name']
else:
assert '/{0}'.format(name) == inspect['Name']
assert f'/{name}' == inspect['Name']
class StartContainerTest(BaseAPIIntegrationTest):
@ -829,7 +804,7 @@ class LogsTest(BaseAPIIntegrationTest):
def test_logs(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
TEST_IMG, 'echo {0}'.format(snippet)
TEST_IMG, f'echo {snippet}'
)
id = container['Id']
self.tmp_containers.append(id)
@ -843,7 +818,7 @@ class LogsTest(BaseAPIIntegrationTest):
snippet = '''Line1
Line2'''
container = self.client.create_container(
TEST_IMG, 'echo "{0}"'.format(snippet)
TEST_IMG, f'echo "{snippet}"'
)
id = container['Id']
self.tmp_containers.append(id)
@ -856,12 +831,12 @@ Line2'''
def test_logs_streaming_and_follow(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
TEST_IMG, 'echo {0}'.format(snippet)
TEST_IMG, f'echo {snippet}'
)
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
logs = six.binary_type()
logs = b''
for chunk in self.client.logs(id, stream=True, follow=True):
logs += chunk
@ -876,12 +851,12 @@ Line2'''
def test_logs_streaming_and_follow_and_cancel(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
TEST_IMG, 'sh -c "echo \\"{0}\\" && sleep 3"'.format(snippet)
TEST_IMG, f'sh -c "echo \\"{snippet}\\" && sleep 3"'
)
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
logs = six.binary_type()
logs = b''
generator = self.client.logs(id, stream=True, follow=True)
threading.Timer(1, generator.close).start()
@ -894,7 +869,7 @@ Line2'''
def test_logs_with_dict_instead_of_id(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
TEST_IMG, 'echo {0}'.format(snippet)
TEST_IMG, f'echo {snippet}'
)
id = container['Id']
self.tmp_containers.append(id)
@ -907,7 +882,7 @@ Line2'''
def test_logs_with_tail_0(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
TEST_IMG, 'echo "{0}"'.format(snippet)
TEST_IMG, f'echo "{snippet}"'
)
id = container['Id']
self.tmp_containers.append(id)
@ -921,7 +896,7 @@ Line2'''
def test_logs_with_until(self):
snippet = 'Shanghai Teahouse (Hong Meiling)'
container = self.client.create_container(
TEST_IMG, 'echo "{0}"'.format(snippet)
TEST_IMG, f'echo "{snippet}"'
)
self.tmp_containers.append(container)
@ -1117,7 +1092,7 @@ class ContainerTopTest(BaseAPIIntegrationTest):
self.client.start(container)
res = self.client.top(container)
if not IS_WINDOWS_PLATFORM:
assert res['Titles'] == [u'PID', u'USER', u'TIME', u'COMMAND']
assert res['Titles'] == ['PID', 'USER', 'TIME', 'COMMAND']
assert len(res['Processes']) == 1
assert res['Processes'][0][-1] == 'sleep 60'
self.client.kill(container)
@ -1135,7 +1110,7 @@ class ContainerTopTest(BaseAPIIntegrationTest):
self.client.start(container)
res = self.client.top(container, '-eopid,user')
assert res['Titles'] == [u'PID', u'USER']
assert res['Titles'] == ['PID', 'USER']
assert len(res['Processes']) == 1
assert res['Processes'][0][10] == 'sleep 60'
@ -1222,10 +1197,10 @@ class AttachContainerTest(BaseAPIIntegrationTest):
sock = self.client.attach_socket(container, ws=False)
assert sock.fileno() > -1
def test_run_container_reading_socket(self):
def test_run_container_reading_socket_http(self):
line = 'hi there and stuff and things, words!'
# `echo` appends CRLF, `printf` doesn't
command = "printf '{0}'".format(line)
command = f"printf '{line}'"
container = self.client.create_container(TEST_IMG, command,
detach=True, tty=False)
self.tmp_containers.append(container)
@ -1242,12 +1217,33 @@ class AttachContainerTest(BaseAPIIntegrationTest):
data = read_exactly(pty_stdout, next_size)
assert data.decode('utf-8') == line
@pytest.mark.xfail(condition=bool(os.environ.get('DOCKER_CERT_PATH', '')),
reason='DOCKER_CERT_PATH not respected for websockets')
def test_run_container_reading_socket_ws(self):
line = 'hi there and stuff and things, words!'
# `echo` appends CRLF, `printf` doesn't
command = f"printf '{line}'"
container = self.client.create_container(TEST_IMG, command,
detach=True, tty=False)
self.tmp_containers.append(container)
opts = {"stdout": 1, "stream": 1, "logs": 1}
pty_stdout = self.client.attach_socket(container, opts, ws=True)
self.addCleanup(pty_stdout.close)
self.client.start(container)
data = pty_stdout.recv()
assert data.decode('utf-8') == line
@pytest.mark.timeout(10)
def test_attach_no_stream(self):
container = self.client.create_container(
TEST_IMG, 'echo hello'
)
self.tmp_containers.append(container)
self.client.start(container)
self.client.wait(container, condition='not-running')
output = self.client.attach(container, stream=False, logs=True)
assert output == 'hello\n'.encode(encoding='ascii')
@ -1509,7 +1505,7 @@ class LinkTest(BaseAPIIntegrationTest):
# Remove link
linked_name = self.client.inspect_container(container2_id)['Name'][1:]
link_name = '%s/%s' % (linked_name, link_alias)
link_name = f'{linked_name}/{link_alias}'
self.client.remove_container(link_name, link=True)
# Link is gone

View File

@ -239,7 +239,7 @@ class ExecDemuxTest(BaseAPIIntegrationTest):
)
def setUp(self):
super(ExecDemuxTest, self).setUp()
super().setUp()
self.container = self.client.create_container(
TEST_IMG, 'cat', detach=True, stdin_open=True
)

View File

@ -7,9 +7,8 @@ import tempfile
import threading
import pytest
import six
from six.moves import BaseHTTPServer
from six.moves import socketserver
from http.server import SimpleHTTPRequestHandler
import socketserver
import docker
@ -33,7 +32,7 @@ class ListImagesTest(BaseAPIIntegrationTest):
def test_images_quiet(self):
res1 = self.client.images(quiet=True)
assert type(res1[0]) == six.text_type
assert type(res1[0]) == str
class PullImageTest(BaseAPIIntegrationTest):
@ -42,9 +41,9 @@ class PullImageTest(BaseAPIIntegrationTest):
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
res = self.client.pull('hello-world', tag='latest')
res = self.client.pull('hello-world')
self.tmp_imgs.append('hello-world')
assert type(res) == six.text_type
assert type(res) == str
assert len(self.client.images('hello-world')) >= 1
img_info = self.client.inspect_image('hello-world')
assert 'Id' in img_info
@ -55,7 +54,7 @@ class PullImageTest(BaseAPIIntegrationTest):
except docker.errors.APIError:
pass
stream = self.client.pull(
'hello-world', tag='latest', stream=True, decode=True)
'hello-world', stream=True, decode=True)
self.tmp_imgs.append('hello-world')
for chunk in stream:
assert isinstance(chunk, dict)
@ -266,14 +265,14 @@ class ImportImageTest(BaseAPIIntegrationTest):
output = self.client.load_image(data)
assert any([
line for line in output
if 'Loaded image: {}'.format(test_img) in line.get('stream', '')
if f'Loaded image: {test_img}' in line.get('stream', '')
])
@contextlib.contextmanager
def temporary_http_file_server(self, stream):
'''Serve data from an IO stream over HTTP.'''
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
class Handler(SimpleHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-Type', 'application/x-tar')
@ -282,10 +281,10 @@ class ImportImageTest(BaseAPIIntegrationTest):
server = socketserver.TCPServer(('', 0), Handler)
thread = threading.Thread(target=server.serve_forever)
thread.setDaemon(True)
thread.daemon = True
thread.start()
yield 'http://%s:%s' % (socket.gethostname(), server.server_address[1])
yield f'http://{socket.gethostname()}:{server.server_address[1]}'
server.shutdown()
@ -351,7 +350,7 @@ class SaveLoadImagesTest(BaseAPIIntegrationTest):
result = self.client.load_image(f.read())
success = False
result_line = 'Loaded image: {}\n'.format(TEST_IMG)
result_line = f'Loaded image: {TEST_IMG}\n'
for data in result:
print(data)
if 'stream' in data:

View File

@ -9,7 +9,7 @@ from .base import BaseAPIIntegrationTest, TEST_IMG
class TestNetworks(BaseAPIIntegrationTest):
def tearDown(self):
self.client.leave_swarm(force=True)
super(TestNetworks, self).tearDown()
super().tearDown()
def create_network(self, *args, **kwargs):
net_name = random_name()
@ -275,6 +275,27 @@ class TestNetworks(BaseAPIIntegrationTest):
assert 'LinkLocalIPs' in net_cfg['IPAMConfig']
assert net_cfg['IPAMConfig']['LinkLocalIPs'] == ['169.254.8.8']
@requires_api_version('1.32')
def test_create_with_driveropt(self):
container = self.client.create_container(
TEST_IMG, 'top',
networking_config=self.client.create_networking_config(
{
'bridge': self.client.create_endpoint_config(
driver_opt={'com.docker-py.setting': 'on'}
)
}
),
host_config=self.client.create_host_config(network_mode='bridge')
)
self.tmp_containers.append(container)
self.client.start(container)
container_data = self.client.inspect_container(container)
net_cfg = container_data['NetworkSettings']['Networks']['bridge']
assert 'DriverOpts' in net_cfg
assert 'com.docker-py.setting' in net_cfg['DriverOpts']
assert net_cfg['DriverOpts']['com.docker-py.setting'] == 'on'
@requires_api_version('1.22')
def test_create_with_links(self):
net_name, net_id = self.create_network()

View File

@ -22,13 +22,13 @@ class PluginTest(BaseAPIIntegrationTest):
def teardown_method(self, method):
client = self.get_client_instance()
try:
client.disable_plugin(SSHFS)
client.disable_plugin(SSHFS, True)
except docker.errors.APIError:
pass
for p in self.tmp_plugins:
try:
client.remove_plugin(p, force=True)
client.remove_plugin(p)
except docker.errors.APIError:
pass

View File

@ -1,5 +1,3 @@
# -*- coding: utf-8 -*-
import docker
import pytest
@ -31,7 +29,7 @@ class SecretAPITest(BaseAPIIntegrationTest):
def test_create_secret_unicode_data(self):
secret_id = self.client.create_secret(
'favorite_character', u'いざよいさくや'
'favorite_character', 'いざよいさくや'
)
self.tmp_secrets.append(secret_id)
assert 'ID' in secret_id

View File

@ -1,11 +1,8 @@
# -*- coding: utf-8 -*-
import random
import time
import docker
import pytest
import six
from ..helpers import (
force_leave_swarm, requires_api_version, requires_experimental
@ -31,10 +28,10 @@ class ServiceTest(BaseAPIIntegrationTest):
self.client.remove_service(service['ID'])
except docker.errors.APIError:
pass
super(ServiceTest, self).tearDown()
super().tearDown()
def get_service_name(self):
return 'dockerpytest_{0:x}'.format(random.getrandbits(64))
return f'dockerpytest_{random.getrandbits(64):x}'
def get_service_container(self, service_name, attempts=20, interval=0.5,
include_stopped=False):
@ -55,7 +52,7 @@ class ServiceTest(BaseAPIIntegrationTest):
def create_simple_service(self, name=None, labels=None):
if name:
name = 'dockerpytest_{0}'.format(name)
name = f'dockerpytest_{name}'
else:
name = self.get_service_name()
@ -150,7 +147,7 @@ class ServiceTest(BaseAPIIntegrationTest):
else:
break
if six.PY3:
if log_line is not None:
log_line = log_line.decode('utf-8')
assert 'hello\n' in log_line
@ -404,20 +401,20 @@ class ServiceTest(BaseAPIIntegrationTest):
node_id = self.client.nodes()[0]['ID']
container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
task_tmpl = docker.types.TaskTemplate(
container_spec, placement=['node.id=={}'.format(node_id)]
container_spec, placement=[f'node.id=={node_id}']
)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'Placement' in svc_info['Spec']['TaskTemplate']
assert (svc_info['Spec']['TaskTemplate']['Placement'] ==
{'Constraints': ['node.id=={}'.format(node_id)]})
{'Constraints': [f'node.id=={node_id}']})
def test_create_service_with_placement_object(self):
node_id = self.client.nodes()[0]['ID']
container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
placemt = docker.types.Placement(
constraints=['node.id=={}'.format(node_id)]
constraints=[f'node.id=={node_id}']
)
task_tmpl = docker.types.TaskTemplate(
container_spec, placement=placemt
@ -471,6 +468,19 @@ class ServiceTest(BaseAPIIntegrationTest):
assert 'Placement' in svc_info['Spec']['TaskTemplate']
assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
@requires_api_version('1.40')
def test_create_service_with_placement_maxreplicas(self):
container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
placemt = docker.types.Placement(maxreplicas=1)
task_tmpl = docker.types.TaskTemplate(
container_spec, placement=placemt
)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'Placement' in svc_info['Spec']['TaskTemplate']
assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
def test_create_service_with_endpoint_spec(self):
container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
@ -496,7 +506,7 @@ class ServiceTest(BaseAPIIntegrationTest):
assert port['TargetPort'] == 1990
assert port['Protocol'] == 'udp'
else:
self.fail('Invalid port specification: {0}'.format(port))
self.fail(f'Invalid port specification: {port}')
assert len(ports) == 3
@ -658,14 +668,14 @@ class ServiceTest(BaseAPIIntegrationTest):
container = self.get_service_container(name)
assert container is not None
exec_id = self.client.exec_create(
container, 'cat /run/secrets/{0}'.format(secret_name)
container, f'cat /run/secrets/{secret_name}'
)
assert self.client.exec_start(exec_id) == secret_data
@requires_api_version('1.25')
def test_create_service_with_unicode_secret(self):
secret_name = 'favorite_touhou'
secret_data = u'東方花映塚'
secret_data = '東方花映塚'
secret_id = self.client.create_secret(secret_name, secret_data)
self.tmp_secrets.append(secret_id)
secret_ref = docker.types.SecretReference(secret_id, secret_name)
@ -683,7 +693,7 @@ class ServiceTest(BaseAPIIntegrationTest):
container = self.get_service_container(name)
assert container is not None
exec_id = self.client.exec_create(
container, 'cat /run/secrets/{0}'.format(secret_name)
container, f'cat /run/secrets/{secret_name}'
)
container_secret = self.client.exec_start(exec_id)
container_secret = container_secret.decode('utf-8')
@ -710,14 +720,14 @@ class ServiceTest(BaseAPIIntegrationTest):
container = self.get_service_container(name)
assert container is not None
exec_id = self.client.exec_create(
container, 'cat /{0}'.format(config_name)
container, f'cat /{config_name}'
)
assert self.client.exec_start(exec_id) == config_data
@requires_api_version('1.30')
def test_create_service_with_unicode_config(self):
config_name = 'favorite_touhou'
config_data = u'東方花映塚'
config_data = '東方花映塚'
config_id = self.client.create_config(config_name, config_data)
self.tmp_configs.append(config_id)
config_ref = docker.types.ConfigReference(config_id, config_name)
@ -735,7 +745,7 @@ class ServiceTest(BaseAPIIntegrationTest):
container = self.get_service_container(name)
assert container is not None
exec_id = self.client.exec_create(
container, 'cat /{0}'.format(config_name)
container, f'cat /{config_name}'
)
container_config = self.client.exec_start(exec_id)
container_config = container_config.decode('utf-8')
@ -1124,7 +1134,7 @@ class ServiceTest(BaseAPIIntegrationTest):
assert port['TargetPort'] == 1990
assert port['Protocol'] == 'udp'
else:
self.fail('Invalid port specification: {0}'.format(port))
self.fail(f'Invalid port specification: {port}')
assert len(ports) == 3
@ -1151,7 +1161,7 @@ class ServiceTest(BaseAPIIntegrationTest):
assert port['TargetPort'] == 1990
assert port['Protocol'] == 'udp'
else:
self.fail('Invalid port specification: {0}'.format(port))
self.fail(f'Invalid port specification: {port}')
assert len(ports) == 3
@ -1346,3 +1356,33 @@ class ServiceTest(BaseAPIIntegrationTest):
self.client.update_service(*args, **kwargs)
else:
raise
@requires_api_version('1.41')
def test_create_service_cap_add(self):
name = self.get_service_name()
container_spec = docker.types.ContainerSpec(
TEST_IMG, ['echo', 'hello'], cap_add=['CAP_SYSLOG']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
svc_id = self.client.create_service(task_tmpl, name=name)
assert self.client.inspect_service(svc_id)
services = self.client.services(filters={'name': name})
assert len(services) == 1
assert services[0]['ID'] == svc_id['ID']
spec = services[0]['Spec']['TaskTemplate']['ContainerSpec']
assert 'CAP_SYSLOG' in spec['CapabilityAdd']
@requires_api_version('1.41')
def test_create_service_cap_drop(self):
name = self.get_service_name()
container_spec = docker.types.ContainerSpec(
TEST_IMG, ['echo', 'hello'], cap_drop=['CAP_SYSLOG']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
svc_id = self.client.create_service(task_tmpl, name=name)
assert self.client.inspect_service(svc_id)
services = self.client.services(filters={'name': name})
assert len(services) == 1
assert services[0]['ID'] == svc_id['ID']
spec = services[0]['Spec']['TaskTemplate']['ContainerSpec']
assert 'CAP_SYSLOG' in spec['CapabilityDrop']

View File

@ -8,7 +8,7 @@ from .base import BaseAPIIntegrationTest
class SwarmTest(BaseAPIIntegrationTest):
def setUp(self):
super(SwarmTest, self).setUp()
super().setUp()
force_leave_swarm(self.client)
self._unlock_key = None
@ -19,7 +19,7 @@ class SwarmTest(BaseAPIIntegrationTest):
except docker.errors.APIError:
pass
force_leave_swarm(self.client)
super(SwarmTest, self).tearDown()
super().tearDown()
@requires_api_version('1.24')
def test_init_swarm_simple(self):

View File

@ -75,11 +75,11 @@ class BaseAPIIntegrationTest(BaseIntegrationTest):
"""
def setUp(self):
super(BaseAPIIntegrationTest, self).setUp()
super().setUp()
self.client = self.get_client_instance()
def tearDown(self):
super(BaseAPIIntegrationTest, self).tearDown()
super().tearDown()
self.client.close()
@staticmethod

View File

@ -1,5 +1,3 @@
from __future__ import print_function
import sys
import warnings
@ -17,11 +15,11 @@ def setup_test_session():
try:
c.inspect_image(TEST_IMG)
except docker.errors.NotFound:
print("\npulling {0}".format(TEST_IMG), file=sys.stderr)
print(f"\npulling {TEST_IMG}", file=sys.stderr)
for data in c.pull(TEST_IMG, stream=True, decode=True):
status = data.get("status")
progress = data.get("progress")
detail = "{0} - {1}".format(status, progress)
detail = f"{status} - {progress}"
print(detail, file=sys.stderr)
# Double make sure we now have busybox

View File

@ -3,7 +3,6 @@ import random
import sys
import pytest
import six
from distutils.spawn import find_executable
from docker.credentials import (
@ -12,7 +11,7 @@ from docker.credentials import (
)
class TestStore(object):
class TestStore:
def teardown_method(self):
for server in self.tmp_keys:
try:
@ -33,7 +32,7 @@ class TestStore(object):
self.store = Store(DEFAULT_OSX_STORE)
def get_random_servername(self):
res = 'pycreds_test_{:x}'.format(random.getrandbits(32))
res = f'pycreds_test_{random.getrandbits(32):x}'
self.tmp_keys.append(res)
return res
@ -61,7 +60,7 @@ class TestStore(object):
def test_unicode_strings(self):
key = self.get_random_servername()
key = six.u(key)
key = key
self.store.store(server=key, username='user', secret='pass')
data = self.store.get(key)
assert data

View File

@ -1,11 +1,7 @@
import os
from docker.credentials.utils import create_environment_dict
try:
from unittest import mock
except ImportError:
import mock
from unittest import mock
@mock.patch.dict(os.environ)

View File

@ -13,8 +13,8 @@ class ImageCollectionTest(BaseIntegrationTest):
def test_build(self):
client = docker.from_env(version=TEST_API_VERSION)
image, _ = client.images.build(fileobj=io.BytesIO(
"FROM alpine\n"
"CMD echo hello world".encode('ascii')
b"FROM alpine\n"
b"CMD echo hello world"
))
self.tmp_imgs.append(image.id)
assert client.containers.run(image) == b"hello world\n"
@ -24,8 +24,8 @@ class ImageCollectionTest(BaseIntegrationTest):
client = docker.from_env(version=TEST_API_VERSION)
with pytest.raises(docker.errors.BuildError) as cm:
client.images.build(fileobj=io.BytesIO(
"FROM alpine\n"
"RUN exit 1".encode('ascii')
b"FROM alpine\n"
b"RUN exit 1"
))
assert (
"The command '/bin/sh -c exit 1' returned a non-zero code: 1"
@ -36,8 +36,8 @@ class ImageCollectionTest(BaseIntegrationTest):
client = docker.from_env(version=TEST_API_VERSION)
image, _ = client.images.build(
tag='some-tag', fileobj=io.BytesIO(
"FROM alpine\n"
"CMD echo hello world".encode('ascii')
b"FROM alpine\n"
b"CMD echo hello world"
)
)
self.tmp_imgs.append(image.id)
@ -47,8 +47,8 @@ class ImageCollectionTest(BaseIntegrationTest):
client = docker.from_env(version=TEST_API_VERSION)
image, _ = client.images.build(
tag='dup-txt-tag', fileobj=io.BytesIO(
"FROM alpine\n"
"CMD echo Successfully built abcd1234".encode('ascii')
b"FROM alpine\n"
b"CMD echo Successfully built abcd1234"
)
)
self.tmp_imgs.append(image.id)
@ -86,7 +86,7 @@ class ImageCollectionTest(BaseIntegrationTest):
def test_pull_multiple(self):
client = docker.from_env(version=TEST_API_VERSION)
images = client.images.pull('hello-world')
images = client.images.pull('hello-world', all_tags=True)
assert len(images) >= 1
assert any([
'hello-world:latest' in img.attrs['RepoTags'] for img in images
@ -119,7 +119,7 @@ class ImageCollectionTest(BaseIntegrationTest):
self.tmp_imgs.append(additional_tag)
image.reload()
with tempfile.TemporaryFile() as f:
stream = image.save(named='{}:latest'.format(additional_tag))
stream = image.save(named=f'{additional_tag}:latest')
for chunk in stream:
f.write(chunk)
@ -129,7 +129,7 @@ class ImageCollectionTest(BaseIntegrationTest):
assert len(result) == 1
assert result[0].id == image.id
assert '{}:latest'.format(additional_tag) in result[0].tags
assert f'{additional_tag}:latest' in result[0].tags
def test_save_name_error(self):
client = docker.from_env(version=TEST_API_VERSION)
@ -143,7 +143,7 @@ class ImageTest(BaseIntegrationTest):
def test_tag_and_remove(self):
repo = 'dockersdk.tests.images.test_tag'
tag = 'some-tag'
identifier = '{}:{}'.format(repo, tag)
identifier = f'{repo}:{tag}'
client = docker.from_env(version=TEST_API_VERSION)
image = client.images.pull('alpine:latest')

View File

@ -30,13 +30,18 @@ class ServiceTest(unittest.TestCase):
# ContainerSpec arguments
image="alpine",
command="sleep 300",
container_labels={'container': 'label'}
container_labels={'container': 'label'},
rollback_config={'order': 'start-first'}
)
assert service.name == name
assert service.attrs['Spec']['Labels']['foo'] == 'bar'
container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
assert "alpine" in container_spec['Image']
assert container_spec['Labels'] == {'container': 'label'}
spec_rollback = service.attrs['Spec'].get('RollbackConfig', None)
assert spec_rollback is not None
assert ('Order' in spec_rollback and
spec_rollback['Order'] == 'start-first')
def test_create_with_network(self):
client = docker.from_env(version=TEST_API_VERSION)
@ -333,3 +338,41 @@ class ServiceTest(unittest.TestCase):
assert service.force_update()
service.reload()
assert service.version > initial_version
@helpers.requires_api_version('1.41')
def test_create_cap_add(self):
client = docker.from_env(version=TEST_API_VERSION)
name = helpers.random_name()
service = client.services.create(
name=name,
labels={'foo': 'bar'},
image="alpine",
command="sleep 300",
container_labels={'container': 'label'},
cap_add=["CAP_SYSLOG"]
)
assert service.name == name
assert service.attrs['Spec']['Labels']['foo'] == 'bar'
container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
assert "alpine" in container_spec['Image']
assert container_spec['Labels'] == {'container': 'label'}
assert "CAP_SYSLOG" in container_spec["CapabilityAdd"]
@helpers.requires_api_version('1.41')
def test_create_cap_drop(self):
client = docker.from_env(version=TEST_API_VERSION)
name = helpers.random_name()
service = client.services.create(
name=name,
labels={'foo': 'bar'},
image="alpine",
command="sleep 300",
container_labels={'container': 'label'},
cap_drop=["CAP_SYSLOG"]
)
assert service.name == name
assert service.attrs['Spec']['Labels']['foo'] == 'bar'
container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
assert "alpine" in container_spec['Image']
assert container_spec['Labels'] == {'container': 'label'}
assert "CAP_SYSLOG" in container_spec["CapabilityDrop"]

View File

@ -2,13 +2,13 @@ import io
import random
import docker
import six
from .base import BaseAPIIntegrationTest, TEST_IMG
import pytest
class TestRegressions(BaseAPIIntegrationTest):
@pytest.mark.xfail(True, reason='Docker API always returns chunked resp')
def test_443_handle_nonchunked_response_in_stream(self):
dfile = io.BytesIO()
with pytest.raises(docker.errors.APIError) as exc:
@ -39,8 +39,7 @@ class TestRegressions(BaseAPIIntegrationTest):
self.client.start(ctnr)
self.client.wait(ctnr)
logs = self.client.logs(ctnr)
if six.PY3:
logs = logs.decode('utf-8')
logs = logs.decode('utf-8')
assert logs == '1000\n'
def test_792_explicit_port_protocol(self):
@ -56,10 +55,10 @@ class TestRegressions(BaseAPIIntegrationTest):
self.client.start(ctnr)
assert self.client.port(
ctnr, 2000
)[0]['HostPort'] == six.text_type(tcp_port)
)[0]['HostPort'] == str(tcp_port)
assert self.client.port(
ctnr, '2000/tcp'
)[0]['HostPort'] == six.text_type(tcp_port)
)[0]['HostPort'] == str(tcp_port)
assert self.client.port(
ctnr, '2000/udp'
)[0]['HostPort'] == six.text_type(udp_port)
)[0]['HostPort'] == str(udp_port)

0
tests/ssh/__init__.py Normal file
View File

590
tests/ssh/api_build_test.py Normal file
View File

@ -0,0 +1,590 @@
import io
import os
import shutil
import tempfile
from docker import errors
from docker.utils.proxy import ProxyConfig
import pytest
from .base import BaseAPIIntegrationTest, TEST_IMG
from ..helpers import random_name, requires_api_version, requires_experimental
class BuildTest(BaseAPIIntegrationTest):
def test_build_with_proxy(self):
self.client._proxy_configs = ProxyConfig(
ftp='a', http='b', https='c', no_proxy='d'
)
script = io.BytesIO('\n'.join([
'FROM busybox',
'RUN env | grep "FTP_PROXY=a"',
'RUN env | grep "ftp_proxy=a"',
'RUN env | grep "HTTP_PROXY=b"',
'RUN env | grep "http_proxy=b"',
'RUN env | grep "HTTPS_PROXY=c"',
'RUN env | grep "https_proxy=c"',
'RUN env | grep "NO_PROXY=d"',
'RUN env | grep "no_proxy=d"',
]).encode('ascii'))
self.client.build(fileobj=script, decode=True)
def test_build_with_proxy_and_buildargs(self):
self.client._proxy_configs = ProxyConfig(
ftp='a', http='b', https='c', no_proxy='d'
)
script = io.BytesIO('\n'.join([
'FROM busybox',
'RUN env | grep "FTP_PROXY=XXX"',
'RUN env | grep "ftp_proxy=xxx"',
'RUN env | grep "HTTP_PROXY=b"',
'RUN env | grep "http_proxy=b"',
'RUN env | grep "HTTPS_PROXY=c"',
'RUN env | grep "https_proxy=c"',
'RUN env | grep "NO_PROXY=d"',
'RUN env | grep "no_proxy=d"',
]).encode('ascii'))
self.client.build(
fileobj=script,
decode=True,
buildargs={'FTP_PROXY': 'XXX', 'ftp_proxy': 'xxx'}
)
def test_build_streaming(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
stream = self.client.build(fileobj=script, decode=True)
logs = []
for chunk in stream:
logs.append(chunk)
assert len(logs) > 0
def test_build_from_stringio(self):
return
script = io.StringIO('\n'.join([
'FROM busybox',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]))
stream = self.client.build(fileobj=script)
logs = ''
for chunk in stream:
chunk = chunk.decode('utf-8')
logs += chunk
assert logs != ''
def test_build_with_dockerignore(self):
base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir)
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
f.write("\n".join([
'FROM busybox',
'ADD . /test',
]))
with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
f.write("\n".join([
'ignored',
'Dockerfile',
'.dockerignore',
'!ignored/subdir/excepted-file',
'', # empty line,
'#*', # comment line
]))
with open(os.path.join(base_dir, 'not-ignored'), 'w') as f:
f.write("this file should not be ignored")
with open(os.path.join(base_dir, '#file.txt'), 'w') as f:
f.write('this file should not be ignored')
subdir = os.path.join(base_dir, 'ignored', 'subdir')
os.makedirs(subdir)
with open(os.path.join(subdir, 'file'), 'w') as f:
f.write("this file should be ignored")
with open(os.path.join(subdir, 'excepted-file'), 'w') as f:
f.write("this file should not be ignored")
tag = 'docker-py-test-build-with-dockerignore'
stream = self.client.build(
path=base_dir,
tag=tag,
)
for chunk in stream:
pass
c = self.client.create_container(tag, ['find', '/test', '-type', 'f'])
self.client.start(c)
self.client.wait(c)
logs = self.client.logs(c)
logs = logs.decode('utf-8')
assert sorted(list(filter(None, logs.split('\n')))) == sorted([
'/test/#file.txt',
'/test/ignored/subdir/excepted-file',
'/test/not-ignored'
])
def test_build_with_buildargs(self):
script = io.BytesIO('\n'.join([
'FROM scratch',
'ARG test',
'USER $test'
]).encode('ascii'))
stream = self.client.build(
fileobj=script, tag='buildargs', buildargs={'test': 'OK'}
)
self.tmp_imgs.append('buildargs')
for chunk in stream:
pass
info = self.client.inspect_image('buildargs')
assert info['Config']['User'] == 'OK'
@requires_api_version('1.22')
def test_build_shmsize(self):
script = io.BytesIO('\n'.join([
'FROM scratch',
'CMD sh -c "echo \'Hello, World!\'"',
]).encode('ascii'))
tag = 'shmsize'
shmsize = 134217728
stream = self.client.build(
fileobj=script, tag=tag, shmsize=shmsize
)
self.tmp_imgs.append(tag)
for chunk in stream:
pass
# There is currently no way to get the shmsize
# that was used to build the image
@requires_api_version('1.24')
def test_build_isolation(self):
script = io.BytesIO('\n'.join([
'FROM scratch',
'CMD sh -c "echo \'Deaf To All But The Song\''
]).encode('ascii'))
stream = self.client.build(
fileobj=script, tag='isolation',
isolation='default'
)
for chunk in stream:
pass
@requires_api_version('1.23')
def test_build_labels(self):
script = io.BytesIO('\n'.join([
'FROM scratch',
]).encode('ascii'))
labels = {'test': 'OK'}
stream = self.client.build(
fileobj=script, tag='labels', labels=labels
)
self.tmp_imgs.append('labels')
for chunk in stream:
pass
info = self.client.inspect_image('labels')
assert info['Config']['Labels'] == labels
@requires_api_version('1.25')
def test_build_with_cache_from(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'ENV FOO=bar',
'RUN touch baz',
'RUN touch bax',
]).encode('ascii'))
stream = self.client.build(fileobj=script, tag='build1')
self.tmp_imgs.append('build1')
for chunk in stream:
pass
stream = self.client.build(
fileobj=script, tag='build2', cache_from=['build1'],
decode=True
)
self.tmp_imgs.append('build2')
counter = 0
for chunk in stream:
if 'Using cache' in chunk.get('stream', ''):
counter += 1
assert counter == 3
self.client.remove_image('build2')
counter = 0
stream = self.client.build(
fileobj=script, tag='build2', cache_from=['nosuchtag'],
decode=True
)
for chunk in stream:
if 'Using cache' in chunk.get('stream', ''):
counter += 1
assert counter == 0
@requires_api_version('1.29')
def test_build_container_with_target(self):
script = io.BytesIO('\n'.join([
'FROM busybox as first',
'RUN mkdir -p /tmp/test',
'RUN touch /tmp/silence.tar.gz',
'FROM alpine:latest',
'WORKDIR /root/'
'COPY --from=first /tmp/silence.tar.gz .',
'ONBUILD RUN echo "This should not be in the final image"'
]).encode('ascii'))
stream = self.client.build(
fileobj=script, target='first', tag='build1'
)
self.tmp_imgs.append('build1')
for chunk in stream:
pass
info = self.client.inspect_image('build1')
assert not info['Config']['OnBuild']
@requires_api_version('1.25')
def test_build_with_network_mode(self):
# Set up pingable endpoint on custom network
network = self.client.create_network(random_name())['Id']
self.tmp_networks.append(network)
container = self.client.create_container(TEST_IMG, 'top')
self.tmp_containers.append(container)
self.client.start(container)
self.client.connect_container_to_network(
container, network, aliases=['pingtarget.docker']
)
script = io.BytesIO('\n'.join([
'FROM busybox',
'RUN ping -c1 pingtarget.docker'
]).encode('ascii'))
stream = self.client.build(
fileobj=script, network_mode=network,
tag='dockerpytest_customnetbuild'
)
self.tmp_imgs.append('dockerpytest_customnetbuild')
for chunk in stream:
pass
assert self.client.inspect_image('dockerpytest_customnetbuild')
script.seek(0)
stream = self.client.build(
fileobj=script, network_mode='none',
tag='dockerpytest_nonebuild', nocache=True, decode=True
)
self.tmp_imgs.append('dockerpytest_nonebuild')
logs = [chunk for chunk in stream]
assert 'errorDetail' in logs[-1]
assert logs[-1]['errorDetail']['code'] == 1
with pytest.raises(errors.NotFound):
self.client.inspect_image('dockerpytest_nonebuild')
@requires_api_version('1.27')
def test_build_with_extra_hosts(self):
img_name = 'dockerpytest_extrahost_build'
self.tmp_imgs.append(img_name)
script = io.BytesIO('\n'.join([
'FROM busybox',
'RUN ping -c1 hello.world.test',
'RUN ping -c1 extrahost.local.test',
'RUN cp /etc/hosts /hosts-file'
]).encode('ascii'))
stream = self.client.build(
fileobj=script, tag=img_name,
extra_hosts={
'extrahost.local.test': '127.0.0.1',
'hello.world.test': '127.0.0.1',
}, decode=True
)
for chunk in stream:
if 'errorDetail' in chunk:
pytest.fail(chunk)
assert self.client.inspect_image(img_name)
ctnr = self.run_container(img_name, 'cat /hosts-file')
logs = self.client.logs(ctnr)
logs = logs.decode('utf-8')
assert '127.0.0.1\textrahost.local.test' in logs
assert '127.0.0.1\thello.world.test' in logs
@requires_experimental(until=None)
@requires_api_version('1.25')
def test_build_squash(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'RUN echo blah > /file_1',
'RUN echo blahblah > /file_2',
'RUN echo blahblahblah > /file_3'
]).encode('ascii'))
def build_squashed(squash):
tag = 'squash' if squash else 'nosquash'
stream = self.client.build(
fileobj=script, tag=tag, squash=squash
)
self.tmp_imgs.append(tag)
for chunk in stream:
pass
return self.client.inspect_image(tag)
non_squashed = build_squashed(False)
squashed = build_squashed(True)
assert len(non_squashed['RootFS']['Layers']) == 4
assert len(squashed['RootFS']['Layers']) == 2
def test_build_stderr_data(self):
control_chars = ['\x1b[91m', '\x1b[0m']
snippet = 'Ancient Temple (Mystic Oriental Dream ~ Ancient Temple)'
script = io.BytesIO(b'\n'.join([
b'FROM busybox',
f'RUN sh -c ">&2 echo \'{snippet}\'"'.encode('utf-8')
]))
stream = self.client.build(
fileobj=script, decode=True, nocache=True
)
lines = []
for chunk in stream:
lines.append(chunk.get('stream'))
expected = '{0}{2}\n{1}'.format(
control_chars[0], control_chars[1], snippet
)
assert any([line == expected for line in lines])
def test_build_gzip_encoding(self):
base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir)
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
f.write("\n".join([
'FROM busybox',
'ADD . /test',
]))
stream = self.client.build(
path=base_dir, decode=True, nocache=True,
gzip=True
)
lines = []
for chunk in stream:
lines.append(chunk)
assert 'Successfully built' in lines[-1]['stream']
def test_build_with_dockerfile_empty_lines(self):
base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir)
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
f.write('FROM busybox\n')
with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
f.write('\n'.join([
' ',
'',
'\t\t',
'\t ',
]))
stream = self.client.build(
path=base_dir, decode=True, nocache=True
)
lines = []
for chunk in stream:
lines.append(chunk)
assert 'Successfully built' in lines[-1]['stream']
def test_build_gzip_custom_encoding(self):
with pytest.raises(errors.DockerException):
self.client.build(path='.', gzip=True, encoding='text/html')
@requires_api_version('1.32')
@requires_experimental(until=None)
def test_build_invalid_platform(self):
script = io.BytesIO(b'FROM busybox\n')
with pytest.raises(errors.APIError) as excinfo:
stream = self.client.build(fileobj=script, platform='foobar')
for _ in stream:
pass
# Some API versions incorrectly returns 500 status; assert 4xx or 5xx
assert excinfo.value.is_error()
assert 'unknown operating system' in excinfo.exconly() \
or 'invalid platform' in excinfo.exconly()
def test_build_out_of_context_dockerfile(self):
base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir)
with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
f.write('hello world')
with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
f.write('.dockerignore\n')
df_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, df_dir)
df_name = os.path.join(df_dir, 'Dockerfile')
with open(df_name, 'wb') as df:
df.write(('\n'.join([
'FROM busybox',
'COPY . /src',
'WORKDIR /src',
])).encode('utf-8'))
df.flush()
img_name = random_name()
self.tmp_imgs.append(img_name)
stream = self.client.build(
path=base_dir, dockerfile=df_name, tag=img_name,
decode=True
)
lines = []
for chunk in stream:
lines.append(chunk)
assert 'Successfully tagged' in lines[-1]['stream']
ctnr = self.client.create_container(img_name, 'ls -a')
self.tmp_containers.append(ctnr)
self.client.start(ctnr)
lsdata = self.client.logs(ctnr).strip().split(b'\n')
assert len(lsdata) == 3
assert sorted([b'.', b'..', b'file.txt']) == sorted(lsdata)
def test_build_in_context_dockerfile(self):
base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir)
with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
f.write('hello world')
with open(os.path.join(base_dir, 'custom.dockerfile'), 'w') as df:
df.write('\n'.join([
'FROM busybox',
'COPY . /src',
'WORKDIR /src',
]))
img_name = random_name()
self.tmp_imgs.append(img_name)
stream = self.client.build(
path=base_dir, dockerfile='custom.dockerfile', tag=img_name,
decode=True
)
lines = []
for chunk in stream:
lines.append(chunk)
assert 'Successfully tagged' in lines[-1]['stream']
ctnr = self.client.create_container(img_name, 'ls -a')
self.tmp_containers.append(ctnr)
self.client.start(ctnr)
lsdata = self.client.logs(ctnr).strip().split(b'\n')
assert len(lsdata) == 4
assert sorted(
[b'.', b'..', b'file.txt', b'custom.dockerfile']
) == sorted(lsdata)
def test_build_in_context_nested_dockerfile(self):
base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir)
with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
f.write('hello world')
subdir = os.path.join(base_dir, 'hello', 'world')
os.makedirs(subdir)
with open(os.path.join(subdir, 'custom.dockerfile'), 'w') as df:
df.write('\n'.join([
'FROM busybox',
'COPY . /src',
'WORKDIR /src',
]))
img_name = random_name()
self.tmp_imgs.append(img_name)
stream = self.client.build(
path=base_dir, dockerfile='hello/world/custom.dockerfile',
tag=img_name, decode=True
)
lines = []
for chunk in stream:
lines.append(chunk)
assert 'Successfully tagged' in lines[-1]['stream']
ctnr = self.client.create_container(img_name, 'ls -a')
self.tmp_containers.append(ctnr)
self.client.start(ctnr)
lsdata = self.client.logs(ctnr).strip().split(b'\n')
assert len(lsdata) == 4
assert sorted(
[b'.', b'..', b'file.txt', b'hello']
) == sorted(lsdata)
def test_build_in_context_abs_dockerfile(self):
base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir)
abs_dockerfile_path = os.path.join(base_dir, 'custom.dockerfile')
with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
f.write('hello world')
with open(abs_dockerfile_path, 'w') as df:
df.write('\n'.join([
'FROM busybox',
'COPY . /src',
'WORKDIR /src',
]))
img_name = random_name()
self.tmp_imgs.append(img_name)
stream = self.client.build(
path=base_dir, dockerfile=abs_dockerfile_path, tag=img_name,
decode=True
)
lines = []
for chunk in stream:
lines.append(chunk)
assert 'Successfully tagged' in lines[-1]['stream']
ctnr = self.client.create_container(img_name, 'ls -a')
self.tmp_containers.append(ctnr)
self.client.start(ctnr)
lsdata = self.client.logs(ctnr).strip().split(b'\n')
assert len(lsdata) == 4
assert sorted(
[b'.', b'..', b'file.txt', b'custom.dockerfile']
) == sorted(lsdata)
@requires_api_version('1.31')
@pytest.mark.xfail(
True,
reason='Currently fails on 18.09: '
'https://github.com/moby/moby/issues/37920'
)
def test_prune_builds(self):
prune_result = self.client.prune_builds()
assert 'SpaceReclaimed' in prune_result
assert isinstance(prune_result['SpaceReclaimed'], int)

Some files were not shown because too many files have changed in this diff Show More