Compare commits

..

No commits in common. "main" and "3.4.1" have entirely different histories.
main ... 3.4.1

179 changed files with 3268 additions and 9375 deletions

View File

@ -9,6 +9,3 @@ max_line_length = 80
[*.md] [*.md]
trim_trailing_whitespace = false trim_trailing_whitespace = false
[*.{yaml,yml}]
indent_size = 2

View File

@ -1,72 +0,0 @@
name: Python package
on: [push, pull_request]
env:
DOCKER_BUILDKIT: '1'
FORCE_COLOR: 1
jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.x'
- run: pip install -U ruff==0.1.8
- name: Run ruff
run: ruff docker tests
build:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.x'
- run: pip3 install build && python -m build .
- uses: actions/upload-artifact@v4
with:
name: dist
path: dist
unit-tests:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
allow-prereleases: true
- name: Install dependencies
run: |
python3 -m pip install --upgrade pip
pip3 install '.[ssh,dev]'
- name: Run unit tests
run: |
docker logout
rm -rf ~/.docker
py.test -v --cov=docker tests/unit
integration-tests:
runs-on: ubuntu-latest
strategy:
matrix:
variant: [ "integration-dind", "integration-dind-ssl", "integration-dind-ssh" ]
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- name: make ${{ matrix.variant }}
run: |
docker logout
rm -rf ~/.docker
make ${{ matrix.variant }}

View File

@ -1,53 +0,0 @@
name: Release
on:
workflow_dispatch:
inputs:
tag:
description: "Release Tag WITHOUT `v` Prefix (e.g. 6.0.0)"
required: true
dry-run:
description: 'Dry run'
required: false
type: boolean
default: true
env:
DOCKER_BUILDKIT: '1'
FORCE_COLOR: 1
jobs:
publish:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Generate Package
run: |
pip3 install build
python -m build .
env:
# This is also supported by Hatch; see
# https://github.com/ofek/hatch-vcs#version-source-environment-variables
SETUPTOOLS_SCM_PRETEND_VERSION: ${{ inputs.tag }}
- name: Publish to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
if: '! inputs.dry-run'
with:
password: ${{ secrets.PYPI_API_TOKEN }}
- name: Create GitHub release
uses: ncipollo/release-action@v1
if: '! inputs.dry-run'
with:
artifacts: "dist/*"
generateReleaseNotes: true
draft: true
commit: ${{ github.sha }}
token: ${{ secrets.GITHUB_TOKEN }}
tag: ${{ inputs.tag }}

4
.gitignore vendored
View File

@ -13,10 +13,6 @@ html/*
_build/ _build/
README.rst README.rst
# setuptools_scm
_version.py
env/ env/
venv/ venv/
.idea/ .idea/
*.iml

View File

@ -1,17 +0,0 @@
version: 2
sphinx:
configuration: docs/conf.py
build:
os: ubuntu-22.04
tools:
python: '3.12'
python:
install:
- method: pip
path: .
extra_requirements:
- ssh
- docs

18
.travis.yml Normal file
View File

@ -0,0 +1,18 @@
sudo: false
language: python
matrix:
include:
- python: 2.7
env: TOXENV=py27
- python: 3.4
env: TOXENV=py34
- python: 3.5
env: TOXENV=py35
- python: 3.6
env: TOXENV=py36
- env: TOXENV=flake8
install:
- pip install tox
script:
- tox

View File

@ -44,7 +44,7 @@ paragraph in the Docker contribution guidelines.
Before we can review your pull request, please ensure that nothing has been Before we can review your pull request, please ensure that nothing has been
broken by your changes by running the test suite. You can do so simply by broken by your changes by running the test suite. You can do so simply by
running `make test` in the project root. This also includes coding style using running `make test` in the project root. This also includes coding style using
`ruff` `flake8`
### 3. Write clear, self-contained commits ### 3. Write clear, self-contained commits

View File

@ -1,13 +1,13 @@
# syntax=docker/dockerfile:1 FROM python:2.7
ARG PYTHON_VERSION=3.12
FROM python:${PYTHON_VERSION}
RUN mkdir /src
WORKDIR /src WORKDIR /src
COPY . .
ARG VERSION=0.0.0.dev0 COPY requirements.txt /src/requirements.txt
RUN --mount=type=cache,target=/cache/pip \ RUN pip install -r requirements.txt
PIP_CACHE_DIR=/cache/pip \
SETUPTOOLS_SCM_PRETEND_VERSION=${VERSION} \ COPY test-requirements.txt /src/test-requirements.txt
pip install .[ssh] RUN pip install -r test-requirements.txt
COPY . /src
RUN pip install .

View File

@ -1,8 +1,4 @@
# syntax=docker/dockerfile:1 FROM python:3.5
ARG PYTHON_VERSION=3.12
FROM python:${PYTHON_VERSION}
ARG uid=1000 ARG uid=1000
ARG gid=1000 ARG gid=1000
@ -11,12 +7,7 @@ RUN addgroup --gid $gid sphinx \
&& useradd --uid $uid --gid $gid -M sphinx && useradd --uid $uid --gid $gid -M sphinx
WORKDIR /src WORKDIR /src
COPY . . COPY requirements.txt docs-requirements.txt ./
RUN pip install -r requirements.txt -r docs-requirements.txt
ARG VERSION=0.0.0.dev0
RUN --mount=type=cache,target=/cache/pip \
PIP_CACHE_DIR=/cache/pip \
SETUPTOOLS_SCM_PRETEND_VERSION=${VERSION} \
pip install .[ssh,docs]
USER sphinx USER sphinx

13
Dockerfile-py3 Normal file
View File

@ -0,0 +1,13 @@
FROM python:3.6
RUN mkdir /src
WORKDIR /src
COPY requirements.txt /src/requirements.txt
RUN pip install -r requirements.txt
COPY test-requirements.txt /src/test-requirements.txt
RUN pip install -r test-requirements.txt
COPY . /src
RUN pip install .

118
Jenkinsfile vendored Normal file
View File

@ -0,0 +1,118 @@
#!groovy
def imageNameBase = "dockerbuildbot/docker-py"
def imageNamePy2
def imageNamePy3
def images = [:]
def buildImage = { name, buildargs, pyTag ->
img = docker.image(name)
try {
img.pull()
} catch (Exception exc) {
img = docker.build(name, buildargs)
img.push()
}
images[pyTag] = img.id
}
def buildImages = { ->
wrappedNode(label: "ubuntu && !zfs && amd64", cleanWorkspace: true) {
stage("build image") {
checkout(scm)
imageNamePy2 = "${imageNameBase}:py2-${gitCommit()}"
imageNamePy3 = "${imageNameBase}:py3-${gitCommit()}"
buildImage(imageNamePy2, ".", "py2.7")
buildImage(imageNamePy3, "-f Dockerfile-py3 .", "py3.6")
}
}
}
def getDockerVersions = { ->
def dockerVersions = ["17.06.2-ce"]
wrappedNode(label: "ubuntu && !zfs") {
def result = sh(script: """docker run --rm \\
--entrypoint=python \\
${imageNamePy3} \\
/src/scripts/versions.py
""", returnStdout: true
)
dockerVersions = dockerVersions + result.trim().tokenize(' ')
}
return dockerVersions
}
def getAPIVersion = { engineVersion ->
def versionMap = ['17.06': '1.30', '17.12': '1.35', '18.02': '1.36', '18.03': '1.37']
def result = versionMap[engineVersion.substring(0, 5)]
if (!result) {
return '1.37'
}
return result
}
def runTests = { Map settings ->
def dockerVersion = settings.get("dockerVersion", null)
def pythonVersion = settings.get("pythonVersion", null)
def testImage = settings.get("testImage", null)
def apiVersion = getAPIVersion(dockerVersion)
if (!testImage) {
throw new Exception("Need test image object, e.g.: `runTests(testImage: img)`")
}
if (!dockerVersion) {
throw new Exception("Need Docker version to test, e.g.: `runTests(dockerVersion: '1.12.3')`")
}
if (!pythonVersion) {
throw new Exception("Need Python version being tested, e.g.: `runTests(pythonVersion: 'py2.7')`")
}
{ ->
wrappedNode(label: "ubuntu && !zfs && amd64", cleanWorkspace: true) {
stage("test python=${pythonVersion} / docker=${dockerVersion}") {
checkout(scm)
def dindContainerName = "dpy-dind-\$BUILD_NUMBER-\$EXECUTOR_NUMBER-${pythonVersion}-${dockerVersion}"
def testContainerName = "dpy-tests-\$BUILD_NUMBER-\$EXECUTOR_NUMBER-${pythonVersion}-${dockerVersion}"
def testNetwork = "dpy-testnet-\$BUILD_NUMBER-\$EXECUTOR_NUMBER-${pythonVersion}-${dockerVersion}"
try {
sh """docker network create ${testNetwork}"""
sh """docker run -d --name ${dindContainerName} -v /tmp --privileged --network ${testNetwork} \\
dockerswarm/dind:${dockerVersion} dockerd -H tcp://0.0.0.0:2375
"""
sh """docker run \\
--name ${testContainerName} \\
-e "DOCKER_HOST=tcp://${dindContainerName}:2375" \\
-e 'DOCKER_TEST_API_VERSION=${apiVersion}' \\
--network ${testNetwork} \\
--volumes-from ${dindContainerName} \\
${testImage} \\
py.test -v -rxs tests/integration
"""
} finally {
sh """
docker stop ${dindContainerName} ${testContainerName}
docker rm -vf ${dindContainerName} ${testContainerName}
docker network rm ${testNetwork}
"""
}
}
}
}
}
buildImages()
def dockerVersions = getDockerVersions()
def testMatrix = [failFast: false]
for (imgKey in new ArrayList(images.keySet())) {
for (version in dockerVersions) {
testMatrix["${imgKey}_${version}"] = runTests([testImage: images[imgKey], dockerVersion: version, pythonVersion: imgKey])
}
}
parallel(testMatrix)

View File

@ -11,19 +11,15 @@
[Org] [Org]
[Org."Core maintainers"] [Org."Core maintainers"]
people = [ people = [
"glours", "shin-",
"milas",
] ]
[Org.Alumni] [Org.Alumni]
people = [ people = [
"aiordache",
"aanand", "aanand",
"bfirsh", "bfirsh",
"dnephin", "dnephin",
"mnowster", "mnowster",
"mpetazzoni", "mpetazzoni",
"shin-",
"ulyssessouza",
] ]
[people] [people]
@ -39,11 +35,6 @@
Email = "aanand@docker.com" Email = "aanand@docker.com"
GitHub = "aanand" GitHub = "aanand"
[people.aiordache]
Name = "Anca Iordache"
Email = "anca.iordache@docker.com"
GitHub = "aiordache"
[people.bfirsh] [people.bfirsh]
Name = "Ben Firshman" Name = "Ben Firshman"
Email = "b@fir.sh" Email = "b@fir.sh"
@ -54,16 +45,6 @@
Email = "dnephin@gmail.com" Email = "dnephin@gmail.com"
GitHub = "dnephin" GitHub = "dnephin"
[people.glours]
Name = "Guillaume Lours"
Email = "705411+glours@users.noreply.github.com"
GitHub = "glours"
[people.milas]
Name = "Milas Bowman"
Email = "devnull@milas.dev"
GitHub = "milas"
[people.mnowster] [people.mnowster]
Name = "Mazz Mosley" Name = "Mazz Mosley"
Email = "mazz@houseofmnowster.com" Email = "mazz@houseofmnowster.com"
@ -78,8 +59,3 @@
Name = "Joffrey F" Name = "Joffrey F"
Email = "joffrey@docker.com" Email = "joffrey@docker.com"
GitHub = "shin-" GitHub = "shin-"
[people.ulyssessouza]
Name = "Ulysses Domiciano Souza"
Email = "ulysses.souza@docker.com"
GitHub = "ulyssessouza"

8
MANIFEST.in Normal file
View File

@ -0,0 +1,8 @@
include test-requirements.txt
include requirements.txt
include README.md
include README.rst
include LICENSE
recursive-include tests *.py
recursive-include tests/unit/testdata *
recursive-include tests/integration/testdata *

199
Makefile
View File

@ -1,184 +1,95 @@
TEST_API_VERSION ?= 1.45
TEST_ENGINE_VERSION ?= 26.1
ifeq ($(OS),Windows_NT)
PLATFORM := Windows
else
PLATFORM := $(shell sh -c 'uname -s 2>/dev/null || echo Unknown')
endif
ifeq ($(PLATFORM),Linux)
uid_args := "--build-arg uid=$(shell id -u) --build-arg gid=$(shell id -g)"
endif
SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER ?= $(shell git describe --match '[0-9]*' --dirty='.m' --always --tags 2>/dev/null | sed -r 's/-([0-9]+)/.dev\1/' | sed 's/-/+/')
ifeq ($(SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER),)
SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER = "0.0.0.dev0"
endif
.PHONY: all .PHONY: all
all: test all: test
.PHONY: clean .PHONY: clean
clean: clean:
-docker rm -f dpy-dind dpy-dind-certs dpy-dind-ssl -docker rm -f dpy-dind-py2 dpy-dind-py3 dpy-dind-certs dpy-dind-ssl
find -name "__pycache__" | xargs rm -rf find -name "__pycache__" | xargs rm -rf
.PHONY: build-dind-ssh
build-dind-ssh:
docker build \
--pull \
-t docker-dind-ssh \
-f tests/Dockerfile-ssh-dind \
--build-arg VERSION=${SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER} \
--build-arg ENGINE_VERSION=${TEST_ENGINE_VERSION} \
--build-arg API_VERSION=${TEST_API_VERSION} \
.
.PHONY: build .PHONY: build
build: build:
docker build \ docker build -t docker-sdk-python .
--pull \
-t docker-sdk-python3 \ .PHONY: build-py3
-f tests/Dockerfile \ build-py3:
--build-arg VERSION=${SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER} \ docker build -t docker-sdk-python3 -f Dockerfile-py3 .
.
.PHONY: build-docs .PHONY: build-docs
build-docs: build-docs:
docker build \ docker build -t docker-sdk-python-docs -f Dockerfile-docs --build-arg uid=$(shell id -u) --build-arg gid=$(shell id -g) .
-t docker-sdk-python-docs \
-f Dockerfile-docs \
--build-arg VERSION=${SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER} \
$(uid_args) \
.
.PHONY: build-dind-certs .PHONY: build-dind-certs
build-dind-certs: build-dind-certs:
docker build \ docker build -t dpy-dind-certs -f tests/Dockerfile-dind-certs .
-t dpy-dind-certs \
-f tests/Dockerfile-dind-certs \
--build-arg VERSION=${SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER} \
.
.PHONY: test .PHONY: test
test: ruff unit-test integration-dind integration-dind-ssl test: flake8 unit-test unit-test-py3 integration-dind integration-dind-ssl
.PHONY: unit-test .PHONY: unit-test
unit-test: build unit-test: build
docker run -t --rm docker-sdk-python py.test tests/unit
.PHONY: unit-test-py3
unit-test-py3: build-py3
docker run -t --rm docker-sdk-python3 py.test tests/unit docker run -t --rm docker-sdk-python3 py.test tests/unit
.PHONY: integration-test .PHONY: integration-test
integration-test: build integration-test: build
docker run -t --rm -v /var/run/docker.sock:/var/run/docker.sock docker-sdk-python3 py.test -v tests/integration/${file} docker run -t --rm -v /var/run/docker.sock:/var/run/docker.sock docker-sdk-python py.test -v tests/integration/${file}
.PHONY: integration-test-py3
integration-test-py3: build-py3
docker run -t --rm -v /var/run/docker.sock:/var/run/docker.sock docker-sdk-python3 py.test tests/integration/${file}
TEST_API_VERSION ?= 1.35
TEST_ENGINE_VERSION ?= 17.12.0-ce
.PHONY: setup-network .PHONY: setup-network
setup-network: setup-network:
docker network inspect dpy-tests || docker network create dpy-tests docker network inspect dpy-tests || docker network create dpy-tests
.PHONY: integration-dind .PHONY: integration-dind
integration-dind: build setup-network integration-dind: integration-dind-py2 integration-dind-py3
docker rm -vf dpy-dind || :
docker run \ .PHONY: integration-dind-py2
--detach \ integration-dind-py2: build setup-network
--name dpy-dind \ docker rm -vf dpy-dind-py2 || :
--network dpy-tests \ docker run -d --network dpy-tests --name dpy-dind-py2 --privileged\
--pull=always \ dockerswarm/dind:${TEST_ENGINE_VERSION} dockerd -H tcp://0.0.0.0:2375 --experimental
--privileged \ docker run -t --rm --env="DOCKER_HOST=tcp://dpy-dind-py2:2375" --env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}"\
docker:${TEST_ENGINE_VERSION}-dind \ --network dpy-tests docker-sdk-python py.test tests/integration
dockerd -H tcp://0.0.0.0:2375 --experimental docker rm -vf dpy-dind-py2
# Wait for Docker-in-Docker to come to life
docker run \
--network dpy-tests \
--rm \
--tty \
busybox \
sh -c 'while ! nc -z dpy-dind 2375; do sleep 1; done'
docker run \
--env="DOCKER_HOST=tcp://dpy-dind:2375" \
--env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}" \
--network dpy-tests \
--rm \
--tty \
docker-sdk-python3 \
py.test tests/integration/${file}
docker rm -vf dpy-dind
.PHONY: integration-dind-ssh
integration-dind-ssh: build-dind-ssh build setup-network
docker rm -vf dpy-dind-ssh || :
docker run -d --network dpy-tests --name dpy-dind-ssh --privileged \
docker-dind-ssh dockerd --experimental
# start SSH daemon for known key
docker exec dpy-dind-ssh sh -c "/usr/sbin/sshd -h /etc/ssh/known_ed25519 -p 22"
docker exec dpy-dind-ssh sh -c "/usr/sbin/sshd -h /etc/ssh/unknown_ed25519 -p 2222"
docker run \
--tty \
--rm \
--env="DOCKER_HOST=ssh://dpy-dind-ssh" \
--env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}" \
--env="UNKNOWN_DOCKER_SSH_HOST=ssh://dpy-dind-ssh:2222" \
--network dpy-tests \
docker-sdk-python3 py.test tests/ssh/${file}
docker rm -vf dpy-dind-ssh
.PHONY: integration-dind-py3
integration-dind-py3: build-py3 setup-network
docker rm -vf dpy-dind-py3 || :
docker run -d --network dpy-tests --name dpy-dind-py3 --privileged\
dockerswarm/dind:${TEST_ENGINE_VERSION} dockerd -H tcp://0.0.0.0:2375 --experimental
docker run -t --rm --env="DOCKER_HOST=tcp://dpy-dind-py3:2375" --env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}"\
--network dpy-tests docker-sdk-python3 py.test tests/integration
docker rm -vf dpy-dind-py3
.PHONY: integration-dind-ssl .PHONY: integration-dind-ssl
integration-dind-ssl: build-dind-certs build setup-network integration-dind-ssl: build-dind-certs build build-py3
docker rm -vf dpy-dind-certs dpy-dind-ssl || : docker rm -vf dpy-dind-certs dpy-dind-ssl || :
docker run -d --name dpy-dind-certs dpy-dind-certs docker run -d --name dpy-dind-certs dpy-dind-certs
docker run -d --env="DOCKER_HOST=tcp://localhost:2375" --env="DOCKER_TLS_VERIFY=1"\
docker run \ --env="DOCKER_CERT_PATH=/certs" --volumes-from dpy-dind-certs --name dpy-dind-ssl\
--detach \ --network dpy-tests --network-alias docker -v /tmp --privileged\
--env="DOCKER_CERT_PATH=/certs" \ dockerswarm/dind:${TEST_ENGINE_VERSION}\
--env="DOCKER_HOST=tcp://localhost:2375" \ dockerd --tlsverify --tlscacert=/certs/ca.pem --tlscert=/certs/server-cert.pem\
--env="DOCKER_TLS_VERIFY=1" \ --tlskey=/certs/server-key.pem -H tcp://0.0.0.0:2375 --experimental
--name dpy-dind-ssl \ docker run -t --rm --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375"\
--network dpy-tests \ --env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs" --env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}"\
--network-alias docker \ --network dpy-tests docker-sdk-python py.test tests/integration
--pull=always \ docker run -t --rm --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375"\
--privileged \ --env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs" --env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}"\
--volume /tmp \ --network dpy-tests docker-sdk-python3 py.test tests/integration
--volumes-from dpy-dind-certs \
docker:${TEST_ENGINE_VERSION}-dind \
dockerd \
--tlsverify \
--tlscacert=/certs/ca.pem \
--tlscert=/certs/server-cert.pem \
--tlskey=/certs/server-key.pem \
-H tcp://0.0.0.0:2375 \
--experimental
# Wait for Docker-in-Docker to come to life
docker run \
--network dpy-tests \
--rm \
--tty \
busybox \
sh -c 'while ! nc -z dpy-dind-ssl 2375; do sleep 1; done'
docker run \
--env="DOCKER_CERT_PATH=/certs" \
--env="DOCKER_HOST=tcp://docker:2375" \
--env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}" \
--env="DOCKER_TLS_VERIFY=1" \
--network dpy-tests \
--rm \
--volumes-from dpy-dind-ssl \
--tty \
docker-sdk-python3 \
py.test tests/integration/${file}
docker rm -vf dpy-dind-ssl dpy-dind-certs docker rm -vf dpy-dind-ssl dpy-dind-certs
.PHONY: ruff .PHONY: flake8
ruff: build flake8: build
docker run -t --rm docker-sdk-python3 ruff docker tests docker run -t --rm docker-sdk-python flake8 docker tests
.PHONY: docs .PHONY: docs
docs: build-docs docs: build-docs
@ -186,4 +97,4 @@ docs: build-docs
.PHONY: shell .PHONY: shell
shell: build shell: build
docker run -it -v /var/run/docker.sock:/var/run/docker.sock docker-sdk-python3 python docker run -it -v /var/run/docker.sock:/var/run/docker.sock docker-sdk-python python

View File

@ -1,17 +1,18 @@
# Docker SDK for Python # Docker SDK for Python
[![Build Status](https://github.com/docker/docker-py/actions/workflows/ci.yml/badge.svg)](https://github.com/docker/docker-py/actions/workflows/ci.yml) [![Build Status](https://travis-ci.org/docker/docker-py.svg?branch=master)](https://travis-ci.org/docker/docker-py)
A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps run containers, manage containers, manage Swarms, etc. A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps run containers, manage containers, manage Swarms, etc.
## Installation ## Installation
The latest stable version [is available on PyPI](https://pypi.python.org/pypi/docker/). Install with pip: The latest stable version [is available on PyPI](https://pypi.python.org/pypi/docker/). Either add `docker` to your `requirements.txt` file or install with pip:
pip install docker pip install docker
> Older versions (< 6.0) required installing `docker[tls]` for SSL/TLS support. If you are intending to connect to a docker host via TLS, add `docker[tls]` to your requirements instead, or install with pip:
> This is no longer necessary and is a no-op, but is supported for backwards compatibility.
pip install docker[tls]
## Usage ## Usage
@ -57,7 +58,7 @@ You can stream logs:
```python ```python
>>> for line in container.logs(stream=True): >>> for line in container.logs(stream=True):
... print(line.strip()) ... print line.strip()
Reticulating spline 2... Reticulating spline 2...
Reticulating spline 3... Reticulating spline 3...
... ...

12
appveyor.yml Normal file
View File

@ -0,0 +1,12 @@
version: '{branch}-{build}'
install:
- "SET PATH=C:\\Python27-x64;C:\\Python27-x64\\Scripts;%PATH%"
- "python --version"
- "pip install tox==2.9.1"
# Build the binary after tests
build: false
test_script:
- "tox"

View File

@ -1,7 +1,7 @@
# flake8: noqa
from .api import APIClient from .api import APIClient
from .client import DockerClient, from_env from .client import DockerClient, from_env
from .context import Context, ContextAPI from .version import version, version_info
from .tls import TLSConfig
from .version import __version__
__version__ = version
__title__ = 'docker' __title__ = 'docker'

View File

@ -1 +1,2 @@
# flake8: noqa
from .client import APIClient from .client import APIClient

View File

@ -3,20 +3,23 @@ import logging
import os import os
import random import random
from .. import auth, constants, errors, utils from .. import auth
from .. import constants
from .. import errors
from .. import utils
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
class BuildApiMixin: class BuildApiMixin(object):
def build(self, path=None, tag=None, quiet=False, fileobj=None, def build(self, path=None, tag=None, quiet=False, fileobj=None,
nocache=False, rm=False, timeout=None, nocache=False, rm=False, timeout=None,
custom_context=False, encoding=None, pull=False, custom_context=False, encoding=None, pull=False,
forcerm=False, dockerfile=None, container_limits=None, forcerm=False, dockerfile=None, container_limits=None,
decode=False, buildargs=None, gzip=False, shmsize=None, decode=False, buildargs=None, gzip=False, shmsize=None,
labels=None, cache_from=None, target=None, network_mode=None, labels=None, cache_from=None, target=None, network_mode=None,
squash=None, extra_hosts=None, platform=None, isolation=None, squash=None, extra_hosts=None, platform=None, isolation=None):
use_config_proxy=True):
""" """
Similar to the ``docker build`` command. Either ``path`` or ``fileobj`` Similar to the ``docker build`` command. Either ``path`` or ``fileobj``
needs to be set. ``path`` can be a local path (to a directory needs to be set. ``path`` can be a local path (to a directory
@ -72,7 +75,6 @@ class BuildApiMixin:
forcerm (bool): Always remove intermediate containers, even after forcerm (bool): Always remove intermediate containers, even after
unsuccessful builds unsuccessful builds
dockerfile (str): path within the build context to the Dockerfile dockerfile (str): path within the build context to the Dockerfile
gzip (bool): If set to ``True``, gzip compression/encoding is used
buildargs (dict): A dictionary of build arguments buildargs (dict): A dictionary of build arguments
container_limits (dict): A dictionary of limits applied to each container_limits (dict): A dictionary of limits applied to each
container created by the build process. Valid keys: container created by the build process. Valid keys:
@ -101,10 +103,6 @@ class BuildApiMixin:
platform (str): Platform in the format ``os[/arch[/variant]]`` platform (str): Platform in the format ``os[/arch[/variant]]``
isolation (str): Isolation technology used during build. isolation (str): Isolation technology used during build.
Default: `None`. Default: `None`.
use_config_proxy (bool): If ``True``, and if the docker client
configuration file (``~/.docker/config.json`` by default)
contains a proxy configuration, the corresponding environment
variables will be set in the container being built.
Returns: Returns:
A generator for the build output. A generator for the build output.
@ -118,23 +116,19 @@ class BuildApiMixin:
remote = context = None remote = context = None
headers = {} headers = {}
container_limits = container_limits or {} container_limits = container_limits or {}
buildargs = buildargs or {}
if path is None and fileobj is None: if path is None and fileobj is None:
raise TypeError("Either path or fileobj needs to be provided.") raise TypeError("Either path or fileobj needs to be provided.")
if gzip and encoding is not None: if gzip and encoding is not None:
raise errors.DockerException( raise errors.DockerException(
'Can not use custom encoding if gzip is enabled' 'Can not use custom encoding if gzip is enabled'
) )
if tag is not None:
if not utils.match_tag(tag):
raise errors.DockerException(
f"invalid tag '{tag}': invalid reference format"
)
for key in container_limits.keys(): for key in container_limits.keys():
if key not in constants.CONTAINER_LIMITS_KEYS: if key not in constants.CONTAINER_LIMITS_KEYS:
raise errors.DockerException( raise errors.DockerException(
f"invalid tag '{tag}': invalid reference format" 'Invalid container_limits key {0}'.format(key)
) )
if custom_context: if custom_context:
if not fileobj: if not fileobj:
raise TypeError("You must specify fileobj with custom_context") raise TypeError("You must specify fileobj with custom_context")
@ -150,10 +144,10 @@ class BuildApiMixin:
dockerignore = os.path.join(path, '.dockerignore') dockerignore = os.path.join(path, '.dockerignore')
exclude = None exclude = None
if os.path.exists(dockerignore): if os.path.exists(dockerignore):
with open(dockerignore) as f: with open(dockerignore, 'r') as f:
exclude = list(filter( exclude = list(filter(
lambda x: x != '' and x[0] != '#', lambda x: x != '' and x[0] != '#',
[line.strip() for line in f.read().splitlines()] [l.strip() for l in f.read().splitlines()]
)) ))
dockerfile = process_dockerfile(dockerfile, path) dockerfile = process_dockerfile(dockerfile, path)
context = utils.tar( context = utils.tar(
@ -174,10 +168,6 @@ class BuildApiMixin:
} }
params.update(container_limits) params.update(container_limits)
if use_config_proxy:
proxy_args = self._proxy_configs.get_environment()
for k, v in proxy_args.items():
buildargs.setdefault(k, v)
if buildargs: if buildargs:
params.update({'buildargs': json.dumps(buildargs)}) params.update({'buildargs': json.dumps(buildargs)})
@ -275,24 +265,10 @@ class BuildApiMixin:
return self._stream_helper(response, decode=decode) return self._stream_helper(response, decode=decode)
@utils.minimum_version('1.31') @utils.minimum_version('1.31')
def prune_builds(self, filters=None, keep_storage=None, all=None): def prune_builds(self):
""" """
Delete the builder cache Delete the builder cache
Args:
filters (dict): Filters to process on the prune list.
Needs Docker API v1.39+
Available filters:
- dangling (bool): When set to true (or 1), prune only
unused and untagged images.
- until (str): Can be Unix timestamps, date formatted
timestamps, or Go duration strings (e.g. 10m, 1h30m) computed
relative to the daemon's local time.
keep_storage (int): Amount of disk space in bytes to keep for cache.
Needs Docker API v1.39+
all (bool): Remove all types of build cache.
Needs Docker API v1.39+
Returns: Returns:
(dict): A dictionary containing information about the operation's (dict): A dictionary containing information about the operation's
result. The ``SpaceReclaimed`` key indicates the amount of result. The ``SpaceReclaimed`` key indicates the amount of
@ -303,48 +279,44 @@ class BuildApiMixin:
If the server returns an error. If the server returns an error.
""" """
url = self._url("/build/prune") url = self._url("/build/prune")
if (filters, keep_storage, all) != (None, None, None) \ return self._result(self._post(url), True)
and utils.version_lt(self._version, '1.39'):
raise errors.InvalidVersion(
'`filters`, `keep_storage`, and `all` args are only available '
'for API version > 1.38'
)
params = {}
if filters is not None:
params['filters'] = utils.convert_filters(filters)
if keep_storage is not None:
params['keep-storage'] = keep_storage
if all is not None:
params['all'] = all
return self._result(self._post(url, params=params), True)
def _set_auth_headers(self, headers): def _set_auth_headers(self, headers):
log.debug('Looking for auth config') log.debug('Looking for auth config')
# If we don't have any auth data so far, try reloading the config # If we don't have any auth data so far, try reloading the config
# file one more time in case anything showed up in there. # file one more time in case anything showed up in there.
if not self._auth_configs or self._auth_configs.is_empty: if not self._auth_configs:
log.debug("No auth config in memory - loading from filesystem") log.debug("No auth config in memory - loading from filesystem")
self._auth_configs = auth.load_config( self._auth_configs = auth.load_config()
credstore_env=self.credstore_env
)
# Send the full auth configuration (if any exists), since the build # Send the full auth configuration (if any exists), since the build
# could use any (or all) of the registries. # could use any (or all) of the registries.
if self._auth_configs: if self._auth_configs:
auth_data = self._auth_configs.get_all_credentials() auth_data = {}
if self._auth_configs.get('credsStore'):
# Using a credentials store, we need to retrieve the
# credentials for each registry listed in the config.json file
# Matches CLI behavior: https://github.com/docker/docker/blob/
# 67b85f9d26f1b0b2b240f2d794748fac0f45243c/cliconfig/
# credentials/native_store.go#L68-L83
for registry in self._auth_configs.get('auths', {}).keys():
auth_data[registry] = auth.resolve_authconfig(
self._auth_configs, registry,
credstore_env=self.credstore_env,
)
else:
auth_data = self._auth_configs.get('auths', {}).copy()
# See https://github.com/docker/docker-py/issues/1683 # See https://github.com/docker/docker-py/issues/1683
if (auth.INDEX_URL not in auth_data and if auth.INDEX_NAME in auth_data:
auth.INDEX_NAME in auth_data): auth_data[auth.INDEX_URL] = auth_data[auth.INDEX_NAME]
auth_data[auth.INDEX_URL] = auth_data.get(auth.INDEX_NAME, {})
log.debug( log.debug(
"Sending auth config (%s)", 'Sending auth config ({0})'.format(
', '.join(repr(k) for k in auth_data), ', '.join(repr(k) for k in auth_data.keys())
)
) )
if auth_data:
headers['X-Registry-Config'] = auth.encode_header( headers['X-Registry-Config'] = auth.encode_header(
auth_data auth_data
) )
@ -359,17 +331,13 @@ def process_dockerfile(dockerfile, path):
abs_dockerfile = dockerfile abs_dockerfile = dockerfile
if not os.path.isabs(dockerfile): if not os.path.isabs(dockerfile):
abs_dockerfile = os.path.join(path, dockerfile) abs_dockerfile = os.path.join(path, dockerfile)
if constants.IS_WINDOWS_PLATFORM and path.startswith(
constants.WINDOWS_LONGPATH_PREFIX):
normpath = os.path.normpath(
abs_dockerfile[len(constants.WINDOWS_LONGPATH_PREFIX):])
abs_dockerfile = f'{constants.WINDOWS_LONGPATH_PREFIX}{normpath}'
if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or
os.path.relpath(abs_dockerfile, path).startswith('..')): os.path.relpath(abs_dockerfile, path).startswith('..')):
# Dockerfile not in context - read data to insert into tar later # Dockerfile not in context - read data to insert into tar later
with open(abs_dockerfile) as df: with open(abs_dockerfile, 'r') as df:
return ( return (
f'.dockerfile.{random.getrandbits(160):x}', '.dockerfile.{0:x}'.format(random.getrandbits(160)),
df.read() df.read()
) )

View File

@ -1,35 +1,12 @@
import json import json
import struct import struct
import urllib
from functools import partial from functools import partial
import requests import requests
import requests.adapters
import requests.exceptions import requests.exceptions
import six
import websocket
from .. import auth
from ..constants import (
DEFAULT_MAX_POOL_SIZE,
DEFAULT_NUM_POOLS,
DEFAULT_NUM_POOLS_SSH,
DEFAULT_TIMEOUT_SECONDS,
DEFAULT_USER_AGENT,
IS_WINDOWS_PLATFORM,
MINIMUM_DOCKER_API_VERSION,
STREAM_HEADER_SIZE_BYTES,
)
from ..errors import (
DockerException,
InvalidVersion,
TLSParameterError,
create_api_error_from_http_exception,
)
from ..tls import TLSConfig
from ..transport import UnixHTTPAdapter
from ..utils import check_resource, config, update_headers, utils
from ..utils.json_stream import json_stream
from ..utils.proxy import ProxyConfig
from ..utils.socket import consume_socket_output, demux_adaptor, frames_iter
from .build import BuildApiMixin from .build import BuildApiMixin
from .config import ConfigApiMixin from .config import ConfigApiMixin
from .container import ContainerApiMixin from .container import ContainerApiMixin
@ -42,14 +19,23 @@ from .secret import SecretApiMixin
from .service import ServiceApiMixin from .service import ServiceApiMixin
from .swarm import SwarmApiMixin from .swarm import SwarmApiMixin
from .volume import VolumeApiMixin from .volume import VolumeApiMixin
from .. import auth
from ..constants import (
DEFAULT_TIMEOUT_SECONDS, DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM,
DEFAULT_DOCKER_API_VERSION, STREAM_HEADER_SIZE_BYTES, DEFAULT_NUM_POOLS,
MINIMUM_DOCKER_API_VERSION
)
from ..errors import (
DockerException, InvalidVersion, TLSParameterError,
create_api_error_from_http_exception
)
from ..tls import TLSConfig
from ..transport import SSLAdapter, UnixAdapter
from ..utils import utils, check_resource, update_headers, config
from ..utils.socket import frames_iter, socket_raw_iter
from ..utils.json_stream import json_stream
try: try:
from ..transport import NpipeHTTPAdapter from ..transport import NpipeAdapter
except ImportError:
pass
try:
from ..transport import SSHHTTPAdapter
except ImportError: except ImportError:
pass pass
@ -90,7 +76,7 @@ class APIClient(
base_url (str): URL to the Docker server. For example, base_url (str): URL to the Docker server. For example,
``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``. ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
version (str): The version of the API to use. Set to ``auto`` to version (str): The version of the API to use. Set to ``auto`` to
automatically detect the server's version. Default: ``1.35`` automatically detect the server's version. Default: ``1.30``
timeout (int): Default timeout for API calls, in seconds. timeout (int): Default timeout for API calls, in seconds.
tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
``True`` to enable it with default options, or pass a ``True`` to enable it with default options, or pass a
@ -99,11 +85,6 @@ class APIClient(
user_agent (str): Set a custom user agent for requests to the server. user_agent (str): Set a custom user agent for requests to the server.
credstore_env (dict): Override environment variables when calling the credstore_env (dict): Override environment variables when calling the
credential store process. credential store process.
use_ssh_client (bool): If set to `True`, an ssh connection is made
via shelling out to the ssh client. Ensure the ssh client is
installed and configured on the host.
max_pool_size (int): The maximum number of connections
to save in the pool.
""" """
__attrs__ = requests.Session.__attrs__ + ['_auth_configs', __attrs__ = requests.Session.__attrs__ + ['_auth_configs',
@ -114,10 +95,9 @@ class APIClient(
def __init__(self, base_url=None, version=None, def __init__(self, base_url=None, version=None,
timeout=DEFAULT_TIMEOUT_SECONDS, tls=False, timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
user_agent=DEFAULT_USER_AGENT, num_pools=None, user_agent=DEFAULT_USER_AGENT, num_pools=DEFAULT_NUM_POOLS,
credstore_env=None, use_ssh_client=False, credstore_env=None):
max_pool_size=DEFAULT_MAX_POOL_SIZE): super(APIClient, self).__init__()
super().__init__()
if tls and not base_url: if tls and not base_url:
raise TLSParameterError( raise TLSParameterError(
@ -129,31 +109,17 @@ class APIClient(
self.headers['User-Agent'] = user_agent self.headers['User-Agent'] = user_agent
self._general_configs = config.load_general_config() self._general_configs = config.load_general_config()
proxy_config = self._general_configs.get('proxies', {})
try:
proxies = proxy_config[base_url]
except KeyError:
proxies = proxy_config.get('default', {})
self._proxy_configs = ProxyConfig.from_dict(proxies)
self._auth_configs = auth.load_config( self._auth_configs = auth.load_config(
config_dict=self._general_configs, credstore_env=credstore_env, config_dict=self._general_configs
) )
self.credstore_env = credstore_env self.credstore_env = credstore_env
base_url = utils.parse_host( base_url = utils.parse_host(
base_url, IS_WINDOWS_PLATFORM, tls=bool(tls) base_url, IS_WINDOWS_PLATFORM, tls=bool(tls)
) )
# SSH has a different default for num_pools to all other adapters
num_pools = num_pools or DEFAULT_NUM_POOLS_SSH if \
base_url.startswith('ssh://') else DEFAULT_NUM_POOLS
if base_url.startswith('http+unix://'): if base_url.startswith('http+unix://'):
self._custom_adapter = UnixHTTPAdapter( self._custom_adapter = UnixAdapter(
base_url, timeout, pool_connections=num_pools, base_url, timeout, pool_connections=num_pools
max_pool_size=max_pool_size
) )
self.mount('http+docker://', self._custom_adapter) self.mount('http+docker://', self._custom_adapter)
self._unmount('http://', 'https://') self._unmount('http://', 'https://')
@ -166,70 +132,56 @@ class APIClient(
'The npipe:// protocol is only supported on Windows' 'The npipe:// protocol is only supported on Windows'
) )
try: try:
self._custom_adapter = NpipeHTTPAdapter( self._custom_adapter = NpipeAdapter(
base_url, timeout, pool_connections=num_pools, base_url, timeout, pool_connections=num_pools
max_pool_size=max_pool_size
) )
except NameError as err: except NameError:
raise DockerException( raise DockerException(
'Install pypiwin32 package to enable npipe:// support' 'Install pypiwin32 package to enable npipe:// support'
) from err )
self.mount('http+docker://', self._custom_adapter) self.mount('http+docker://', self._custom_adapter)
self.base_url = 'http+docker://localnpipe' self.base_url = 'http+docker://localnpipe'
elif base_url.startswith('ssh://'):
try:
self._custom_adapter = SSHHTTPAdapter(
base_url, timeout, pool_connections=num_pools,
max_pool_size=max_pool_size, shell_out=use_ssh_client
)
except NameError as err:
raise DockerException(
'Install paramiko package to enable ssh:// support'
) from err
self.mount('http+docker://ssh', self._custom_adapter)
self._unmount('http://', 'https://')
self.base_url = 'http+docker://ssh'
else: else:
# Use SSLAdapter for the ability to specify SSL version # Use SSLAdapter for the ability to specify SSL version
if isinstance(tls, TLSConfig): if isinstance(tls, TLSConfig):
tls.configure_client(self) tls.configure_client(self)
elif tls: elif tls:
self._custom_adapter = requests.adapters.HTTPAdapter( self._custom_adapter = SSLAdapter(pool_connections=num_pools)
pool_connections=num_pools)
self.mount('https://', self._custom_adapter) self.mount('https://', self._custom_adapter)
self.base_url = base_url self.base_url = base_url
# version detection needs to be after unix adapter mounting # version detection needs to be after unix adapter mounting
if version is None or (isinstance( if version is None:
version, self._version = DEFAULT_DOCKER_API_VERSION
str elif isinstance(version, six.string_types):
) and version.lower() == 'auto'): if version.lower() == 'auto':
self._version = self._retrieve_server_version() self._version = self._retrieve_server_version()
else: else:
self._version = version self._version = version
if not isinstance(self._version, str): else:
raise DockerException( raise DockerException(
'Version parameter must be a string or None. ' 'Version parameter must be a string or None. Found {0}'.format(
f'Found {type(version).__name__}' type(version).__name__
)
) )
if utils.version_lt(self._version, MINIMUM_DOCKER_API_VERSION): if utils.version_lt(self._version, MINIMUM_DOCKER_API_VERSION):
raise InvalidVersion( raise InvalidVersion(
f'API versions below {MINIMUM_DOCKER_API_VERSION} are ' 'API versions below {} are no longer supported by this '
f'no longer supported by this library.' 'library.'.format(MINIMUM_DOCKER_API_VERSION)
) )
def _retrieve_server_version(self): def _retrieve_server_version(self):
try: try:
return self.version(api_version=False)["ApiVersion"] return self.version(api_version=False)["ApiVersion"]
except KeyError as ke: except KeyError:
raise DockerException( raise DockerException(
'Invalid response from docker daemon: key "ApiVersion"' 'Invalid response from docker daemon: key "ApiVersion"'
' is missing.' ' is missing.'
) from ke )
except Exception as e: except Exception as e:
raise DockerException( raise DockerException(
f'Error while fetching server API version: {e}' 'Error while fetching server API version: {0}'.format(e)
) from e )
def _set_request_timeout(self, kwargs): def _set_request_timeout(self, kwargs):
"""Prepare the kwargs for an HTTP request by inserting the timeout """Prepare the kwargs for an HTTP request by inserting the timeout
@ -255,26 +207,28 @@ class APIClient(
def _url(self, pathfmt, *args, **kwargs): def _url(self, pathfmt, *args, **kwargs):
for arg in args: for arg in args:
if not isinstance(arg, str): if not isinstance(arg, six.string_types):
raise ValueError( raise ValueError(
f'Expected a string but found {arg} ({type(arg)}) instead' 'Expected a string but found {0} ({1}) '
'instead'.format(arg, type(arg))
) )
quote_f = partial(urllib.parse.quote, safe="/:") quote_f = partial(six.moves.urllib.parse.quote, safe="/:")
args = map(quote_f, args) args = map(quote_f, args)
formatted_path = pathfmt.format(*args)
if kwargs.get('versioned_api', True): if kwargs.get('versioned_api', True):
return f'{self.base_url}/v{self._version}{formatted_path}' return '{0}/v{1}{2}'.format(
self.base_url, self._version, pathfmt.format(*args)
)
else: else:
return f'{self.base_url}{formatted_path}' return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
def _raise_for_status(self, response): def _raise_for_status(self, response):
"""Raises stored :class:`APIError`, if one occurred.""" """Raises stored :class:`APIError`, if one occurred."""
try: try:
response.raise_for_status() response.raise_for_status()
except requests.exceptions.HTTPError as e: except requests.exceptions.HTTPError as e:
raise create_api_error_from_http_exception(e) from e raise create_api_error_from_http_exception(e)
def _result(self, response, json=False, binary=False): def _result(self, response, json=False, binary=False):
assert not (json and binary) assert not (json and binary)
@ -291,7 +245,7 @@ class APIClient(
# so we do this disgusting thing here. # so we do this disgusting thing here.
data2 = {} data2 = {}
if data is not None and isinstance(data, dict): if data is not None and isinstance(data, dict):
for k, v in iter(data.items()): for k, v in six.iteritems(data):
if v is not None: if v is not None:
data2[k] = v data2[k] = v
elif data is not None: elif data is not None:
@ -319,27 +273,18 @@ class APIClient(
return self._create_websocket_connection(full_url) return self._create_websocket_connection(full_url)
def _create_websocket_connection(self, url): def _create_websocket_connection(self, url):
try:
import websocket
return websocket.create_connection(url) return websocket.create_connection(url)
except ImportError as ie:
raise DockerException(
'The `websocket-client` library is required '
'for using websocket connections. '
'You can install the `docker` library '
'with the [websocket] extra to install it.'
) from ie
def _get_raw_response_socket(self, response): def _get_raw_response_socket(self, response):
self._raise_for_status(response) self._raise_for_status(response)
if self.base_url == "http+docker://localnpipe": if self.base_url == "http+docker://localnpipe":
sock = response.raw._fp.fp.raw.sock sock = response.raw._fp.fp.raw.sock
elif self.base_url.startswith('http+docker://ssh'): elif six.PY3:
sock = response.raw._fp.fp.channel
else:
sock = response.raw._fp.fp.raw sock = response.raw._fp.fp.raw
if self.base_url.startswith("https://"): if self.base_url.startswith("https://"):
sock = sock._sock sock = sock._sock
else:
sock = response.raw._fp.fp._sock
try: try:
# Keep a reference to the response to stop it being garbage # Keep a reference to the response to stop it being garbage
# collected. If the response is garbage collected, it will # collected. If the response is garbage collected, it will
@ -357,7 +302,8 @@ class APIClient(
if response.raw._fp.chunked: if response.raw._fp.chunked:
if decode: if decode:
yield from json_stream(self._stream_helper(response, False)) for chunk in json_stream(self._stream_helper(response, False)):
yield chunk
else: else:
reader = response.raw reader = response.raw
while not reader.closed: while not reader.closed:
@ -413,38 +359,22 @@ class APIClient(
def _stream_raw_result(self, response, chunk_size=1, decode=True): def _stream_raw_result(self, response, chunk_size=1, decode=True):
''' Stream result for TTY-enabled container and raw binary data''' ''' Stream result for TTY-enabled container and raw binary data'''
self._raise_for_status(response) self._raise_for_status(response)
for out in response.iter_content(chunk_size, decode):
yield out
# Disable timeout on the underlying socket to prevent def _read_from_socket(self, response, stream, tty=False):
# Read timed out(s) for long running processes
socket = self._get_raw_response_socket(response)
self._disable_socket_timeout(socket)
yield from response.iter_content(chunk_size, decode)
def _read_from_socket(self, response, stream, tty=True, demux=False):
"""Consume all data from the socket, close the response and return the
data. If stream=True, then a generator is returned instead and the
caller is responsible for closing the response.
"""
socket = self._get_raw_response_socket(response) socket = self._get_raw_response_socket(response)
gen = frames_iter(socket, tty) gen = None
if tty is False:
if demux: gen = frames_iter(socket)
# The generator will output tuples (stdout, stderr)
gen = (demux_adaptor(*frame) for frame in gen)
else: else:
# The generator will output strings gen = socket_raw_iter(socket)
gen = (data for (_, data) in gen)
if stream: if stream:
return gen return gen
else: else:
try: return six.binary_type().join(gen)
# Wait for all frames, concatenate them, and return the result
return consume_socket_output(gen, demux=demux)
finally:
response.close()
def _disable_socket_timeout(self, socket): def _disable_socket_timeout(self, socket):
""" Depending on the combination of python version and whether we're """ Depending on the combination of python version and whether we're
@ -490,12 +420,12 @@ class APIClient(
self._result(res, binary=True) self._result(res, binary=True)
self._raise_for_status(res) self._raise_for_status(res)
sep = b'' sep = six.binary_type()
if stream: if stream:
return self._multiplexed_response_stream_helper(res) return self._multiplexed_response_stream_helper(res)
else: else:
return sep.join( return sep.join(
list(self._multiplexed_buffer_helper(res)) [x for x in self._multiplexed_buffer_helper(res)]
) )
def _unmount(self, *args): def _unmount(self, *args):
@ -504,7 +434,7 @@ class APIClient(
def get_adapter(self, url): def get_adapter(self, url):
try: try:
return super().get_adapter(url) return super(APIClient, self).get_adapter(url)
except requests.exceptions.InvalidSchema as e: except requests.exceptions.InvalidSchema as e:
if self._custom_adapter: if self._custom_adapter:
return self._custom_adapter return self._custom_adapter
@ -522,11 +452,9 @@ class APIClient(
Args: Args:
dockercfg_path (str): Use a custom path for the Docker config file dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present, (default ``$HOME/.docker/config.json`` if present,
otherwise ``$HOME/.dockercfg``) otherwise``$HOME/.dockercfg``)
Returns: Returns:
None None
""" """
self._auth_configs = auth.load_config( self._auth_configs = auth.load_config(dockercfg_path)
dockercfg_path, credstore_env=self.credstore_env
)

View File

@ -1,11 +1,13 @@
import base64 import base64
import six
from .. import utils from .. import utils
class ConfigApiMixin: class ConfigApiMixin(object):
@utils.minimum_version('1.30') @utils.minimum_version('1.30')
def create_config(self, name, data, labels=None, templating=None): def create_config(self, name, data, labels=None):
""" """
Create a config Create a config
@ -13,9 +15,6 @@ class ConfigApiMixin:
name (string): Name of the config name (string): Name of the config
data (bytes): Config data to be stored data (bytes): Config data to be stored
labels (dict): A mapping of labels to assign to the config labels (dict): A mapping of labels to assign to the config
templating (dict): dictionary containing the name of the
templating driver to be used expressed as
{ name: <templating_driver_name>}
Returns (dict): ID of the newly created config Returns (dict): ID of the newly created config
""" """
@ -23,12 +22,12 @@ class ConfigApiMixin:
data = data.encode('utf-8') data = data.encode('utf-8')
data = base64.b64encode(data) data = base64.b64encode(data)
if six.PY3:
data = data.decode('ascii') data = data.decode('ascii')
body = { body = {
'Data': data, 'Data': data,
'Name': name, 'Name': name,
'Labels': labels, 'Labels': labels
'Templating': templating
} }
url = self._url('/configs/create') url = self._url('/configs/create')
@ -43,7 +42,7 @@ class ConfigApiMixin:
Retrieve config metadata Retrieve config metadata
Args: Args:
id (string): Full ID of the config to inspect id (string): Full ID of the config to remove
Returns (dict): A dictionary of metadata Returns (dict): A dictionary of metadata

View File

@ -1,20 +1,19 @@
import six
from datetime import datetime from datetime import datetime
from .. import errors, utils from .. import errors
from .. import utils
from ..constants import DEFAULT_DATA_CHUNK_SIZE from ..constants import DEFAULT_DATA_CHUNK_SIZE
from ..types import ( from ..types import (
CancellableStream, CancellableStream, ContainerConfig, EndpointConfig, HostConfig,
ContainerConfig, NetworkingConfig
EndpointConfig,
HostConfig,
NetworkingConfig,
) )
class ContainerApiMixin: class ContainerApiMixin(object):
@utils.check_resource('container') @utils.check_resource('container')
def attach(self, container, stdout=True, stderr=True, def attach(self, container, stdout=True, stderr=True,
stream=False, logs=False, demux=False): stream=False, logs=False):
""" """
Attach to a container. Attach to a container.
@ -29,15 +28,11 @@ class ContainerApiMixin:
stream (bool): Return container output progressively as an iterator stream (bool): Return container output progressively as an iterator
of strings, rather than a single string. of strings, rather than a single string.
logs (bool): Include the container's previous output. logs (bool): Include the container's previous output.
demux (bool): Keep stdout and stderr separate.
Returns: Returns:
By default, the container's output as a single string (two if By default, the container's output as a single string.
``demux=True``: one for stdout and one for stderr).
If ``stream=True``, an iterator of output strings. If If ``stream=True``, an iterator of output strings.
``demux=True``, two iterators are returned: one for stdout and one
for stderr.
Raises: Raises:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
@ -59,7 +54,8 @@ class ContainerApiMixin:
response = self._post(u, headers=headers, params=params, stream=True) response = self._post(u, headers=headers, params=params, stream=True)
output = self._read_from_socket( output = self._read_from_socket(
response, stream, self._check_is_tty(container), demux=demux) response, stream, self._check_is_tty(container)
)
if stream: if stream:
return CancellableStream(output, response) return CancellableStream(output, response)
@ -113,7 +109,7 @@ class ContainerApiMixin:
@utils.check_resource('container') @utils.check_resource('container')
def commit(self, container, repository=None, tag=None, message=None, def commit(self, container, repository=None, tag=None, message=None,
author=None, pause=True, changes=None, conf=None): author=None, changes=None, conf=None):
""" """
Commit a container to an image. Similar to the ``docker commit`` Commit a container to an image. Similar to the ``docker commit``
command. command.
@ -124,7 +120,6 @@ class ContainerApiMixin:
tag (str): The tag to push tag (str): The tag to push
message (str): A commit message message (str): A commit message
author (str): The name of the author author (str): The name of the author
pause (bool): Whether to pause the container before committing
changes (str): Dockerfile instructions to apply while committing changes (str): Dockerfile instructions to apply while committing
conf (dict): The configuration for the container. See the conf (dict): The configuration for the container. See the
`Engine API documentation `Engine API documentation
@ -141,7 +136,6 @@ class ContainerApiMixin:
'tag': tag, 'tag': tag,
'comment': message, 'comment': message,
'author': author, 'author': author,
'pause': pause,
'changes': changes 'changes': changes
} }
u = self._url("/commit") u = self._url("/commit")
@ -175,8 +169,7 @@ class ContainerApiMixin:
- `exited` (int): Only containers with specified exit code - `exited` (int): Only containers with specified exit code
- `status` (str): One of ``restarting``, ``running``, - `status` (str): One of ``restarting``, ``running``,
``paused``, ``exited`` ``paused``, ``exited``
- `label` (str|list): format either ``"key"``, ``"key=value"`` - `label` (str): format either ``"key"`` or ``"key=value"``
or a list of such.
- `id` (str): The id of the container. - `id` (str): The id of the container.
- `name` (str): The name of the container. - `name` (str): The name of the container.
- `ancestor` (str): Filter by container ancestor. Format of - `ancestor` (str): Filter by container ancestor. Format of
@ -225,8 +218,7 @@ class ContainerApiMixin:
working_dir=None, domainname=None, host_config=None, working_dir=None, domainname=None, host_config=None,
mac_address=None, labels=None, stop_signal=None, mac_address=None, labels=None, stop_signal=None,
networking_config=None, healthcheck=None, networking_config=None, healthcheck=None,
stop_timeout=None, runtime=None, stop_timeout=None, runtime=None):
use_config_proxy=True, platform=None):
""" """
Creates a container. Parameters are similar to those for the ``docker Creates a container. Parameters are similar to those for the ``docker
run`` command except it doesn't support the attach options (``-a``). run`` command except it doesn't support the attach options (``-a``).
@ -245,9 +237,9 @@ class ContainerApiMixin:
.. code-block:: python .. code-block:: python
container_id = client.api.create_container( container_id = cli.create_container(
'busybox', 'ls', ports=[1111, 2222], 'busybox', 'ls', ports=[1111, 2222],
host_config=client.api.create_host_config(port_bindings={ host_config=cli.create_host_config(port_bindings={
1111: 4567, 1111: 4567,
2222: None 2222: None
}) })
@ -259,24 +251,22 @@ class ContainerApiMixin:
.. code-block:: python .. code-block:: python
client.api.create_host_config( cli.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)})
port_bindings={1111: ('127.0.0.1', 4567)}
)
Or without host port assignment: Or without host port assignment:
.. code-block:: python .. code-block:: python
client.api.create_host_config(port_bindings={1111: ('127.0.0.1',)}) cli.create_host_config(port_bindings={1111: ('127.0.0.1',)})
If you wish to use UDP instead of TCP (default), you need to declare If you wish to use UDP instead of TCP (default), you need to declare
ports as such in both the config and host config: ports as such in both the config and host config:
.. code-block:: python .. code-block:: python
container_id = client.api.create_container( container_id = cli.create_container(
'busybox', 'ls', ports=[(1111, 'udp'), 2222], 'busybox', 'ls', ports=[(1111, 'udp'), 2222],
host_config=client.api.create_host_config(port_bindings={ host_config=cli.create_host_config(port_bindings={
'1111/udp': 4567, 2222: None '1111/udp': 4567, 2222: None
}) })
) )
@ -286,7 +276,7 @@ class ContainerApiMixin:
.. code-block:: python .. code-block:: python
client.api.create_host_config(port_bindings={ cli.create_host_config(port_bindings={
1111: [1234, 4567] 1111: [1234, 4567]
}) })
@ -294,7 +284,7 @@ class ContainerApiMixin:
.. code-block:: python .. code-block:: python
client.api.create_host_config(port_bindings={ cli.create_host_config(port_bindings={
1111: [ 1111: [
('192.168.0.100', 1234), ('192.168.0.100', 1234),
('192.168.0.101', 1234) ('192.168.0.101', 1234)
@ -310,9 +300,9 @@ class ContainerApiMixin:
.. code-block:: python .. code-block:: python
container_id = client.api.create_container( container_id = cli.create_container(
'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'], 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
host_config=client.api.create_host_config(binds={ host_config=cli.create_host_config(binds={
'/home/user1/': { '/home/user1/': {
'bind': '/mnt/vol2', 'bind': '/mnt/vol2',
'mode': 'rw', 'mode': 'rw',
@ -320,11 +310,6 @@ class ContainerApiMixin:
'/var/www': { '/var/www': {
'bind': '/mnt/vol1', 'bind': '/mnt/vol1',
'mode': 'ro', 'mode': 'ro',
},
'/autofs/user1': {
'bind': '/mnt/vol3',
'mode': 'rw',
'propagation': 'shared'
} }
}) })
) )
@ -334,12 +319,11 @@ class ContainerApiMixin:
.. code-block:: python .. code-block:: python
container_id = client.api.create_container( container_id = cli.create_container(
'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2', '/mnt/vol3'], 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
host_config=client.api.create_host_config(binds=[ host_config=cli.create_host_config(binds=[
'/home/user1/:/mnt/vol2', '/home/user1/:/mnt/vol2',
'/var/www:/mnt/vol1:ro', '/var/www:/mnt/vol1:ro',
'/autofs/user1:/mnt/vol3:rw,shared',
]) ])
) )
@ -355,15 +339,15 @@ class ContainerApiMixin:
.. code-block:: python .. code-block:: python
networking_config = client.api.create_networking_config({ networking_config = docker_client.create_networking_config({
'network1': client.api.create_endpoint_config( 'network1': docker_client.create_endpoint_config(
ipv4_address='172.28.0.124', ipv4_address='172.28.0.124',
aliases=['foo', 'bar'], aliases=['foo', 'bar'],
links=['container2'] links=['container2']
) )
}) })
ctnr = client.api.create_container( ctnr = docker_client.create_container(
img, command, networking_config=networking_config img, command, networking_config=networking_config
) )
@ -403,11 +387,6 @@ class ContainerApiMixin:
runtime (str): Runtime to use with this container. runtime (str): Runtime to use with this container.
healthcheck (dict): Specify a test to perform to check that the healthcheck (dict): Specify a test to perform to check that the
container is healthy. container is healthy.
use_config_proxy (bool): If ``True``, and if the docker client
configuration file (``~/.docker/config.json`` by default)
contains a proxy configuration, the corresponding environment
variables will be set in the container being created.
platform (str): Platform in the format ``os[/arch[/variant]]``.
Returns: Returns:
A dictionary with an image 'Id' key and a 'Warnings' key. A dictionary with an image 'Id' key and a 'Warnings' key.
@ -418,17 +397,9 @@ class ContainerApiMixin:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
If the server returns an error. If the server returns an error.
""" """
if isinstance(volumes, str): if isinstance(volumes, six.string_types):
volumes = [volumes, ] volumes = [volumes, ]
if isinstance(environment, dict):
environment = utils.utils.format_environment(environment)
if use_config_proxy:
environment = self._proxy_configs.inject_proxy_environment(
environment
) or None
config = self.create_container_config( config = self.create_container_config(
image, command, hostname, user, detach, stdin_open, tty, image, command, hostname, user, detach, stdin_open, tty,
ports, environment, volumes, ports, environment, volumes,
@ -437,22 +408,16 @@ class ContainerApiMixin:
stop_signal, networking_config, healthcheck, stop_signal, networking_config, healthcheck,
stop_timeout, runtime stop_timeout, runtime
) )
return self.create_container_from_config(config, name, platform) return self.create_container_from_config(config, name)
def create_container_config(self, *args, **kwargs): def create_container_config(self, *args, **kwargs):
return ContainerConfig(self._version, *args, **kwargs) return ContainerConfig(self._version, *args, **kwargs)
def create_container_from_config(self, config, name=None, platform=None): def create_container_from_config(self, config, name=None):
u = self._url("/containers/create") u = self._url("/containers/create")
params = { params = {
'name': name 'name': name
} }
if platform:
if utils.version_lt(self._version, '1.41'):
raise errors.InvalidVersion(
'platform is not supported for API version < 1.41'
)
params['platform'] = platform
res = self._post_json(u, data=config, params=params) res = self._post_json(u, data=config, params=params)
return self._result(res, True) return self._result(res, True)
@ -496,33 +461,34 @@ class ContainerApiMixin:
For example, ``/dev/sda:/dev/xvda:rwm`` allows the container For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
to have read-write access to the host's ``/dev/sda`` via a to have read-write access to the host's ``/dev/sda`` via a
node named ``/dev/xvda`` inside the container. node named ``/dev/xvda`` inside the container.
device_requests (:py:class:`list`): Expose host resources such as
GPUs to the container, as a list of
:py:class:`docker.types.DeviceRequest` instances.
dns (:py:class:`list`): Set custom DNS servers. dns (:py:class:`list`): Set custom DNS servers.
dns_opt (:py:class:`list`): Additional options to be added to the dns_opt (:py:class:`list`): Additional options to be added to the
container's ``resolv.conf`` file container's ``resolv.conf`` file
dns_search (:py:class:`list`): DNS search domains. dns_search (:py:class:`list`): DNS search domains.
extra_hosts (dict): Additional hostnames to resolve inside the extra_hosts (dict): Addtional hostnames to resolve inside the
container, as a mapping of hostname to IP address. container, as a mapping of hostname to IP address.
group_add (:py:class:`list`): List of additional group names and/or group_add (:py:class:`list`): List of additional group names and/or
IDs that the container process will run as. IDs that the container process will run as.
init (bool): Run an init inside the container that forwards init (bool): Run an init inside the container that forwards
signals and reaps processes signals and reaps processes
init_path (str): Path to the docker-init binary
ipc_mode (str): Set the IPC mode for the container. ipc_mode (str): Set the IPC mode for the container.
isolation (str): Isolation technology to use. Default: ``None``. isolation (str): Isolation technology to use. Default: `None`.
links (dict): Mapping of links using the links (dict or list of tuples): Either a dictionary mapping name
``{'container': 'alias'}`` format. The alias is optional. to alias or as a list of ``(name, alias)`` tuples.
Containers declared in this dict will be linked to the new log_config (dict): Logging configuration, as a dictionary with
container using the provided alias. Default: ``None``. keys:
log_config (LogConfig): Logging configuration
- ``type`` The logging driver name.
- ``config`` A dictionary of configuration for the logging
driver.
lxc_conf (dict): LXC config. lxc_conf (dict): LXC config.
mem_limit (float or str): Memory limit. Accepts float values mem_limit (float or str): Memory limit. Accepts float values
(which represent the memory limit of the created container in (which represent the memory limit of the created container in
bytes) or a string with a units identification char bytes) or a string with a units identification char
(``100000b``, ``1000k``, ``128m``, ``1g``). If a string is (``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
specified without a units character, bytes are assumed as an specified without a units character, bytes are assumed as an
mem_reservation (float or str): Memory soft limit.
mem_swappiness (int): Tune a container's memory swappiness mem_swappiness (int): Tune a container's memory swappiness
behavior. Accepts number between 0 and 100. behavior. Accepts number between 0 and 100.
memswap_limit (str or int): Maximum amount of memory + swap a memswap_limit (str or int): Maximum amount of memory + swap a
@ -534,13 +500,11 @@ class ContainerApiMixin:
network_mode (str): One of: network_mode (str): One of:
- ``bridge`` Create a new network stack for the container on - ``bridge`` Create a new network stack for the container on
the bridge network. on the bridge network.
- ``none`` No networking for this container. - ``none`` No networking for this container.
- ``container:<name|id>`` Reuse another container's network - ``container:<name|id>`` Reuse another container's network
stack. stack.
- ``host`` Use the host network stack. - ``host`` Use the host network stack.
This mode is incompatible with ``port_bindings``.
oom_kill_disable (bool): Whether to disable OOM killer. oom_kill_disable (bool): Whether to disable OOM killer.
oom_score_adj (int): An integer value containing the score given oom_score_adj (int): An integer value containing the score given
to the container in order to tune OOM killer preferences. to the container in order to tune OOM killer preferences.
@ -550,7 +514,6 @@ class ContainerApiMixin:
unlimited. unlimited.
port_bindings (dict): See :py:meth:`create_container` port_bindings (dict): See :py:meth:`create_container`
for more information. for more information.
Imcompatible with ``host`` in ``network_mode``.
privileged (bool): Give extended privileges to this container. privileged (bool): Give extended privileges to this container.
publish_all_ports (bool): Publish all ports to the host. publish_all_ports (bool): Publish all ports to the host.
read_only (bool): Mount the container's root filesystem as read read_only (bool): Mount the container's root filesystem as read
@ -580,12 +543,10 @@ class ContainerApiMixin:
} }
ulimits (:py:class:`list`): Ulimits to set inside the container, ulimits (:py:class:`list`): Ulimits to set inside the container,
as a list of :py:class:`docker.types.Ulimit` instances. as a list of dicts.
userns_mode (str): Sets the user namespace mode for the container userns_mode (str): Sets the user namespace mode for the container
when user namespace remapping option is enabled. Supported when user namespace remapping option is enabled. Supported
values are: ``host`` values are: ``host``
uts_mode (str): Sets the UTS namespace mode for the container.
Supported values are: ``host``
volumes_from (:py:class:`list`): List of container names or IDs to volumes_from (:py:class:`list`): List of container names or IDs to
get volumes from. get volumes from.
runtime (str): Runtime to use with this container. runtime (str): Runtime to use with this container.
@ -597,11 +558,8 @@ class ContainerApiMixin:
Example: Example:
>>> client.api.create_host_config( >>> cli.create_host_config(privileged=True, cap_drop=['MKNOD'],
... privileged=True, volumes_from=['nostalgic_newton'])
... cap_drop=['MKNOD'],
... volumes_from=['nostalgic_newton'],
... )
{'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True, {'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True,
'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False} 'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False}
@ -631,11 +589,11 @@ class ContainerApiMixin:
Example: Example:
>>> client.api.create_network('network1') >>> docker_client.create_network('network1')
>>> networking_config = client.api.create_networking_config({ >>> networking_config = docker_client.create_networking_config({
'network1': client.api.create_endpoint_config() 'network1': docker_client.create_endpoint_config()
}) })
>>> container = client.api.create_container( >>> container = docker_client.create_container(
img, command, networking_config=networking_config img, command, networking_config=networking_config
) )
@ -651,27 +609,24 @@ class ContainerApiMixin:
aliases (:py:class:`list`): A list of aliases for this endpoint. aliases (:py:class:`list`): A list of aliases for this endpoint.
Names in that list can be used within the network to reach the Names in that list can be used within the network to reach the
container. Defaults to ``None``. container. Defaults to ``None``.
links (dict): Mapping of links for this endpoint using the links (:py:class:`list`): A list of links for this endpoint.
``{'container': 'alias'}`` format. The alias is optional. Containers declared in this list will be linked to this
Containers declared in this dict will be linked to this container. Defaults to ``None``.
container using the provided alias. Defaults to ``None``.
ipv4_address (str): The IP address of this container on the ipv4_address (str): The IP address of this container on the
network, using the IPv4 protocol. Defaults to ``None``. network, using the IPv4 protocol. Defaults to ``None``.
ipv6_address (str): The IP address of this container on the ipv6_address (str): The IP address of this container on the
network, using the IPv6 protocol. Defaults to ``None``. network, using the IPv6 protocol. Defaults to ``None``.
link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6) link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
addresses. addresses.
driver_opt (dict): A dictionary of options to provide to the
network driver. Defaults to ``None``.
Returns: Returns:
(dict) An endpoint config. (dict) An endpoint config.
Example: Example:
>>> endpoint_config = client.api.create_endpoint_config( >>> endpoint_config = client.create_endpoint_config(
aliases=['web', 'app'], aliases=['web', 'app'],
links={'app_db': 'db', 'another': None}, links=['app_db'],
ipv4_address='132.65.0.123' ipv4_address='132.65.0.123'
) )
@ -687,8 +642,7 @@ class ContainerApiMixin:
container (str): The container to diff container (str): The container to diff
Returns: Returns:
(list) A list of dictionaries containing the attributes `Path` (str)
and `Kind`.
Raises: Raises:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
@ -722,8 +676,7 @@ class ContainerApiMixin:
return self._stream_raw_result(res, chunk_size, False) return self._stream_raw_result(res, chunk_size, False)
@utils.check_resource('container') @utils.check_resource('container')
def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE, def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
encode_stream=False):
""" """
Retrieve a file or folder from a container in the form of a tar Retrieve a file or folder from a container in the form of a tar
archive. archive.
@ -734,8 +687,6 @@ class ContainerApiMixin:
chunk_size (int): The number of bytes returned by each iteration chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB received. Default: 2 MB
encode_stream (bool): Determines if data should be encoded
(gzip-compressed) during transmission. Default: False
Returns: Returns:
(tuple): First element is a raw tar data stream. Second element is (tuple): First element is a raw tar data stream. Second element is
@ -744,29 +695,12 @@ class ContainerApiMixin:
Raises: Raises:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
If the server returns an error. If the server returns an error.
Example:
>>> c = docker.APIClient()
>>> f = open('./sh_bin.tar', 'wb')
>>> bits, stat = c.api.get_archive(container, '/bin/sh')
>>> print(stat)
{'name': 'sh', 'size': 1075464, 'mode': 493,
'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''}
>>> for chunk in bits:
... f.write(chunk)
>>> f.close()
""" """
params = { params = {
'path': path 'path': path
} }
headers = {
"Accept-Encoding": "gzip, deflate"
} if encode_stream else {
"Accept-Encoding": "identity"
}
url = self._url('/containers/{0}/archive', container) url = self._url('/containers/{0}/archive', container)
res = self._get(url, params=params, stream=True, headers=headers) res = self._get(url, params=params, stream=True)
self._raise_for_status(res) self._raise_for_status(res)
encoded_stat = res.headers.get('x-docker-container-path-stat') encoded_stat = res.headers.get('x-docker-container-path-stat')
return ( return (
@ -810,7 +744,7 @@ class ContainerApiMixin:
url = self._url("/containers/{0}/kill", container) url = self._url("/containers/{0}/kill", container)
params = {} params = {}
if signal is not None: if signal is not None:
if not isinstance(signal, str): if not isinstance(signal, six.string_types):
signal = int(signal) signal = int(signal)
params['signal'] = signal params['signal'] = signal
res = self._post(url, params=params) res = self._post(url, params=params)
@ -829,22 +763,21 @@ class ContainerApiMixin:
Args: Args:
container (str): The container to get logs from container (str): The container to get logs from
stdout (bool): Get ``STDOUT``. Default ``True`` stdout (bool): Get ``STDOUT``
stderr (bool): Get ``STDERR``. Default ``True`` stderr (bool): Get ``STDERR``
stream (bool): Stream the response. Default ``False`` stream (bool): Stream the response
timestamps (bool): Show timestamps. Default ``False`` timestamps (bool): Show timestamps
tail (str or int): Output specified number of lines at the end of tail (str or int): Output specified number of lines at the end of
logs. Either an integer of number of lines or the string logs. Either an integer of number of lines or the string
``all``. Default ``all`` ``all``. Default ``all``
since (datetime, int, or float): Show logs since a given datetime, since (datetime or int): Show logs since a given datetime or
integer epoch (in seconds) or float (in fractional seconds) integer epoch (in seconds)
follow (bool): Follow log output. Default ``False`` follow (bool): Follow log output
until (datetime, int, or float): Show logs that occurred before until (datetime or int): Show logs that occurred before the given
the given datetime, integer epoch (in seconds), or datetime or integer epoch (in seconds)
float (in fractional seconds)
Returns: Returns:
(generator of bytes or bytes) (generator or str)
Raises: Raises:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
@ -866,12 +799,10 @@ class ContainerApiMixin:
params['since'] = utils.datetime_to_timestamp(since) params['since'] = utils.datetime_to_timestamp(since)
elif (isinstance(since, int) and since > 0): elif (isinstance(since, int) and since > 0):
params['since'] = since params['since'] = since
elif (isinstance(since, float) and since > 0.0):
params['since'] = since
else: else:
raise errors.InvalidArgument( raise errors.InvalidArgument(
'since value should be datetime or positive int/float,' 'since value should be datetime or positive int, '
f' not {type(since)}' 'not {}'.format(type(since))
) )
if until is not None: if until is not None:
@ -883,12 +814,10 @@ class ContainerApiMixin:
params['until'] = utils.datetime_to_timestamp(until) params['until'] = utils.datetime_to_timestamp(until)
elif (isinstance(until, int) and until > 0): elif (isinstance(until, int) and until > 0):
params['until'] = until params['until'] = until
elif (isinstance(until, float) and until > 0.0):
params['until'] = until
else: else:
raise errors.InvalidArgument( raise errors.InvalidArgument(
f'until value should be datetime or positive int/float, ' 'until value should be datetime or positive int, '
f'not {type(until)}' 'not {}'.format(type(until))
) )
url = self._url("/containers/{0}/logs", container) url = self._url("/containers/{0}/logs", container)
@ -941,7 +870,7 @@ class ContainerApiMixin:
.. code-block:: python .. code-block:: python
>>> client.api.port('7174d6347063', 80) >>> cli.port('7174d6347063', 80)
[{'HostIp': '0.0.0.0', 'HostPort': '80'}] [{'HostIp': '0.0.0.0', 'HostPort': '80'}]
""" """
res = self._get(self._url("/containers/{0}/json", container)) res = self._get(self._url("/containers/{0}/json", container))
@ -959,10 +888,9 @@ class ContainerApiMixin:
if '/' in private_port: if '/' in private_port:
return port_settings.get(private_port) return port_settings.get(private_port)
for protocol in ['tcp', 'udp', 'sctp']: h_ports = port_settings.get(private_port + '/tcp')
h_ports = port_settings.get(f"{private_port}/{protocol}") if h_ports is None:
if h_ports: h_ports = port_settings.get(private_port + '/udp')
break
return h_ports return h_ports
@ -976,7 +904,7 @@ class ContainerApiMixin:
container (str): The container where the file(s) will be extracted container (str): The container where the file(s) will be extracted
path (str): Path inside the container where the file(s) will be path (str): Path inside the container where the file(s) will be
extracted. Must exist. extracted. Must exist.
data (bytes or stream): tar data to be extracted data (bytes): tar data to be extracted
Returns: Returns:
(bool): True if the call succeeds. (bool): True if the call succeeds.
@ -1120,10 +1048,10 @@ class ContainerApiMixin:
Example: Example:
>>> container = client.api.create_container( >>> container = cli.create_container(
... image='busybox:latest', ... image='busybox:latest',
... command='/bin/sleep 30') ... command='/bin/sleep 30')
>>> client.api.start(container=container.get('Id')) >>> cli.start(container=container.get('Id'))
""" """
if args or kwargs: if args or kwargs:
raise errors.DeprecatedMethod( raise errors.DeprecatedMethod(
@ -1136,7 +1064,7 @@ class ContainerApiMixin:
self._raise_for_status(res) self._raise_for_status(res)
@utils.check_resource('container') @utils.check_resource('container')
def stats(self, container, decode=None, stream=True, one_shot=None): def stats(self, container, decode=None, stream=True):
""" """
Stream statistics for a specific container. Similar to the Stream statistics for a specific container. Similar to the
``docker stats`` command. ``docker stats`` command.
@ -1144,13 +1072,9 @@ class ContainerApiMixin:
Args: Args:
container (str): The container to stream statistics from container (str): The container to stream statistics from
decode (bool): If set to true, stream will be decoded into dicts decode (bool): If set to true, stream will be decoded into dicts
on the fly. Only applicable if ``stream`` is True. on the fly. False by default.
False by default.
stream (bool): If set to false, only the current stats will be stream (bool): If set to false, only the current stats will be
returned instead of a stream. True by default. returned instead of a stream. True by default.
one_shot (bool): If set to true, Only get a single stat instead of
waiting for 2 cycles. Must be used with stream=false. False by
default.
Raises: Raises:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
@ -1158,30 +1082,12 @@ class ContainerApiMixin:
""" """
url = self._url("/containers/{0}/stats", container) url = self._url("/containers/{0}/stats", container)
params = {
'stream': stream
}
if one_shot is not None:
if utils.version_lt(self._version, '1.41'):
raise errors.InvalidVersion(
'one_shot is not supported for API version < 1.41'
)
params['one-shot'] = one_shot
if stream: if stream:
if one_shot: return self._stream_helper(self._get(url, stream=True),
raise errors.InvalidArgument( decode=decode)
'one_shot is only available in conjunction with '
'stream=False'
)
return self._stream_helper(
self._get(url, stream=True, params=params), decode=decode
)
else: else:
if decode: return self._result(self._get(url, params={'stream': False}),
raise errors.InvalidArgument( json=True)
"decode is only available in conjunction with stream=True"
)
return self._result(self._get(url, params=params), json=True)
@utils.check_resource('container') @utils.check_resource('container')
def stop(self, container, timeout=None): def stop(self, container, timeout=None):
@ -1264,8 +1170,8 @@ class ContainerApiMixin:
cpu_shares (int): CPU shares (relative weight) cpu_shares (int): CPU shares (relative weight)
cpuset_cpus (str): CPUs in which to allow execution cpuset_cpus (str): CPUs in which to allow execution
cpuset_mems (str): MEMs in which to allow execution cpuset_mems (str): MEMs in which to allow execution
mem_limit (float or str): Memory limit mem_limit (int or str): Memory limit
mem_reservation (float or str): Memory soft limit mem_reservation (int or str): Memory soft limit
memswap_limit (int or str): Total memory (memory + swap), -1 to memswap_limit (int or str): Total memory (memory + swap), -1 to
disable swap disable swap
kernel_memory (int or str): Kernel memory limit kernel_memory (int or str): Kernel memory limit

View File

@ -4,7 +4,7 @@ from datetime import datetime
from .. import auth, types, utils from .. import auth, types, utils
class DaemonApiMixin: class DaemonApiMixin(object):
@utils.minimum_version('1.25') @utils.minimum_version('1.25')
def df(self): def df(self):
""" """
@ -42,8 +42,8 @@ class DaemonApiMixin:
Example: Example:
>>> for event in client.events(decode=True) >>> for event in client.events()
... print(event) ... print event
{u'from': u'image/with:tag', {u'from': u'image/with:tag',
u'id': u'container-id', u'id': u'container-id',
u'status': u'start', u'status': u'start',
@ -54,7 +54,7 @@ class DaemonApiMixin:
>>> events = client.events() >>> events = client.events()
>>> for event in events: >>> for event in events:
... print(event) ... print event
>>> # and cancel from another thread >>> # and cancel from another thread
>>> events.close() >>> events.close()
""" """
@ -109,7 +109,7 @@ class DaemonApiMixin:
the Docker server. the Docker server.
dockercfg_path (str): Use a custom path for the Docker config file dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present, (default ``$HOME/.docker/config.json`` if present,
otherwise ``$HOME/.dockercfg``) otherwise``$HOME/.dockercfg``)
Returns: Returns:
(dict): The response from the login request (dict): The response from the login request
@ -124,15 +124,13 @@ class DaemonApiMixin:
# If dockercfg_path is passed check to see if the config file exists, # If dockercfg_path is passed check to see if the config file exists,
# if so load that config. # if so load that config.
if dockercfg_path and os.path.exists(dockercfg_path): if dockercfg_path and os.path.exists(dockercfg_path):
self._auth_configs = auth.load_config( self._auth_configs = auth.load_config(dockercfg_path)
dockercfg_path, credstore_env=self.credstore_env elif not self._auth_configs:
) self._auth_configs = auth.load_config()
elif not self._auth_configs or self._auth_configs.is_empty:
self._auth_configs = auth.load_config(
credstore_env=self.credstore_env
)
authcfg = self._auth_configs.resolve_authconfig(registry) authcfg = auth.resolve_authconfig(
self._auth_configs, registry, credstore_env=self.credstore_env,
)
# If we found an existing auth config for this registry and username # If we found an existing auth config for this registry and username
# combination, we can return it immediately unless reauth is requested. # combination, we can return it immediately unless reauth is requested.
if authcfg and authcfg.get('username', None) == username \ if authcfg and authcfg.get('username', None) == username \
@ -148,7 +146,9 @@ class DaemonApiMixin:
response = self._post_json(self._url('/auth'), data=req_data) response = self._post_json(self._url('/auth'), data=req_data)
if response.status_code == 200: if response.status_code == 200:
self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data) if 'auths' not in self._auth_configs:
self._auth_configs['auths'] = {}
self._auth_configs['auths'][registry or auth.INDEX_NAME] = req_data
return self._result(response, json=True) return self._result(response, json=True)
def ping(self): def ping(self):

View File

@ -1,8 +1,10 @@
from .. import errors, utils import six
from ..types import CancellableStream
from .. import errors
from .. import utils
class ExecApiMixin: class ExecApiMixin(object):
@utils.check_resource('container') @utils.check_resource('container')
def exec_create(self, container, cmd, stdout=True, stderr=True, def exec_create(self, container, cmd, stdout=True, stderr=True,
stdin=False, tty=False, privileged=False, user='', stdin=False, tty=False, privileged=False, user='',
@ -43,7 +45,7 @@ class ExecApiMixin:
'Setting environment for exec is not supported in API < 1.25' 'Setting environment for exec is not supported in API < 1.25'
) )
if isinstance(cmd, str): if isinstance(cmd, six.string_types):
cmd = utils.split_command(cmd) cmd = utils.split_command(cmd)
if isinstance(environment, dict): if isinstance(environment, dict):
@ -116,7 +118,7 @@ class ExecApiMixin:
@utils.check_resource('exec_id') @utils.check_resource('exec_id')
def exec_start(self, exec_id, detach=False, tty=False, stream=False, def exec_start(self, exec_id, detach=False, tty=False, stream=False,
socket=False, demux=False): socket=False):
""" """
Start a previously set up exec instance. Start a previously set up exec instance.
@ -125,19 +127,14 @@ class ExecApiMixin:
detach (bool): If true, detach from the exec command. detach (bool): If true, detach from the exec command.
Default: False Default: False
tty (bool): Allocate a pseudo-TTY. Default: False tty (bool): Allocate a pseudo-TTY. Default: False
stream (bool): Return response data progressively as an iterator stream (bool): Stream response data. Default: False
of strings, rather than a single string.
socket (bool): Return the connection socket to allow custom socket (bool): Return the connection socket to allow custom
read/write operations. Must be closed by the caller when done. read/write operations.
demux (bool): Return stdout and stderr separately
Returns: Returns:
(generator or str): If ``stream=True``, a generator yielding
(generator or str or tuple): If ``stream=True``, a generator response chunks. If ``socket=True``, a socket object for the
yielding response chunks. If ``socket=True``, a socket object for connection. A string containing response data otherwise.
the connection. A string containing response data otherwise. If
``demux=True``, a tuple with two elements of type byte: stdout and
stderr.
Raises: Raises:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
@ -162,15 +159,7 @@ class ExecApiMixin:
stream=True stream=True
) )
if detach: if detach:
try:
return self._result(res) return self._result(res)
finally:
res.close()
if socket: if socket:
return self._get_raw_response_socket(res) return self._get_raw_response_socket(res)
return self._read_from_socket(res, stream, tty)
output = self._read_from_socket(res, stream, tty=tty, demux=demux)
if stream:
return CancellableStream(output, res)
else:
return output

View File

@ -1,13 +1,15 @@
import logging import logging
import os import os
import six
from .. import auth, errors, utils from .. import auth, errors, utils
from ..constants import DEFAULT_DATA_CHUNK_SIZE from ..constants import DEFAULT_DATA_CHUNK_SIZE
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
class ImageApiMixin: class ImageApiMixin(object):
@utils.check_resource('image') @utils.check_resource('image')
def get_image(self, image, chunk_size=DEFAULT_DATA_CHUNK_SIZE): def get_image(self, image, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
@ -29,8 +31,8 @@ class ImageApiMixin:
Example: Example:
>>> image = client.api.get_image("busybox:latest") >>> image = cli.get_image("busybox:latest")
>>> f = open('/tmp/busybox-latest.tar', 'wb') >>> f = open('/tmp/busybox-latest.tar', 'w')
>>> for chunk in image: >>> for chunk in image:
>>> f.write(chunk) >>> f.write(chunk)
>>> f.close() >>> f.close()
@ -47,7 +49,7 @@ class ImageApiMixin:
image (str): The image to show history for image (str): The image to show history for
Returns: Returns:
(list): The history of the image (str): The history of the image
Raises: Raises:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
@ -68,8 +70,7 @@ class ImageApiMixin:
filters (dict): Filters to be processed on the image list. filters (dict): Filters to be processed on the image list.
Available filters: Available filters:
- ``dangling`` (bool) - ``dangling`` (bool)
- `label` (str|list): format either ``"key"``, ``"key=value"`` - ``label`` (str): format either ``key`` or ``key=value``
or a list of such.
Returns: Returns:
(dict or list): A list if ``quiet=True``, otherwise a dict. (dict or list): A list if ``quiet=True``, otherwise a dict.
@ -79,18 +80,10 @@ class ImageApiMixin:
If the server returns an error. If the server returns an error.
""" """
params = { params = {
'filter': name,
'only_ids': 1 if quiet else 0, 'only_ids': 1 if quiet else 0,
'all': 1 if all else 0, 'all': 1 if all else 0,
} }
if name:
if utils.version_lt(self._version, '1.25'):
# only use "filter" on API 1.24 and under, as it is deprecated
params['filter'] = name
else:
if filters:
filters['reference'] = name
else:
filters = {'reference': name}
if filters: if filters:
params['filters'] = utils.convert_filters(filters) params['filters'] = utils.convert_filters(filters)
res = self._result(self._get(self._url("/images/json"), params=params), res = self._result(self._get(self._url("/images/json"), params=params),
@ -128,7 +121,7 @@ class ImageApiMixin:
params = _import_image_params( params = _import_image_params(
repository, tag, image, repository, tag, image,
src=(src if isinstance(src, str) else None), src=(src if isinstance(src, six.string_types) else None),
changes=changes changes=changes
) )
headers = {'Content-Type': 'application/tar'} headers = {'Content-Type': 'application/tar'}
@ -137,7 +130,7 @@ class ImageApiMixin:
return self._result( return self._result(
self._post(u, data=None, params=params) self._post(u, data=None, params=params)
) )
elif isinstance(src, str): # from file path elif isinstance(src, six.string_types): # from file path
with open(src, 'rb') as f: with open(src, 'rb') as f:
return self._result( return self._result(
self._post( self._post(
@ -254,15 +247,12 @@ class ImageApiMixin:
@utils.minimum_version('1.30') @utils.minimum_version('1.30')
@utils.check_resource('image') @utils.check_resource('image')
def inspect_distribution(self, image, auth_config=None): def inspect_distribution(self, image):
""" """
Get image digest and platform information by contacting the registry. Get image digest and platform information by contacting the registry.
Args: Args:
image (str): The image name to inspect image (str): The image name to inspect
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
Returns: Returns:
(dict): A dict containing distribution data (dict): A dict containing distribution data
@ -271,21 +261,9 @@ class ImageApiMixin:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
If the server returns an error. If the server returns an error.
""" """
registry, _ = auth.resolve_repository_name(image)
headers = {}
if auth_config is None:
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
else:
log.debug('Sending supplied auth config')
headers['X-Registry-Auth'] = auth.encode_header(auth_config)
url = self._url("/distribution/{0}/json", image)
return self._result( return self._result(
self._get(url, headers=headers), True self._get(self._url("/distribution/{0}/json", image)), True
) )
def load_image(self, data, quiet=None): def load_image(self, data, quiet=None):
@ -349,24 +327,21 @@ class ImageApiMixin:
return self._result(self._post(url, params=params), True) return self._result(self._post(url, params=params), True)
def pull(self, repository, tag=None, stream=False, auth_config=None, def pull(self, repository, tag=None, stream=False, auth_config=None,
decode=False, platform=None, all_tags=False): decode=False, platform=None):
""" """
Pulls an image. Similar to the ``docker pull`` command. Pulls an image. Similar to the ``docker pull`` command.
Args: Args:
repository (str): The repository to pull repository (str): The repository to pull
tag (str): The tag to pull. If ``tag`` is ``None`` or empty, it tag (str): The tag to pull
is set to ``latest``. stream (bool): Stream the output as a generator
stream (bool): Stream the output as a generator. Make sure to auth_config (dict): Override the credentials that
consume the generator, otherwise pull might get cancelled. :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
auth_config (dict): Override the credentials that are found in the this request. ``auth_config`` should contain the ``username``
config for this request. ``auth_config`` should contain the and ``password`` keys to be valid.
``username`` and ``password`` keys to be valid.
decode (bool): Decode the JSON data from the server into dicts. decode (bool): Decode the JSON data from the server into dicts.
Only applies with ``stream=True`` Only applies with ``stream=True``
platform (str): Platform in the format ``os[/arch[/variant]]`` platform (str): Platform in the format ``os[/arch[/variant]]``
all_tags (bool): Pull all image tags, the ``tag`` parameter is
ignored.
Returns: Returns:
(generator or str): The output (generator or str): The output
@ -377,9 +352,8 @@ class ImageApiMixin:
Example: Example:
>>> resp = client.api.pull('busybox', stream=True, decode=True) >>> for line in cli.pull('busybox', stream=True):
... for line in resp: ... print(json.dumps(json.loads(line), indent=4))
... print(json.dumps(line, indent=4))
{ {
"status": "Pulling image (latest) from busybox", "status": "Pulling image (latest) from busybox",
"progressDetail": {}, "progressDetail": {},
@ -392,12 +366,8 @@ class ImageApiMixin:
} }
""" """
repository, image_tag = utils.parse_repository_tag(repository) if not tag:
tag = tag or image_tag or 'latest' repository, tag = utils.parse_repository_tag(repository)
if all_tags:
tag = None
registry, repo_name = auth.resolve_repository_name(repository) registry, repo_name = auth.resolve_repository_name(repository)
params = { params = {
@ -443,9 +413,10 @@ class ImageApiMixin:
repository (str): The repository to push to repository (str): The repository to push to
tag (str): An optional tag to push tag (str): An optional tag to push
stream (bool): Stream the output as a blocking generator stream (bool): Stream the output as a blocking generator
auth_config (dict): Override the credentials that are found in the auth_config (dict): Override the credentials that
config for this request. ``auth_config`` should contain the :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
``username`` and ``password`` keys to be valid. this request. ``auth_config`` should contain the ``username``
and ``password`` keys to be valid.
decode (bool): Decode the JSON data from the server into dicts. decode (bool): Decode the JSON data from the server into dicts.
Only applies with ``stream=True`` Only applies with ``stream=True``
@ -457,17 +428,12 @@ class ImageApiMixin:
If the server returns an error. If the server returns an error.
Example: Example:
>>> resp = client.api.push( >>> for line in cli.push('yourname/app', stream=True):
... 'yourname/app', ... print line
... stream=True, {"status":"Pushing repository yourname/app (1 tags)"}
... decode=True, {"status":"Pushing","progressDetail":{},"id":"511136ea3c5a"}
... ) {"status":"Image already pushed, skipping","progressDetail":{},
... for line in resp: "id":"511136ea3c5a"}
... print(line)
{'status': 'Pushing repository yourname/app (1 tags)'}
{'status': 'Pushing','progressDetail': {}, 'id': '511136ea3c5a'}
{'status': 'Image already pushed, skipping', 'progressDetail':{},
'id': '511136ea3c5a'}
... ...
""" """
@ -513,14 +479,13 @@ class ImageApiMixin:
res = self._delete(self._url("/images/{0}", image), params=params) res = self._delete(self._url("/images/{0}", image), params=params)
return self._result(res, True) return self._result(res, True)
def search(self, term, limit=None): def search(self, term):
""" """
Search for images on Docker Hub. Similar to the ``docker search`` Search for images on Docker Hub. Similar to the ``docker search``
command. command.
Args: Args:
term (str): A term to search for. term (str): A term to search for.
limit (int): The maximum number of results to return.
Returns: Returns:
(list of dicts): The response of the search. (list of dicts): The response of the search.
@ -529,12 +494,8 @@ class ImageApiMixin:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
If the server returns an error. If the server returns an error.
""" """
params = {'term': term}
if limit is not None:
params['limit'] = limit
return self._result( return self._result(
self._get(self._url("/images/search"), params=params), self._get(self._url("/images/search"), params={'term': term}),
True True
) )
@ -558,7 +519,7 @@ class ImageApiMixin:
Example: Example:
>>> client.api.tag('ubuntu', 'localhost:5000/ubuntu', 'latest', >>> client.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
force=True) force=True)
""" """
params = { params = {
@ -575,7 +536,7 @@ class ImageApiMixin:
def is_file(src): def is_file(src):
try: try:
return ( return (
isinstance(src, str) and isinstance(src, six.string_types) and
os.path.isfile(src) os.path.isfile(src)
) )
except TypeError: # a data string will make isfile() raise a TypeError except TypeError: # a data string will make isfile() raise a TypeError

View File

@ -1,12 +1,13 @@
from .. import utils
from ..errors import InvalidVersion from ..errors import InvalidVersion
from ..utils import check_resource, minimum_version, version_lt from ..utils import check_resource, minimum_version
from ..utils import version_lt
from .. import utils
class NetworkApiMixin: class NetworkApiMixin(object):
def networks(self, names=None, ids=None, filters=None): def networks(self, names=None, ids=None, filters=None):
""" """
List networks. Similar to the ``docker network ls`` command. List networks. Similar to the ``docker networks ls`` command.
Args: Args:
names (:py:class:`list`): List of names to filter by names (:py:class:`list`): List of names to filter by
@ -14,8 +15,7 @@ class NetworkApiMixin:
filters (dict): Filters to be processed on the network list. filters (dict): Filters to be processed on the network list.
Available filters: Available filters:
- ``driver=[<driver-name>]`` Matches a network's driver. - ``driver=[<driver-name>]`` Matches a network's driver.
- ``label=[<key>]``, ``label=[<key>=<value>]`` or a list of - ``label=[<key>]`` or ``label=[<key>=<value>]``.
such.
- ``type=["custom"|"builtin"]`` Filters networks by type. - ``type=["custom"|"builtin"]`` Filters networks by type.
Returns: Returns:
@ -74,7 +74,7 @@ class NetworkApiMixin:
Example: Example:
A network using the bridge driver: A network using the bridge driver:
>>> client.api.create_network("network1", driver="bridge") >>> client.create_network("network1", driver="bridge")
You can also create more advanced networks with custom IPAM You can also create more advanced networks with custom IPAM
configurations. For example, setting the subnet to configurations. For example, setting the subnet to
@ -89,7 +89,7 @@ class NetworkApiMixin:
>>> ipam_config = docker.types.IPAMConfig( >>> ipam_config = docker.types.IPAMConfig(
pool_configs=[ipam_pool] pool_configs=[ipam_pool]
) )
>>> client.api.create_network("network1", driver="bridge", >>> docker_client.create_network("network1", driver="bridge",
ipam=ipam_config) ipam=ipam_config)
""" """
if options is not None and not isinstance(options, dict): if options is not None and not isinstance(options, dict):
@ -215,8 +215,7 @@ class NetworkApiMixin:
def connect_container_to_network(self, container, net_id, def connect_container_to_network(self, container, net_id,
ipv4_address=None, ipv6_address=None, ipv4_address=None, ipv6_address=None,
aliases=None, links=None, aliases=None, links=None,
link_local_ips=None, driver_opt=None, link_local_ips=None):
mac_address=None):
""" """
Connect a container to a network. Connect a container to a network.
@ -235,16 +234,12 @@ class NetworkApiMixin:
network, using the IPv6 protocol. Defaults to ``None``. network, using the IPv6 protocol. Defaults to ``None``.
link_local_ips (:py:class:`list`): A list of link-local link_local_ips (:py:class:`list`): A list of link-local
(IPv4/IPv6) addresses. (IPv4/IPv6) addresses.
mac_address (str): The MAC address of this container on the
network. Defaults to ``None``.
""" """
data = { data = {
"Container": container, "Container": container,
"EndpointConfig": self.create_endpoint_config( "EndpointConfig": self.create_endpoint_config(
aliases=aliases, links=links, ipv4_address=ipv4_address, aliases=aliases, links=links, ipv4_address=ipv4_address,
ipv6_address=ipv6_address, link_local_ips=link_local_ips, ipv6_address=ipv6_address, link_local_ips=link_local_ips
driver_opt=driver_opt,
mac_address=mac_address
), ),
} }

View File

@ -1,7 +1,9 @@
import six
from .. import auth, utils from .. import auth, utils
class PluginApiMixin: class PluginApiMixin(object):
@utils.minimum_version('1.25') @utils.minimum_version('1.25')
@utils.check_resource('name') @utils.check_resource('name')
def configure_plugin(self, name, options): def configure_plugin(self, name, options):
@ -19,7 +21,7 @@ class PluginApiMixin:
url = self._url('/plugins/{0}/set', name) url = self._url('/plugins/{0}/set', name)
data = options data = options
if isinstance(data, dict): if isinstance(data, dict):
data = [f'{k}={v}' for k, v in data.items()] data = ['{0}={1}'.format(k, v) for k, v in six.iteritems(data)]
res = self._post_json(url, data=data) res = self._post_json(url, data=data)
self._raise_for_status(res) self._raise_for_status(res)
return True return True
@ -51,20 +53,19 @@ class PluginApiMixin:
return True return True
@utils.minimum_version('1.25') @utils.minimum_version('1.25')
def disable_plugin(self, name, force=False): def disable_plugin(self, name):
""" """
Disable an installed plugin. Disable an installed plugin.
Args: Args:
name (string): The name of the plugin. The ``:latest`` tag is name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted. optional, and is the default if omitted.
force (bool): To enable the force query parameter.
Returns: Returns:
``True`` if successful ``True`` if successful
""" """
url = self._url('/plugins/{0}/disable', name) url = self._url('/plugins/{0}/disable', name)
res = self._post(url, params={'force': force}) res = self._post(url)
self._raise_for_status(res) self._raise_for_status(res)
return True return True

View File

@ -1,9 +1,12 @@
import base64 import base64
from .. import errors, utils import six
from .. import errors
from .. import utils
class SecretApiMixin: class SecretApiMixin(object):
@utils.minimum_version('1.25') @utils.minimum_version('1.25')
def create_secret(self, name, data, labels=None, driver=None): def create_secret(self, name, data, labels=None, driver=None):
""" """
@ -22,6 +25,7 @@ class SecretApiMixin:
data = data.encode('utf-8') data = data.encode('utf-8')
data = base64.b64encode(data) data = base64.b64encode(data)
if six.PY3:
data = data.decode('ascii') data = data.decode('ascii')
body = { body = {
'Data': data, 'Data': data,
@ -49,7 +53,7 @@ class SecretApiMixin:
Retrieve secret metadata Retrieve secret metadata
Args: Args:
id (string): Full ID of the secret to inspect id (string): Full ID of the secret to remove
Returns (dict): A dictionary of metadata Returns (dict): A dictionary of metadata

View File

@ -2,12 +2,13 @@ from .. import auth, errors, utils
from ..types import ServiceMode from ..types import ServiceMode
def _check_api_features(version, task_template, update_config, endpoint_spec, def _check_api_features(version, task_template, update_config, endpoint_spec):
rollback_config):
def raise_version_error(param, min_version): def raise_version_error(param, min_version):
raise errors.InvalidVersion( raise errors.InvalidVersion(
f'{param} is not supported in API version < {min_version}' '{} is not supported in API version < {}'.format(
param, min_version
)
) )
if update_config is not None: if update_config is not None:
@ -17,24 +18,10 @@ def _check_api_features(version, task_template, update_config, endpoint_spec,
if 'Monitor' in update_config: if 'Monitor' in update_config:
raise_version_error('UpdateConfig.monitor', '1.25') raise_version_error('UpdateConfig.monitor', '1.25')
if utils.version_lt(version, '1.28'):
if update_config.get('FailureAction') == 'rollback':
raise_version_error(
'UpdateConfig.failure_action rollback', '1.28'
)
if utils.version_lt(version, '1.29'): if utils.version_lt(version, '1.29'):
if 'Order' in update_config: if 'Order' in update_config:
raise_version_error('UpdateConfig.order', '1.29') raise_version_error('UpdateConfig.order', '1.29')
if rollback_config is not None:
if utils.version_lt(version, '1.28'):
raise_version_error('rollback_config', '1.28')
if utils.version_lt(version, '1.29'):
if 'Order' in update_config:
raise_version_error('RollbackConfig.order', '1.29')
if endpoint_spec is not None: if endpoint_spec is not None:
if utils.version_lt(version, '1.32') and 'Ports' in endpoint_spec: if utils.version_lt(version, '1.32') and 'Ports' in endpoint_spec:
if any(p.get('PublishMode') for p in endpoint_spec['Ports']): if any(p.get('PublishMode') for p in endpoint_spec['Ports']):
@ -86,10 +73,6 @@ def _check_api_features(version, task_template, update_config, endpoint_spec,
if container_spec.get('Isolation') is not None: if container_spec.get('Isolation') is not None:
raise_version_error('ContainerSpec.isolation', '1.35') raise_version_error('ContainerSpec.isolation', '1.35')
if utils.version_lt(version, '1.38'):
if container_spec.get('Init') is not None:
raise_version_error('ContainerSpec.init', '1.38')
if task_template.get('Resources'): if task_template.get('Resources'):
if utils.version_lt(version, '1.32'): if utils.version_lt(version, '1.32'):
if task_template['Resources'].get('GenericResources'): if task_template['Resources'].get('GenericResources'):
@ -111,12 +94,12 @@ def _merge_task_template(current, override):
return merged return merged
class ServiceApiMixin: class ServiceApiMixin(object):
@utils.minimum_version('1.24') @utils.minimum_version('1.24')
def create_service( def create_service(
self, task_template, name=None, labels=None, mode=None, self, task_template, name=None, labels=None, mode=None,
update_config=None, networks=None, endpoint_config=None, update_config=None, networks=None, endpoint_config=None,
endpoint_spec=None, rollback_config=None endpoint_spec=None
): ):
""" """
Create a service. Create a service.
@ -131,11 +114,8 @@ class ServiceApiMixin:
or global). Defaults to replicated. or global). Defaults to replicated.
update_config (UpdateConfig): Specification for the update strategy update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None`` of the service. Default: ``None``
rollback_config (RollbackConfig): Specification for the rollback networks (:py:class:`list`): List of network names or IDs to attach
strategy of the service. Default: ``None`` the service to. Default: ``None``.
networks (:py:class:`list`): List of network names or IDs or
:py:class:`~docker.types.NetworkAttachmentConfig` to attach the
service to. Default: ``None``.
endpoint_spec (EndpointSpec): Properties that can be configured to endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``. access and load balance a service. Default: ``None``.
@ -149,8 +129,7 @@ class ServiceApiMixin:
""" """
_check_api_features( _check_api_features(
self._version, task_template, update_config, endpoint_spec, self._version, task_template, update_config, endpoint_spec
rollback_config
) )
url = self._url('/services/create') url = self._url('/services/create')
@ -181,9 +160,6 @@ class ServiceApiMixin:
if update_config is not None: if update_config is not None:
data['UpdateConfig'] = update_config data['UpdateConfig'] = update_config
if rollback_config is not None:
data['RollbackConfig'] = rollback_config
return self._result( return self._result(
self._post_json(url, data=data, headers=headers), True self._post_json(url, data=data, headers=headers), True
) )
@ -200,8 +176,7 @@ class ServiceApiMixin:
into the service inspect output. into the service inspect output.
Returns: Returns:
(dict): A dictionary of the server-side representation of the ``True`` if successful.
service, including all relevant properties.
Raises: Raises:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
@ -260,7 +235,7 @@ class ServiceApiMixin:
return True return True
@utils.minimum_version('1.24') @utils.minimum_version('1.24')
def services(self, filters=None, status=None): def services(self, filters=None):
""" """
List services. List services.
@ -268,8 +243,6 @@ class ServiceApiMixin:
filters (dict): Filters to process on the nodes list. Valid filters (dict): Filters to process on the nodes list. Valid
filters: ``id``, ``name`` , ``label`` and ``mode``. filters: ``id``, ``name`` , ``label`` and ``mode``.
Default: ``None``. Default: ``None``.
status (bool): Include the service task count of running and
desired tasks. Default: ``None``.
Returns: Returns:
A list of dictionaries containing data about each service. A list of dictionaries containing data about each service.
@ -281,12 +254,6 @@ class ServiceApiMixin:
params = { params = {
'filters': utils.convert_filters(filters) if filters else None 'filters': utils.convert_filters(filters) if filters else None
} }
if status is not None:
if utils.version_lt(self._version, '1.41'):
raise errors.InvalidVersion(
'status is not supported in API version < 1.41'
)
params['status'] = status
url = self._url('/services') url = self._url('/services')
return self._result(self._get(url, params=params), True) return self._result(self._get(url, params=params), True)
@ -369,8 +336,7 @@ class ServiceApiMixin:
def update_service(self, service, version, task_template=None, name=None, def update_service(self, service, version, task_template=None, name=None,
labels=None, mode=None, update_config=None, labels=None, mode=None, update_config=None,
networks=None, endpoint_config=None, networks=None, endpoint_config=None,
endpoint_spec=None, fetch_current_spec=False, endpoint_spec=None, fetch_current_spec=False):
rollback_config=None):
""" """
Update a service. Update a service.
@ -388,18 +354,15 @@ class ServiceApiMixin:
or global). Defaults to replicated. or global). Defaults to replicated.
update_config (UpdateConfig): Specification for the update strategy update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None``. of the service. Default: ``None``.
rollback_config (RollbackConfig): Specification for the rollback networks (:py:class:`list`): List of network names or IDs to attach
strategy of the service. Default: ``None`` the service to. Default: ``None``.
networks (:py:class:`list`): List of network names or IDs or
:py:class:`~docker.types.NetworkAttachmentConfig` to attach the
service to. Default: ``None``.
endpoint_spec (EndpointSpec): Properties that can be configured to endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``. access and load balance a service. Default: ``None``.
fetch_current_spec (boolean): Use the undefined settings from the fetch_current_spec (boolean): Use the undefined settings from the
current specification of the service. Default: ``False`` current specification of the service. Default: ``False``
Returns: Returns:
A dictionary containing a ``Warnings`` key. ``True`` if successful.
Raises: Raises:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
@ -407,8 +370,7 @@ class ServiceApiMixin:
""" """
_check_api_features( _check_api_features(
self._version, task_template, update_config, endpoint_spec, self._version, task_template, update_config, endpoint_spec
rollback_config
) )
if fetch_current_spec: if fetch_current_spec:
@ -454,11 +416,6 @@ class ServiceApiMixin:
else: else:
data['UpdateConfig'] = current.get('UpdateConfig') data['UpdateConfig'] = current.get('UpdateConfig')
if rollback_config is not None:
data['RollbackConfig'] = rollback_config
else:
data['RollbackConfig'] = current.get('RollbackConfig')
if networks is not None: if networks is not None:
converted_networks = utils.convert_service_networks(networks) converted_networks = utils.convert_service_networks(networks)
if utils.version_lt(self._version, '1.25'): if utils.version_lt(self._version, '1.25'):
@ -483,4 +440,5 @@ class ServiceApiMixin:
resp = self._post_json( resp = self._post_json(
url, data=data, params={'version': version}, headers=headers url, data=data, params={'version': version}, headers=headers
) )
return self._result(resp, json=True) self._raise_for_status(resp)
return True

View File

@ -1,13 +1,13 @@
import http.client as http_client
import logging import logging
from six.moves import http_client
from .. import errors, types, utils from .. import errors
from ..constants import DEFAULT_SWARM_ADDR_POOL, DEFAULT_SWARM_SUBNET_SIZE from .. import types
from .. import utils
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
class SwarmApiMixin: class SwarmApiMixin(object):
def create_swarm_spec(self, *args, **kwargs): def create_swarm_spec(self, *args, **kwargs):
""" """
@ -57,10 +57,10 @@ class SwarmApiMixin:
Example: Example:
>>> spec = client.api.create_swarm_spec( >>> spec = client.create_swarm_spec(
snapshot_interval=5000, log_entries_for_slow_followers=1200 snapshot_interval=5000, log_entries_for_slow_followers=1200
) )
>>> client.api.init_swarm( >>> client.init_swarm(
advertise_addr='eth0', listen_addr='0.0.0.0:5000', advertise_addr='eth0', listen_addr='0.0.0.0:5000',
force_new_cluster=False, swarm_spec=spec force_new_cluster=False, swarm_spec=spec
) )
@ -82,9 +82,7 @@ class SwarmApiMixin:
@utils.minimum_version('1.24') @utils.minimum_version('1.24')
def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377', def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
force_new_cluster=False, swarm_spec=None, force_new_cluster=False, swarm_spec=None):
default_addr_pool=None, subnet_size=None,
data_path_addr=None, data_path_port=None):
""" """
Initialize a new Swarm using the current connected engine as the first Initialize a new Swarm using the current connected engine as the first
node. node.
@ -109,20 +107,9 @@ class SwarmApiMixin:
swarm_spec (dict): Configuration settings of the new Swarm. Use swarm_spec (dict): Configuration settings of the new Swarm. Use
``APIClient.create_swarm_spec`` to generate a valid ``APIClient.create_swarm_spec`` to generate a valid
configuration. Default: None configuration. Default: None
default_addr_pool (list of strings): Default Address Pool specifies
default subnet pools for global scope networks. Each pool
should be specified as a CIDR block, like '10.0.0.0/8'.
Default: None
subnet_size (int): SubnetSize specifies the subnet size of the
networks created from the default subnet pool. Default: None
data_path_addr (string): Address or interface to use for data path
traffic. For example, 192.168.1.1, or an interface, like eth0.
data_path_port (int): Port number to use for data path traffic.
Acceptable port range is 1024 to 49151. If set to ``None`` or
0, the default port 4789 will be used. Default: None
Returns: Returns:
(str): The ID of the created node. ``True`` if successful.
Raises: Raises:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
@ -132,52 +119,15 @@ class SwarmApiMixin:
url = self._url('/swarm/init') url = self._url('/swarm/init')
if swarm_spec is not None and not isinstance(swarm_spec, dict): if swarm_spec is not None and not isinstance(swarm_spec, dict):
raise TypeError('swarm_spec must be a dictionary') raise TypeError('swarm_spec must be a dictionary')
if default_addr_pool is not None:
if utils.version_lt(self._version, '1.39'):
raise errors.InvalidVersion(
'Address pool is only available for API version >= 1.39'
)
# subnet_size becomes 0 if not set with default_addr_pool
if subnet_size is None:
subnet_size = DEFAULT_SWARM_SUBNET_SIZE
if subnet_size is not None:
if utils.version_lt(self._version, '1.39'):
raise errors.InvalidVersion(
'Subnet size is only available for API version >= 1.39'
)
# subnet_size is ignored if set without default_addr_pool
if default_addr_pool is None:
default_addr_pool = DEFAULT_SWARM_ADDR_POOL
data = { data = {
'AdvertiseAddr': advertise_addr, 'AdvertiseAddr': advertise_addr,
'ListenAddr': listen_addr, 'ListenAddr': listen_addr,
'DefaultAddrPool': default_addr_pool,
'SubnetSize': subnet_size,
'ForceNewCluster': force_new_cluster, 'ForceNewCluster': force_new_cluster,
'Spec': swarm_spec, 'Spec': swarm_spec,
} }
if data_path_addr is not None:
if utils.version_lt(self._version, '1.30'):
raise errors.InvalidVersion(
'Data address path is only available for '
'API version >= 1.30'
)
data['DataPathAddr'] = data_path_addr
if data_path_port is not None:
if utils.version_lt(self._version, '1.40'):
raise errors.InvalidVersion(
'Data path port is only available for '
'API version >= 1.40'
)
data['DataPathPort'] = data_path_port
response = self._post_json(url, data=data) response = self._post_json(url, data=data)
return self._result(response, json=True) self._raise_for_status(response)
return True
@utils.minimum_version('1.24') @utils.minimum_version('1.24')
def inspect_swarm(self): def inspect_swarm(self):
@ -215,7 +165,7 @@ class SwarmApiMixin:
@utils.minimum_version('1.24') @utils.minimum_version('1.24')
def join_swarm(self, remote_addrs, join_token, listen_addr='0.0.0.0:2377', def join_swarm(self, remote_addrs, join_token, listen_addr='0.0.0.0:2377',
advertise_addr=None, data_path_addr=None): advertise_addr=None):
""" """
Make this Engine join a swarm that has already been created. Make this Engine join a swarm that has already been created.
@ -226,7 +176,7 @@ class SwarmApiMixin:
listen_addr (string): Listen address used for inter-manager listen_addr (string): Listen address used for inter-manager
communication if the node gets promoted to manager, as well as communication if the node gets promoted to manager, as well as
determining the networking interface used for the VXLAN Tunnel determining the networking interface used for the VXLAN Tunnel
Endpoint (VTEP). Default: ``'0.0.0.0:2377`` Endpoint (VTEP). Default: ``None``
advertise_addr (string): Externally reachable address advertised advertise_addr (string): Externally reachable address advertised
to other nodes. This can either be an address/port combination to other nodes. This can either be an address/port combination
in the form ``192.168.1.1:4567``, or an interface followed by a in the form ``192.168.1.1:4567``, or an interface followed by a
@ -234,8 +184,6 @@ class SwarmApiMixin:
the port number from the listen address is used. If the port number from the listen address is used. If
AdvertiseAddr is not specified, it will be automatically AdvertiseAddr is not specified, it will be automatically
detected when possible. Default: ``None`` detected when possible. Default: ``None``
data_path_addr (string): Address or interface to use for data path
traffic. For example, 192.168.1.1, or an interface, like eth0.
Returns: Returns:
``True`` if the request went through. ``True`` if the request went through.
@ -245,20 +193,11 @@ class SwarmApiMixin:
If the server returns an error. If the server returns an error.
""" """
data = { data = {
'RemoteAddrs': remote_addrs, "RemoteAddrs": remote_addrs,
'ListenAddr': listen_addr, "ListenAddr": listen_addr,
'JoinToken': join_token, "JoinToken": join_token,
'AdvertiseAddr': advertise_addr, "AdvertiseAddr": advertise_addr,
} }
if data_path_addr is not None:
if utils.version_lt(self._version, '1.30'):
raise errors.InvalidVersion(
'Data address path is only available for '
'API version >= 1.30'
)
data['DataPathAddr'] = data_path_addr
url = self._url('/swarm/join') url = self._url('/swarm/join')
response = self._post_json(url, data=data) response = self._post_json(url, data=data)
self._raise_for_status(response) self._raise_for_status(response)
@ -364,8 +303,8 @@ class SwarmApiMixin:
Example: Example:
>>> key = client.api.get_unlock_key() >>> key = client.get_unlock_key()
>>> client.unlock_swarm(key) >>> client.unlock_node(key)
""" """
if isinstance(key, dict): if isinstance(key, dict):
@ -406,7 +345,7 @@ class SwarmApiMixin:
'Role': 'manager', 'Role': 'manager',
'Labels': {'foo': 'bar'} 'Labels': {'foo': 'bar'}
} }
>>> client.api.update_node(node_id='24ifsmvkjbyhk', version=8, >>> client.update_node(node_id='24ifsmvkjbyhk', version=8,
node_spec=node_spec) node_spec=node_spec)
""" """
@ -416,10 +355,8 @@ class SwarmApiMixin:
return True return True
@utils.minimum_version('1.24') @utils.minimum_version('1.24')
def update_swarm(self, version, swarm_spec=None, def update_swarm(self, version, swarm_spec=None, rotate_worker_token=False,
rotate_worker_token=False, rotate_manager_token=False):
rotate_manager_token=False,
rotate_manager_unlock_key=False):
""" """
Update the Swarm's configuration Update the Swarm's configuration
@ -433,8 +370,6 @@ class SwarmApiMixin:
``False``. ``False``.
rotate_manager_token (bool): Rotate the manager join token. rotate_manager_token (bool): Rotate the manager join token.
Default: ``False``. Default: ``False``.
rotate_manager_unlock_key (bool): Rotate the manager unlock key.
Default: ``False``.
Returns: Returns:
``True`` if the request went through. ``True`` if the request went through.
@ -443,20 +378,12 @@ class SwarmApiMixin:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
If the server returns an error. If the server returns an error.
""" """
url = self._url('/swarm/update') url = self._url('/swarm/update')
params = { response = self._post_json(url, data=swarm_spec, params={
'rotateWorkerToken': rotate_worker_token, 'rotateWorkerToken': rotate_worker_token,
'rotateManagerToken': rotate_manager_token, 'rotateManagerToken': rotate_manager_token,
'version': version 'version': version
} })
if rotate_manager_unlock_key:
if utils.version_lt(self._version, '1.25'):
raise errors.InvalidVersion(
'Rotate manager unlock key '
'is only available for API version >= 1.25'
)
params['rotateManagerUnlockKey'] = rotate_manager_unlock_key
response = self._post_json(url, data=swarm_spec, params=params)
self._raise_for_status(response) self._raise_for_status(response)
return True return True

View File

@ -1,7 +1,8 @@
from .. import errors, utils from .. import errors
from .. import utils
class VolumeApiMixin: class VolumeApiMixin(object):
def volumes(self, filters=None): def volumes(self, filters=None):
""" """
List volumes currently registered by the docker daemon. Similar to the List volumes currently registered by the docker daemon. Similar to the
@ -20,7 +21,7 @@ class VolumeApiMixin:
Example: Example:
>>> client.api.volumes() >>> cli.volumes()
{u'Volumes': [{u'Driver': u'local', {u'Volumes': [{u'Driver': u'local',
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data', u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
u'Name': u'foobar'}, u'Name': u'foobar'},
@ -55,13 +56,10 @@ class VolumeApiMixin:
Example: Example:
>>> volume = client.api.create_volume( >>> volume = cli.create_volume(name='foobar', driver='local',
... name='foobar', driver_opts={'foo': 'bar', 'baz': 'false'},
... driver='local', labels={"key": "value"})
... driver_opts={'foo': 'bar', 'baz': 'false'}, >>> print(volume)
... labels={"key": "value"},
... )
... print(volume)
{u'Driver': u'local', {u'Driver': u'local',
u'Labels': {u'key': u'value'}, u'Labels': {u'key': u'value'},
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data', u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
@ -106,7 +104,7 @@ class VolumeApiMixin:
Example: Example:
>>> client.api.inspect_volume('foobar') >>> cli.inspect_volume('foobar')
{u'Driver': u'local', {u'Driver': u'local',
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data', u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
u'Name': u'foobar'} u'Name': u'foobar'}

View File

@ -2,11 +2,14 @@ import base64
import json import json
import logging import logging
from . import credentials, errors import dockerpycreds
import six
from . import errors
from .utils import config from .utils import config
INDEX_NAME = 'docker.io' INDEX_NAME = 'docker.io'
INDEX_URL = f'https://index.{INDEX_NAME}/v1/' INDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)
TOKEN_USERNAME = '<token>' TOKEN_USERNAME = '<token>'
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
@ -15,32 +18,32 @@ log = logging.getLogger(__name__)
def resolve_repository_name(repo_name): def resolve_repository_name(repo_name):
if '://' in repo_name: if '://' in repo_name:
raise errors.InvalidRepository( raise errors.InvalidRepository(
f'Repository name cannot contain a scheme ({repo_name})' 'Repository name cannot contain a scheme ({0})'.format(repo_name)
) )
index_name, remote_name = split_repo_name(repo_name) index_name, remote_name = split_repo_name(repo_name)
if index_name[0] == '-' or index_name[-1] == '-': if index_name[0] == '-' or index_name[-1] == '-':
raise errors.InvalidRepository( raise errors.InvalidRepository(
f'Invalid index name ({index_name}). ' 'Invalid index name ({0}). Cannot begin or end with a'
'Cannot begin or end with a hyphen.' ' hyphen.'.format(index_name)
) )
return resolve_index_name(index_name), remote_name return resolve_index_name(index_name), remote_name
def resolve_index_name(index_name): def resolve_index_name(index_name):
index_name = convert_to_hostname(index_name) index_name = convert_to_hostname(index_name)
if index_name == f"index.{INDEX_NAME}": if index_name == 'index.' + INDEX_NAME:
index_name = INDEX_NAME index_name = INDEX_NAME
return index_name return index_name
def get_config_header(client, registry): def get_config_header(client, registry):
log.debug('Looking for auth config') log.debug('Looking for auth config')
if not client._auth_configs or client._auth_configs.is_empty: if not client._auth_configs:
log.debug( log.debug(
"No auth config in memory - loading from filesystem" "No auth config in memory - loading from filesystem"
) )
client._auth_configs = load_config(credstore_env=client.credstore_env) client._auth_configs = load_config()
authcfg = resolve_authconfig( authcfg = resolve_authconfig(
client._auth_configs, registry, credstore_env=client.credstore_env client._auth_configs, registry, credstore_env=client.credstore_env
) )
@ -67,187 +70,61 @@ def split_repo_name(repo_name):
def get_credential_store(authconfig, registry): def get_credential_store(authconfig, registry):
if not isinstance(authconfig, AuthConfig): if not registry or registry == INDEX_NAME:
authconfig = AuthConfig(authconfig) registry = 'https://index.docker.io/v1/'
return authconfig.get_credential_store(registry)
return authconfig.get('credHelpers', {}).get(registry) or authconfig.get(
class AuthConfig(dict): 'credsStore'
def __init__(self, dct, credstore_env=None):
if 'auths' not in dct:
dct['auths'] = {}
self.update(dct)
self._credstore_env = credstore_env
self._stores = {}
@classmethod
def parse_auth(cls, entries, raise_on_error=False):
"""
Parses authentication entries
Args:
entries: Dict of authentication entries.
raise_on_error: If set to true, an invalid format will raise
InvalidConfigFile
Returns:
Authentication registry.
"""
conf = {}
for registry, entry in entries.items():
if not isinstance(entry, dict):
log.debug(
f'Config entry for key {registry} is not auth config'
)
# We sometimes fall back to parsing the whole config as if it
# was the auth config by itself, for legacy purposes. In that
# case, we fail silently and return an empty conf if any of the
# keys is not formatted properly.
if raise_on_error:
raise errors.InvalidConfigFile(
f'Invalid configuration for registry {registry}'
)
return {}
if 'identitytoken' in entry:
log.debug(f'Found an IdentityToken entry for registry {registry}')
conf[registry] = {
'IdentityToken': entry['identitytoken']
}
continue # Other values are irrelevant if we have a token
if 'auth' not in entry:
# Starting with engine v1.11 (API 1.23), an empty dictionary is
# a valid value in the auths config.
# https://github.com/docker/compose/issues/3265
log.debug(
f'Auth data for {registry} is absent. '
f'Client might be using a credentials store instead.'
)
conf[registry] = {}
continue
username, password = decode_auth(entry['auth'])
log.debug(
f'Found entry (registry={registry!r}, username={username!r})'
) )
conf[registry] = {
'username': username,
'password': password,
'email': entry.get('email'),
'serveraddress': registry,
}
return conf
@classmethod def resolve_authconfig(authconfig, registry=None, credstore_env=None):
def load_config(cls, config_path, config_dict, credstore_env=None):
"""
Loads authentication data from a Docker configuration file in the given
root directory or if config_path is passed use given path.
Lookup priority:
explicit config_path parameter > DOCKER_CONFIG environment
variable > ~/.docker/config.json > ~/.dockercfg
"""
if not config_dict:
config_file = config.find_config_file(config_path)
if not config_file:
return cls({}, credstore_env)
try:
with open(config_file) as f:
config_dict = json.load(f)
except (OSError, KeyError, ValueError) as e:
# Likely missing new Docker config file or it's in an
# unknown format, continue to attempt to read old location
# and format.
log.debug(e)
return cls(_load_legacy_config(config_file), credstore_env)
res = {}
if config_dict.get('auths'):
log.debug("Found 'auths' section")
res.update({
'auths': cls.parse_auth(
config_dict.pop('auths'), raise_on_error=True
)
})
if config_dict.get('credsStore'):
log.debug("Found 'credsStore' section")
res.update({'credsStore': config_dict.pop('credsStore')})
if config_dict.get('credHelpers'):
log.debug("Found 'credHelpers' section")
res.update({'credHelpers': config_dict.pop('credHelpers')})
if res:
return cls(res, credstore_env)
log.debug(
"Couldn't find auth-related section ; attempting to interpret "
"as auth-only file"
)
return cls({'auths': cls.parse_auth(config_dict)}, credstore_env)
    @property
    def auths(self):
        # Static registry -> auth-entry mapping; 'auths' is always present
        # because __init__ inserts it when missing.
        return self.get('auths', {})

    @property
    def creds_store(self):
        # Name of the default credential helper ('credsStore'), or None.
        return self.get('credsStore', None)

    @property
    def cred_helpers(self):
        # Per-registry helper overrides ('credHelpers' section).
        return self.get('credHelpers', {})

    @property
    def is_empty(self):
        # True when no auth data of any kind is configured.
        return (
            not self.auths and not self.creds_store and not self.cred_helpers
        )
def resolve_authconfig(self, registry=None):
""" """
Returns the authentication data from the given auth configuration for a Returns the authentication data from the given auth configuration for a
specific registry. As with the Docker client, legacy entries in the specific registry. As with the Docker client, legacy entries in the config
config with full URLs are stripped down to hostnames before checking with full URLs are stripped down to hostnames before checking for a match.
for a match. Returns None if no match was found. Returns None if no match was found.
""" """
if self.creds_store or self.cred_helpers: if 'credHelpers' in authconfig or 'credsStore' in authconfig:
store_name = self.get_credential_store(registry) store_name = get_credential_store(authconfig, registry)
if store_name is not None: if store_name is not None:
log.debug( log.debug(
f'Using credentials store "{store_name}"' 'Using credentials store "{0}"'.format(store_name)
)
cfg = _resolve_authconfig_credstore(
authconfig, registry, store_name, env=credstore_env
) )
cfg = self._resolve_authconfig_credstore(registry, store_name)
if cfg is not None: if cfg is not None:
return cfg return cfg
log.debug('No entry in credstore - fetching from auth dict') log.debug('No entry in credstore - fetching from auth dict')
# Default to the public index server # Default to the public index server
registry = resolve_index_name(registry) if registry else INDEX_NAME registry = resolve_index_name(registry) if registry else INDEX_NAME
log.debug(f"Looking for auth entry for {repr(registry)}") log.debug("Looking for auth entry for {0}".format(repr(registry)))
if registry in self.auths: authdict = authconfig.get('auths', {})
log.debug(f"Found {repr(registry)}") if registry in authdict:
return self.auths[registry] log.debug("Found {0}".format(repr(registry)))
return authdict[registry]
for key, conf in self.auths.items(): for key, conf in six.iteritems(authdict):
if resolve_index_name(key) == registry: if resolve_index_name(key) == registry:
log.debug(f"Found {repr(key)}") log.debug("Found {0}".format(repr(key)))
return conf return conf
log.debug("No entry found") log.debug("No entry found")
return None return None
def _resolve_authconfig_credstore(self, registry, credstore_name):
def _resolve_authconfig_credstore(authconfig, registry, credstore_name,
env=None):
if not registry or registry == INDEX_NAME: if not registry or registry == INDEX_NAME:
# The ecosystem is a little schizophrenic with index.docker.io VS # The ecosystem is a little schizophrenic with index.docker.io VS
# docker.io - in that case, it seems the full URL is necessary. # docker.io - in that case, it seems the full URL is necessary.
registry = INDEX_URL registry = INDEX_URL
log.debug(f"Looking for auth entry for {repr(registry)}") log.debug("Looking for auth entry for {0}".format(repr(registry)))
store = self._get_store_instance(credstore_name) store = dockerpycreds.Store(credstore_name, environment=env)
try: try:
data = store.get(registry) data = store.get(registry)
res = { res = {
@ -261,55 +138,13 @@ class AuthConfig(dict):
'Password': data['Secret'], 'Password': data['Secret'],
}) })
return res return res
except credentials.CredentialsNotFound: except dockerpycreds.CredentialsNotFound as e:
log.debug('No entry found') log.debug('No entry found')
return None return None
except credentials.StoreError as e: except dockerpycreds.StoreError as e:
raise errors.DockerException( raise errors.DockerException(
f'Credentials store error: {repr(e)}' 'Credentials store error: {0}'.format(repr(e))
) from e
def _get_store_instance(self, name):
if name not in self._stores:
self._stores[name] = credentials.Store(
name, environment=self._credstore_env
) )
return self._stores[name]
def get_credential_store(self, registry):
if not registry or registry == INDEX_NAME:
registry = INDEX_URL
return self.cred_helpers.get(registry) or self.creds_store
    def get_all_credentials(self):
        """Return every known credential, keyed by both the raw registry key
        and its bare hostname.

        Precedence is last-write-wins: static 'auths' entries, then the
        default credsStore, then per-registry credHelpers.
        """
        auth_data = self.auths.copy()
        if self.creds_store:
            # Retrieve all credentials from the default store
            store = self._get_store_instance(self.creds_store)
            for k in store.list().keys():
                auth_data[k] = self._resolve_authconfig_credstore(
                    k, self.creds_store
                )
                # Also index by bare hostname so either key form resolves.
                auth_data[convert_to_hostname(k)] = auth_data[k]

        # credHelpers entries take priority over all others
        for reg, store_name in self.cred_helpers.items():
            auth_data[reg] = self._resolve_authconfig_credstore(
                reg, store_name
            )
            auth_data[convert_to_hostname(reg)] = auth_data[reg]

        return auth_data
    def add_auth(self, reg, data):
        # Register (or replace) the auth entry for `reg` in the 'auths'
        # section; 'auths' always exists (see __init__).
        self['auths'][reg] = data
def resolve_authconfig(authconfig, registry=None, credstore_env=None):
    """Functional wrapper around AuthConfig.resolve_authconfig.

    Accepts either an AuthConfig instance or a plain dict (which gets
    wrapped with the given credstore environment).
    """
    if isinstance(authconfig, AuthConfig):
        cfg = authconfig
    else:
        cfg = AuthConfig(authconfig, credstore_env)
    return cfg.resolve_authconfig(registry)
def convert_to_hostname(url): def convert_to_hostname(url):
@ -317,7 +152,7 @@ def convert_to_hostname(url):
def decode_auth(auth): def decode_auth(auth):
if isinstance(auth, str): if isinstance(auth, six.string_types):
auth = auth.encode('ascii') auth = auth.encode('ascii')
s = base64.b64decode(auth) s = base64.b64decode(auth)
login, pwd = s.split(b':', 1) login, pwd = s.split(b':', 1)
@ -342,11 +177,100 @@ def parse_auth(entries, raise_on_error=False):
Authentication registry. Authentication registry.
""" """
return AuthConfig.parse_auth(entries, raise_on_error) conf = {}
for registry, entry in six.iteritems(entries):
if not isinstance(entry, dict):
log.debug(
'Config entry for key {0} is not auth config'.format(registry)
)
# We sometimes fall back to parsing the whole config as if it was
# the auth config by itself, for legacy purposes. In that case, we
# fail silently and return an empty conf if any of the keys is not
# formatted properly.
if raise_on_error:
raise errors.InvalidConfigFile(
'Invalid configuration for registry {0}'.format(registry)
)
return {}
if 'identitytoken' in entry:
log.debug('Found an IdentityToken entry for registry {0}'.format(
registry
))
conf[registry] = {
'IdentityToken': entry['identitytoken']
}
continue # Other values are irrelevant if we have a token, skip.
if 'auth' not in entry:
# Starting with engine v1.11 (API 1.23), an empty dictionary is
# a valid value in the auths config.
# https://github.com/docker/compose/issues/3265
log.debug(
'Auth data for {0} is absent. Client might be using a '
'credentials store instead.'.format(registry)
)
conf[registry] = {}
continue
username, password = decode_auth(entry['auth'])
log.debug(
'Found entry (registry={0}, username={1})'
.format(repr(registry), repr(username))
)
conf[registry] = {
'username': username,
'password': password,
'email': entry.get('email'),
'serveraddress': registry,
}
return conf
def load_config(config_path=None, config_dict=None, credstore_env=None): def load_config(config_path=None, config_dict=None):
return AuthConfig.load_config(config_path, config_dict, credstore_env) """
Loads authentication data from a Docker configuration file in the given
root directory or if config_path is passed use given path.
Lookup priority:
explicit config_path parameter > DOCKER_CONFIG environment variable >
~/.docker/config.json > ~/.dockercfg
"""
if not config_dict:
config_file = config.find_config_file(config_path)
if not config_file:
return {}
try:
with open(config_file) as f:
config_dict = json.load(f)
except (IOError, KeyError, ValueError) as e:
# Likely missing new Docker config file or it's in an
# unknown format, continue to attempt to read old location
# and format.
log.debug(e)
return _load_legacy_config(config_file)
res = {}
if config_dict.get('auths'):
log.debug("Found 'auths' section")
res.update({
'auths': parse_auth(config_dict.pop('auths'), raise_on_error=True)
})
if config_dict.get('credsStore'):
log.debug("Found 'credsStore' section")
res.update({'credsStore': config_dict.pop('credsStore')})
if config_dict.get('credHelpers'):
log.debug("Found 'credHelpers' section")
res.update({'credHelpers': config_dict.pop('credHelpers')})
if res:
return res
log.debug(
"Couldn't find auth-related section ; attempting to interpret"
"as auth-only file"
)
return {'auths': parse_auth(config_dict)}
def _load_legacy_config(config_file): def _load_legacy_config(config_file):
@ -373,6 +297,7 @@ def _load_legacy_config(config_file):
}} }}
except Exception as e: except Exception as e:
log.debug(e) log.debug(e)
pass
log.debug("All parsing attempts failed - returning empty config") log.debug("All parsing attempts failed - returning empty config")
return {} return {}

View File

@ -1,5 +1,5 @@
from .api.client import APIClient from .api.client import APIClient
from .constants import DEFAULT_MAX_POOL_SIZE, DEFAULT_TIMEOUT_SECONDS from .constants import DEFAULT_TIMEOUT_SECONDS
from .models.configs import ConfigCollection from .models.configs import ConfigCollection
from .models.containers import ContainerCollection from .models.containers import ContainerCollection
from .models.images import ImageCollection from .models.images import ImageCollection
@ -13,7 +13,7 @@ from .models.volumes import VolumeCollection
from .utils import kwargs_from_env from .utils import kwargs_from_env
class DockerClient: class DockerClient(object):
""" """
A client for communicating with a Docker server. A client for communicating with a Docker server.
@ -26,7 +26,7 @@ class DockerClient:
base_url (str): URL to the Docker server. For example, base_url (str): URL to the Docker server. For example,
``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``. ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
version (str): The version of the API to use. Set to ``auto`` to version (str): The version of the API to use. Set to ``auto`` to
automatically detect the server's version. Default: ``1.35`` automatically detect the server's version. Default: ``1.30``
timeout (int): Default timeout for API calls, in seconds. timeout (int): Default timeout for API calls, in seconds.
tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
``True`` to enable it with default options, or pass a ``True`` to enable it with default options, or pass a
@ -35,11 +35,6 @@ class DockerClient:
user_agent (str): Set a custom user agent for requests to the server. user_agent (str): Set a custom user agent for requests to the server.
credstore_env (dict): Override environment variables when calling the credstore_env (dict): Override environment variables when calling the
credential store process. credential store process.
use_ssh_client (bool): If set to `True`, an ssh connection is made
via shelling out to the ssh client. Ensure the ssh client is
installed and configured on the host.
max_pool_size (int): The maximum number of connections
to save in the pool.
""" """
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
self.api = APIClient(*args, **kwargs) self.api = APIClient(*args, **kwargs)
@ -67,17 +62,14 @@ class DockerClient:
Args: Args:
version (str): The version of the API to use. Set to ``auto`` to version (str): The version of the API to use. Set to ``auto`` to
automatically detect the server's version. Default: ``auto`` automatically detect the server's version. Default: ``1.30``
timeout (int): Default timeout for API calls, in seconds. timeout (int): Default timeout for API calls, in seconds.
max_pool_size (int): The maximum number of connections ssl_version (int): A valid `SSL version`_.
to save in the pool. assert_hostname (bool): Verify the hostname of the server.
environment (dict): The environment to read environment variables environment (dict): The environment to read environment variables
from. Default: the value of ``os.environ`` from. Default: the value of ``os.environ``
credstore_env (dict): Override environment variables when calling credstore_env (dict): Override environment variables when calling
the credential store process. the credential store process.
use_ssh_client (bool): If set to `True`, an ssh connection is
made via shelling out to the ssh client. Ensure the ssh
client is installed and configured on the host.
Example: Example:
@ -88,15 +80,9 @@ class DockerClient:
https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1 https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
""" """
timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT_SECONDS) timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT_SECONDS)
max_pool_size = kwargs.pop('max_pool_size', DEFAULT_MAX_POOL_SIZE)
version = kwargs.pop('version', None) version = kwargs.pop('version', None)
use_ssh_client = kwargs.pop('use_ssh_client', False)
return cls( return cls(
timeout=timeout, timeout=timeout, version=version, **kwargs_from_env(**kwargs)
max_pool_size=max_pool_size,
version=version,
use_ssh_client=use_ssh_client,
**kwargs_from_env(**kwargs)
) )
# Resources # Resources
@ -210,7 +196,7 @@ class DockerClient:
close.__doc__ = APIClient.close.__doc__ close.__doc__ = APIClient.close.__doc__
def __getattr__(self, name): def __getattr__(self, name):
s = [f"'DockerClient' object has no attribute '{name}'"] s = ["'DockerClient' object has no attribute '{}'".format(name)]
# If a user calls a method on APIClient, they # If a user calls a method on APIClient, they
if hasattr(APIClient, name): if hasattr(APIClient, name):
s.append("In Docker SDK for Python 2.0, this method is now on the " s.append("In Docker SDK for Python 2.0, this method is now on the "

View File

@ -1,45 +1,20 @@
import sys import sys
from .version import version
from .version import __version__ DEFAULT_DOCKER_API_VERSION = '1.35'
MINIMUM_DOCKER_API_VERSION = '1.21'
DEFAULT_DOCKER_API_VERSION = '1.45'
MINIMUM_DOCKER_API_VERSION = '1.24'
DEFAULT_TIMEOUT_SECONDS = 60 DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8 STREAM_HEADER_SIZE_BYTES = 8
CONTAINER_LIMITS_KEYS = [ CONTAINER_LIMITS_KEYS = [
'memory', 'memswap', 'cpushares', 'cpusetcpus' 'memory', 'memswap', 'cpushares', 'cpusetcpus'
] ]
DEFAULT_HTTP_HOST = "127.0.0.1"
DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock"
DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'
BYTE_UNITS = {
'b': 1,
'k': 1024,
'm': 1024 * 1024,
'g': 1024 * 1024 * 1024
}
INSECURE_REGISTRY_DEPRECATION_WARNING = \ INSECURE_REGISTRY_DEPRECATION_WARNING = \
'The `insecure_registry` argument to {} ' \ 'The `insecure_registry` argument to {} ' \
'is deprecated and non-functional. Please remove it.' 'is deprecated and non-functional. Please remove it.'
IS_WINDOWS_PLATFORM = (sys.platform == 'win32') IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
WINDOWS_LONGPATH_PREFIX = '\\\\?\\'
DEFAULT_USER_AGENT = f"docker-sdk-python/{__version__}" DEFAULT_USER_AGENT = "docker-sdk-python/{0}".format(version)
DEFAULT_NUM_POOLS = 25 DEFAULT_NUM_POOLS = 25
# The OpenSSH server default value for MaxSessions is 10 which means we can
# use up to 9, leaving the final session for the underlying SSH connection.
# For more details see: https://github.com/docker/docker-py/issues/2246
DEFAULT_NUM_POOLS_SSH = 9
DEFAULT_MAX_POOL_SIZE = 10
DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048 DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
DEFAULT_SWARM_ADDR_POOL = ['10.0.0.0/8']
DEFAULT_SWARM_SUBNET_SIZE = 24

View File

@ -1,2 +0,0 @@
from .api import ContextAPI
from .context import Context

View File

@ -1,206 +0,0 @@
import json
import os
from docker import errors
from .config import (
METAFILE,
get_current_context_name,
get_meta_dir,
write_context_name_to_docker_config,
)
from .context import Context
class ContextAPI:
    """Context API.
    Contains methods for context management:
    create, list, remove, get, inspect.
    """
    # Shared sentinel for the implicit "default" context; never stored on disk.
    DEFAULT_CONTEXT = Context("default", "swarm")

    @classmethod
    def create_context(
            cls, name, orchestrator=None, host=None, tls_cfg=None,
            default_namespace=None, skip_tls_verify=False):
        """Creates a new context.
        Returns:
            (Context): a Context object.
        Raises:
            :py:class:`docker.errors.MissingContextParameter`
                If a context name is not provided.
            :py:class:`docker.errors.ContextAlreadyExists`
                If a context with the name already exists.
            :py:class:`docker.errors.ContextException`
                If name is default.

        Example:
            >>> from docker.context import ContextAPI
            >>> ctx = ContextAPI.create_context(name='test')
            >>> print(ctx.Metadata)
            {
                "Name": "test",
                "Metadata": {},
                "Endpoints": {
                    "docker": {
                        "Host": "unix:///var/run/docker.sock",
                        "SkipTLSVerify": false
                    }
                }
            }
        """
        if not name:
            raise errors.MissingContextParameter("name")
        if name == "default":
            raise errors.ContextException(
                '"default" is a reserved context name')
        ctx = Context.load_context(name)
        if ctx:
            raise errors.ContextAlreadyExists(name)
        # Non-swarm orchestrators get their own endpoint key (e.g. "kubernetes").
        endpoint = "docker"
        if orchestrator and orchestrator != "swarm":
            endpoint = orchestrator
        ctx = Context(name, orchestrator)
        ctx.set_endpoint(
            endpoint, host, tls_cfg,
            skip_tls_verify=skip_tls_verify,
            def_namespace=default_namespace)
        ctx.save()
        return ctx

    @classmethod
    def get_context(cls, name=None):
        """Retrieves a context object.
        Args:
            name (str): The name of the context

        Example:
            >>> from docker.context import ContextAPI
            >>> ctx = ContextAPI.get_context(name='test')
            >>> print(ctx.Metadata)
            {
                "Name": "test",
                "Metadata": {},
                "Endpoints": {
                    "docker": {
                        "Host": "unix:///var/run/docker.sock",
                        "SkipTLSVerify": false
                    }
                }
            }
        """
        if not name:
            name = get_current_context_name()
        if name == "default":
            return cls.DEFAULT_CONTEXT
        return Context.load_context(name)

    @classmethod
    def contexts(cls):
        """Context list.
        Returns:
            (Context): List of context objects.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        names = []
        for dirname, dirnames, fnames in os.walk(get_meta_dir()):
            for filename in fnames + dirnames:
                if filename == METAFILE:
                    try:
                        # NOTE(review): file handle is never closed explicitly;
                        # relies on GC.
                        data = json.load(
                            open(os.path.join(dirname, filename)))
                        names.append(data["Name"])
                    except Exception as e:
                        raise errors.ContextException(
                            f"Failed to load metafile (unknown): {e}",
                        ) from e

        contexts = [cls.DEFAULT_CONTEXT]
        for name in names:
            contexts.append(Context.load_context(name))
        return contexts

    @classmethod
    def get_current_context(cls):
        """Get current context.
        Returns:
            (Context): current context object.
        """
        return cls.get_context()

    @classmethod
    def set_current_context(cls, name="default"):
        # Persist `name` as "currentContext" in the CLI config file.
        ctx = cls.get_context(name)
        if not ctx:
            raise errors.ContextNotFound(name)

        err = write_context_name_to_docker_config(name)
        if err:
            raise errors.ContextException(
                f'Failed to set current context: {err}')

    @classmethod
    def remove_context(cls, name):
        """Remove a context. Similar to the ``docker context rm`` command.

        Args:
            name (str): The name of the context

        Raises:
            :py:class:`docker.errors.MissingContextParameter`
                If a context name is not provided.
            :py:class:`docker.errors.ContextNotFound`
                If a context with the name does not exist.
            :py:class:`docker.errors.ContextException`
                If name is default.

        Example:
            >>> from docker.context import ContextAPI
            >>> ContextAPI.remove_context(name='test')
            >>>
        """
        if not name:
            raise errors.MissingContextParameter("name")
        if name == "default":
            raise errors.ContextException(
                'context "default" cannot be removed')
        ctx = Context.load_context(name)
        if not ctx:
            raise errors.ContextNotFound(name)
        if name == get_current_context_name():
            # Removing the active context resets "currentContext" to default.
            write_context_name_to_docker_config(None)
        ctx.remove()

    @classmethod
    def inspect_context(cls, name="default"):
        """Inspect a context. Similar to the ``docker context inspect``
        command.

        Args:
            name (str): The name of the context

        Raises:
            :py:class:`docker.errors.MissingContextParameter`
                If a context name is not provided.
            :py:class:`docker.errors.ContextNotFound`
                If a context with the name does not exist.

        Example:
            >>> from docker.context import ContextAPI
            >>> ContextAPI.inspect_context(name='test')
            >>>
        """
        if not name:
            raise errors.MissingContextParameter("name")
        if name == "default":
            # Calling the context returns its serializable dict form.
            return cls.DEFAULT_CONTEXT()
        ctx = Context.load_context(name)
        if not ctx:
            raise errors.ContextNotFound(name)

        return ctx()

View File

@ -1,81 +0,0 @@
import hashlib
import json
import os
from docker import utils
from docker.constants import DEFAULT_UNIX_SOCKET, IS_WINDOWS_PLATFORM
from docker.utils.config import find_config_file
METAFILE = "meta.json"
def get_current_context_name():
    """Read "currentContext" from the Docker CLI config file.

    Falls back to "default" when the config file is missing, unreadable,
    or has no such key.
    """
    docker_cfg_path = find_config_file()
    if not docker_cfg_path:
        return "default"
    try:
        with open(docker_cfg_path) as f:
            return json.load(f).get("currentContext", "default")
    except Exception:
        return "default"
def write_context_name_to_docker_config(name=None):
    """Persist ``name`` as "currentContext" in the CLI config file.

    Passing None (or "default") clears the key instead. Returns the caught
    exception on read/write failure, None on success or no-op.
    """
    if name == 'default':
        name = None
    docker_cfg_path = find_config_file()
    config = {}
    if docker_cfg_path:
        try:
            with open(docker_cfg_path) as f:
                config = json.load(f)
        except Exception as e:
            return e
    if name:
        config["currentContext"] = name
    elif config.get("currentContext", None):
        del config["currentContext"]
    else:
        # Nothing to set and nothing to clear: leave the file untouched.
        return
    try:
        with open(docker_cfg_path, "w") as f:
            json.dump(config, f, indent=4)
    except Exception as e:
        return e
def get_context_id(name):
    """Return the stable sha256 hex digest used as the context's directory id."""
    digest = hashlib.sha256(name.encode('utf-8'))
    return digest.hexdigest()
def get_context_dir():
    """Directory holding all context data, next to the CLI config file."""
    base = os.path.dirname(find_config_file() or "")
    return os.path.join(base, "contexts")
def get_meta_dir(name=None):
    """Metadata root dir, or the specific context's metadata dir when
    ``name`` is given."""
    meta_dir = os.path.join(get_context_dir(), "meta")
    if not name:
        return meta_dir
    return os.path.join(meta_dir, get_context_id(name))
def get_meta_file(name):
    """Full path of the meta.json file for context ``name``."""
    meta_dir = get_meta_dir(name)
    return os.path.join(meta_dir, METAFILE)
def get_tls_dir(name=None, endpoint=""):
    """TLS material root, or the per-context/per-endpoint TLS directory."""
    tls_root = os.path.join(get_context_dir(), "tls")
    if not name:
        return tls_root
    return os.path.join(tls_root, get_context_id(name), endpoint)
def get_context_host(path=None, tls=False):
    """Normalize ``path`` into a context host URL via utils.parse_host.

    The default unix socket URL has its "http+" scheme prefix stripped.
    """
    host = utils.parse_host(path, IS_WINDOWS_PLATFORM, tls)
    # remove http+ from default docker socket url
    if host == DEFAULT_UNIX_SOCKET and host.startswith("http+"):
        host = host[5:]
    return host

View File

@ -1,249 +0,0 @@
import json
import os
from shutil import copyfile, rmtree
from docker.errors import ContextException
from docker.tls import TLSConfig
from .config import (
get_context_host,
get_meta_dir,
get_meta_file,
get_tls_dir,
)
class Context:
    """A context: a named set of endpoints (plus optional TLS material)
    describing how to reach a Docker/orchestrator host."""

    def __init__(self, name, orchestrator=None, host=None, endpoints=None,
                 tls=False):
        if not name:
            raise Exception("Name not provided")
        self.name = name
        self.context_type = None
        self.orchestrator = orchestrator
        self.endpoints = {}
        self.tls_cfg = {}
        # Sentinel paths until save() persists the context to disk.
        self.meta_path = "IN MEMORY"
        self.tls_path = "IN MEMORY"
        if not endpoints:
            # set default docker endpoint if no endpoint is set
            default_endpoint = "docker" if (
                not orchestrator or orchestrator == "swarm"
            ) else orchestrator
            self.endpoints = {
                default_endpoint: {
                    "Host": get_context_host(host, tls),
                    "SkipTLSVerify": not tls
                }
            }
            return

        # check docker endpoints
        for k, v in endpoints.items():
            if not isinstance(v, dict):
                # unknown format
                raise ContextException(
                    f"Unknown endpoint format for context {name}: {v}",
                )
            self.endpoints[k] = v
            if k != "docker":
                continue
            # Fill in defaults for the docker endpoint only.
            self.endpoints[k]["Host"] = v.get("Host", get_context_host(
                host, tls))
            self.endpoints[k]["SkipTLSVerify"] = bool(v.get(
                "SkipTLSVerify", not tls))

    def set_endpoint(
            self, name="docker", host=None, tls_cfg=None,
            skip_tls_verify=False, def_namespace=None):
        # Replace endpoint `name`; TLS defaults derive from skip_tls_verify.
        self.endpoints[name] = {
            "Host": get_context_host(host, not skip_tls_verify),
            "SkipTLSVerify": skip_tls_verify
        }
        if def_namespace:
            self.endpoints[name]["DefaultNamespace"] = def_namespace
        if tls_cfg:
            self.tls_cfg[name] = tls_cfg

    def inspect(self):
        # Alias for calling the context: returns the serializable dict view.
        return self.__call__()

    @classmethod
    def load_context(cls, name):
        """Load a persisted context by name; returns None when not found."""
        meta = Context._load_meta(name)
        if meta:
            instance = cls(
                meta["Name"],
                orchestrator=meta["Metadata"].get("StackOrchestrator", None),
                endpoints=meta.get("Endpoints", None))
            instance.context_type = meta["Metadata"].get("Type", None)
            instance._load_certs()
            instance.meta_path = get_meta_dir(name)
            return instance
        return None

    @classmethod
    def _load_meta(cls, name):
        # Read and normalize meta.json for `name`; None when no file exists.
        meta_file = get_meta_file(name)
        if not os.path.isfile(meta_file):
            return None

        metadata = {}
        try:
            with open(meta_file) as f:
                metadata = json.load(f)
        except (OSError, KeyError, ValueError) as e:
            # unknown format
            raise Exception(
                f"Detected corrupted meta file for context {name} : {e}"
            ) from e

        # for docker endpoints, set defaults for
        # Host and SkipTLSVerify fields
        for k, v in metadata["Endpoints"].items():
            if k != "docker":
                continue
            metadata["Endpoints"][k]["Host"] = v.get(
                "Host", get_context_host(None, False))
            metadata["Endpoints"][k]["SkipTLSVerify"] = bool(
                v.get("SkipTLSVerify", True))

        return metadata

    def _load_certs(self):
        # Pick up ca*/cert*/key* files saved under the context's TLS dir and
        # build a TLSConfig per endpoint that has all three.
        certs = {}
        tls_dir = get_tls_dir(self.name)
        for endpoint in self.endpoints.keys():
            if not os.path.isdir(os.path.join(tls_dir, endpoint)):
                continue
            ca_cert = None
            cert = None
            key = None
            for filename in os.listdir(os.path.join(tls_dir, endpoint)):
                if filename.startswith("ca"):
                    ca_cert = os.path.join(tls_dir, endpoint, filename)
                elif filename.startswith("cert"):
                    cert = os.path.join(tls_dir, endpoint, filename)
                elif filename.startswith("key"):
                    key = os.path.join(tls_dir, endpoint, filename)
            if all([ca_cert, cert, key]):
                verify = None
                if endpoint == "docker" and not self.endpoints["docker"].get(
                        "SkipTLSVerify", False):
                    verify = True
                certs[endpoint] = TLSConfig(
                    client_cert=(cert, key), ca_cert=ca_cert, verify=verify)
        self.tls_cfg = certs
        self.tls_path = tls_dir

    def save(self):
        # Persist metadata and copy TLS material into the context dirs,
        # then switch the sentinel paths to the on-disk locations.
        meta_dir = get_meta_dir(self.name)
        if not os.path.isdir(meta_dir):
            os.makedirs(meta_dir)
        with open(get_meta_file(self.name), "w") as f:
            f.write(json.dumps(self.Metadata))

        tls_dir = get_tls_dir(self.name)
        for endpoint, tls in self.tls_cfg.items():
            if not os.path.isdir(os.path.join(tls_dir, endpoint)):
                os.makedirs(os.path.join(tls_dir, endpoint))

            ca_file = tls.ca_cert
            if ca_file:
                copyfile(ca_file, os.path.join(
                    tls_dir, endpoint, os.path.basename(ca_file)))

            if tls.cert:
                cert_file, key_file = tls.cert
                copyfile(cert_file, os.path.join(
                    tls_dir, endpoint, os.path.basename(cert_file)))
                copyfile(key_file, os.path.join(
                    tls_dir, endpoint, os.path.basename(key_file)))

        self.meta_path = get_meta_dir(self.name)
        self.tls_path = get_tls_dir(self.name)

    def remove(self):
        # Delete the on-disk metadata and TLS directories, if present.
        if os.path.isdir(self.meta_path):
            rmtree(self.meta_path)
        if os.path.isdir(self.tls_path):
            rmtree(self.tls_path)

    def __repr__(self):
        return f"<{self.__class__.__name__}: '{self.name}'>"

    def __str__(self):
        return json.dumps(self.__call__(), indent=2)

    def __call__(self):
        # Serializable view: metadata + TLS file names + storage paths.
        result = self.Metadata
        result.update(self.TLSMaterial)
        result.update(self.Storage)
        return result

    def is_docker_host(self):
        # True when no explicit context type was recorded in metadata.
        return self.context_type is None

    @property
    def Name(self):
        return self.name

    @property
    def Host(self):
        # Host of the docker endpoint for swarm/no orchestrator, otherwise
        # the orchestrator-named endpoint.
        if not self.orchestrator or self.orchestrator == "swarm":
            endpoint = self.endpoints.get("docker", None)
            if endpoint:
                return endpoint.get("Host", None)
            return None

        return self.endpoints[self.orchestrator].get("Host", None)

    @property
    def Orchestrator(self):
        return self.orchestrator

    @property
    def Metadata(self):
        meta = {}
        if self.orchestrator:
            meta = {"StackOrchestrator": self.orchestrator}
        return {
            "Name": self.name,
            "Metadata": meta,
            "Endpoints": self.endpoints
        }

    @property
    def TLSConfig(self):
        # TLS config for the active endpoint ("docker" for swarm/none).
        key = self.orchestrator
        if not key or key == "swarm":
            key = "docker"
        if key in self.tls_cfg.keys():
            return self.tls_cfg[key]
        return None

    @property
    def TLSMaterial(self):
        # Map each endpoint to the base names of its ca/cert/key files.
        certs = {}
        for endpoint, tls in self.tls_cfg.items():
            cert, key = tls.cert
            certs[endpoint] = list(
                map(os.path.basename, [tls.ca_cert, cert, key]))
        return {
            "TLSMaterial": certs
        }

    @property
    def Storage(self):
        return {
            "Storage": {
                "MetadataPath": self.meta_path,
                "TLSPath": self.tls_path
            }}

View File

@ -1,8 +0,0 @@
from .constants import (
DEFAULT_LINUX_STORE,
DEFAULT_OSX_STORE,
DEFAULT_WIN32_STORE,
PROGRAM_PREFIX,
)
from .errors import CredentialsNotFound, StoreError
from .store import Store

View File

@ -1,4 +0,0 @@
# Credential helper executables are named '<PROGRAM_PREFIX><store name>'.
PROGRAM_PREFIX = 'docker-credential-'
# Platform-default credential store backends.
DEFAULT_LINUX_STORE = 'secretservice'
DEFAULT_OSX_STORE = 'osxkeychain'
DEFAULT_WIN32_STORE = 'wincred'

View File

@ -1,17 +0,0 @@
class StoreError(RuntimeError):
    """Base error for credential-helper failures."""
    pass


class CredentialsNotFound(StoreError):
    """Raised when the helper holds no entry for the requested server."""
    pass


class InitializationError(StoreError):
    """Raised when a credential store cannot be initialized."""
    pass


def process_store_error(cpe, program):
    """Translate a helper's CalledProcessError into the matching StoreError.

    Returns (does not raise) a CredentialsNotFound when the helper's output
    reports a missing keychain entry, otherwise a generic StoreError.
    """
    message = cpe.output.decode('utf-8')
    if 'credentials not found in native keychain' not in message:
        return StoreError(f'Credentials store {program} exited with "{message}".')
    return CredentialsNotFound(f'No matching credentials in {program}')

View File

@ -1,93 +0,0 @@
import errno
import json
import shutil
import subprocess
import warnings
from . import constants, errors
from .utils import create_environment_dict
class Store:
    """Wrapper around a docker-credential-* helper executable."""

    def __init__(self, program, environment=None):
        """ Create a store object that acts as an interface to
            perform the basic operations for storing, retrieving
            and erasing credentials using `program`.
        """
        self.program = constants.PROGRAM_PREFIX + program
        self.exe = shutil.which(self.program)
        self.environment = environment
        if self.exe is None:
            # Missing helper is only a warning here; _execute() raises the
            # hard StoreError once the store is actually used.
            warnings.warn(
                f'{self.program} not installed or not available in PATH',
                stacklevel=1,
            )

    def get(self, server):
        """ Retrieve credentials for `server`. If no credentials are found,
            a `StoreError` will be raised.
        """
        if not isinstance(server, bytes):
            server = server.encode('utf-8')
        data = self._execute('get', server)
        result = json.loads(data.decode('utf-8'))

        # docker-credential-pass will return an object for inexistent servers
        # whereas other helpers will exit with returncode != 0. For
        # consistency, if no significant data is returned,
        # raise CredentialsNotFound
        if result['Username'] == '' and result['Secret'] == '':
            raise errors.CredentialsNotFound(
                f'No matching credentials in {self.program}'
            )

        return result

    def store(self, server, username, secret):
        """ Store credentials for `server`. Raises a `StoreError` if an error
            occurs.
        """
        data_input = json.dumps({
            'ServerURL': server,
            'Username': username,
            'Secret': secret
        }).encode('utf-8')
        return self._execute('store', data_input)

    def erase(self, server):
        """ Erase credentials for `server`. Raises a `StoreError` if an error
            occurs.
        """
        if not isinstance(server, bytes):
            server = server.encode('utf-8')
        self._execute('erase', server)

    def list(self):
        """ List stored credentials. Requires v0.4.0+ of the helper.
        """
        data = self._execute('list', None)
        return json.loads(data.decode('utf-8'))

    def _execute(self, subcmd, data_input):
        # Run the helper with `subcmd`, feeding `data_input` on stdin and
        # returning raw stdout bytes; failures are translated to StoreError
        # subclasses.
        if self.exe is None:
            raise errors.StoreError(
                f'{self.program} not installed or not available in PATH'
            )
        output = None
        env = create_environment_dict(self.environment)
        try:
            output = subprocess.check_output(
                [self.exe, subcmd], input=data_input, env=env,
            )
        except subprocess.CalledProcessError as e:
            raise errors.process_store_error(e, self.program) from e
        except OSError as e:
            if e.errno == errno.ENOENT:
                raise errors.StoreError(
                    f'{self.program} not installed or not available in PATH'
                ) from e
            else:
                raise errors.StoreError(
                    f'Unexpected OS error "{e.strerror}", errno={e.errno}'
                ) from e
        return output

View File

@ -1,10 +0,0 @@
import os
def create_environment_dict(overrides):
    """Return a copy of ``os.environ`` updated with ``overrides``.

    ``overrides`` may be ``None`` or empty, in which case the current
    environment is returned unchanged (as a fresh dict).
    """
    env = dict(os.environ)
    if overrides:
        env.update(overrides)
    return env

View File

@ -1,14 +1,5 @@
import requests import requests
_image_not_found_explanation_fragments = frozenset(
fragment.lower() for fragment in [
'no such image',
'not found: does not exist or no pull access',
'repository does not exist',
'was found but does not match the specified platform',
]
)
class DockerException(Exception): class DockerException(Exception):
""" """
@ -27,16 +18,17 @@ def create_api_error_from_http_exception(e):
try: try:
explanation = response.json()['message'] explanation = response.json()['message']
except ValueError: except ValueError:
explanation = (response.text or '').strip() explanation = (response.content or '').strip()
cls = APIError cls = APIError
if response.status_code == 404: if response.status_code == 404:
explanation_msg = (explanation or '').lower() if explanation and ('No such image' in str(explanation) or
if any(fragment in explanation_msg 'not found: does not exist or no pull access'
for fragment in _image_not_found_explanation_fragments): in str(explanation) or
'repository does not exist' in str(explanation)):
cls = ImageNotFound cls = ImageNotFound
else: else:
cls = NotFound cls = NotFound
raise cls(e, response=response, explanation=explanation) from e raise cls(e, response=response, explanation=explanation)
class APIError(requests.exceptions.HTTPError, DockerException): class APIError(requests.exceptions.HTTPError, DockerException):
@ -46,27 +38,23 @@ class APIError(requests.exceptions.HTTPError, DockerException):
def __init__(self, message, response=None, explanation=None): def __init__(self, message, response=None, explanation=None):
# requests 1.2 supports response as a keyword argument, but # requests 1.2 supports response as a keyword argument, but
# requests 1.1 doesn't # requests 1.1 doesn't
super().__init__(message) super(APIError, self).__init__(message)
self.response = response self.response = response
self.explanation = explanation self.explanation = explanation
def __str__(self): def __str__(self):
message = super().__str__() message = super(APIError, self).__str__()
if self.is_client_error(): if self.is_client_error():
message = ( message = '{0} Client Error: {1}'.format(
f'{self.response.status_code} Client Error for ' self.response.status_code, self.response.reason)
f'{self.response.url}: {self.response.reason}'
)
elif self.is_server_error(): elif self.is_server_error():
message = ( message = '{0} Server Error: {1}'.format(
f'{self.response.status_code} Server Error for ' self.response.status_code, self.response.reason)
f'{self.response.url}: {self.response.reason}'
)
if self.explanation: if self.explanation:
message = f'{message} ("{self.explanation}")' message = '{0} ("{1}")'.format(message, self.explanation)
return message return message
@ -75,9 +63,6 @@ class APIError(requests.exceptions.HTTPError, DockerException):
if self.response is not None: if self.response is not None:
return self.response.status_code return self.response.status_code
def is_error(self):
return self.is_client_error() or self.is_server_error()
def is_client_error(self): def is_client_error(self):
if self.status_code is None: if self.status_code is None:
return False return False
@ -143,11 +128,11 @@ class ContainerError(DockerException):
self.image = image self.image = image
self.stderr = stderr self.stderr = stderr
err = f": {stderr}" if stderr is not None else "" err = ": {}".format(stderr) if stderr is not None else ""
super().__init__( msg = ("Command '{}' in image '{}' returned non-zero exit "
f"Command '{command}' in image '{image}' " "status {}{}").format(command, image, exit_status, err)
f"returned non-zero exit status {exit_status}{err}"
) super(ContainerError, self).__init__(msg)
class StreamParseError(RuntimeError): class StreamParseError(RuntimeError):
@ -157,7 +142,7 @@ class StreamParseError(RuntimeError):
class BuildError(DockerException): class BuildError(DockerException):
def __init__(self, reason, build_log): def __init__(self, reason, build_log):
super().__init__(reason) super(BuildError, self).__init__(reason)
self.msg = reason self.msg = reason
self.build_log = build_log self.build_log = build_log
@ -167,43 +152,11 @@ class ImageLoadError(DockerException):
def create_unexpected_kwargs_error(name, kwargs): def create_unexpected_kwargs_error(name, kwargs):
quoted_kwargs = [f"'{k}'" for k in sorted(kwargs)] quoted_kwargs = ["'{}'".format(k) for k in sorted(kwargs)]
text = [f"{name}() "] text = ["{}() ".format(name)]
if len(quoted_kwargs) == 1: if len(quoted_kwargs) == 1:
text.append("got an unexpected keyword argument ") text.append("got an unexpected keyword argument ")
else: else:
text.append("got unexpected keyword arguments ") text.append("got unexpected keyword arguments ")
text.append(', '.join(quoted_kwargs)) text.append(', '.join(quoted_kwargs))
return TypeError(''.join(text)) return TypeError(''.join(text))
class MissingContextParameter(DockerException):
def __init__(self, param):
self.param = param
def __str__(self):
return (f"missing parameter: {self.param}")
class ContextAlreadyExists(DockerException):
def __init__(self, name):
self.name = name
def __str__(self):
return (f"context {self.name} already exists")
class ContextException(DockerException):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return (self.msg)
class ContextNotFound(DockerException):
def __init__(self, name):
self.name = name
def __str__(self):
return (f"context '{self.name}' not found")

View File

@ -1,5 +1,5 @@
from ..api import APIClient from ..api import APIClient
from .resource import Collection, Model from .resource import Model, Collection
class Config(Model): class Config(Model):
@ -7,7 +7,7 @@ class Config(Model):
id_attribute = 'ID' id_attribute = 'ID'
def __repr__(self): def __repr__(self):
return f"<{self.__class__.__name__}: '{self.name}'>" return "<%s: '%s'>" % (self.__class__.__name__, self.name)
@property @property
def name(self): def name(self):
@ -30,7 +30,6 @@ class ConfigCollection(Collection):
def create(self, **kwargs): def create(self, **kwargs):
obj = self.client.api.create_config(**kwargs) obj = self.client.api.create_config(**kwargs)
obj.setdefault("Spec", {})["Name"] = kwargs.get("name")
return self.prepare_model(obj) return self.prepare_model(obj)
create.__doc__ = APIClient.create_config.__doc__ create.__doc__ = APIClient.create_config.__doc__

View File

@ -5,25 +5,16 @@ from collections import namedtuple
from ..api import APIClient from ..api import APIClient
from ..constants import DEFAULT_DATA_CHUNK_SIZE from ..constants import DEFAULT_DATA_CHUNK_SIZE
from ..errors import ( from ..errors import (
ContainerError, ContainerError, DockerException, ImageNotFound,
DockerException, NotFound, create_unexpected_kwargs_error
ImageNotFound,
NotFound,
create_unexpected_kwargs_error,
) )
from ..types import HostConfig, NetworkingConfig from ..types import HostConfig
from ..utils import version_gte from ..utils import version_gte
from .images import Image from .images import Image
from .resource import Collection, Model from .resource import Collection, Model
class Container(Model): class Container(Model):
""" Local representation of a container object. Detailed configuration may
be accessed through the :py:attr:`attrs` attribute. Note that local
attributes are cached; users may call :py:meth:`reload` to
query the Docker daemon for the current properties, causing
:py:attr:`attrs` to be refreshed.
"""
@property @property
def name(self): def name(self):
@ -51,11 +42,11 @@ class Container(Model):
try: try:
result = self.attrs['Config'].get('Labels') result = self.attrs['Config'].get('Labels')
return result or {} return result or {}
except KeyError as ke: except KeyError:
raise DockerException( raise DockerException(
'Label data is not available for sparse objects. Call reload()' 'Label data is not available for sparse objects. Call reload()'
' to retrieve all information' ' to retrieve all information'
) from ke )
@property @property
def status(self): def status(self):
@ -66,22 +57,6 @@ class Container(Model):
return self.attrs['State']['Status'] return self.attrs['State']['Status']
return self.attrs['State'] return self.attrs['State']
@property
def health(self):
"""
The healthcheck status of the container.
For example, ``healthy`, or ``unhealthy`.
"""
return self.attrs.get('State', {}).get('Health', {}).get('Status', 'unknown')
@property
def ports(self):
"""
The ports that the container exposes as a dictionary.
"""
return self.attrs.get('NetworkSettings', {}).get('Ports', {})
def attach(self, **kwargs): def attach(self, **kwargs):
""" """
Attach to this container. Attach to this container.
@ -134,7 +109,6 @@ class Container(Model):
tag (str): The tag to push tag (str): The tag to push
message (str): A commit message message (str): A commit message
author (str): The name of the author author (str): The name of the author
pause (bool): Whether to pause the container before committing
changes (str): Dockerfile instructions to apply while committing changes (str): Dockerfile instructions to apply while committing
conf (dict): The configuration for the container. See the conf (dict): The configuration for the container. See the
`Engine API documentation `Engine API documentation
@ -155,8 +129,7 @@ class Container(Model):
Inspect changes on a container's filesystem. Inspect changes on a container's filesystem.
Returns: Returns:
(list) A list of dictionaries containing the attributes `Path` (str)
and `Kind`.
Raises: Raises:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
@ -166,7 +139,7 @@ class Container(Model):
def exec_run(self, cmd, stdout=True, stderr=True, stdin=False, tty=False, def exec_run(self, cmd, stdout=True, stderr=True, stdin=False, tty=False,
privileged=False, user='', detach=False, stream=False, privileged=False, user='', detach=False, stream=False,
socket=False, environment=None, workdir=None, demux=False): socket=False, environment=None, workdir=None):
""" """
Run a command inside this container. Similar to Run a command inside this container. Similar to
``docker exec``. ``docker exec``.
@ -181,26 +154,23 @@ class Container(Model):
user (str): User to execute command as. Default: root user (str): User to execute command as. Default: root
detach (bool): If true, detach from the exec command. detach (bool): If true, detach from the exec command.
Default: False Default: False
stream (bool): Stream response data. Ignored if ``detach`` is true. stream (bool): Stream response data. Default: False
Default: False
socket (bool): Return the connection socket to allow custom socket (bool): Return the connection socket to allow custom
read/write operations. Default: False read/write operations. Default: False
environment (dict or list): A dictionary or a list of strings in environment (dict or list): A dictionary or a list of strings in
the following format ``["PASSWORD=xxx"]`` or the following format ``["PASSWORD=xxx"]`` or
``{"PASSWORD": "xxx"}``. ``{"PASSWORD": "xxx"}``.
workdir (str): Path to working directory for this exec session workdir (str): Path to working directory for this exec session
demux (bool): Return stdout and stderr separately
Returns: Returns:
(ExecResult): A tuple of (exit_code, output) (ExecResult): A tuple of (exit_code, output)
exit_code: (int): exit_code: (int):
Exit code for the executed command or ``None`` if Exit code for the executed command or ``None`` if
either ``stream`` or ``socket`` is ``True``. either ``stream```or ``socket`` is ``True``.
output: (generator, bytes, or tuple): output: (generator or str):
If ``stream=True``, a generator yielding response chunks. If ``stream=True``, a generator yielding response chunks.
If ``socket=True``, a socket object for the connection. If ``socket=True``, a socket object for the connection.
If ``demux=True``, a tuple of two bytes: stdout and stderr. A string containing response data otherwise.
A bytestring containing response data otherwise.
Raises: Raises:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
@ -209,11 +179,10 @@ class Container(Model):
resp = self.client.api.exec_create( resp = self.client.api.exec_create(
self.id, cmd, stdout=stdout, stderr=stderr, stdin=stdin, tty=tty, self.id, cmd, stdout=stdout, stderr=stderr, stdin=stdin, tty=tty,
privileged=privileged, user=user, environment=environment, privileged=privileged, user=user, environment=environment,
workdir=workdir, workdir=workdir
) )
exec_output = self.client.api.exec_start( exec_output = self.client.api.exec_start(
resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket, resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket
demux=demux
) )
if socket or stream: if socket or stream:
return ExecResult(None, exec_output) return ExecResult(None, exec_output)
@ -241,8 +210,7 @@ class Container(Model):
""" """
return self.client.api.export(self.id, chunk_size) return self.client.api.export(self.id, chunk_size)
def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE, def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
encode_stream=False):
""" """
Retrieve a file or folder from the container in the form of a tar Retrieve a file or folder from the container in the form of a tar
archive. archive.
@ -252,8 +220,6 @@ class Container(Model):
chunk_size (int): The number of bytes returned by each iteration chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB received. Default: 2 MB
encode_stream (bool): Determines if data should be encoded
(gzip-compressed) during transmission. Default: False
Returns: Returns:
(tuple): First element is a raw tar data stream. Second element is (tuple): First element is a raw tar data stream. Second element is
@ -262,20 +228,8 @@ class Container(Model):
Raises: Raises:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
If the server returns an error. If the server returns an error.
Example:
>>> f = open('./sh_bin.tar', 'wb')
>>> bits, stat = container.get_archive('/bin/sh')
>>> print(stat)
{'name': 'sh', 'size': 1075464, 'mode': 493,
'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''}
>>> for chunk in bits:
... f.write(chunk)
>>> f.close()
""" """
return self.client.api.get_archive(self.id, path, return self.client.api.get_archive(self.id, path, chunk_size)
chunk_size, encode_stream)
def kill(self, signal=None): def kill(self, signal=None):
""" """
@ -299,22 +253,21 @@ class Container(Model):
generator you can iterate over to retrieve log output as it happens. generator you can iterate over to retrieve log output as it happens.
Args: Args:
stdout (bool): Get ``STDOUT``. Default ``True`` stdout (bool): Get ``STDOUT``
stderr (bool): Get ``STDERR``. Default ``True`` stderr (bool): Get ``STDERR``
stream (bool): Stream the response. Default ``False`` stream (bool): Stream the response
timestamps (bool): Show timestamps. Default ``False`` timestamps (bool): Show timestamps
tail (str or int): Output specified number of lines at the end of tail (str or int): Output specified number of lines at the end of
logs. Either an integer of number of lines or the string logs. Either an integer of number of lines or the string
``all``. Default ``all`` ``all``. Default ``all``
since (datetime, int, or float): Show logs since a given datetime, since (datetime or int): Show logs since a given datetime or
integer epoch (in seconds) or float (in nanoseconds) integer epoch (in seconds)
follow (bool): Follow log output. Default ``False`` follow (bool): Follow log output
until (datetime, int, or float): Show logs that occurred before until (datetime or int): Show logs that occurred before the given
the given datetime, integer epoch (in seconds), or datetime or integer epoch (in seconds)
float (in nanoseconds)
Returns: Returns:
(generator of bytes or bytes): Logs from the container. (generator or str): Logs from the container.
Raises: Raises:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
@ -340,7 +293,7 @@ class Container(Model):
Args: Args:
path (str): Path inside the container where the file(s) will be path (str): Path inside the container where the file(s) will be
extracted. Must exist. extracted. Must exist.
data (bytes or stream): tar data to be extracted data (bytes): tar data to be extracted
Returns: Returns:
(bool): True if the call succeeds. (bool): True if the call succeeds.
@ -427,8 +380,7 @@ class Container(Model):
Args: Args:
decode (bool): If set to true, stream will be decoded into dicts decode (bool): If set to true, stream will be decoded into dicts
on the fly. Only applicable if ``stream`` is True. on the fly. False by default.
False by default.
stream (bool): If set to false, only the current stats will be stream (bool): If set to false, only the current stats will be
returned instead of a stream. True by default. returned instead of a stream. True by default.
@ -569,20 +521,12 @@ class ContainerCollection(Collection):
cap_add (list of str): Add kernel capabilities. For example, cap_add (list of str): Add kernel capabilities. For example,
``["SYS_ADMIN", "MKNOD"]``. ``["SYS_ADMIN", "MKNOD"]``.
cap_drop (list of str): Drop kernel capabilities. cap_drop (list of str): Drop kernel capabilities.
cgroup_parent (str): Override the default parent cgroup.
cgroupns (str): Override the default cgroup namespace mode for the
container. One of:
- ``private`` the container runs in its own private cgroup
namespace.
- ``host`` use the host system's cgroup namespace.
cpu_count (int): Number of usable CPUs (Windows only). cpu_count (int): Number of usable CPUs (Windows only).
cpu_percent (int): Usable percentage of the available CPUs cpu_percent (int): Usable percentage of the available CPUs
(Windows only). (Windows only).
cpu_period (int): The length of a CPU period in microseconds. cpu_period (int): The length of a CPU period in microseconds.
cpu_quota (int): Microseconds of CPU time that the container can cpu_quota (int): Microseconds of CPU time that the container can
get in a CPU period. get in a CPU period.
cpu_rt_period (int): Limit CPU real-time period in microseconds.
cpu_rt_runtime (int): Limit CPU real-time runtime in microseconds.
cpu_shares (int): CPU shares (relative weight). cpu_shares (int): CPU shares (relative weight).
cpuset_cpus (str): CPUs in which to allow execution (``0-3``, cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
``0,1``). ``0,1``).
@ -605,9 +549,6 @@ class ContainerCollection(Collection):
For example, ``/dev/sda:/dev/xvda:rwm`` allows the container For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
to have read-write access to the host's ``/dev/sda`` via a to have read-write access to the host's ``/dev/sda`` via a
node named ``/dev/xvda`` inside the container. node named ``/dev/xvda`` inside the container.
device_requests (:py:class:`list`): Expose host resources such as
GPUs to the container, as a list of
:py:class:`docker.types.DeviceRequest` instances.
dns (:py:class:`list`): Set custom DNS servers. dns (:py:class:`list`): Set custom DNS servers.
dns_opt (:py:class:`list`): Additional options to be added to the dns_opt (:py:class:`list`): Additional options to be added to the
container's ``resolv.conf`` file. container's ``resolv.conf`` file.
@ -617,50 +558,31 @@ class ContainerCollection(Collection):
environment (dict or list): Environment variables to set inside environment (dict or list): Environment variables to set inside
the container, as a dictionary or a list of strings in the the container, as a dictionary or a list of strings in the
format ``["SOMEVARIABLE=xxx"]``. format ``["SOMEVARIABLE=xxx"]``.
extra_hosts (dict): Additional hostnames to resolve inside the extra_hosts (dict): Addtional hostnames to resolve inside the
container, as a mapping of hostname to IP address. container, as a mapping of hostname to IP address.
group_add (:py:class:`list`): List of additional group names and/or group_add (:py:class:`list`): List of additional group names and/or
IDs that the container process will run as. IDs that the container process will run as.
healthcheck (dict): Specify a test to perform to check that the healthcheck (dict): Specify a test to perform to check that the
container is healthy. The dict takes the following keys: container is healthy.
- test (:py:class:`list` or str): Test to perform to determine
container health. Possible values:
- Empty list: Inherit healthcheck from parent image
- ``["NONE"]``: Disable healthcheck
- ``["CMD", args...]``: exec arguments directly.
- ``["CMD-SHELL", command]``: Run command in the system's
default shell.
If a string is provided, it will be used as a ``CMD-SHELL``
command.
- interval (int): The time to wait between checks in
nanoseconds. It should be 0 or at least 1000000 (1 ms).
- timeout (int): The time to wait before considering the check
to have hung. It should be 0 or at least 1000000 (1 ms).
- retries (int): The number of consecutive failures needed to
consider a container as unhealthy.
- start_period (int): Start period for the container to
initialize before starting health-retries countdown in
nanoseconds. It should be 0 or at least 1000000 (1 ms).
hostname (str): Optional hostname for the container. hostname (str): Optional hostname for the container.
init (bool): Run an init inside the container that forwards init (bool): Run an init inside the container that forwards
signals and reaps processes signals and reaps processes
init_path (str): Path to the docker-init binary init_path (str): Path to the docker-init binary
ipc_mode (str): Set the IPC mode for the container. ipc_mode (str): Set the IPC mode for the container.
isolation (str): Isolation technology to use. Default: `None`. isolation (str): Isolation technology to use. Default: `None`.
kernel_memory (int or str): Kernel memory limit
labels (dict or list): A dictionary of name-value labels (e.g. labels (dict or list): A dictionary of name-value labels (e.g.
``{"label1": "value1", "label2": "value2"}``) or a list of ``{"label1": "value1", "label2": "value2"}``) or a list of
names of labels to set with empty values (e.g. names of labels to set with empty values (e.g.
``["label1", "label2"]``) ``["label1", "label2"]``)
links (dict): Mapping of links using the links (dict or list of tuples): Either a dictionary mapping name
``{'container': 'alias'}`` format. The alias is optional. to alias or as a list of ``(name, alias)`` tuples.
Containers declared in this dict will be linked to the new log_config (dict): Logging configuration, as a dictionary with
container using the provided alias. Default: ``None``. keys:
log_config (LogConfig): Logging configuration.
lxc_conf (dict): LXC config. - ``type`` The logging driver name.
- ``config`` A dictionary of configuration for the logging
driver.
mac_address (str): MAC address to assign to the container. mac_address (str): MAC address to assign to the container.
mem_limit (int or str): Memory limit. Accepts float values mem_limit (int or str): Memory limit. Accepts float values
(which represent the memory limit of the created container in (which represent the memory limit of the created container in
@ -668,7 +590,6 @@ class ContainerCollection(Collection):
(``100000b``, ``1000k``, ``128m``, ``1g``). If a string is (``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
specified without a units character, bytes are assumed as an specified without a units character, bytes are assumed as an
intended unit. intended unit.
mem_reservation (int or str): Memory soft limit.
mem_swappiness (int): Tune a container's memory swappiness mem_swappiness (int): Tune a container's memory swappiness
behavior. Accepts number between 0 and 100. behavior. Accepts number between 0 and 100.
memswap_limit (str or int): Maximum amount of memory + swap a memswap_limit (str or int): Maximum amount of memory + swap a
@ -687,22 +608,13 @@ class ContainerCollection(Collection):
network_mode (str): One of: network_mode (str): One of:
- ``bridge`` Create a new network stack for the container on - ``bridge`` Create a new network stack for the container on
the bridge network. on the bridge network.
- ``none`` No networking for this container. - ``none`` No networking for this container.
- ``container:<name|id>`` Reuse another container's network - ``container:<name|id>`` Reuse another container's network
stack. stack.
- ``host`` Use the host network stack. - ``host`` Use the host network stack.
This mode is incompatible with ``ports``.
Incompatible with ``network``. Incompatible with ``network``.
networking_config (Dict[str, EndpointConfig]):
Dictionary of EndpointConfig objects for each container network.
The key is the name of the network.
Defaults to ``None``.
Used in conjuction with ``network``.
Incompatible with ``network_mode``.
oom_kill_disable (bool): Whether to disable OOM killer. oom_kill_disable (bool): Whether to disable OOM killer.
oom_score_adj (int): An integer value containing the score given oom_score_adj (int): An integer value containing the score given
to the container in order to tune OOM killer preferences. to the container in order to tune OOM killer preferences.
@ -716,8 +628,8 @@ class ContainerCollection(Collection):
The keys of the dictionary are the ports to bind inside the The keys of the dictionary are the ports to bind inside the
container, either as an integer or a string in the form container, either as an integer or a string in the form
``port/protocol``, where the protocol is either ``tcp``, ``port/protocol``, where the protocol is either ``tcp`` or
``udp``, or ``sctp``. ``udp``.
The values of the dictionary are the corresponding ports to The values of the dictionary are the corresponding ports to
open on the host, which can be either: open on the host, which can be either:
@ -734,7 +646,6 @@ class ContainerCollection(Collection):
to a single container port. For example, to a single container port. For example,
``{'1111/tcp': [1234, 4567]}``. ``{'1111/tcp': [1234, 4567]}``.
Incompatible with ``host`` network mode.
privileged (bool): Give extended privileges to this container. privileged (bool): Give extended privileges to this container.
publish_all_ports (bool): Publish all ports to the host. publish_all_ports (bool): Publish all ports to the host.
read_only (bool): Mount the container's root filesystem as read read_only (bool): Mount the container's root filesystem as read
@ -751,7 +662,6 @@ class ContainerCollection(Collection):
For example: For example:
``{"Name": "on-failure", "MaximumRetryCount": 5}`` ``{"Name": "on-failure", "MaximumRetryCount": 5}``
runtime (str): Runtime to use with this container.
security_opt (:py:class:`list`): A list of string values to security_opt (:py:class:`list`): A list of string values to
customize labels for MLS systems, such as SELinux. customize labels for MLS systems, such as SELinux.
shm_size (str or int): Size of /dev/shm (e.g. ``1G``). shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
@ -781,21 +691,13 @@ class ContainerCollection(Collection):
} }
tty (bool): Allocate a pseudo-TTY. tty (bool): Allocate a pseudo-TTY.
ulimits (:py:class:`list`): Ulimits to set inside the container, ulimits (:py:class:`list`): Ulimits to set inside the container, as
as a list of :py:class:`docker.types.Ulimit` instances. a list of dicts.
use_config_proxy (bool): If ``True``, and if the docker client
configuration file (``~/.docker/config.json`` by default)
contains a proxy configuration, the corresponding environment
variables will be set in the container being built.
user (str or int): Username or UID to run commands as inside the user (str or int): Username or UID to run commands as inside the
container. container.
userns_mode (str): Sets the user namespace mode for the container userns_mode (str): Sets the user namespace mode for the container
when user namespace remapping option is enabled. Supported when user namespace remapping option is enabled. Supported
values are: ``host`` values are: ``host``
uts_mode (str): Sets the UTS namespace mode for the container.
Supported values are: ``host``
version (str): The version of the API to use. Set to ``auto`` to
automatically detect the server's version. Default: ``1.35``
volume_driver (str): The name of a volume driver/plugin. volume_driver (str): The name of a volume driver/plugin.
volumes (dict or list): A dictionary to configure volumes mounted volumes (dict or list): A dictionary to configure volumes mounted
inside the container. The key is either the host path or a inside the container. The key is either the host path or a
@ -812,18 +714,10 @@ class ContainerCollection(Collection):
{'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'}, {'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'},
'/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}} '/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}}
Or a list of strings which each one of its elements specifies a
mount volume.
For example:
.. code-block:: python
['/home/user1/:/mnt/vol2','/var/www:/mnt/vol1']
volumes_from (:py:class:`list`): List of container names or IDs to volumes_from (:py:class:`list`): List of container names or IDs to
get volumes from. get volumes from.
working_dir (str): Path to the working directory. working_dir (str): Path to the working directory.
runtime (str): Runtime to use with this container.
Returns: Returns:
The container logs, either ``STDOUT``, ``STDERR``, or both, The container logs, either ``STDOUT``, ``STDERR``, or both,
@ -852,7 +746,7 @@ class ContainerCollection(Collection):
image = image.id image = image.id
stream = kwargs.pop('stream', False) stream = kwargs.pop('stream', False)
detach = kwargs.pop('detach', False) detach = kwargs.pop('detach', False)
platform = kwargs.get('platform', None) platform = kwargs.pop('platform', None)
if detach and remove: if detach and remove:
if version_gte(self.client.api._version, '1.25'): if version_gte(self.client.api._version, '1.25'):
@ -867,12 +761,6 @@ class ContainerCollection(Collection):
'together.' 'together.'
) )
if kwargs.get('networking_config') and not kwargs.get('network'):
raise RuntimeError(
'The option "networking_config" can not be used '
'without "network".'
)
try: try:
container = self.create(image=image, command=command, container = self.create(image=image, command=command,
detach=detach, **kwargs) detach=detach, **kwargs)
@ -907,9 +795,9 @@ class ContainerCollection(Collection):
container, exit_status, command, image, out container, exit_status, command, image, out
) )
if stream or out is None: return out if stream or out is None else b''.join(
return out [line for line in out]
return b''.join(out) )
def create(self, image, command=None, **kwargs): def create(self, image, command=None, **kwargs):
""" """
@ -975,8 +863,7 @@ class ContainerCollection(Collection):
- `exited` (int): Only containers with specified exit code - `exited` (int): Only containers with specified exit code
- `status` (str): One of ``restarting``, ``running``, - `status` (str): One of ``restarting``, ``running``,
``paused``, ``exited`` ``paused``, ``exited``
- `label` (str|list): format either ``"key"``, ``"key=value"`` - `label` (str): format either ``"key"`` or ``"key=value"``
or a list of such.
- `id` (str): The id of the container. - `id` (str): The id of the container.
- `name` (str): The name of the container. - `name` (str): The name of the container.
- `ancestor` (str): Filter by container ancestor. Format of - `ancestor` (str): Filter by container ancestor. Format of
@ -1025,7 +912,6 @@ class ContainerCollection(Collection):
def prune(self, filters=None): def prune(self, filters=None):
return self.client.api.prune_containers(filters=filters) return self.client.api.prune_containers(filters=filters)
prune.__doc__ = APIClient.prune_containers.__doc__ prune.__doc__ = APIClient.prune_containers.__doc__
@ -1043,12 +929,11 @@ RUN_CREATE_KWARGS = [
'mac_address', 'mac_address',
'name', 'name',
'network_disabled', 'network_disabled',
'platform',
'stdin_open', 'stdin_open',
'stop_signal', 'stop_signal',
'tty', 'tty',
'use_config_proxy',
'user', 'user',
'volume_driver',
'working_dir', 'working_dir',
] ]
@ -1060,7 +945,6 @@ RUN_HOST_CONFIG_KWARGS = [
'cap_add', 'cap_add',
'cap_drop', 'cap_drop',
'cgroup_parent', 'cgroup_parent',
'cgroupns',
'cpu_count', 'cpu_count',
'cpu_percent', 'cpu_percent',
'cpu_period', 'cpu_period',
@ -1076,7 +960,6 @@ RUN_HOST_CONFIG_KWARGS = [
'device_write_bps', 'device_write_bps',
'device_write_iops', 'device_write_iops',
'devices', 'devices',
'device_requests',
'dns_opt', 'dns_opt',
'dns_search', 'dns_search',
'dns', 'dns',
@ -1112,9 +995,7 @@ RUN_HOST_CONFIG_KWARGS = [
'tmpfs', 'tmpfs',
'ulimits', 'ulimits',
'userns_mode', 'userns_mode',
'uts_mode',
'version', 'version',
'volume_driver',
'volumes_from', 'volumes_from',
'runtime' 'runtime'
] ]
@ -1144,17 +1025,8 @@ def _create_container_args(kwargs):
host_config_kwargs['binds'] = volumes host_config_kwargs['binds'] = volumes
network = kwargs.pop('network', None) network = kwargs.pop('network', None)
networking_config = kwargs.pop('networking_config', None)
if network: if network:
if networking_config: create_kwargs['networking_config'] = {network: None}
# Sanity check: check if the network is defined in the
# networking config dict, otherwise switch to None
if network not in networking_config:
networking_config = None
create_kwargs['networking_config'] = NetworkingConfig(
networking_config
) if networking_config else {network: None}
host_config_kwargs['network_mode'] = network host_config_kwargs['network_mode'] = network
# All kwargs should have been consumed by this point, so raise # All kwargs should have been consumed by this point, so raise
@ -1187,10 +1059,8 @@ def _host_volume_from_bind(bind):
bits = rest.split(':', 1) bits = rest.split(':', 1)
if len(bits) == 1 or bits[1] in ('ro', 'rw'): if len(bits) == 1 or bits[1] in ('ro', 'rw'):
return drive + bits[0] return drive + bits[0]
elif bits[1].endswith(':ro') or bits[1].endswith(':rw'):
return bits[1][:-3]
else: else:
return bits[1] return bits[1].rstrip(':ro').rstrip(':rw')
ExecResult = namedtuple('ExecResult', 'exit_code,output') ExecResult = namedtuple('ExecResult', 'exit_code,output')

View File

@ -1,6 +1,7 @@
import itertools import itertools
import re import re
import warnings
import six
from ..api import APIClient from ..api import APIClient
from ..constants import DEFAULT_DATA_CHUNK_SIZE from ..constants import DEFAULT_DATA_CHUNK_SIZE
@ -15,8 +16,7 @@ class Image(Model):
An image on the server. An image on the server.
""" """
def __repr__(self): def __repr__(self):
tag_str = "', '".join(self.tags) return "<%s: '%s'>" % (self.__class__.__name__, "', '".join(self.tags))
return f"<{self.__class__.__name__}: '{tag_str}'>"
@property @property
def labels(self): def labels(self):
@ -29,12 +29,12 @@ class Image(Model):
@property @property
def short_id(self): def short_id(self):
""" """
The ID of the image truncated to 12 characters, plus the ``sha256:`` The ID of the image truncated to 10 characters, plus the ``sha256:``
prefix. prefix.
""" """
if self.id.startswith('sha256:'): if self.id.startswith('sha256:'):
return self.id[:19] return self.id[:17]
return self.id[:12] return self.id[:10]
@property @property
def tags(self): def tags(self):
@ -51,7 +51,7 @@ class Image(Model):
Show the history of an image. Show the history of an image.
Returns: Returns:
(list): The history of the image. (str): The history of the image.
Raises: Raises:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
@ -59,38 +59,14 @@ class Image(Model):
""" """
return self.client.api.history(self.id) return self.client.api.history(self.id)
def remove(self, force=False, noprune=False): def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
"""
Remove this image.
Args:
force (bool): Force removal of the image
noprune (bool): Do not delete untagged parents
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.remove_image(
self.id,
force=force,
noprune=noprune,
)
def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE, named=False):
""" """
Get a tarball of an image. Similar to the ``docker save`` command. Get a tarball of an image. Similar to the ``docker save`` command.
Args: Args:
chunk_size (int): The generator will return up to that much data chunk_size (int): The number of bytes returned by each iteration
per iteration, but may return less. If ``None``, data will be of the generator. If ``None``, data will be streamed as it is
streamed as it is received. Default: 2 MB received. Default: 2 MB
named (str or bool): If ``False`` (default), the tarball will not
retain repository and tag information for this image. If set
to ``True``, the first tag in the :py:attr:`~tags` list will
be used to identify the image. Alternatively, any element of
the :py:attr:`~tags` list can be used as an argument to use
that specific tag as the saved identifier.
Returns: Returns:
(generator): A stream of raw archive data. (generator): A stream of raw archive data.
@ -101,23 +77,13 @@ class Image(Model):
Example: Example:
>>> image = cli.images.get("busybox:latest") >>> image = cli.get_image("busybox:latest")
>>> f = open('/tmp/busybox-latest.tar', 'wb') >>> f = open('/tmp/busybox-latest.tar', 'w')
>>> for chunk in image.save(): >>> for chunk in image:
>>> f.write(chunk) >>> f.write(chunk)
>>> f.close() >>> f.close()
""" """
img = self.id return self.client.api.get_image(self.id, chunk_size)
if named:
img = self.tags[0] if self.tags else img
if isinstance(named, str):
if named not in self.tags:
raise InvalidArgument(
f"{named} is not a valid tag for this image"
)
img = named
return self.client.api.get_image(img, chunk_size)
def tag(self, repository, tag=None, **kwargs): def tag(self, repository, tag=None, **kwargs):
""" """
@ -144,7 +110,7 @@ class RegistryData(Model):
Image metadata stored on the registry, including available platforms. Image metadata stored on the registry, including available platforms.
""" """
def __init__(self, image_name, *args, **kwargs): def __init__(self, image_name, *args, **kwargs):
super().__init__(*args, **kwargs) super(RegistryData, self).__init__(*args, **kwargs)
self.image_name = image_name self.image_name = image_name
@property @property
@ -157,10 +123,10 @@ class RegistryData(Model):
@property @property
def short_id(self): def short_id(self):
""" """
The ID of the image truncated to 12 characters, plus the ``sha256:`` The ID of the image truncated to 10 characters, plus the ``sha256:``
prefix. prefix.
""" """
return self.id[:19] return self.id[:17]
def pull(self, platform=None): def pull(self, platform=None):
""" """
@ -197,7 +163,7 @@ class RegistryData(Model):
parts = platform.split('/') parts = platform.split('/')
if len(parts) > 3 or len(parts) < 1: if len(parts) > 3 or len(parts) < 1:
raise InvalidArgument( raise InvalidArgument(
f'"{platform}" is not a valid platform descriptor' '"{0}" is not a valid platform descriptor'.format(platform)
) )
platform = {'os': parts[0]} platform = {'os': parts[0]}
if len(parts) > 2: if len(parts) > 2:
@ -222,10 +188,10 @@ class ImageCollection(Collection):
Build an image and return it. Similar to the ``docker build`` Build an image and return it. Similar to the ``docker build``
command. Either ``path`` or ``fileobj`` must be set. command. Either ``path`` or ``fileobj`` must be set.
If you already have a tar file for the Docker build context (including If you have a tar file for the Docker build context (including a
a Dockerfile), pass a readable file-like object to ``fileobj`` Dockerfile) already, pass a readable file-like object to ``fileobj``
and also pass ``custom_context=True``. If the stream is also and also pass ``custom_context=True``. If the stream is compressed
compressed, set ``encoding`` to the correct value (e.g ``gzip``). also, set ``encoding`` to the correct value (e.g ``gzip``).
If you want to get the raw output of the build, use the If you want to get the raw output of the build, use the
:py:meth:`~docker.api.build.BuildApiMixin.build` method in the :py:meth:`~docker.api.build.BuildApiMixin.build` method in the
@ -275,14 +241,10 @@ class ImageCollection(Collection):
platform (str): Platform in the format ``os[/arch[/variant]]``. platform (str): Platform in the format ``os[/arch[/variant]]``.
isolation (str): Isolation technology used during build. isolation (str): Isolation technology used during build.
Default: `None`. Default: `None`.
use_config_proxy (bool): If ``True``, and if the docker client
configuration file (``~/.docker/config.json`` by default)
contains a proxy configuration, the corresponding environment
variables will be set in the container being built.
Returns: Returns:
(tuple): The first item is the :py:class:`Image` object for the (tuple): The first item is the :py:class:`Image` object for the
image that was built. The second item is a generator of the image that was build. The second item is a generator of the
build logs as JSON-decoded objects. build logs as JSON-decoded objects.
Raises: Raises:
@ -294,7 +256,7 @@ class ImageCollection(Collection):
If neither ``path`` nor ``fileobj`` is specified. If neither ``path`` nor ``fileobj`` is specified.
""" """
resp = self.client.api.build(**kwargs) resp = self.client.api.build(**kwargs)
if isinstance(resp, str): if isinstance(resp, six.string_types):
return self.get(resp) return self.get(resp)
last_event = None last_event = None
image_id = None image_id = None
@ -332,26 +294,22 @@ class ImageCollection(Collection):
""" """
return self.prepare_model(self.client.api.inspect_image(name)) return self.prepare_model(self.client.api.inspect_image(name))
def get_registry_data(self, name, auth_config=None): def get_registry_data(self, name):
""" """
Gets the registry data for an image. Gets the registry data for an image.
Args: Args:
name (str): The name of the image. name (str): The name of the image.
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
Returns: Returns:
(:py:class:`RegistryData`): The data object. (:py:class:`RegistryData`): The data object.
Raises: Raises:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
If the server returns an error. If the server returns an error.
""" """
return RegistryData( return RegistryData(
image_name=name, image_name=name,
attrs=self.client.api.inspect_distribution(name, auth_config), attrs=self.client.api.inspect_distribution(name),
client=self.client, client=self.client,
collection=self, collection=self,
) )
@ -367,8 +325,7 @@ class ImageCollection(Collection):
filters (dict): Filters to be processed on the image list. filters (dict): Filters to be processed on the image list.
Available filters: Available filters:
- ``dangling`` (bool) - ``dangling`` (bool)
- `label` (str|list): format either ``"key"``, ``"key=value"`` - ``label`` (str): format either ``key`` or ``key=value``
or a list of such.
Returns: Returns:
(list of :py:class:`Image`): The images. (list of :py:class:`Image`): The images.
@ -407,18 +364,17 @@ class ImageCollection(Collection):
if match: if match:
image_id = match.group(2) image_id = match.group(2)
images.append(image_id) images.append(image_id)
if 'errorDetail' in chunk: if 'error' in chunk:
raise ImageLoadError(chunk['errorDetail']['message']) raise ImageLoadError(chunk['error'])
return [self.get(i) for i in images] return [self.get(i) for i in images]
def pull(self, repository, tag=None, all_tags=False, **kwargs): def pull(self, repository, tag=None, **kwargs):
""" """
Pull an image of the given name and return it. Similar to the Pull an image of the given name and return it. Similar to the
``docker pull`` command. ``docker pull`` command.
If ``tag`` is ``None`` or empty, it is set to ``latest``. If no tag is specified, all tags from that repository will be
If ``all_tags`` is set, the ``tag`` parameter is ignored and all image pulled.
tags will be pulled.
If you want to get the raw pull output, use the If you want to get the raw pull output, use the
:py:meth:`~docker.api.image.ImageApiMixin.pull` method in the :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the
@ -427,15 +383,15 @@ class ImageCollection(Collection):
Args: Args:
repository (str): The repository to pull repository (str): The repository to pull
tag (str): The tag to pull tag (str): The tag to pull
auth_config (dict): Override the credentials that are found in the auth_config (dict): Override the credentials that
config for this request. ``auth_config`` should contain the :py:meth:`~docker.client.DockerClient.login` has set for
``username`` and ``password`` keys to be valid. this request. ``auth_config`` should contain the ``username``
and ``password`` keys to be valid.
platform (str): Platform in the format ``os[/arch[/variant]]`` platform (str): Platform in the format ``os[/arch[/variant]]``
all_tags (bool): Pull all image tags
Returns: Returns:
(:py:class:`Image` or list): The image that has been pulled. (:py:class:`Image` or list): The image that has been pulled.
If ``all_tags`` is True, the method will return a list If no ``tag`` was specified, the method will return a list
of :py:class:`Image` objects belonging to this repository. of :py:class:`Image` objects belonging to this repository.
Raises: Raises:
@ -445,33 +401,19 @@ class ImageCollection(Collection):
Example: Example:
>>> # Pull the image tagged `latest` in the busybox repo >>> # Pull the image tagged `latest` in the busybox repo
>>> image = client.images.pull('busybox') >>> image = client.images.pull('busybox:latest')
>>> # Pull all tags in the busybox repo >>> # Pull all tags in the busybox repo
>>> images = client.images.pull('busybox', all_tags=True) >>> images = client.images.pull('busybox')
""" """
repository, image_tag = parse_repository_tag(repository) if not tag:
tag = tag or image_tag or 'latest' repository, tag = parse_repository_tag(repository)
if 'stream' in kwargs: self.client.api.pull(repository, tag=tag, **kwargs)
warnings.warn( if tag:
'`stream` is not a valid parameter for this method' return self.get('{0}{2}{1}'.format(
' and will be overridden', repository, tag, '@' if tag.startswith('sha256:') else ':'
stacklevel=1, ))
)
del kwargs['stream']
pull_log = self.client.api.pull(
repository, tag=tag, stream=True, all_tags=all_tags, **kwargs
)
for _ in pull_log:
# We don't do anything with the logs, but we need
# to keep the connection alive and wait for the image
# to be pulled.
pass
if not all_tags:
sep = '@' if tag.startswith('sha256:') else ':'
return self.get(f'{repository}{sep}{tag}')
return self.list(repository) return self.list(repository)
def push(self, repository, tag=None, **kwargs): def push(self, repository, tag=None, **kwargs):

View File

@ -1,7 +1,7 @@
from ..api import APIClient from ..api import APIClient
from ..utils import version_gte from ..utils import version_gte
from .containers import Container from .containers import Container
from .resource import Collection, Model from .resource import Model, Collection
class Network(Model): class Network(Model):
@ -46,8 +46,6 @@ class Network(Model):
network, using the IPv6 protocol. Defaults to ``None``. network, using the IPv6 protocol. Defaults to ``None``.
link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6) link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
addresses. addresses.
driver_opt (dict): A dictionary of options to provide to the
network driver. Defaults to ``None``.
Raises: Raises:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
@ -184,7 +182,7 @@ class NetworkCollection(Collection):
def list(self, *args, **kwargs): def list(self, *args, **kwargs):
""" """
List networks. Similar to the ``docker network ls`` command. List networks. Similar to the ``docker networks ls`` command.
Args: Args:
names (:py:class:`list`): List of names to filter by. names (:py:class:`list`): List of names to filter by.
@ -192,8 +190,7 @@ class NetworkCollection(Collection):
filters (dict): Filters to be processed on the network list. filters (dict): Filters to be processed on the network list.
Available filters: Available filters:
- ``driver=[<driver-name>]`` Matches a network's driver. - ``driver=[<driver-name>]`` Matches a network's driver.
- `label` (str|list): format either ``"key"``, ``"key=value"`` - ``label=[<key>]`` or ``label=[<key>=<value>]``.
or a list of such.
- ``type=["custom"|"builtin"]`` Filters networks by type. - ``type=["custom"|"builtin"]`` Filters networks by type.
greedy (bool): Fetch more details for each network individually. greedy (bool): Fetch more details for each network individually.
You might want this to get the containers attached to them. You might want this to get the containers attached to them.

View File

@ -1,4 +1,4 @@
from .resource import Collection, Model from .resource import Model, Collection
class Node(Model): class Node(Model):

View File

@ -7,7 +7,7 @@ class Plugin(Model):
A plugin on the server. A plugin on the server.
""" """
def __repr__(self): def __repr__(self):
return f"<{self.__class__.__name__}: '{self.name}'>" return "<%s: '%s'>" % (self.__class__.__name__, self.name)
@property @property
def name(self): def name(self):
@ -44,19 +44,16 @@ class Plugin(Model):
self.client.api.configure_plugin(self.name, options) self.client.api.configure_plugin(self.name, options)
self.reload() self.reload()
def disable(self, force=False): def disable(self):
""" """
Disable the plugin. Disable the plugin.
Args:
force (bool): Force disable. Default: False
Raises: Raises:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
If the server returns an error. If the server returns an error.
""" """
self.client.api.disable_plugin(self.name, force) self.client.api.disable_plugin(self.name)
self.reload() self.reload()
def enable(self, timeout=0): def enable(self, timeout=0):
@ -120,12 +117,9 @@ class Plugin(Model):
if remote is None: if remote is None:
remote = self.name remote = self.name
privileges = self.client.api.plugin_privileges(remote) privileges = self.client.api.plugin_privileges(remote)
yield from self.client.api.upgrade_plugin( for d in self.client.api.upgrade_plugin(self.name, remote, privileges):
self.name, yield d
remote, self._reload()
privileges,
)
self.reload()
class PluginCollection(Collection): class PluginCollection(Collection):
@ -187,7 +181,7 @@ class PluginCollection(Collection):
""" """
privileges = self.client.api.plugin_privileges(remote_name) privileges = self.client.api.plugin_privileges(remote_name)
it = self.client.api.pull_plugin(remote_name, privileges, local_name) it = self.client.api.pull_plugin(remote_name, privileges, local_name)
for _data in it: for data in it:
pass pass
return self.get(local_name or remote_name) return self.get(local_name or remote_name)

View File

@ -1,4 +1,5 @@
class Model:
class Model(object):
""" """
A base class for representing a single object on the server. A base class for representing a single object on the server.
""" """
@ -17,13 +18,13 @@ class Model:
self.attrs = {} self.attrs = {}
def __repr__(self): def __repr__(self):
return f"<{self.__class__.__name__}: {self.short_id}>" return "<%s: %s>" % (self.__class__.__name__, self.short_id)
def __eq__(self, other): def __eq__(self, other):
return isinstance(other, self.__class__) and self.id == other.id return isinstance(other, self.__class__) and self.id == other.id
def __hash__(self): def __hash__(self):
return hash(f"{self.__class__.__name__}:{self.id}") return hash("%s:%s" % (self.__class__.__name__, self.id))
@property @property
def id(self): def id(self):
@ -35,9 +36,9 @@ class Model:
@property @property
def short_id(self): def short_id(self):
""" """
The ID of the object, truncated to 12 characters. The ID of the object, truncated to 10 characters.
""" """
return self.id[:12] return self.id[:10]
def reload(self): def reload(self):
""" """
@ -48,7 +49,7 @@ class Model:
self.attrs = new_model.attrs self.attrs = new_model.attrs
class Collection: class Collection(object):
""" """
A base class for representing all objects of a particular type on the A base class for representing all objects of a particular type on the
server. server.
@ -64,10 +65,9 @@ class Collection:
def __call__(self, *args, **kwargs): def __call__(self, *args, **kwargs):
raise TypeError( raise TypeError(
f"'{self.__class__.__name__}' object is not callable. " "'{}' object is not callable. You might be trying to use the old "
"You might be trying to use the old (pre-2.0) API - " "(pre-2.0) API - use docker.APIClient if so."
"use docker.APIClient if so." .format(self.__class__.__name__))
)
def list(self): def list(self):
raise NotImplementedError raise NotImplementedError
@ -89,4 +89,5 @@ class Collection:
elif isinstance(attrs, dict): elif isinstance(attrs, dict):
return self.model(attrs=attrs, client=self.client, collection=self) return self.model(attrs=attrs, client=self.client, collection=self)
else: else:
raise Exception(f"Can't create {self.model.__name__} from {attrs}") raise Exception("Can't create %s from %s" %
(self.model.__name__, attrs))

View File

@ -1,5 +1,5 @@
from ..api import APIClient from ..api import APIClient
from .resource import Collection, Model from .resource import Model, Collection
class Secret(Model): class Secret(Model):
@ -7,7 +7,7 @@ class Secret(Model):
id_attribute = 'ID' id_attribute = 'ID'
def __repr__(self): def __repr__(self):
return f"<{self.__class__.__name__}: '{self.name}'>" return "<%s: '%s'>" % (self.__class__.__name__, self.name)
@property @property
def name(self): def name(self):
@ -30,7 +30,6 @@ class SecretCollection(Collection):
def create(self, **kwargs): def create(self, **kwargs):
obj = self.client.api.create_secret(**kwargs) obj = self.client.api.create_secret(**kwargs)
obj.setdefault("Spec", {})["Name"] = kwargs.get("name")
return self.prepare_model(obj) return self.prepare_model(obj)
create.__doc__ = APIClient.create_secret.__doc__ create.__doc__ = APIClient.create_secret.__doc__

View File

@ -1,9 +1,7 @@
import copy import copy
from docker.errors import create_unexpected_kwargs_error, InvalidArgument
from docker.errors import InvalidArgument, create_unexpected_kwargs_error from docker.types import TaskTemplate, ContainerSpec, ServiceMode
from docker.types import ContainerSpec, Placement, ServiceMode, TaskTemplate from .resource import Model, Collection
from .resource import Collection, Model
class Service(Model): class Service(Model):
@ -44,7 +42,7 @@ class Service(Model):
``label``, and ``desired-state``. ``label``, and ``desired-state``.
Returns: Returns:
:py:class:`list`: List of task dictionaries. (:py:class:`list`): List of task dictionaries.
Raises: Raises:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
@ -105,8 +103,7 @@ class Service(Model):
integer or ``'all'`` to output all log lines. integer or ``'all'`` to output all log lines.
Default: ``all`` Default: ``all``
Returns: Returns (generator): Logs for the service.
generator: Logs for the service.
""" """
is_tty = self.attrs['Spec']['TaskTemplate']['ContainerSpec'].get( is_tty = self.attrs['Spec']['TaskTemplate']['ContainerSpec'].get(
'TTY', False 'TTY', False
@ -121,7 +118,7 @@ class Service(Model):
replicas (int): The number of containers that should be running. replicas (int): The number of containers that should be running.
Returns: Returns:
bool: ``True`` if successful. ``True``if successful.
""" """
if 'Global' in self.attrs['Spec']['Mode'].keys(): if 'Global' in self.attrs['Spec']['Mode'].keys():
@ -137,7 +134,7 @@ class Service(Model):
Force update the service even if no changes require it. Force update the service even if no changes require it.
Returns: Returns:
bool: ``True`` if successful. ``True``if successful.
""" """
return self.update(force_update=True, fetch_current_spec=True) return self.update(force_update=True, fetch_current_spec=True)
@ -155,22 +152,13 @@ class ServiceCollection(Collection):
image (str): The image name to use for the containers. image (str): The image name to use for the containers.
command (list of str or str): Command to run. command (list of str or str): Command to run.
args (list of str): Arguments to the command. args (list of str): Arguments to the command.
constraints (list of str): :py:class:`~docker.types.Placement` constraints (list of str): Placement constraints.
constraints.
preferences (list of tuple): :py:class:`~docker.types.Placement`
preferences.
maxreplicas (int): :py:class:`~docker.types.Placement` maxreplicas
or (int) representing maximum number of replicas per node.
platforms (list of tuple): A list of platform constraints
expressed as ``(arch, os)`` tuples.
container_labels (dict): Labels to apply to the container. container_labels (dict): Labels to apply to the container.
endpoint_spec (EndpointSpec): Properties that can be configured to endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``. access and load balance a service. Default: ``None``.
env (list of str): Environment variables, in the form env (list of str): Environment variables, in the form
``KEY=val``. ``KEY=val``.
hostname (string): Hostname to set on the container. hostname (string): Hostname to set on the container.
init (boolean): Run an init inside the container that forwards
signals and reaps processes
isolation (string): Isolation technology used by the service's isolation (string): Isolation technology used by the service's
containers. Only used for Windows containers. containers. Only used for Windows containers.
labels (dict): Labels to apply to the service. labels (dict): Labels to apply to the service.
@ -182,19 +170,16 @@ class ServiceCollection(Collection):
``source:target:options``, where options is either ``source:target:options``, where options is either
``ro`` or ``rw``. ``ro`` or ``rw``.
name (str): Name to give to the service. name (str): Name to give to the service.
networks (:py:class:`list`): List of network names or IDs or networks (list of str): List of network names or IDs to attach
:py:class:`~docker.types.NetworkAttachmentConfig` to attach the the service to. Default: ``None``.
service to. Default: ``None``.
resources (Resources): Resource limits and reservations. resources (Resources): Resource limits and reservations.
restart_policy (RestartPolicy): Restart policy for containers. restart_policy (RestartPolicy): Restart policy for containers.
secrets (list of :py:class:`~docker.types.SecretReference`): List secrets (list of :py:class:`docker.types.SecretReference`): List
of secrets accessible to containers for this service. of secrets accessible to containers for this service.
stop_grace_period (int): Amount of time to wait for stop_grace_period (int): Amount of time to wait for
containers to terminate before forcefully killing them. containers to terminate before forcefully killing them.
update_config (UpdateConfig): Specification for the update strategy update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None`` of the service. Default: ``None``
rollback_config (RollbackConfig): Specification for the rollback
strategy of the service. Default: ``None``
user (str): User to run commands as. user (str): User to run commands as.
workdir (str): Working directory for commands to run. workdir (str): Working directory for commands to run.
tty (boolean): Whether a pseudo-TTY should be allocated. tty (boolean): Whether a pseudo-TTY should be allocated.
@ -210,20 +195,13 @@ class ServiceCollection(Collection):
the container's `hosts` file. the container's `hosts` file.
dns_config (DNSConfig): Specification for DNS dns_config (DNSConfig): Specification for DNS
related configurations in resolver configuration file. related configurations in resolver configuration file.
configs (:py:class:`list`): List of configs (:py:class:`list`): List of :py:class:`ConfigReference`
:py:class:`~docker.types.ConfigReference` that will be exposed that will be exposed to the service.
to the service.
privileges (Privileges): Security options for the service's privileges (Privileges): Security options for the service's
containers. containers.
cap_add (:py:class:`list`): A list of kernel capabilities to add to
the default set for the container.
cap_drop (:py:class:`list`): A list of kernel capabilities to drop
from the default set for the container.
sysctls (:py:class:`dict`): A dict of sysctl values to add to the
container
Returns: Returns:
:py:class:`Service`: The created service. (:py:class:`Service`) The created service.
Raises: Raises:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
@ -245,7 +223,7 @@ class ServiceCollection(Collection):
into the output. into the output.
Returns: Returns:
:py:class:`Service`: The service. (:py:class:`Service`): The service.
Raises: Raises:
:py:class:`docker.errors.NotFound` :py:class:`docker.errors.NotFound`
@ -268,11 +246,9 @@ class ServiceCollection(Collection):
filters (dict): Filters to process on the nodes list. Valid filters (dict): Filters to process on the nodes list. Valid
filters: ``id``, ``name`` , ``label`` and ``mode``. filters: ``id``, ``name`` , ``label`` and ``mode``.
Default: ``None``. Default: ``None``.
status (bool): Include the service task count of running and
desired tasks. Default: ``None``.
Returns: Returns:
list of :py:class:`Service`: The services. (list of :py:class:`Service`): The services.
Raises: Raises:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
@ -287,8 +263,6 @@ class ServiceCollection(Collection):
# kwargs to copy straight over to ContainerSpec # kwargs to copy straight over to ContainerSpec
CONTAINER_SPEC_KWARGS = [ CONTAINER_SPEC_KWARGS = [
'args', 'args',
'cap_add',
'cap_drop',
'command', 'command',
'configs', 'configs',
'dns_config', 'dns_config',
@ -298,7 +272,6 @@ CONTAINER_SPEC_KWARGS = [
'hostname', 'hostname',
'hosts', 'hosts',
'image', 'image',
'init',
'isolation', 'isolation',
'labels', 'labels',
'mounts', 'mounts',
@ -311,7 +284,6 @@ CONTAINER_SPEC_KWARGS = [
'tty', 'tty',
'user', 'user',
'workdir', 'workdir',
'sysctls',
] ]
# kwargs to copy straight over to TaskTemplate # kwargs to copy straight over to TaskTemplate
@ -327,17 +299,9 @@ CREATE_SERVICE_KWARGS = [
'labels', 'labels',
'mode', 'mode',
'update_config', 'update_config',
'rollback_config',
'endpoint_spec', 'endpoint_spec',
] ]
PLACEMENT_KWARGS = [
'constraints',
'preferences',
'platforms',
'maxreplicas',
]
def _get_create_service_kwargs(func_name, kwargs): def _get_create_service_kwargs(func_name, kwargs):
# Copy over things which can be copied directly # Copy over things which can be copied directly
@ -357,12 +321,10 @@ def _get_create_service_kwargs(func_name, kwargs):
if 'container_labels' in kwargs: if 'container_labels' in kwargs:
container_spec_kwargs['labels'] = kwargs.pop('container_labels') container_spec_kwargs['labels'] = kwargs.pop('container_labels')
placement = {} if 'constraints' in kwargs:
for key in copy.copy(kwargs): task_template_kwargs['placement'] = {
if key in PLACEMENT_KWARGS: 'Constraints': kwargs.pop('constraints')
placement[key] = kwargs.pop(key) }
placement = Placement(**placement)
task_template_kwargs['placement'] = placement
if 'log_driver' in kwargs: if 'log_driver' in kwargs:
task_template_kwargs['log_driver'] = { task_template_kwargs['log_driver'] = {

View File

@ -1,6 +1,5 @@
from docker.api import APIClient from docker.api import APIClient
from docker.errors import APIError from docker.errors import APIError
from .resource import Model from .resource import Model
@ -12,7 +11,7 @@ class Swarm(Model):
id_attribute = 'ID' id_attribute = 'ID'
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs) super(Swarm, self).__init__(*args, **kwargs)
if self.client: if self.client:
try: try:
self.reload() self.reload()
@ -35,9 +34,7 @@ class Swarm(Model):
get_unlock_key.__doc__ = APIClient.get_unlock_key.__doc__ get_unlock_key.__doc__ = APIClient.get_unlock_key.__doc__
def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377', def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
force_new_cluster=False, default_addr_pool=None, force_new_cluster=False, **kwargs):
subnet_size=None, data_path_addr=None, data_path_port=None,
**kwargs):
""" """
Initialize a new swarm on this Engine. Initialize a new swarm on this Engine.
@ -59,17 +56,6 @@ class Swarm(Model):
is used. Default: ``0.0.0.0:2377`` is used. Default: ``0.0.0.0:2377``
force_new_cluster (bool): Force creating a new Swarm, even if force_new_cluster (bool): Force creating a new Swarm, even if
already part of one. Default: False already part of one. Default: False
default_addr_pool (list of str): Default Address Pool specifies
default subnet pools for global scope networks. Each pool
should be specified as a CIDR block, like '10.0.0.0/8'.
Default: None
subnet_size (int): SubnetSize specifies the subnet size of the
networks created from the default subnet pool. Default: None
data_path_addr (string): Address or interface to use for data path
traffic. For example, 192.168.1.1, or an interface, like eth0.
data_path_port (int): Port number to use for data path traffic.
Acceptable port range is 1024 to 49151. If set to ``None`` or
0, the default port 4789 will be used. Default: None
task_history_retention_limit (int): Maximum number of tasks task_history_retention_limit (int): Maximum number of tasks
history stored. history stored.
snapshot_interval (int): Number of logs entries between snapshot. snapshot_interval (int): Number of logs entries between snapshot.
@ -103,7 +89,7 @@ class Swarm(Model):
created in the orchestrator. created in the orchestrator.
Returns: Returns:
(str): The ID of the created node. ``True`` if the request went through.
Raises: Raises:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
@ -113,8 +99,7 @@ class Swarm(Model):
>>> client.swarm.init( >>> client.swarm.init(
advertise_addr='eth0', listen_addr='0.0.0.0:5000', advertise_addr='eth0', listen_addr='0.0.0.0:5000',
force_new_cluster=False, default_addr_pool=['10.20.0.0/16], force_new_cluster=False, snapshot_interval=5000,
subnet_size=24, snapshot_interval=5000,
log_entries_for_slow_followers=1200 log_entries_for_slow_followers=1200
) )
@ -122,16 +107,11 @@ class Swarm(Model):
init_kwargs = { init_kwargs = {
'advertise_addr': advertise_addr, 'advertise_addr': advertise_addr,
'listen_addr': listen_addr, 'listen_addr': listen_addr,
'force_new_cluster': force_new_cluster, 'force_new_cluster': force_new_cluster
'default_addr_pool': default_addr_pool,
'subnet_size': subnet_size,
'data_path_addr': data_path_addr,
'data_path_port': data_path_port,
} }
init_kwargs['swarm_spec'] = self.client.api.create_swarm_spec(**kwargs) init_kwargs['swarm_spec'] = self.client.api.create_swarm_spec(**kwargs)
node_id = self.client.api.init_swarm(**init_kwargs) self.client.api.init_swarm(**init_kwargs)
self.reload() self.reload()
return node_id
def join(self, *args, **kwargs): def join(self, *args, **kwargs):
return self.client.api.join_swarm(*args, **kwargs) return self.client.api.join_swarm(*args, **kwargs)
@ -157,7 +137,7 @@ class Swarm(Model):
unlock.__doc__ = APIClient.unlock_swarm.__doc__ unlock.__doc__ = APIClient.unlock_swarm.__doc__
def update(self, rotate_worker_token=False, rotate_manager_token=False, def update(self, rotate_worker_token=False, rotate_manager_token=False,
rotate_manager_unlock_key=False, **kwargs): **kwargs):
""" """
Update the swarm's configuration. Update the swarm's configuration.
@ -170,8 +150,7 @@ class Swarm(Model):
``False``. ``False``.
rotate_manager_token (bool): Rotate the manager join token. rotate_manager_token (bool): Rotate the manager join token.
Default: ``False``. Default: ``False``.
rotate_manager_unlock_key (bool): Rotate the manager unlock key.
Default: ``False``.
Raises: Raises:
:py:class:`docker.errors.APIError` :py:class:`docker.errors.APIError`
If the server returns an error. If the server returns an error.
@ -185,6 +164,5 @@ class Swarm(Model):
version=self.version, version=self.version,
swarm_spec=self.client.api.create_swarm_spec(**kwargs), swarm_spec=self.client.api.create_swarm_spec(**kwargs),
rotate_worker_token=rotate_worker_token, rotate_worker_token=rotate_worker_token,
rotate_manager_token=rotate_manager_token, rotate_manager_token=rotate_manager_token
rotate_manager_unlock_key=rotate_manager_unlock_key
) )

View File

@ -1,5 +1,5 @@
from ..api import APIClient from ..api import APIClient
from .resource import Collection, Model from .resource import Model, Collection
class Volume(Model): class Volume(Model):

View File

@ -1,31 +1,68 @@
import os import os
import ssl
from . import errors from . import errors
from .transport import SSLAdapter
class TLSConfig: class TLSConfig(object):
""" """
TLS configuration. TLS configuration.
Args: Args:
client_cert (tuple of str): Path to client cert, path to client key. client_cert (tuple of str): Path to client cert, path to client key.
ca_cert (str): Path to CA cert file. ca_cert (str): Path to CA cert file.
verify (bool or str): This can be a bool or a path to a CA cert verify (bool or str): This can be ``False`` or a path to a CA cert
file to verify against. If ``True``, verify using ca_cert; file.
if ``False`` or not specified, do not verify. ssl_version (int): A valid `SSL version`_.
assert_hostname (bool): Verify the hostname of the server.
.. _`SSL version`:
https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
""" """
cert = None cert = None
ca_cert = None ca_cert = None
verify = None verify = None
ssl_version = None
def __init__(self, client_cert=None, ca_cert=None, verify=None): def __init__(self, client_cert=None, ca_cert=None, verify=None,
ssl_version=None, assert_hostname=None,
assert_fingerprint=None):
# Argument compatibility/mapping with # Argument compatibility/mapping with
# https://docs.docker.com/engine/articles/https/ # https://docs.docker.com/engine/articles/https/
# This diverges from the Docker CLI in that users can specify 'tls' # This diverges from the Docker CLI in that users can specify 'tls'
# here, but also disable any public/default CA pool verification by # here, but also disable any public/default CA pool verification by
# leaving verify=False # leaving tls_verify=False
# "client_cert" must have both or neither cert/key files. In self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
# TODO(dperny): according to the python docs, PROTOCOL_TLSvWhatever is
# depcreated, and it's recommended to use OPT_NO_TLSvWhatever instead
# to exclude versions. But I think that might require a bigger
# architectural change, so I've opted not to pursue it at this time
# If the user provides an SSL version, we should use their preference
if ssl_version:
self.ssl_version = ssl_version
else:
# If the user provides no ssl version, we should default to
# TLSv1_2. This option is the most secure, and will work for the
# majority of users with reasonably up-to-date software. However,
# before doing so, detect openssl version to ensure we can support
# it.
if ssl.OPENSSL_VERSION_INFO[:3] >= (1, 0, 1) and hasattr(
ssl, 'PROTOCOL_TLSv1_2'):
# If the OpenSSL version is high enough to support TLSv1_2,
# then we should use it.
self.ssl_version = getattr(ssl, 'PROTOCOL_TLSv1_2')
else:
# Otherwise, TLS v1.0 seems to be the safest default;
# SSLv23 fails in mysterious ways:
# https://github.com/docker/docker-py/issues/963
self.ssl_version = ssl.PROTOCOL_TLSv1
# "tls" and "tls_verify" must have both or neither cert/key files In
# either case, Alert the user when both are expected, but any are # either case, Alert the user when both are expected, but any are
# missing. # missing.
@ -34,15 +71,15 @@ class TLSConfig:
tls_cert, tls_key = client_cert tls_cert, tls_key = client_cert
except ValueError: except ValueError:
raise errors.TLSParameterError( raise errors.TLSParameterError(
'client_cert must be a tuple of' 'client_config must be a tuple of'
' (client certificate, key file)' ' (client certificate, key file)'
) from None )
if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
not os.path.isfile(tls_key)): not os.path.isfile(tls_key)):
raise errors.TLSParameterError( raise errors.TLSParameterError(
'Path to a certificate and key files must be provided' 'Path to a certificate and key files must be provided'
' through the client_cert param' ' through the client_config param'
) )
self.cert = (tls_cert, tls_key) self.cert = (tls_cert, tls_key)
@ -51,13 +88,15 @@ class TLSConfig:
self.ca_cert = ca_cert self.ca_cert = ca_cert
if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert): if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
raise errors.TLSParameterError( raise errors.TLSParameterError(
'Invalid CA certificate provided for `ca_cert`.' 'Invalid CA certificate provided for `tls_ca_cert`.'
) )
def configure_client(self, client): def configure_client(self, client):
""" """
Configure a client with these TLS options. Configure a client with these TLS options.
""" """
client.ssl_version = self.ssl_version
if self.verify and self.ca_cert: if self.verify and self.ca_cert:
client.verify = self.ca_cert client.verify = self.ca_cert
else: else:
@ -65,3 +104,9 @@ class TLSConfig:
if self.cert: if self.cert:
client.cert = self.cert client.cert = self.cert
client.mount('https://', SSLAdapter(
ssl_version=self.ssl_version,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint,
))

View File

@ -1,12 +1,8 @@
from .unixconn import UnixHTTPAdapter # flake8: noqa
from .unixconn import UnixAdapter
from .ssladapter import SSLAdapter
try: try:
from .npipeconn import NpipeHTTPAdapter from .npipeconn import NpipeAdapter
from .npipesocket import NpipeSocket from .npipesocket import NpipeSocket
except ImportError: except ImportError:
pass pass
try:
from .sshconn import SSHHTTPAdapter
except ImportError:
pass

View File

@ -1,13 +0,0 @@
import requests.adapters
class BaseHTTPAdapter(requests.adapters.HTTPAdapter):
def close(self):
super().close()
if hasattr(self, 'pools'):
self.pools.clear()
# Fix for requests 2.32.2+:
# https://github.com/psf/requests/commit/c98e4d133ef29c46a9b68cd783087218a8075e05
def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None):
return self.get_connection(request.url, proxies)

View File

@ -1,19 +1,25 @@
import queue import six
import requests.adapters import requests.adapters
import urllib3
import urllib3.connection
from .. import constants from .. import constants
from .basehttpadapter import BaseHTTPAdapter
from .npipesocket import NpipeSocket from .npipesocket import NpipeSocket
if six.PY3:
import http.client as httplib
else:
import httplib
try:
import requests.packages.urllib3 as urllib3
except ImportError:
import urllib3
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class NpipeHTTPConnection(urllib3.connection.HTTPConnection): class NpipeHTTPConnection(httplib.HTTPConnection, object):
def __init__(self, npipe_path, timeout=60): def __init__(self, npipe_path, timeout=60):
super().__init__( super(NpipeHTTPConnection, self).__init__(
'localhost', timeout=timeout 'localhost', timeout=timeout
) )
self.npipe_path = npipe_path self.npipe_path = npipe_path
@ -28,7 +34,7 @@ class NpipeHTTPConnection(urllib3.connection.HTTPConnection):
class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool): class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, npipe_path, timeout=60, maxsize=10): def __init__(self, npipe_path, timeout=60, maxsize=10):
super().__init__( super(NpipeHTTPConnectionPool, self).__init__(
'localhost', timeout=timeout, maxsize=maxsize 'localhost', timeout=timeout, maxsize=maxsize
) )
self.npipe_path = npipe_path self.npipe_path = npipe_path
@ -46,38 +52,36 @@ class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
conn = None conn = None
try: try:
conn = self.pool.get(block=self.block, timeout=timeout) conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError as ae: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from ae
except queue.Empty: except AttributeError: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
except six.moves.queue.Empty:
if self.block: if self.block:
raise urllib3.exceptions.EmptyPoolError( raise urllib3.exceptions.EmptyPoolError(
self, self,
"Pool reached maximum size and no more " "Pool reached maximum size and no more "
"connections are allowed." "connections are allowed."
) from None )
# Oh well, we'll create a new connection then pass # Oh well, we'll create a new connection then
return conn or self._new_conn() return conn or self._new_conn()
class NpipeHTTPAdapter(BaseHTTPAdapter): class NpipeAdapter(requests.adapters.HTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['npipe_path', __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['npipe_path',
'pools', 'pools',
'timeout', 'timeout']
'max_pool_size']
def __init__(self, base_url, timeout=60, def __init__(self, base_url, timeout=60,
pool_connections=constants.DEFAULT_NUM_POOLS, pool_connections=constants.DEFAULT_NUM_POOLS):
max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
self.npipe_path = base_url.replace('npipe://', '') self.npipe_path = base_url.replace('npipe://', '')
self.timeout = timeout self.timeout = timeout
self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer( self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close() pool_connections, dispose_func=lambda p: p.close()
) )
super().__init__() super(NpipeAdapter, self).__init__()
def get_connection(self, url, proxies=None): def get_connection(self, url, proxies=None):
with self.pools.lock: with self.pools.lock:
@ -86,8 +90,7 @@ class NpipeHTTPAdapter(BaseHTTPAdapter):
return pool return pool
pool = NpipeHTTPConnectionPool( pool = NpipeHTTPConnectionPool(
self.npipe_path, self.timeout, self.npipe_path, self.timeout
maxsize=self.max_pool_size
) )
self.pools[url] = pool self.pools[url] = pool
@ -100,3 +103,6 @@ class NpipeHTTPAdapter(BaseHTTPAdapter):
# anyway, we simply return the path URL directly. # anyway, we simply return the path URL directly.
# See also: https://github.com/docker/docker-sdk-python/issues/811 # See also: https://github.com/docker/docker-sdk-python/issues/811
return request.path_url return request.path_url
def close(self):
self.pools.clear()

View File

@ -1,10 +1,7 @@
import functools import functools
import io import io
import time
import pywintypes import six
import win32api
import win32event
import win32file import win32file
import win32pipe import win32pipe
@ -12,7 +9,7 @@ cERROR_PIPE_BUSY = 0xe7
cSECURITY_SQOS_PRESENT = 0x100000 cSECURITY_SQOS_PRESENT = 0x100000
cSECURITY_ANONYMOUS = 0 cSECURITY_ANONYMOUS = 0
MAXIMUM_RETRY_COUNT = 10 RETRY_WAIT_TIMEOUT = 10000
def check_closed(f): def check_closed(f):
@ -26,7 +23,7 @@ def check_closed(f):
return wrapped return wrapped
class NpipeSocket: class NpipeSocket(object):
""" Partial implementation of the socket API over windows named pipes. """ Partial implementation of the socket API over windows named pipes.
This implementation is only designed to be used as a client socket, This implementation is only designed to be used as a client socket,
and server-specific methods (bind, listen, accept...) are not and server-specific methods (bind, listen, accept...) are not
@ -49,7 +46,8 @@ class NpipeSocket:
self._closed = True self._closed = True
@check_closed @check_closed
def connect(self, address, retry_count=0): def connect(self, address):
win32pipe.WaitNamedPipe(address, self._timeout)
try: try:
handle = win32file.CreateFile( handle = win32file.CreateFile(
address, address,
@ -57,9 +55,7 @@ class NpipeSocket:
0, 0,
None, None,
win32file.OPEN_EXISTING, win32file.OPEN_EXISTING,
(cSECURITY_ANONYMOUS cSECURITY_ANONYMOUS | cSECURITY_SQOS_PRESENT,
| cSECURITY_SQOS_PRESENT
| win32file.FILE_FLAG_OVERLAPPED),
0 0
) )
except win32pipe.error as e: except win32pipe.error as e:
@ -69,10 +65,8 @@ class NpipeSocket:
# Another program or thread has grabbed our pipe instance # Another program or thread has grabbed our pipe instance
# before we got to it. Wait for availability and attempt to # before we got to it. Wait for availability and attempt to
# connect again. # connect again.
retry_count = retry_count + 1 win32pipe.WaitNamedPipe(address, RETRY_WAIT_TIMEOUT)
if (retry_count < MAXIMUM_RETRY_COUNT): return self.connect(address)
time.sleep(1)
return self.connect(address, retry_count)
raise e raise e
self.flags = win32pipe.GetNamedPipeInfo(handle)[0] self.flags = win32pipe.GetNamedPipeInfo(handle)[0]
@ -93,6 +87,10 @@ class NpipeSocket:
def dup(self): def dup(self):
return NpipeSocket(self._handle) return NpipeSocket(self._handle)
@check_closed
def fileno(self):
return int(self._handle)
def getpeername(self): def getpeername(self):
return self._address return self._address
@ -132,41 +130,29 @@ class NpipeSocket:
@check_closed @check_closed
def recv_into(self, buf, nbytes=0): def recv_into(self, buf, nbytes=0):
if six.PY2:
return self._recv_into_py2(buf, nbytes)
readbuf = buf readbuf = buf
if not isinstance(buf, memoryview): if not isinstance(buf, memoryview):
readbuf = memoryview(buf) readbuf = memoryview(buf)
event = win32event.CreateEvent(None, True, True, None)
try:
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = event
err, data = win32file.ReadFile( err, data = win32file.ReadFile(
self._handle, self._handle,
readbuf[:nbytes] if nbytes else readbuf, readbuf[:nbytes] if nbytes else readbuf
overlapped
) )
wait_result = win32event.WaitForSingleObject(event, self._timeout) return len(data)
if wait_result == win32event.WAIT_TIMEOUT:
win32file.CancelIo(self._handle) def _recv_into_py2(self, buf, nbytes):
raise TimeoutError err, data = win32file.ReadFile(self._handle, nbytes or len(buf))
return win32file.GetOverlappedResult(self._handle, overlapped, 0) n = len(data)
finally: buf[:n] = data
win32api.CloseHandle(event) return n
@check_closed @check_closed
def send(self, string, flags=0): def send(self, string, flags=0):
event = win32event.CreateEvent(None, True, True, None) err, nbytes = win32file.WriteFile(self._handle, string)
try: return nbytes
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = event
win32file.WriteFile(self._handle, string, overlapped)
wait_result = win32event.WaitForSingleObject(event, self._timeout)
if wait_result == win32event.WAIT_TIMEOUT:
win32file.CancelIo(self._handle)
raise TimeoutError
return win32file.GetOverlappedResult(self._handle, overlapped, 0)
finally:
win32api.CloseHandle(event)
@check_closed @check_closed
def sendall(self, string, flags=0): def sendall(self, string, flags=0):
@ -185,12 +171,15 @@ class NpipeSocket:
def settimeout(self, value): def settimeout(self, value):
if value is None: if value is None:
# Blocking mode # Blocking mode
self._timeout = win32event.INFINITE self._timeout = win32pipe.NMPWAIT_WAIT_FOREVER
elif not isinstance(value, (float, int)) or value < 0: elif not isinstance(value, (float, int)) or value < 0:
raise ValueError('Timeout value out of range') raise ValueError('Timeout value out of range')
elif value == 0:
# Non-blocking mode
self._timeout = win32pipe.NMPWAIT_NO_WAIT
else: else:
# Timeout mode - Value converted to milliseconds # Timeout mode - Value converted to milliseconds
self._timeout = int(value * 1000) self._timeout = value * 1000
def gettimeout(self): def gettimeout(self):
return self._timeout return self._timeout
@ -208,7 +197,7 @@ class NpipeFileIOBase(io.RawIOBase):
self.sock = npipe_socket self.sock = npipe_socket
def close(self): def close(self):
super().close() super(NpipeFileIOBase, self).close()
self.sock = None self.sock = None
def fileno(self): def fileno(self):

View File

@ -1,250 +0,0 @@
import logging
import os
import queue
import signal
import socket
import subprocess
import urllib.parse
import paramiko
import requests.adapters
import urllib3
import urllib3.connection
from .. import constants
from .basehttpadapter import BaseHTTPAdapter
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class SSHSocket(socket.socket):
def __init__(self, host):
super().__init__(
socket.AF_INET, socket.SOCK_STREAM)
self.host = host
self.port = None
self.user = None
if ':' in self.host:
self.host, self.port = self.host.split(':')
if '@' in self.host:
self.user, self.host = self.host.split('@')
self.proc = None
def connect(self, **kwargs):
args = ['ssh']
if self.user:
args = args + ['-l', self.user]
if self.port:
args = args + ['-p', self.port]
args = args + ['--', self.host, 'docker system dial-stdio']
preexec_func = None
if not constants.IS_WINDOWS_PLATFORM:
def f():
signal.signal(signal.SIGINT, signal.SIG_IGN)
preexec_func = f
env = dict(os.environ)
# drop LD_LIBRARY_PATH and SSL_CERT_FILE
env.pop('LD_LIBRARY_PATH', None)
env.pop('SSL_CERT_FILE', None)
self.proc = subprocess.Popen(
args,
env=env,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
preexec_fn=preexec_func)
def _write(self, data):
if not self.proc or self.proc.stdin.closed:
raise Exception('SSH subprocess not initiated.'
'connect() must be called first.')
written = self.proc.stdin.write(data)
self.proc.stdin.flush()
return written
def sendall(self, data):
self._write(data)
def send(self, data):
return self._write(data)
def recv(self, n):
if not self.proc:
raise Exception('SSH subprocess not initiated.'
'connect() must be called first.')
return self.proc.stdout.read(n)
def makefile(self, mode):
if not self.proc:
self.connect()
self.proc.stdout.channel = self
return self.proc.stdout
def close(self):
if not self.proc or self.proc.stdin.closed:
return
self.proc.stdin.write(b'\n\n')
self.proc.stdin.flush()
self.proc.terminate()
class SSHConnection(urllib3.connection.HTTPConnection):
def __init__(self, ssh_transport=None, timeout=60, host=None):
super().__init__(
'localhost', timeout=timeout
)
self.ssh_transport = ssh_transport
self.timeout = timeout
self.ssh_host = host
def connect(self):
if self.ssh_transport:
sock = self.ssh_transport.open_session()
sock.settimeout(self.timeout)
sock.exec_command('docker system dial-stdio')
else:
sock = SSHSocket(self.ssh_host)
sock.settimeout(self.timeout)
sock.connect()
self.sock = sock
class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
scheme = 'ssh'
def __init__(self, ssh_client=None, timeout=60, maxsize=10, host=None):
super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.ssh_transport = None
self.timeout = timeout
if ssh_client:
self.ssh_transport = ssh_client.get_transport()
self.ssh_host = host
def _new_conn(self):
return SSHConnection(self.ssh_transport, self.timeout, self.ssh_host)
# When re-using connections, urllib3 calls fileno() on our
# SSH channel instance, quickly overloading our fd limit. To avoid this,
# we override _get_conn
def _get_conn(self, timeout):
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError as ae: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from ae
except queue.Empty:
if self.block:
raise urllib3.exceptions.EmptyPoolError(
self,
"Pool reached maximum size and no more "
"connections are allowed."
) from None
# Oh well, we'll create a new connection then
return conn or self._new_conn()
class SSHHTTPAdapter(BaseHTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + [
'pools', 'timeout', 'ssh_client', 'ssh_params', 'max_pool_size'
]
def __init__(self, base_url, timeout=60,
pool_connections=constants.DEFAULT_NUM_POOLS,
max_pool_size=constants.DEFAULT_MAX_POOL_SIZE,
shell_out=False):
self.ssh_client = None
if not shell_out:
self._create_paramiko_client(base_url)
self._connect()
self.ssh_host = base_url
if base_url.startswith('ssh://'):
self.ssh_host = base_url[len('ssh://'):]
self.timeout = timeout
self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
super().__init__()
def _create_paramiko_client(self, base_url):
logging.getLogger("paramiko").setLevel(logging.WARNING)
self.ssh_client = paramiko.SSHClient()
base_url = urllib.parse.urlparse(base_url)
self.ssh_params = {
"hostname": base_url.hostname,
"port": base_url.port,
"username": base_url.username
}
ssh_config_file = os.path.expanduser("~/.ssh/config")
if os.path.exists(ssh_config_file):
conf = paramiko.SSHConfig()
with open(ssh_config_file) as f:
conf.parse(f)
host_config = conf.lookup(base_url.hostname)
if 'proxycommand' in host_config:
self.ssh_params["sock"] = paramiko.ProxyCommand(
host_config['proxycommand']
)
if 'hostname' in host_config:
self.ssh_params['hostname'] = host_config['hostname']
if base_url.port is None and 'port' in host_config:
self.ssh_params['port'] = host_config['port']
if base_url.username is None and 'user' in host_config:
self.ssh_params['username'] = host_config['user']
if 'identityfile' in host_config:
self.ssh_params['key_filename'] = host_config['identityfile']
self.ssh_client.load_system_host_keys()
self.ssh_client.set_missing_host_key_policy(paramiko.RejectPolicy())
def _connect(self):
if self.ssh_client:
self.ssh_client.connect(**self.ssh_params)
def get_connection(self, url, proxies=None):
if not self.ssh_client:
return SSHConnectionPool(
ssh_client=self.ssh_client,
timeout=self.timeout,
maxsize=self.max_pool_size,
host=self.ssh_host
)
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
# Connection is closed try a reconnect
if self.ssh_client and not self.ssh_client.get_transport():
self._connect()
pool = SSHConnectionPool(
ssh_client=self.ssh_client,
timeout=self.timeout,
maxsize=self.max_pool_size,
host=self.ssh_host
)
self.pools[url] = pool
return pool
def close(self):
super().close()
if self.ssh_client:
self.ssh_client.close()

View File

@ -0,0 +1,71 @@
""" Resolves OpenSSL issues in some servers:
https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
https://github.com/kennethreitz/requests/pull/799
"""
import sys
from distutils.version import StrictVersion
from requests.adapters import HTTPAdapter
try:
import requests.packages.urllib3 as urllib3
except ImportError:
import urllib3
PoolManager = urllib3.poolmanager.PoolManager
# Monkey-patching match_hostname with a version that supports
# IP-address checking. Not necessary for Python 3.5 and above
if sys.version_info[0] < 3 or sys.version_info[1] < 5:
from backports.ssl_match_hostname import match_hostname
urllib3.connection.match_hostname = match_hostname
class SSLAdapter(HTTPAdapter):
'''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
__attrs__ = HTTPAdapter.__attrs__ + ['assert_fingerprint',
'assert_hostname',
'ssl_version']
def __init__(self, ssl_version=None, assert_hostname=None,
assert_fingerprint=None, **kwargs):
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
super(SSLAdapter, self).__init__(**kwargs)
def init_poolmanager(self, connections, maxsize, block=False):
kwargs = {
'num_pools': connections,
'maxsize': maxsize,
'block': block,
'assert_hostname': self.assert_hostname,
'assert_fingerprint': self.assert_fingerprint,
}
if self.ssl_version and self.can_override_ssl_version():
kwargs['ssl_version'] = self.ssl_version
self.poolmanager = PoolManager(**kwargs)
def get_connection(self, *args, **kwargs):
"""
Ensure assert_hostname is set correctly on our pool
We already take care of a normal poolmanager via init_poolmanager
But we still need to take care of when there is a proxy poolmanager
"""
conn = super(SSLAdapter, self).get_connection(*args, **kwargs)
if conn.assert_hostname != self.assert_hostname:
conn.assert_hostname = self.assert_hostname
return conn
def can_override_ssl_version(self):
urllib_ver = urllib3.__version__.split('-')[0]
if urllib_ver is None:
return False
if urllib_ver == 'dev':
return True
return StrictVersion(urllib_ver) > StrictVersion('1.5')

View File

@ -1,24 +1,40 @@
import socket import six
import requests.adapters import requests.adapters
import urllib3 import socket
import urllib3.connection from six.moves import http_client as httplib
from .. import constants from .. import constants
from .basehttpadapter import BaseHTTPAdapter
try:
import requests.packages.urllib3 as urllib3
except ImportError:
import urllib3
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class UnixHTTPConnection(urllib3.connection.HTTPConnection): class UnixHTTPResponse(httplib.HTTPResponse, object):
def __init__(self, sock, *args, **kwargs):
disable_buffering = kwargs.pop('disable_buffering', False)
if six.PY2:
# FIXME: We may need to disable buffering on Py3 as well,
# but there's no clear way to do it at the moment. See:
# https://github.com/docker/docker-py/issues/1799
kwargs['buffering'] = not disable_buffering
super(UnixHTTPResponse, self).__init__(sock, *args, **kwargs)
class UnixHTTPConnection(httplib.HTTPConnection, object):
def __init__(self, base_url, unix_socket, timeout=60): def __init__(self, base_url, unix_socket, timeout=60):
super().__init__( super(UnixHTTPConnection, self).__init__(
'localhost', timeout=timeout 'localhost', timeout=timeout
) )
self.base_url = base_url self.base_url = base_url
self.unix_socket = unix_socket self.unix_socket = unix_socket
self.timeout = timeout self.timeout = timeout
self.disable_buffering = False
def connect(self): def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
@ -26,10 +42,21 @@ class UnixHTTPConnection(urllib3.connection.HTTPConnection):
sock.connect(self.unix_socket) sock.connect(self.unix_socket)
self.sock = sock self.sock = sock
def putheader(self, header, *values):
super(UnixHTTPConnection, self).putheader(header, *values)
if header == 'Connection' and 'Upgrade' in values:
self.disable_buffering = True
def response_class(self, sock, *args, **kwargs):
if self.disable_buffering:
kwargs['disable_buffering'] = True
return UnixHTTPResponse(sock, *args, **kwargs)
class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool): class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, base_url, socket_path, timeout=60, maxsize=10): def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
super().__init__( super(UnixHTTPConnectionPool, self).__init__(
'localhost', timeout=timeout, maxsize=maxsize 'localhost', timeout=timeout, maxsize=maxsize
) )
self.base_url = base_url self.base_url = base_url
@ -42,26 +69,23 @@ class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
) )
class UnixHTTPAdapter(BaseHTTPAdapter): class UnixAdapter(requests.adapters.HTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools', __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools',
'socket_path', 'socket_path',
'timeout', 'timeout']
'max_pool_size']
def __init__(self, socket_url, timeout=60, def __init__(self, socket_url, timeout=60,
pool_connections=constants.DEFAULT_NUM_POOLS, pool_connections=constants.DEFAULT_NUM_POOLS):
max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
socket_path = socket_url.replace('http+unix://', '') socket_path = socket_url.replace('http+unix://', '')
if not socket_path.startswith('/'): if not socket_path.startswith('/'):
socket_path = f"/{socket_path}" socket_path = '/' + socket_path
self.socket_path = socket_path self.socket_path = socket_path
self.timeout = timeout self.timeout = timeout
self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer( self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close() pool_connections, dispose_func=lambda p: p.close()
) )
super().__init__() super(UnixAdapter, self).__init__()
def get_connection(self, url, proxies=None): def get_connection(self, url, proxies=None):
with self.pools.lock: with self.pools.lock:
@ -70,8 +94,7 @@ class UnixHTTPAdapter(BaseHTTPAdapter):
return pool return pool
pool = UnixHTTPConnectionPool( pool = UnixHTTPConnectionPool(
url, self.socket_path, self.timeout, url, self.socket_path, self.timeout
maxsize=self.max_pool_size
) )
self.pools[url] = pool self.pools[url] = pool
@ -84,3 +107,6 @@ class UnixHTTPAdapter(BaseHTTPAdapter):
# anyway, we simply return the path URL directly. # anyway, we simply return the path URL directly.
# See also: https://github.com/docker/docker-py/issues/811 # See also: https://github.com/docker/docker-py/issues/811
return request.path_url return request.path_url
def close(self):
self.pools.clear()

View File

@ -1,24 +1,11 @@
from .containers import ContainerConfig, DeviceRequest, HostConfig, LogConfig, Ulimit # flake8: noqa
from .containers import ContainerConfig, HostConfig, LogConfig, Ulimit
from .daemon import CancellableStream from .daemon import CancellableStream
from .healthcheck import Healthcheck from .healthcheck import Healthcheck
from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig
from .services import ( from .services import (
ConfigReference, ConfigReference, ContainerSpec, DNSConfig, DriverConfig, EndpointSpec,
ContainerSpec, Mount, Placement, Privileges, Resources, RestartPolicy, SecretReference,
DNSConfig, ServiceMode, TaskTemplate, UpdateConfig
DriverConfig,
EndpointSpec,
Mount,
NetworkAttachmentConfig,
Placement,
PlacementPreference,
Privileges,
Resources,
RestartPolicy,
RollbackConfig,
SecretReference,
ServiceMode,
TaskTemplate,
UpdateConfig,
) )
from .swarm import SwarmExternalCA, SwarmSpec from .swarm import SwarmSpec, SwarmExternalCA

View File

@ -1,4 +1,7 @@
import six
class DictType(dict): class DictType(dict):
def __init__(self, init): def __init__(self, init):
for k, v in init.items(): for k, v in six.iteritems(init):
self[k] = v self[k] = v

View File

@ -1,22 +1,16 @@
import six
from .. import errors from .. import errors
from ..utils.utils import ( from ..utils.utils import (
convert_port_bindings, convert_port_bindings, convert_tmpfs_mounts, convert_volume_binds,
convert_tmpfs_mounts, format_environment, format_extra_hosts, normalize_links, parse_bytes,
convert_volume_binds, parse_devices, split_command, version_gte, version_lt,
format_environment,
format_extra_hosts,
normalize_links,
parse_bytes,
parse_devices,
split_command,
version_gte,
version_lt,
) )
from .base import DictType from .base import DictType
from .healthcheck import Healthcheck from .healthcheck import Healthcheck
class LogConfigTypesEnum: class LogConfigTypesEnum(object):
_values = ( _values = (
'json-file', 'json-file',
'syslog', 'syslog',
@ -29,38 +23,6 @@ class LogConfigTypesEnum:
class LogConfig(DictType): class LogConfig(DictType):
"""
Configure logging for a container, when provided as an argument to
:py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
You may refer to the
`official logging driver documentation <https://docs.docker.com/config/containers/logging/configure/>`_
for more information.
Args:
type (str): Indicate which log driver to use. A set of valid drivers
is provided as part of the :py:attr:`LogConfig.types`
enum. Other values may be accepted depending on the engine version
and available logging plugins.
config (dict): A driver-dependent configuration dictionary. Please
refer to the driver's documentation for a list of valid config
keys.
Example:
>>> from docker.types import LogConfig
>>> lc = LogConfig(type=LogConfig.types.JSON, config={
... 'max-size': '1g',
... 'labels': 'production_status,geo'
... })
>>> hc = client.create_host_config(log_config=lc)
>>> container = client.create_container('busybox', 'true',
... host_config=hc)
>>> client.inspect_container(container)['HostConfig']['LogConfig']
{
'Type': 'json-file',
'Config': {'labels': 'production_status,geo', 'max-size': '1g'}
}
"""
types = LogConfigTypesEnum types = LogConfigTypesEnum
def __init__(self, **kwargs): def __init__(self, **kwargs):
@ -70,7 +32,7 @@ class LogConfig(DictType):
if config and not isinstance(config, dict): if config and not isinstance(config, dict):
raise ValueError("LogConfig.config must be a dictionary") raise ValueError("LogConfig.config must be a dictionary")
super().__init__({ super(LogConfig, self).__init__({
'Type': log_driver_type, 'Type': log_driver_type,
'Config': config 'Config': config
}) })
@ -88,51 +50,25 @@ class LogConfig(DictType):
return self['Config'] return self['Config']
def set_config_value(self, key, value): def set_config_value(self, key, value):
""" Set a the value for ``key`` to ``value`` inside the ``config``
dict.
"""
self.config[key] = value self.config[key] = value
def unset_config(self, key): def unset_config(self, key):
""" Remove the ``key`` property from the ``config`` dict. """
if key in self.config: if key in self.config:
del self.config[key] del self.config[key]
class Ulimit(DictType): class Ulimit(DictType):
"""
Create a ulimit declaration to be used with
:py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
Args:
name (str): Which ulimit will this apply to. The valid names can be
found in '/etc/security/limits.conf' on a gnu/linux system.
soft (int): The soft limit for this ulimit. Optional.
hard (int): The hard limit for this ulimit. Optional.
Example:
>>> nproc_limit = docker.types.Ulimit(name='nproc', soft=1024)
>>> hc = client.create_host_config(ulimits=[nproc_limit])
>>> container = client.create_container(
'busybox', 'true', host_config=hc
)
>>> client.inspect_container(container)['HostConfig']['Ulimits']
[{'Name': 'nproc', 'Hard': 0, 'Soft': 1024}]
"""
def __init__(self, **kwargs): def __init__(self, **kwargs):
name = kwargs.get('name', kwargs.get('Name')) name = kwargs.get('name', kwargs.get('Name'))
soft = kwargs.get('soft', kwargs.get('Soft')) soft = kwargs.get('soft', kwargs.get('Soft'))
hard = kwargs.get('hard', kwargs.get('Hard')) hard = kwargs.get('hard', kwargs.get('Hard'))
if not isinstance(name, str): if not isinstance(name, six.string_types):
raise ValueError("Ulimit.name must be a string") raise ValueError("Ulimit.name must be a string")
if soft and not isinstance(soft, int): if soft and not isinstance(soft, int):
raise ValueError("Ulimit.soft must be an integer") raise ValueError("Ulimit.soft must be an integer")
if hard and not isinstance(hard, int): if hard and not isinstance(hard, int):
raise ValueError("Ulimit.hard must be an integer") raise ValueError("Ulimit.hard must be an integer")
super().__init__({ super(Ulimit, self).__init__({
'Name': name, 'Name': name,
'Soft': soft, 'Soft': soft,
'Hard': hard 'Hard': hard
@ -163,104 +99,6 @@ class Ulimit(DictType):
self['Hard'] = value self['Hard'] = value
class DeviceRequest(DictType):
"""
Create a device request to be used with
:py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
Args:
driver (str): Which driver to use for this device. Optional.
count (int): Number or devices to request. Optional.
Set to -1 to request all available devices.
device_ids (list): List of strings for device IDs. Optional.
Set either ``count`` or ``device_ids``.
capabilities (list): List of lists of strings to request
capabilities. Optional. The global list acts like an OR,
and the sub-lists are AND. The driver will try to satisfy
one of the sub-lists.
Available capabilities for the ``nvidia`` driver can be found
`here <https://github.com/NVIDIA/nvidia-container-runtime>`_.
options (dict): Driver-specific options. Optional.
"""
def __init__(self, **kwargs):
driver = kwargs.get('driver', kwargs.get('Driver'))
count = kwargs.get('count', kwargs.get('Count'))
device_ids = kwargs.get('device_ids', kwargs.get('DeviceIDs'))
capabilities = kwargs.get('capabilities', kwargs.get('Capabilities'))
options = kwargs.get('options', kwargs.get('Options'))
if driver is None:
driver = ''
elif not isinstance(driver, str):
raise ValueError('DeviceRequest.driver must be a string')
if count is None:
count = 0
elif not isinstance(count, int):
raise ValueError('DeviceRequest.count must be an integer')
if device_ids is None:
device_ids = []
elif not isinstance(device_ids, list):
raise ValueError('DeviceRequest.device_ids must be a list')
if capabilities is None:
capabilities = []
elif not isinstance(capabilities, list):
raise ValueError('DeviceRequest.capabilities must be a list')
if options is None:
options = {}
elif not isinstance(options, dict):
raise ValueError('DeviceRequest.options must be a dict')
super().__init__({
'Driver': driver,
'Count': count,
'DeviceIDs': device_ids,
'Capabilities': capabilities,
'Options': options
})
@property
def driver(self):
return self['Driver']
@driver.setter
def driver(self, value):
self['Driver'] = value
@property
def count(self):
return self['Count']
@count.setter
def count(self, value):
self['Count'] = value
@property
def device_ids(self):
return self['DeviceIDs']
@device_ids.setter
def device_ids(self, value):
self['DeviceIDs'] = value
@property
def capabilities(self):
return self['Capabilities']
@capabilities.setter
def capabilities(self, value):
self['Capabilities'] = value
@property
def options(self):
return self['Options']
@options.setter
def options(self, value):
self['Options'] = value
class HostConfig(dict): class HostConfig(dict):
def __init__(self, version, binds=None, port_bindings=None, def __init__(self, version, binds=None, port_bindings=None,
lxc_conf=None, publish_all_ports=False, links=None, lxc_conf=None, publish_all_ports=False, links=None,
@ -277,14 +115,13 @@ class HostConfig(dict):
device_read_iops=None, device_write_iops=None, device_read_iops=None, device_write_iops=None,
oom_kill_disable=False, shm_size=None, sysctls=None, oom_kill_disable=False, shm_size=None, sysctls=None,
tmpfs=None, oom_score_adj=None, dns_opt=None, cpu_shares=None, tmpfs=None, oom_score_adj=None, dns_opt=None, cpu_shares=None,
cpuset_cpus=None, userns_mode=None, uts_mode=None, cpuset_cpus=None, userns_mode=None, pids_limit=None,
pids_limit=None, isolation=None, auto_remove=False, isolation=None, auto_remove=False, storage_opt=None,
storage_opt=None, init=None, init_path=None, init=None, init_path=None, volume_driver=None,
volume_driver=None, cpu_count=None, cpu_percent=None, cpu_count=None, cpu_percent=None, nano_cpus=None,
nano_cpus=None, cpuset_mems=None, runtime=None, mounts=None, cpuset_mems=None, runtime=None, mounts=None,
cpu_rt_period=None, cpu_rt_runtime=None, cpu_rt_period=None, cpu_rt_runtime=None,
device_cgroup_rules=None, device_requests=None, device_cgroup_rules=None):
cgroupns=None):
if mem_limit is not None: if mem_limit is not None:
self['Memory'] = parse_bytes(mem_limit) self['Memory'] = parse_bytes(mem_limit)
@ -307,7 +144,7 @@ class HostConfig(dict):
self['MemorySwappiness'] = mem_swappiness self['MemorySwappiness'] = mem_swappiness
if shm_size is not None: if shm_size is not None:
if isinstance(shm_size, str): if isinstance(shm_size, six.string_types):
shm_size = parse_bytes(shm_size) shm_size = parse_bytes(shm_size)
self['ShmSize'] = shm_size self['ShmSize'] = shm_size
@ -344,11 +181,10 @@ class HostConfig(dict):
if dns_search: if dns_search:
self['DnsSearch'] = dns_search self['DnsSearch'] = dns_search
if network_mode == 'host' and port_bindings: if network_mode:
raise host_config_incompatible_error( self['NetworkMode'] = network_mode
'network_mode', 'host', 'port_bindings' elif network_mode is None:
) self['NetworkMode'] = 'default'
self['NetworkMode'] = network_mode or 'default'
if restart_policy: if restart_policy:
if not isinstance(restart_policy, dict): if not isinstance(restart_policy, dict):
@ -368,7 +204,7 @@ class HostConfig(dict):
self['Devices'] = parse_devices(devices) self['Devices'] = parse_devices(devices)
if group_add: if group_add:
self['GroupAdd'] = [str(grp) for grp in group_add] self['GroupAdd'] = [six.text_type(grp) for grp in group_add]
if dns is not None: if dns is not None:
self['Dns'] = dns self['Dns'] = dns
@ -388,11 +224,11 @@ class HostConfig(dict):
if not isinstance(sysctls, dict): if not isinstance(sysctls, dict):
raise host_config_type_error('sysctls', sysctls, 'dict') raise host_config_type_error('sysctls', sysctls, 'dict')
self['Sysctls'] = {} self['Sysctls'] = {}
for k, v in sysctls.items(): for k, v in six.iteritems(sysctls):
self['Sysctls'][k] = str(v) self['Sysctls'][k] = six.text_type(v)
if volumes_from is not None: if volumes_from is not None:
if isinstance(volumes_from, str): if isinstance(volumes_from, six.string_types):
volumes_from = volumes_from.split(',') volumes_from = volumes_from.split(',')
self['VolumesFrom'] = volumes_from self['VolumesFrom'] = volumes_from
@ -414,7 +250,7 @@ class HostConfig(dict):
if isinstance(lxc_conf, dict): if isinstance(lxc_conf, dict):
formatted = [] formatted = []
for k, v in lxc_conf.items(): for k, v in six.iteritems(lxc_conf):
formatted.append({'Key': k, 'Value': str(v)}) formatted.append({'Key': k, 'Value': str(v)})
lxc_conf = formatted lxc_conf = formatted
@ -428,10 +264,10 @@ class HostConfig(dict):
if not isinstance(ulimits, list): if not isinstance(ulimits, list):
raise host_config_type_error('ulimits', ulimits, 'list') raise host_config_type_error('ulimits', ulimits, 'list')
self['Ulimits'] = [] self['Ulimits'] = []
for lmt in ulimits: for l in ulimits:
if not isinstance(lmt, Ulimit): if not isinstance(l, Ulimit):
lmt = Ulimit(**lmt) l = Ulimit(**l)
self['Ulimits'].append(lmt) self['Ulimits'].append(l)
if log_config is not None: if log_config is not None:
if not isinstance(log_config, LogConfig): if not isinstance(log_config, LogConfig):
@ -556,11 +392,6 @@ class HostConfig(dict):
raise host_config_value_error("userns_mode", userns_mode) raise host_config_value_error("userns_mode", userns_mode)
self['UsernsMode'] = userns_mode self['UsernsMode'] = userns_mode
if uts_mode:
if uts_mode != "host":
raise host_config_value_error("uts_mode", uts_mode)
self['UTSMode'] = uts_mode
if pids_limit: if pids_limit:
if not isinstance(pids_limit, int): if not isinstance(pids_limit, int):
raise host_config_type_error('pids_limit', pids_limit, 'int') raise host_config_type_error('pids_limit', pids_limit, 'int')
@ -569,7 +400,7 @@ class HostConfig(dict):
self["PidsLimit"] = pids_limit self["PidsLimit"] = pids_limit
if isolation: if isolation:
if not isinstance(isolation, str): if not isinstance(isolation, six.string_types):
raise host_config_type_error('isolation', isolation, 'string') raise host_config_type_error('isolation', isolation, 'string')
if version_lt(version, '1.24'): if version_lt(version, '1.24'):
raise host_config_version_error('isolation', '1.24') raise host_config_version_error('isolation', '1.24')
@ -619,7 +450,7 @@ class HostConfig(dict):
self['CpuPercent'] = cpu_percent self['CpuPercent'] = cpu_percent
if nano_cpus: if nano_cpus:
if not isinstance(nano_cpus, int): if not isinstance(nano_cpus, six.integer_types):
raise host_config_type_error('nano_cpus', nano_cpus, 'int') raise host_config_type_error('nano_cpus', nano_cpus, 'int')
if version_lt(version, '1.25'): if version_lt(version, '1.25'):
raise host_config_version_error('nano_cpus', '1.25') raise host_config_version_error('nano_cpus', '1.25')
@ -645,44 +476,21 @@ class HostConfig(dict):
) )
self['DeviceCgroupRules'] = device_cgroup_rules self['DeviceCgroupRules'] = device_cgroup_rules
if device_requests is not None:
if version_lt(version, '1.40'):
raise host_config_version_error('device_requests', '1.40')
if not isinstance(device_requests, list):
raise host_config_type_error(
'device_requests', device_requests, 'list'
)
self['DeviceRequests'] = []
for req in device_requests:
if not isinstance(req, DeviceRequest):
req = DeviceRequest(**req)
self['DeviceRequests'].append(req)
if cgroupns:
self['CgroupnsMode'] = cgroupns
def host_config_type_error(param, param_value, expected): def host_config_type_error(param, param_value, expected):
return TypeError( error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
f'Invalid type for {param} param: expected {expected} ' return TypeError(error_msg.format(param, expected, type(param_value)))
f'but found {type(param_value)}'
)
def host_config_version_error(param, version, less_than=True): def host_config_version_error(param, version, less_than=True):
operator = '<' if less_than else '>' operator = '<' if less_than else '>'
return errors.InvalidVersion( error_msg = '{0} param is not supported in API versions {1} {2}'
f'{param} param is not supported in API versions {operator} {version}', return errors.InvalidVersion(error_msg.format(param, operator, version))
)
def host_config_value_error(param, param_value): def host_config_value_error(param, param_value):
return ValueError(f'Invalid value for {param} param: {param_value}') error_msg = 'Invalid value for {0} param: {1}'
return ValueError(error_msg.format(param, param_value))
def host_config_incompatible_error(param, param_value, incompatible_param):
return errors.InvalidArgument(
f'\"{param_value}\" {param} is incompatible with {incompatible_param}'
)
class ContainerConfig(dict): class ContainerConfig(dict):
@ -712,17 +520,17 @@ class ContainerConfig(dict):
'version 1.29' 'version 1.29'
) )
if isinstance(command, str): if isinstance(command, six.string_types):
command = split_command(command) command = split_command(command)
if isinstance(entrypoint, str): if isinstance(entrypoint, six.string_types):
entrypoint = split_command(entrypoint) entrypoint = split_command(entrypoint)
if isinstance(environment, dict): if isinstance(environment, dict):
environment = format_environment(environment) environment = format_environment(environment)
if isinstance(labels, list): if isinstance(labels, list):
labels = {lbl: '' for lbl in labels} labels = dict((lbl, six.text_type('')) for lbl in labels)
if isinstance(ports, list): if isinstance(ports, list):
exposed_ports = {} exposed_ports = {}
@ -733,10 +541,10 @@ class ContainerConfig(dict):
if len(port_definition) == 2: if len(port_definition) == 2:
proto = port_definition[1] proto = port_definition[1]
port = port_definition[0] port = port_definition[0]
exposed_ports[f'{port}/{proto}'] = {} exposed_ports['{0}/{1}'.format(port, proto)] = {}
ports = exposed_ports ports = exposed_ports
if isinstance(volumes, str): if isinstance(volumes, six.string_types):
volumes = [volumes, ] volumes = [volumes, ]
if isinstance(volumes, list): if isinstance(volumes, list):
@ -765,7 +573,7 @@ class ContainerConfig(dict):
'Hostname': hostname, 'Hostname': hostname,
'Domainname': domainname, 'Domainname': domainname,
'ExposedPorts': ports, 'ExposedPorts': ports,
'User': str(user) if user is not None else None, 'User': six.text_type(user) if user else None,
'Tty': tty, 'Tty': tty,
'OpenStdin': stdin_open, 'OpenStdin': stdin_open,
'StdinOnce': stdin_once, 'StdinOnce': stdin_once,

View File

@ -1,18 +1,19 @@
import socket import socket
import urllib3 try:
import requests.packages.urllib3 as urllib3
from ..errors import DockerException except ImportError:
import urllib3
class CancellableStream: class CancellableStream(object):
""" """
Stream wrapper for real-time events, logs, etc. from the server. Stream wrapper for real-time events, logs, etc. from the server.
Example: Example:
>>> events = client.events() >>> events = client.events()
>>> for event in events: >>> for event in events:
... print(event) ... print event
>>> # and cancel from another thread >>> # and cancel from another thread
>>> events.close() >>> events.close()
""" """
@ -28,9 +29,9 @@ class CancellableStream:
try: try:
return next(self._stream) return next(self._stream)
except urllib3.exceptions.ProtocolError: except urllib3.exceptions.ProtocolError:
raise StopIteration from None raise StopIteration
except OSError: except socket.error:
raise StopIteration from None raise StopIteration
next = __next__ next = __next__
@ -54,17 +55,9 @@ class CancellableStream:
elif hasattr(sock_raw, '_sock'): elif hasattr(sock_raw, '_sock'):
sock = sock_raw._sock sock = sock_raw._sock
elif hasattr(sock_fp, 'channel'):
# We're working with a paramiko (SSH) channel, which doesn't
# support cancelable streams with the current implementation
raise DockerException(
'Cancellable streams not supported for the SSH protocol'
)
else: else:
sock = sock_fp._sock sock = sock_fp._sock
if isinstance(sock, urllib3.contrib.pyopenssl.WrappedSocket):
if hasattr(urllib3.contrib, 'pyopenssl') and isinstance(
sock, urllib3.contrib.pyopenssl.WrappedSocket):
sock = sock.socket sock = sock.socket
sock.shutdown(socket.SHUT_RDWR) sock.shutdown(socket.SHUT_RDWR)

View File

@ -1,5 +1,7 @@
from .base import DictType from .base import DictType
import six
class Healthcheck(DictType): class Healthcheck(DictType):
""" """
@ -12,7 +14,7 @@ class Healthcheck(DictType):
- Empty list: Inherit healthcheck from parent image - Empty list: Inherit healthcheck from parent image
- ``["NONE"]``: Disable healthcheck - ``["NONE"]``: Disable healthcheck
- ``["CMD", args...]``: exec arguments directly. - ``["CMD", args...]``: exec arguments directly.
- ``["CMD-SHELL", command]``: Run command in the system's - ``["CMD-SHELL", command]``: RUn command in the system's
default shell. default shell.
If a string is provided, it will be used as a ``CMD-SHELL`` If a string is provided, it will be used as a ``CMD-SHELL``
@ -21,15 +23,15 @@ class Healthcheck(DictType):
should be 0 or at least 1000000 (1 ms). should be 0 or at least 1000000 (1 ms).
timeout (int): The time to wait before considering the check to timeout (int): The time to wait before considering the check to
have hung. It should be 0 or at least 1000000 (1 ms). have hung. It should be 0 or at least 1000000 (1 ms).
retries (int): The number of consecutive failures needed to retries (integer): The number of consecutive failures needed to
consider a container as unhealthy. consider a container as unhealthy.
start_period (int): Start period for the container to start_period (integer): Start period for the container to
initialize before starting health-retries countdown in initialize before starting health-retries countdown in
nanoseconds. It should be 0 or at least 1000000 (1 ms). nanoseconds. It should be 0 or at least 1000000 (1 ms).
""" """
def __init__(self, **kwargs): def __init__(self, **kwargs):
test = kwargs.get('test', kwargs.get('Test')) test = kwargs.get('test', kwargs.get('Test'))
if isinstance(test, str): if isinstance(test, six.string_types):
test = ["CMD-SHELL", test] test = ["CMD-SHELL", test]
interval = kwargs.get('interval', kwargs.get('Interval')) interval = kwargs.get('interval', kwargs.get('Interval'))
@ -37,7 +39,7 @@ class Healthcheck(DictType):
retries = kwargs.get('retries', kwargs.get('Retries')) retries = kwargs.get('retries', kwargs.get('Retries'))
start_period = kwargs.get('start_period', kwargs.get('StartPeriod')) start_period = kwargs.get('start_period', kwargs.get('StartPeriod'))
super().__init__({ super(Healthcheck, self).__init__({
'Test': test, 'Test': test,
'Interval': interval, 'Interval': interval,
'Timeout': timeout, 'Timeout': timeout,
@ -51,8 +53,6 @@ class Healthcheck(DictType):
@test.setter @test.setter
def test(self, value): def test(self, value):
if isinstance(value, str):
value = ["CMD-SHELL", value]
self['Test'] = value self['Test'] = value
@property @property

View File

@ -4,8 +4,7 @@ from ..utils import normalize_links, version_lt
class EndpointConfig(dict): class EndpointConfig(dict):
def __init__(self, version, aliases=None, links=None, ipv4_address=None, def __init__(self, version, aliases=None, links=None, ipv4_address=None,
ipv6_address=None, link_local_ips=None, driver_opt=None, ipv6_address=None, link_local_ips=None):
mac_address=None):
if version_lt(version, '1.22'): if version_lt(version, '1.22'):
raise errors.InvalidVersion( raise errors.InvalidVersion(
'Endpoint config is not supported for API version < 1.22' 'Endpoint config is not supported for API version < 1.22'
@ -24,13 +23,6 @@ class EndpointConfig(dict):
if ipv6_address: if ipv6_address:
ipam_config['IPv6Address'] = ipv6_address ipam_config['IPv6Address'] = ipv6_address
if mac_address:
if version_lt(version, '1.25'):
raise errors.InvalidVersion(
'mac_address is not supported for API version < 1.25'
)
self['MacAddress'] = mac_address
if link_local_ips is not None: if link_local_ips is not None:
if version_lt(version, '1.24'): if version_lt(version, '1.24'):
raise errors.InvalidVersion( raise errors.InvalidVersion(
@ -41,15 +33,6 @@ class EndpointConfig(dict):
if ipam_config: if ipam_config:
self['IPAMConfig'] = ipam_config self['IPAMConfig'] = ipam_config
if driver_opt:
if version_lt(version, '1.32'):
raise errors.InvalidVersion(
'DriverOpts is not supported for API version < 1.32'
)
if not isinstance(driver_opt, dict):
raise TypeError('driver_opt must be a dictionary')
self['DriverOpts'] = driver_opt
class NetworkingConfig(dict): class NetworkingConfig(dict):
def __init__(self, endpoints_config=None): def __init__(self, endpoints_config=None):

View File

@ -1,12 +1,10 @@
import six
from .. import errors from .. import errors
from ..constants import IS_WINDOWS_PLATFORM from ..constants import IS_WINDOWS_PLATFORM
from ..utils import ( from ..utils import (
check_resource, check_resource, format_environment, format_extra_hosts, parse_bytes,
convert_service_networks, split_command, convert_service_networks,
format_environment,
format_extra_hosts,
parse_bytes,
split_command,
) )
@ -28,12 +26,11 @@ class TaskTemplate(dict):
placement (Placement): Placement instructions for the scheduler. placement (Placement): Placement instructions for the scheduler.
If a list is passed instead, it is assumed to be a list of If a list is passed instead, it is assumed to be a list of
constraints as part of a :py:class:`Placement` object. constraints as part of a :py:class:`Placement` object.
networks (:py:class:`list`): List of network names or IDs or networks (:py:class:`list`): List of network names or IDs to attach
:py:class:`NetworkAttachmentConfig` to attach the service to. the containers to.
force_update (int): A counter that triggers an update even if no force_update (int): A counter that triggers an update even if no
relevant parameters have been changed. relevant parameters have been changed.
""" """
def __init__(self, container_spec, resources=None, restart_policy=None, def __init__(self, container_spec, resources=None, restart_policy=None,
placement=None, log_driver=None, networks=None, placement=None, log_driver=None, networks=None,
force_update=None): force_update=None):
@ -113,26 +110,16 @@ class ContainerSpec(dict):
privileges (Privileges): Security options for the service's containers. privileges (Privileges): Security options for the service's containers.
isolation (string): Isolation technology used by the service's isolation (string): Isolation technology used by the service's
containers. Only used for Windows containers. containers. Only used for Windows containers.
init (boolean): Run an init inside the container that forwards signals
and reaps processes.
cap_add (:py:class:`list`): A list of kernel capabilities to add to the
default set for the container.
cap_drop (:py:class:`list`): A list of kernel capabilities to drop from
the default set for the container.
sysctls (:py:class:`dict`): A dict of sysctl values to add to
the container
""" """
def __init__(self, image, command=None, args=None, hostname=None, env=None, def __init__(self, image, command=None, args=None, hostname=None, env=None,
workdir=None, user=None, labels=None, mounts=None, workdir=None, user=None, labels=None, mounts=None,
stop_grace_period=None, secrets=None, tty=None, groups=None, stop_grace_period=None, secrets=None, tty=None, groups=None,
open_stdin=None, read_only=None, stop_signal=None, open_stdin=None, read_only=None, stop_signal=None,
healthcheck=None, hosts=None, dns_config=None, configs=None, healthcheck=None, hosts=None, dns_config=None, configs=None,
privileges=None, isolation=None, init=None, cap_add=None, privileges=None, isolation=None):
cap_drop=None, sysctls=None):
self['Image'] = image self['Image'] = image
if isinstance(command, str): if isinstance(command, six.string_types):
command = split_command(command) command = split_command(command)
self['Command'] = command self['Command'] = command
self['Args'] = args self['Args'] = args
@ -162,7 +149,7 @@ class ContainerSpec(dict):
if mounts is not None: if mounts is not None:
parsed_mounts = [] parsed_mounts = []
for mount in mounts: for mount in mounts:
if isinstance(mount, str): if isinstance(mount, six.string_types):
parsed_mounts.append(Mount.parse_mount_string(mount)) parsed_mounts.append(Mount.parse_mount_string(mount))
else: else:
# If mount already parsed # If mount already parsed
@ -196,27 +183,6 @@ class ContainerSpec(dict):
if isolation is not None: if isolation is not None:
self['Isolation'] = isolation self['Isolation'] = isolation
if init is not None:
self['Init'] = init
if cap_add is not None:
if not isinstance(cap_add, list):
raise TypeError('cap_add must be a list')
self['CapabilityAdd'] = cap_add
if cap_drop is not None:
if not isinstance(cap_drop, list):
raise TypeError('cap_drop must be a list')
self['CapabilityDrop'] = cap_drop
if sysctls is not None:
if not isinstance(sysctls, dict):
raise TypeError('sysctls must be a dict')
self['Sysctls'] = sysctls
class Mount(dict): class Mount(dict):
""" """
@ -242,20 +208,18 @@ class Mount(dict):
for the ``volume`` type. for the ``volume`` type.
driver_config (DriverConfig): Volume driver configuration. Only valid driver_config (DriverConfig): Volume driver configuration. Only valid
for the ``volume`` type. for the ``volume`` type.
subpath (str): Path inside a volume to mount instead of the volume root.
tmpfs_size (int or string): The size for the tmpfs mount in bytes. tmpfs_size (int or string): The size for the tmpfs mount in bytes.
tmpfs_mode (int): The permission mode for the tmpfs mount. tmpfs_mode (int): The permission mode for the tmpfs mount.
""" """
def __init__(self, target, source, type='volume', read_only=False, def __init__(self, target, source, type='volume', read_only=False,
consistency=None, propagation=None, no_copy=False, consistency=None, propagation=None, no_copy=False,
labels=None, driver_config=None, tmpfs_size=None, labels=None, driver_config=None, tmpfs_size=None,
tmpfs_mode=None, subpath=None): tmpfs_mode=None):
self['Target'] = target self['Target'] = target
self['Source'] = source self['Source'] = source
if type not in ('bind', 'volume', 'tmpfs', 'npipe'): if type not in ('bind', 'volume', 'tmpfs', 'npipe'):
raise errors.InvalidArgument( raise errors.InvalidArgument(
f'Unsupported mount type: "{type}"' 'Unsupported mount type: "{}"'.format(type)
) )
self['Type'] = type self['Type'] = type
self['ReadOnly'] = read_only self['ReadOnly'] = read_only
@ -268,7 +232,7 @@ class Mount(dict):
self['BindOptions'] = { self['BindOptions'] = {
'Propagation': propagation 'Propagation': propagation
} }
if any([labels, driver_config, no_copy, tmpfs_size, tmpfs_mode, subpath]): if any([labels, driver_config, no_copy, tmpfs_size, tmpfs_mode]):
raise errors.InvalidArgument( raise errors.InvalidArgument(
'Incompatible options have been provided for the bind ' 'Incompatible options have been provided for the bind '
'type mount.' 'type mount.'
@ -281,8 +245,6 @@ class Mount(dict):
volume_opts['Labels'] = labels volume_opts['Labels'] = labels
if driver_config: if driver_config:
volume_opts['DriverConfig'] = driver_config volume_opts['DriverConfig'] = driver_config
if subpath:
volume_opts['Subpath'] = subpath
if volume_opts: if volume_opts:
self['VolumeOptions'] = volume_opts self['VolumeOptions'] = volume_opts
if any([propagation, tmpfs_size, tmpfs_mode]): if any([propagation, tmpfs_size, tmpfs_mode]):
@ -293,7 +255,7 @@ class Mount(dict):
elif type == 'tmpfs': elif type == 'tmpfs':
tmpfs_opts = {} tmpfs_opts = {}
if tmpfs_mode: if tmpfs_mode:
if not isinstance(tmpfs_mode, int): if not isinstance(tmpfs_mode, six.integer_types):
raise errors.InvalidArgument( raise errors.InvalidArgument(
'tmpfs_mode must be an integer' 'tmpfs_mode must be an integer'
) )
@ -313,7 +275,7 @@ class Mount(dict):
parts = string.split(':') parts = string.split(':')
if len(parts) > 3: if len(parts) > 3:
raise errors.InvalidArgument( raise errors.InvalidArgument(
f'Invalid mount format "{string}"' 'Invalid mount format "{0}"'.format(string)
) )
if len(parts) == 1: if len(parts) == 1:
return cls(target=parts[0], source=None) return cls(target=parts[0], source=None)
@ -349,7 +311,6 @@ class Resources(dict):
``{ resource_name: resource_value }``. Alternatively, a list of ``{ resource_name: resource_value }``. Alternatively, a list of
of resource specifications as defined by the Engine API. of resource specifications as defined by the Engine API.
""" """
def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None, def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None,
mem_reservation=None, generic_resources=None): mem_reservation=None, generic_resources=None):
limits = {} limits = {}
@ -377,20 +338,20 @@ def _convert_generic_resources_dict(generic_resources):
return generic_resources return generic_resources
if not isinstance(generic_resources, dict): if not isinstance(generic_resources, dict):
raise errors.InvalidArgument( raise errors.InvalidArgument(
'generic_resources must be a dict or a list ' 'generic_resources must be a dict or a list'
f'(found {type(generic_resources)})' ' (found {})'.format(type(generic_resources))
) )
resources = [] resources = []
for kind, value in generic_resources.items(): for kind, value in six.iteritems(generic_resources):
resource_type = None resource_type = None
if isinstance(value, int): if isinstance(value, int):
resource_type = 'DiscreteResourceSpec' resource_type = 'DiscreteResourceSpec'
elif isinstance(value, str): elif isinstance(value, str):
resource_type = 'NamedResourceSpec' resource_type = 'NamedResourceSpec'
else: else:
kv = {kind: value}
raise errors.InvalidArgument( raise errors.InvalidArgument(
f'Unsupported generic resource reservation type: {kv}' 'Unsupported generic resource reservation '
'type: {}'.format({kind: value})
) )
resources.append({ resources.append({
resource_type: {'Kind': kind, 'Value': value} resource_type: {'Kind': kind, 'Value': value}
@ -407,28 +368,26 @@ class UpdateConfig(dict):
parallelism (int): Maximum number of tasks to be updated in one parallelism (int): Maximum number of tasks to be updated in one
iteration (0 means unlimited parallelism). Default: 0. iteration (0 means unlimited parallelism). Default: 0.
delay (int): Amount of time between updates, in nanoseconds. delay (int): Amount of time between updates.
failure_action (string): Action to take if an updated task fails to failure_action (string): Action to take if an updated task fails to
run, or stops running during the update. Acceptable values are run, or stops running during the update. Acceptable values are
``continue``, ``pause``, as well as ``rollback`` since API v1.28. ``continue`` and ``pause``. Default: ``continue``
Default: ``continue``
monitor (int): Amount of time to monitor each updated task for monitor (int): Amount of time to monitor each updated task for
failures, in nanoseconds. failures, in nanoseconds.
max_failure_ratio (float): The fraction of tasks that may fail during max_failure_ratio (float): The fraction of tasks that may fail during
an update before the failure action is invoked, specified as a an update before the failure action is invoked, specified as a
floating point number between 0 and 1. Default: 0 floating point number between 0 and 1. Default: 0
order (string): Specifies the order of operations when rolling out an order (string): Specifies the order of operations when rolling out an
updated task. Either ``start-first`` or ``stop-first`` are accepted. updated task. Either ``start_first`` or ``stop_first`` are accepted.
""" """
def __init__(self, parallelism=0, delay=None, failure_action='continue', def __init__(self, parallelism=0, delay=None, failure_action='continue',
monitor=None, max_failure_ratio=None, order=None): monitor=None, max_failure_ratio=None, order=None):
self['Parallelism'] = parallelism self['Parallelism'] = parallelism
if delay is not None: if delay is not None:
self['Delay'] = delay self['Delay'] = delay
if failure_action not in ('pause', 'continue', 'rollback'): if failure_action not in ('pause', 'continue'):
raise errors.InvalidArgument( raise errors.InvalidArgument(
'failure_action must be one of `pause`, `continue`, `rollback`' 'failure_action must be either `pause` or `continue`.'
) )
self['FailureAction'] = failure_action self['FailureAction'] = failure_action
@ -454,32 +413,7 @@ class UpdateConfig(dict):
self['Order'] = order self['Order'] = order
class RollbackConfig(UpdateConfig): class RestartConditionTypesEnum(object):
"""
Used to specify the way container rollbacks should be performed by a
service
Args:
parallelism (int): Maximum number of tasks to be rolled back in one
iteration (0 means unlimited parallelism). Default: 0
delay (int): Amount of time between rollbacks, in nanoseconds.
failure_action (string): Action to take if a rolled back task fails to
run, or stops running during the rollback. Acceptable values are
``continue``, ``pause`` or ``rollback``.
Default: ``continue``
monitor (int): Amount of time to monitor each rolled back task for
failures, in nanoseconds.
max_failure_ratio (float): The fraction of tasks that may fail during
a rollback before the failure action is invoked, specified as a
floating point number between 0 and 1. Default: 0
order (string): Specifies the order of operations when rolling out a
rolled back task. Either ``start-first`` or ``stop-first`` are
accepted.
"""
pass
class RestartConditionTypesEnum:
_values = ( _values = (
'none', 'none',
'on-failure', 'on-failure',
@ -510,7 +444,7 @@ class RestartPolicy(dict):
max_attempts=0, window=0): max_attempts=0, window=0):
if condition not in self.condition_types._values: if condition not in self.condition_types._values:
raise TypeError( raise TypeError(
f'Invalid RestartPolicy condition {condition}' 'Invalid RestartPolicy condition {0}'.format(condition)
) )
self['Condition'] = condition self['Condition'] = condition
@ -532,7 +466,6 @@ class DriverConfig(dict):
name (string): Name of the driver to use. name (string): Name of the driver to use.
options (dict): Driver-specific options. Default: ``None``. options (dict): Driver-specific options. Default: ``None``.
""" """
def __init__(self, name, options=None): def __init__(self, name, options=None):
self['Name'] = name self['Name'] = name
if options: if options:
@ -554,7 +487,6 @@ class EndpointSpec(dict):
is ``(target_port [, protocol [, publish_mode]])``. is ``(target_port [, protocol [, publish_mode]])``.
Ports can only be provided if the ``vip`` resolution mode is used. Ports can only be provided if the ``vip`` resolution mode is used.
""" """
def __init__(self, mode=None, ports=None): def __init__(self, mode=None, ports=None):
if ports: if ports:
self['Ports'] = convert_service_ports(ports) self['Ports'] = convert_service_ports(ports)
@ -571,7 +503,7 @@ def convert_service_ports(ports):
) )
result = [] result = []
for k, v in ports.items(): for k, v in six.iteritems(ports):
port_spec = { port_spec = {
'Protocol': 'tcp', 'Protocol': 'tcp',
'PublishedPort': k 'PublishedPort': k
@ -597,70 +529,37 @@ def convert_service_ports(ports):
class ServiceMode(dict): class ServiceMode(dict):
""" """
Indicate whether a service or a job should be deployed as a replicated Indicate whether a service should be deployed as a replicated or global
or global service, and associated parameters service, and associated parameters
Args: Args:
mode (string): Can be either ``replicated``, ``global``, mode (string): Can be either ``replicated`` or ``global``
``replicated-job`` or ``global-job``
replicas (int): Number of replicas. For replicated services only. replicas (int): Number of replicas. For replicated services only.
concurrency (int): Number of concurrent jobs. For replicated job
services only.
""" """
def __init__(self, mode, replicas=None):
def __init__(self, mode, replicas=None, concurrency=None): if mode not in ('replicated', 'global'):
replicated_modes = ('replicated', 'replicated-job')
supported_modes = replicated_modes + ('global', 'global-job')
if mode not in supported_modes:
raise errors.InvalidArgument( raise errors.InvalidArgument(
'mode must be either "replicated", "global", "replicated-job"' 'mode must be either "replicated" or "global"'
' or "global-job"'
) )
if mode != 'replicated' and replicas is not None:
if mode not in replicated_modes: raise errors.InvalidArgument(
'replicas can only be used for replicated mode'
)
self[mode] = {}
if replicas is not None: if replicas is not None:
raise errors.InvalidArgument( self[mode]['Replicas'] = replicas
'replicas can only be used for "replicated" or'
' "replicated-job" mode'
)
if concurrency is not None: @property
raise errors.InvalidArgument( def mode(self):
'concurrency can only be used for "replicated-job" mode' if 'global' in self:
) return 'global'
return 'replicated'
service_mode = self._convert_mode(mode)
self.mode = service_mode
self[service_mode] = {}
if replicas is not None:
if mode == 'replicated':
self[service_mode]['Replicas'] = replicas
if mode == 'replicated-job':
self[service_mode]['MaxConcurrent'] = concurrency or 1
self[service_mode]['TotalCompletions'] = replicas
@staticmethod
def _convert_mode(original_mode):
if original_mode == 'global-job':
return 'GlobalJob'
if original_mode == 'replicated-job':
return 'ReplicatedJob'
return original_mode
@property @property
def replicas(self): def replicas(self):
if 'replicated' in self: if self.mode != 'replicated':
return self['replicated'].get('Replicas')
if 'ReplicatedJob' in self:
return self['ReplicatedJob'].get('TotalCompletions')
return None return None
return self['replicated'].get('Replicas')
class SecretReference(dict): class SecretReference(dict):
@ -724,29 +623,18 @@ class Placement(dict):
Placement constraints to be used as part of a :py:class:`TaskTemplate` Placement constraints to be used as part of a :py:class:`TaskTemplate`
Args: Args:
constraints (:py:class:`list` of str): A list of constraints constraints (:py:class:`list`): A list of constraints
preferences (:py:class:`list` of tuple): Preferences provide a way preferences (:py:class:`list`): Preferences provide a way to make
to make the scheduler aware of factors such as topology. They the scheduler aware of factors such as topology. They are
are provided in order from highest to lowest precedence and provided in order from highest to lowest precedence.
are expressed as ``(strategy, descriptor)`` tuples. See platforms (:py:class:`list`): A list of platforms expressed as
:py:class:`PlacementPreference` for details. ``(arch, os)`` tuples
maxreplicas (int): Maximum number of replicas per node
platforms (:py:class:`list` of tuple): A list of platforms
expressed as ``(arch, os)`` tuples
""" """
def __init__(self, constraints=None, preferences=None, platforms=None):
def __init__(self, constraints=None, preferences=None, platforms=None,
maxreplicas=None):
if constraints is not None: if constraints is not None:
self['Constraints'] = constraints self['Constraints'] = constraints
if preferences is not None: if preferences is not None:
self['Preferences'] = [] self['Preferences'] = preferences
for pref in preferences:
if isinstance(pref, tuple):
pref = PlacementPreference(*pref)
self['Preferences'].append(pref)
if maxreplicas is not None:
self['MaxReplicas'] = maxreplicas
if platforms: if platforms:
self['Platforms'] = [] self['Platforms'] = []
for plat in platforms: for plat in platforms:
@ -755,28 +643,6 @@ class Placement(dict):
}) })
class PlacementPreference(dict):
"""
Placement preference to be used as an element in the list of
preferences for :py:class:`Placement` objects.
Args:
strategy (string): The placement strategy to implement. Currently,
the only supported strategy is ``spread``.
descriptor (string): A label descriptor. For the spread strategy,
the scheduler will try to spread tasks evenly over groups of
nodes identified by this label.
"""
def __init__(self, strategy, descriptor):
if strategy != 'spread':
raise errors.InvalidArgument(
f'PlacementPreference strategy value is invalid ({strategy}): '
'must be "spread".'
)
self['Spread'] = {'SpreadDescriptor': descriptor}
class DNSConfig(dict): class DNSConfig(dict):
""" """
Specification for DNS related configurations in resolver configuration Specification for DNS related configurations in resolver configuration
@ -789,7 +655,6 @@ class DNSConfig(dict):
options (:py:class:`list`): A list of internal resolver variables options (:py:class:`list`): A list of internal resolver variables
to be modified (e.g., ``debug``, ``ndots:3``, etc.). to be modified (e.g., ``debug``, ``ndots:3``, etc.).
""" """
def __init__(self, nameservers=None, search=None, options=None): def __init__(self, nameservers=None, search=None, options=None):
self['Nameservers'] = nameservers self['Nameservers'] = nameservers
self['Search'] = search self['Search'] = search
@ -797,7 +662,7 @@ class DNSConfig(dict):
class Privileges(dict): class Privileges(dict):
r""" """
Security options for a service's containers. Security options for a service's containers.
Part of a :py:class:`ContainerSpec` definition. Part of a :py:class:`ContainerSpec` definition.
@ -820,7 +685,6 @@ class Privileges(dict):
selinux_type (string): SELinux type label selinux_type (string): SELinux type label
selinux_level (string): SELinux level label selinux_level (string): SELinux level label
""" """
def __init__(self, credentialspec_file=None, credentialspec_registry=None, def __init__(self, credentialspec_file=None, credentialspec_registry=None,
selinux_disable=None, selinux_user=None, selinux_role=None, selinux_disable=None, selinux_user=None, selinux_role=None,
selinux_type=None, selinux_level=None): selinux_type=None, selinux_level=None):
@ -849,22 +713,3 @@ class Privileges(dict):
if len(selinux_context) > 0: if len(selinux_context) > 0:
self['SELinuxContext'] = selinux_context self['SELinuxContext'] = selinux_context
class NetworkAttachmentConfig(dict):
"""
Network attachment options for a service.
Args:
target (str): The target network for attachment.
Can be a network name or ID.
aliases (:py:class:`list`): A list of discoverable alternate names
for the service.
options (:py:class:`dict`): Driver attachment options for the
network target.
"""
def __init__(self, target, aliases=None, options=None):
self['Target'] = target
self['Aliases'] = aliases
self['DriverOpts'] = options

View File

@ -1,28 +1,13 @@
# flake8: noqa
from .build import create_archive, exclude_paths, match_tag, mkbuildcontext, tar from .build import create_archive, exclude_paths, mkbuildcontext, tar
from .decorators import check_resource, minimum_version, update_headers from .decorators import check_resource, minimum_version, update_headers
from .utils import ( from .utils import (
compare_version, compare_version, convert_port_bindings, convert_volume_binds,
convert_filters, parse_repository_tag, parse_host,
convert_port_bindings, kwargs_from_env, convert_filters, datetime_to_timestamp,
convert_service_networks, create_host_config, parse_bytes, parse_env_file, version_lt,
convert_volume_binds, version_gte, decode_json_header, split_command, create_ipam_config,
create_host_config, create_ipam_pool, parse_devices, normalize_links, convert_service_networks,
create_ipam_config, format_environment, format_extra_hosts
create_ipam_pool,
datetime_to_timestamp,
decode_json_header,
format_environment,
format_extra_hosts,
kwargs_from_env,
normalize_links,
parse_bytes,
parse_devices,
parse_env_file,
parse_host,
parse_repository_tag,
split_command,
version_gte,
version_lt,
) )

View File

@ -4,19 +4,13 @@ import re
import tarfile import tarfile
import tempfile import tempfile
from ..constants import IS_WINDOWS_PLATFORM import six
from .fnmatch import fnmatch from .fnmatch import fnmatch
from ..constants import IS_WINDOWS_PLATFORM
_SEP = re.compile('/|\\\\') if IS_WINDOWS_PLATFORM else re.compile('/') _SEP = re.compile('/|\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')
_TAG = re.compile(
r"^[a-z0-9]+((\.|_|__|-+)[a-z0-9]+)*"
r"(?::[0-9]+)?(/[a-z0-9]+((\.|_|__|-+)[a-z0-9]+)*)*"
r"(:[a-zA-Z0-9_][a-zA-Z0-9._-]{0,127})?$"
)
def match_tag(tag: str) -> bool:
return bool(_TAG.match(tag))
def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False): def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
@ -50,7 +44,7 @@ def exclude_paths(root, patterns, dockerfile=None):
if dockerfile is None: if dockerfile is None:
dockerfile = 'Dockerfile' dockerfile = 'Dockerfile'
patterns.append(f"!{dockerfile}") patterns.append('!' + dockerfile)
pm = PatternMatcher(patterns) pm = PatternMatcher(patterns)
return set(pm.walk(root)) return set(pm.walk(root))
@ -75,7 +69,7 @@ def create_archive(root, files=None, fileobj=None, gzip=False,
t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj) t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
if files is None: if files is None:
files = build_file_list(root) files = build_file_list(root)
extra_names = {e[0] for e in extra_files} extra_names = set(e[0] for e in extra_files)
for path in files: for path in files:
if path in extra_names: if path in extra_names:
# Extra files override context files with the same name # Extra files override context files with the same name
@ -101,19 +95,18 @@ def create_archive(root, files=None, fileobj=None, gzip=False,
try: try:
with open(full_path, 'rb') as f: with open(full_path, 'rb') as f:
t.addfile(i, f) t.addfile(i, f)
except OSError as oe: except IOError:
raise OSError( raise IOError(
f'Can not read file in context: {full_path}' 'Can not read file in context: {}'.format(full_path)
) from oe )
else: else:
# Directories, FIFOs, symlinks... don't need to be read. # Directories, FIFOs, symlinks... don't need to be read.
t.addfile(i, None) t.addfile(i, None)
for name, contents in extra_files: for name, contents in extra_files:
info = tarfile.TarInfo(name) info = tarfile.TarInfo(name)
contents_encoded = contents.encode('utf-8') info.size = len(contents)
info.size = len(contents_encoded) t.addfile(info, io.BytesIO(contents.encode('utf-8')))
t.addfile(info, io.BytesIO(contents_encoded))
t.close() t.close()
fileobj.seek(0) fileobj.seek(0)
@ -125,8 +118,12 @@ def mkbuildcontext(dockerfile):
t = tarfile.open(mode='w', fileobj=f) t = tarfile.open(mode='w', fileobj=f)
if isinstance(dockerfile, io.StringIO): if isinstance(dockerfile, io.StringIO):
dfinfo = tarfile.TarInfo('Dockerfile') dfinfo = tarfile.TarInfo('Dockerfile')
if six.PY3:
raise TypeError('Please use io.BytesIO to create in-memory ' raise TypeError('Please use io.BytesIO to create in-memory '
'Dockerfiles with Python 3') 'Dockerfiles with Python 3')
else:
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
elif isinstance(dockerfile, io.BytesIO): elif isinstance(dockerfile, io.BytesIO):
dfinfo = tarfile.TarInfo('Dockerfile') dfinfo = tarfile.TarInfo('Dockerfile')
dfinfo.size = len(dockerfile.getvalue()) dfinfo.size = len(dockerfile.getvalue())
@ -156,7 +153,7 @@ def walk(root, patterns, default=True):
# Heavily based on # Heavily based on
# https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go # https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go
class PatternMatcher: class PatternMatcher(object):
def __init__(self, patterns): def __init__(self, patterns):
self.patterns = list(filter( self.patterns = list(filter(
lambda p: p.dirs, [Pattern(p) for p in patterns] lambda p: p.dirs, [Pattern(p) for p in patterns]
@ -188,7 +185,7 @@ class PatternMatcher:
fpath = os.path.join( fpath = os.path.join(
os.path.relpath(current_dir, root), f os.path.relpath(current_dir, root), f
) )
if fpath.startswith(f".{os.path.sep}"): if fpath.startswith('.' + os.path.sep):
fpath = fpath[2:] fpath = fpath[2:]
match = self.matches(fpath) match = self.matches(fpath)
if not match: if not match:
@ -214,12 +211,13 @@ class PatternMatcher:
break break
if skip: if skip:
continue continue
yield from rec_walk(cur) for sub in rec_walk(cur):
yield sub
return rec_walk(root) return rec_walk(root)
class Pattern: class Pattern(object):
def __init__(self, pattern_str): def __init__(self, pattern_str):
self.exclusion = False self.exclusion = False
if pattern_str.startswith('!'): if pattern_str.startswith('!'):
@ -232,9 +230,6 @@ class Pattern:
@classmethod @classmethod
def normalize(cls, p): def normalize(cls, p):
# Remove trailing spaces
p = p.strip()
# Leading and trailing slashes are not relevant. Yes, # Leading and trailing slashes are not relevant. Yes,
# "foo.py/" must exclude the "foo.py" regular file. "." # "foo.py/" must exclude the "foo.py" regular file. "."
# components are not relevant either, even if the whole # components are not relevant either, even if the whole

View File

@ -18,11 +18,11 @@ def find_config_file(config_path=None):
os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4 os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4
])) ]))
log.debug(f"Trying paths: {repr(paths)}") log.debug("Trying paths: {0}".format(repr(paths)))
for path in paths: for path in paths:
if os.path.exists(path): if os.path.exists(path):
log.debug(f"Found file at path: {path}") log.debug("Found file at path: {0}".format(path))
return path return path
log.debug("No config file found") log.debug("No config file found")
@ -57,7 +57,7 @@ def load_general_config(config_path=None):
try: try:
with open(config_file) as f: with open(config_file) as f:
return json.load(f) return json.load(f)
except (OSError, ValueError) as e: except (IOError, ValueError) as e:
# In the case of a legacy `.dockercfg` file, we won't # In the case of a legacy `.dockercfg` file, we won't
# be able to load any JSON data. # be able to load any JSON data.
log.debug(e) log.debug(e)

View File

@ -27,7 +27,9 @@ def minimum_version(version):
def wrapper(self, *args, **kwargs): def wrapper(self, *args, **kwargs):
if utils.version_lt(self._version, version): if utils.version_lt(self._version, version):
raise errors.InvalidVersion( raise errors.InvalidVersion(
f'{f.__name__} is not available for version < {version}', '{0} is not available for version < {1}'.format(
f.__name__, version
)
) )
return f(self, *args, **kwargs) return f(self, *args, **kwargs)
return wrapper return wrapper

View File

@ -79,18 +79,18 @@ def translate(pat):
i = i + 1 i = i + 1
if i >= n: if i >= n:
# is "**EOF" - to align with .gitignore just accept all # is "**EOF" - to align with .gitignore just accept all
res = f"{res}.*" res = res + '.*'
else: else:
# is "**" # is "**"
# Note that this allows for any # of /'s (even 0) because # Note that this allows for any # of /'s (even 0) because
# the .* will eat everything, even /'s # the .* will eat everything, even /'s
res = f"{res}(.*/)?" res = res + '(.*/)?'
else: else:
# is "*" so map it to anything but "/" # is "*" so map it to anything but "/"
res = f"{res}[^/]*" res = res + '[^/]*'
elif c == '?': elif c == '?':
# "?" is any char except "/" # "?" is any char except "/"
res = f"{res}[^/]" res = res + '[^/]'
elif c == '[': elif c == '[':
j = i j = i
if j < n and pat[j] == '!': if j < n and pat[j] == '!':
@ -100,16 +100,16 @@ def translate(pat):
while j < n and pat[j] != ']': while j < n and pat[j] != ']':
j = j + 1 j = j + 1
if j >= n: if j >= n:
res = f"{res}\\[" res = res + '\\['
else: else:
stuff = pat[i:j].replace('\\', '\\\\') stuff = pat[i:j].replace('\\', '\\\\')
i = j + 1 i = j + 1
if stuff[0] == '!': if stuff[0] == '!':
stuff = f"^{stuff[1:]}" stuff = '^' + stuff[1:]
elif stuff[0] == '^': elif stuff[0] == '^':
stuff = f"\\{stuff}" stuff = '\\' + stuff
res = f'{res}[{stuff}]' res = '%s[%s]' % (res, stuff)
else: else:
res = res + re.escape(c) res = res + re.escape(c)
return f"{res}$" return res + '$'

View File

@ -1,8 +1,14 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import json import json
import json.decoder import json.decoder
import six
from ..errors import StreamParseError from ..errors import StreamParseError
json_decoder = json.JSONDecoder() json_decoder = json.JSONDecoder()
@ -14,7 +20,7 @@ def stream_as_text(stream):
instead of byte streams. instead of byte streams.
""" """
for data in stream: for data in stream:
if not isinstance(data, str): if not isinstance(data, six.text_type):
data = data.decode('utf-8', 'replace') data = data.decode('utf-8', 'replace')
yield data yield data
@ -40,8 +46,8 @@ def json_stream(stream):
return split_buffer(stream, json_splitter, json_decoder.decode) return split_buffer(stream, json_splitter, json_decoder.decode)
def line_splitter(buffer, separator='\n'): def line_splitter(buffer, separator=u'\n'):
index = buffer.find(str(separator)) index = buffer.find(six.text_type(separator))
if index == -1: if index == -1:
return None return None
return buffer[:index + 1], buffer[index + 1:] return buffer[:index + 1], buffer[index + 1:]
@ -55,7 +61,7 @@ def split_buffer(stream, splitter=None, decoder=lambda a: a):
of the input. of the input.
""" """
splitter = splitter or line_splitter splitter = splitter or line_splitter
buffered = '' buffered = six.text_type('')
for data in stream_as_text(stream): for data in stream_as_text(stream):
buffered += data buffered += data
@ -71,4 +77,4 @@ def split_buffer(stream, splitter=None, decoder=lambda a: a):
try: try:
yield decoder(buffered) yield decoder(buffered)
except Exception as e: except Exception as e:
raise StreamParseError(e) from e raise StreamParseError(e)

View File

@ -3,11 +3,11 @@ import re
PORT_SPEC = re.compile( PORT_SPEC = re.compile(
"^" # Match full string "^" # Match full string
"(" # External part "(" # External part
r"(\[?(?P<host>[a-fA-F\d.:]+)\]?:)?" # Address "((?P<host>[a-fA-F\d.:]+):)?" # Address
r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range "(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
")?" ")?"
r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range "(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
"(?P<proto>/(udp|tcp|sctp))?" # Protocol "(?P<proto>/(udp|tcp))?" # Protocol
"$" # Match full string "$" # Match full string
) )
@ -49,7 +49,7 @@ def port_range(start, end, proto, randomly_available_port=False):
if not end: if not end:
return [start + proto] return [start + proto]
if randomly_available_port: if randomly_available_port:
return [f"{start}-{end}{proto}"] return ['{}-{}'.format(start, end) + proto]
return [str(port) + proto for port in range(int(start), int(end) + 1)] return [str(port) + proto for port in range(int(start), int(end) + 1)]

View File

@ -1,77 +0,0 @@
from .utils import format_environment
class ProxyConfig(dict):
'''
Hold the client's proxy configuration
'''
@property
def http(self):
return self.get('http')
@property
def https(self):
return self.get('https')
@property
def ftp(self):
return self.get('ftp')
@property
def no_proxy(self):
return self.get('no_proxy')
@staticmethod
def from_dict(config):
'''
Instantiate a new ProxyConfig from a dictionary that represents a
client configuration, as described in `the documentation`_.
.. _the documentation:
https://docs.docker.com/network/proxy/#configure-the-docker-client
'''
return ProxyConfig(
http=config.get('httpProxy'),
https=config.get('httpsProxy'),
ftp=config.get('ftpProxy'),
no_proxy=config.get('noProxy'),
)
def get_environment(self):
'''
Return a dictionary representing the environment variables used to
set the proxy settings.
'''
env = {}
if self.http:
env['http_proxy'] = env['HTTP_PROXY'] = self.http
if self.https:
env['https_proxy'] = env['HTTPS_PROXY'] = self.https
if self.ftp:
env['ftp_proxy'] = env['FTP_PROXY'] = self.ftp
if self.no_proxy:
env['no_proxy'] = env['NO_PROXY'] = self.no_proxy
return env
def inject_proxy_environment(self, environment):
'''
Given a list of strings representing environment variables, prepend the
environment variables corresponding to the proxy settings.
'''
if not self:
return environment
proxy_env = format_environment(self.get_environment())
if not environment:
return proxy_env
# It is important to prepend our variables, because we want the
# variables defined in "environment" to take precedence.
return proxy_env + environment
def __str__(self):
return (
'ProxyConfig('
f'http={self.http}, https={self.https}, '
f'ftp={self.ftp}, no_proxy={self.no_proxy}'
')'
)

View File

@ -4,25 +4,18 @@ import select
import socket as pysocket import socket as pysocket
import struct import struct
import six
try: try:
from ..transport import NpipeSocket from ..transport import NpipeSocket
except ImportError: except ImportError:
NpipeSocket = type(None) NpipeSocket = type(None)
STDOUT = 1
STDERR = 2
class SocketError(Exception): class SocketError(Exception):
pass pass
# NpipeSockets have their own error types
# pywintypes.error: (109, 'ReadFile', 'The pipe has been ended.')
NPIPE_ENDED = 109
def read(socket, n=4096): def read(socket, n=4096):
""" """
Reads at most n bytes from socket Reads at most n bytes from socket
@ -30,33 +23,18 @@ def read(socket, n=4096):
recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK) recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
if not isinstance(socket, NpipeSocket): if six.PY3 and not isinstance(socket, NpipeSocket):
if not hasattr(select, "poll"):
# Limited to 1024
select.select([socket], [], []) select.select([socket], [], [])
else:
poll = select.poll()
poll.register(socket, select.POLLIN | select.POLLPRI)
poll.poll()
try: try:
if hasattr(socket, 'recv'): if hasattr(socket, 'recv'):
return socket.recv(n) return socket.recv(n)
if isinstance(socket, pysocket.SocketIO): if six.PY3 and isinstance(socket, getattr(pysocket, 'SocketIO')):
return socket.read(n) return socket.read(n)
return os.read(socket.fileno(), n) return os.read(socket.fileno(), n)
except OSError as e: except EnvironmentError as e:
if e.errno not in recoverable_errors: if e.errno not in recoverable_errors:
raise raise
except Exception as e:
is_pipe_ended = (isinstance(socket, NpipeSocket) and
len(e.args) > 0 and
e.args[0] == NPIPE_ENDED)
if is_pipe_ended:
# npipes don't support duplex sockets, so we interpret
# a PIPE_ENDED error as a close operation (0-length read).
return ''
raise
def read_exactly(socket, n): def read_exactly(socket, n):
@ -64,7 +42,7 @@ def read_exactly(socket, n):
Reads exactly n bytes from socket Reads exactly n bytes from socket
Raises SocketError if there isn't enough data Raises SocketError if there isn't enough data
""" """
data = b"" data = six.binary_type()
while len(data) < n: while len(data) < n:
next_data = read(socket, n - len(data)) next_data = read(socket, n - len(data))
if not next_data: if not next_data:
@ -73,43 +51,28 @@ def read_exactly(socket, n):
return data return data
def next_frame_header(socket): def next_frame_size(socket):
""" """
Returns the stream and size of the next frame of data waiting to be read Returns the size of the next frame of data waiting to be read from socket,
from socket, according to the protocol defined here: according to the protocol defined here:
https://docs.docker.com/engine/api/v1.24/#attach-to-a-container https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container
""" """
try: try:
data = read_exactly(socket, 8) data = read_exactly(socket, 8)
except SocketError: except SocketError:
return (-1, -1) return -1
stream, actual = struct.unpack('>BxxxL', data) _, actual = struct.unpack('>BxxxL', data)
return (stream, actual) return actual
def frames_iter(socket, tty): def frames_iter(socket):
""" """
Return a generator of frames read from socket. A frame is a tuple where Returns a generator of frames read from socket
the first item is the stream number and the second item is a chunk of data.
If the tty setting is enabled, the streams are multiplexed into the stdout
stream.
"""
if tty:
return ((STDOUT, frame) for frame in frames_iter_tty(socket))
else:
return frames_iter_no_tty(socket)
def frames_iter_no_tty(socket):
"""
Returns a generator of data read from the socket when the tty setting is
not enabled.
""" """
while True: while True:
(stream, n) = next_frame_header(socket) n = next_frame_size(socket)
if n < 0: if n < 0:
break break
while n > 0: while n > 0:
@ -121,13 +84,13 @@ def frames_iter_no_tty(socket):
# We have reached EOF # We have reached EOF
return return
n -= data_length n -= data_length
yield (stream, result) yield result
def frames_iter_tty(socket): def socket_raw_iter(socket):
""" """
Return a generator of data read from the socket when the tty setting is Returns a generator of data read from the socket.
enabled. This is used for non-multiplexed streams.
""" """
while True: while True:
result = read(socket) result = read(socket)
@ -135,53 +98,3 @@ def frames_iter_tty(socket):
# We have reached EOF # We have reached EOF
return return
yield result yield result
def consume_socket_output(frames, demux=False):
"""
Iterate through frames read from the socket and return the result.
Args:
demux (bool):
If False, stdout and stderr are multiplexed, and the result is the
concatenation of all the frames. If True, the streams are
demultiplexed, and the result is a 2-tuple where each item is the
concatenation of frames belonging to the same stream.
"""
if demux is False:
# If the streams are multiplexed, the generator returns strings, that
# we just need to concatenate.
return b"".join(frames)
# If the streams are demultiplexed, the generator yields tuples
# (stdout, stderr)
out = [None, None]
for frame in frames:
# It is guaranteed that for each frame, one and only one stream
# is not None.
assert frame != (None, None)
if frame[0] is not None:
if out[0] is None:
out[0] = frame[0]
else:
out[0] += frame[0]
else:
if out[1] is None:
out[1] = frame[1]
else:
out[1] += frame[1]
return tuple(out)
def demux_adaptor(stream_id, data):
"""
Utility to demultiplex stdout and stderr when reading frames from the
socket.
"""
if stream_id == STDOUT:
return (data, None)
elif stream_id == STDERR:
return (None, data)
else:
raise ValueError(f'{stream_id} is not a valid stream')

View File

@ -1,28 +1,31 @@
import base64 import base64
import collections
import json
import os import os
import os.path import os.path
import json
import shlex import shlex
import string from distutils.version import StrictVersion
from datetime import datetime, timezone from datetime import datetime
from functools import lru_cache
from itertools import zip_longest import six
from urllib.parse import urlparse, urlunparse
from .. import errors from .. import errors
from ..constants import ( from .. import tls
BYTE_UNITS,
DEFAULT_HTTP_HOST,
DEFAULT_NPIPE,
DEFAULT_UNIX_SOCKET,
)
from ..tls import TLSConfig
URLComponents = collections.namedtuple( if six.PY2:
'URLComponents', from urllib import splitnport
'scheme netloc url params query fragment', else:
) from urllib.parse import splitnport
DEFAULT_HTTP_HOST = "127.0.0.1"
DEFAULT_UNIX_SOCKET = "http+unix://var/run/docker.sock"
DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'
BYTE_UNITS = {
'b': 1,
'k': 1024,
'm': 1024 * 1024,
'g': 1024 * 1024 * 1024
}
def create_ipam_pool(*args, **kwargs): def create_ipam_pool(*args, **kwargs):
@ -41,11 +44,11 @@ def create_ipam_config(*args, **kwargs):
def decode_json_header(header): def decode_json_header(header):
data = base64.b64decode(header) data = base64.b64decode(header)
if six.PY3:
data = data.decode('utf-8') data = data.decode('utf-8')
return json.loads(data) return json.loads(data)
@lru_cache(maxsize=None)
def compare_version(v1, v2): def compare_version(v1, v2):
"""Compare docker versions """Compare docker versions
@ -58,20 +61,14 @@ def compare_version(v1, v2):
>>> compare_version(v2, v2) >>> compare_version(v2, v2)
0 0
""" """
if v1 == v2: s1 = StrictVersion(v1)
s2 = StrictVersion(v2)
if s1 == s2:
return 0 return 0
# Split into `sys.version_info` like tuples. elif s1 > s2:
s1 = tuple(int(p) for p in v1.split('.'))
s2 = tuple(int(p) for p in v2.split('.'))
# Compare each component, padding with 0 if necessary.
for c1, c2 in zip_longest(s1, s2, fillvalue=0):
if c1 == c2:
continue
elif c1 > c2:
return -1 return -1
else: else:
return 1 return 1
return 0
def version_lt(v1, v2): def version_lt(v1, v2):
@ -88,7 +85,7 @@ def _convert_port_binding(binding):
if len(binding) == 2: if len(binding) == 2:
result['HostPort'] = binding[1] result['HostPort'] = binding[1]
result['HostIp'] = binding[0] result['HostIp'] = binding[0]
elif isinstance(binding[0], str): elif isinstance(binding[0], six.string_types):
result['HostIp'] = binding[0] result['HostIp'] = binding[0]
else: else:
result['HostPort'] = binding[0] result['HostPort'] = binding[0]
@ -112,7 +109,7 @@ def _convert_port_binding(binding):
def convert_port_bindings(port_bindings): def convert_port_bindings(port_bindings):
result = {} result = {}
for k, v in iter(port_bindings.items()): for k, v in six.iteritems(port_bindings):
key = str(k) key = str(k)
if '/' not in key: if '/' not in key:
key += '/tcp' key += '/tcp'
@ -129,17 +126,18 @@ def convert_volume_binds(binds):
result = [] result = []
for k, v in binds.items(): for k, v in binds.items():
if isinstance(k, bytes): if isinstance(k, six.binary_type):
k = k.decode('utf-8') k = k.decode('utf-8')
if isinstance(v, dict): if isinstance(v, dict):
if 'ro' in v and 'mode' in v: if 'ro' in v and 'mode' in v:
raise ValueError( raise ValueError(
f'Binding cannot contain both "ro" and "mode": {v!r}' 'Binding cannot contain both "ro" and "mode": {}'
.format(repr(v))
) )
bind = v['bind'] bind = v['bind']
if isinstance(bind, bytes): if isinstance(bind, six.binary_type):
bind = bind.decode('utf-8') bind = bind.decode('utf-8')
if 'ro' in v: if 'ro' in v:
@ -149,30 +147,14 @@ def convert_volume_binds(binds):
else: else:
mode = 'rw' mode = 'rw'
# NOTE: this is only relevant for Linux hosts
# (doesn't apply in Docker Desktop)
propagation_modes = [
'rshared',
'shared',
'rslave',
'slave',
'rprivate',
'private',
]
if 'propagation' in v and v['propagation'] in propagation_modes:
if mode:
mode = f"{mode},{v['propagation']}"
else:
mode = v['propagation']
result.append( result.append(
f'{k}:{bind}:{mode}' six.text_type('{0}:{1}:{2}').format(k, bind, mode)
) )
else: else:
if isinstance(v, bytes): if isinstance(v, six.binary_type):
v = v.decode('utf-8') v = v.decode('utf-8')
result.append( result.append(
f'{k}:{v}:rw' six.text_type('{0}:{1}:rw').format(k, v)
) )
return result return result
@ -183,13 +165,13 @@ def convert_tmpfs_mounts(tmpfs):
if not isinstance(tmpfs, list): if not isinstance(tmpfs, list):
raise ValueError( raise ValueError(
'Expected tmpfs value to be either a list or a dict, ' 'Expected tmpfs value to be either a list or a dict, found: {}'
f'found: {type(tmpfs).__name__}' .format(type(tmpfs).__name__)
) )
result = {} result = {}
for mount in tmpfs: for mount in tmpfs:
if isinstance(mount, str): if isinstance(mount, six.string_types):
if ":" in mount: if ":" in mount:
name, options = mount.split(":", 1) name, options = mount.split(":", 1)
else: else:
@ -198,8 +180,8 @@ def convert_tmpfs_mounts(tmpfs):
else: else:
raise ValueError( raise ValueError(
"Expected item in tmpfs list to be a string, " "Expected item in tmpfs list to be a string, found: {}"
f"found: {type(mount).__name__}" .format(type(mount).__name__)
) )
result[name] = options result[name] = options
@ -214,7 +196,7 @@ def convert_service_networks(networks):
result = [] result = []
for n in networks: for n in networks:
if isinstance(n, str): if isinstance(n, six.string_types):
n = {'Target': n} n = {'Target': n}
result.append(n) result.append(n)
return result return result
@ -230,95 +212,75 @@ def parse_repository_tag(repo_name):
return repo_name, None return repo_name, None
# Based on utils.go:ParseHost http://tinyurl.com/nkahcfh
# fd:// protocol unsupported (for obvious reasons)
# Added support for http and https
# Protocol translation: tcp -> http, unix -> http+unix
def parse_host(addr, is_win32=False, tls=False): def parse_host(addr, is_win32=False, tls=False):
# Sensible defaults proto = "http+unix"
port = None
path = ''
if not addr and is_win32: if not addr and is_win32:
return DEFAULT_NPIPE addr = DEFAULT_NPIPE
if not addr or addr.strip() == 'unix://': if not addr or addr.strip() == 'unix://':
return DEFAULT_UNIX_SOCKET return DEFAULT_UNIX_SOCKET
addr = addr.strip() addr = addr.strip()
if addr.startswith('http://'):
addr = addr.replace('http://', 'tcp://')
if addr.startswith('http+unix://'):
addr = addr.replace('http+unix://', 'unix://')
parsed_url = urlparse(addr) if addr == 'tcp://':
proto = parsed_url.scheme
if not proto or any(x not in f"{string.ascii_letters}+" for x in proto):
# https://bugs.python.org/issue754016
parsed_url = urlparse(f"//{addr}", 'tcp')
proto = 'tcp'
if proto == 'fd':
raise errors.DockerException('fd protocol is not implemented')
# These protos are valid aliases for our library but not for the
# official spec
if proto == 'http' or proto == 'https':
tls = proto == 'https'
proto = 'tcp'
elif proto == 'http+unix':
proto = 'unix'
if proto not in ('tcp', 'unix', 'npipe', 'ssh'):
raise errors.DockerException( raise errors.DockerException(
f"Invalid bind address protocol: {addr}" "Invalid bind address format: {0}".format(addr)
)
if proto == 'tcp' and not parsed_url.netloc:
# "tcp://" is exceptionally disallowed by convention;
# omitting a hostname for other protocols is fine
raise errors.DockerException(
f'Invalid bind address format: {addr}'
)
if any([
parsed_url.params, parsed_url.query, parsed_url.fragment,
parsed_url.password
]):
raise errors.DockerException(
f'Invalid bind address format: {addr}'
)
if parsed_url.path and proto == 'ssh':
raise errors.DockerException(
f'Invalid bind address format: no path allowed for this protocol: {addr}'
) )
elif addr.startswith('unix://'):
addr = addr[7:]
elif addr.startswith('tcp://'):
proto = 'http{0}'.format('s' if tls else '')
addr = addr[6:]
elif addr.startswith('https://'):
proto = "https"
addr = addr[8:]
elif addr.startswith('npipe://'):
proto = 'npipe'
addr = addr[8:]
elif addr.startswith('fd://'):
raise errors.DockerException("fd protocol is not implemented")
else: else:
path = parsed_url.path if "://" in addr:
if proto == 'unix' and parsed_url.hostname is not None:
# For legacy reasons, we consider unix://path
# to be valid and equivalent to unix:///path
path = f"{parsed_url.hostname}/{path}"
netloc = parsed_url.netloc
if proto in ('tcp', 'ssh'):
port = parsed_url.port or 0
if port <= 0:
if proto != 'ssh':
raise errors.DockerException( raise errors.DockerException(
f'Invalid bind address format: port is required: {addr}' "Invalid bind address protocol: {0}".format(addr)
) )
port = 22 proto = "https" if tls else "http"
netloc = f'{parsed_url.netloc}:{port}'
if not parsed_url.hostname: if proto in ("http", "https"):
netloc = f'{DEFAULT_HTTP_HOST}:{port}' address_parts = addr.split('/', 1)
host = address_parts[0]
if len(address_parts) == 2:
path = '/' + address_parts[1]
host, port = splitnport(host)
# Rewrite schemes to fit library internals (requests adapters) if port is None:
if proto == 'tcp': raise errors.DockerException(
proto = f"http{'s' if tls else ''}" "Invalid port: {0}".format(addr)
elif proto == 'unix': )
proto = 'http+unix'
if proto in ('http+unix', 'npipe'): if not host:
return f"{proto}://{path}".rstrip('/') host = DEFAULT_HTTP_HOST
else:
host = addr
return urlunparse(URLComponents( if proto in ("http", "https") and port == -1:
scheme=proto, raise errors.DockerException(
netloc=netloc, "Bind address needs a port: {0}".format(addr))
url=path,
params='', if proto == "http+unix" or proto == 'npipe':
query='', return "{0}://{1}".format(proto, host).rstrip('/')
fragment='', return "{0}://{1}:{2}{3}".format(proto, host, port, path).rstrip('/')
)).rstrip('/')
def parse_devices(devices): def parse_devices(devices):
@ -327,9 +289,9 @@ def parse_devices(devices):
if isinstance(device, dict): if isinstance(device, dict):
device_list.append(device) device_list.append(device)
continue continue
if not isinstance(device, str): if not isinstance(device, six.string_types):
raise errors.DockerException( raise errors.DockerException(
f'Invalid device type {type(device)}' 'Invalid device type {0}'.format(type(device))
) )
device_mapping = device.split(':') device_mapping = device.split(':')
if device_mapping: if device_mapping:
@ -350,7 +312,7 @@ def parse_devices(devices):
return device_list return device_list
def kwargs_from_env(environment=None): def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
if not environment: if not environment:
environment = os.environ environment = os.environ
host = environment.get('DOCKER_HOST') host = environment.get('DOCKER_HOST')
@ -370,7 +332,9 @@ def kwargs_from_env(environment=None):
params = {} params = {}
if host: if host:
params['base_url'] = host params['base_url'] = (
host.replace('tcp://', 'https://') if enable_tls else host
)
if not enable_tls: if not enable_tls:
return params return params
@ -378,11 +342,18 @@ def kwargs_from_env(environment=None):
if not cert_path: if not cert_path:
cert_path = os.path.join(os.path.expanduser('~'), '.docker') cert_path = os.path.join(os.path.expanduser('~'), '.docker')
params['tls'] = TLSConfig( if not tls_verify and assert_hostname is None:
# assert_hostname is a subset of TLS verification,
# so if it's not set already then set it to false.
assert_hostname = False
params['tls'] = tls.TLSConfig(
client_cert=(os.path.join(cert_path, 'cert.pem'), client_cert=(os.path.join(cert_path, 'cert.pem'),
os.path.join(cert_path, 'key.pem')), os.path.join(cert_path, 'key.pem')),
ca_cert=os.path.join(cert_path, 'ca.pem'), ca_cert=os.path.join(cert_path, 'ca.pem'),
verify=tls_verify, verify=tls_verify,
ssl_version=ssl_version,
assert_hostname=assert_hostname,
) )
return params return params
@ -390,26 +361,23 @@ def kwargs_from_env(environment=None):
def convert_filters(filters): def convert_filters(filters):
result = {} result = {}
for k, v in iter(filters.items()): for k, v in six.iteritems(filters):
if isinstance(v, bool): if isinstance(v, bool):
v = 'true' if v else 'false' v = 'true' if v else 'false'
if not isinstance(v, list): if not isinstance(v, list):
v = [v, ] v = [v, ]
result[k] = [ result[k] = v
str(item) if not isinstance(item, str) else item
for item in v
]
return json.dumps(result) return json.dumps(result)
def datetime_to_timestamp(dt): def datetime_to_timestamp(dt):
"""Convert a datetime to a Unix timestamp""" """Convert a UTC datetime to a Unix timestamp"""
delta = dt.astimezone(timezone.utc) - datetime(1970, 1, 1, tzinfo=timezone.utc) delta = dt - datetime.utcfromtimestamp(0)
return delta.seconds + delta.days * 24 * 3600 return delta.seconds + delta.days * 24 * 3600
def parse_bytes(s): def parse_bytes(s):
if isinstance(s, (int, float,)): if isinstance(s, six.integer_types + (float,)):
return s return s
if len(s) == 0: if len(s) == 0:
return 0 return 0
@ -430,19 +398,20 @@ def parse_bytes(s):
if suffix in units.keys() or suffix.isdigit(): if suffix in units.keys() or suffix.isdigit():
try: try:
digits = float(digits_part) digits = int(digits_part)
except ValueError as ve: except ValueError:
raise errors.DockerException( raise errors.DockerException(
'Failed converting the string value for memory ' 'Failed converting the string value for memory ({0}) to'
f'({digits_part}) to an integer.' ' an integer.'.format(digits_part)
) from ve )
# Reconvert to long for the final result # Reconvert to long for the final result
s = int(digits * units[suffix]) s = int(digits * units[suffix])
else: else:
raise errors.DockerException( raise errors.DockerException(
f'The specified value for memory ({s}) should specify the units. ' 'The specified value for memory ({0}) should specify the'
'The postfix should be one of the `b` `k` `m` `g` characters' ' units. The postfix should be one of the `b` `k` `m` `g`'
' characters'.format(s)
) )
return s return s
@ -450,9 +419,9 @@ def parse_bytes(s):
def normalize_links(links): def normalize_links(links):
if isinstance(links, dict): if isinstance(links, dict):
links = iter(links.items()) links = six.iteritems(links)
return [f'{k}:{v}' if v else k for k, v in sorted(links)] return ['{0}:{1}'.format(k, v) for k, v in sorted(links)]
def parse_env_file(env_file): def parse_env_file(env_file):
@ -462,7 +431,7 @@ def parse_env_file(env_file):
""" """
environment = {} environment = {}
with open(env_file) as f: with open(env_file, 'r') as f:
for line in f: for line in f:
if line[0] == '#': if line[0] == '#':
@ -478,12 +447,15 @@ def parse_env_file(env_file):
environment[k] = v environment[k] = v
else: else:
raise errors.DockerException( raise errors.DockerException(
f'Invalid line in environment file {env_file}:\n{line}') 'Invalid line in environment file {0}:\n{1}'.format(
env_file, line))
return environment return environment
def split_command(command): def split_command(command):
if six.PY2 and not isinstance(command, six.binary_type):
command = command.encode('utf-8')
return shlex.split(command) return shlex.split(command)
@ -491,22 +463,22 @@ def format_environment(environment):
def format_env(key, value): def format_env(key, value):
if value is None: if value is None:
return key return key
if isinstance(value, bytes): if isinstance(value, six.binary_type):
value = value.decode('utf-8') value = value.decode('utf-8')
return f'{key}={value}' return u'{key}={value}'.format(key=key, value=value)
return [format_env(*var) for var in iter(environment.items())] return [format_env(*var) for var in six.iteritems(environment)]
def format_extra_hosts(extra_hosts, task=False): def format_extra_hosts(extra_hosts, task=False):
# Use format dictated by Swarm API if container is part of a task # Use format dictated by Swarm API if container is part of a task
if task: if task:
return [ return [
f'{v} {k}' for k, v in sorted(iter(extra_hosts.items())) '{} {}'.format(v, k) for k, v in sorted(six.iteritems(extra_hosts))
] ]
return [ return [
f'{k}:{v}' for k, v in sorted(iter(extra_hosts.items())) '{}:{}'.format(k, v) for k, v in sorted(six.iteritems(extra_hosts))
] ]

View File

@ -1,8 +1,2 @@
try: version = "3.4.1"
from ._version import __version__ version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
except ImportError:
from importlib.metadata import PackageNotFoundError, version
try:
__version__ = version('docker')
except PackageNotFoundError:
__version__ = '0.0.0'

2
docs-requirements.txt Normal file
View File

@ -0,0 +1,2 @@
recommonmark==0.4.0
Sphinx==1.4.6

View File

@ -1,12 +1,3 @@
dl.hide-signature > dt { dl.hide-signature > dt {
display: none; display: none;
} }
dl.field-list > dt {
/* prevent code blocks from forcing wrapping on the "Parameters" header */
word-break: initial;
}
code.literal{
hyphens: none;
}

View File

@ -140,19 +140,14 @@ Configuration types
.. autoclass:: Healthcheck .. autoclass:: Healthcheck
.. autoclass:: IPAMConfig .. autoclass:: IPAMConfig
.. autoclass:: IPAMPool .. autoclass:: IPAMPool
.. autoclass:: LogConfig
.. autoclass:: Mount .. autoclass:: Mount
.. autoclass:: NetworkAttachmentConfig
.. autoclass:: Placement .. autoclass:: Placement
.. autoclass:: PlacementPreference
.. autoclass:: Privileges .. autoclass:: Privileges
.. autoclass:: Resources .. autoclass:: Resources
.. autoclass:: RestartPolicy .. autoclass:: RestartPolicy
.. autoclass:: RollbackConfig
.. autoclass:: SecretReference .. autoclass:: SecretReference
.. autoclass:: ServiceMode .. autoclass:: ServiceMode
.. autoclass:: SwarmExternalCA .. autoclass:: SwarmExternalCA
.. autoclass:: SwarmSpec(*args, **kwargs) .. autoclass:: SwarmSpec(*args, **kwargs)
.. autoclass:: TaskTemplate .. autoclass:: TaskTemplate
.. autoclass:: Ulimit
.. autoclass:: UpdateConfig .. autoclass:: UpdateConfig

View File

@ -1,507 +1,6 @@
Changelog Change log
========== ==========
7.1.0
-----
### Upgrade Notes
- Bumped minimum engine API version to 1.24
- Bumped default engine API version to 1.44 (Moby 25.0)
### Bugfixes
- Fixed issue with tag parsing when the registry address includes ports that resulted in `invalid tag format` errors
- Fixed issue preventing creating new configs (`ConfigCollection`), which failed with a `KeyError` due to the `name` field
- Fixed an issue due to an update in the [requests](https://github.com/psf/requests) package breaking `docker-py` by applying the [suggested fix](https://github.com/psf/requests/pull/6710)
### Miscellaneous
- Documentation improvements
- Updated Ruff (linter) and fixed minor linting issues
- Packaging/CI updates
- Started using hatch for packaging (https://github.com/pypa/hatch)
- Updated `setup-python` github action
- Updated tests
- Stopped checking for deprecated container and image related fields (`Container` and `ContainerConfig`)
- Updated tests that check `NetworkSettings.Networks.<network>.Aliases` due to engine changes
7.0.0
-----
### Upgrade Notes
- Removed SSL version (`ssl_version`) and explicit hostname check (`assert_hostname`) options
- `assert_hostname` has not been used since Python 3.6 and was removed in 3.12
- Python 3.7+ supports TLSv1.3 by default
- Websocket support is no longer included by default
- Use `pip install docker[websockets]` to include `websocket-client` dependency
- By default, `docker-py` hijacks the TCP connection and does not use Websockets
- Websocket client is only required to use `attach_socket(container, ws=True)`
- Python 3.7 no longer officially supported (reached end-of-life June 2023)
### Features
- Python 3.12 support
- Full `networking_config` support for `containers.create()`
- Replaces `network_driver_opt` (added in 6.1.0)
- Add `health()` property to container that returns status (e.g. `unhealthy`)
- Add `pause` option to `container.commit()`
- Add support for bind mount propagation (e.g. `rshared`, `private`)
- Add `filters`, `keep_storage`, and `all` parameters to `prune_builds()` (requires API v1.39+)
### Bugfixes
- Consistently return `docker.errors.NotFound` on 404 responses
- Validate tag format before image push
### Miscellaneous
- Upgraded urllib3 version in `requirements.txt` (used for development/tests)
- Documentation typo fixes & formatting improvements
- Fixed integration test compatibility for newer Moby engine versions
- Switch to [ruff](https://github.com/astral-sh/ruff) for linting
6.1.3
-----
#### Bugfixes
- Fix compatibility with [`eventlet/eventlet`](https://github.com/eventlet/eventlet)
6.1.2
-----
#### Bugfixes
- Fix for socket timeouts on long `docker exec` calls
6.1.1
-----
#### Bugfixes
- Fix `containers.stats()` hanging with `stream=True`
- Correct return type in docs for `containers.diff()` method
6.1.0
-----
### Upgrade Notes
- Errors are no longer returned during client initialization if the credential helper cannot be found. A warning will be emitted instead, and an error is returned if the credential helper is used.
### Features
- Python 3.11 support
- Use `poll()` instead of `select()` on non-Windows platforms
- New API fields
- `network_driver_opt` on container run / create
- `one-shot` on container stats
- `status` on services list
### Bugfixes
- Support for requests 2.29.0+ and urllib3 2.x
- Do not strip characters from volume names
- Fix connection leak on container.exec_* operations
- Fix errors closing named pipes on Windows
6.0.1
-----
### Bugfixes
- Fix for `The pipe has been ended errors` on Windows
- Support floats for container log filtering by timestamp (`since` / `until`)
6.0.0
-----
### Upgrade Notes
- Minimum supported Python version is 3.7+
- When installing with pip, the `docker[tls]` extra is deprecated and a no-op,
use `docker` for same functionality (TLS support is always available now)
- Native Python SSH client (used by default / `use_ssh_client=False`) will now
reject unknown host keys with `paramiko.ssh_exception.SSHException`
- Short IDs are now 12 characters instead of 10 characters (same as Docker CLI)
### Features
- Python 3.10 support
- Automatically negotiate most secure TLS version
- Add `platform` (e.g. `linux/amd64`, `darwin/arm64`) to container create & run
- Add support for `GlobalJob` and `ReplicatedJobs` for Swarm
- Add `remove()` method on `Image`
- Add `force` param to `disable()` on `Plugin`
### Bugfixes
- Fix install issues on Windows related to `pywin32`
- Do not accept unknown SSH host keys in native Python SSH mode
- Use 12 character short IDs for consistency with Docker CLI
- Ignore trailing whitespace in `.dockerignore` files
- Fix IPv6 host parsing when explicit port specified
- Fix `ProxyCommand` option for SSH connections
- Do not spawn extra subshell when launching external SSH client
- Improve exception semantics to preserve context
- Documentation improvements (formatting, examples, typos, missing params)
### Miscellaneous
- Upgrade dependencies in `requirements.txt` to latest versions
- Remove extraneous transitive dependencies
- Eliminate usages of deprecated functions/methods
- Test suite reliability improvements
- GitHub Actions workflows for linting, unit tests, integration tests, and
publishing releases
5.0.3
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/76?closed=1)
### Features
- Add `cap_add` and `cap_drop` parameters to service create and ContainerSpec
- Add `templating` parameter to config create
### Bugfixes
- Fix getting a read timeout for logs/attach with a tty and slow output
### Miscellaneous
- Fix documentation examples
5.0.2
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/75?closed=1)
### Bugfixes
- Fix `disable_buffering` regression
5.0.1
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/74?closed=1)
### Bugfixes
- Bring back support for ssh identity file
- Cleanup remaining python-2 dependencies
- Fix image save example in docs
### Miscellaneous
- Bump urllib3 to 1.26.5
- Bump requests to 2.26.0
5.0.0
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/70?closed=1)
### Breaking changes
- Remove support for Python 2.7
- Make Python 3.6 the minimum version supported
### Features
- Add `limit` parameter to image search endpoint
### Bugfixes
- Fix `KeyError` exception on secret create
- Verify TLS keys loaded from docker contexts
- Update PORT_SPEC regex to allow square brackets for IPv6 addresses
- Fix containers and images documentation examples
4.4.4
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/73?closed=1)
### Bugfixes
- Remove `LD_LIBRARY_PATH` and `SSL_CERT_FILE` environment variables when shelling out to the ssh client
4.4.3
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/72?closed=1)
### Features
- Add support for docker.types.Placement.MaxReplicas
### Bugfixes
- Fix SSH port parsing when shelling out to the ssh client
4.4.2
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/71?closed=1)
### Bugfixes
- Fix SSH connection bug where the hostname was incorrectly trimmed and the error was hidden
- Fix docs example
### Miscellaneous
- Add Python3.8 and 3.9 in setup.py classifier list
4.4.1
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/69?closed=1)
### Bugfixes
- Avoid setting unsuported parameter for subprocess.Popen on Windows
- Replace use of deprecated "filter" argument on ""docker/api/image"
4.4.0
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/67?closed=1)
### Features
- Add an alternative SSH connection to the paramiko one, based on shelling out to the SSh client. Similar to the behaviour of Docker cli
- Default image tag to `latest` on `pull`
### Bugfixes
- Fix plugin model upgrade
- Fix examples URL in ulimits
### Miscellaneous
- Improve exception messages for server and client errors
- Bump cryptography from 2.3 to 3.2
4.3.1
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/68?closed=1)
### Miscellaneous
- Set default API version to `auto`
- Fix conversion to bytes for `float`
- Support OpenSSH `identityfile` option
4.3.0
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/64?closed=1)
### Features
- Add `DeviceRequest` type to expose host resources such as GPUs
- Add support for `DriverOpts` in EndpointConfig
- Disable compression by default when using container.get_archive method
### Miscellaneous
- Update default API version to v1.39
- Update test engine version to 19.03.12
4.2.2
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/66?closed=1)
### Bugfixes
- Fix context load for non-docker endpoints
4.2.1
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/65?closed=1)
### Features
- Add option on when to use `tls` on Context constructor
- Make context orchestrator field optional
4.2.0
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/63?closed=1)
### Bugfixes
- Fix `win32pipe.WaitNamedPipe` throw exception in Windows containers
- Use `Hostname`, `Username`, `Port` and `ProxyCommand` settings from `.ssh/config` when on SSH
- Set host key policy for ssh transport to `paramiko.WarningPolicy()`
- Set logging level of `paramiko` to warn
### Features
- Add support for docker contexts through `docker.ContextAPI`
4.1.0
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/61?closed=1)
### Bugfixes
- Correct `INDEX_URL` logic in build.py _set_auth_headers
- Fix for empty auth keys in config.json
### Features
- Add `NetworkAttachmentConfig` for service create/update
### Miscellaneous
- Bump pytest to 4.3.1
- Adjust `--platform` tests for changes in docker engine
- Update credentials-helpers to v0.6.3
4.0.2
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/62?closed=1)
### Bugfixes
- Unified the way `HealthCheck` is created/configured
### Miscellaneous
- Bumped version of websocket-client
4.0.1
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/60?closed=1)
### Bugfixes
- Fixed an obsolete import in the `credentials` subpackage that caused import errors in
Python 3.7
### Miscellaneous
- Docs building has been repaired
4.0.0
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/57?closed=1)
### Breaking changes
- Support for Python 3.3 and Python 3.4 has been dropped
- `APIClient.update_service`, `APIClient.init_swarm`, and
`DockerClient.swarm.init` now return a `dict` from the API's response body
- In `APIClient.build` and `DockerClient.images.build`, the `use_config_proxy`
parameter now defaults to True
- `init_path` is no longer a valid parameter for `HostConfig`
### Features
- It is now possible to provide `SCTP` ports for port mappings
- `ContainerSpec`s now support the `init` parameter
- `DockerClient.swarm.init` and `APIClient.init_swarm` now support the
`data_path_addr` parameter
- `APIClient.update_swarm` and `DockerClient.swarm.update` now support the
`rotate_manager_unlock_key` parameter
- `APIClient.update_service` returns the API's response body as a `dict`
- `APIClient.init_swarm`, and `DockerClient.swarm.init` now return the API's
response body as a `dict`
### Bugfixes
- Fixed `PlacementPreference` instances to produce a valid API type
- Fixed a bug where not setting a value for `buildargs` in `build` could cause
the library to attempt accessing attributes of a `None` value
- Fixed a bug where setting the `volume_driver` parameter in
`DockerClient.containers.create` would result in an error
- `APIClient.inspect_distribution` now correctly sets the authentication
headers on the request, allowing it to be used with private repositories
This change also applies to `DockerClient.get_registry_data`
3.7.2
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/59?closed=1)
### Bugfixes
* Fix `base_url` to keep the TCP protocol in utils.py by delegating the
  responsibility of changing the protocol to `parse_host`, leaving `base_url` with its original value.
* XFAIL test_attach_stream_and_cancel on TLS
3.7.1
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/58?closed=1)
### Bugfixes
* Set a different default number (which is now 9) for SSH pools
* Adds a BaseHTTPAdapter with a close method to ensure that the
  pools are cleaned on close()
* Makes SSHHTTPAdapter reopen a closed connection when needed
like the others
3.7.0
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/56?closed=1)
### Features
* Added support for multiplexed streams (for `attach` and `exec_start`). Learn
more at https://docker-py.readthedocs.io/en/stable/user_guides/multiplex.html
* Added the `use_config_proxy` parameter to the following methods:
`APIClient.build`, `APIClient.create_container`, `DockerClient.images.build`
and `DockerClient.containers.run` (`False` by default). **This parameter**
**will become `True` by default in the 4.0.0 release.**
* Placement preferences for Swarm services are better validated on the client
and documentation has been updated accordingly
### Bugfixes
* Fixed a bug where credential stores weren't queried for relevant registry
credentials with certain variations of the `config.json` file.
* `DockerClient.swarm.init` now returns a boolean value as advertised.
3.6.0
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/55?closed=1)
### Features
* Added support for connecting to the Docker Engine over SSH. Additional
dependencies for this feature can be installed with
`pip install "docker[ssh]"`
* Added support for the `named` parameter in `Image.save`, which may be
used to ensure the resulting tarball retains the image's name on save.
### Bugfixes
* Fixed a bug where builds on Windows with a context path using the `\\?\`
prefix would fail with some relative Dockerfile paths.
* Fixed an issue where pulls made with the `DockerClient` would fail when
setting the `stream` parameter to `True`.
### Miscellaneous
* The minimum requirement for the `requests` dependency has been bumped
to 2.20.0
3.5.1
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/54?closed=1)
### Miscellaneous
* Bumped version of `pyOpenSSL` in `requirements.txt` and `setup.py` to prevent
installation of a vulnerable version
* Docs fixes
3.5.0
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/53?closed=1)
### Deprecation warning
* Support for Python 3.3 will be dropped in the 4.0.0 release
### Features
* Updated dependencies to ensure support for Python 3.7 environments
* Added support for the `uts_mode` parameter in `HostConfig`
* The `UpdateConfig` constructor now allows `rollback` as a valid
value for `failure_action`
* Added support for `rollback_config` in `APIClient.create_service`,
`APIClient.update_service`, `DockerClient.services.create` and
`Service.update`.
### Bugfixes
* Credential helpers are now properly leveraged by the `build` method
* Fixed a bug that caused placement preferences to be ignored when provided
to `DockerClient.services.create`
* Fixed a bug that caused a `user` value of `0` to be ignored in
`APIClient.create_container` and `DockerClient.containers.create`
3.4.1 3.4.1
----- -----
@ -1503,7 +1002,7 @@ like the others
(`Client.volumes`, `Client.create_volume`, `Client.inspect_volume`, (`Client.volumes`, `Client.create_volume`, `Client.inspect_volume`,
`Client.remove_volume`). `Client.remove_volume`).
* Added support for the `group_add` parameter in `create_host_config`. * Added support for the `group_add` parameter in `create_host_config`.
* Added support for the CPU CFS (`cpu_quota` and `cpu_period`) parameters * Added support for the CPU CFS (`cpu_quota` and `cpu_period`) parameters
in `create_host_config`. in `create_host_config`.
* Added support for the archive API endpoint (`Client.get_archive`, * Added support for the archive API endpoint (`Client.get_archive`,
`Client.put_archive`). `Client.put_archive`).

View File

@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
# #
# docker-sdk-python documentation build configuration file, created by # docker-sdk-python documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 14 15:48:58 2016. # sphinx-quickstart on Wed Sep 14 15:48:58 2016.
@ -18,8 +19,6 @@
import datetime import datetime
import os import os
import sys import sys
from importlib.metadata import version
sys.path.insert(0, os.path.abspath('..')) sys.path.insert(0, os.path.abspath('..'))
@ -35,19 +34,24 @@ sys.path.insert(0, os.path.abspath('..'))
extensions = [ extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.autodoc',
'sphinx.ext.napoleon', 'sphinx.ext.napoleon',
'myst_parser'
] ]
# Add any paths that contain templates here, relative to this directory. # Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates'] templates_path = ['_templates']
source_suffix = { from recommonmark.parser import CommonMarkParser
'.rst': 'restructuredtext',
'.txt': 'markdown', source_parsers = {
'.md': 'markdown', '.md': CommonMarkParser,
} }
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# source_suffix = '.md'
# The encoding of source files. # The encoding of source files.
# #
# source_encoding = 'utf-8-sig' # source_encoding = 'utf-8-sig'
@ -56,26 +60,26 @@ source_suffix = {
master_doc = 'index' master_doc = 'index'
# General information about the project. # General information about the project.
project = 'Docker SDK for Python' project = u'Docker SDK for Python'
year = datetime.datetime.now().year year = datetime.datetime.now().year
copyright = f'{year} Docker Inc' copyright = u'%d Docker Inc' % year
author = 'Docker Inc' author = u'Docker Inc'
# The version info for the project you're documenting, acts as replacement for # The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the # |version| and |release|, also used in various other places throughout the
# built documents. # built documents.
# #
# see https://github.com/pypa/setuptools_scm#usage-from-sphinx # The short X.Y version.
release = version('docker') version = u'2.0'
# for example take major/minor # The full version, including alpha/beta/rc tags.
version = '.'.join(release.split('.')[:2]) release = u'2.0'
# The language for content autogenerated by Sphinx. Refer to documentation # The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages. # for a list of supported languages.
# #
# This is also used if you do content translation via gettext catalogs. # This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases. # Usually you set "language" from the command line for these cases.
language = 'en' language = None
# There are two options for replacing |today|: either, you set today to some # There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used: # non-false value, then it is used:
@ -277,8 +281,8 @@ latex_elements = {
# (source start file, target name, title, # (source start file, target name, title,
# author, documentclass [howto, manual, or own class]). # author, documentclass [howto, manual, or own class]).
latex_documents = [ latex_documents = [
(master_doc, 'docker-sdk-python.tex', 'docker-sdk-python Documentation', (master_doc, 'docker-sdk-python.tex', u'docker-sdk-python Documentation',
'Docker Inc.', 'manual'), u'Docker Inc.', 'manual'),
] ]
# The name of an image file (relative to this directory) to place at the top of # The name of an image file (relative to this directory) to place at the top of
@ -319,7 +323,7 @@ latex_documents = [
# One entry per manual page. List of tuples # One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section). # (source start file, name, description, authors, manual section).
man_pages = [ man_pages = [
(master_doc, 'docker-sdk-python', 'docker-sdk-python Documentation', (master_doc, 'docker-sdk-python', u'docker-sdk-python Documentation',
[author], 1) [author], 1)
] ]
@ -334,7 +338,7 @@ man_pages = [
# (source start file, target name, title, author, # (source start file, target name, title, author,
# dir menu entry, description, category) # dir menu entry, description, category)
texinfo_documents = [ texinfo_documents = [
(master_doc, 'docker-sdk-python', 'docker-sdk-python Documentation', (master_doc, 'docker-sdk-python', u'docker-sdk-python Documentation',
author, 'docker-sdk-python', 'One line description of project.', author, 'docker-sdk-python', 'One line description of project.',
'Miscellaneous'), 'Miscellaneous'),
] ]

View File

@ -58,7 +58,7 @@ You can stream logs:
.. code-block:: python .. code-block:: python
>>> for line in container.logs(stream=True): >>> for line in container.logs(stream=True):
... print(line.strip()) ... print line.strip()
Reticulating spline 2... Reticulating spline 2...
Reticulating spline 3... Reticulating spline 3...
... ...
@ -92,5 +92,4 @@ That's just a taste of what you can do with the Docker SDK for Python. For more,
volumes volumes
api api
tls tls
user_guides/index
change-log change-log

View File

@ -30,10 +30,7 @@ Service objects
The raw representation of this object from the server. The raw representation of this object from the server.
.. automethod:: force_update
.. automethod:: logs
.. automethod:: reload .. automethod:: reload
.. automethod:: remove .. automethod:: remove
.. automethod:: scale
.. automethod:: tasks .. automethod:: tasks
.. automethod:: update .. automethod:: update

View File

@ -15,7 +15,7 @@ For example, to check the server against a specific CA certificate:
.. code-block:: python .. code-block:: python
tls_config = docker.tls.TLSConfig(ca_cert='/path/to/ca.pem', verify=True) tls_config = docker.tls.TLSConfig(ca_cert='/path/to/ca.pem')
client = docker.DockerClient(base_url='<https_url>', tls=tls_config) client = docker.DockerClient(base_url='<https_url>', tls=tls_config)
This is the equivalent of ``docker --tlsverify --tlscacert /path/to/ca.pem ...``. This is the equivalent of ``docker --tlsverify --tlscacert /path/to/ca.pem ...``.

View File

@ -1,8 +0,0 @@
User guides and tutorials
=========================
.. toctree::
:maxdepth: 2
multiplex
swarm_services

View File

@ -1,62 +0,0 @@
Handling multiplexed streams
============================
.. note::
    The following instructions assume you're interested in getting output from
    an ``exec`` command. These instructions are similarly applicable to the
    output of ``attach``.
First create a container that runs in the background:
>>> client = docker.from_env()
>>> container = client.containers.run(
... 'bfirsh/reticulate-splines', detach=True)
Prepare the command we are going to use. It prints "hello stdout"
in `stdout`, followed by "hello stderr" in `stderr`:
>>> cmd = '/bin/sh -c "echo hello stdout ; echo hello stderr >&2"'
We'll run this command with all four combinations of ``stream``
and ``demux``.
With ``stream=False`` and ``demux=False``, the output is a string
that contains both the `stdout` and the `stderr` output:
>>> res = container.exec_run(cmd, stream=False, demux=False)
>>> res.output
b'hello stderr\nhello stdout\n'
With ``stream=True``, and ``demux=False``, the output is a
generator that yields strings containing the output of both
`stdout` and `stderr`:
>>> res = container.exec_run(cmd, stream=True, demux=False)
>>> next(res.output)
b'hello stdout\n'
>>> next(res.output)
b'hello stderr\n'
>>> next(res.output)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
StopIteration
With ``stream=True`` and ``demux=True``, the generator now
separates the streams, and yields tuples
``(stdout, stderr)``:
>>> res = container.exec_run(cmd, stream=True, demux=True)
>>> next(res.output)
(b'hello stdout\n', None)
>>> next(res.output)
(None, b'hello stderr\n')
>>> next(res.output)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
StopIteration
Finally, with ``stream=False`` and ``demux=True``, the output is a tuple ``(stdout, stderr)``:
>>> res = container.exec_run(cmd, stream=False, demux=True)
>>> res.output
(b'hello stdout\n', b'hello stderr\n')

View File

@ -1,12 +1,8 @@
# Swarm services # Swarm services
> Warning:
> This is a stale document and may contain outdated information.
> Refer to the API docs for updated classes and method signatures.
Starting with Engine version 1.12 (API 1.24), it is possible to manage services Starting with Engine version 1.12 (API 1.24), it is possible to manage services
using the Docker Engine API. Note that the engine needs to be part of a using the Docker Engine API. Note that the engine needs to be part of a
[Swarm cluster](../swarm.html) before you can use the service-related methods. [Swarm cluster](../swarm.rst) before you can use the service-related methods.
## Creating a service ## Creating a service

View File

@ -1,102 +0,0 @@
[build-system]
requires = ["hatchling", "hatch-vcs"]
build-backend = "hatchling.build"
[project]
name = "docker"
dynamic = ["version"]
description = "A Python library for the Docker Engine API."
readme = "README.md"
license = "Apache-2.0"
requires-python = ">=3.8"
maintainers = [
{ name = "Docker Inc.", email = "no-reply@docker.com" },
]
classifiers = [
"Development Status :: 5 - Production/Stable",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Topic :: Software Development",
"Topic :: Utilities",
]
dependencies = [
"requests >= 2.26.0",
"urllib3 >= 1.26.0",
"pywin32>=304; sys_platform == \"win32\"",
]
[project.optional-dependencies]
# ssh feature allows DOCKER_HOST=ssh://... style connections
ssh = [
"paramiko>=2.4.3",
]
# tls is always supported, the feature is a no-op for backwards compatibility
tls = []
# websockets can be used as an alternate container attach mechanism but
# by default docker-py hijacks the TCP connection and does not use Websockets
# unless attach_socket(container, ws=True) is called
websockets = [
"websocket-client >= 1.3.0",
]
# docs are dependencies required to build the ReadTheDocs site
# this is only needed for CI / working on the docs!
docs = [
"myst-parser==0.18.0",
"Sphinx==5.1.1",
]
# dev are dependencies required to test & lint this project
# this is only needed if you are making code changes to docker-py!
dev = [
"coverage==7.2.7",
"pytest==7.4.2",
"pytest-cov==4.1.0",
"pytest-timeout==2.1.0",
"ruff==0.1.8",
]
[project.urls]
Changelog = "https://docker-py.readthedocs.io/en/stable/change-log.html"
Documentation = "https://docker-py.readthedocs.io"
Homepage = "https://github.com/docker/docker-py"
Source = "https://github.com/docker/docker-py"
Tracker = "https://github.com/docker/docker-py/issues"
[tool.hatch.version]
source = "vcs"
[tool.hatch.build.hooks.vcs]
version-file = "docker/_version.py"
[tool.hatch.build.targets.sdist]
include = [
"/docker",
]
[tool.ruff]
target-version = "py38"
extend-select = [
"B",
"C",
"F",
"I",
"UP",
"W",
]
ignore = [
"UP012", # unnecessary `UTF-8` argument (we want to be explicit)
"C901", # too complex (there's a whole bunch of these)
]
[tool.ruff.per-file-ignores]
"**/__init__.py" = ["F401"]

View File

@ -1,5 +1,2 @@
[pytest] [pytest]
addopts = --tb=short -rxs addopts = --tb=short -rxs
junit_suite_name = docker-py
junit_family = xunit2

18
requirements.txt Normal file
View File

@ -0,0 +1,18 @@
appdirs==1.4.3
asn1crypto==0.22.0
backports.ssl-match-hostname==3.5.0.1
cffi==1.10.0
cryptography==1.9
docker-pycreds==0.3.0
enum34==1.1.6
idna==2.5
ipaddress==1.0.18
packaging==16.8
pycparser==2.17
pyOpenSSL==17.0.0
pyparsing==2.2.0
pypiwin32==219; sys_platform == 'win32' and python_version < '3.6'
pypiwin32==220; sys_platform == 'win32' and python_version >= '3.6'
requests==2.14.2
six==1.10.0
websocket-client==0.40.0

View File

@ -3,6 +3,12 @@
# Create the official release # Create the official release
# #
if [ -z "$(command -v pandoc 2> /dev/null)" ]; then
>&2 echo "$0 requires http://pandoc.org/"
>&2 echo "Please install it and make sure it is available on your \$PATH."
exit 2
fi
VERSION=$1 VERSION=$1
REPO=docker/docker-py REPO=docker/docker-py
GITHUB_REPO=git@github.com:$REPO GITHUB_REPO=git@github.com:$REPO
@ -12,30 +18,22 @@ if [ -z $VERSION ]; then
exit 1 exit 1
fi fi
echo "##> Removing stale build files and other untracked files" echo "##> Removing stale build files"
git clean -x -d -i rm -rf ./build || exit 1
test -z "$(git clean -x -d -n)" || exit 1
echo "##> Tagging the release as $VERSION" echo "##> Tagging the release as $VERSION"
git tag $VERSION git tag $VERSION || exit 1
if [[ $? != 0 ]]; then
head_commit=$(git show --pretty=format:%H HEAD)
tag_commit=$(git show --pretty=format:%H $VERSION)
if [[ $head_commit != $tag_commit ]]; then
echo "ERROR: tag already exists, but isn't the current HEAD"
exit 1
fi
fi
if [[ $2 == 'upload' ]]; then if [[ $2 == 'upload' ]]; then
echo "##> Pushing tag to github" echo "##> Pushing tag to github"
git push $GITHUB_REPO $VERSION || exit 1 git push $GITHUB_REPO $VERSION || exit 1
fi fi
echo "##> sdist & wheel" pandoc -f markdown -t rst README.md -o README.rst || exit 1
python setup.py sdist bdist_wheel
if [[ $2 == 'upload' ]]; then if [[ $2 == 'upload' ]]; then
echo '##> Uploading sdist to pypi' echo "##> Uploading sdist to pypi"
twine upload dist/docker-$VERSION* python setup.py sdist bdist_wheel upload
else
echo "##> sdist & wheel"
python setup.py sdist bdist_wheel
fi fi

53
scripts/versions.py Executable file → Normal file
View File

@ -11,24 +11,23 @@ categories = [
'test' 'test'
] ]
STAGES = ['tp', 'beta', 'rc']
class Version(namedtuple('_Version', 'major minor patch rc edition')):
class Version(namedtuple('_Version', 'major minor patch stage edition')):
@classmethod @classmethod
def parse(cls, version): def parse(cls, version):
edition = None edition = None
version = version.lstrip('v') version = version.lstrip('v')
version, _, stage = version.partition('-') version, _, rc = version.partition('-')
if stage: if rc:
if not any(marker in stage for marker in STAGES): if 'rc' not in rc:
edition = stage edition = rc
stage = None rc = None
elif '-' in stage: elif '-' in rc:
edition, stage = stage.split('-', 1) edition, rc = rc.split('-')
major, minor, patch = version.split('.', 2)
return cls(major, minor, patch, stage, edition) major, minor, patch = version.split('.', 3)
return cls(major, minor, patch, rc, edition)
@property @property
def major_minor(self): def major_minor(self):
@ -39,22 +38,14 @@ class Version(namedtuple('_Version', 'major minor patch stage edition')):
"""Return a representation that allows this object to be sorted """Return a representation that allows this object to be sorted
correctly with the default comparator. correctly with the default comparator.
""" """
# non-GA releases should appear before GA releases # rc releases should appear before official releases
# Order: tp -> beta -> rc -> GA rc = (0, self.rc) if self.rc else (1, )
if self.stage: return (int(self.major), int(self.minor), int(self.patch)) + rc
for st in STAGES:
if st in self.stage:
stage = (STAGES.index(st), self.stage)
break
else:
stage = (len(STAGES),)
return (int(self.major), int(self.minor), int(self.patch)) + stage
def __str__(self): def __str__(self):
stage = f'-{self.stage}' if self.stage else '' rc = '-{}'.format(self.rc) if self.rc else ''
edition = f'-{self.edition}' if self.edition else '' edition = '-{}'.format(self.edition) if self.edition else ''
return '.'.join(map(str, self[:3])) + edition + stage return '.'.join(map(str, self[:3])) + edition + rc
def main(): def main():
@ -62,9 +53,13 @@ def main():
for url in [base_url.format(cat) for cat in categories]: for url in [base_url.format(cat) for cat in categories]:
res = requests.get(url) res = requests.get(url)
content = res.text content = res.text
versions = [Version.parse(v) for v in re.findall( versions = [
r'"docker-([0-9]+\.[0-9]+\.[0-9]+-?.*)\.tgz"', content Version.parse(
)] v.strip('"').lstrip('docker-').rstrip('.tgz').rstrip('-x86_64')
) for v in re.findall(
r'"docker-[0-9]+\.[0-9]+\.[0-9]+-.*tgz"', content
)
]
sorted_versions = sorted( sorted_versions = sorted(
versions, reverse=True, key=operator.attrgetter('order') versions, reverse=True, key=operator.attrgetter('order')
) )

6
setup.cfg Normal file
View File

@ -0,0 +1,6 @@
[bdist_wheel]
universal = 1
[metadata]
description_file = README.rst
license = Apache License 2.0

89
setup.py Normal file
View File

@ -0,0 +1,89 @@
#!/usr/bin/env python
from __future__ import print_function
import codecs
import os
from setuptools import setup, find_packages
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
requirements = [
'requests >= 2.14.2, != 2.18.0',
'six >= 1.4.0',
'websocket-client >= 0.32.0',
'docker-pycreds >= 0.3.0'
]
extras_require = {
':python_version < "3.5"': 'backports.ssl_match_hostname >= 3.5',
# While not imported explicitly, the ipaddress module is required for
# ssl_match_hostname to verify hosts match with certificates via
# ServerAltname: https://pypi.python.org/pypi/backports.ssl_match_hostname
':python_version < "3.3"': 'ipaddress >= 1.0.16',
# win32 APIs if on Windows (required for npipe support)
# Python 3.6 is only compatible with v220 ; Python < 3.5 is not supported
# on v220 ; ALL versions are broken for v222 (as of 2018-01-26)
':sys_platform == "win32" and python_version < "3.6"': 'pypiwin32==219',
':sys_platform == "win32" and python_version >= "3.6"': 'pypiwin32==220',
# If using docker-py over TLS, highly recommend this option is
# pip-installed or pinned.
# TODO: if pip installing both "requests" and "requests[security]", the
# extra package from the "security" option are not installed (see
# https://github.com/pypa/pip/issues/4391). Once that's fixed, instead of
# installing the extra dependencies, install the following instead:
# 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'
'tls': ['pyOpenSSL>=0.14', 'cryptography>=1.3.4', 'idna>=2.0.0'],
}
version = None
exec(open('docker/version.py').read())
with open('./test-requirements.txt') as test_reqs_txt:
test_requirements = [line for line in test_reqs_txt]
long_description = ''
try:
with codecs.open('./README.rst', encoding='utf-8') as readme_rst:
long_description = readme_rst.read()
except IOError:
# README.rst is only generated on release. Its absence should not prevent
# setup.py from working properly.
pass
setup(
name="docker",
version=version,
description="A Python library for the Docker Engine API.",
long_description=long_description,
url='https://github.com/docker/docker-py',
packages=find_packages(exclude=["tests.*", "tests"]),
install_requires=requirements,
tests_require=test_requirements,
extras_require=extras_require,
zip_safe=False,
test_suite='tests',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License',
],
maintainer='Joffrey F',
maintainer_email='joffrey@docker.com',
)

6
test-requirements.txt Normal file
View File

@ -0,0 +1,6 @@
coverage==3.7.1
flake8==3.4.1
mock==1.0.1
pytest==2.9.1
pytest-cov==2.1.0
pytest-timeout==1.2.1

Some files were not shown because too many files have changed in this diff Show More