Compare commits

main...1.8.0-rc2

No commits in common. "main" and "1.8.0-rc2" have entirely different histories.

225 changed files with 9478 additions and 31346 deletions

.dockerignore

@@ -13,4 +13,5 @@ html/*
 __pycache__
 # Compiled Documentation
-docs/_build
+site/
+Makefile

.editorconfig

@@ -9,6 +9,3 @@ max_line_length = 80
 [*.md]
 trim_trailing_whitespace = false
-
-[*.{yaml,yml}]
-indent_size = 2

.github/workflows/ci.yml

@@ -1,72 +0,0 @@
name: Python package

on: [push, pull_request]

env:
  DOCKER_BUILDKIT: '1'
  FORCE_COLOR: 1

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: '3.x'
      - run: pip install -U ruff==0.1.8
      - name: Run ruff
        run: ruff docker tests

  build:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: '3.x'
      - run: pip3 install build && python -m build .
      - uses: actions/upload-artifact@v4
        with:
          name: dist
          path: dist

  unit-tests:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
          allow-prereleases: true
      - name: Install dependencies
        run: |
          python3 -m pip install --upgrade pip
          pip3 install '.[ssh,dev]'
      - name: Run unit tests
        run: |
          docker logout
          rm -rf ~/.docker
          py.test -v --cov=docker tests/unit

  integration-tests:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        variant: [ "integration-dind", "integration-dind-ssl", "integration-dind-ssh" ]
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true
      - name: make ${{ matrix.variant }}
        run: |
          docker logout
          rm -rf ~/.docker
          make ${{ matrix.variant }}

.github/workflows/release.yml

@@ -1,53 +0,0 @@
name: Release

on:
  workflow_dispatch:
    inputs:
      tag:
        description: "Release Tag WITHOUT `v` Prefix (e.g. 6.0.0)"
        required: true
      dry-run:
        description: 'Dry run'
        required: false
        type: boolean
        default: true

env:
  DOCKER_BUILDKIT: '1'
  FORCE_COLOR: 1

jobs:
  publish:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: '3.x'

      - name: Generate Package
        run: |
          pip3 install build
          python -m build .
        env:
          # This is also supported by Hatch; see
          # https://github.com/ofek/hatch-vcs#version-source-environment-variables
          SETUPTOOLS_SCM_PRETEND_VERSION: ${{ inputs.tag }}

      - name: Publish to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
        if: '! inputs.dry-run'
        with:
          password: ${{ secrets.PYPI_API_TOKEN }}

      - name: Create GitHub release
        uses: ncipollo/release-action@v1
        if: '! inputs.dry-run'
        with:
          artifacts: "dist/*"
          generateReleaseNotes: true
          draft: true
          commit: ${{ github.sha }}
          token: ${{ secrets.GITHUB_TOKEN }}
          tag: ${{ inputs.tag }}

.gitignore

@@ -10,13 +10,9 @@ dist
 html/*
 # Compiled Documentation
-_build/
+site/
 README.rst
-
-# setuptools_scm
-_version.py
-
 env/
 venv/
 .idea/
+*.iml

.readthedocs.yml

@@ -1,17 +0,0 @@
version: 2

sphinx:
  configuration: docs/conf.py

build:
  os: ubuntu-22.04
  tools:
    python: '3.12'

python:
  install:
    - method: pip
      path: .
      extra_requirements:
        - ssh
        - docs

.travis.yml

@@ -0,0 +1,14 @@
sudo: false
language: python
python:
- "2.7"
env:
- TOX_ENV=py26
- TOX_ENV=py27
- TOX_ENV=py33
- TOX_ENV=py34
- TOX_ENV=flake8
install:
- pip install tox
script:
- tox -e $TOX_ENV

CONTRIBUTING.md

@@ -1,8 +1,5 @@
 # Contributing guidelines
-
-See the [Docker contributing guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md).
-The following is specific to Docker SDK for Python.
 
 Thank you for your interest in the project. We look forward to your
 contribution. In order to make the process as fast and streamlined as possible,
 here is a set of guidelines we recommend you follow.
@@ -10,29 +7,28 @@ here is a set of guidelines we recommend you follow.
 ## Reporting issues
 
 We do our best to ensure bugs don't creep up in our releases, but some may
-still slip through. If you encounter one while using the SDK, please
-create an issue
-[in the tracker](https://github.com/docker/docker-py/issues/new) with
+still slip through. If you encounter one while using docker-py, please create
+an issue [in the tracker](https://github.com/docker/docker-py/issues/new) with
 the following information:
 
-- SDK version, Docker version and python version
+- docker-py version, docker version and python version
 ```
-pip freeze | grep docker && python --version && docker version
+pip freeze | grep docker-py && python --version && docker version
 ```
 - OS, distribution and OS version
 - The issue you're encountering including a stacktrace if applicable
 - If possible, steps or a code snippet to reproduce the issue
 
 To save yourself time, please be sure to check our
-[documentation](https://docker-py.readthedocs.io/) and use the
-[search function](https://github.com/docker/docker-py/search) to find
-out if it has already been addressed, or is currently being looked at.
+[documentation](http://docker-py.readthedocs.org/) and use the
+[search function](https://github.com/docker/docker-py/search) to find out if
+it has already been addressed, or is currently being looked at.
 
 ## Submitting pull requests
 
 Do you have a fix for an existing issue, or want to add a new functionality
-to the SDK? We happily welcome pull requests. Here are a few tips to
-make the review process easier on both the maintainers and yourself.
+to docker-py? We happily welcome pull requests. Here are a few tips to make
+the review process easier on both the maintainers and yourself.
 
 ### 1. Sign your commits
@@ -44,7 +40,7 @@ paragraph in the Docker contribution guidelines.
 Before we can review your pull request, please ensure that nothing has been
 broken by your changes by running the test suite. You can do so simply by
 running `make test` in the project root. This also includes coding style using
-`ruff`
+`flake8`
 
 ### 3. Write clear, self-contained commits
@@ -88,10 +84,11 @@ to reach out and ask questions. We will do our best to answer and help out.
 ## Development environment
 
-If you're looking contribute to Docker SDK for Python but are new to the
-project or Python, here are the steps to get you started.
+If you're looking contribute to docker-py but are new to the project or Python,
+here are the steps to get you started.
 
-1. Fork https://github.com/docker/docker-py to your username.
+1. Fork [https://github.com/docker/docker-py](https://github.com/docker/docker-py)
+   to your username.
 2. Clone your forked repository locally with
    `git clone git@github.com:yourusername/docker-py.git`.
 3. Configure a
@@ -103,33 +100,3 @@ project or Python, here are the steps to get you started.
 5. Run `python setup.py develop` to install the dev version of the project
    and required dependencies. We recommend you do so inside a
    [virtual environment](http://docs.python-guide.org/en/latest/dev/virtualenvs)
-
-## Running the tests & Code Quality
-
-To get the source source code and run the unit tests, run:
-```
-$ git clone git://github.com/docker/docker-py.git
-$ cd docker-py
-$ make test
-```
-
-## Building the docs
-
-```
-$ make docs
-$ open _build/index.html
-```
-
-## Release Checklist
-
-Before a new release, please go through the following checklist:
-
-* Bump version in docker/version.py
-* Add a release note in docs/change_log.md
-* Git tag the version
-* Upload to pypi
-
-## Vulnerability Reporting
-
-For any security issues, please do NOT file an issue or pull request on github!
-Please contact [security@docker.com](mailto:security@docker.com) or read [the
-Docker security page](https://www.docker.com/resources/security/).

Dockerfile

@@ -1,13 +1,14 @@
-# syntax=docker/dockerfile:1
-
-ARG PYTHON_VERSION=3.12
-
-FROM python:${PYTHON_VERSION}
-
-WORKDIR /src
-COPY . .
-
-ARG VERSION=0.0.0.dev0
-RUN --mount=type=cache,target=/cache/pip \
-    PIP_CACHE_DIR=/cache/pip \
-    SETUPTOOLS_SCM_PRETEND_VERSION=${VERSION} \
-    pip install .[ssh]
+FROM python:2.7
+MAINTAINER Joffrey F <joffrey@docker.com>
+
+RUN mkdir /home/docker-py
+WORKDIR /home/docker-py
+
+ADD requirements.txt /home/docker-py/requirements.txt
+RUN pip install -r requirements.txt
+
+ADD test-requirements.txt /home/docker-py/test-requirements.txt
+RUN pip install -r test-requirements.txt
+
+ADD . /home/docker-py
+RUN pip install .

Dockerfile-docs

@@ -1,22 +0,0 @@
# syntax=docker/dockerfile:1
ARG PYTHON_VERSION=3.12
FROM python:${PYTHON_VERSION}
ARG uid=1000
ARG gid=1000
RUN addgroup --gid $gid sphinx \
&& useradd --uid $uid --gid $gid -M sphinx
WORKDIR /src
COPY . .
ARG VERSION=0.0.0.dev0
RUN --mount=type=cache,target=/cache/pip \
PIP_CACHE_DIR=/cache/pip \
SETUPTOOLS_SCM_PRETEND_VERSION=${VERSION} \
pip install .[ssh,docs]
USER sphinx

Dockerfile-py3

@@ -0,0 +1,14 @@
FROM python:3.4
MAINTAINER Joffrey F <joffrey@docker.com>
RUN mkdir /home/docker-py
WORKDIR /home/docker-py
ADD requirements.txt /home/docker-py/requirements.txt
RUN pip install -r requirements.txt
ADD test-requirements.txt /home/docker-py/test-requirements.txt
RUN pip install -r test-requirements.txt
ADD . /home/docker-py
RUN pip install .

LICENSE

@@ -176,7 +176,18 @@
    END OF TERMS AND CONDITIONS
 
-   Copyright 2016 Docker, Inc.
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.

MAINTAINERS

@@ -1,4 +1,4 @@
-# Docker SDK for Python maintainers file
+# Docker-py maintainers file
 #
 # This file describes who runs the docker/docker-py project and how.
 # This is a living document - if you see something out of date or missing, speak up!
@@ -11,19 +11,11 @@
 [Org]
 	[Org."Core maintainers"]
 		people = [
-			"glours",
-			"milas",
-		]
-	[Org.Alumni]
-		people = [
-			"aiordache",
 			"aanand",
-			"bfirsh",
 			"dnephin",
 			"mnowster",
 			"mpetazzoni",
 			"shin-",
-			"ulyssessouza",
 		]
 
 [people]
@@ -39,31 +31,11 @@
 	Email = "aanand@docker.com"
 	GitHub = "aanand"
 
-	[people.aiordache]
-	Name = "Anca Iordache"
-	Email = "anca.iordache@docker.com"
-	GitHub = "aiordache"
-
-	[people.bfirsh]
-	Name = "Ben Firshman"
-	Email = "b@fir.sh"
-	GitHub = "bfirsh"
-
 	[people.dnephin]
 	Name = "Daniel Nephin"
 	Email = "dnephin@gmail.com"
 	GitHub = "dnephin"
 
-	[people.glours]
-	Name = "Guillaume Lours"
-	Email = "705411+glours@users.noreply.github.com"
-	GitHub = "glours"
-
-	[people.milas]
-	Name = "Milas Bowman"
-	Email = "devnull@milas.dev"
-	GitHub = "milas"
-
 	[people.mnowster]
 	Name = "Mazz Mosley"
 	Email = "mazz@houseofmnowster.com"
@@ -78,8 +50,3 @@
 	Name = "Joffrey F"
 	Email = "joffrey@docker.com"
 	GitHub = "shin-"
-
-	[people.ulyssessouza]
-	Name = "Ulysses Domiciano Souza"
-	Email = "ulysses.souza@docker.com"
-	GitHub = "ulyssessouza"

MANIFEST.in

@@ -0,0 +1,7 @@
include test-requirements.txt
include requirements.txt
include README.md
include README.rst
include LICENSE
recursive-include tests *.py
recursive-include tests/unit/testdata *

Makefile

@@ -1,189 +1,48 @@
-TEST_API_VERSION ?= 1.45
-TEST_ENGINE_VERSION ?= 26.1
-
-ifeq ($(OS),Windows_NT)
-	PLATFORM := Windows
-else
-	PLATFORM := $(shell sh -c 'uname -s 2>/dev/null || echo Unknown')
-endif
-
-ifeq ($(PLATFORM),Linux)
-	uid_args := "--build-arg uid=$(shell id -u) --build-arg gid=$(shell id -g)"
-endif
-
-SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER ?= $(shell git describe --match '[0-9]*' --dirty='.m' --always --tags 2>/dev/null | sed -r 's/-([0-9]+)/.dev\1/' | sed 's/-/+/')
-ifeq ($(SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER),)
-	SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER = "0.0.0.dev0"
-endif
-
-.PHONY: all
+.PHONY: all build test integration-test unit-test build-py3 unit-test-py3 integration-test-py3
+
 all: test
 
-.PHONY: clean
 clean:
-	-docker rm -f dpy-dind dpy-dind-certs dpy-dind-ssl
-	find -name "__pycache__" | xargs rm -rf
-
-.PHONY: build-dind-ssh
-build-dind-ssh:
-	docker build \
-		--pull \
-		-t docker-dind-ssh \
-		-f tests/Dockerfile-ssh-dind \
-		--build-arg VERSION=${SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER} \
-		--build-arg ENGINE_VERSION=${TEST_ENGINE_VERSION} \
-		--build-arg API_VERSION=${TEST_API_VERSION} \
-		.
-
-.PHONY: build
-build:
-	docker build \
-		--pull \
-		-t docker-sdk-python3 \
-		-f tests/Dockerfile \
-		--build-arg VERSION=${SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER} \
-		.
-
-.PHONY: build-docs
-build-docs:
-	docker build \
-		-t docker-sdk-python-docs \
-		-f Dockerfile-docs \
-		--build-arg VERSION=${SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER} \
-		$(uid_args) \
-		.
-
-.PHONY: build-dind-certs
-build-dind-certs:
-	docker build \
-		-t dpy-dind-certs \
-		-f tests/Dockerfile-dind-certs \
-		--build-arg VERSION=${SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER} \
-		.
-
-.PHONY: test
-test: ruff unit-test integration-dind integration-dind-ssl
-
-.PHONY: unit-test
-unit-test: build
-	docker run -t --rm docker-sdk-python3 py.test tests/unit
-
-.PHONY: integration-test
-integration-test: build
-	docker run -t --rm -v /var/run/docker.sock:/var/run/docker.sock docker-sdk-python3 py.test -v tests/integration/${file}
-
-.PHONY: setup-network
-setup-network:
-	docker network inspect dpy-tests || docker network create dpy-tests
-
-.PHONY: integration-dind
-integration-dind: build setup-network
-	docker rm -vf dpy-dind || :
-	docker run \
-		--detach \
-		--name dpy-dind \
-		--network dpy-tests \
-		--pull=always \
-		--privileged \
-		docker:${TEST_ENGINE_VERSION}-dind \
-		dockerd -H tcp://0.0.0.0:2375 --experimental
-	# Wait for Docker-in-Docker to come to life
-	docker run \
-		--network dpy-tests \
-		--rm \
-		--tty \
-		busybox \
-		sh -c 'while ! nc -z dpy-dind 2375; do sleep 1; done'
-	docker run \
-		--env="DOCKER_HOST=tcp://dpy-dind:2375" \
-		--env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}" \
-		--network dpy-tests \
-		--rm \
-		--tty \
-		docker-sdk-python3 \
-		py.test tests/integration/${file}
+	rm -rf tests/__pycache__
+	rm -rf tests/*/__pycache__
 	docker rm -vf dpy-dind
 
-.PHONY: integration-dind-ssh
-integration-dind-ssh: build-dind-ssh build setup-network
-	docker rm -vf dpy-dind-ssh || :
-	docker run -d --network dpy-tests --name dpy-dind-ssh --privileged \
-		docker-dind-ssh dockerd --experimental
-	# start SSH daemon for known key
-	docker exec dpy-dind-ssh sh -c "/usr/sbin/sshd -h /etc/ssh/known_ed25519 -p 22"
-	docker exec dpy-dind-ssh sh -c "/usr/sbin/sshd -h /etc/ssh/unknown_ed25519 -p 2222"
-	docker run \
-		--tty \
-		--rm \
-		--env="DOCKER_HOST=ssh://dpy-dind-ssh" \
-		--env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}" \
-		--env="UNKNOWN_DOCKER_SSH_HOST=ssh://dpy-dind-ssh:2222" \
-		--network dpy-tests \
-		docker-sdk-python3 py.test tests/ssh/${file}
-	docker rm -vf dpy-dind-ssh
-
-.PHONY: integration-dind-ssl
-integration-dind-ssl: build-dind-certs build setup-network
-	docker rm -vf dpy-dind-certs dpy-dind-ssl || :
+build:
+	docker build -t docker-py .
+
+build-py3:
+	docker build -t docker-py3 -f Dockerfile-py3 .
+
+build-dind-certs:
+	docker build -t dpy-dind-certs -f tests/Dockerfile-dind-certs .
+
+test: flake8 unit-test unit-test-py3 integration-dind integration-dind-ssl
+
+unit-test: build
+	docker run docker-py py.test tests/unit
+
+unit-test-py3: build-py3
+	docker run docker-py3 py.test tests/unit
+
+integration-test: build
+	docker run -v /var/run/docker.sock:/var/run/docker.sock docker-py py.test tests/integration
+
+integration-test-py3: build-py3
+	docker run -v /var/run/docker.sock:/var/run/docker.sock docker-py3 py.test tests/integration
+
+integration-dind: build build-py3
+	docker rm -vf dpy-dind || :
+	docker run -d --name dpy-dind --env="DOCKER_HOST=tcp://localhost:2375" --privileged dockerswarm/dind:1.10.3 docker daemon -H tcp://0.0.0.0:2375
+	docker run --env="DOCKER_HOST=tcp://docker:2375" --link=dpy-dind:docker docker-py py.test tests/integration
+	docker run --env="DOCKER_HOST=tcp://docker:2375" --link=dpy-dind:docker docker-py3 py.test tests/integration
+	docker rm -vf dpy-dind
+
+integration-dind-ssl: build-dind-certs build build-py3
 	docker run -d --name dpy-dind-certs dpy-dind-certs
-	docker run \
-		--detach \
-		--env="DOCKER_CERT_PATH=/certs" \
-		--env="DOCKER_HOST=tcp://localhost:2375" \
-		--env="DOCKER_TLS_VERIFY=1" \
-		--name dpy-dind-ssl \
-		--network dpy-tests \
-		--network-alias docker \
-		--pull=always \
-		--privileged \
-		--volume /tmp \
-		--volumes-from dpy-dind-certs \
-		docker:${TEST_ENGINE_VERSION}-dind \
-		dockerd \
-		--tlsverify \
-		--tlscacert=/certs/ca.pem \
-		--tlscert=/certs/server-cert.pem \
-		--tlskey=/certs/server-key.pem \
-		-H tcp://0.0.0.0:2375 \
-		--experimental
-	# Wait for Docker-in-Docker to come to life
-	docker run \
-		--network dpy-tests \
-		--rm \
-		--tty \
-		busybox \
-		sh -c 'while ! nc -z dpy-dind-ssl 2375; do sleep 1; done'
-	docker run \
-		--env="DOCKER_CERT_PATH=/certs" \
-		--env="DOCKER_HOST=tcp://docker:2375" \
-		--env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}" \
-		--env="DOCKER_TLS_VERIFY=1" \
-		--network dpy-tests \
-		--rm \
-		--volumes-from dpy-dind-ssl \
-		--tty \
-		docker-sdk-python3 \
-		py.test tests/integration/${file}
+	docker run -d --env="DOCKER_HOST=tcp://localhost:2375" --env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs" --volumes-from dpy-dind-certs --name dpy-dind-ssl -v /tmp --privileged dockerswarm/dind:1.10.3 docker daemon --tlsverify --tlscacert=/certs/ca.pem --tlscert=/certs/server-cert.pem --tlskey=/certs/server-key.pem -H tcp://0.0.0.0:2375
+	docker run --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375" --env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs" --link=dpy-dind-ssl:docker docker-py py.test tests/integration
+	docker run --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375" --env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs" --link=dpy-dind-ssl:docker docker-py3 py.test tests/integration
 	docker rm -vf dpy-dind-ssl dpy-dind-certs
 
-.PHONY: ruff
-ruff: build
-	docker run -t --rm docker-sdk-python3 ruff docker tests
-
-.PHONY: docs
-docs: build-docs
-	docker run --rm -t -v `pwd`:/src docker-sdk-python-docs sphinx-build docs docs/_build
-
-.PHONY: shell
-shell: build
-	docker run -it -v /var/run/docker.sock:/var/run/docker.sock docker-sdk-python3 python
+flake8: build
+	docker run docker-py flake8 docker tests

README.md

@@ -1,76 +1,26 @@
-# Docker SDK for Python
-
-[![Build Status](https://github.com/docker/docker-py/actions/workflows/ci.yml/badge.svg)](https://github.com/docker/docker-py/actions/workflows/ci.yml)
-
-A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc.
-
-## Installation
-
-The latest stable version [is available on PyPI](https://pypi.python.org/pypi/docker/). Install with pip:
-
-    pip install docker
-
-> Older versions (< 6.0) required installing `docker[tls]` for SSL/TLS support.
-> This is no longer necessary and is a no-op, but is supported for backwards compatibility.
-
-## Usage
-
-Connect to Docker using the default socket or the configuration in your environment:
-
-```python
-import docker
-client = docker.from_env()
-```
-
-You can run containers:
-
-```python
->>> client.containers.run("ubuntu:latest", "echo hello world")
-'hello world\n'
-```
-
-You can run containers in the background:
-
-```python
->>> client.containers.run("bfirsh/reticulate-splines", detach=True)
-<Container '45e6d2de7c54'>
-```
-
-You can manage containers:
-
-```python
->>> client.containers.list()
-[<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
-
->>> container = client.containers.get('45e6d2de7c54')
-
->>> container.attrs['Config']['Image']
-"bfirsh/reticulate-splines"
-
->>> container.logs()
-"Reticulating spline 1...\n"
-
->>> container.stop()
-```
-
-You can stream logs:
-
-```python
->>> for line in container.logs(stream=True):
-...   print(line.strip())
-Reticulating spline 2...
-Reticulating spline 3...
-...
-```
-
-You can manage images:
-
-```python
->>> client.images.pull('nginx')
-<Image 'nginx'>
-
->>> client.images.list()
-[<Image 'ubuntu'>, <Image 'nginx'>, ...]
-```
-
-[Read the full documentation](https://docker-py.readthedocs.io) to see everything you can do.
+docker-py
+=========
+
+[![Build Status](https://travis-ci.org/docker/docker-py.png)](https://travis-ci.org/docker/docker-py)
+
+A Python library for the Docker Remote API. It does everything the `docker` command does, but from within Python – run containers, manage them, pull/push images, etc.
+
+Installation
+------------
+
+The latest stable version is always available on PyPi.
+
+    pip install docker-py
+
+Documentation
+------------
+
+[![Documentation Status](https://readthedocs.org/projects/docker-py/badge/?version=latest)](https://readthedocs.org/projects/docker-py/?badge=latest)
+
+[Read the full documentation here.](http://docker-py.readthedocs.org/en/latest/).
+The source is available in the `docs/` directory.
+
+License
+-------
+Docker is licensed under the Apache License, Version 2.0. See LICENSE for full license text

docker/__init__.py

@@ -1,7 +1,20 @@
-from .api import APIClient
-from .client import DockerClient, from_env
-from .context import Context, ContextAPI
-from .tls import TLSConfig
-from .version import __version__
-
-__title__ = 'docker'
+# Copyright 2013 dotCloud inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .version import version, version_info
+
+__version__ = version
+__title__ = 'docker-py'
+
+from .client import Client, AutoVersionClient, from_env  # flake8: noqa

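The two `docker/__init__.py` variants above capture the API split this compare surfaces: the 1.8.0-era package exported a single low-level `Client` (plus `AutoVersionClient`), while modern releases export the high-level `DockerClient`/`from_env` alongside the low-level `APIClient`. A minimal sketch of what each style looks like from calling code; the image name and socket URL are illustrative, and the old-style calls are commented out because they only work against docker-py 1.x:

```python
import docker

# Modern API (the "main" side of this diff): high-level objects.
client = docker.from_env()
print(client.containers.run("ubuntu:latest", "echo hello world"))

# 1.8.0-era API (the "1.8.0-rc2" side): one low-level Client class.
# old_client = docker.Client(base_url="unix://var/run/docker.sock")
# print(old_client.containers())  # returns a list of dicts, not objects
```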
docker/api/__init__.py

@@ -1 +1,8 @@
-from .client import APIClient
+# flake8: noqa
+from .build import BuildApiMixin
+from .container import ContainerApiMixin
+from .daemon import DaemonApiMixin
+from .exec_api import ExecApiMixin
+from .image import ImageApiMixin
+from .volume import VolumeApiMixin
+from .network import NetworkApiMixin

docker/api/build.py

@@ -1,140 +1,38 @@
-import json
 import logging
 import os
-import random
+import re
+import json
 
-from .. import auth, constants, errors, utils
+from .. import constants
+from .. import errors
+from .. import auth
+from .. import utils
 
 log = logging.getLogger(__name__)
 
 
-class BuildApiMixin:
+class BuildApiMixin(object):
     def build(self, path=None, tag=None, quiet=False, fileobj=None,
-              nocache=False, rm=False, timeout=None,
+              nocache=False, rm=False, stream=False, timeout=None,
               custom_context=False, encoding=None, pull=False,
               forcerm=False, dockerfile=None, container_limits=None,
-              decode=False, buildargs=None, gzip=False, shmsize=None,
-              labels=None, cache_from=None, target=None, network_mode=None,
-              squash=None, extra_hosts=None, platform=None, isolation=None,
-              use_config_proxy=True):
-        """
-        Similar to the ``docker build`` command. Either ``path`` or ``fileobj``
-        needs to be set. ``path`` can be a local path (to a directory
-        containing a Dockerfile) or a remote URL. ``fileobj`` must be a
-        readable file-like object to a Dockerfile.
-
-        If you have a tar file for the Docker build context (including a
-        Dockerfile) already, pass a readable file-like object to ``fileobj``
-        and also pass ``custom_context=True``. If the stream is compressed
-        also, set ``encoding`` to the correct value (e.g ``gzip``).
-
-        Example:
-            >>> from io import BytesIO
-            >>> from docker import APIClient
-            >>> dockerfile = '''
-            ... # Shared Volume
-            ... FROM busybox:buildroot-2014.02
-            ... VOLUME /data
-            ... CMD ["/bin/sh"]
-            ... '''
-            >>> f = BytesIO(dockerfile.encode('utf-8'))
-            >>> cli = APIClient(base_url='tcp://127.0.0.1:2375')
-            >>> response = [line for line in cli.build(
-            ...     fileobj=f, rm=True, tag='yourname/volume'
-            ... )]
-            >>> response
-            ['{"stream":" ---\\u003e a9eb17255234\\n"}',
-             '{"stream":"Step 1 : VOLUME /data\\n"}',
-             '{"stream":" ---\\u003e Running in abdc1e6896c6\\n"}',
-             '{"stream":" ---\\u003e 713bca62012e\\n"}',
-             '{"stream":"Removing intermediate container abdc1e6896c6\\n"}',
-             '{"stream":"Step 2 : CMD [\\"/bin/sh\\"]\\n"}',
-             '{"stream":" ---\\u003e Running in dba30f2a1a7e\\n"}',
-             '{"stream":" ---\\u003e 032b8b2855fc\\n"}',
-             '{"stream":"Removing intermediate container dba30f2a1a7e\\n"}',
-             '{"stream":"Successfully built 032b8b2855fc\\n"}']
-
-        Args:
-            path (str): Path to the directory containing the Dockerfile
-            fileobj: A file object to use as the Dockerfile. (Or a file-like
-                object)
-            tag (str): A tag to add to the final image
-            quiet (bool): Whether to return the status
-            nocache (bool): Don't use the cache when set to ``True``
-            rm (bool): Remove intermediate containers. The ``docker build``
-                command now defaults to ``--rm=true``, but we have kept the old
-                default of `False` to preserve backward compatibility
-            timeout (int): HTTP timeout
-            custom_context (bool): Optional if using ``fileobj``
-            encoding (str): The encoding for a stream. Set to ``gzip`` for
-                compressing
-            pull (bool): Downloads any updates to the FROM image in Dockerfiles
-            forcerm (bool): Always remove intermediate containers, even after
-                unsuccessful builds
-            dockerfile (str): path within the build context to the Dockerfile
-            gzip (bool): If set to ``True``, gzip compression/encoding is used
-            buildargs (dict): A dictionary of build arguments
-            container_limits (dict): A dictionary of limits applied to each
-                container created by the build process. Valid keys:
-
-                - memory (int): set memory limit for build
-                - memswap (int): Total memory (memory + swap), -1 to disable
-                    swap
-                - cpushares (int): CPU shares (relative weight)
-                - cpusetcpus (str): CPUs in which to allow execution, e.g.,
-                    ``"0-3"``, ``"0,1"``
-            decode (bool): If set to ``True``, the returned stream will be
-                decoded into dicts on the fly. Default ``False``
-            shmsize (int): Size of `/dev/shm` in bytes. The size must be
-                greater than 0. If omitted the system uses 64MB
-            labels (dict): A dictionary of labels to set on the image
-            cache_from (:py:class:`list`): A list of images used for build
-                cache resolution
-            target (str): Name of the build-stage to build in a multi-stage
-                Dockerfile
-            network_mode (str): networking mode for the run commands during
-                build
-            squash (bool): Squash the resulting images layers into a
-                single layer.
-            extra_hosts (dict): Extra hosts to add to /etc/hosts in building
-                containers, as a mapping of hostname to IP address.
-            platform (str): Platform in the format ``os[/arch[/variant]]``
-            isolation (str): Isolation technology used during build.
-                Default: `None`.
-            use_config_proxy (bool): If ``True``, and if the docker client
-                configuration file (``~/.docker/config.json`` by default)
-                contains a proxy configuration, the corresponding environment
-                variables will be set in the container being built.
-
-        Returns:
-            A generator for the build output.
-
-        Raises:
-            :py:class:`docker.errors.APIError`
-                If the server returns an error.
-            ``TypeError``
-                If neither ``path`` nor ``fileobj`` is specified.
-        """
-        remote = context = None
-        headers = {}
+              decode=False, buildargs=None, gzip=False):
+        remote = context = headers = None
         container_limits = container_limits or {}
-        buildargs = buildargs or {}
         if path is None and fileobj is None:
             raise TypeError("Either path or fileobj needs to be provided.")
         if gzip and encoding is not None:
             raise errors.DockerException(
                 'Can not use custom encoding if gzip is enabled'
             )
-        if tag is not None:
-            if not utils.match_tag(tag):
-                raise errors.DockerException(
-                    f"invalid tag '{tag}': invalid reference format"
-                )
         for key in container_limits.keys():
             if key not in constants.CONTAINER_LIMITS_KEYS:
                 raise errors.DockerException(
-                    f'Invalid container_limits key {key}'
+                    'Invalid container_limits key {0}'.format(key)
                 )
 
         if custom_context:
             if not fileobj:
                 raise TypeError("You must specify fileobj with custom_context")
@@ -150,17 +48,24 @@ class BuildApiMixin:
             dockerignore = os.path.join(path, '.dockerignore')
             exclude = None
             if os.path.exists(dockerignore):
-                with open(dockerignore) as f:
-                    exclude = list(filter(
-                        lambda x: x != '' and x[0] != '#',
-                        [line.strip() for line in f.read().splitlines()]
-                    ))
-            dockerfile = process_dockerfile(dockerfile, path)
+                with open(dockerignore, 'r') as f:
+                    exclude = list(filter(bool, f.read().splitlines()))
             context = utils.tar(
                 path, exclude=exclude, dockerfile=dockerfile, gzip=gzip
             )
             encoding = 'gzip' if gzip else encoding
+
+        if utils.compare_version('1.8', self._version) >= 0:
+            stream = True
+
+        if dockerfile and utils.compare_version('1.17', self._version) < 0:
+            raise errors.InvalidVersion(
+                'dockerfile was only introduced in API version 1.17'
+            )
+
+        if utils.compare_version('1.19', self._version) < 0:
+            pull = 1 if pull else 0
 
         u = self._url('/build')
         params = {
             't': tag,
@@ -174,209 +79,70 @@ class BuildApiMixin:
         }
         params.update(container_limits)
 
-        if use_config_proxy:
-            proxy_args = self._proxy_configs.get_environment()
-            for k, v in proxy_args.items():
-                buildargs.setdefault(k, v)
         if buildargs:
-            params.update({'buildargs': json.dumps(buildargs)})
-
-        if shmsize:
-            if utils.version_gte(self._version, '1.22'):
-                params.update({'shmsize': shmsize})
+            if utils.version_gte(self._version, '1.21'):
+                params.update({'buildargs': json.dumps(buildargs)})
             else:
                 raise errors.InvalidVersion(
-                    'shmsize was only introduced in API version 1.22'
+                    'buildargs was only introduced in API version 1.21'
                 )
 
-        if labels:
-            if utils.version_gte(self._version, '1.23'):
-                params.update({'labels': json.dumps(labels)})
-            else:
-                raise errors.InvalidVersion(
-                    'labels was only introduced in API version 1.23'
-                )
-
-        if cache_from:
-            if utils.version_gte(self._version, '1.25'):
-                params.update({'cachefrom': json.dumps(cache_from)})
-            else:
-                raise errors.InvalidVersion(
-                    'cache_from was only introduced in API version 1.25'
-                )
-
-        if target:
-            if utils.version_gte(self._version, '1.29'):
-                params.update({'target': target})
-            else:
-                raise errors.InvalidVersion(
-                    'target was only introduced in API version 1.29'
-                )
-
-        if network_mode:
-            if utils.version_gte(self._version, '1.25'):
-                params.update({'networkmode': network_mode})
-            else:
-                raise errors.InvalidVersion(
-                    'network_mode was only introduced in API version 1.25'
-                )
-
-        if squash:
-            if utils.version_gte(self._version, '1.25'):
-                params.update({'squash': squash})
-            else:
-                raise errors.InvalidVersion(
-                    'squash was only introduced in API version 1.25'
-                )
-
-        if extra_hosts is not None:
-            if utils.version_lt(self._version, '1.27'):
-                raise errors.InvalidVersion(
-                    'extra_hosts was only introduced in API version 1.27'
-                )
-
-            if isinstance(extra_hosts, dict):
-                extra_hosts = utils.format_extra_hosts(extra_hosts)
-            params.update({'extrahosts': extra_hosts})
-
-        if platform is not None:
-            if utils.version_lt(self._version, '1.32'):
-                raise errors.InvalidVersion(
-                    'platform was only introduced in API version 1.32'
-                )
-            params['platform'] = platform
-
-        if isolation is not None:
-            if utils.version_lt(self._version, '1.24'):
-                raise errors.InvalidVersion(
-                    'isolation was only introduced in API version 1.24'
-                )
-            params['isolation'] = isolation
-
         if context is not None:
             headers = {'Content-Type': 'application/tar'}
             if encoding:
                 headers['Content-Encoding'] = encoding
 
-        self._set_auth_headers(headers)
+        if utils.compare_version('1.9', self._version) >= 0:
+            self._set_auth_headers(headers)
 
         response = self._post(
             u,
             data=context,
            params=params,
             headers=headers,
-            stream=True,
+            stream=stream,
             timeout=timeout,
         )
 
         if context is not None and not custom_context:
             context.close()
 
-        return self._stream_helper(response, decode=decode)
-
-    @utils.minimum_version('1.31')
-    def prune_builds(self, filters=None, keep_storage=None, all=None):
-        """
-        Delete the builder cache
-
-        Args:
-            filters (dict): Filters to process on the prune list.
-                Needs Docker API v1.39+
-                Available filters:
-                - dangling (bool): When set to true (or 1), prune only
-                unused and untagged images.
-                - until (str): Can be Unix timestamps, date formatted
-                timestamps, or Go duration strings (e.g. 10m, 1h30m) computed
-                relative to the daemon's local time.
-            keep_storage (int): Amount of disk space in bytes to keep for cache.
-                Needs Docker API v1.39+
-            all (bool): Remove all types of build cache.
-                Needs Docker API v1.39+
-
-        Returns:
-            (dict): A dictionary containing information about the operation's
-                    result. The ``SpaceReclaimed`` key indicates the amount of
-                    bytes of disk space reclaimed.
-
-        Raises:
-            :py:class:`docker.errors.APIError`
-                If the server returns an error.
-        """
-        url = self._url("/build/prune")
-        if (filters, keep_storage, all) != (None, None, None) \
-                and utils.version_lt(self._version, '1.39'):
-            raise errors.InvalidVersion(
-                '`filters`, `keep_storage`, and `all` args are only available '
-                'for API version > 1.38'
-            )
-        params = {}
-        if filters is not None:
-            params['filters'] = utils.convert_filters(filters)
-        if keep_storage is not None:
-            params['keep-storage'] = keep_storage
-        if all is not None:
-            params['all'] = all
-        return self._result(self._post(url, params=params), True)
+        if stream:
+            return self._stream_helper(response, decode=decode)
+        else:
+            output = self._result(response)
+            srch = r'Successfully built ([0-9a-f]+)'
+            match = re.search(srch, output)
+            if not match:
+                return None, output
+            return match.group(1), output
 
     def _set_auth_headers(self, headers):
         log.debug('Looking for auth config')
 
         # If we don't have any auth data so far, try reloading the config
         # file one more time in case anything showed up in there.
-        if not self._auth_configs or self._auth_configs.is_empty:
+        if not self._auth_configs:
             log.debug("No auth config in memory - loading from filesystem")
-            self._auth_configs = auth.load_config(
-                credstore_env=self.credstore_env
-            )
+            self._auth_configs = auth.load_config()
 
         # Send the full auth configuration (if any exists), since the build
        # could use any (or all) of the registries.
         if self._auth_configs:
-            auth_data = self._auth_configs.get_all_credentials()
-
-            # See https://github.com/docker/docker-py/issues/1683
-            if (auth.INDEX_URL not in auth_data and
-                    auth.INDEX_NAME in auth_data):
-                auth_data[auth.INDEX_URL] = auth_data.get(auth.INDEX_NAME, {})
-
             log.debug(
-                "Sending auth config (%s)",
-                ', '.join(repr(k) for k in auth_data),
-            )
-
-            if auth_data:
-                headers['X-Registry-Config'] = auth.encode_header(
-                    auth_data
-                )
+                'Sending auth config ({0})'.format(
+                    ', '.join(repr(k) for k in self._auth_configs.keys())
+                )
+            )
+
+            if headers is None:
+                headers = {}
+            if utils.compare_version('1.19', self._version) >= 0:
+                headers['X-Registry-Config'] = auth.encode_header(
+                    self._auth_configs
+                )
+            else:
+                headers['X-Registry-Config'] = auth.encode_header({
+                    'configs': self._auth_configs
+                })
         else:
             log.debug('No auth config found')
-
-
-def process_dockerfile(dockerfile, path):
-    if not dockerfile:
-        return (None, None)
-
-    abs_dockerfile = dockerfile
-    if not os.path.isabs(dockerfile):
-        abs_dockerfile = os.path.join(path, dockerfile)
-        if constants.IS_WINDOWS_PLATFORM and path.startswith(
-                constants.WINDOWS_LONGPATH_PREFIX):
-            normpath = os.path.normpath(
-                abs_dockerfile[len(constants.WINDOWS_LONGPATH_PREFIX):])
-            abs_dockerfile = f'{constants.WINDOWS_LONGPATH_PREFIX}{normpath}'
-    if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or
-            os.path.relpath(abs_dockerfile, path).startswith('..')):
-        # Dockerfile not in context - read data to insert into tar later
-        with open(abs_dockerfile) as df:
-            return (
-                f'.dockerfile.{random.getrandbits(160):x}',
-                df.read()
-            )
-
-    # Dockerfile is inside the context - return path relative to context root
-    if dockerfile == abs_dockerfile:
-        # Only calculate relpath if necessary to avoid errors
-        # on Windows client -> Linux Docker
-        # see https://github.com/docker/compose/issues/5969
-        dockerfile = os.path.relpath(abs_dockerfile, path)
-    return (dockerfile, None)

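The `build()` diff above also changes the return contract: the modern method always returns a generator of build-progress messages, while the 1.8.0-era method, when `stream` stays false (only possible against very old API versions, since the code forces `stream = True` for API 1.8+), returns an `(image_id, output)` tuple parsed from the `Successfully built ...` trailer. A hedged sketch of consuming both shapes; the path, tag, and socket URL are illustrative:

```python
import docker

client = docker.APIClient(base_url="unix://var/run/docker.sock")

# Modern build(): always a generator; decode=True yields parsed dicts.
for chunk in client.build(path=".", tag="example/app", rm=True, decode=True):
    if "stream" in chunk:
        print(chunk["stream"], end="")

# 1.8.0-era build() with stream=False instead returned a tuple:
# image_id, output = old_client.build(path=".", tag="example/app")
# image_id is None when no "Successfully built <id>" line is found.
```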
docker/api/client.py

@@ -1,532 +0,0 @@
import json
import struct
import urllib
from functools import partial
import requests
import requests.adapters
import requests.exceptions
from .. import auth
from ..constants import (
DEFAULT_MAX_POOL_SIZE,
DEFAULT_NUM_POOLS,
DEFAULT_NUM_POOLS_SSH,
DEFAULT_TIMEOUT_SECONDS,
DEFAULT_USER_AGENT,
IS_WINDOWS_PLATFORM,
MINIMUM_DOCKER_API_VERSION,
STREAM_HEADER_SIZE_BYTES,
)
from ..errors import (
DockerException,
InvalidVersion,
TLSParameterError,
create_api_error_from_http_exception,
)
from ..tls import TLSConfig
from ..transport import UnixHTTPAdapter
from ..utils import check_resource, config, update_headers, utils
from ..utils.json_stream import json_stream
from ..utils.proxy import ProxyConfig
from ..utils.socket import consume_socket_output, demux_adaptor, frames_iter
from .build import BuildApiMixin
from .config import ConfigApiMixin
from .container import ContainerApiMixin
from .daemon import DaemonApiMixin
from .exec_api import ExecApiMixin
from .image import ImageApiMixin
from .network import NetworkApiMixin
from .plugin import PluginApiMixin
from .secret import SecretApiMixin
from .service import ServiceApiMixin
from .swarm import SwarmApiMixin
from .volume import VolumeApiMixin
try:
from ..transport import NpipeHTTPAdapter
except ImportError:
pass
try:
from ..transport import SSHHTTPAdapter
except ImportError:
pass
class APIClient(
requests.Session,
BuildApiMixin,
ConfigApiMixin,
ContainerApiMixin,
DaemonApiMixin,
ExecApiMixin,
ImageApiMixin,
NetworkApiMixin,
PluginApiMixin,
SecretApiMixin,
ServiceApiMixin,
SwarmApiMixin,
VolumeApiMixin):
"""
A low-level client for the Docker Engine API.
Example:
>>> import docker
>>> client = docker.APIClient(base_url='unix://var/run/docker.sock')
>>> client.version()
{u'ApiVersion': u'1.33',
u'Arch': u'amd64',
u'BuildTime': u'2017-11-19T18:46:37.000000000+00:00',
u'GitCommit': u'f4ffd2511c',
u'GoVersion': u'go1.9.2',
u'KernelVersion': u'4.14.3-1-ARCH',
u'MinAPIVersion': u'1.12',
u'Os': u'linux',
u'Version': u'17.10.0-ce'}
Args:
base_url (str): URL to the Docker server. For example,
``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
version (str): The version of the API to use. Set to ``auto`` to
automatically detect the server's version. Default: ``1.35``
timeout (int): Default timeout for API calls, in seconds.
tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
``True`` to enable it with default options, or pass a
:py:class:`~docker.tls.TLSConfig` object to use custom
configuration.
user_agent (str): Set a custom user agent for requests to the server.
credstore_env (dict): Override environment variables when calling the
credential store process.
use_ssh_client (bool): If set to `True`, an ssh connection is made
via shelling out to the ssh client. Ensure the ssh client is
installed and configured on the host.
max_pool_size (int): The maximum number of connections
to save in the pool.
"""
__attrs__ = requests.Session.__attrs__ + ['_auth_configs',
'_general_configs',
'_version',
'base_url',
'timeout']
def __init__(self, base_url=None, version=None,
timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
user_agent=DEFAULT_USER_AGENT, num_pools=None,
credstore_env=None, use_ssh_client=False,
max_pool_size=DEFAULT_MAX_POOL_SIZE):
super().__init__()
if tls and not base_url:
raise TLSParameterError(
'If using TLS, the base_url argument must be provided.'
)
self.base_url = base_url
self.timeout = timeout
self.headers['User-Agent'] = user_agent
self._general_configs = config.load_general_config()
proxy_config = self._general_configs.get('proxies', {})
try:
proxies = proxy_config[base_url]
except KeyError:
proxies = proxy_config.get('default', {})
self._proxy_configs = ProxyConfig.from_dict(proxies)
self._auth_configs = auth.load_config(
config_dict=self._general_configs, credstore_env=credstore_env,
)
self.credstore_env = credstore_env
base_url = utils.parse_host(
base_url, IS_WINDOWS_PLATFORM, tls=bool(tls)
)
# SSH has a different default for num_pools to all other adapters
num_pools = num_pools or DEFAULT_NUM_POOLS_SSH if \
base_url.startswith('ssh://') else DEFAULT_NUM_POOLS
if base_url.startswith('http+unix://'):
self._custom_adapter = UnixHTTPAdapter(
base_url, timeout, pool_connections=num_pools,
max_pool_size=max_pool_size
)
self.mount('http+docker://', self._custom_adapter)
self._unmount('http://', 'https://')
# host part of URL should be unused, but is resolved by requests
# module in proxy_bypass_macosx_sysconf()
self.base_url = 'http+docker://localhost'
elif base_url.startswith('npipe://'):
if not IS_WINDOWS_PLATFORM:
raise DockerException(
'The npipe:// protocol is only supported on Windows'
)
try:
self._custom_adapter = NpipeHTTPAdapter(
base_url, timeout, pool_connections=num_pools,
max_pool_size=max_pool_size
)
except NameError as err:
raise DockerException(
'Install pypiwin32 package to enable npipe:// support'
) from err
self.mount('http+docker://', self._custom_adapter)
self.base_url = 'http+docker://localnpipe'
elif base_url.startswith('ssh://'):
try:
self._custom_adapter = SSHHTTPAdapter(
base_url, timeout, pool_connections=num_pools,
max_pool_size=max_pool_size, shell_out=use_ssh_client
)
except NameError as err:
raise DockerException(
'Install paramiko package to enable ssh:// support'
) from err
self.mount('http+docker://ssh', self._custom_adapter)
self._unmount('http://', 'https://')
self.base_url = 'http+docker://ssh'
else:
# Use SSLAdapter for the ability to specify SSL version
if isinstance(tls, TLSConfig):
tls.configure_client(self)
elif tls:
self._custom_adapter = requests.adapters.HTTPAdapter(
pool_connections=num_pools)
self.mount('https://', self._custom_adapter)
self.base_url = base_url
# version detection needs to be after unix adapter mounting
if version is None or (isinstance(
version,
str
) and version.lower() == 'auto'):
self._version = self._retrieve_server_version()
else:
self._version = version
if not isinstance(self._version, str):
raise DockerException(
'Version parameter must be a string or None. '
f'Found {type(version).__name__}'
)
if utils.version_lt(self._version, MINIMUM_DOCKER_API_VERSION):
raise InvalidVersion(
f'API versions below {MINIMUM_DOCKER_API_VERSION} are '
f'no longer supported by this library.'
)
def _retrieve_server_version(self):
try:
return self.version(api_version=False)["ApiVersion"]
except KeyError as ke:
raise DockerException(
'Invalid response from docker daemon: key "ApiVersion"'
' is missing.'
) from ke
except Exception as e:
raise DockerException(
f'Error while fetching server API version: {e}'
) from e
def _set_request_timeout(self, kwargs):
"""Prepare the kwargs for an HTTP request by inserting the timeout
parameter, if not already present."""
kwargs.setdefault('timeout', self.timeout)
return kwargs
@update_headers
def _post(self, url, **kwargs):
return self.post(url, **self._set_request_timeout(kwargs))
@update_headers
def _get(self, url, **kwargs):
return self.get(url, **self._set_request_timeout(kwargs))
@update_headers
def _put(self, url, **kwargs):
return self.put(url, **self._set_request_timeout(kwargs))
@update_headers
def _delete(self, url, **kwargs):
return self.delete(url, **self._set_request_timeout(kwargs))
def _url(self, pathfmt, *args, **kwargs):
for arg in args:
if not isinstance(arg, str):
raise ValueError(
f'Expected a string but found {arg} ({type(arg)}) instead'
)
quote_f = partial(urllib.parse.quote, safe="/:")
args = map(quote_f, args)
formatted_path = pathfmt.format(*args)
if kwargs.get('versioned_api', True):
return f'{self.base_url}/v{self._version}{formatted_path}'
else:
return f'{self.base_url}{formatted_path}'
def _raise_for_status(self, response):
"""Raises stored :class:`APIError`, if one occurred."""
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e:
raise create_api_error_from_http_exception(e) from e
def _result(self, response, json=False, binary=False):
assert not (json and binary)
self._raise_for_status(response)
if json:
return response.json()
if binary:
return response.content
return response.text
def _post_json(self, url, data, **kwargs):
# Go <1.1 can't unserialize null to a string
# so we do this disgusting thing here.
data2 = {}
if data is not None and isinstance(data, dict):
for k, v in iter(data.items()):
if v is not None:
data2[k] = v
elif data is not None:
data2 = data
if 'headers' not in kwargs:
kwargs['headers'] = {}
kwargs['headers']['Content-Type'] = 'application/json'
return self._post(url, data=json.dumps(data2), **kwargs)
def _attach_params(self, override=None):
return override or {
'stdout': 1,
'stderr': 1,
'stream': 1
}
@check_resource('container')
def _attach_websocket(self, container, params=None):
url = self._url("/containers/{0}/attach/ws", container)
req = requests.Request("POST", url, params=self._attach_params(params))
full_url = req.prepare().url
full_url = full_url.replace("http://", "ws://", 1)
full_url = full_url.replace("https://", "wss://", 1)
return self._create_websocket_connection(full_url)
def _create_websocket_connection(self, url):
try:
import websocket
return websocket.create_connection(url)
except ImportError as ie:
raise DockerException(
'The `websocket-client` library is required '
'for using websocket connections. '
'You can install the `docker` library '
'with the [websocket] extra to install it.'
) from ie
def _get_raw_response_socket(self, response):
self._raise_for_status(response)
if self.base_url == "http+docker://localnpipe":
sock = response.raw._fp.fp.raw.sock
elif self.base_url.startswith('http+docker://ssh'):
sock = response.raw._fp.fp.channel
else:
sock = response.raw._fp.fp.raw
if self.base_url.startswith("https://"):
sock = sock._sock
try:
# Keep a reference to the response to stop it being garbage
# collected. If the response is garbage collected, it will
# close TLS sockets.
sock._response = response
except AttributeError:
# UNIX sockets can't have attributes set on them, but that's
# fine because we won't be doing TLS over them
pass
return sock
def _stream_helper(self, response, decode=False):
"""Generator for data coming from a chunked-encoded HTTP response."""
if response.raw._fp.chunked:
if decode:
yield from json_stream(self._stream_helper(response, False))
else:
reader = response.raw
while not reader.closed:
# this read call will block until we get a chunk
data = reader.read(1)
if not data:
break
if reader._fp.chunk_left:
data += reader.read(reader._fp.chunk_left)
yield data
else:
# Response isn't chunked, meaning we probably
# encountered an error immediately
yield self._result(response, json=decode)
def _multiplexed_buffer_helper(self, response):
"""A generator of multiplexed data blocks read from a buffered
response."""
buf = self._result(response, binary=True)
buf_length = len(buf)
walker = 0
while True:
if buf_length - walker < STREAM_HEADER_SIZE_BYTES:
break
header = buf[walker:walker + STREAM_HEADER_SIZE_BYTES]
_, length = struct.unpack_from('>BxxxL', header)
start = walker + STREAM_HEADER_SIZE_BYTES
end = start + length
walker = end
yield buf[start:end]
def _multiplexed_response_stream_helper(self, response):
"""A generator of multiplexed data blocks coming from a response
stream."""
# Disable timeout on the underlying socket to prevent
# Read timed out(s) for long running processes
socket = self._get_raw_response_socket(response)
self._disable_socket_timeout(socket)
while True:
header = response.raw.read(STREAM_HEADER_SIZE_BYTES)
if not header:
break
_, length = struct.unpack('>BxxxL', header)
if not length:
continue
data = response.raw.read(length)
if not data:
break
yield data
def _stream_raw_result(self, response, chunk_size=1, decode=True):
''' Stream result for TTY-enabled container and raw binary data'''
self._raise_for_status(response)
# Disable timeout on the underlying socket to prevent
# Read timed out(s) for long running processes
socket = self._get_raw_response_socket(response)
self._disable_socket_timeout(socket)
yield from response.iter_content(chunk_size, decode)
def _read_from_socket(self, response, stream, tty=True, demux=False):
"""Consume all data from the socket, close the response and return the
data. If stream=True, then a generator is returned instead and the
caller is responsible for closing the response.
"""
socket = self._get_raw_response_socket(response)
gen = frames_iter(socket, tty)
if demux:
# The generator will output tuples (stdout, stderr)
gen = (demux_adaptor(*frame) for frame in gen)
else:
# The generator will output strings
gen = (data for (_, data) in gen)
if stream:
return gen
else:
try:
# Wait for all frames, concatenate them, and return the result
return consume_socket_output(gen, demux=demux)
finally:
response.close()
def _disable_socket_timeout(self, socket):
""" Depending on the combination of python version and whether we're
connecting over http or https, we might need to access _sock, which
may or may not exist; or we may need to just settimeout on socket
itself, which also may or may not have settimeout on it. To avoid
missing the correct one, we try both.
We also do not want to set the timeout if it is already disabled, as
you run the risk of changing a socket that was non-blocking to
blocking, for example when using gevent.
"""
sockets = [socket, getattr(socket, '_sock', None)]
for s in sockets:
if not hasattr(s, 'settimeout'):
continue
timeout = -1
if hasattr(s, 'gettimeout'):
timeout = s.gettimeout()
# Don't change the timeout if it is already disabled.
if timeout is None or timeout == 0.0:
continue
s.settimeout(None)
@check_resource('container')
def _check_is_tty(self, container):
cont = self.inspect_container(container)
return cont['Config']['Tty']
def _get_result(self, container, stream, res):
return self._get_result_tty(stream, res, self._check_is_tty(container))
def _get_result_tty(self, stream, res, is_tty):
# We should also use raw streaming (without keep-alives)
# if we're dealing with a tty-enabled container.
if is_tty:
return self._stream_raw_result(res) if stream else \
self._result(res, binary=True)
self._raise_for_status(res)
sep = b''
if stream:
return self._multiplexed_response_stream_helper(res)
else:
return sep.join(
list(self._multiplexed_buffer_helper(res))
)
def _unmount(self, *args):
for proto in args:
self.adapters.pop(proto)
def get_adapter(self, url):
try:
return super().get_adapter(url)
except requests.exceptions.InvalidSchema as e:
if self._custom_adapter:
return self._custom_adapter
else:
raise e
@property
def api_version(self):
return self._version
def reload_config(self, dockercfg_path=None):
"""
Force a reload of the auth configuration
Args:
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
otherwise ``$HOME/.dockercfg``)
Returns:
None
"""
self._auth_configs = auth.load_config(
dockercfg_path, credstore_env=self.credstore_env
)

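The deleted `APIClient` above decodes Docker's multiplexed attach streams with `struct.unpack('>BxxxL', ...)` over 8-byte frame headers (see `_multiplexed_buffer_helper`). As a standalone illustration of that wire format, here is a minimal sketch; `demux_frames` is a hypothetical helper, not part of the SDK:

```python
import struct

STREAM_HEADER_SIZE_BYTES = 8  # 1 stream-id byte, 3 padding bytes, 4 length bytes

def demux_frames(buf: bytes):
    """Yield (stream_id, payload) pairs from a buffered multiplexed stream.

    stream_id is 0 (stdin), 1 (stdout) or 2 (stderr); the big-endian uint32
    gives the payload length that follows each 8-byte header.
    """
    walker = 0
    while len(buf) - walker >= STREAM_HEADER_SIZE_BYTES:
        stream_id, length = struct.unpack_from('>BxxxL', buf, walker)
        start = walker + STREAM_HEADER_SIZE_BYTES
        yield stream_id, buf[start:start + length]
        walker = start + length

# One stdout frame carrying b"hello\n":
frame = struct.pack('>BxxxL', 1, 6) + b"hello\n"
print(list(demux_frames(frame)))  # [(1, b'hello\n')]
```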
docker/api/config.py

@@ -1,92 +0,0 @@
import base64
from .. import utils
class ConfigApiMixin:
@utils.minimum_version('1.30')
def create_config(self, name, data, labels=None, templating=None):
"""
Create a config
Args:
name (string): Name of the config
data (bytes): Config data to be stored
labels (dict): A mapping of labels to assign to the config
templating (dict): dictionary containing the name of the
templating driver to be used expressed as
{ name: <templating_driver_name>}
Returns (dict): ID of the newly created config
"""
if not isinstance(data, bytes):
data = data.encode('utf-8')
data = base64.b64encode(data)
data = data.decode('ascii')
body = {
'Data': data,
'Name': name,
'Labels': labels,
'Templating': templating
}
url = self._url('/configs/create')
return self._result(
self._post_json(url, data=body), True
)
@utils.minimum_version('1.30')
@utils.check_resource('id')
def inspect_config(self, id):
"""
Retrieve config metadata
Args:
id (string): Full ID of the config to inspect
Returns (dict): A dictionary of metadata
Raises:
:py:class:`docker.errors.NotFound`
if no config with that ID exists
"""
url = self._url('/configs/{0}', id)
return self._result(self._get(url), True)
@utils.minimum_version('1.30')
@utils.check_resource('id')
def remove_config(self, id):
"""
Remove a config
Args:
id (string): Full ID of the config to remove
Returns (boolean): True if successful
Raises:
:py:class:`docker.errors.NotFound`
if no config with that ID exists
"""
url = self._url('/configs/{0}', id)
res = self._delete(url)
self._raise_for_status(res)
return True
@utils.minimum_version('1.30')
def configs(self, filters=None):
"""
List configs
Args:
filters (dict): A map of filters to process on the configs
list. Available filters: ``names``
Returns (list): A list of configs
"""
url = self._url('/configs')
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
return self._result(self._get(url, params=params), True)

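For reference, the deleted `create_config()` above base64-encodes its payload before POSTing to `/configs/create`. A minimal sketch of the request body it assembles, using an illustrative name and payload:

```python
import base64

data = b"db_password=hunter2"  # bytes, as create_config() expects

# Mirrors the encoding steps in create_config() above:
body = {
    "Data": base64.b64encode(data).decode("ascii"),
    "Name": "app-config",
    "Labels": None,
    "Templating": None,
}
# client.create_config(name="app-config", data=data) would POST this body.
```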
(File diff suppressed because it is too large.)

docker/api/daemon.py

@ -1,64 +1,14 @@
import os import os
import warnings
from datetime import datetime from datetime import datetime
from .. import auth, types, utils from ..auth import auth
from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING
from ..utils import utils
class DaemonApiMixin: class DaemonApiMixin(object):
@utils.minimum_version('1.25')
def df(self):
"""
Get data usage information.
Returns:
(dict): A dictionary representing different resource categories
and their respective data usage.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/system/df')
return self._result(self._get(url), True)
def events(self, since=None, until=None, filters=None, decode=None): def events(self, since=None, until=None, filters=None, decode=None):
"""
Get real-time events from the server. Similar to the ``docker events``
command.
Args:
since (UTC datetime or int): Get events from this point
until (UTC datetime or int): Get events until this point
filters (dict): Filter the events by event time, container or image
decode (bool): If set to true, stream will be decoded into dicts on
the fly. False by default.
Returns:
A :py:class:`docker.types.daemon.CancellableStream` generator
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> for event in client.events(decode=True):
... print(event)
{u'from': u'image/with:tag',
u'id': u'container-id',
u'status': u'start',
u'time': 1423339459}
...
or
>>> events = client.events()
>>> for event in events:
... print(event)
>>> # and cancel from another thread
>>> events.close()
"""
if isinstance(since, datetime):
since = utils.datetime_to_timestamp(since)
@ -73,66 +23,35 @@ class DaemonApiMixin:
'until': until,
'filters': filters
}
-url = self._url('/events')
-response = self._get(url, params=params, stream=True, timeout=None)
-stream = self._stream_helper(response, decode=decode)
-return types.CancellableStream(stream, response)
+return self._stream_helper(
+self.get(self._url('/events'), params=params, stream=True),
+decode=decode
+)
def info(self):
"""
Display system-wide information. Identical to the ``docker info``
command.
Returns:
(dict): The info as a dict
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(self._get(self._url("/info")), True)
def login(self, username, password=None, email=None, registry=None,
-reauth=False, dockercfg_path=None):
+reauth=False, insecure_registry=False, dockercfg_path=None):
+if insecure_registry:
+warnings.warn(
+INSECURE_REGISTRY_DEPRECATION_WARNING.format('login()'),
+DeprecationWarning
+)
"""
Authenticate with a registry. Similar to the ``docker login`` command.
Args:
username (str): The registry username
password (str): The plaintext password
email (str): The email for the registry account
registry (str): URL to the registry. E.g.
``https://index.docker.io/v1/``
reauth (bool): Whether or not to refresh existing authentication on
the Docker server.
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
otherwise ``$HOME/.dockercfg``)
Returns:
(dict): The response from the login request
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
# If we don't have any auth data so far, try reloading the config file
# one more time in case anything showed up in there.
# If dockercfg_path is passed check to see if the config file exists,
# if so load that config.
if dockercfg_path and os.path.exists(dockercfg_path):
-self._auth_configs = auth.load_config(
-dockercfg_path, credstore_env=self.credstore_env
-)
-elif not self._auth_configs or self._auth_configs.is_empty:
-self._auth_configs = auth.load_config(
-credstore_env=self.credstore_env
-)
-authcfg = self._auth_configs.resolve_authconfig(registry)
+self._auth_configs = auth.load_config(dockercfg_path)
+elif not self._auth_configs:
+self._auth_configs = auth.load_config()
+registry = registry or auth.INDEX_URL
+authcfg = auth.resolve_authconfig(self._auth_configs, registry)
# If we found an existing auth config for this registry and username
# combination, we can return it immediately unless reauth is requested.
if authcfg and authcfg.get('username', None) == username \
@ -148,34 +67,12 @@ class DaemonApiMixin:
response = self._post_json(self._url('/auth'), data=req_data)
if response.status_code == 200:
-self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data)
+self._auth_configs[registry] = req_data
return self._result(response, json=True)
def ping(self):
+return self._result(self._get(self._url('/_ping')))
"""
Checks the server is responsive. An exception will be raised if it
isn't responding.
Returns:
(bool) The response from the server.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(self._get(self._url('/_ping'))) == 'OK'
def version(self, api_version=True):
"""
Returns version information from the server. Similar to the ``docker
version`` command.
Returns:
(dict): The server version information
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url("/version", versioned_api=api_version)
return self._result(self._get(url), json=True)
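On the main side, events() wraps the streamed response in a CancellableStream so a consumer (or another thread) can stop it cleanly. A short sketch, assuming a running local daemon; note the generator blocks until an event arrives:

from datetime import datetime, timedelta

import docker

client = docker.APIClient()
since = datetime.utcnow() - timedelta(minutes=5)
events = client.events(since=since, decode=True,
                       filters={'type': 'container'})
for event in events:
    print(event.get('status'), event.get('id'))
    break  # stop after the first event in this sketch
events.close()  # cancels the underlying HTTP stream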

View File: docker/api/exec_api.py

@ -1,54 +1,25 @@
-from .. import errors, utils
+import six
-from ..types import CancellableStream
+from .. import errors
+from .. import utils
-class ExecApiMixin:
+class ExecApiMixin(object):
-@utils.check_resource('container')
+@utils.minimum_version('1.15')
+@utils.check_resource
def exec_create(self, container, cmd, stdout=True, stderr=True,
-stdin=False, tty=False, privileged=False, user='',
-environment=None, workdir=None, detach_keys=None):
+stdin=False, tty=False, privileged=False, user=''):
+if privileged and utils.compare_version('1.19', self._version) < 0:
"""
Sets up an exec instance in a running container.
Args:
container (str): Target container where exec instance will be
created
cmd (str or list): Command to be executed
stdout (bool): Attach to stdout. Default: ``True``
stderr (bool): Attach to stderr. Default: ``True``
stdin (bool): Attach to stdin. Default: ``False``
tty (bool): Allocate a pseudo-TTY. Default: False
privileged (bool): Run as privileged.
user (str): User to execute command as. Default: root
environment (dict or list): A dictionary or a list of strings in
the following format ``["PASSWORD=xxx"]`` or
``{"PASSWORD": "xxx"}``.
workdir (str): Path to working directory for this exec session
detach_keys (str): Override the key sequence for detaching
a container. Format is a single character `[a-Z]`
or `ctrl-<value>` where `<value>` is one of:
`a-z`, `@`, `^`, `[`, `,` or `_`.
~/.docker/config.json is used by default.
Returns:
(dict): A dictionary with an exec ``Id`` key.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if environment is not None and utils.version_lt(self._version, '1.25'):
raise errors.InvalidVersion(
-'Setting environment for exec is not supported in API < 1.25'
+'Privileged exec is not supported in API < 1.19'
)
-if isinstance(cmd, str):
+if user and utils.compare_version('1.19', self._version) < 0:
+raise errors.InvalidVersion(
+'User-specific exec is not supported in API < 1.19'
+)
+if isinstance(cmd, six.string_types):
cmd = utils.split_command(cmd)
if isinstance(environment, dict):
environment = utils.utils.format_environment(environment)
data = {
'Container': container,
'User': user,
@ -57,55 +28,22 @@ class ExecApiMixin:
'AttachStdin': stdin,
'AttachStdout': stdout,
'AttachStderr': stderr,
-'Cmd': cmd,
-'Env': environment,
+'Cmd': cmd
}
if workdir is not None:
if utils.version_lt(self._version, '1.35'):
raise errors.InvalidVersion(
'workdir is not supported for API version < 1.35'
)
data['WorkingDir'] = workdir
if detach_keys:
data['detachKeys'] = detach_keys
elif 'detachKeys' in self._general_configs:
data['detachKeys'] = self._general_configs['detachKeys']
url = self._url('/containers/{0}/exec', container)
res = self._post_json(url, data=data)
return self._result(res, True)
+@utils.minimum_version('1.16')
def exec_inspect(self, exec_id):
"""
Return low-level information about an exec command.
Args:
exec_id (str): ID of the exec instance
Returns:
(dict): Dictionary of values returned by the endpoint.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
res = self._get(self._url("/exec/{0}/json", exec_id))
return self._result(res, True)
+@utils.minimum_version('1.15')
def exec_resize(self, exec_id, height=None, width=None):
"""
Resize the tty session used by the specified exec command.
Args:
exec_id (str): ID of the exec instance
height (int): Height of tty session
width (int): Width of tty session
"""
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
@ -114,63 +52,24 @@ class ExecApiMixin:
res = self._post(url, params=params)
self._raise_for_status(res)
-@utils.check_resource('exec_id')
+@utils.minimum_version('1.15')
def exec_start(self, exec_id, detach=False, tty=False, stream=False,
-socket=False, demux=False):
+socket=False):
"""
Start a previously set up exec instance.
Args:
exec_id (str): ID of the exec instance
detach (bool): If true, detach from the exec command.
Default: False
tty (bool): Allocate a pseudo-TTY. Default: False
stream (bool): Return response data progressively as an iterator
of strings, rather than a single string.
socket (bool): Return the connection socket to allow custom
read/write operations. Must be closed by the caller when done.
demux (bool): Return stdout and stderr separately
Returns:
(generator or str or tuple): If ``stream=True``, a generator
yielding response chunks. If ``socket=True``, a socket object for
the connection. A string containing response data otherwise. If
``demux=True``, a tuple with two elements of type byte: stdout and
stderr.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
# we want opened socket if socket == True
if socket:
stream = True
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
data = {
'Tty': tty,
'Detach': detach
}
-headers = {} if detach else {
-'Connection': 'Upgrade',
-'Upgrade': 'tcp'
-}
res = self._post_json(
-self._url('/exec/{0}/start', exec_id),
+self._url('/exec/{0}/start', exec_id), data=data, stream=stream
-headers=headers,
-data=data,
-stream=True
)
-if detach:
-try:
-return self._result(res)
-finally:
-res.close()
if socket:
return self._get_raw_response_socket(res)
+return self._get_result_tty(stream, res, tty)
-output = self._read_from_socket(res, stream, tty=tty, demux=demux)
-if stream:
-return CancellableStream(output, res)
-else:
-return output
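Both versions keep exec as a two-step flow: exec_create registers the command, exec_start runs it. A sketch against the main-side API, assuming a running container named 'web' (illustrative) and a daemon new enough for workdir (API >= 1.35); demux=True yields separate stdout/stderr byte strings:

import docker

client = docker.APIClient()
exec_id = client.exec_create('web', ['ls', '-l', '/'], workdir='/')
stdout, stderr = client.exec_start(exec_id, demux=True)
if stdout:
    print(stdout.decode(), end='')
print('exit code:', client.exec_inspect(exec_id).get('ExitCode'))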

View File: docker/api/image.py

@ -1,96 +1,39 @@
import logging
-import os
+import six
+import warnings
-from .. import auth, errors, utils
+from ..auth import auth
-from ..constants import DEFAULT_DATA_CHUNK_SIZE
+from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING
+from .. import utils
+from .. import errors
log = logging.getLogger(__name__)
-class ImageApiMixin:
+class ImageApiMixin(object):
-@utils.check_resource('image')
+@utils.check_resource
-def get_image(self, image, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+def get_image(self, image):
"""
Get a tarball of an image. Similar to the ``docker save`` command.
Args:
image (str): Image name to get
chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB
Returns:
(generator): A stream of raw archive data.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> image = client.api.get_image("busybox:latest")
>>> f = open('/tmp/busybox-latest.tar', 'wb')
>>> for chunk in image:
>>> f.write(chunk)
>>> f.close()
"""
res = self._get(self._url("/images/{0}/get", image), stream=True)
-return self._stream_raw_result(res, chunk_size, False)
+self._raise_for_status(res)
+return res.raw
-@utils.check_resource('image')
+@utils.check_resource
def history(self, image):
"""
Show the history of an image.
Args:
image (str): The image to show history for
Returns:
(list): The history of the image
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
res = self._get(self._url("/images/{0}/history", image))
return self._result(res, True)
-def images(self, name=None, quiet=False, all=False, filters=None):
+def images(self, name=None, quiet=False, all=False, viz=False,
+filters=None):
+if viz:
+if utils.compare_version('1.7', self._version) >= 0:
+raise Exception('Viz output is not supported in API >= 1.7!')
+return self._result(self._get(self._url("images/viz")))
"""
List images. Similar to the ``docker images`` command.
Args:
name (str): Only show images belonging to the repository ``name``
quiet (bool): Only return numeric IDs as a list.
all (bool): Show intermediate image layers. By default, these are
filtered out.
filters (dict): Filters to be processed on the image list.
Available filters:
- ``dangling`` (bool)
- `label` (str|list): format either ``"key"``, ``"key=value"``
or a list of such.
Returns:
(dict or list): A list if ``quiet=True``, otherwise a dict.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {
+'filter': name,
'only_ids': 1 if quiet else 0,
'all': 1 if all else 0,
}
if name:
if utils.version_lt(self._version, '1.25'):
# only use "filter" on API 1.24 and under, as it is deprecated
params['filter'] = name
else:
if filters:
filters['reference'] = name
else:
filters = {'reference': name}
if filters:
params['filters'] = utils.convert_filters(filters)
res = self._result(self._get(self._url("/images/json"), params=params),
@ -99,305 +42,121 @@ class ImageApiMixin:
return [x['Id'] for x in res]
return res
-def import_image(self, src=None, repository=None, tag=None, image=None,
-changes=None, stream_src=False):
+def import_image(self, src=None, repository=None, tag=None, image=None):
+if src:
+if isinstance(src, six.string_types):
+try:
+result = self.import_image_from_file(
+src, repository=repository, tag=tag)
+except IOError:
+result = self.import_image_from_url(
+src, repository=repository, tag=tag)
+else:
+result = self.import_image_from_data(
+src, repository=repository, tag=tag)
+elif image:
+result = self.import_image_from_image(
+image, repository=repository, tag=tag)
+else:
+raise Exception("Must specify a src or image")
+return result
+def import_image_from_data(self, data, repository=None, tag=None):
+u = self._url("/images/create")
+params = {
+'fromSrc': '-',
+'repo': repository,
+'tag': tag
+}
+headers = {
+'Content-Type': 'application/tar',
+}
"""
Import an image. Similar to the ``docker import`` command.
If ``src`` is a string or unicode string, it will first be treated as a
path to a tarball on the local system. If there is an error reading
from that file, ``src`` will be treated as a URL instead to fetch the
image from. You can also pass an open file handle as ``src``, in which
case the data will be read from that file.
If ``src`` is unset but ``image`` is set, the ``image`` parameter will
be taken as the name of an existing image to import from.
Args:
src (str or file): Path to tarfile, URL, or file-like object
repository (str): The repository to create
tag (str): The tag to apply
image (str): Use another image like the ``FROM`` Dockerfile
parameter
"""
if not (src or image):
raise errors.DockerException(
'Must specify src or image to import from'
)
u = self._url('/images/create')
params = _import_image_params(
repository, tag, image,
src=(src if isinstance(src, str) else None),
changes=changes
)
headers = {'Content-Type': 'application/tar'}
if image or params.get('fromSrc') != '-': # from image or URL
return self._result(
self._post(u, data=None, params=params)
)
elif isinstance(src, str): # from file path
with open(src, 'rb') as f:
return self._result(
self._post(
u, data=f, params=params, headers=headers, timeout=None
)
)
else: # from raw data
if stream_src:
headers['Transfer-Encoding'] = 'chunked'
return self._result(
self._post(u, data=src, params=params, headers=headers)
)
def import_image_from_data(self, data, repository=None, tag=None,
changes=None):
"""
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but
allows importing in-memory bytes data.
Args:
data (bytes collection): Bytes collection containing valid tar data
repository (str): The repository to create
tag (str): The tag to apply
"""
u = self._url('/images/create')
params = _import_image_params(
repository, tag, src='-', changes=changes
)
headers = {'Content-Type': 'application/tar'}
return self._result(
-self._post(
+self._post(u, data=data, params=params, headers=headers))
u, data=data, params=params, headers=headers, timeout=None
def import_image_from_file(self, filename, repository=None, tag=None):
u = self._url("/images/create")
params = {
'fromSrc': '-',
'repo': repository,
'tag': tag
}
headers = {
'Content-Type': 'application/tar',
}
with open(filename, 'rb') as f:
return self._result(
self._post(u, data=f, params=params, headers=headers,
timeout=None))
def import_image_from_stream(self, stream, repository=None, tag=None):
u = self._url("/images/create")
params = {
'fromSrc': '-',
'repo': repository,
'tag': tag
}
headers = {
'Content-Type': 'application/tar',
'Transfer-Encoding': 'chunked',
}
return self._result(
self._post(u, data=stream, params=params, headers=headers))
def import_image_from_url(self, url, repository=None, tag=None):
u = self._url("/images/create")
params = {
'fromSrc': url,
'repo': repository,
'tag': tag
}
return self._result(
self._post(u, data=None, params=params))
def import_image_from_image(self, image, repository=None, tag=None):
u = self._url("/images/create")
params = {
'fromImage': image,
'repo': repository,
'tag': tag
}
return self._result(
self._post(u, data=None, params=params))
@utils.check_resource
def insert(self, image, url, path):
if utils.compare_version('1.12', self._version) >= 0:
raise errors.DeprecatedMethod(
'insert is not available for API version >=1.12'
) )
) api_url = self._url("/images/{0}/insert", image)
params = {
'url': url,
'path': path
}
return self._result(self._post(api_url, params=params))
-def import_image_from_file(self, filename, repository=None, tag=None,
+@utils.check_resource
changes=None):
"""
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from a tar file on disk.
Args:
filename (str): Full path to a tar file.
repository (str): The repository to create
tag (str): The tag to apply
Raises:
IOError: File does not exist.
"""
return self.import_image(
src=filename, repository=repository, tag=tag, changes=changes
)
def import_image_from_stream(self, stream, repository=None, tag=None,
changes=None):
return self.import_image(
src=stream, stream_src=True, repository=repository, tag=tag,
changes=changes
)
def import_image_from_url(self, url, repository=None, tag=None,
changes=None):
"""
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from a URL.
Args:
url (str): A URL pointing to a tar file.
repository (str): The repository to create
tag (str): The tag to apply
"""
return self.import_image(
src=url, repository=repository, tag=tag, changes=changes
)
def import_image_from_image(self, image, repository=None, tag=None,
changes=None):
"""
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from another image, like the ``FROM`` Dockerfile
parameter.
Args:
image (str): Image name to import from
repository (str): The repository to create
tag (str): The tag to apply
"""
return self.import_image(
image=image, repository=repository, tag=tag, changes=changes
)
@utils.check_resource('image')
def inspect_image(self, image):
"""
Get detailed information about an image. Similar to the ``docker
inspect`` command, but only for images.
Args:
image (str): The image to inspect
Returns:
(dict): Similar to the output of ``docker inspect``, but as a
single dict
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(
self._get(self._url("/images/{0}/json", image)), True
)
-@utils.minimum_version('1.30')
-@utils.check_resource('image')
+def load_image(self, data):
+res = self._post(self._url("/images/load"), data=data)
def inspect_distribution(self, image, auth_config=None):
"""
Get image digest and platform information by contacting the registry.
Args:
image (str): The image name to inspect
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
Returns:
(dict): A dict containing distribution data
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
registry, _ = auth.resolve_repository_name(image)
headers = {}
if auth_config is None:
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
else:
log.debug('Sending supplied auth config')
headers['X-Registry-Auth'] = auth.encode_header(auth_config)
url = self._url("/distribution/{0}/json", image)
return self._result(
self._get(url, headers=headers), True
)
def load_image(self, data, quiet=None):
"""
Load an image that was previously saved using
:py:meth:`~docker.api.image.ImageApiMixin.get_image` (or ``docker
save``). Similar to ``docker load``.
Args:
data (binary): Image data to be loaded.
quiet (boolean): Suppress progress details in response.
Returns:
(generator): Progress output as JSON objects. Only available for
API version >= 1.23
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {}
if quiet is not None:
if utils.version_lt(self._version, '1.23'):
raise errors.InvalidVersion(
'quiet is not supported in API version < 1.23'
)
params['quiet'] = quiet
res = self._post(
self._url("/images/load"), data=data, params=params, stream=True
)
if utils.version_gte(self._version, '1.23'):
return self._stream_helper(res, decode=True)
self._raise_for_status(res)
-@utils.minimum_version('1.25')
-def prune_images(self, filters=None):
+def pull(self, repository, tag=None, stream=False,
+insecure_registry=False, auth_config=None, decode=False):
+if insecure_registry:
+warnings.warn(
+INSECURE_REGISTRY_DEPRECATION_WARNING.format('pull()'),
+DeprecationWarning
+)
"""
Delete unused images
Args:
filters (dict): Filters to process on the prune list.
Available filters:
- dangling (bool): When set to true (or 1), prune only
unused and untagged images.
Returns:
(dict): A dict containing a list of deleted image IDs and
the amount of disk space reclaimed in bytes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url("/images/prune")
params = {}
if filters is not None:
params['filters'] = utils.convert_filters(filters)
return self._result(self._post(url, params=params), True)
def pull(self, repository, tag=None, stream=False, auth_config=None,
decode=False, platform=None, all_tags=False):
"""
Pulls an image. Similar to the ``docker pull`` command.
Args:
repository (str): The repository to pull
tag (str): The tag to pull. If ``tag`` is ``None`` or empty, it
is set to ``latest``.
stream (bool): Stream the output as a generator. Make sure to
consume the generator, otherwise pull might get cancelled.
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
decode (bool): Decode the JSON data from the server into dicts.
Only applies with ``stream=True``
platform (str): Platform in the format ``os[/arch[/variant]]``
all_tags (bool): Pull all image tags, the ``tag`` parameter is
ignored.
Returns:
(generator or str): The output
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> resp = client.api.pull('busybox', stream=True, decode=True)
... for line in resp:
... print(json.dumps(line, indent=4))
{
"status": "Pulling image (latest) from busybox",
"progressDetail": {},
"id": "e72ac664f4f0"
}
{
"status": "Pulling image (latest) from busybox, endpoint: ...",
"progressDetail": {},
"id": "e72ac664f4f0"
}
"""
repository, image_tag = utils.parse_repository_tag(repository)
tag = tag or image_tag or 'latest'
if all_tags:
tag = None
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
params = {
@ -406,20 +165,32 @@ class ImageApiMixin:
}
headers = {}
-if auth_config is None:
-header = auth.get_config_header(self, registry)
-if header:
-headers['X-Registry-Auth'] = header
-else:
-log.debug('Sending supplied auth config')
-headers['X-Registry-Auth'] = auth.encode_header(auth_config)
-if platform is not None:
-if utils.version_lt(self._version, '1.32'):
-raise errors.InvalidVersion(
-'platform was only introduced in API version 1.32'
-)
-params['platform'] = platform
+if utils.compare_version('1.5', self._version) >= 0:
+# If we don't have any auth data so far, try reloading the config
+# file one more time in case anything showed up in there.
+if auth_config is None:
+log.debug('Looking for auth config')
+if not self._auth_configs:
+log.debug(
+"No auth config in memory - loading from filesystem"
+)
+self._auth_configs = auth.load_config()
+authcfg = auth.resolve_authconfig(self._auth_configs, registry)
+# Do not fail here if no authentication exists for this
+# specific registry as we can have a readonly pull. Just
+# put the header if we can.
+if authcfg:
+log.debug('Found auth config')
+# auth_config needs to be a dict in the format used by
+# auth.py username , password, serveraddress, email
+headers['X-Registry-Auth'] = auth.encode_header(
+authcfg
+)
+else:
+log.debug('No auth config found')
+else:
+log.debug('Sending supplied auth config')
+headers['X-Registry-Auth'] = auth.encode_header(auth_config)
response = self._post(
self._url('/images/create'), params=params, headers=headers,
@ -433,44 +204,14 @@ class ImageApiMixin:
return self._result(response)
-def push(self, repository, tag=None, stream=False, auth_config=None,
-decode=False):
+def push(self, repository, tag=None, stream=False,
+insecure_registry=False, decode=False):
+if insecure_registry:
+warnings.warn(
+INSECURE_REGISTRY_DEPRECATION_WARNING.format('push()'),
+DeprecationWarning
+)
"""
Push an image or a repository to the registry. Similar to the ``docker
push`` command.
Args:
repository (str): The repository to push to
tag (str): An optional tag to push
stream (bool): Stream the output as a blocking generator
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
decode (bool): Decode the JSON data from the server into dicts.
Only applies with ``stream=True``
Returns:
(generator or str): The output from the server.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> resp = client.api.push(
... 'yourname/app',
... stream=True,
... decode=True,
... )
... for line in resp:
... print(line)
{'status': 'Pushing repository yourname/app (1 tags)'}
{'status': 'Pushing','progressDetail': {}, 'id': '511136ea3c5a'}
{'status': 'Image already pushed, skipping', 'progressDetail':{},
'id': '511136ea3c5a'}
...
"""
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
@ -480,13 +221,18 @@ class ImageApiMixin:
}
headers = {}
-if auth_config is None:
-header = auth.get_config_header(self, registry)
-if header:
-headers['X-Registry-Auth'] = header
-else:
-log.debug('Sending supplied auth config')
-headers['X-Registry-Auth'] = auth.encode_header(auth_config)
+if utils.compare_version('1.5', self._version) >= 0:
+# If we don't have any auth data so far, try reloading the config
+# file one more time in case anything showed up in there.
+if not self._auth_configs:
+self._auth_configs = auth.load_config()
+authcfg = auth.resolve_authconfig(self._auth_configs, registry)
+# Do not fail here if no authentication exists for this specific
+# registry as we can have a readonly pull. Just put the header if
+# we can.
+if authcfg:
+headers['X-Registry-Auth'] = auth.encode_header(authcfg)
response = self._post_json(
u, None, headers=headers, stream=stream, params=params
@ -499,68 +245,20 @@ class ImageApiMixin:
return self._result(response)
-@utils.check_resource('image')
+@utils.check_resource
def remove_image(self, image, force=False, noprune=False):
"""
Remove an image. Similar to the ``docker rmi`` command.
Args:
image (str): The image to remove
force (bool): Force removal of the image
noprune (bool): Do not delete untagged parents
"""
params = {'force': force, 'noprune': noprune}
res = self._delete(self._url("/images/{0}", image), params=params)
-return self._result(res, True)
+self._raise_for_status(res)
def search(self, term, limit=None):
"""
Search for images on Docker Hub. Similar to the ``docker search``
command.
Args:
term (str): A term to search for.
limit (int): The maximum number of results to return.
Returns:
(list of dicts): The response of the search.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {'term': term}
if limit is not None:
params['limit'] = limit
+def search(self, term):
return self._result(
-self._get(self._url("/images/search"), params=params),
+self._get(self._url("/images/search"), params={'term': term}),
True
)
-@utils.check_resource('image')
+@utils.check_resource
def tag(self, image, repository, tag=None, force=False):
"""
Tag an image into a repository. Similar to the ``docker tag`` command.
Args:
image (str): The image to tag
repository (str): The repository to set for the tag
tag (str): The tag name
force (bool): Force
Returns:
(bool): ``True`` if successful
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> client.api.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
force=True)
"""
params = {
'tag': tag,
'repo': repository,
@ -570,32 +268,3 @@ class ImageApiMixin:
res = self._post(url, params=params)
self._raise_for_status(res)
return res.status_code == 201
def is_file(src):
try:
return (
isinstance(src, str) and
os.path.isfile(src)
)
except TypeError: # a data string will make isfile() raise a TypeError
return False
def _import_image_params(repo, tag, image=None, src=None,
changes=None):
params = {
'repo': repo,
'tag': tag,
}
if image:
params['fromImage'] = image
elif src and not is_file(src):
params['fromSrc'] = src
else:
params['fromSrc'] = '-'
if changes:
params['changes'] = changes
return params
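On the main side, pull() resolves the tag, attaches any registry credentials as an X-Registry-Auth header, and can stream decoded progress objects. A minimal sketch of the streaming form, assuming anonymous access to Docker Hub:

import docker

client = docker.APIClient()
for line in client.pull('busybox', tag='latest', stream=True, decode=True):
    # each item is a progress dict, e.g. {'status': ..., 'id': ...}
    print(line.get('status', ''))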

View File: docker/api/network.py

@ -1,97 +1,28 @@
-from .. import utils
+import json
from ..errors import InvalidVersion
-from ..utils import check_resource, minimum_version, version_lt
+from ..utils import check_resource, minimum_version, normalize_links
from ..utils import version_lt
-class NetworkApiMixin:
+class NetworkApiMixin(object):
-def networks(self, names=None, ids=None, filters=None):
+@minimum_version('1.21')
+def networks(self, names=None, ids=None):
+filters = {}
"""
List networks. Similar to the ``docker network ls`` command.
Args:
names (:py:class:`list`): List of names to filter by
ids (:py:class:`list`): List of ids to filter by
filters (dict): Filters to be processed on the network list.
Available filters:
- ``driver=[<driver-name>]`` Matches a network's driver.
- ``label=[<key>]``, ``label=[<key>=<value>]`` or a list of
such.
- ``type=["custom"|"builtin"]`` Filters networks by type.
Returns:
(dict): List of network objects.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if filters is None:
filters = {}
if names:
filters['name'] = names
if ids:
filters['id'] = ids
-params = {'filters': utils.convert_filters(filters)}
+params = {'filters': json.dumps(filters)}
url = self._url("/networks")
res = self._get(url, params=params)
return self._result(res, json=True)
+@minimum_version('1.21')
def create_network(self, name, driver=None, options=None, ipam=None,
-check_duplicate=None, internal=False, labels=None,
-enable_ipv6=False, attachable=None, scope=None,
-ingress=None):
+check_duplicate=None):
"""
Create a network. Similar to the ``docker network create``.
Args:
name (str): Name of the network
driver (str): Name of the driver used to create the network
options (dict): Driver options as a key-value dictionary
ipam (IPAMConfig): Optional custom IP scheme for the network.
check_duplicate (bool): Request daemon to check for networks with
same name. Default: ``None``.
internal (bool): Restrict external access to the network. Default
``False``.
labels (dict): Map of labels to set on the network. Default
``None``.
enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
attachable (bool): If enabled, and the network is in the global
scope, non-service containers on worker nodes will be able to
connect to the network.
scope (str): Specify the network's scope (``local``, ``global`` or
``swarm``)
ingress (bool): If set, create an ingress network which provides
the routing-mesh in swarm mode.
Returns:
(dict): The created network reference object
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
A network using the bridge driver:
>>> client.api.create_network("network1", driver="bridge")
You can also create more advanced networks with custom IPAM
configurations. For example, setting the subnet to
``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
.. code-block:: python
>>> ipam_pool = docker.types.IPAMPool(
subnet='192.168.52.0/24',
gateway='192.168.52.254'
)
>>> ipam_config = docker.types.IPAMConfig(
pool_configs=[ipam_pool]
)
>>> client.api.create_network("network1", driver="bridge",
ipam=ipam_config)
"""
if options is not None and not isinstance(options, dict):
raise TypeError('options must be a dictionary')
@ -100,178 +31,59 @@ class NetworkApiMixin:
'Driver': driver,
'Options': options,
'IPAM': ipam,
-'CheckDuplicate': check_duplicate,
+'CheckDuplicate': check_duplicate
}
if labels is not None:
if version_lt(self._version, '1.23'):
raise InvalidVersion(
'network labels were introduced in API 1.23'
)
if not isinstance(labels, dict):
raise TypeError('labels must be a dictionary')
data["Labels"] = labels
if enable_ipv6:
if version_lt(self._version, '1.23'):
raise InvalidVersion(
'enable_ipv6 was introduced in API 1.23'
)
data['EnableIPv6'] = True
if internal:
if version_lt(self._version, '1.22'):
raise InvalidVersion('Internal networks are not '
'supported in API version < 1.22')
data['Internal'] = True
if attachable is not None:
if version_lt(self._version, '1.24'):
raise InvalidVersion(
'attachable is not supported in API version < 1.24'
)
data['Attachable'] = attachable
if ingress is not None:
if version_lt(self._version, '1.29'):
raise InvalidVersion(
'ingress is not supported in API version < 1.29'
)
data['Ingress'] = ingress
if scope is not None:
if version_lt(self._version, '1.30'):
raise InvalidVersion(
'scope is not supported in API version < 1.30'
)
data['Scope'] = scope
url = self._url("/networks/create")
res = self._post_json(url, data=data)
return self._result(res, json=True)
-@minimum_version('1.25')
+@minimum_version('1.21')
def prune_networks(self, filters=None):
"""
Delete unused networks
Args:
filters (dict): Filters to process on the prune list.
Returns:
(dict): A dict containing a list of deleted network names and
the amount of disk space reclaimed in bytes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
url = self._url('/networks/prune')
return self._result(self._post(url, params=params), True)
@check_resource('net_id')
def remove_network(self, net_id):
"""
Remove a network. Similar to the ``docker network rm`` command.
Args:
net_id (str): The network's id
"""
url = self._url("/networks/{0}", net_id)
res = self._delete(url)
self._raise_for_status(res)
-@check_resource('net_id')
-def inspect_network(self, net_id, verbose=None, scope=None):
+@minimum_version('1.21')
+def inspect_network(self, net_id):
"""
Get detailed information about a network.
Args:
net_id (str): ID of network
verbose (bool): Show the service details across the cluster in
swarm mode.
scope (str): Filter the network by scope (``swarm``, ``global``
or ``local``).
"""
params = {}
if verbose is not None:
if version_lt(self._version, '1.28'):
raise InvalidVersion('verbose was introduced in API 1.28')
params['verbose'] = verbose
if scope is not None:
if version_lt(self._version, '1.31'):
raise InvalidVersion('scope was introduced in API 1.31')
params['scope'] = scope
url = self._url("/networks/{0}", net_id)
-res = self._get(url, params=params)
+res = self._get(url)
return self._result(res, json=True)
-@check_resource('container')
+@check_resource
+@minimum_version('1.21')
def connect_container_to_network(self, container, net_id,
ipv4_address=None, ipv6_address=None,
-aliases=None, links=None,
-link_local_ips=None, driver_opt=None,
-mac_address=None):
+aliases=None, links=None):
"""
Connect a container to a network.
Args:
container (str): container-id/name to be connected to the network
net_id (str): network id
aliases (:py:class:`list`): A list of aliases for this endpoint.
Names in that list can be used within the network to reach the
container. Defaults to ``None``.
links (:py:class:`list`): A list of links for this endpoint.
Containers declared in this list will be linked to this
container. Defaults to ``None``.
ipv4_address (str): The IP address of this container on the
network, using the IPv4 protocol. Defaults to ``None``.
ipv6_address (str): The IP address of this container on the
network, using the IPv6 protocol. Defaults to ``None``.
link_local_ips (:py:class:`list`): A list of link-local
(IPv4/IPv6) addresses.
mac_address (str): The MAC address of this container on the
network. Defaults to ``None``.
"""
data = {
"Container": container,
-"EndpointConfig": self.create_endpoint_config(
-aliases=aliases, links=links, ipv4_address=ipv4_address,
-ipv6_address=ipv6_address, link_local_ips=link_local_ips,
-driver_opt=driver_opt,
-mac_address=mac_address
-),
+"EndpointConfig": {
+"Aliases": aliases,
+"Links": normalize_links(links) if links else None,
+},
}
# IPv4 or IPv6 or neither:
if ipv4_address or ipv6_address:
if version_lt(self._version, '1.22'):
raise InvalidVersion('IP address assignment is not '
'supported in API version < 1.22')
data['EndpointConfig']['IPAMConfig'] = dict()
if ipv4_address:
data['EndpointConfig']['IPAMConfig']['IPv4Address'] = \
ipv4_address
if ipv6_address:
data['EndpointConfig']['IPAMConfig']['IPv6Address'] = \
ipv6_address
url = self._url("/networks/{0}/connect", net_id)
res = self._post_json(url, data=data)
self._raise_for_status(res)
-@check_resource('container')
-def disconnect_container_from_network(self, container, net_id,
-force=False):
+@check_resource
+@minimum_version('1.21')
+def disconnect_container_from_network(self, container, net_id):
+data = {"container": container}
"""
Disconnect a container from a network.
Args:
container (str): container ID or name to be disconnected from the
network
net_id (str): network ID
force (bool): Force the container to disconnect from a network.
Default: ``False``
"""
data = {"Container": container}
if force:
if version_lt(self._version, '1.22'):
raise InvalidVersion(
'Forced disconnect was introduced in API 1.22'
)
data['Force'] = force
url = self._url("/networks/{0}/disconnect", net_id)
res = self._post_json(url, data=data)
self._raise_for_status(res)
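The create_network docstring's IPAM example extends naturally to pinning a container to a fixed address. A sketch against the main-side API (IP assignment needs API >= 1.22); the network name, container name, and subnet are illustrative:

import docker
from docker.types import IPAMConfig, IPAMPool

client = docker.APIClient()
ipam = IPAMConfig(pool_configs=[
    IPAMPool(subnet='192.168.52.0/24', gateway='192.168.52.254')
])
net = client.create_network('demo-net', driver='bridge', ipam=ipam)
client.connect_container_to_network('web', net['Id'],
                                    ipv4_address='192.168.52.10',
                                    aliases=['web-demo'])
client.disconnect_container_from_network('web', net['Id'], force=True)
client.remove_network(net['Id'])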

View File: docker/api/plugin.py

@ -1,261 +0,0 @@
from .. import auth, utils
class PluginApiMixin:
@utils.minimum_version('1.25')
@utils.check_resource('name')
def configure_plugin(self, name, options):
"""
Configure a plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
options (dict): A key-value mapping of options
Returns:
``True`` if successful
"""
url = self._url('/plugins/{0}/set', name)
data = options
if isinstance(data, dict):
data = [f'{k}={v}' for k, v in data.items()]
res = self._post_json(url, data=data)
self._raise_for_status(res)
return True
@utils.minimum_version('1.25')
def create_plugin(self, name, plugin_data_dir, gzip=False):
"""
Create a new plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
plugin_data_dir (string): Path to the plugin data directory.
Plugin data directory must contain the ``config.json``
manifest file and the ``rootfs`` directory.
gzip (bool): Compress the context using gzip. Default: False
Returns:
``True`` if successful
"""
url = self._url('/plugins/create')
with utils.create_archive(
root=plugin_data_dir, gzip=gzip,
files=set(utils.build.walk(plugin_data_dir, []))
) as archv:
res = self._post(url, params={'name': name}, data=archv)
self._raise_for_status(res)
return True
@utils.minimum_version('1.25')
def disable_plugin(self, name, force=False):
"""
Disable an installed plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
force (bool): To enable the force query parameter.
Returns:
``True`` if successful
"""
url = self._url('/plugins/{0}/disable', name)
res = self._post(url, params={'force': force})
self._raise_for_status(res)
return True
@utils.minimum_version('1.25')
def enable_plugin(self, name, timeout=0):
"""
Enable an installed plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
timeout (int): Operation timeout (in seconds). Default: 0
Returns:
``True`` if successful
"""
url = self._url('/plugins/{0}/enable', name)
params = {'timeout': timeout}
res = self._post(url, params=params)
self._raise_for_status(res)
return True
@utils.minimum_version('1.25')
def inspect_plugin(self, name):
"""
Retrieve plugin metadata.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
Returns:
A dict containing plugin info
"""
url = self._url('/plugins/{0}/json', name)
return self._result(self._get(url), True)
@utils.minimum_version('1.25')
def pull_plugin(self, remote, privileges, name=None):
"""
Pull and install a plugin. After the plugin is installed, it can be
enabled using :py:meth:`~enable_plugin`.
Args:
remote (string): Remote reference for the plugin to install.
The ``:latest`` tag is optional, and is the default if
omitted.
privileges (:py:class:`list`): A list of privileges the user
consents to grant to the plugin. Can be retrieved using
:py:meth:`~plugin_privileges`.
name (string): Local name for the pulled plugin. The
``:latest`` tag is optional, and is the default if omitted.
Returns:
An iterable object streaming the decoded API logs
"""
url = self._url('/plugins/pull')
params = {
'remote': remote,
}
if name:
params['name'] = name
headers = {}
registry, repo_name = auth.resolve_repository_name(remote)
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
response = self._post_json(
url, params=params, headers=headers, data=privileges,
stream=True
)
self._raise_for_status(response)
return self._stream_helper(response, decode=True)
@utils.minimum_version('1.25')
def plugins(self):
"""
Retrieve a list of installed plugins.
Returns:
A list of dicts, one per plugin
"""
url = self._url('/plugins')
return self._result(self._get(url), True)
@utils.minimum_version('1.25')
def plugin_privileges(self, name):
"""
Retrieve list of privileges to be granted to a plugin.
Args:
name (string): Name of the remote plugin to examine. The
``:latest`` tag is optional, and is the default if omitted.
Returns:
A list of dictionaries representing the plugin's
permissions
"""
params = {
'remote': name,
}
headers = {}
registry, repo_name = auth.resolve_repository_name(name)
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
url = self._url('/plugins/privileges')
return self._result(
self._get(url, params=params, headers=headers), True
)
@utils.minimum_version('1.25')
@utils.check_resource('name')
def push_plugin(self, name):
"""
Push a plugin to the registry.
Args:
name (string): Name of the plugin to upload. The ``:latest``
tag is optional, and is the default if omitted.
Returns:
``True`` if successful
"""
url = self._url('/plugins/{0}/pull', name)
headers = {}
registry, repo_name = auth.resolve_repository_name(name)
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
res = self._post(url, headers=headers)
self._raise_for_status(res)
return self._stream_helper(res, decode=True)
@utils.minimum_version('1.25')
@utils.check_resource('name')
def remove_plugin(self, name, force=False):
"""
Remove an installed plugin.
Args:
name (string): Name of the plugin to remove. The ``:latest``
tag is optional, and is the default if omitted.
force (bool): Disable the plugin before removing. This may
result in issues if the plugin is in use by a container.
Returns:
``True`` if successful
"""
url = self._url('/plugins/{0}', name)
res = self._delete(url, params={'force': force})
self._raise_for_status(res)
return True
@utils.minimum_version('1.26')
@utils.check_resource('name')
def upgrade_plugin(self, name, remote, privileges):
"""
Upgrade an installed plugin.
Args:
name (string): Name of the plugin to upgrade. The ``:latest``
tag is optional and is the default if omitted.
remote (string): Remote reference to upgrade to. The
``:latest`` tag is optional and is the default if omitted.
privileges (:py:class:`list`): A list of privileges the user
consents to grant to the plugin. Can be retrieved using
:py:meth:`~plugin_privileges`.
Returns:
An iterable object streaming the decoded API logs
"""
url = self._url('/plugins/{0}/upgrade', name)
params = {
'remote': remote,
}
headers = {}
registry, repo_name = auth.resolve_repository_name(remote)
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
response = self._post_json(
url, params=params, headers=headers, data=privileges,
stream=True
)
self._raise_for_status(response)
return self._stream_helper(response, decode=True)
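PluginApiMixin splits installation into privilege negotiation, pull, and enable, mirroring ``docker plugin install``. A sketch of that sequence, using vieux/sshfs as the stock example plugin; the pull stream must be drained before enabling:

import docker

client = docker.APIClient()
remote = 'vieux/sshfs:latest'
privileges = client.plugin_privileges(remote)
for status in client.pull_plugin(remote, privileges):
    pass  # consume the decoded progress stream
client.enable_plugin(remote)
print([p['Name'] for p in client.plugins()])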

View File: docker/api/secret.py

@ -1,98 +0,0 @@
import base64
from .. import errors, utils
class SecretApiMixin:
@utils.minimum_version('1.25')
def create_secret(self, name, data, labels=None, driver=None):
"""
Create a secret
Args:
name (string): Name of the secret
data (bytes): Secret data to be stored
labels (dict): A mapping of labels to assign to the secret
driver (DriverConfig): A custom driver configuration. If
unspecified, the default ``internal`` driver will be used
Returns (dict): ID of the newly created secret
"""
if not isinstance(data, bytes):
data = data.encode('utf-8')
data = base64.b64encode(data)
data = data.decode('ascii')
body = {
'Data': data,
'Name': name,
'Labels': labels
}
if driver is not None:
if utils.version_lt(self._version, '1.31'):
raise errors.InvalidVersion(
'Secret driver is only available for API version > 1.31'
)
body['Driver'] = driver
url = self._url('/secrets/create')
return self._result(
self._post_json(url, data=body), True
)
@utils.minimum_version('1.25')
@utils.check_resource('id')
def inspect_secret(self, id):
"""
Retrieve secret metadata
Args:
id (string): Full ID of the secret to inspect
Returns (dict): A dictionary of metadata
Raises:
:py:class:`docker.errors.NotFound`
if no secret with that ID exists
"""
url = self._url('/secrets/{0}', id)
return self._result(self._get(url), True)
@utils.minimum_version('1.25')
@utils.check_resource('id')
def remove_secret(self, id):
"""
Remove a secret
Args:
id (string): Full ID of the secret to remove
Returns (boolean): True if successful
Raises:
:py:class:`docker.errors.NotFound`
if no secret with that ID exists
"""
url = self._url('/secrets/{0}', id)
res = self._delete(url)
self._raise_for_status(res)
return True
@utils.minimum_version('1.25')
def secrets(self, filters=None):
"""
List secrets
Args:
filters (dict): A map of filters to process on the secrets
list. Available filters: ``names``
Returns (list): A list of secrets
"""
url = self._url('/secrets')
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
return self._result(self._get(url, params=params), True)
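create_secret base64-encodes the payload itself, so callers pass raw bytes. A minimal round trip as a sketch, assuming a swarm-manager daemon (secrets are swarm-mode only); the name and payload are illustrative:

import docker

client = docker.APIClient()
secret = client.create_secret('db-password', b's3cr3t', labels={'env': 'demo'})
print(client.inspect_secret(secret['ID'])['Spec']['Name'])  # 'db-password'
client.remove_secret(secret['ID'])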

View File: docker/api/service.py

@ -1,486 +0,0 @@
from .. import auth, errors, utils
from ..types import ServiceMode
def _check_api_features(version, task_template, update_config, endpoint_spec,
rollback_config):
def raise_version_error(param, min_version):
raise errors.InvalidVersion(
f'{param} is not supported in API version < {min_version}'
)
if update_config is not None:
if utils.version_lt(version, '1.25'):
if 'MaxFailureRatio' in update_config:
raise_version_error('UpdateConfig.max_failure_ratio', '1.25')
if 'Monitor' in update_config:
raise_version_error('UpdateConfig.monitor', '1.25')
if utils.version_lt(version, '1.28'):
if update_config.get('FailureAction') == 'rollback':
raise_version_error(
'UpdateConfig.failure_action rollback', '1.28'
)
if utils.version_lt(version, '1.29'):
if 'Order' in update_config:
raise_version_error('UpdateConfig.order', '1.29')
if rollback_config is not None:
if utils.version_lt(version, '1.28'):
raise_version_error('rollback_config', '1.28')
if utils.version_lt(version, '1.29'):
if 'Order' in update_config:
raise_version_error('RollbackConfig.order', '1.29')
if endpoint_spec is not None:
if utils.version_lt(version, '1.32') and 'Ports' in endpoint_spec:
if any(p.get('PublishMode') for p in endpoint_spec['Ports']):
raise_version_error('EndpointSpec.Ports[].mode', '1.32')
if task_template is not None:
if 'ForceUpdate' in task_template and utils.version_lt(
version, '1.25'):
raise_version_error('force_update', '1.25')
if task_template.get('Placement'):
if utils.version_lt(version, '1.30'):
if task_template['Placement'].get('Platforms'):
raise_version_error('Placement.platforms', '1.30')
if utils.version_lt(version, '1.27'):
if task_template['Placement'].get('Preferences'):
raise_version_error('Placement.preferences', '1.27')
if task_template.get('ContainerSpec'):
container_spec = task_template.get('ContainerSpec')
if utils.version_lt(version, '1.25'):
if container_spec.get('TTY'):
raise_version_error('ContainerSpec.tty', '1.25')
if container_spec.get('Hostname') is not None:
raise_version_error('ContainerSpec.hostname', '1.25')
if container_spec.get('Hosts') is not None:
raise_version_error('ContainerSpec.hosts', '1.25')
if container_spec.get('Groups') is not None:
raise_version_error('ContainerSpec.groups', '1.25')
if container_spec.get('DNSConfig') is not None:
raise_version_error('ContainerSpec.dns_config', '1.25')
if container_spec.get('Healthcheck') is not None:
raise_version_error('ContainerSpec.healthcheck', '1.25')
if utils.version_lt(version, '1.28'):
if container_spec.get('ReadOnly') is not None:
raise_version_error('ContainerSpec.dns_config', '1.28')
if container_spec.get('StopSignal') is not None:
raise_version_error('ContainerSpec.stop_signal', '1.28')
if utils.version_lt(version, '1.30'):
if container_spec.get('Configs') is not None:
raise_version_error('ContainerSpec.configs', '1.30')
if container_spec.get('Privileges') is not None:
raise_version_error('ContainerSpec.privileges', '1.30')
if utils.version_lt(version, '1.35'):
if container_spec.get('Isolation') is not None:
raise_version_error('ContainerSpec.isolation', '1.35')
if utils.version_lt(version, '1.38'):
if container_spec.get('Init') is not None:
raise_version_error('ContainerSpec.init', '1.38')
if task_template.get('Resources'):
if utils.version_lt(version, '1.32'):
if task_template['Resources'].get('GenericResources'):
raise_version_error('Resources.generic_resources', '1.32')
def _merge_task_template(current, override):
merged = current.copy()
if override is not None:
for ts_key, ts_value in override.items():
if ts_key == 'ContainerSpec':
if 'ContainerSpec' not in merged:
merged['ContainerSpec'] = {}
for cs_key, cs_value in override['ContainerSpec'].items():
if cs_value is not None:
merged['ContainerSpec'][cs_key] = cs_value
elif ts_value is not None:
merged[ts_key] = ts_value
return merged
class ServiceApiMixin:
@utils.minimum_version('1.24')
def create_service(
self, task_template, name=None, labels=None, mode=None,
update_config=None, networks=None, endpoint_config=None,
endpoint_spec=None, rollback_config=None
):
"""
Create a service.
Args:
task_template (TaskTemplate): Specification of the task to start as
part of the new service.
name (string): User-defined name for the service. Optional.
labels (dict): A map of labels to associate with the service.
Optional.
mode (ServiceMode): Scheduling mode for the service (replicated
or global). Defaults to replicated.
update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None``
rollback_config (RollbackConfig): Specification for the rollback
strategy of the service. Default: ``None``
networks (:py:class:`list`): List of network names or IDs or
:py:class:`~docker.types.NetworkAttachmentConfig` to attach the
service to. Default: ``None``.
endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``.
Returns:
A dictionary containing an ``ID`` key for the newly created
service.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
_check_api_features(
self._version, task_template, update_config, endpoint_spec,
rollback_config
)
url = self._url('/services/create')
headers = {}
image = task_template.get('ContainerSpec', {}).get('Image', None)
if image is None:
raise errors.DockerException(
'Missing mandatory Image key in ContainerSpec'
)
if mode and not isinstance(mode, dict):
mode = ServiceMode(mode)
registry, repo_name = auth.resolve_repository_name(image)
auth_header = auth.get_config_header(self, registry)
if auth_header:
headers['X-Registry-Auth'] = auth_header
if utils.version_lt(self._version, '1.25'):
networks = networks or task_template.pop('Networks', None)
data = {
'Name': name,
'Labels': labels,
'TaskTemplate': task_template,
'Mode': mode,
'Networks': utils.convert_service_networks(networks),
'EndpointSpec': endpoint_spec
}
if update_config is not None:
data['UpdateConfig'] = update_config
if rollback_config is not None:
data['RollbackConfig'] = rollback_config
return self._result(
self._post_json(url, data=data, headers=headers), True
)
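For illustration, a minimal end-to-end call through this method; the image, name and replica count are assumptions, and the daemon must already be a swarm manager:

import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
spec = docker.types.ContainerSpec(image='nginx:alpine')
template = docker.types.TaskTemplate(spec)
result = client.create_service(
    template,
    name='web',
    mode=docker.types.ServiceMode('replicated', replicas=3),
)
print(result['ID'])  # ID of the newly created service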
@utils.minimum_version('1.24')
@utils.check_resource('service')
def inspect_service(self, service, insert_defaults=None):
"""
Return information about a service.
Args:
service (str): Service name or ID.
insert_defaults (boolean): If true, default values will be merged
into the service inspect output.
Returns:
(dict): A dictionary of the server-side representation of the
service, including all relevant properties.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/services/{0}', service)
params = {}
if insert_defaults is not None:
if utils.version_lt(self._version, '1.29'):
raise errors.InvalidVersion(
'insert_defaults is not supported in API version < 1.29'
)
params['insertDefaults'] = insert_defaults
return self._result(self._get(url, params=params), True)
@utils.minimum_version('1.24')
@utils.check_resource('task')
def inspect_task(self, task):
"""
Retrieve information about a task.
Args:
task (str): Task ID
Returns:
(dict): Information about the task.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/tasks/{0}', task)
return self._result(self._get(url), True)
@utils.minimum_version('1.24')
@utils.check_resource('service')
def remove_service(self, service):
"""
Stop and remove a service.
Args:
service (str): Service name or ID
Returns:
``True`` if successful.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/services/{0}', service)
resp = self._delete(url)
self._raise_for_status(resp)
return True
@utils.minimum_version('1.24')
def services(self, filters=None, status=None):
"""
List services.
Args:
filters (dict): Filters to process on the services list. Valid
filters: ``id``, ``name`` , ``label`` and ``mode``.
Default: ``None``.
status (bool): Include the service task count of running and
desired tasks. Default: ``None``.
Returns:
A list of dictionaries containing data about each service.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {
'filters': utils.convert_filters(filters) if filters else None
}
if status is not None:
if utils.version_lt(self._version, '1.41'):
raise errors.InvalidVersion(
'status is not supported in API version < 1.41'
)
params['status'] = status
url = self._url('/services')
return self._result(self._get(url, params=params), True)
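A short sketch of list filtering; the label value is an assumption:

import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
for svc in client.services(filters={'label': 'env=prod'}):
    print(svc['ID'], svc['Spec']['Name'])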
@utils.minimum_version('1.25')
@utils.check_resource('service')
def service_logs(self, service, details=False, follow=False, stdout=False,
stderr=False, since=0, timestamps=False, tail='all',
is_tty=None):
"""
Get log stream for a service.
Note: This endpoint works only for services with the ``json-file``
or ``journald`` logging drivers.
Args:
service (str): ID or name of the service
details (bool): Show extra details provided to logs.
Default: ``False``
follow (bool): Keep connection open to read logs as they are
sent by the Engine. Default: ``False``
stdout (bool): Return logs from ``stdout``. Default: ``False``
stderr (bool): Return logs from ``stderr``. Default: ``False``
since (int): UNIX timestamp for the logs starting point.
Default: 0
timestamps (bool): Add timestamps to every log line.
tail (string or int): Number of log lines to be returned,
counting from the current end of the logs. Specify an
integer or ``'all'`` to output all log lines.
Default: ``all``
is_tty (bool): Whether the service's :py:class:`ContainerSpec`
enables the TTY option. If omitted, the method will query
the Engine for the information, causing an additional
roundtrip.
Returns:
(generator): Logs for the service.
"""
params = {
'details': details,
'follow': follow,
'stdout': stdout,
'stderr': stderr,
'since': since,
'timestamps': timestamps,
'tail': tail
}
url = self._url('/services/{0}/logs', service)
res = self._get(url, params=params, stream=True)
if is_tty is None:
is_tty = self.inspect_service(
service
)['Spec']['TaskTemplate']['ContainerSpec'].get('TTY', False)
return self._get_result_tty(True, res, is_tty)
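A sketch of consuming the returned generator; passing ``is_tty`` up front skips the extra ``inspect_service`` round-trip (the service name is an assumption):

import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
stream = client.service_logs(
    'web', stdout=True, stderr=True, follow=True, tail=100, is_tty=False
)
for chunk in stream:
    # chunks are raw bytes when the service is not TTY-enabled
    print(chunk.decode('utf-8', errors='replace'), end='')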
@utils.minimum_version('1.24')
def tasks(self, filters=None):
"""
Retrieve a list of tasks.
Args:
filters (dict): A map of filters to process on the tasks list.
Valid filters: ``id``, ``name``, ``service``, ``node``,
``label`` and ``desired-state``.
Returns:
(:py:class:`list`): List of task dictionaries.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {
'filters': utils.convert_filters(filters) if filters else None
}
url = self._url('/tasks')
return self._result(self._get(url, params=params), True)
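For example, restricting the listing to the running tasks of one service (the service name is an assumption):

import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
tasks = client.tasks(filters={'service': 'web', 'desired-state': 'running'})
for task in tasks:
    print(task['ID'], task['Status']['State'])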
@utils.minimum_version('1.24')
@utils.check_resource('service')
def update_service(self, service, version, task_template=None, name=None,
labels=None, mode=None, update_config=None,
networks=None, endpoint_config=None,
endpoint_spec=None, fetch_current_spec=False,
rollback_config=None):
"""
Update a service.
Args:
service (string): A service identifier (either its name or service
ID).
version (int): The version number of the service object being
updated. This is required to avoid conflicting writes.
task_template (TaskTemplate): Specification of the updated task to
start as part of the service.
name (string): New name for the service. Optional.
labels (dict): A map of labels to associate with the service.
Optional.
mode (ServiceMode): Scheduling mode for the service (replicated
or global). Defaults to replicated.
update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None``.
rollback_config (RollbackConfig): Specification for the rollback
strategy of the service. Default: ``None``
networks (:py:class:`list`): List of network names or IDs or
:py:class:`~docker.types.NetworkAttachmentConfig` to attach the
service to. Default: ``None``.
endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``.
fetch_current_spec (boolean): Use the undefined settings from the
current specification of the service. Default: ``False``
Returns:
A dictionary containing a ``Warnings`` key.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
_check_api_features(
self._version, task_template, update_config, endpoint_spec,
rollback_config
)
if fetch_current_spec:
inspect_defaults = True
if utils.version_lt(self._version, '1.29'):
inspect_defaults = None
current = self.inspect_service(
service, insert_defaults=inspect_defaults
)['Spec']
else:
current = {}
url = self._url('/services/{0}/update', service)
data = {}
headers = {}
data['Name'] = current.get('Name') if name is None else name
data['Labels'] = current.get('Labels') if labels is None else labels
if mode is not None:
if not isinstance(mode, dict):
mode = ServiceMode(mode)
data['Mode'] = mode
else:
data['Mode'] = current.get('Mode')
data['TaskTemplate'] = _merge_task_template(
current.get('TaskTemplate', {}), task_template
)
container_spec = data['TaskTemplate'].get('ContainerSpec', {})
image = container_spec.get('Image', None)
if image is not None:
registry, repo_name = auth.resolve_repository_name(image)
auth_header = auth.get_config_header(self, registry)
if auth_header:
headers['X-Registry-Auth'] = auth_header
if update_config is not None:
data['UpdateConfig'] = update_config
else:
data['UpdateConfig'] = current.get('UpdateConfig')
if rollback_config is not None:
data['RollbackConfig'] = rollback_config
else:
data['RollbackConfig'] = current.get('RollbackConfig')
if networks is not None:
converted_networks = utils.convert_service_networks(networks)
if utils.version_lt(self._version, '1.25'):
data['Networks'] = converted_networks
else:
data['TaskTemplate']['Networks'] = converted_networks
elif utils.version_lt(self._version, '1.25'):
data['Networks'] = current.get('Networks')
elif data['TaskTemplate'].get('Networks') is None:
current_task_template = current.get('TaskTemplate', {})
current_networks = current_task_template.get('Networks')
if current_networks is None:
current_networks = current.get('Networks')
if current_networks is not None:
data['TaskTemplate']['Networks'] = current_networks
if endpoint_spec is not None:
data['EndpointSpec'] = endpoint_spec
else:
data['EndpointSpec'] = current.get('EndpointSpec')
resp = self._post_json(
url, data=data, params={'version': version}, headers=headers
)
return self._result(resp, json=True)
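A sketch of the read-modify-write cycle this method expects: the version index must come from a fresh inspect, and the service name and image tag here are assumptions:

import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
current = client.inspect_service('web')
version = current['Version']['Index']
spec = docker.types.ContainerSpec(image='nginx:1.27-alpine')
template = docker.types.TaskTemplate(spec)
result = client.update_service(
    'web', version, task_template=template, fetch_current_spec=True
)
print(result.get('Warnings'))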

View File

@ -1,462 +0,0 @@
import http.client as http_client
import logging
from .. import errors, types, utils
from ..constants import DEFAULT_SWARM_ADDR_POOL, DEFAULT_SWARM_SUBNET_SIZE
log = logging.getLogger(__name__)
class SwarmApiMixin:
def create_swarm_spec(self, *args, **kwargs):
"""
Create a :py:class:`docker.types.SwarmSpec` instance that can be used
as the ``swarm_spec`` argument in
:py:meth:`~docker.api.swarm.SwarmApiMixin.init_swarm`.
Args:
task_history_retention_limit (int): Maximum number of tasks
history stored.
snapshot_interval (int): Number of log entries between snapshots.
keep_old_snapshots (int): Number of snapshots to keep beyond the
current snapshot.
log_entries_for_slow_followers (int): Number of log entries to
keep around to sync up slow followers after a snapshot is
created.
heartbeat_tick (int): Number of ticks (in seconds) between each
heartbeat.
election_tick (int): Number of ticks (in seconds) needed without a
leader to trigger a new election.
dispatcher_heartbeat_period (int): The delay for an agent to send
a heartbeat to the dispatcher.
node_cert_expiry (int): Automatic expiry for nodes certificates.
external_cas (:py:class:`list`): Configuration for forwarding
signing requests to an external certificate authority. Use
a list of :py:class:`docker.types.SwarmExternalCA`.
name (string): Swarm's name
labels (dict): User-defined key/value metadata.
signing_ca_cert (str): The desired signing CA certificate for all
swarm node TLS leaf certificates, in PEM format.
signing_ca_key (str): The desired signing CA key for all swarm
node TLS leaf certificates, in PEM format.
ca_force_rotate (int): A counter whose increment forces the swarm
to generate a new signing CA certificate and key, even if none
have been specified.
autolock_managers (boolean): If set, generate a key and use it to
lock data stored on the managers.
log_driver (DriverConfig): The default log driver to use for tasks
created in the orchestrator.
Returns:
:py:class:`docker.types.SwarmSpec`
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> spec = client.api.create_swarm_spec(
snapshot_interval=5000, log_entries_for_slow_followers=1200
)
>>> client.api.init_swarm(
advertise_addr='eth0', listen_addr='0.0.0.0:5000',
force_new_cluster=False, swarm_spec=spec
)
"""
ext_ca = kwargs.pop('external_ca', None)
if ext_ca:
kwargs['external_cas'] = [ext_ca]
return types.SwarmSpec(self._version, *args, **kwargs)
@utils.minimum_version('1.24')
def get_unlock_key(self):
"""
Get the unlock key for this Swarm manager.
Returns:
A ``dict`` containing an ``UnlockKey`` member
"""
return self._result(self._get(self._url('/swarm/unlockkey')), True)
@utils.minimum_version('1.24')
def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
force_new_cluster=False, swarm_spec=None,
default_addr_pool=None, subnet_size=None,
data_path_addr=None, data_path_port=None):
"""
Initialize a new Swarm using the current connected engine as the first
node.
Args:
advertise_addr (string): Externally reachable address advertised
to other nodes. This can either be an address/port combination
in the form ``192.168.1.1:4567``, or an interface followed by a
port number, like ``eth0:4567``. If the port number is omitted,
the port number from the listen address is used. If
``advertise_addr`` is not specified, it will be automatically
detected when possible. Default: None
listen_addr (string): Listen address used for inter-manager
communication, as well as determining the networking interface
used for the VXLAN Tunnel Endpoint (VTEP). This can either be
an address/port combination in the form ``192.168.1.1:4567``,
or an interface followed by a port number, like ``eth0:4567``.
If the port number is omitted, the default swarm listening port
is used. Default: '0.0.0.0:2377'
force_new_cluster (bool): Force creating a new Swarm, even if
already part of one. Default: False
swarm_spec (dict): Configuration settings of the new Swarm. Use
``APIClient.create_swarm_spec`` to generate a valid
configuration. Default: None
default_addr_pool (list of strings): Default Address Pool specifies
default subnet pools for global scope networks. Each pool
should be specified as a CIDR block, like '10.0.0.0/8'.
Default: None
subnet_size (int): SubnetSize specifies the subnet size of the
networks created from the default subnet pool. Default: None
data_path_addr (string): Address or interface to use for data path
traffic. For example, 192.168.1.1, or an interface, like eth0.
data_path_port (int): Port number to use for data path traffic.
Acceptable port range is 1024 to 49151. If set to ``None`` or
0, the default port 4789 will be used. Default: None
Returns:
(str): The ID of the created node.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/swarm/init')
if swarm_spec is not None and not isinstance(swarm_spec, dict):
raise TypeError('swarm_spec must be a dictionary')
if default_addr_pool is not None:
if utils.version_lt(self._version, '1.39'):
raise errors.InvalidVersion(
'Address pool is only available for API version >= 1.39'
)
# subnet_size becomes 0 if not set with default_addr_pool
if subnet_size is None:
subnet_size = DEFAULT_SWARM_SUBNET_SIZE
if subnet_size is not None:
if utils.version_lt(self._version, '1.39'):
raise errors.InvalidVersion(
'Subnet size is only available for API version >= 1.39'
)
# subnet_size is ignored if set without default_addr_pool
if default_addr_pool is None:
default_addr_pool = DEFAULT_SWARM_ADDR_POOL
data = {
'AdvertiseAddr': advertise_addr,
'ListenAddr': listen_addr,
'DefaultAddrPool': default_addr_pool,
'SubnetSize': subnet_size,
'ForceNewCluster': force_new_cluster,
'Spec': swarm_spec,
}
if data_path_addr is not None:
if utils.version_lt(self._version, '1.30'):
raise errors.InvalidVersion(
'Data address path is only available for '
'API version >= 1.30'
)
data['DataPathAddr'] = data_path_addr
if data_path_port is not None:
if utils.version_lt(self._version, '1.40'):
raise errors.InvalidVersion(
'Data path port is only available for '
'API version >= 1.40'
)
data['DataPathPort'] = data_path_port
response = self._post_json(url, data=data)
return self._result(response, json=True)
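A minimal sketch of initializing a swarm with a custom address pool; the interface name and CIDR are assumptions, and the pool arguments require API >= 1.39:

import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
node_id = client.init_swarm(
    advertise_addr='eth0',
    default_addr_pool=['10.65.0.0/16'],
    subnet_size=24,
)
print(node_id)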
@utils.minimum_version('1.24')
def inspect_swarm(self):
"""
Retrieve low-level information about the current swarm.
Returns:
A dictionary containing data about the swarm.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/swarm')
return self._result(self._get(url), True)
@utils.check_resource('node_id')
@utils.minimum_version('1.24')
def inspect_node(self, node_id):
"""
Retrieve low-level information about a swarm node
Args:
node_id (string): ID of the node to be inspected.
Returns:
A dictionary containing data about this node.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/nodes/{0}', node_id)
return self._result(self._get(url), True)
@utils.minimum_version('1.24')
def join_swarm(self, remote_addrs, join_token, listen_addr='0.0.0.0:2377',
advertise_addr=None, data_path_addr=None):
"""
Make this Engine join a swarm that has already been created.
Args:
remote_addrs (:py:class:`list`): Addresses of one or more manager
nodes already participating in the Swarm to join.
join_token (string): Secret token for joining this Swarm.
listen_addr (string): Listen address used for inter-manager
communication if the node gets promoted to manager, as well as
determining the networking interface used for the VXLAN Tunnel
Endpoint (VTEP). Default: ``'0.0.0.0:2377'``
advertise_addr (string): Externally reachable address advertised
to other nodes. This can either be an address/port combination
in the form ``192.168.1.1:4567``, or an interface followed by a
port number, like ``eth0:4567``. If the port number is omitted,
the port number from the listen address is used. If
AdvertiseAddr is not specified, it will be automatically
detected when possible. Default: ``None``
data_path_addr (string): Address or interface to use for data path
traffic. For example, 192.168.1.1, or an interface, like eth0.
Returns:
``True`` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
data = {
'RemoteAddrs': remote_addrs,
'ListenAddr': listen_addr,
'JoinToken': join_token,
'AdvertiseAddr': advertise_addr,
}
if data_path_addr is not None:
if utils.version_lt(self._version, '1.30'):
raise errors.InvalidVersion(
'Data address path is only available for '
'API version >= 1.30'
)
data['DataPathAddr'] = data_path_addr
url = self._url('/swarm/join')
response = self._post_json(url, data=data)
self._raise_for_status(response)
return True
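A sketch of a two-engine join; the manager's addresses are assumptions, and the join token comes from ``inspect_swarm`` on the manager:

import docker

manager = docker.APIClient(base_url='tcp://192.168.1.10:2375')
token = manager.inspect_swarm()['JoinTokens']['Worker']

worker = docker.APIClient(base_url='unix://var/run/docker.sock')
worker.join_swarm(remote_addrs=['192.168.1.10:2377'], join_token=token)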
@utils.minimum_version('1.24')
def leave_swarm(self, force=False):
"""
Leave a swarm.
Args:
force (bool): Leave the swarm even if this node is a manager.
Default: ``False``
Returns:
``True`` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/swarm/leave')
response = self._post(url, params={'force': force})
# Ignore "this node is not part of a swarm" error
if force and response.status_code == http_client.NOT_ACCEPTABLE:
return True
# FIXME: Temporary workaround for 1.13.0-rc bug
# https://github.com/docker/docker/issues/29192
if force and response.status_code == http_client.SERVICE_UNAVAILABLE:
return True
self._raise_for_status(response)
return True
@utils.minimum_version('1.24')
def nodes(self, filters=None):
"""
List swarm nodes.
Args:
filters (dict): Filters to process on the nodes list. Valid
filters: ``id``, ``name``, ``membership`` and ``role``.
Default: ``None``
Returns:
A list of dictionaries containing data about each swarm node.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/nodes')
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
return self._result(self._get(url, params=params), True)
@utils.check_resource('node_id')
@utils.minimum_version('1.24')
def remove_node(self, node_id, force=False):
"""
Remove a node from the swarm.
Args:
node_id (string): ID of the node to be removed.
force (bool): Force remove an active node. Default: `False`
Raises:
:py:class:`docker.errors.NotFound`
If the node referenced doesn't exist in the swarm.
:py:class:`docker.errors.APIError`
If the server returns an error.
Returns:
`True` if the request was successful.
"""
url = self._url('/nodes/{0}', node_id)
params = {
'force': force
}
res = self._delete(url, params=params)
self._raise_for_status(res)
return True
@utils.minimum_version('1.24')
def unlock_swarm(self, key):
"""
Unlock a locked swarm.
Args:
key (string): The unlock key as provided by
:py:meth:`get_unlock_key`
Raises:
:py:class:`docker.errors.InvalidArgument`
If the key argument is in an incompatible format
:py:class:`docker.errors.APIError`
If the server returns an error.
Returns:
`True` if the request was successful.
Example:
>>> key = client.api.get_unlock_key()
>>> client.unlock_swarm(key)
"""
if isinstance(key, dict):
if 'UnlockKey' not in key:
raise errors.InvalidArgument('Invalid unlock key format')
else:
key = {'UnlockKey': key}
url = self._url('/swarm/unlock')
res = self._post_json(url, data=key)
self._raise_for_status(res)
return True
@utils.minimum_version('1.24')
def update_node(self, node_id, version, node_spec=None):
"""
Update the node's configuration
Args:
node_id (string): ID of the node to be updated.
version (int): The version number of the node object being
updated. This is required to avoid conflicting writes.
node_spec (dict): Configuration settings to update. Any values
not provided will be removed. Default: ``None``
Returns:
`True` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> node_spec = {'Availability': 'active',
'Name': 'node-name',
'Role': 'manager',
'Labels': {'foo': 'bar'}
}
>>> client.api.update_node(node_id='24ifsmvkjbyhk', version=8,
node_spec=node_spec)
"""
url = self._url('/nodes/{0}/update?version={1}', node_id, str(version))
res = self._post_json(url, data=node_spec)
self._raise_for_status(res)
return True
@utils.minimum_version('1.24')
def update_swarm(self, version, swarm_spec=None,
rotate_worker_token=False,
rotate_manager_token=False,
rotate_manager_unlock_key=False):
"""
Update the Swarm's configuration
Args:
version (int): The version number of the swarm object being
updated. This is required to avoid conflicting writes.
swarm_spec (dict): Configuration settings to update. Use
:py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec` to
generate a valid configuration. Default: ``None``.
rotate_worker_token (bool): Rotate the worker join token. Default:
``False``.
rotate_manager_token (bool): Rotate the manager join token.
Default: ``False``.
rotate_manager_unlock_key (bool): Rotate the manager unlock key.
Default: ``False``.
Returns:
``True`` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/swarm/update')
params = {
'rotateWorkerToken': rotate_worker_token,
'rotateManagerToken': rotate_manager_token,
'version': version
}
if rotate_manager_unlock_key:
if utils.version_lt(self._version, '1.25'):
raise errors.InvalidVersion(
'Rotate manager unlock key '
'is only available for API version >= 1.25'
)
params['rotateManagerUnlockKey'] = rotate_manager_unlock_key
response = self._post_json(url, data=swarm_spec, params=params)
self._raise_for_status(response)
return True
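For instance, rotating the worker join token without touching the rest of the spec; as with service updates, the version index must come from a fresh inspect:

import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
swarm = client.inspect_swarm()
client.update_swarm(
    version=swarm['Version']['Index'],
    swarm_spec=swarm['Spec'],
    rotate_worker_token=True,
)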

View File

@ -1,74 +1,17 @@
-from .. import errors, utils
+from .. import utils
-class VolumeApiMixin:
+class VolumeApiMixin(object):
+    @utils.minimum_version('1.21')
     def volumes(self, filters=None):
-        """
-        List volumes currently registered by the docker daemon. Similar to the
-        ``docker volume ls`` command.
-        Args:
-            filters (dict): Server-side list filtering options.
-        Returns:
-            (dict): Dictionary with list of volume objects as value of the
-            ``Volumes`` key.
-        Raises:
-            :py:class:`docker.errors.APIError`
-                If the server returns an error.
-        Example:
-            >>> client.api.volumes()
-            {u'Volumes': [{u'Driver': u'local',
-               u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
-               u'Name': u'foobar'},
-              {u'Driver': u'local',
-               u'Mountpoint': u'/var/lib/docker/volumes/baz/_data',
-               u'Name': u'baz'}]}
-        """
         params = {
             'filters': utils.convert_filters(filters) if filters else None
         }
         url = self._url('/volumes')
         return self._result(self._get(url, params=params), True)
-    def create_volume(self, name=None, driver=None, driver_opts=None,
-                      labels=None):
-        """
-        Create and register a named volume
-        Args:
-            name (str): Name of the volume
-            driver (str): Name of the driver used to create the volume
-            driver_opts (dict): Driver options as a key-value dictionary
-            labels (dict): Labels to set on the volume
-        Returns:
-            (dict): The created volume reference object
-        Raises:
-            :py:class:`docker.errors.APIError`
-                If the server returns an error.
-        Example:
-            >>> volume = client.api.create_volume(
-            ...     name='foobar',
-            ...     driver='local',
-            ...     driver_opts={'foo': 'bar', 'baz': 'false'},
-            ...     labels={"key": "value"},
-            ... )
-            ... print(volume)
-            {u'Driver': u'local',
-             u'Labels': {u'key': u'value'},
-             u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
-             u'Name': u'foobar',
-             u'Scope': u'local'}
-        """
+    @utils.minimum_version('1.21')
+    def create_volume(self, name, driver=None, driver_opts=None):
         url = self._url('/volumes/create')
         if driver_opts is not None and not isinstance(driver_opts, dict):
             raise TypeError('driver_opts must be a dictionary')
@ -78,86 +21,15 @@ class VolumeApiMixin:
             'Driver': driver,
             'DriverOpts': driver_opts,
         }
-        if labels is not None:
-            if utils.compare_version('1.23', self._version) < 0:
-                raise errors.InvalidVersion(
-                    'volume labels were introduced in API 1.23'
-                )
-            if not isinstance(labels, dict):
-                raise TypeError('labels must be a dictionary')
-            data["Labels"] = labels
         return self._result(self._post_json(url, data=data), True)
+    @utils.minimum_version('1.21')
     def inspect_volume(self, name):
-        """
-        Retrieve volume info by name.
-        Args:
-            name (str): volume name
-        Returns:
-            (dict): Volume information dictionary
-        Raises:
-            :py:class:`docker.errors.APIError`
-                If the server returns an error.
-        Example:
-            >>> client.api.inspect_volume('foobar')
-            {u'Driver': u'local',
-             u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
-             u'Name': u'foobar'}
-        """
         url = self._url('/volumes/{0}', name)
         return self._result(self._get(url), True)
-    @utils.minimum_version('1.25')
-    def prune_volumes(self, filters=None):
-        """
-        Delete unused volumes
-        Args:
-            filters (dict): Filters to process on the prune list.
-        Returns:
-            (dict): A dict containing a list of deleted volume names and
-                the amount of disk space reclaimed in bytes.
-        Raises:
-            :py:class:`docker.errors.APIError`
-                If the server returns an error.
-        """
-        params = {}
-        if filters:
-            params['filters'] = utils.convert_filters(filters)
-        url = self._url('/volumes/prune')
-        return self._result(self._post(url, params=params), True)
-    def remove_volume(self, name, force=False):
-        """
-        Remove a volume. Similar to the ``docker volume rm`` command.
-        Args:
-            name (str): The volume's name
-            force (bool): Force removal of volumes that were already removed
-                out of band by the volume driver plugin.
-        Raises:
-            :py:class:`docker.errors.APIError`
-                If volume failed to remove.
-        """
-        params = {}
-        if force:
-            if utils.version_lt(self._version, '1.25'):
-                raise errors.InvalidVersion(
-                    'force removal was introduced in API 1.25'
-                )
-            params = {'force': force}
-        url = self._url('/volumes/{0}', name, params=params)
+    @utils.minimum_version('1.21')
+    def remove_volume(self, name):
+        url = self._url('/volumes/{0}', name)
         resp = self._delete(url)
         self._raise_for_status(resp)

View File

@ -1,378 +0,0 @@
import base64
import json
import logging
from . import credentials, errors
from .utils import config
INDEX_NAME = 'docker.io'
INDEX_URL = f'https://index.{INDEX_NAME}/v1/'
TOKEN_USERNAME = '<token>'
log = logging.getLogger(__name__)
def resolve_repository_name(repo_name):
if '://' in repo_name:
raise errors.InvalidRepository(
f'Repository name cannot contain a scheme ({repo_name})'
)
index_name, remote_name = split_repo_name(repo_name)
if index_name[0] == '-' or index_name[-1] == '-':
raise errors.InvalidRepository(
f'Invalid index name ({index_name}). '
'Cannot begin or end with a hyphen.'
)
return resolve_index_name(index_name), remote_name
def resolve_index_name(index_name):
index_name = convert_to_hostname(index_name)
if index_name == f"index.{INDEX_NAME}":
index_name = INDEX_NAME
return index_name
def get_config_header(client, registry):
log.debug('Looking for auth config')
if not client._auth_configs or client._auth_configs.is_empty:
log.debug(
"No auth config in memory - loading from filesystem"
)
client._auth_configs = load_config(credstore_env=client.credstore_env)
authcfg = resolve_authconfig(
client._auth_configs, registry, credstore_env=client.credstore_env
)
# Do not fail here if no authentication exists for this
# specific registry, as the pull may be anonymous (read-only).
# Just set the header when we have credentials.
if authcfg:
log.debug('Found auth config')
# auth_config needs to be a dict in the format used by
# auth.py: username, password, serveraddress, email
return encode_header(authcfg)
log.debug('No auth config found')
return None
def split_repo_name(repo_name):
parts = repo_name.split('/', 1)
if len(parts) == 1 or (
'.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'
):
# This is a docker index repo (ex: username/foobar or ubuntu)
return INDEX_NAME, repo_name
return tuple(parts)
def get_credential_store(authconfig, registry):
if not isinstance(authconfig, AuthConfig):
authconfig = AuthConfig(authconfig)
return authconfig.get_credential_store(registry)
class AuthConfig(dict):
def __init__(self, dct, credstore_env=None):
if 'auths' not in dct:
dct['auths'] = {}
self.update(dct)
self._credstore_env = credstore_env
self._stores = {}
@classmethod
def parse_auth(cls, entries, raise_on_error=False):
"""
Parses authentication entries
Args:
entries: Dict of authentication entries.
raise_on_error: If set to true, an invalid format will raise
InvalidConfigFile
Returns:
Authentication registry.
"""
conf = {}
for registry, entry in entries.items():
if not isinstance(entry, dict):
log.debug(
f'Config entry for key {registry} is not auth config'
)
# We sometimes fall back to parsing the whole config as if it
# was the auth config by itself, for legacy purposes. In that
# case, we fail silently and return an empty conf if any of the
# keys is not formatted properly.
if raise_on_error:
raise errors.InvalidConfigFile(
f'Invalid configuration for registry {registry}'
)
return {}
if 'identitytoken' in entry:
log.debug(f'Found an IdentityToken entry for registry {registry}')
conf[registry] = {
'IdentityToken': entry['identitytoken']
}
continue # Other values are irrelevant if we have a token
if 'auth' not in entry:
# Starting with engine v1.11 (API 1.23), an empty dictionary is
# a valid value in the auths config.
# https://github.com/docker/compose/issues/3265
log.debug(
f'Auth data for {registry} is absent. '
'Client might be using a credentials store instead.'
)
conf[registry] = {}
continue
username, password = decode_auth(entry['auth'])
log.debug(
f'Found entry (registry={registry!r}, username={username!r})'
)
conf[registry] = {
'username': username,
'password': password,
'email': entry.get('email'),
'serveraddress': registry,
}
return conf
@classmethod
def load_config(cls, config_path, config_dict, credstore_env=None):
"""
Loads authentication data from a Docker configuration file in the given
root directory or if config_path is passed use given path.
Lookup priority:
explicit config_path parameter > DOCKER_CONFIG environment
variable > ~/.docker/config.json > ~/.dockercfg
"""
if not config_dict:
config_file = config.find_config_file(config_path)
if not config_file:
return cls({}, credstore_env)
try:
with open(config_file) as f:
config_dict = json.load(f)
except (OSError, KeyError, ValueError) as e:
# Likely missing new Docker config file or it's in an
# unknown format, continue to attempt to read old location
# and format.
log.debug(e)
return cls(_load_legacy_config(config_file), credstore_env)
res = {}
if config_dict.get('auths'):
log.debug("Found 'auths' section")
res.update({
'auths': cls.parse_auth(
config_dict.pop('auths'), raise_on_error=True
)
})
if config_dict.get('credsStore'):
log.debug("Found 'credsStore' section")
res.update({'credsStore': config_dict.pop('credsStore')})
if config_dict.get('credHelpers'):
log.debug("Found 'credHelpers' section")
res.update({'credHelpers': config_dict.pop('credHelpers')})
if res:
return cls(res, credstore_env)
log.debug(
"Couldn't find auth-related section ; attempting to interpret "
"as auth-only file"
)
return cls({'auths': cls.parse_auth(config_dict)}, credstore_env)
@property
def auths(self):
return self.get('auths', {})
@property
def creds_store(self):
return self.get('credsStore', None)
@property
def cred_helpers(self):
return self.get('credHelpers', {})
@property
def is_empty(self):
return (
not self.auths and not self.creds_store and not self.cred_helpers
)
def resolve_authconfig(self, registry=None):
"""
Returns the authentication data from the given auth configuration for a
specific registry. As with the Docker client, legacy entries in the
config with full URLs are stripped down to hostnames before checking
for a match. Returns None if no match was found.
"""
if self.creds_store or self.cred_helpers:
store_name = self.get_credential_store(registry)
if store_name is not None:
log.debug(
f'Using credentials store "{store_name}"'
)
cfg = self._resolve_authconfig_credstore(registry, store_name)
if cfg is not None:
return cfg
log.debug('No entry in credstore - fetching from auth dict')
# Default to the public index server
registry = resolve_index_name(registry) if registry else INDEX_NAME
log.debug(f"Looking for auth entry for {repr(registry)}")
if registry in self.auths:
log.debug(f"Found {repr(registry)}")
return self.auths[registry]
for key, conf in self.auths.items():
if resolve_index_name(key) == registry:
log.debug(f"Found {repr(key)}")
return conf
log.debug("No entry found")
return None
def _resolve_authconfig_credstore(self, registry, credstore_name):
if not registry or registry == INDEX_NAME:
# The ecosystem is inconsistent about index.docker.io vs.
# docker.io - in that case, the full URL is necessary.
registry = INDEX_URL
log.debug(f"Looking for auth entry for {repr(registry)}")
store = self._get_store_instance(credstore_name)
try:
data = store.get(registry)
res = {
'ServerAddress': registry,
}
if data['Username'] == TOKEN_USERNAME:
res['IdentityToken'] = data['Secret']
else:
res.update({
'Username': data['Username'],
'Password': data['Secret'],
})
return res
except credentials.CredentialsNotFound:
log.debug('No entry found')
return None
except credentials.StoreError as e:
raise errors.DockerException(
f'Credentials store error: {repr(e)}'
) from e
def _get_store_instance(self, name):
if name not in self._stores:
self._stores[name] = credentials.Store(
name, environment=self._credstore_env
)
return self._stores[name]
def get_credential_store(self, registry):
if not registry or registry == INDEX_NAME:
registry = INDEX_URL
return self.cred_helpers.get(registry) or self.creds_store
def get_all_credentials(self):
auth_data = self.auths.copy()
if self.creds_store:
# Retrieve all credentials from the default store
store = self._get_store_instance(self.creds_store)
for k in store.list().keys():
auth_data[k] = self._resolve_authconfig_credstore(
k, self.creds_store
)
auth_data[convert_to_hostname(k)] = auth_data[k]
# credHelpers entries take priority over all others
for reg, store_name in self.cred_helpers.items():
auth_data[reg] = self._resolve_authconfig_credstore(
reg, store_name
)
auth_data[convert_to_hostname(reg)] = auth_data[reg]
return auth_data
def add_auth(self, reg, data):
self['auths'][reg] = data
def resolve_authconfig(authconfig, registry=None, credstore_env=None):
if not isinstance(authconfig, AuthConfig):
authconfig = AuthConfig(authconfig, credstore_env)
return authconfig.resolve_authconfig(registry)
def convert_to_hostname(url):
return url.replace('http://', '').replace('https://', '').split('/', 1)[0]
def decode_auth(auth):
if isinstance(auth, str):
auth = auth.encode('ascii')
s = base64.b64decode(auth)
login, pwd = s.split(b':', 1)
return login.decode('utf8'), pwd.decode('utf8')
def encode_header(auth):
auth_json = json.dumps(auth).encode('ascii')
return base64.urlsafe_b64encode(auth_json)
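A round-trip sketch of these two helpers; the credentials are invented, and note that ``decode_auth`` splits on the first colon only, so passwords may contain colons:

import base64

from docker.auth import INDEX_URL, decode_auth, encode_header

token = base64.b64encode(b'alice:s3cr3t:with:colons').decode('ascii')
assert decode_auth(token) == ('alice', 's3cr3t:with:colons')

header = encode_header({'username': 'alice', 'serveraddress': INDEX_URL})
print(header)  # URL-safe base64 of the JSON payload (X-Registry-Auth)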
def parse_auth(entries, raise_on_error=False):
"""
Parses authentication entries
Args:
entries: Dict of authentication entries.
raise_on_error: If set to true, an invalid format will raise
InvalidConfigFile
Returns:
Authentication registry.
"""
return AuthConfig.parse_auth(entries, raise_on_error)
def load_config(config_path=None, config_dict=None, credstore_env=None):
return AuthConfig.load_config(config_path, config_dict, credstore_env)
def _load_legacy_config(config_file):
log.debug("Attempting to parse legacy auth file format")
try:
data = []
with open(config_file) as f:
for line in f.readlines():
data.append(line.strip().split(' = ')[1])
if len(data) < 2:
# Not enough data
raise errors.InvalidConfigFile(
'Invalid or empty configuration file!'
)
username, password = decode_auth(data[0])
return {'auths': {
INDEX_NAME: {
'username': username,
'password': password,
'email': data[1],
'serveraddress': INDEX_URL,
}
}}
except Exception as e:
log.debug(e)
log.debug("All parsing attempts failed - returning empty config")
return {}
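The legacy format this module-private parser accepts is two ``key = value`` lines: a base64 ``auth`` entry first and an ``email`` second. A sketch with invented credentials (``YWxpY2U6czNjcjN0`` is ``alice:s3cr3t``):

from docker.auth import _load_legacy_config

with open('/tmp/.dockercfg', 'w') as f:
    f.write('auth = YWxpY2U6czNjcjN0\n')
    f.write('email = alice@example.com\n')

print(_load_legacy_config('/tmp/.dockercfg'))
# {'auths': {'docker.io': {'username': 'alice', 'password': 's3cr3t', ...}}}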

8
docker/auth/__init__.py Normal file
View File

@ -0,0 +1,8 @@
from .auth import (
INDEX_NAME,
INDEX_URL,
encode_header,
load_config,
resolve_authconfig,
resolve_repository_name,
) # flake8: noqa

230
docker/auth/auth.py Normal file
View File

@ -0,0 +1,230 @@
# Copyright 2013 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import logging
import os
import six
from .. import errors
INDEX_NAME = 'docker.io'
INDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)
DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
log = logging.getLogger(__name__)
def resolve_repository_name(repo_name):
if '://' in repo_name:
raise errors.InvalidRepository(
'Repository name cannot contain a scheme ({0})'.format(repo_name)
)
index_name, remote_name = split_repo_name(repo_name)
if index_name[0] == '-' or index_name[-1] == '-':
raise errors.InvalidRepository(
'Invalid index name ({0}). Cannot begin or end with a'
' hyphen.'.format(index_name)
)
return resolve_index_name(index_name), remote_name
def resolve_index_name(index_name):
index_name = convert_to_hostname(index_name)
if index_name == 'index.' + INDEX_NAME:
index_name = INDEX_NAME
return index_name
def split_repo_name(repo_name):
parts = repo_name.split('/', 1)
if len(parts) == 1 or (
'.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'
):
# This is a docker index repo (ex: username/foobar or ubuntu)
return INDEX_NAME, repo_name
return tuple(parts)
def resolve_authconfig(authconfig, registry=None):
"""
Returns the authentication data from the given auth configuration for a
specific registry. As with the Docker client, legacy entries in the config
with full URLs are stripped down to hostnames before checking for a match.
Returns None if no match was found.
"""
# Default to the public index server
registry = resolve_index_name(registry) if registry else INDEX_NAME
log.debug("Looking for auth entry for {0}".format(repr(registry)))
if registry in authconfig:
log.debug("Found {0}".format(repr(registry)))
return authconfig[registry]
for key, config in six.iteritems(authconfig):
if resolve_index_name(key) == registry:
log.debug("Found {0}".format(repr(key)))
return config
log.debug("No entry found")
return None
def convert_to_hostname(url):
return url.replace('http://', '').replace('https://', '').split('/', 1)[0]
def decode_auth(auth):
if isinstance(auth, six.string_types):
auth = auth.encode('ascii')
s = base64.b64decode(auth)
login, pwd = s.split(b':', 1)
return login.decode('utf8'), pwd.decode('utf8')
def encode_header(auth):
auth_json = json.dumps(auth).encode('ascii')
return base64.urlsafe_b64encode(auth_json)
def parse_auth(entries, raise_on_error=False):
"""
Parses authentication entries
Args:
entries: Dict of authentication entries.
raise_on_error: If set to true, an invalid format will raise
InvalidConfigFile
Returns:
Authentication registry.
"""
conf = {}
for registry, entry in six.iteritems(entries):
if not (isinstance(entry, dict) and 'auth' in entry):
log.debug(
'Config entry for key {0} is not auth config'.format(registry)
)
# We sometimes fall back to parsing the whole config as if it was
# the auth config by itself, for legacy purposes. In that case, we
# fail silently and return an empty conf if any of the keys is not
# formatted properly.
if raise_on_error:
raise errors.InvalidConfigFile(
'Invalid configuration for registry {0}'.format(registry)
)
return {}
username, password = decode_auth(entry['auth'])
log.debug(
'Found entry (registry={0}, username={1})'
.format(repr(registry), repr(username))
)
conf[registry] = {
'username': username,
'password': password,
'email': entry.get('email'),
'serveraddress': registry,
}
return conf
def find_config_file(config_path=None):
environment_path = os.path.join(
os.environ.get('DOCKER_CONFIG'),
os.path.basename(DOCKER_CONFIG_FILENAME)
) if os.environ.get('DOCKER_CONFIG') else None
paths = [
config_path, # 1
environment_path, # 2
os.path.join(os.path.expanduser('~'), DOCKER_CONFIG_FILENAME), # 3
os.path.join(
os.path.expanduser('~'), LEGACY_DOCKER_CONFIG_FILENAME
) # 4
]
for path in paths:
if path and os.path.exists(path):
return path
return None
def load_config(config_path=None):
"""
Loads authentication data from a Docker configuration file in the given
root directory or if config_path is passed use given path.
Lookup priority:
explicit config_path parameter > DOCKER_CONFIG environment variable >
~/.docker/config.json > ~/.dockercfg
"""
config_file = find_config_file(config_path)
if not config_file:
log.debug("File doesn't exist")
return {}
try:
with open(config_file) as f:
data = json.load(f)
res = {}
if data.get('auths'):
log.debug("Found 'auths' section")
res.update(parse_auth(data['auths'], raise_on_error=True))
if data.get('HttpHeaders'):
log.debug("Found 'HttpHeaders' section")
res.update({'HttpHeaders': data['HttpHeaders']})
if res:
return res
else:
log.debug("Couldn't find 'auths' or 'HttpHeaders' sections")
f.seek(0)
return parse_auth(json.load(f))
except (IOError, KeyError, ValueError) as e:
# Likely missing new Docker config file or it's in an
# unknown format, continue to attempt to read old location
# and format.
log.debug(e)
log.debug("Attempting to parse legacy auth file format")
try:
data = []
with open(config_file) as f:
for line in f.readlines():
data.append(line.strip().split(' = ')[1])
if len(data) < 2:
# Not enough data
raise errors.InvalidConfigFile(
'Invalid or empty configuration file!'
)
username, password = decode_auth(data[0])
return {
INDEX_NAME: {
'username': username,
'password': password,
'email': data[1],
'serveraddress': INDEX_URL,
}
}
except Exception as e:
log.debug(e)
pass
log.debug("All parsing attempts failed - returning empty config")
return {}

View File

@ -1,222 +1,349 @@
from .api.client import APIClient # Copyright 2013 dotCloud inc.
from .constants import DEFAULT_MAX_POOL_SIZE, DEFAULT_TIMEOUT_SECONDS
from .models.configs import ConfigCollection # Licensed under the Apache License, Version 2.0 (the "License");
from .models.containers import ContainerCollection # you may not use this file except in compliance with the License.
from .models.images import ImageCollection # You may obtain a copy of the License at
from .models.networks import NetworkCollection
from .models.nodes import NodeCollection # http://www.apache.org/licenses/LICENSE-2.0
from .models.plugins import PluginCollection
from .models.secrets import SecretCollection # Unless required by applicable law or agreed to in writing, software
from .models.services import ServiceCollection # distributed under the License is distributed on an "AS IS" BASIS,
from .models.swarm import Swarm # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from .models.volumes import VolumeCollection # See the License for the specific language governing permissions and
from .utils import kwargs_from_env # limitations under the License.
import json
import struct
import sys
import requests
import requests.exceptions
import six
import websocket
class DockerClient: from . import api
""" from . import constants
A client for communicating with a Docker server. from . import errors
from .auth import auth
from .unixconn import unixconn
from .ssladapter import ssladapter
from .utils import utils, check_resource, update_headers, kwargs_from_env
from .tls import TLSConfig
Example:
>>> import docker def from_env(**kwargs):
>>> client = docker.DockerClient(base_url='unix://var/run/docker.sock') return Client.from_env(**kwargs)
Args:
base_url (str): URL to the Docker server. For example, class Client(
``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``. requests.Session,
version (str): The version of the API to use. Set to ``auto`` to api.BuildApiMixin,
automatically detect the server's version. Default: ``1.35`` api.ContainerApiMixin,
timeout (int): Default timeout for API calls, in seconds. api.DaemonApiMixin,
tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass api.ExecApiMixin,
``True`` to enable it with default options, or pass a api.ImageApiMixin,
:py:class:`~docker.tls.TLSConfig` object to use custom api.VolumeApiMixin,
configuration. api.NetworkApiMixin):
user_agent (str): Set a custom user agent for requests to the server. def __init__(self, base_url=None, version=None,
credstore_env (dict): Override environment variables when calling the timeout=constants.DEFAULT_TIMEOUT_SECONDS, tls=False):
credential store process. super(Client, self).__init__()
use_ssh_client (bool): If set to `True`, an ssh connection is made
via shelling out to the ssh client. Ensure the ssh client is if tls and not base_url:
installed and configured on the host. raise errors.TLSParameterError(
max_pool_size (int): The maximum number of connections 'If using TLS, the base_url argument must be provided.'
to save in the pool. )
"""
def __init__(self, *args, **kwargs): self.base_url = base_url
self.api = APIClient(*args, **kwargs) self.timeout = timeout
self._auth_configs = auth.load_config()
base_url = utils.parse_host(base_url, sys.platform, tls=bool(tls))
if base_url.startswith('http+unix://'):
self._custom_adapter = unixconn.UnixAdapter(base_url, timeout)
self.mount('http+docker://', self._custom_adapter)
self.base_url = 'http+docker://localunixsocket'
else:
# Use SSLAdapter for the ability to specify SSL version
if isinstance(tls, TLSConfig):
tls.configure_client(self)
elif tls:
self._custom_adapter = ssladapter.SSLAdapter()
self.mount('https://', self._custom_adapter)
self.base_url = base_url
# version detection needs to be after unix adapter mounting
if version is None:
self._version = constants.DEFAULT_DOCKER_API_VERSION
elif isinstance(version, six.string_types):
if version.lower() == 'auto':
self._version = self._retrieve_server_version()
else:
self._version = version
else:
raise errors.DockerException(
'Version parameter must be a string or None. Found {0}'.format(
type(version).__name__
)
)
@classmethod @classmethod
def from_env(cls, **kwargs): def from_env(cls, **kwargs):
return cls(**kwargs_from_env(**kwargs))
def _retrieve_server_version(self):
try:
return self.version(api_version=False)["ApiVersion"]
except KeyError:
raise errors.DockerException(
'Invalid response from docker daemon: key "ApiVersion"'
' is missing.'
)
except Exception as e:
raise errors.DockerException(
'Error while fetching server API version: {0}'.format(e)
)
def _set_request_timeout(self, kwargs):
"""Prepare the kwargs for an HTTP request by inserting the timeout
parameter, if not already present."""
kwargs.setdefault('timeout', self.timeout)
return kwargs
@update_headers
def _post(self, url, **kwargs):
return self.post(url, **self._set_request_timeout(kwargs))
@update_headers
def _get(self, url, **kwargs):
return self.get(url, **self._set_request_timeout(kwargs))
@update_headers
def _put(self, url, **kwargs):
return self.put(url, **self._set_request_timeout(kwargs))
@update_headers
def _delete(self, url, **kwargs):
return self.delete(url, **self._set_request_timeout(kwargs))
def _url(self, pathfmt, *args, **kwargs):
for arg in args:
if not isinstance(arg, six.string_types):
raise ValueError(
'Expected a string but found {0} ({1}) '
'instead'.format(arg, type(arg))
)
args = map(six.moves.urllib.parse.quote_plus, args)
if kwargs.get('versioned_api', True):
return '{0}/v{1}{2}'.format(
self.base_url, self._version, pathfmt.format(*args)
)
else:
return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
def _raise_for_status(self, response, explanation=None):
"""Raises stored :class:`APIError`, if one occurred."""
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
raise errors.NotFound(e, response, explanation=explanation)
raise errors.APIError(e, response, explanation=explanation)
def _result(self, response, json=False, binary=False):
assert not (json and binary)
self._raise_for_status(response)
if json:
return response.json()
if binary:
return response.content
return response.text
def _post_json(self, url, data, **kwargs):
# Go <1.1 can't unserialize null to a string
# so we do this disgusting thing here.
data2 = {}
if data is not None:
for k, v in six.iteritems(data):
if v is not None:
data2[k] = v
if 'headers' not in kwargs:
kwargs['headers'] = {}
kwargs['headers']['Content-Type'] = 'application/json'
return self._post(url, data=json.dumps(data2), **kwargs)
def _attach_params(self, override=None):
return override or {
'stdout': 1,
'stderr': 1,
'stream': 1
}
@check_resource
def _attach_websocket(self, container, params=None):
url = self._url("/containers/{0}/attach/ws", container)
req = requests.Request("POST", url, params=self._attach_params(params))
full_url = req.prepare().url
full_url = full_url.replace("http://", "ws://", 1)
full_url = full_url.replace("https://", "wss://", 1)
return self._create_websocket_connection(full_url)
def _create_websocket_connection(self, url):
return websocket.create_connection(url)
def _get_raw_response_socket(self, response):
self._raise_for_status(response)
if six.PY3:
sock = response.raw._fp.fp.raw
if self.base_url.startswith("https://"):
sock = sock._sock
else:
sock = response.raw._fp.fp._sock
try:
# Keep a reference to the response to stop it being garbage
# collected. If the response is garbage collected, it will
# close TLS sockets.
sock._response = response
except AttributeError:
# UNIX sockets can't have attributes set on them, but that's
# fine because we won't be doing TLS over them
pass
return sock
def _stream_helper(self, response, decode=False):
"""Generator for data coming from a chunked-encoded HTTP response."""
if response.raw._fp.chunked:
reader = response.raw
while not reader.closed:
# this read call will block until we get a chunk
data = reader.read(1)
if not data:
break
if reader._fp.chunk_left:
data += reader.read(reader._fp.chunk_left)
if decode:
if six.PY3:
data = data.decode('utf-8')
data = json.loads(data)
yield data
else:
# Response isn't chunked, meaning we probably
# encountered an error immediately
yield self._result(response)
def _multiplexed_buffer_helper(self, response):
"""A generator of multiplexed data blocks read from a buffered
response."""
buf = self._result(response, binary=True)
walker = 0
while True:
if len(buf[walker:]) < 8:
break
_, length = struct.unpack_from('>BxxxL', buf[walker:])
start = walker + constants.STREAM_HEADER_SIZE_BYTES
end = start + length
walker = end
yield buf[start:end]
def _multiplexed_response_stream_helper(self, response):
"""A generator of multiplexed data blocks coming from a response
stream."""
# Disable timeout on the underlying socket to prevent
# Read timed out(s) for long running processes
socket = self._get_raw_response_socket(response)
self._disable_socket_timeout(socket)
while True:
header = response.raw.read(constants.STREAM_HEADER_SIZE_BYTES)
if not header:
break
_, length = struct.unpack('>BxxxL', header)
if not length:
continue
data = response.raw.read(length)
if not data:
break
yield data
def _stream_raw_result_old(self, response):
''' Stream raw output for API versions below 1.6 '''
self._raise_for_status(response)
for line in response.iter_lines(chunk_size=1,
decode_unicode=True):
# filter out keep-alive new lines
if line:
yield line
def _stream_raw_result(self, response):
''' Stream result for TTY-enabled container above API 1.6 '''
self._raise_for_status(response)
for out in response.iter_content(chunk_size=1, decode_unicode=True):
yield out
def _disable_socket_timeout(self, socket):
""" Depending on the combination of python version and whether we're
connecting over http or https, we might need to access _sock, which
may or may not exist; or we may need to just settimeout on socket
itself, which also may or may not have settimeout on it.
To avoid missing the correct one, we try both.
""" """
Return a client configured from environment variables. if hasattr(socket, "settimeout"):
socket.settimeout(None)
if hasattr(socket, "_sock") and hasattr(socket._sock, "settimeout"):
socket._sock.settimeout(None)
The environment variables used are the same as those used by the def _get_result(self, container, stream, res):
Docker command-line client. They are: cont = self.inspect_container(container)
return self._get_result_tty(stream, res, cont['Config']['Tty'])
.. envvar:: DOCKER_HOST def _get_result_tty(self, stream, res, is_tty):
# Stream multi-plexing was only introduced in API v1.6. Anything
# before that needs old-style streaming.
if utils.compare_version('1.6', self._version) < 0:
return self._stream_raw_result_old(res)
The URL to the Docker host. # We should also use raw streaming (without keep-alives)
# if we're dealing with a tty-enabled container.
if is_tty:
return self._stream_raw_result(res) if stream else \
self._result(res, binary=True)
.. envvar:: DOCKER_TLS_VERIFY self._raise_for_status(res)
sep = six.binary_type()
if stream:
return self._multiplexed_response_stream_helper(res)
else:
return sep.join(
[x for x in self._multiplexed_buffer_helper(res)]
)
Verify the host against a CA certificate. def get_adapter(self, url):
try:
.. envvar:: DOCKER_CERT_PATH return super(Client, self).get_adapter(url)
except requests.exceptions.InvalidSchema as e:
A path to a directory containing TLS certificates to use when if self._custom_adapter:
connecting to the Docker host. return self._custom_adapter
else:
Args: raise e
version (str): The version of the API to use. Set to ``auto`` to
automatically detect the server's version. Default: ``auto``
timeout (int): Default timeout for API calls, in seconds.
max_pool_size (int): The maximum number of connections
to save in the pool.
environment (dict): The environment to read environment variables
from. Default: the value of ``os.environ``
credstore_env (dict): Override environment variables when calling
the credential store process.
use_ssh_client (bool): If set to `True`, an ssh connection is
made via shelling out to the ssh client. Ensure the ssh
client is installed and configured on the host.
Example:
>>> import docker
>>> client = docker.from_env()
.. _`SSL version`:
https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
"""
timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT_SECONDS)
max_pool_size = kwargs.pop('max_pool_size', DEFAULT_MAX_POOL_SIZE)
version = kwargs.pop('version', None)
use_ssh_client = kwargs.pop('use_ssh_client', False)
return cls(
timeout=timeout,
max_pool_size=max_pool_size,
version=version,
use_ssh_client=use_ssh_client,
**kwargs_from_env(**kwargs)
)
# Resources
@property
def configs(self):
"""
An object for managing configs on the server. See the
:doc:`configs documentation <configs>` for full details.
"""
return ConfigCollection(client=self)
@property @property
def containers(self): def api_version(self):
""" return self._version
An object for managing containers on the server. See the
:doc:`containers documentation <containers>` for full details.
"""
return ContainerCollection(client=self)
@property
def images(self):
"""
An object for managing images on the server. See the
:doc:`images documentation <images>` for full details.
"""
return ImageCollection(client=self)
@property
def networks(self):
"""
An object for managing networks on the server. See the
:doc:`networks documentation <networks>` for full details.
"""
return NetworkCollection(client=self)
@property
def nodes(self):
"""
An object for managing nodes on the server. See the
:doc:`nodes documentation <nodes>` for full details.
"""
return NodeCollection(client=self)
@property
def plugins(self):
"""
An object for managing plugins on the server. See the
:doc:`plugins documentation <plugins>` for full details.
"""
return PluginCollection(client=self)
@property
def secrets(self):
"""
An object for managing secrets on the server. See the
:doc:`secrets documentation <secrets>` for full details.
"""
return SecretCollection(client=self)
@property
def services(self):
"""
An object for managing services on the server. See the
:doc:`services documentation <services>` for full details.
"""
return ServiceCollection(client=self)
@property
def swarm(self):
"""
An object for managing a swarm on the server. See the
:doc:`swarm documentation <swarm>` for full details.
"""
return Swarm(client=self)
@property
def volumes(self):
"""
An object for managing volumes on the server. See the
:doc:`volumes documentation <volumes>` for full details.
"""
return VolumeCollection(client=self)
# Top-level methods
def events(self, *args, **kwargs):
return self.api.events(*args, **kwargs)
events.__doc__ = APIClient.events.__doc__
def df(self):
return self.api.df()
df.__doc__ = APIClient.df.__doc__
def info(self, *args, **kwargs):
return self.api.info(*args, **kwargs)
info.__doc__ = APIClient.info.__doc__
def login(self, *args, **kwargs):
return self.api.login(*args, **kwargs)
login.__doc__ = APIClient.login.__doc__
def ping(self, *args, **kwargs):
return self.api.ping(*args, **kwargs)
ping.__doc__ = APIClient.ping.__doc__
def version(self, *args, **kwargs):
return self.api.version(*args, **kwargs)
version.__doc__ = APIClient.version.__doc__
def close(self):
return self.api.close()
close.__doc__ = APIClient.close.__doc__
def __getattr__(self, name):
s = [f"'DockerClient' object has no attribute '{name}'"]
# If a user calls a method that now lives on APIClient, point them there.
if hasattr(APIClient, name):
s.append("In Docker SDK for Python 2.0, this method is now on the "
"object APIClient. See the low-level API section of the "
"documentation for more details.")
raise AttributeError(' '.join(s))
from_env = DockerClient.from_env
class AutoVersionClient(Client):
def __init__(self, *args, **kwargs):
if 'version' in kwargs and kwargs['version']:
raise errors.DockerException(
'Can not specify version for AutoVersionClient'
)
kwargs['version'] = 'auto'
super(AutoVersionClient, self).__init__(*args, **kwargs)
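For comparison, version auto-negotiation on the two API generations shown above might look like this (a sketch; Client and AutoVersionClient exist only in the 1.x releases):

import docker

# Modern SDK (docker>=2.0): negotiation is just a keyword argument.
modern = docker.DockerClient(base_url='unix:///var/run/docker.sock',
                             version='auto')

# Legacy 1.x releases used a dedicated subclass for the same behaviour:
# from docker import AutoVersionClient
# legacy = AutoVersionClient(base_url='unix:///var/run/docker.sock')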

View File

@ -1,45 +1,10 @@
import sys
from .version import __version__
DEFAULT_DOCKER_API_VERSION = '1.45'
MINIMUM_DOCKER_API_VERSION = '1.24'
DEFAULT_DOCKER_API_VERSION = '1.22'
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
CONTAINER_LIMITS_KEYS = [
'memory', 'memswap', 'cpushares', 'cpusetcpus'
]
DEFAULT_HTTP_HOST = "127.0.0.1"
DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock"
DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'
BYTE_UNITS = {
'b': 1,
'k': 1024,
'm': 1024 * 1024,
'g': 1024 * 1024 * 1024
}
INSECURE_REGISTRY_DEPRECATION_WARNING = \
'The `insecure_registry` argument to {} ' \
'is deprecated and non-functional. Please remove it.'
IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
WINDOWS_LONGPATH_PREFIX = '\\\\?\\'
DEFAULT_USER_AGENT = f"docker-sdk-python/{__version__}"
DEFAULT_NUM_POOLS = 25
# The OpenSSH server default value for MaxSessions is 10 which means we can
# use up to 9, leaving the final session for the underlying SSH connection.
# For more details see: https://github.com/docker/docker-py/issues/2246
DEFAULT_NUM_POOLS_SSH = 9
DEFAULT_MAX_POOL_SIZE = 10
DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
DEFAULT_SWARM_ADDR_POOL = ['10.0.0.0/8']
DEFAULT_SWARM_SUBNET_SIZE = 24
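The BYTE_UNITS table above is what allows human-readable sizes such as '512m' to become byte counts; a minimal sketch of that conversion (parse_bytes is my own helper, not part of the module):

BYTE_UNITS = {
    'b': 1,
    'k': 1024,
    'm': 1024 * 1024,
    'g': 1024 * 1024 * 1024,
}

def parse_bytes(spec):
    # Convert '512m' or '2g' into a byte count; bare integers pass through.
    spec = spec.lower().strip()
    if spec and spec[-1] in BYTE_UNITS:
        return int(spec[:-1]) * BYTE_UNITS[spec[-1]]
    return int(spec)

assert parse_bytes('512m') == 512 * 1024 * 1024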

View File

@ -1,2 +0,0 @@
from .api import ContextAPI
from .context import Context

View File

@ -1,206 +0,0 @@
import json
import os
from docker import errors
from .config import (
METAFILE,
get_current_context_name,
get_meta_dir,
write_context_name_to_docker_config,
)
from .context import Context
class ContextAPI:
"""Context API.
Contains methods for context management:
create, list, remove, get, inspect.
"""
DEFAULT_CONTEXT = Context("default", "swarm")
@classmethod
def create_context(
cls, name, orchestrator=None, host=None, tls_cfg=None,
default_namespace=None, skip_tls_verify=False):
"""Creates a new context.
Returns:
(Context): a Context object.
Raises:
:py:class:`docker.errors.MissingContextParameter`
If a context name is not provided.
:py:class:`docker.errors.ContextAlreadyExists`
If a context with the name already exists.
:py:class:`docker.errors.ContextException`
If name is default.
Example:
>>> from docker.context import ContextAPI
>>> ctx = ContextAPI.create_context(name='test')
>>> print(ctx.Metadata)
{
"Name": "test",
"Metadata": {},
"Endpoints": {
"docker": {
"Host": "unix:///var/run/docker.sock",
"SkipTLSVerify": false
}
}
}
"""
if not name:
raise errors.MissingContextParameter("name")
if name == "default":
raise errors.ContextException(
'"default" is a reserved context name')
ctx = Context.load_context(name)
if ctx:
raise errors.ContextAlreadyExists(name)
endpoint = "docker"
if orchestrator and orchestrator != "swarm":
endpoint = orchestrator
ctx = Context(name, orchestrator)
ctx.set_endpoint(
endpoint, host, tls_cfg,
skip_tls_verify=skip_tls_verify,
def_namespace=default_namespace)
ctx.save()
return ctx
@classmethod
def get_context(cls, name=None):
"""Retrieves a context object.
Args:
name (str): The name of the context
Example:
>>> from docker.context import ContextAPI
>>> ctx = ContextAPI.get_context(name='test')
>>> print(ctx.Metadata)
{
"Name": "test",
"Metadata": {},
"Endpoints": {
"docker": {
"Host": "unix:///var/run/docker.sock",
"SkipTLSVerify": false
}
}
}
"""
if not name:
name = get_current_context_name()
if name == "default":
return cls.DEFAULT_CONTEXT
return Context.load_context(name)
@classmethod
def contexts(cls):
"""Context list.
Returns:
(Context): List of context objects.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
names = []
for dirname, dirnames, fnames in os.walk(get_meta_dir()):
for filename in fnames + dirnames:
if filename == METAFILE:
try:
data = json.load(
open(os.path.join(dirname, filename)))
names.append(data["Name"])
except Exception as e:
raise errors.ContextException(
f"Failed to load metafile {filename}: {e}",
) from e
contexts = [cls.DEFAULT_CONTEXT]
for name in names:
contexts.append(Context.load_context(name))
return contexts
@classmethod
def get_current_context(cls):
"""Get current context.
Returns:
(Context): current context object.
"""
return cls.get_context()
@classmethod
def set_current_context(cls, name="default"):
ctx = cls.get_context(name)
if not ctx:
raise errors.ContextNotFound(name)
err = write_context_name_to_docker_config(name)
if err:
raise errors.ContextException(
f'Failed to set current context: {err}')
@classmethod
def remove_context(cls, name):
"""Remove a context. Similar to the ``docker context rm`` command.
Args:
name (str): The name of the context
Raises:
:py:class:`docker.errors.MissingContextParameter`
If a context name is not provided.
:py:class:`docker.errors.ContextNotFound`
If a context with the name does not exist.
:py:class:`docker.errors.ContextException`
If name is default.
Example:
>>> from docker.context import ContextAPI
>>> ContextAPI.remove_context(name='test')
>>>
"""
if not name:
raise errors.MissingContextParameter("name")
if name == "default":
raise errors.ContextException(
'context "default" cannot be removed')
ctx = Context.load_context(name)
if not ctx:
raise errors.ContextNotFound(name)
if name == get_current_context_name():
write_context_name_to_docker_config(None)
ctx.remove()
@classmethod
def inspect_context(cls, name="default"):
"""Remove a context. Similar to the ``docker context inspect`` command.
Args:
name (str): The name of the context
Raises:
:py:class:`docker.errors.MissingContextParameter`
If a context name is not provided.
:py:class:`docker.errors.ContextNotFound`
If a context with the name does not exist.
Example:
>>> from docker.context import ContextAPI
>>> ContextAPI.inspect_context(name='test')
>>>
"""
if not name:
raise errors.MissingContextParameter("name")
if name == "default":
return cls.DEFAULT_CONTEXT()
ctx = Context.load_context(name)
if not ctx:
raise errors.ContextNotFound(name)
return ctx()
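Putting the class above to work, a typical create/list/inspect/remove round trip might look like this (a sketch; the host and name are illustrative, and metadata is written under your real Docker config directory):

from docker.context import ContextAPI

ctx = ContextAPI.create_context('staging', host='tcp://10.0.0.5:2376')
print([c.Name for c in ContextAPI.contexts()])  # 'default' plus 'staging'
print(ContextAPI.inspect_context('staging')['Endpoints']['docker']['Host'])
ContextAPI.remove_context('staging')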

View File

@ -1,81 +0,0 @@
import hashlib
import json
import os
from docker import utils
from docker.constants import DEFAULT_UNIX_SOCKET, IS_WINDOWS_PLATFORM
from docker.utils.config import find_config_file
METAFILE = "meta.json"
def get_current_context_name():
name = "default"
docker_cfg_path = find_config_file()
if docker_cfg_path:
try:
with open(docker_cfg_path) as f:
name = json.load(f).get("currentContext", "default")
except Exception:
return "default"
return name
def write_context_name_to_docker_config(name=None):
if name == 'default':
name = None
docker_cfg_path = find_config_file()
config = {}
if docker_cfg_path:
try:
with open(docker_cfg_path) as f:
config = json.load(f)
except Exception as e:
return e
current_context = config.get("currentContext", None)
if current_context and not name:
del config["currentContext"]
elif name:
config["currentContext"] = name
else:
return
try:
with open(docker_cfg_path, "w") as f:
json.dump(config, f, indent=4)
except Exception as e:
return e
def get_context_id(name):
return hashlib.sha256(name.encode('utf-8')).hexdigest()
def get_context_dir():
return os.path.join(os.path.dirname(find_config_file() or ""), "contexts")
def get_meta_dir(name=None):
meta_dir = os.path.join(get_context_dir(), "meta")
if name:
return os.path.join(meta_dir, get_context_id(name))
return meta_dir
def get_meta_file(name):
return os.path.join(get_meta_dir(name), METAFILE)
def get_tls_dir(name=None, endpoint=""):
context_dir = get_context_dir()
if name:
return os.path.join(context_dir, "tls", get_context_id(name), endpoint)
return os.path.join(context_dir, "tls")
def get_context_host(path=None, tls=False):
host = utils.parse_host(path, IS_WINDOWS_PLATFORM, tls)
if host == DEFAULT_UNIX_SOCKET:
# remove http+ from default docker socket url
if host.startswith("http+"):
host = host[5:]
return host
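The helpers above derive deterministic on-disk locations from the context name alone; for example (actual paths depend on where your Docker config file lives):

import hashlib

name = 'staging'
context_id = hashlib.sha256(name.encode('utf-8')).hexdigest()
# Metadata lands in <config-dir>/contexts/meta/<context_id>/meta.json,
# TLS material in <config-dir>/contexts/tls/<context_id>/<endpoint>/
print(context_id[:12])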

View File

@ -1,249 +0,0 @@
import json
import os
from shutil import copyfile, rmtree
from docker.errors import ContextException
from docker.tls import TLSConfig
from .config import (
get_context_host,
get_meta_dir,
get_meta_file,
get_tls_dir,
)
class Context:
"""A context."""
def __init__(self, name, orchestrator=None, host=None, endpoints=None,
tls=False):
if not name:
raise Exception("Name not provided")
self.name = name
self.context_type = None
self.orchestrator = orchestrator
self.endpoints = {}
self.tls_cfg = {}
self.meta_path = "IN MEMORY"
self.tls_path = "IN MEMORY"
if not endpoints:
# set default docker endpoint if no endpoint is set
default_endpoint = "docker" if (
not orchestrator or orchestrator == "swarm"
) else orchestrator
self.endpoints = {
default_endpoint: {
"Host": get_context_host(host, tls),
"SkipTLSVerify": not tls
}
}
return
# check docker endpoints
for k, v in endpoints.items():
if not isinstance(v, dict):
# unknown format
raise ContextException(
f"Unknown endpoint format for context {name}: {v}",
)
self.endpoints[k] = v
if k != "docker":
continue
self.endpoints[k]["Host"] = v.get("Host", get_context_host(
host, tls))
self.endpoints[k]["SkipTLSVerify"] = bool(v.get(
"SkipTLSVerify", not tls))
def set_endpoint(
self, name="docker", host=None, tls_cfg=None,
skip_tls_verify=False, def_namespace=None):
self.endpoints[name] = {
"Host": get_context_host(host, not skip_tls_verify),
"SkipTLSVerify": skip_tls_verify
}
if def_namespace:
self.endpoints[name]["DefaultNamespace"] = def_namespace
if tls_cfg:
self.tls_cfg[name] = tls_cfg
def inspect(self):
return self.__call__()
@classmethod
def load_context(cls, name):
meta = Context._load_meta(name)
if meta:
instance = cls(
meta["Name"],
orchestrator=meta["Metadata"].get("StackOrchestrator", None),
endpoints=meta.get("Endpoints", None))
instance.context_type = meta["Metadata"].get("Type", None)
instance._load_certs()
instance.meta_path = get_meta_dir(name)
return instance
return None
@classmethod
def _load_meta(cls, name):
meta_file = get_meta_file(name)
if not os.path.isfile(meta_file):
return None
metadata = {}
try:
with open(meta_file) as f:
metadata = json.load(f)
except (OSError, KeyError, ValueError) as e:
# unknown format
raise Exception(
f"Detected corrupted meta file for context {name} : {e}"
) from e
# for docker endpoints, set defaults for
# Host and SkipTLSVerify fields
for k, v in metadata["Endpoints"].items():
if k != "docker":
continue
metadata["Endpoints"][k]["Host"] = v.get(
"Host", get_context_host(None, False))
metadata["Endpoints"][k]["SkipTLSVerify"] = bool(
v.get("SkipTLSVerify", True))
return metadata
def _load_certs(self):
certs = {}
tls_dir = get_tls_dir(self.name)
for endpoint in self.endpoints.keys():
if not os.path.isdir(os.path.join(tls_dir, endpoint)):
continue
ca_cert = None
cert = None
key = None
for filename in os.listdir(os.path.join(tls_dir, endpoint)):
if filename.startswith("ca"):
ca_cert = os.path.join(tls_dir, endpoint, filename)
elif filename.startswith("cert"):
cert = os.path.join(tls_dir, endpoint, filename)
elif filename.startswith("key"):
key = os.path.join(tls_dir, endpoint, filename)
if all([ca_cert, cert, key]):
verify = None
if endpoint == "docker" and not self.endpoints["docker"].get(
"SkipTLSVerify", False):
verify = True
certs[endpoint] = TLSConfig(
client_cert=(cert, key), ca_cert=ca_cert, verify=verify)
self.tls_cfg = certs
self.tls_path = tls_dir
def save(self):
meta_dir = get_meta_dir(self.name)
if not os.path.isdir(meta_dir):
os.makedirs(meta_dir)
with open(get_meta_file(self.name), "w") as f:
f.write(json.dumps(self.Metadata))
tls_dir = get_tls_dir(self.name)
for endpoint, tls in self.tls_cfg.items():
if not os.path.isdir(os.path.join(tls_dir, endpoint)):
os.makedirs(os.path.join(tls_dir, endpoint))
ca_file = tls.ca_cert
if ca_file:
copyfile(ca_file, os.path.join(
tls_dir, endpoint, os.path.basename(ca_file)))
if tls.cert:
cert_file, key_file = tls.cert
copyfile(cert_file, os.path.join(
tls_dir, endpoint, os.path.basename(cert_file)))
copyfile(key_file, os.path.join(
tls_dir, endpoint, os.path.basename(key_file)))
self.meta_path = get_meta_dir(self.name)
self.tls_path = get_tls_dir(self.name)
def remove(self):
if os.path.isdir(self.meta_path):
rmtree(self.meta_path)
if os.path.isdir(self.tls_path):
rmtree(self.tls_path)
def __repr__(self):
return f"<{self.__class__.__name__}: '{self.name}'>"
def __str__(self):
return json.dumps(self.__call__(), indent=2)
def __call__(self):
result = self.Metadata
result.update(self.TLSMaterial)
result.update(self.Storage)
return result
def is_docker_host(self):
return self.context_type is None
@property
def Name(self):
return self.name
@property
def Host(self):
if not self.orchestrator or self.orchestrator == "swarm":
endpoint = self.endpoints.get("docker", None)
if endpoint:
return endpoint.get("Host", None)
return None
return self.endpoints[self.orchestrator].get("Host", None)
@property
def Orchestrator(self):
return self.orchestrator
@property
def Metadata(self):
meta = {}
if self.orchestrator:
meta = {"StackOrchestrator": self.orchestrator}
return {
"Name": self.name,
"Metadata": meta,
"Endpoints": self.endpoints
}
@property
def TLSConfig(self):
key = self.orchestrator
if not key or key == "swarm":
key = "docker"
if key in self.tls_cfg.keys():
return self.tls_cfg[key]
return None
@property
def TLSMaterial(self):
certs = {}
for endpoint, tls in self.tls_cfg.items():
cert, key = tls.cert
certs[endpoint] = list(
map(os.path.basename, [tls.ca_cert, cert, key]))
return {
"TLSMaterial": certs
}
@property
def Storage(self):
return {
"Storage": {
"MetadataPath": self.meta_path,
"TLSPath": self.tls_path
}}
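As a sketch of how the pieces above fit together, a Context can be built and inspected entirely in memory; save() is what persists it to disk:

from docker.context import Context

ctx = Context('demo', host='tcp://127.0.0.1:2376', tls=False)
meta = ctx.Metadata
assert meta['Name'] == 'demo'
assert meta['Endpoints']['docker']['SkipTLSVerify'] is True
print(ctx)  # __str__ dumps Metadata + TLSMaterial + Storage as JSON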

View File

@ -1,8 +0,0 @@
from .constants import (
DEFAULT_LINUX_STORE,
DEFAULT_OSX_STORE,
DEFAULT_WIN32_STORE,
PROGRAM_PREFIX,
)
from .errors import CredentialsNotFound, StoreError
from .store import Store

View File

@ -1,4 +0,0 @@
PROGRAM_PREFIX = 'docker-credential-'
DEFAULT_LINUX_STORE = 'secretservice'
DEFAULT_OSX_STORE = 'osxkeychain'
DEFAULT_WIN32_STORE = 'wincred'

View File

@ -1,17 +0,0 @@
class StoreError(RuntimeError):
pass
class CredentialsNotFound(StoreError):
pass
class InitializationError(StoreError):
pass
def process_store_error(cpe, program):
message = cpe.output.decode('utf-8')
if 'credentials not found in native keychain' in message:
return CredentialsNotFound(f'No matching credentials in {program}')
return StoreError(f'Credentials store {program} exited with "{message}".')

View File

@ -1,93 +0,0 @@
import errno
import json
import shutil
import subprocess
import warnings
from . import constants, errors
from .utils import create_environment_dict
class Store:
def __init__(self, program, environment=None):
""" Create a store object that acts as an interface to
perform the basic operations for storing, retrieving
and erasing credentials using `program`.
"""
self.program = constants.PROGRAM_PREFIX + program
self.exe = shutil.which(self.program)
self.environment = environment
if self.exe is None:
warnings.warn(
f'{self.program} not installed or not available in PATH',
stacklevel=1,
)
def get(self, server):
""" Retrieve credentials for `server`. If no credentials are found,
a `StoreError` will be raised.
"""
if not isinstance(server, bytes):
server = server.encode('utf-8')
data = self._execute('get', server)
result = json.loads(data.decode('utf-8'))
# docker-credential-pass will return an object for inexistent servers
# whereas other helpers will exit with returncode != 0. For
# consistency, if no significant data is returned,
# raise CredentialsNotFound
if result['Username'] == '' and result['Secret'] == '':
raise errors.CredentialsNotFound(
f'No matching credentials in {self.program}'
)
return result
def store(self, server, username, secret):
""" Store credentials for `server`. Raises a `StoreError` if an error
occurs.
"""
data_input = json.dumps({
'ServerURL': server,
'Username': username,
'Secret': secret
}).encode('utf-8')
return self._execute('store', data_input)
def erase(self, server):
""" Erase credentials for `server`. Raises a `StoreError` if an error
occurs.
"""
if not isinstance(server, bytes):
server = server.encode('utf-8')
self._execute('erase', server)
def list(self):
""" List stored credentials. Requires v0.4.0+ of the helper.
"""
data = self._execute('list', None)
return json.loads(data.decode('utf-8'))
def _execute(self, subcmd, data_input):
if self.exe is None:
raise errors.StoreError(
f'{self.program} not installed or not available in PATH'
)
output = None
env = create_environment_dict(self.environment)
try:
output = subprocess.check_output(
[self.exe, subcmd], input=data_input, env=env,
)
except subprocess.CalledProcessError as e:
raise errors.process_store_error(e, self.program) from e
except OSError as e:
if e.errno == errno.ENOENT:
raise errors.StoreError(
f'{self.program} not installed or not available in PATH'
) from e
else:
raise errors.StoreError(
f'Unexpected OS error "{e.strerror}", errno={e.errno}'
) from e
return output
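In practice the Store class above shells out to a docker-credential-* helper; a sketch, assuming docker-credential-secretservice is on PATH and with an illustrative registry URL:

from docker.credentials import CredentialsNotFound, Store

store = Store('secretservice')  # wraps docker-credential-secretservice
store.store(server='https://index.docker.io/v1/',
            username='alice', secret='s3cret')
try:
    creds = store.get('https://index.docker.io/v1/')
    print(creds['Username'])
except CredentialsNotFound:
    print('no credentials for that registry')
store.erase('https://index.docker.io/v1/')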

View File

@ -1,10 +0,0 @@
import os
def create_environment_dict(overrides):
"""
Create and return a copy of os.environ with the specified overrides
"""
result = os.environ.copy()
result.update(overrides or {})
return result

View File

@ -1,99 +1,59 @@
# Copyright 2014 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
_image_not_found_explanation_fragments = frozenset(
fragment.lower() for fragment in [
'no such image',
'not found: does not exist or no pull access',
'repository does not exist',
'was found but does not match the specified platform',
]
)
class DockerException(Exception):
"""
A base class from which all other exceptions inherit.
If you want to catch all errors that the Docker SDK might raise,
catch this base exception.
"""
def create_api_error_from_http_exception(e):
"""
Create a suitable APIError from requests.exceptions.HTTPError.
"""
response = e.response
try:
explanation = response.json()['message']
except ValueError:
explanation = (response.text or '').strip()
cls = APIError
if response.status_code == 404:
explanation_msg = (explanation or '').lower()
if any(fragment in explanation_msg
for fragment in _image_not_found_explanation_fragments):
cls = ImageNotFound
else:
cls = NotFound
raise cls(e, response=response, explanation=explanation) from e
class APIError(requests.exceptions.HTTPError, DockerException):
"""
An HTTP error from the API.
"""
def __init__(self, message, response=None, explanation=None):
# requests 1.2 supports response as a keyword argument, but
# requests 1.1 doesn't
super().__init__(message)
self.response = response
self.explanation = explanation
def __str__(self):
message = super().__str__()
if self.is_client_error():
message = (
f'{self.response.status_code} Client Error for '
f'{self.response.url}: {self.response.reason}'
)
elif self.is_server_error():
message = (
f'{self.response.status_code} Server Error for '
f'{self.response.url}: {self.response.reason}'
)
if self.explanation:
message = f'{message} ("{self.explanation}")'
return message
@property
def status_code(self):
if self.response is not None:
return self.response.status_code
def is_error(self):
return self.is_client_error() or self.is_server_error()
def is_client_error(self):
if self.status_code is None:
return False
return 400 <= self.status_code < 500
def is_server_error(self):
if self.status_code is None:
return False
return 500 <= self.status_code < 600
class NotFound(APIError):
pass
class ImageNotFound(NotFound):
pass
class APIError(requests.exceptions.HTTPError):
def __init__(self, message, response, explanation=None):
# requests 1.2 supports response as a keyword argument, but
# requests 1.1 doesn't
super(APIError, self).__init__(message)
self.response = response
self.explanation = explanation
if self.explanation is None and response.content:
self.explanation = response.content.strip()
def __str__(self):
message = super(APIError, self).__str__()
if self.is_client_error():
message = '{0} Client Error: {1}'.format(
self.response.status_code, self.response.reason)
elif self.is_server_error():
message = '{0} Server Error: {1}'.format(
self.response.status_code, self.response.reason)
if self.explanation:
message = '{0} ("{1}")'.format(message, self.explanation)
return message
def is_client_error(self):
return 400 <= self.response.status_code < 500
def is_server_error(self):
return 500 <= self.response.status_code < 600
class DockerException(Exception):
pass
class NotFound(APIError):
pass
@ -109,10 +69,6 @@ class InvalidConfigFile(DockerException):
pass
class InvalidArgument(DockerException):
pass
class DeprecatedMethod(DockerException):
pass
@ -130,80 +86,3 @@ class TLSParameterError(DockerException):
class NullResource(DockerException, ValueError):
pass
class ContainerError(DockerException):
"""
Represents a container that has exited with a non-zero exit code.
"""
def __init__(self, container, exit_status, command, image, stderr):
self.container = container
self.exit_status = exit_status
self.command = command
self.image = image
self.stderr = stderr
err = f": {stderr}" if stderr is not None else ""
super().__init__(
f"Command '{command}' in image '{image}' "
f"returned non-zero exit status {exit_status}{err}"
)
class StreamParseError(RuntimeError):
def __init__(self, reason):
self.msg = reason
class BuildError(DockerException):
def __init__(self, reason, build_log):
super().__init__(reason)
self.msg = reason
self.build_log = build_log
class ImageLoadError(DockerException):
pass
def create_unexpected_kwargs_error(name, kwargs):
quoted_kwargs = [f"'{k}'" for k in sorted(kwargs)]
text = [f"{name}() "]
if len(quoted_kwargs) == 1:
text.append("got an unexpected keyword argument ")
else:
text.append("got unexpected keyword arguments ")
text.append(', '.join(quoted_kwargs))
return TypeError(''.join(text))
class MissingContextParameter(DockerException):
def __init__(self, param):
self.param = param
def __str__(self):
return (f"missing parameter: {self.param}")
class ContextAlreadyExists(DockerException):
def __init__(self, name):
self.name = name
def __str__(self):
return (f"context {self.name} already exists")
class ContextException(DockerException):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return (self.msg)
class ContextNotFound(DockerException):
def __init__(self, name):
self.name = name
def __str__(self):
return (f"context '{self.name}' not found")

View File

@ -1,70 +0,0 @@
from ..api import APIClient
from .resource import Collection, Model
class Config(Model):
"""A config."""
id_attribute = 'ID'
def __repr__(self):
return f"<{self.__class__.__name__}: '{self.name}'>"
@property
def name(self):
return self.attrs['Spec']['Name']
def remove(self):
"""
Remove this config.
Raises:
:py:class:`docker.errors.APIError`
If config failed to remove.
"""
return self.client.api.remove_config(self.id)
class ConfigCollection(Collection):
"""Configs on the Docker server."""
model = Config
def create(self, **kwargs):
obj = self.client.api.create_config(**kwargs)
obj.setdefault("Spec", {})["Name"] = kwargs.get("name")
return self.prepare_model(obj)
create.__doc__ = APIClient.create_config.__doc__
def get(self, config_id):
"""
Get a config.
Args:
config_id (str): Config ID.
Returns:
(:py:class:`Config`): The config.
Raises:
:py:class:`docker.errors.NotFound`
If the config does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_config(config_id))
def list(self, **kwargs):
"""
List configs. Similar to the ``docker config ls`` command.
Args:
filters (dict): Server-side list filtering options.
Returns:
(list of :py:class:`Config`): The configs.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.configs(**kwargs)
return [self.prepare_model(obj) for obj in resp]

File diff suppressed because it is too large

View File

@ -1,505 +0,0 @@
import itertools
import re
import warnings
from ..api import APIClient
from ..constants import DEFAULT_DATA_CHUNK_SIZE
from ..errors import BuildError, ImageLoadError, InvalidArgument
from ..utils import parse_repository_tag
from ..utils.json_stream import json_stream
from .resource import Collection, Model
class Image(Model):
"""
An image on the server.
"""
def __repr__(self):
tag_str = "', '".join(self.tags)
return f"<{self.__class__.__name__}: '{tag_str}'>"
@property
def labels(self):
"""
The labels of an image as dictionary.
"""
result = self.attrs['Config'].get('Labels')
return result or {}
@property
def short_id(self):
"""
The ID of the image truncated to 12 characters, plus the ``sha256:``
prefix.
"""
if self.id.startswith('sha256:'):
return self.id[:19]
return self.id[:12]
@property
def tags(self):
"""
The image's tags.
"""
tags = self.attrs.get('RepoTags')
if tags is None:
tags = []
return [tag for tag in tags if tag != '<none>:<none>']
def history(self):
"""
Show the history of an image.
Returns:
(list): The history of the image.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.history(self.id)
def remove(self, force=False, noprune=False):
"""
Remove this image.
Args:
force (bool): Force removal of the image
noprune (bool): Do not delete untagged parents
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.remove_image(
self.id,
force=force,
noprune=noprune,
)
def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE, named=False):
"""
Get a tarball of an image. Similar to the ``docker save`` command.
Args:
chunk_size (int): The generator will return up to that much data
per iteration, but may return less. If ``None``, data will be
streamed as it is received. Default: 2 MB
named (str or bool): If ``False`` (default), the tarball will not
retain repository and tag information for this image. If set
to ``True``, the first tag in the :py:attr:`~tags` list will
be used to identify the image. Alternatively, any element of
the :py:attr:`~tags` list can be used as an argument to use
that specific tag as the saved identifier.
Returns:
(generator): A stream of raw archive data.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> image = cli.images.get("busybox:latest")
>>> f = open('/tmp/busybox-latest.tar', 'wb')
>>> for chunk in image.save():
>>> f.write(chunk)
>>> f.close()
"""
img = self.id
if named:
img = self.tags[0] if self.tags else img
if isinstance(named, str):
if named not in self.tags:
raise InvalidArgument(
f"{named} is not a valid tag for this image"
)
img = named
return self.client.api.get_image(img, chunk_size)
def tag(self, repository, tag=None, **kwargs):
"""
Tag this image into a repository. Similar to the ``docker tag``
command.
Args:
repository (str): The repository to set for the tag
tag (str): The tag name
force (bool): Force
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Returns:
(bool): ``True`` if successful
"""
return self.client.api.tag(self.id, repository, tag=tag, **kwargs)
class RegistryData(Model):
"""
Image metadata stored on the registry, including available platforms.
"""
def __init__(self, image_name, *args, **kwargs):
super().__init__(*args, **kwargs)
self.image_name = image_name
@property
def id(self):
"""
The ID of the object.
"""
return self.attrs['Descriptor']['digest']
@property
def short_id(self):
"""
The ID of the image truncated to 12 characters, plus the ``sha256:``
prefix.
"""
return self.id[:19]
def pull(self, platform=None):
"""
Pull the image digest.
Args:
platform (str): The platform to pull the image for.
Default: ``None``
Returns:
(:py:class:`Image`): A reference to the pulled image.
"""
repository, _ = parse_repository_tag(self.image_name)
return self.collection.pull(repository, tag=self.id, platform=platform)
def has_platform(self, platform):
"""
Check whether the given platform identifier is available for this
digest.
Args:
platform (str or dict): A string using the ``os[/arch[/variant]]``
format, or a platform dictionary.
Returns:
(bool): ``True`` if the platform is recognized as available,
``False`` otherwise.
Raises:
:py:class:`docker.errors.InvalidArgument`
If the platform argument is not a valid descriptor.
"""
if platform and not isinstance(platform, dict):
parts = platform.split('/')
if len(parts) > 3 or len(parts) < 1:
raise InvalidArgument(
f'"{platform}" is not a valid platform descriptor'
)
platform = {'os': parts[0]}
if len(parts) > 2:
platform['variant'] = parts[2]
if len(parts) > 1:
platform['architecture'] = parts[1]
return normalize_platform(
platform, self.client.version()
) in self.attrs['Platforms']
def reload(self):
self.attrs = self.client.api.inspect_distribution(self.image_name)
reload.__doc__ = Model.reload.__doc__
class ImageCollection(Collection):
model = Image
def build(self, **kwargs):
"""
Build an image and return it. Similar to the ``docker build``
command. Either ``path`` or ``fileobj`` must be set.
If you already have a tar file for the Docker build context (including
a Dockerfile), pass a readable file-like object to ``fileobj``
and also pass ``custom_context=True``. If the stream is also
compressed, set ``encoding`` to the correct value (e.g ``gzip``).
If you want to get the raw output of the build, use the
:py:meth:`~docker.api.build.BuildApiMixin.build` method in the
low-level API.
Args:
path (str): Path to the directory containing the Dockerfile
fileobj: A file object to use as the Dockerfile. (Or a file-like
object)
tag (str): A tag to add to the final image
quiet (bool): Whether to return the status
nocache (bool): Don't use the cache when set to ``True``
rm (bool): Remove intermediate containers. The ``docker build``
command now defaults to ``--rm=true``, but we have kept the old
default of `False` to preserve backward compatibility
timeout (int): HTTP timeout
custom_context (bool): Optional if using ``fileobj``
encoding (str): The encoding for a stream. Set to ``gzip`` for
compressing
pull (bool): Downloads any updates to the FROM image in Dockerfiles
forcerm (bool): Always remove intermediate containers, even after
unsuccessful builds
dockerfile (str): path within the build context to the Dockerfile
buildargs (dict): A dictionary of build arguments
container_limits (dict): A dictionary of limits applied to each
container created by the build process. Valid keys:
- memory (int): set memory limit for build
- memswap (int): Total memory (memory + swap), -1 to disable
swap
- cpushares (int): CPU shares (relative weight)
- cpusetcpus (str): CPUs in which to allow execution, e.g.,
``"0-3"``, ``"0,1"``
shmsize (int): Size of `/dev/shm` in bytes. The size must be
greater than 0. If omitted the system uses 64MB
labels (dict): A dictionary of labels to set on the image
cache_from (list): A list of images used for build cache
resolution
target (str): Name of the build-stage to build in a multi-stage
Dockerfile
network_mode (str): networking mode for the run commands during
build
squash (bool): Squash the resulting images layers into a
single layer.
extra_hosts (dict): Extra hosts to add to /etc/hosts in building
containers, as a mapping of hostname to IP address.
platform (str): Platform in the format ``os[/arch[/variant]]``.
isolation (str): Isolation technology used during build.
Default: `None`.
use_config_proxy (bool): If ``True``, and if the docker client
configuration file (``~/.docker/config.json`` by default)
contains a proxy configuration, the corresponding environment
variables will be set in the container being built.
Returns:
(tuple): The first item is the :py:class:`Image` object for the
image that was built. The second item is a generator of the
build logs as JSON-decoded objects.
Raises:
:py:class:`docker.errors.BuildError`
If there is an error during the build.
:py:class:`docker.errors.APIError`
If the server returns any other error.
``TypeError``
If neither ``path`` nor ``fileobj`` is specified.
"""
resp = self.client.api.build(**kwargs)
if isinstance(resp, str):
return self.get(resp)
last_event = None
image_id = None
result_stream, internal_stream = itertools.tee(json_stream(resp))
for chunk in internal_stream:
if 'error' in chunk:
raise BuildError(chunk['error'], result_stream)
if 'stream' in chunk:
match = re.search(
r'(^Successfully built |sha256:)([0-9a-f]+)$',
chunk['stream']
)
if match:
image_id = match.group(2)
last_event = chunk
if image_id:
return (self.get(image_id), result_stream)
raise BuildError(last_event or 'Unknown', result_stream)
def get(self, name):
"""
Gets an image.
Args:
name (str): The name of the image.
Returns:
(:py:class:`Image`): The image.
Raises:
:py:class:`docker.errors.ImageNotFound`
If the image does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_image(name))
def get_registry_data(self, name, auth_config=None):
"""
Gets the registry data for an image.
Args:
name (str): The name of the image.
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
Returns:
(:py:class:`RegistryData`): The data object.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return RegistryData(
image_name=name,
attrs=self.client.api.inspect_distribution(name, auth_config),
client=self.client,
collection=self,
)
def list(self, name=None, all=False, filters=None):
"""
List images on the server.
Args:
name (str): Only show images belonging to the repository ``name``
all (bool): Show intermediate image layers. By default, these are
filtered out.
filters (dict): Filters to be processed on the image list.
Available filters:
- ``dangling`` (bool)
- `label` (str|list): format either ``"key"``, ``"key=value"``
or a list of such.
Returns:
(list of :py:class:`Image`): The images.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.images(name=name, all=all, filters=filters)
return [self.get(r["Id"]) for r in resp]
def load(self, data):
"""
Load an image that was previously saved using
:py:meth:`~docker.models.images.Image.save` (or ``docker save``).
Similar to ``docker load``.
Args:
data (binary): Image data to be loaded.
Returns:
(list of :py:class:`Image`): The images.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.load_image(data)
images = []
for chunk in resp:
if 'stream' in chunk:
match = re.search(
r'(^Loaded image ID: |^Loaded image: )(.+)$',
chunk['stream']
)
if match:
image_id = match.group(2)
images.append(image_id)
if 'errorDetail' in chunk:
raise ImageLoadError(chunk['errorDetail']['message'])
return [self.get(i) for i in images]
def pull(self, repository, tag=None, all_tags=False, **kwargs):
"""
Pull an image of the given name and return it. Similar to the
``docker pull`` command.
If ``tag`` is ``None`` or empty, it is set to ``latest``.
If ``all_tags`` is set, the ``tag`` parameter is ignored and all image
tags will be pulled.
If you want to get the raw pull output, use the
:py:meth:`~docker.api.image.ImageApiMixin.pull` method in the
low-level API.
Args:
repository (str): The repository to pull
tag (str): The tag to pull
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
platform (str): Platform in the format ``os[/arch[/variant]]``
all_tags (bool): Pull all image tags
Returns:
(:py:class:`Image` or list): The image that has been pulled.
If ``all_tags`` is True, the method will return a list
of :py:class:`Image` objects belonging to this repository.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> # Pull the image tagged `latest` in the busybox repo
>>> image = client.images.pull('busybox')
>>> # Pull all tags in the busybox repo
>>> images = client.images.pull('busybox', all_tags=True)
"""
repository, image_tag = parse_repository_tag(repository)
tag = tag or image_tag or 'latest'
if 'stream' in kwargs:
warnings.warn(
'`stream` is not a valid parameter for this method'
' and will be overridden',
stacklevel=1,
)
del kwargs['stream']
pull_log = self.client.api.pull(
repository, tag=tag, stream=True, all_tags=all_tags, **kwargs
)
for _ in pull_log:
# We don't do anything with the logs, but we need
# to keep the connection alive and wait for the image
# to be pulled.
pass
if not all_tags:
sep = '@' if tag.startswith('sha256:') else ':'
return self.get(f'{repository}{sep}{tag}')
return self.list(repository)
def push(self, repository, tag=None, **kwargs):
return self.client.api.push(repository, tag=tag, **kwargs)
push.__doc__ = APIClient.push.__doc__
def remove(self, *args, **kwargs):
self.client.api.remove_image(*args, **kwargs)
remove.__doc__ = APIClient.remove_image.__doc__
def search(self, *args, **kwargs):
return self.client.api.search(*args, **kwargs)
search.__doc__ = APIClient.search.__doc__
def prune(self, filters=None):
return self.client.api.prune_images(filters=filters)
prune.__doc__ = APIClient.prune_images.__doc__
def prune_builds(self, *args, **kwargs):
return self.client.api.prune_builds(*args, **kwargs)
prune_builds.__doc__ = APIClient.prune_builds.__doc__
def normalize_platform(platform, engine_info):
if platform is None:
platform = {}
if 'os' not in platform:
platform['os'] = engine_info['Os']
if 'architecture' not in platform:
platform['architecture'] = engine_info['Arch']
return platform
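A sketch tying the collection above together: pull for an explicit platform, then round-trip the image through save() and load() (image name and path are illustrative):

import docker

client = docker.from_env()
image = client.images.pull('alpine', tag='3.19', platform='linux/amd64')
print(image.short_id, image.tags)

with open('/tmp/alpine.tar', 'wb') as f:
    for chunk in image.save(named=True):
        f.write(chunk)

with open('/tmp/alpine.tar', 'rb') as f:
    print(client.images.load(f.read()))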

View File

@ -1,218 +0,0 @@
from ..api import APIClient
from ..utils import version_gte
from .containers import Container
from .resource import Collection, Model
class Network(Model):
"""
A Docker network.
"""
@property
def name(self):
"""
The name of the network.
"""
return self.attrs.get('Name')
@property
def containers(self):
"""
The containers that are connected to the network, as a list of
:py:class:`~docker.models.containers.Container` objects.
"""
return [
self.client.containers.get(cid) for cid in
(self.attrs.get('Containers') or {}).keys()
]
def connect(self, container, *args, **kwargs):
"""
Connect a container to this network.
Args:
container (str): Container to connect to this network, as either
an ID, name, or :py:class:`~docker.models.containers.Container`
object.
aliases (:py:class:`list`): A list of aliases for this endpoint.
Names in that list can be used within the network to reach the
container. Defaults to ``None``.
links (:py:class:`list`): A list of links for this endpoint.
Containers declared in this list will be linked to this
container. Defaults to ``None``.
ipv4_address (str): The IP address of this container on the
network, using the IPv4 protocol. Defaults to ``None``.
ipv6_address (str): The IP address of this container on the
network, using the IPv6 protocol. Defaults to ``None``.
link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
addresses.
driver_opt (dict): A dictionary of options to provide to the
network driver. Defaults to ``None``.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if isinstance(container, Container):
container = container.id
return self.client.api.connect_container_to_network(
container, self.id, *args, **kwargs
)
def disconnect(self, container, *args, **kwargs):
"""
Disconnect a container from this network.
Args:
container (str): Container to disconnect from this network, as
either an ID, name, or
:py:class:`~docker.models.containers.Container` object.
force (bool): Force the container to disconnect from a network.
Default: ``False``
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if isinstance(container, Container):
container = container.id
return self.client.api.disconnect_container_from_network(
container, self.id, *args, **kwargs
)
def remove(self):
"""
Remove this network.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.remove_network(self.id)
class NetworkCollection(Collection):
"""
Networks on the Docker server.
"""
model = Network
def create(self, name, *args, **kwargs):
"""
Create a network. Similar to the ``docker network create``.
Args:
name (str): Name of the network
driver (str): Name of the driver used to create the network
options (dict): Driver options as a key-value dictionary
ipam (IPAMConfig): Optional custom IP scheme for the network.
check_duplicate (bool): Request daemon to check for networks with
same name. Default: ``None``.
internal (bool): Restrict external access to the network. Default
``False``.
labels (dict): Map of labels to set on the network. Default
``None``.
enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
attachable (bool): If enabled, and the network is in the global
scope, non-service containers on worker nodes will be able to
connect to the network.
scope (str): Specify the network's scope (``local``, ``global`` or
``swarm``)
ingress (bool): If set, create an ingress network which provides
the routing-mesh in swarm mode.
Returns:
(:py:class:`Network`): The network that was created.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
A network using the bridge driver:
>>> client.networks.create("network1", driver="bridge")
You can also create more advanced networks with custom IPAM
configurations. For example, setting the subnet to
``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
.. code-block:: python
>>> ipam_pool = docker.types.IPAMPool(
subnet='192.168.52.0/24',
gateway='192.168.52.254'
)
>>> ipam_config = docker.types.IPAMConfig(
pool_configs=[ipam_pool]
)
>>> client.networks.create(
"network1",
driver="bridge",
ipam=ipam_config
)
"""
resp = self.client.api.create_network(name, *args, **kwargs)
return self.get(resp['Id'])
def get(self, network_id, *args, **kwargs):
"""
Get a network by its ID.
Args:
network_id (str): The ID of the network.
verbose (bool): Retrieve the service details across the cluster in
swarm mode.
scope (str): Filter the network by scope (``swarm``, ``global``
or ``local``).
Returns:
(:py:class:`Network`) The network.
Raises:
:py:class:`docker.errors.NotFound`
If the network does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(
self.client.api.inspect_network(network_id, *args, **kwargs)
)
def list(self, *args, **kwargs):
"""
List networks. Similar to the ``docker network ls`` command.
Args:
names (:py:class:`list`): List of names to filter by.
ids (:py:class:`list`): List of ids to filter by.
filters (dict): Filters to be processed on the network list.
Available filters:
- ``driver=[<driver-name>]`` Matches a network's driver.
- `label` (str|list): format either ``"key"``, ``"key=value"``
or a list of such.
- ``type=["custom"|"builtin"]`` Filters networks by type.
greedy (bool): Fetch more details for each network individually.
You might want this to get the containers attached to them.
Returns:
(list of :py:class:`Network`) The networks on the server.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
greedy = kwargs.pop('greedy', False)
resp = self.client.api.networks(*args, **kwargs)
networks = [self.prepare_model(item) for item in resp]
if greedy and version_gte(self.client.api._version, '1.28'):
for net in networks:
net.reload()
return networks
def prune(self, filters=None):
return self.client.api.prune_networks(filters=filters)
prune.__doc__ = APIClient.prune_networks.__doc__
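End to end, the Network/NetworkCollection pair above is typically used like this (a sketch; container and network names are illustrative):

import docker

client = docker.from_env()
net = client.networks.create('appnet', driver='bridge')
web = client.containers.run('nginx:alpine', detach=True, name='web')
net.connect(web, aliases=['web'])
net.reload()  # refresh attrs so the Containers key is populated
print([c.name for c in net.containers])
net.disconnect(web)
net.remove()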

View File

@ -1,107 +0,0 @@
from .resource import Collection, Model
class Node(Model):
"""A node in a swarm."""
id_attribute = 'ID'
@property
def version(self):
"""
The version number of the node. If this is not the same as the
server, the :py:meth:`update` function will not work and you will
need to call :py:meth:`reload` before calling it again.
"""
return self.attrs.get('Version').get('Index')
def update(self, node_spec):
"""
Update the node's configuration.
Args:
node_spec (dict): Configuration settings to update. Any values
not provided will be removed. Default: ``None``
Returns:
`True` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> node_spec = {'Availability': 'active',
'Name': 'node-name',
'Role': 'manager',
'Labels': {'foo': 'bar'}
}
>>> node.update(node_spec)
"""
return self.client.api.update_node(self.id, self.version, node_spec)
def remove(self, force=False):
"""
Remove this node from the swarm.
Args:
force (bool): Force remove an active node. Default: `False`
Returns:
`True` if the request was successful.
Raises:
:py:class:`docker.errors.NotFound`
If the node doesn't exist in the swarm.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.remove_node(self.id, force=force)
class NodeCollection(Collection):
"""Nodes on the Docker server."""
model = Node
def get(self, node_id):
"""
Get a node.
Args:
node_id (string): ID of the node to be inspected.
Returns:
A :py:class:`Node` object.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_node(node_id))
def list(self, *args, **kwargs):
"""
List swarm nodes.
Args:
filters (dict): Filters to process on the nodes list. Valid
filters: ``id``, ``name``, ``membership`` and ``role``.
Default: ``None``
Returns:
A list of :py:class:`Node` objects.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> client.nodes.list(filters={'role': 'manager'})
"""
return [
self.prepare_model(n)
for n in self.client.api.nodes(*args, **kwargs)
]

View File

@ -1,206 +0,0 @@
from .. import errors
from .resource import Collection, Model
class Plugin(Model):
"""
A plugin on the server.
"""
def __repr__(self):
return f"<{self.__class__.__name__}: '{self.name}'>"
@property
def name(self):
"""
The plugin's name.
"""
return self.attrs.get('Name')
@property
def enabled(self):
"""
Whether the plugin is enabled.
"""
return self.attrs.get('Enabled')
@property
def settings(self):
"""
A dictionary representing the plugin's configuration.
"""
return self.attrs.get('Settings')
def configure(self, options):
"""
Update the plugin's settings.
Args:
options (dict): A key-value mapping of options.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
self.client.api.configure_plugin(self.name, options)
self.reload()
def disable(self, force=False):
"""
Disable the plugin.
Args:
force (bool): Force disable. Default: False
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
self.client.api.disable_plugin(self.name, force)
self.reload()
def enable(self, timeout=0):
"""
Enable the plugin.
Args:
timeout (int): Timeout in seconds. Default: 0
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
self.client.api.enable_plugin(self.name, timeout)
self.reload()
def push(self):
"""
Push the plugin to a remote registry.
Returns:
A dict iterator streaming the status of the upload.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.push_plugin(self.name)
def remove(self, force=False):
"""
Remove the plugin from the server.
Args:
force (bool): Remove even if the plugin is enabled.
Default: False
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.remove_plugin(self.name, force=force)
def upgrade(self, remote=None):
"""
Upgrade the plugin.
Args:
remote (string): Remote reference to upgrade to. The
``:latest`` tag is optional and is the default if omitted.
Default: this plugin's name.
Returns:
A generator streaming the decoded API logs
"""
if self.enabled:
raise errors.DockerException(
'Plugin must be disabled before upgrading.'
)
if remote is None:
remote = self.name
privileges = self.client.api.plugin_privileges(remote)
yield from self.client.api.upgrade_plugin(
self.name,
remote,
privileges,
)
self.reload()
class PluginCollection(Collection):
model = Plugin
def create(self, name, plugin_data_dir, gzip=False):
"""
Create a new plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
plugin_data_dir (string): Path to the plugin data directory.
Plugin data directory must contain the ``config.json``
manifest file and the ``rootfs`` directory.
gzip (bool): Compress the context using gzip. Default: False
Returns:
(:py:class:`Plugin`): The newly created plugin.
"""
self.client.api.create_plugin(name, plugin_data_dir, gzip)
return self.get(name)
def get(self, name):
"""
Gets a plugin.
Args:
name (str): The name of the plugin.
Returns:
(:py:class:`Plugin`): The plugin.
Raises:
:py:class:`docker.errors.NotFound` If the plugin does not
exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_plugin(name))
def install(self, remote_name, local_name=None):
"""
Pull and install a plugin.
Args:
remote_name (string): Remote reference for the plugin to
install. The ``:latest`` tag is optional, and is the
default if omitted.
local_name (string): Local name for the pulled plugin.
The ``:latest`` tag is optional, and is the default if
omitted. Optional.
Returns:
(:py:class:`Plugin`): The installed plugin
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
privileges = self.client.api.plugin_privileges(remote_name)
it = self.client.api.pull_plugin(remote_name, privileges, local_name)
for _data in it:
pass
return self.get(local_name or remote_name)
def list(self):
"""
List plugins installed on the server.
Returns:
(list of :py:class:`Plugin`): The plugins.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.plugins()
return [self.prepare_model(r) for r in resp]
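The usual install/enable/configure flow with the classes above (the plugin reference is illustrative):

import docker

client = docker.from_env()
plugin = client.plugins.install('vieux/sshfs')  # pulls and grants privileges
plugin.enable(timeout=30)
plugin.configure({'DEBUG': '1'})
print(plugin.name, plugin.enabled)
plugin.disable()
plugin.remove(force=True)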

View File

@ -1,92 +0,0 @@
class Model:
"""
A base class for representing a single object on the server.
"""
id_attribute = 'Id'
def __init__(self, attrs=None, client=None, collection=None):
#: A client pointing at the server that this object is on.
self.client = client
#: The collection that this model is part of.
self.collection = collection
#: The raw representation of this object from the API
self.attrs = attrs
if self.attrs is None:
self.attrs = {}
def __repr__(self):
return f"<{self.__class__.__name__}: {self.short_id}>"
def __eq__(self, other):
return isinstance(other, self.__class__) and self.id == other.id
def __hash__(self):
return hash(f"{self.__class__.__name__}:{self.id}")
@property
def id(self):
"""
The ID of the object.
"""
return self.attrs.get(self.id_attribute)
@property
def short_id(self):
"""
The ID of the object, truncated to 12 characters.
"""
return self.id[:12]
def reload(self):
"""
Load this object from the server again and update ``attrs`` with the
new data.
"""
new_model = self.collection.get(self.id)
self.attrs = new_model.attrs
class Collection:
"""
A base class for representing all objects of a particular type on the
server.
"""
#: The type of object this collection represents, set by subclasses
model = None
def __init__(self, client=None):
#: The client pointing at the server that this collection of objects
#: is on.
self.client = client
def __call__(self, *args, **kwargs):
raise TypeError(
f"'{self.__class__.__name__}' object is not callable. "
"You might be trying to use the old (pre-2.0) API - "
"use docker.APIClient if so."
)
def list(self):
raise NotImplementedError
def get(self, key):
raise NotImplementedError
def create(self, attrs=None):
raise NotImplementedError
def prepare_model(self, attrs):
"""
Create a model from a set of attributes.
"""
if isinstance(attrs, Model):
attrs.client = self.client
attrs.collection = self
return attrs
elif isinstance(attrs, dict):
return self.model(attrs=attrs, client=self.client, collection=self)
else:
raise Exception(f"Can't create {self.model.__name__} from {attrs}")

View File

@ -1,70 +0,0 @@
from ..api import APIClient
from .resource import Collection, Model
class Secret(Model):
"""A secret."""
id_attribute = 'ID'
def __repr__(self):
return f"<{self.__class__.__name__}: '{self.name}'>"
@property
def name(self):
return self.attrs['Spec']['Name']
def remove(self):
"""
Remove this secret.
Raises:
:py:class:`docker.errors.APIError`
If secret failed to remove.
"""
return self.client.api.remove_secret(self.id)
class SecretCollection(Collection):
"""Secrets on the Docker server."""
model = Secret
def create(self, **kwargs):
obj = self.client.api.create_secret(**kwargs)
obj.setdefault("Spec", {})["Name"] = kwargs.get("name")
return self.prepare_model(obj)
create.__doc__ = APIClient.create_secret.__doc__
def get(self, secret_id):
"""
Get a secret.
Args:
secret_id (str): Secret ID.
Returns:
(:py:class:`Secret`): The secret.
Raises:
:py:class:`docker.errors.NotFound`
If the secret does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_secret(secret_id))
def list(self, **kwargs):
"""
List secrets. Similar to the ``docker secret ls`` command.
Args:
filters (dict): Server-side list filtering options.
Returns:
(list of :py:class:`Secret`): The secrets.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.secrets(**kwargs)
return [self.prepare_model(obj) for obj in resp]
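Secrets behave like the configs model earlier in this diff: thin wrappers over the swarm API. A sketch (requires swarm mode; the values are illustrative):

import docker

client = docker.from_env()
secret = client.secrets.create(name='db_password', data=b'hunter2')
print(secret.name, secret.id)
print([s.name for s in client.secrets.list()])
secret.remove()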

View File

@ -1,390 +0,0 @@
import copy
from docker.errors import InvalidArgument, create_unexpected_kwargs_error
from docker.types import ContainerSpec, Placement, ServiceMode, TaskTemplate
from .resource import Collection, Model
class Service(Model):
"""A service."""
id_attribute = 'ID'
@property
def name(self):
"""The service's name."""
return self.attrs['Spec']['Name']
@property
def version(self):
"""
The version number of the service. If this is not the same as the
server, the :py:meth:`update` function will not work and you will
need to call :py:meth:`reload` before calling it again.
"""
return self.attrs.get('Version').get('Index')
def remove(self):
"""
Stop and remove the service.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.remove_service(self.id)
def tasks(self, filters=None):
"""
List the tasks in this service.
Args:
filters (dict): A map of filters to process on the tasks list.
Valid filters: ``id``, ``name``, ``node``,
``label``, and ``desired-state``.
Returns:
:py:class:`list`: List of task dictionaries.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if filters is None:
filters = {}
filters['service'] = self.id
return self.client.api.tasks(filters=filters)
def update(self, **kwargs):
"""
Update a service's configuration. Similar to the ``docker service
update`` command.
Takes the same parameters as :py:meth:`~ServiceCollection.create`.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
# Image is required, so if it hasn't been set, use current image
if 'image' not in kwargs:
spec = self.attrs['Spec']['TaskTemplate']['ContainerSpec']
kwargs['image'] = spec['Image']
if kwargs.get('force_update') is True:
task_template = self.attrs['Spec']['TaskTemplate']
current_value = int(task_template.get('ForceUpdate', 0))
kwargs['force_update'] = current_value + 1
create_kwargs = _get_create_service_kwargs('update', kwargs)
return self.client.api.update_service(
self.id,
self.version,
**create_kwargs
)
def logs(self, **kwargs):
"""
Get log stream for the service.
Note: This method works only for services with the ``json-file``
or ``journald`` logging drivers.
Args:
details (bool): Show extra details provided to logs.
Default: ``False``
follow (bool): Keep connection open to read logs as they are
sent by the Engine. Default: ``False``
stdout (bool): Return logs from ``stdout``. Default: ``False``
stderr (bool): Return logs from ``stderr``. Default: ``False``
since (int): UNIX timestamp for the logs starting point.
Default: 0
timestamps (bool): Add timestamps to every log line.
tail (string or int): Number of log lines to be returned,
counting from the current end of the logs. Specify an
integer or ``'all'`` to output all log lines.
Default: ``all``
Returns:
generator: Logs for the service.
"""
is_tty = self.attrs['Spec']['TaskTemplate']['ContainerSpec'].get(
'TTY', False
)
return self.client.api.service_logs(self.id, is_tty=is_tty, **kwargs)
def scale(self, replicas):
"""
Scale service container.
Args:
replicas (int): The number of containers that should be running.
Returns:
bool: ``True`` if successful.
"""
if 'Global' in self.attrs['Spec']['Mode'].keys():
raise InvalidArgument('Cannot scale a global container')
service_mode = ServiceMode('replicated', replicas)
return self.client.api.update_service(self.id, self.version,
mode=service_mode,
fetch_current_spec=True)
def force_update(self):
"""
Force update the service even if no changes require it.
Returns:
bool: ``True`` if successful.
"""
return self.update(force_update=True, fetch_current_spec=True)
class ServiceCollection(Collection):
"""Services on the Docker server."""
model = Service
def create(self, image, command=None, **kwargs):
"""
Create a service. Similar to the ``docker service create`` command.
Args:
image (str): The image name to use for the containers.
command (list of str or str): Command to run.
args (list of str): Arguments to the command.
constraints (list of str): :py:class:`~docker.types.Placement`
constraints.
preferences (list of tuple): :py:class:`~docker.types.Placement`
preferences.
maxreplicas (int): Maximum number of replicas per node, applied
via :py:class:`~docker.types.Placement`.
platforms (list of tuple): A list of platform constraints
expressed as ``(arch, os)`` tuples.
container_labels (dict): Labels to apply to the container.
endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``.
env (list of str): Environment variables, in the form
``KEY=val``.
hostname (string): Hostname to set on the container.
init (boolean): Run an init inside the container that forwards
signals and reaps processes
isolation (string): Isolation technology used by the service's
containers. Only used for Windows containers.
labels (dict): Labels to apply to the service.
log_driver (str): Log driver to use for containers.
log_driver_options (dict): Log driver options.
mode (ServiceMode): Scheduling mode for the service.
Default: ``None``
mounts (list of str): Mounts for the containers, in the form
``source:target:options``, where options is either
``ro`` or ``rw``.
name (str): Name to give to the service.
networks (:py:class:`list`): List of network names or IDs or
:py:class:`~docker.types.NetworkAttachmentConfig` to attach the
service to. Default: ``None``.
resources (Resources): Resource limits and reservations.
restart_policy (RestartPolicy): Restart policy for containers.
secrets (list of :py:class:`~docker.types.SecretReference`): List
of secrets accessible to containers for this service.
stop_grace_period (int): Amount of time to wait for
containers to terminate before forcefully killing them.
update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None``
rollback_config (RollbackConfig): Specification for the rollback
strategy of the service. Default: ``None``
user (str): User to run commands as.
workdir (str): Working directory for commands to run.
tty (boolean): Whether a pseudo-TTY should be allocated.
groups (:py:class:`list`): A list of additional groups that the
container process will run as.
open_stdin (boolean): Open ``stdin``
read_only (boolean): Mount the container's root filesystem as read
only.
stop_signal (string): Set signal to stop the service's containers
healthcheck (Healthcheck): Healthcheck
configuration for this service.
hosts (:py:class:`dict`): A set of host to IP mappings to add to
the container's `hosts` file.
dns_config (DNSConfig): Specification for DNS
related configurations in resolver configuration file.
configs (:py:class:`list`): List of
:py:class:`~docker.types.ConfigReference` that will be exposed
to the service.
privileges (Privileges): Security options for the service's
containers.
cap_add (:py:class:`list`): A list of kernel capabilities to add to
the default set for the container.
cap_drop (:py:class:`list`): A list of kernel capabilities to drop
from the default set for the container.
sysctls (:py:class:`dict`): A dict of sysctl values to add to the
container
Returns:
:py:class:`Service`: The created service.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
kwargs['image'] = image
kwargs['command'] = command
create_kwargs = _get_create_service_kwargs('create', kwargs)
service_id = self.client.api.create_service(**create_kwargs)
return self.get(service_id)
def get(self, service_id, insert_defaults=None):
"""
Get a service.
Args:
service_id (str): The ID of the service.
insert_defaults (boolean): If true, default values will be merged
into the output.
Returns:
:py:class:`Service`: The service.
Raises:
:py:class:`docker.errors.NotFound`
If the service does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
:py:class:`docker.errors.InvalidVersion`
If one of the arguments is not supported with the current
API version.
"""
return self.prepare_model(
self.client.api.inspect_service(service_id, insert_defaults)
)
def list(self, **kwargs):
"""
List services.
Args:
filters (dict): Filters to process on the nodes list. Valid
filters: ``id``, ``name``, ``label`` and ``mode``.
Default: ``None``.
status (bool): Include the service task count of running and
desired tasks. Default: ``None``.
Returns:
list of :py:class:`Service`: The services.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return [
self.prepare_model(s)
for s in self.client.api.services(**kwargs)
]
# kwargs to copy straight over to ContainerSpec
CONTAINER_SPEC_KWARGS = [
'args',
'cap_add',
'cap_drop',
'command',
'configs',
'dns_config',
'env',
'groups',
'healthcheck',
'hostname',
'hosts',
'image',
'init',
'isolation',
'labels',
'mounts',
'open_stdin',
'privileges',
'read_only',
'secrets',
'stop_grace_period',
'stop_signal',
'tty',
'user',
'workdir',
'sysctls',
]
# kwargs to copy straight over to TaskTemplate
TASK_TEMPLATE_KWARGS = [
'networks',
'resources',
'restart_policy',
]
# kwargs to copy straight over to create_service
CREATE_SERVICE_KWARGS = [
'name',
'labels',
'mode',
'update_config',
'rollback_config',
'endpoint_spec',
]
PLACEMENT_KWARGS = [
'constraints',
'preferences',
'platforms',
'maxreplicas',
]
def _get_create_service_kwargs(func_name, kwargs):
# Copy over things which can be copied directly
create_kwargs = {}
for key in copy.copy(kwargs):
if key in CREATE_SERVICE_KWARGS:
create_kwargs[key] = kwargs.pop(key)
container_spec_kwargs = {}
for key in copy.copy(kwargs):
if key in CONTAINER_SPEC_KWARGS:
container_spec_kwargs[key] = kwargs.pop(key)
task_template_kwargs = {}
for key in copy.copy(kwargs):
if key in TASK_TEMPLATE_KWARGS:
task_template_kwargs[key] = kwargs.pop(key)
if 'container_labels' in kwargs:
container_spec_kwargs['labels'] = kwargs.pop('container_labels')
placement = {}
for key in copy.copy(kwargs):
if key in PLACEMENT_KWARGS:
placement[key] = kwargs.pop(key)
placement = Placement(**placement)
task_template_kwargs['placement'] = placement
if 'log_driver' in kwargs:
task_template_kwargs['log_driver'] = {
'Name': kwargs.pop('log_driver'),
'Options': kwargs.pop('log_driver_options', {})
}
if func_name == 'update':
if 'force_update' in kwargs:
task_template_kwargs['force_update'] = kwargs.pop('force_update')
# fetch the current spec by default if updating the service
# through the model
fetch_current_spec = kwargs.pop('fetch_current_spec', True)
create_kwargs['fetch_current_spec'] = fetch_current_spec
# All kwargs should have been consumed by this point, so raise
# error if any are left
if kwargs:
raise create_unexpected_kwargs_error(func_name, kwargs)
container_spec = ContainerSpec(**container_spec_kwargs)
task_template_kwargs['container_spec'] = container_spec
create_kwargs['task_template'] = TaskTemplate(**task_template_kwargs)
return create_kwargs
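
`_get_create_service_kwargs` is what lets `ServiceCollection.create` accept one flat keyword list: each name is routed to `create_service`, `ContainerSpec`, `TaskTemplate`, or `Placement` according to the four lists above. A sketch, assuming a client connected to a swarm manager (service name and image are illustrative):

import docker
from docker.types import RestartPolicy

client = docker.from_env()
service = client.services.create(
    'nginx:alpine',
    name='web',                                  # CREATE_SERVICE_KWARGS
    env=['DEBUG=1'],                             # CONTAINER_SPEC_KWARGS
    restart_policy=RestartPolicy('on-failure'),  # TASK_TEMPLATE_KWARGS
    constraints=['node.role == worker'],         # PLACEMENT_KWARGS
)
service.scale(3)
print([t['Status']['State'] for t in service.tasks()])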

View File

@ -1,190 +0,0 @@
from docker.api import APIClient
from docker.errors import APIError
from .resource import Model
class Swarm(Model):
"""
The server's Swarm state. This is a singleton that must be reloaded to get
the current state of the Swarm.
"""
id_attribute = 'ID'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.client:
try:
self.reload()
except APIError as e:
# FIXME: https://github.com/docker/docker/issues/29192
if e.response.status_code not in (406, 503):
raise
@property
def version(self):
"""
The version number of the swarm. If this is not the same as the
server, the :py:meth:`update` function will not work and you will
need to call :py:meth:`reload` before calling it again.
"""
return self.attrs.get('Version').get('Index')
def get_unlock_key(self):
return self.client.api.get_unlock_key()
get_unlock_key.__doc__ = APIClient.get_unlock_key.__doc__
def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
force_new_cluster=False, default_addr_pool=None,
subnet_size=None, data_path_addr=None, data_path_port=None,
**kwargs):
"""
Initialize a new swarm on this Engine.
Args:
advertise_addr (str): Externally reachable address advertised to
other nodes. This can either be an address/port combination in
the form ``192.168.1.1:4567``, or an interface followed by a
port number, like ``eth0:4567``. If the port number is omitted,
the port number from the listen address is used.
If not specified, it will be automatically detected when
possible.
listen_addr (str): Listen address used for inter-manager
communication, as well as determining the networking interface
used for the VXLAN Tunnel Endpoint (VTEP). This can either be
an address/port combination in the form ``192.168.1.1:4567``,
or an interface followed by a port number, like ``eth0:4567``.
If the port number is omitted, the default swarm listening port
is used. Default: ``0.0.0.0:2377``
force_new_cluster (bool): Force creating a new Swarm, even if
already part of one. Default: False
default_addr_pool (list of str): Default Address Pool specifies
default subnet pools for global scope networks. Each pool
should be specified as a CIDR block, like '10.0.0.0/8'.
Default: None
subnet_size (int): SubnetSize specifies the subnet size of the
networks created from the default subnet pool. Default: None
data_path_addr (string): Address or interface to use for data path
traffic. For example, 192.168.1.1, or an interface, like eth0.
data_path_port (int): Port number to use for data path traffic.
Acceptable port range is 1024 to 49151. If set to ``None`` or
0, the default port 4789 will be used. Default: None
task_history_retention_limit (int): Maximum number of task
history entries stored.
snapshot_interval (int): Number of log entries between snapshots.
keep_old_snapshots (int): Number of snapshots to keep beyond the
current snapshot.
log_entries_for_slow_followers (int): Number of log entries to
keep around to sync up slow followers after a snapshot is
created.
heartbeat_tick (int): Amount of ticks (in seconds) between each
heartbeat.
election_tick (int): Amount of ticks (in seconds) needed without a
leader to trigger a new election.
dispatcher_heartbeat_period (int): The delay for an agent to send
a heartbeat to the dispatcher.
node_cert_expiry (int): Automatic expiry for nodes certificates.
external_ca (dict): Configuration for forwarding signing requests
to an external certificate authority. Use
``docker.types.SwarmExternalCA``.
name (string): Swarm's name
labels (dict): User-defined key/value metadata.
signing_ca_cert (str): The desired signing CA certificate for all
swarm node TLS leaf certificates, in PEM format.
signing_ca_key (str): The desired signing CA key for all swarm
node TLS leaf certificates, in PEM format.
ca_force_rotate (int): An integer whose purpose is to force swarm
to generate a new signing CA certificate and key, if none have
been specified.
autolock_managers (boolean): If set, generate a key and use it to
lock data stored on the managers.
log_driver (DriverConfig): The default log driver to use for tasks
created in the orchestrator.
Returns:
(str): The ID of the created node.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> client.swarm.init(
advertise_addr='eth0', listen_addr='0.0.0.0:5000',
force_new_cluster=False, default_addr_pool=['10.20.0.0/16'],
subnet_size=24, snapshot_interval=5000,
log_entries_for_slow_followers=1200
)
"""
init_kwargs = {
'advertise_addr': advertise_addr,
'listen_addr': listen_addr,
'force_new_cluster': force_new_cluster,
'default_addr_pool': default_addr_pool,
'subnet_size': subnet_size,
'data_path_addr': data_path_addr,
'data_path_port': data_path_port,
}
init_kwargs['swarm_spec'] = self.client.api.create_swarm_spec(**kwargs)
node_id = self.client.api.init_swarm(**init_kwargs)
self.reload()
return node_id
def join(self, *args, **kwargs):
return self.client.api.join_swarm(*args, **kwargs)
join.__doc__ = APIClient.join_swarm.__doc__
def leave(self, *args, **kwargs):
return self.client.api.leave_swarm(*args, **kwargs)
leave.__doc__ = APIClient.leave_swarm.__doc__
def reload(self):
"""
Inspect the swarm on the server and store the response in
:py:attr:`attrs`.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
self.attrs = self.client.api.inspect_swarm()
def unlock(self, key):
return self.client.api.unlock_swarm(key)
unlock.__doc__ = APIClient.unlock_swarm.__doc__
def update(self, rotate_worker_token=False, rotate_manager_token=False,
rotate_manager_unlock_key=False, **kwargs):
"""
Update the swarm's configuration.
It takes the same arguments as :py:meth:`init`, except
``advertise_addr``, ``listen_addr``, and ``force_new_cluster``. In
addition, it takes these arguments:
Args:
rotate_worker_token (bool): Rotate the worker join token. Default:
``False``.
rotate_manager_token (bool): Rotate the manager join token.
Default: ``False``.
rotate_manager_unlock_key (bool): Rotate the manager unlock key.
Default: ``False``.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
# this seems to have to be set
if kwargs.get('node_cert_expiry') is None:
kwargs['node_cert_expiry'] = 7776000000000000
return self.client.api.update_swarm(
version=self.version,
swarm_spec=self.client.api.create_swarm_spec(**kwargs),
rotate_worker_token=rotate_worker_token,
rotate_manager_token=rotate_manager_token,
rotate_manager_unlock_key=rotate_manager_unlock_key
)
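
A short sketch of the singleton in use, assuming a fresh engine that is not yet part of a swarm:

import docker

client = docker.from_env()
client.swarm.init(advertise_addr='eth0', listen_addr='0.0.0.0:2377')
print(client.swarm.version)                # spec version index
client.swarm.update(rotate_worker_token=True)
client.swarm.reload()                      # refresh attrs before further updates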

View File

@ -1,99 +0,0 @@
from ..api import APIClient
from .resource import Collection, Model
class Volume(Model):
"""A volume."""
id_attribute = 'Name'
@property
def name(self):
"""The name of the volume."""
return self.attrs['Name']
def remove(self, force=False):
"""
Remove this volume.
Args:
force (bool): Force removal of volumes that were already removed
out of band by the volume driver plugin.
Raises:
:py:class:`docker.errors.APIError`
If volume failed to remove.
"""
return self.client.api.remove_volume(self.id, force=force)
class VolumeCollection(Collection):
"""Volumes on the Docker server."""
model = Volume
def create(self, name=None, **kwargs):
"""
Create a volume.
Args:
name (str): Name of the volume. If not specified, the engine
generates a name.
driver (str): Name of the driver used to create the volume
driver_opts (dict): Driver options as a key-value dictionary
labels (dict): Labels to set on the volume
Returns:
(:py:class:`Volume`): The volume created.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> volume = client.volumes.create(name='foobar', driver='local',
driver_opts={'foo': 'bar', 'baz': 'false'},
labels={"key": "value"})
"""
obj = self.client.api.create_volume(name, **kwargs)
return self.prepare_model(obj)
def get(self, volume_id):
"""
Get a volume.
Args:
volume_id (str): Volume name.
Returns:
(:py:class:`Volume`): The volume.
Raises:
:py:class:`docker.errors.NotFound`
If the volume does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_volume(volume_id))
def list(self, **kwargs):
"""
List volumes. Similar to the ``docker volume ls`` command.
Args:
filters (dict): Server-side list filtering options.
Returns:
(list of :py:class:`Volume`): The volumes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.volumes(**kwargs)
if not resp.get('Volumes'):
return []
return [self.prepare_model(obj) for obj in resp['Volumes']]
def prune(self, filters=None):
return self.client.api.prune_volumes(filters=filters)
prune.__doc__ = APIClient.prune_volumes.__doc__
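
Usage sketch for the collection above (volume name and labels are illustrative):

import docker

client = docker.from_env()
vol = client.volumes.create(name='appdata', driver='local',
                            labels={'env': 'dev'})
print(vol.name, [v.name for v in client.volumes.list()])
vol.remove(force=True)
client.volumes.prune()                     # drop any remaining unused volumes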

View File

@ -0,0 +1 @@
from .ssladapter import SSLAdapter # flake8: noqa

View File

@ -0,0 +1,57 @@
""" Resolves OpenSSL issues in some servers:
https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
https://github.com/kennethreitz/requests/pull/799
"""
from distutils.version import StrictVersion
from requests.adapters import HTTPAdapter
try:
import requests.packages.urllib3 as urllib3
except ImportError:
import urllib3
PoolManager = urllib3.poolmanager.PoolManager
class SSLAdapter(HTTPAdapter):
'''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
def __init__(self, ssl_version=None, assert_hostname=None,
assert_fingerprint=None, **kwargs):
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
super(SSLAdapter, self).__init__(**kwargs)
def init_poolmanager(self, connections, maxsize, block=False):
kwargs = {
'num_pools': connections,
'maxsize': maxsize,
'block': block,
'assert_hostname': self.assert_hostname,
'assert_fingerprint': self.assert_fingerprint,
}
if self.ssl_version and self.can_override_ssl_version():
kwargs['ssl_version'] = self.ssl_version
self.poolmanager = PoolManager(**kwargs)
def get_connection(self, *args, **kwargs):
"""
Ensure assert_hostname is set correctly on our pool
We already take care of a normal poolmanager via init_poolmanager
But we still need to take care of when there is a proxy poolmanager
"""
conn = super(SSLAdapter, self).get_connection(*args, **kwargs)
if conn.assert_hostname != self.assert_hostname:
conn.assert_hostname = self.assert_hostname
return conn
def can_override_ssl_version(self):
urllib_ver = urllib3.__version__.split('-')[0]
if urllib_ver is None:
return False
if urllib_ver == 'dev':
return True
return StrictVersion(urllib_ver) > StrictVersion('1.5')
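
The adapter can be mounted on any `requests` session, which is exactly how `configure_client` in `tls.py` (below) wires it up; a sketch, assuming an engine listening on TLS at `dockerhost:2376` and the certificate path shown:

import ssl
import requests
from docker.ssladapter import SSLAdapter

session = requests.Session()
# Pin the SSL version for every https:// request made on this session.
session.mount('https://', SSLAdapter(ssl_version=ssl.PROTOCOL_TLSv1))
resp = session.get('https://dockerhost:2376/_ping', verify='ca.pem')
print(resp.status_code)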

View File

@ -1,32 +1,35 @@
 import os
+import ssl
 from . import errors
+from .ssladapter import ssladapter
-class TLSConfig:
+class TLSConfig(object):
-    """
-    TLS configuration.
-    Args:
-        client_cert (tuple of str): Path to client cert, path to client key.
-        ca_cert (str): Path to CA cert file.
-        verify (bool or str): This can be a bool or a path to a CA cert
-            file to verify against. If ``True``, verify using ca_cert;
-            if ``False`` or not specified, do not verify.
-    """
     cert = None
     ca_cert = None
     verify = None
+    ssl_version = None
-    def __init__(self, client_cert=None, ca_cert=None, verify=None):
+    def __init__(self, client_cert=None, ca_cert=None, verify=None,
+                 ssl_version=None, assert_hostname=None,
+                 assert_fingerprint=None):
         # Argument compatibility/mapping with
         # https://docs.docker.com/engine/articles/https/
         # This diverges from the Docker CLI in that users can specify 'tls'
         # here, but also disable any public/default CA pool verification by
-        # leaving verify=False
-        # "client_cert" must have both or neither cert/key files. In
-        # either case, Alert the user when both are expected, but any are
+        # leaving tls_verify=False
+        self.assert_hostname = assert_hostname
+        self.assert_fingerprint = assert_fingerprint
+        # TLS v1.0 seems to be the safest default; SSLv23 fails in mysterious
+        # ways: https://github.com/docker/docker-py/issues/963
+        self.ssl_version = ssl_version or ssl.PROTOCOL_TLSv1
+        # "tls" and "tls_verify" must have both or neither cert/key files
+        # In either case, Alert the user when both are expected, but any are
         # missing.
         if client_cert:
@ -34,15 +37,15 @@ class TLSConfig:
             tls_cert, tls_key = client_cert
         except ValueError:
             raise errors.TLSParameterError(
-                'client_cert must be a tuple of'
+                'client_config must be a tuple of'
                 ' (client certificate, key file)'
-            ) from None
+            )
         if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
                 not os.path.isfile(tls_key)):
             raise errors.TLSParameterError(
                 'Path to a certificate and key files must be provided'
-                ' through the client_cert param'
+                ' through the client_config param'
             )
         self.cert = (tls_cert, tls_key)
@ -51,13 +54,12 @@ class TLSConfig:
         self.ca_cert = ca_cert
         if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
             raise errors.TLSParameterError(
-                'Invalid CA certificate provided for `ca_cert`.'
+                'Invalid CA certificate provided for `tls_ca_cert`.'
             )
     def configure_client(self, client):
-        """
-        Configure a client with these TLS options.
-        """
+        client.ssl_version = self.ssl_version
         if self.verify and self.ca_cert:
             client.verify = self.ca_cert
         else:
@ -65,3 +67,9 @@ class TLSConfig:
         if self.cert:
             client.cert = self.cert
+        client.mount('https://', ssladapter.SSLAdapter(
+            ssl_version=self.ssl_version,
+            assert_hostname=self.assert_hostname,
+            assert_fingerprint=self.assert_fingerprint,
+        ))
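
Putting the 1.8.0-rc2 side of this diff together, a minimal sketch using the pre-2.0 `docker.Client` entry point that this tag shipped (certificate paths and host are illustrative):

import ssl
import docker
from docker.tls import TLSConfig

tls_config = TLSConfig(
    client_cert=('/certs/cert.pem', '/certs/key.pem'),
    ca_cert='/certs/ca.pem',
    verify=True,
    ssl_version=ssl.PROTOCOL_TLSv1,
    assert_hostname=False,
)
client = docker.Client(base_url='https://dockerhost:2376', tls=tls_config)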

View File

@ -1,12 +0,0 @@
from .unixconn import UnixHTTPAdapter
try:
from .npipeconn import NpipeHTTPAdapter
from .npipesocket import NpipeSocket
except ImportError:
pass
try:
from .sshconn import SSHHTTPAdapter
except ImportError:
pass

View File

@ -1,13 +0,0 @@
import requests.adapters
class BaseHTTPAdapter(requests.adapters.HTTPAdapter):
def close(self):
super().close()
if hasattr(self, 'pools'):
self.pools.clear()
# Fix for requests 2.32.2+:
# https://github.com/psf/requests/commit/c98e4d133ef29c46a9b68cd783087218a8075e05
def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None):
return self.get_connection(request.url, proxies)

View File

@ -1,102 +0,0 @@
import queue
import requests.adapters
import urllib3
import urllib3.connection
from .. import constants
from .basehttpadapter import BaseHTTPAdapter
from .npipesocket import NpipeSocket
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class NpipeHTTPConnection(urllib3.connection.HTTPConnection):
def __init__(self, npipe_path, timeout=60):
super().__init__(
'localhost', timeout=timeout
)
self.npipe_path = npipe_path
self.timeout = timeout
def connect(self):
sock = NpipeSocket()
sock.settimeout(self.timeout)
sock.connect(self.npipe_path)
self.sock = sock
class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, npipe_path, timeout=60, maxsize=10):
super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.npipe_path = npipe_path
self.timeout = timeout
def _new_conn(self):
return NpipeHTTPConnection(
self.npipe_path, self.timeout
)
# When re-using connections, urllib3 tries to call select() on our
# NpipeSocket instance, causing a crash. To circumvent this, we override
# _get_conn, where that check happens.
def _get_conn(self, timeout):
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError as ae: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from ae
except queue.Empty:
if self.block:
raise urllib3.exceptions.EmptyPoolError(
self,
"Pool reached maximum size and no more "
"connections are allowed."
) from None
# Oh well, we'll create a new connection then
return conn or self._new_conn()
class NpipeHTTPAdapter(BaseHTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['npipe_path',
'pools',
'timeout',
'max_pool_size']
def __init__(self, base_url, timeout=60,
pool_connections=constants.DEFAULT_NUM_POOLS,
max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
self.npipe_path = base_url.replace('npipe://', '')
self.timeout = timeout
self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
super().__init__()
def get_connection(self, url, proxies=None):
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
pool = NpipeHTTPConnectionPool(
self.npipe_path, self.timeout,
maxsize=self.max_pool_size
)
self.pools[url] = pool
return pool
def request_url(self, request, proxies):
# The select_proxy utility in requests errors out when the provided URL
# doesn't have a hostname, like is the case when using a UNIX socket.
# Since proxies are an irrelevant notion in the case of UNIX sockets
# anyway, we simply return the path URL directly.
# See also: https://github.com/docker/docker-py/issues/811
return request.path_url
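
On Windows, the `npipe://` scheme in `base_url` selects this adapter; a minimal sketch against the default engine pipe:

import docker

client = docker.APIClient(base_url='npipe:////./pipe/docker_engine')
print(client.version()['ApiVersion'])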

View File

@ -1,230 +0,0 @@
import functools
import io
import time
import pywintypes
import win32api
import win32event
import win32file
import win32pipe
cERROR_PIPE_BUSY = 0xe7
cSECURITY_SQOS_PRESENT = 0x100000
cSECURITY_ANONYMOUS = 0
MAXIMUM_RETRY_COUNT = 10
def check_closed(f):
@functools.wraps(f)
def wrapped(self, *args, **kwargs):
if self._closed:
raise RuntimeError(
'Can not reuse socket after connection was closed.'
)
return f(self, *args, **kwargs)
return wrapped
class NpipeSocket:
""" Partial implementation of the socket API over windows named pipes.
This implementation is only designed to be used as a client socket,
and server-specific methods (bind, listen, accept...) are not
implemented.
"""
def __init__(self, handle=None):
self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
self._handle = handle
self._closed = False
def accept(self):
raise NotImplementedError()
def bind(self, address):
raise NotImplementedError()
def close(self):
self._handle.Close()
self._closed = True
@check_closed
def connect(self, address, retry_count=0):
try:
handle = win32file.CreateFile(
address,
win32file.GENERIC_READ | win32file.GENERIC_WRITE,
0,
None,
win32file.OPEN_EXISTING,
(cSECURITY_ANONYMOUS
| cSECURITY_SQOS_PRESENT
| win32file.FILE_FLAG_OVERLAPPED),
0
)
except win32pipe.error as e:
# See Remarks:
# https://msdn.microsoft.com/en-us/library/aa365800.aspx
if e.winerror == cERROR_PIPE_BUSY:
# Another program or thread has grabbed our pipe instance
# before we got to it. Wait for availability and attempt to
# connect again.
retry_count = retry_count + 1
if (retry_count < MAXIMUM_RETRY_COUNT):
time.sleep(1)
return self.connect(address, retry_count)
raise e
self.flags = win32pipe.GetNamedPipeInfo(handle)[0]
self._handle = handle
self._address = address
@check_closed
def connect_ex(self, address):
return self.connect(address)
@check_closed
def detach(self):
self._closed = True
return self._handle
@check_closed
def dup(self):
return NpipeSocket(self._handle)
def getpeername(self):
return self._address
def getsockname(self):
return self._address
def getsockopt(self, level, optname, buflen=None):
raise NotImplementedError()
def ioctl(self, control, option):
raise NotImplementedError()
def listen(self, backlog):
raise NotImplementedError()
def makefile(self, mode=None, bufsize=None):
if mode.strip('b') != 'r':
raise NotImplementedError()
rawio = NpipeFileIOBase(self)
if bufsize is None or bufsize <= 0:
bufsize = io.DEFAULT_BUFFER_SIZE
return io.BufferedReader(rawio, buffer_size=bufsize)
@check_closed
def recv(self, bufsize, flags=0):
err, data = win32file.ReadFile(self._handle, bufsize)
return data
@check_closed
def recvfrom(self, bufsize, flags=0):
data = self.recv(bufsize, flags)
return (data, self._address)
@check_closed
def recvfrom_into(self, buf, nbytes=0, flags=0):
return self.recv_into(buf, nbytes, flags), self._address
@check_closed
def recv_into(self, buf, nbytes=0):
readbuf = buf
if not isinstance(buf, memoryview):
readbuf = memoryview(buf)
event = win32event.CreateEvent(None, True, True, None)
try:
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = event
err, data = win32file.ReadFile(
self._handle,
readbuf[:nbytes] if nbytes else readbuf,
overlapped
)
wait_result = win32event.WaitForSingleObject(event, self._timeout)
if wait_result == win32event.WAIT_TIMEOUT:
win32file.CancelIo(self._handle)
raise TimeoutError
return win32file.GetOverlappedResult(self._handle, overlapped, 0)
finally:
win32api.CloseHandle(event)
@check_closed
def send(self, string, flags=0):
event = win32event.CreateEvent(None, True, True, None)
try:
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = event
win32file.WriteFile(self._handle, string, overlapped)
wait_result = win32event.WaitForSingleObject(event, self._timeout)
if wait_result == win32event.WAIT_TIMEOUT:
win32file.CancelIo(self._handle)
raise TimeoutError
return win32file.GetOverlappedResult(self._handle, overlapped, 0)
finally:
win32api.CloseHandle(event)
@check_closed
def sendall(self, string, flags=0):
return self.send(string, flags)
@check_closed
def sendto(self, string, address):
self.connect(address)
return self.send(string)
def setblocking(self, flag):
if flag:
return self.settimeout(None)
return self.settimeout(0)
def settimeout(self, value):
if value is None:
# Blocking mode
self._timeout = win32event.INFINITE
elif not isinstance(value, (float, int)) or value < 0:
raise ValueError('Timeout value out of range')
else:
# Timeout mode - Value converted to milliseconds
self._timeout = int(value * 1000)
def gettimeout(self):
return self._timeout
def setsockopt(self, level, optname, value):
raise NotImplementedError()
@check_closed
def shutdown(self, how):
return self.close()
class NpipeFileIOBase(io.RawIOBase):
def __init__(self, npipe_socket):
self.sock = npipe_socket
def close(self):
super().close()
self.sock = None
def fileno(self):
return self.sock.fileno()
def isatty(self):
return False
def readable(self):
return True
def readinto(self, buf):
return self.sock.recv_into(buf)
def seekable(self):
return False
def writable(self):
return False

View File

@ -1,250 +0,0 @@
import logging
import os
import queue
import signal
import socket
import subprocess
import urllib.parse
import paramiko
import requests.adapters
import urllib3
import urllib3.connection
from .. import constants
from .basehttpadapter import BaseHTTPAdapter
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class SSHSocket(socket.socket):
def __init__(self, host):
super().__init__(
socket.AF_INET, socket.SOCK_STREAM)
self.host = host
self.port = None
self.user = None
if ':' in self.host:
self.host, self.port = self.host.split(':')
if '@' in self.host:
self.user, self.host = self.host.split('@')
self.proc = None
def connect(self, **kwargs):
args = ['ssh']
if self.user:
args = args + ['-l', self.user]
if self.port:
args = args + ['-p', self.port]
args = args + ['--', self.host, 'docker system dial-stdio']
preexec_func = None
if not constants.IS_WINDOWS_PLATFORM:
def f():
signal.signal(signal.SIGINT, signal.SIG_IGN)
preexec_func = f
env = dict(os.environ)
# drop LD_LIBRARY_PATH and SSL_CERT_FILE
env.pop('LD_LIBRARY_PATH', None)
env.pop('SSL_CERT_FILE', None)
self.proc = subprocess.Popen(
args,
env=env,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
preexec_fn=preexec_func)
def _write(self, data):
if not self.proc or self.proc.stdin.closed:
raise Exception('SSH subprocess not initiated. '
'connect() must be called first.')
written = self.proc.stdin.write(data)
self.proc.stdin.flush()
return written
def sendall(self, data):
self._write(data)
def send(self, data):
return self._write(data)
def recv(self, n):
if not self.proc:
raise Exception('SSH subprocess not initiated. '
'connect() must be called first.')
return self.proc.stdout.read(n)
def makefile(self, mode):
if not self.proc:
self.connect()
self.proc.stdout.channel = self
return self.proc.stdout
def close(self):
if not self.proc or self.proc.stdin.closed:
return
self.proc.stdin.write(b'\n\n')
self.proc.stdin.flush()
self.proc.terminate()
class SSHConnection(urllib3.connection.HTTPConnection):
def __init__(self, ssh_transport=None, timeout=60, host=None):
super().__init__(
'localhost', timeout=timeout
)
self.ssh_transport = ssh_transport
self.timeout = timeout
self.ssh_host = host
def connect(self):
if self.ssh_transport:
sock = self.ssh_transport.open_session()
sock.settimeout(self.timeout)
sock.exec_command('docker system dial-stdio')
else:
sock = SSHSocket(self.ssh_host)
sock.settimeout(self.timeout)
sock.connect()
self.sock = sock
class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
scheme = 'ssh'
def __init__(self, ssh_client=None, timeout=60, maxsize=10, host=None):
super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.ssh_transport = None
self.timeout = timeout
if ssh_client:
self.ssh_transport = ssh_client.get_transport()
self.ssh_host = host
def _new_conn(self):
return SSHConnection(self.ssh_transport, self.timeout, self.ssh_host)
# When re-using connections, urllib3 calls fileno() on our
# SSH channel instance, quickly overloading our fd limit. To avoid this,
# we override _get_conn
def _get_conn(self, timeout):
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError as ae: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from ae
except queue.Empty:
if self.block:
raise urllib3.exceptions.EmptyPoolError(
self,
"Pool reached maximum size and no more "
"connections are allowed."
) from None
# Oh well, we'll create a new connection then
return conn or self._new_conn()
class SSHHTTPAdapter(BaseHTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + [
'pools', 'timeout', 'ssh_client', 'ssh_params', 'max_pool_size'
]
def __init__(self, base_url, timeout=60,
pool_connections=constants.DEFAULT_NUM_POOLS,
max_pool_size=constants.DEFAULT_MAX_POOL_SIZE,
shell_out=False):
self.ssh_client = None
if not shell_out:
self._create_paramiko_client(base_url)
self._connect()
self.ssh_host = base_url
if base_url.startswith('ssh://'):
self.ssh_host = base_url[len('ssh://'):]
self.timeout = timeout
self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
super().__init__()
def _create_paramiko_client(self, base_url):
logging.getLogger("paramiko").setLevel(logging.WARNING)
self.ssh_client = paramiko.SSHClient()
base_url = urllib.parse.urlparse(base_url)
self.ssh_params = {
"hostname": base_url.hostname,
"port": base_url.port,
"username": base_url.username
}
ssh_config_file = os.path.expanduser("~/.ssh/config")
if os.path.exists(ssh_config_file):
conf = paramiko.SSHConfig()
with open(ssh_config_file) as f:
conf.parse(f)
host_config = conf.lookup(base_url.hostname)
if 'proxycommand' in host_config:
self.ssh_params["sock"] = paramiko.ProxyCommand(
host_config['proxycommand']
)
if 'hostname' in host_config:
self.ssh_params['hostname'] = host_config['hostname']
if base_url.port is None and 'port' in host_config:
self.ssh_params['port'] = host_config['port']
if base_url.username is None and 'user' in host_config:
self.ssh_params['username'] = host_config['user']
if 'identityfile' in host_config:
self.ssh_params['key_filename'] = host_config['identityfile']
self.ssh_client.load_system_host_keys()
self.ssh_client.set_missing_host_key_policy(paramiko.RejectPolicy())
def _connect(self):
if self.ssh_client:
self.ssh_client.connect(**self.ssh_params)
def get_connection(self, url, proxies=None):
if not self.ssh_client:
return SSHConnectionPool(
ssh_client=self.ssh_client,
timeout=self.timeout,
maxsize=self.max_pool_size,
host=self.ssh_host
)
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
# Connection is closed; try a reconnect
if self.ssh_client and not self.ssh_client.get_transport():
self._connect()
pool = SSHConnectionPool(
ssh_client=self.ssh_client,
timeout=self.timeout,
maxsize=self.max_pool_size,
host=self.ssh_host
)
self.pools[url] = pool
return pool
def close(self):
super().close()
if self.ssh_client:
self.ssh_client.close()
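
Both transports above are reachable through an `ssh://` base URL; a sketch (host and user are illustrative):

import docker

# Default: paramiko transport; unknown host keys are rejected
# (RejectPolicy above), so the host must already be in known_hosts.
client = docker.DockerClient(base_url='ssh://user@dockerhost')

# use_ssh_client=True shells out to the local `ssh` binary (SSHSocket),
# so ~/.ssh/config, ProxyJump and agent forwarding behave as usual.
shelled = docker.DockerClient(base_url='ssh://user@dockerhost',
                              use_ssh_client=True)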

View File

@ -1,86 +0,0 @@
import socket
import requests.adapters
import urllib3
import urllib3.connection
from .. import constants
from .basehttpadapter import BaseHTTPAdapter
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class UnixHTTPConnection(urllib3.connection.HTTPConnection):
def __init__(self, base_url, unix_socket, timeout=60):
super().__init__(
'localhost', timeout=timeout
)
self.base_url = base_url
self.unix_socket = unix_socket
self.timeout = timeout
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
sock.connect(self.unix_socket)
self.sock = sock
class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.base_url = base_url
self.socket_path = socket_path
self.timeout = timeout
def _new_conn(self):
return UnixHTTPConnection(
self.base_url, self.socket_path, self.timeout
)
class UnixHTTPAdapter(BaseHTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools',
'socket_path',
'timeout',
'max_pool_size']
def __init__(self, socket_url, timeout=60,
pool_connections=constants.DEFAULT_NUM_POOLS,
max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
socket_path = socket_url.replace('http+unix://', '')
if not socket_path.startswith('/'):
socket_path = f"/{socket_path}"
self.socket_path = socket_path
self.timeout = timeout
self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
super().__init__()
def get_connection(self, url, proxies=None):
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
pool = UnixHTTPConnectionPool(
url, self.socket_path, self.timeout,
maxsize=self.max_pool_size
)
self.pools[url] = pool
return pool
def request_url(self, request, proxies):
# The select_proxy utility in requests errors out when the provided URL
# doesn't have a hostname, like is the case when using a UNIX socket.
# Since proxies are an irrelevant notion in the case of UNIX sockets
# anyway, we simply return the path URL directly.
# See also: https://github.com/docker/docker-py/issues/811
return request.path_url
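
The adapter can also be mounted on a plain `requests` session; a sketch, assuming a local engine socket at the default path (note that `request_url` above makes the hostname irrelevant):

import requests
from docker.transport import UnixHTTPAdapter

session = requests.Session()
session.mount('http+unix://',
              UnixHTTPAdapter('http+unix:///var/run/docker.sock'))
# The host part is ignored; every request dials the socket path.
resp = session.get('http+unix://localhost/_ping')
print(resp.status_code, resp.text)         # 200 OK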

View File

@ -1,24 +0,0 @@
from .containers import ContainerConfig, DeviceRequest, HostConfig, LogConfig, Ulimit
from .daemon import CancellableStream
from .healthcheck import Healthcheck
from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig
from .services import (
ConfigReference,
ContainerSpec,
DNSConfig,
DriverConfig,
EndpointSpec,
Mount,
NetworkAttachmentConfig,
Placement,
PlacementPreference,
Privileges,
Resources,
RestartPolicy,
RollbackConfig,
SecretReference,
ServiceMode,
TaskTemplate,
UpdateConfig,
)
from .swarm import SwarmExternalCA, SwarmSpec

View File

@ -1,4 +0,0 @@
class DictType(dict):
def __init__(self, init):
for k, v in init.items():
self[k] = v

View File

@ -1,790 +0,0 @@
from .. import errors
from ..utils.utils import (
convert_port_bindings,
convert_tmpfs_mounts,
convert_volume_binds,
format_environment,
format_extra_hosts,
normalize_links,
parse_bytes,
parse_devices,
split_command,
version_gte,
version_lt,
)
from .base import DictType
from .healthcheck import Healthcheck
class LogConfigTypesEnum:
_values = (
'json-file',
'syslog',
'journald',
'gelf',
'fluentd',
'none'
)
JSON, SYSLOG, JOURNALD, GELF, FLUENTD, NONE = _values
class LogConfig(DictType):
"""
Configure logging for a container, when provided as an argument to
:py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
You may refer to the
`official logging driver documentation <https://docs.docker.com/config/containers/logging/configure/>`_
for more information.
Args:
type (str): Indicate which log driver to use. A set of valid drivers
is provided as part of the :py:attr:`LogConfig.types`
enum. Other values may be accepted depending on the engine version
and available logging plugins.
config (dict): A driver-dependent configuration dictionary. Please
refer to the driver's documentation for a list of valid config
keys.
Example:
>>> from docker.types import LogConfig
>>> lc = LogConfig(type=LogConfig.types.JSON, config={
... 'max-size': '1g',
... 'labels': 'production_status,geo'
... })
>>> hc = client.create_host_config(log_config=lc)
>>> container = client.create_container('busybox', 'true',
... host_config=hc)
>>> client.inspect_container(container)['HostConfig']['LogConfig']
{
'Type': 'json-file',
'Config': {'labels': 'production_status,geo', 'max-size': '1g'}
}
"""
types = LogConfigTypesEnum
def __init__(self, **kwargs):
log_driver_type = kwargs.get('type', kwargs.get('Type'))
config = kwargs.get('config', kwargs.get('Config')) or {}
if config and not isinstance(config, dict):
raise ValueError("LogConfig.config must be a dictionary")
super().__init__({
'Type': log_driver_type,
'Config': config
})
@property
def type(self):
return self['Type']
@type.setter
def type(self, value):
self['Type'] = value
@property
def config(self):
return self['Config']
def set_config_value(self, key, value):
""" Set a the value for ``key`` to ``value`` inside the ``config``
dict.
"""
self.config[key] = value
def unset_config(self, key):
""" Remove the ``key`` property from the ``config`` dict. """
if key in self.config:
del self.config[key]
class Ulimit(DictType):
"""
Create a ulimit declaration to be used with
:py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
Args:
name (str): Which ulimit will this apply to. The valid names can be
found in '/etc/security/limits.conf' on a gnu/linux system.
soft (int): The soft limit for this ulimit. Optional.
hard (int): The hard limit for this ulimit. Optional.
Example:
>>> nproc_limit = docker.types.Ulimit(name='nproc', soft=1024)
>>> hc = client.create_host_config(ulimits=[nproc_limit])
>>> container = client.create_container(
'busybox', 'true', host_config=hc
)
>>> client.inspect_container(container)['HostConfig']['Ulimits']
[{'Name': 'nproc', 'Hard': 0, 'Soft': 1024}]
"""
def __init__(self, **kwargs):
name = kwargs.get('name', kwargs.get('Name'))
soft = kwargs.get('soft', kwargs.get('Soft'))
hard = kwargs.get('hard', kwargs.get('Hard'))
if not isinstance(name, str):
raise ValueError("Ulimit.name must be a string")
if soft and not isinstance(soft, int):
raise ValueError("Ulimit.soft must be an integer")
if hard and not isinstance(hard, int):
raise ValueError("Ulimit.hard must be an integer")
super().__init__({
'Name': name,
'Soft': soft,
'Hard': hard
})
@property
def name(self):
return self['Name']
@name.setter
def name(self, value):
self['Name'] = value
@property
def soft(self):
return self.get('Soft')
@soft.setter
def soft(self, value):
self['Soft'] = value
@property
def hard(self):
return self.get('Hard')
@hard.setter
def hard(self, value):
self['Hard'] = value
class DeviceRequest(DictType):
"""
Create a device request to be used with
:py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
Args:
driver (str): Which driver to use for this device. Optional.
count (int): Number of devices to request. Optional.
Set to -1 to request all available devices.
device_ids (list): List of strings for device IDs. Optional.
Set either ``count`` or ``device_ids``.
capabilities (list): List of lists of strings to request
capabilities. Optional. The global list acts like an OR,
and the sub-lists are AND. The driver will try to satisfy
one of the sub-lists.
Available capabilities for the ``nvidia`` driver can be found
`here <https://github.com/NVIDIA/nvidia-container-runtime>`_.
options (dict): Driver-specific options. Optional.
"""
def __init__(self, **kwargs):
driver = kwargs.get('driver', kwargs.get('Driver'))
count = kwargs.get('count', kwargs.get('Count'))
device_ids = kwargs.get('device_ids', kwargs.get('DeviceIDs'))
capabilities = kwargs.get('capabilities', kwargs.get('Capabilities'))
options = kwargs.get('options', kwargs.get('Options'))
if driver is None:
driver = ''
elif not isinstance(driver, str):
raise ValueError('DeviceRequest.driver must be a string')
if count is None:
count = 0
elif not isinstance(count, int):
raise ValueError('DeviceRequest.count must be an integer')
if device_ids is None:
device_ids = []
elif not isinstance(device_ids, list):
raise ValueError('DeviceRequest.device_ids must be a list')
if capabilities is None:
capabilities = []
elif not isinstance(capabilities, list):
raise ValueError('DeviceRequest.capabilities must be a list')
if options is None:
options = {}
elif not isinstance(options, dict):
raise ValueError('DeviceRequest.options must be a dict')
super().__init__({
'Driver': driver,
'Count': count,
'DeviceIDs': device_ids,
'Capabilities': capabilities,
'Options': options
})
@property
def driver(self):
return self['Driver']
@driver.setter
def driver(self, value):
self['Driver'] = value
@property
def count(self):
return self['Count']
@count.setter
def count(self, value):
self['Count'] = value
@property
def device_ids(self):
return self['DeviceIDs']
@device_ids.setter
def device_ids(self, value):
self['DeviceIDs'] = value
@property
def capabilities(self):
return self['Capabilities']
@capabilities.setter
def capabilities(self, value):
self['Capabilities'] = value
@property
def options(self):
return self['Options']
@options.setter
def options(self, value):
self['Options'] = value
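
The most common use of `DeviceRequest` is GPU access; a sketch, assuming the NVIDIA container runtime is installed (the image tag is illustrative):

import docker
from docker.types import DeviceRequest

client = docker.from_env()
# count=-1 requests every available device for the 'nvidia' driver.
out = client.containers.run(
    'nvidia/cuda:12.2.0-base-ubuntu22.04', 'nvidia-smi',
    device_requests=[DeviceRequest(driver='nvidia', count=-1,
                                   capabilities=[['gpu']])],
    remove=True,
)
print(out.decode())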
class HostConfig(dict):
def __init__(self, version, binds=None, port_bindings=None,
lxc_conf=None, publish_all_ports=False, links=None,
privileged=False, dns=None, dns_search=None,
volumes_from=None, network_mode=None, restart_policy=None,
cap_add=None, cap_drop=None, devices=None, extra_hosts=None,
read_only=None, pid_mode=None, ipc_mode=None,
security_opt=None, ulimits=None, log_config=None,
mem_limit=None, memswap_limit=None, mem_reservation=None,
kernel_memory=None, mem_swappiness=None, cgroup_parent=None,
group_add=None, cpu_quota=None, cpu_period=None,
blkio_weight=None, blkio_weight_device=None,
device_read_bps=None, device_write_bps=None,
device_read_iops=None, device_write_iops=None,
oom_kill_disable=False, shm_size=None, sysctls=None,
tmpfs=None, oom_score_adj=None, dns_opt=None, cpu_shares=None,
cpuset_cpus=None, userns_mode=None, uts_mode=None,
pids_limit=None, isolation=None, auto_remove=False,
storage_opt=None, init=None, init_path=None,
volume_driver=None, cpu_count=None, cpu_percent=None,
nano_cpus=None, cpuset_mems=None, runtime=None, mounts=None,
cpu_rt_period=None, cpu_rt_runtime=None,
device_cgroup_rules=None, device_requests=None,
cgroupns=None):
if mem_limit is not None:
self['Memory'] = parse_bytes(mem_limit)
if memswap_limit is not None:
self['MemorySwap'] = parse_bytes(memswap_limit)
if mem_reservation:
self['MemoryReservation'] = parse_bytes(mem_reservation)
if kernel_memory:
self['KernelMemory'] = parse_bytes(kernel_memory)
if mem_swappiness is not None:
if not isinstance(mem_swappiness, int):
raise host_config_type_error(
'mem_swappiness', mem_swappiness, 'int'
)
self['MemorySwappiness'] = mem_swappiness
if shm_size is not None:
if isinstance(shm_size, str):
shm_size = parse_bytes(shm_size)
self['ShmSize'] = shm_size
if pid_mode:
if version_lt(version, '1.24') and pid_mode != 'host':
raise host_config_value_error('pid_mode', pid_mode)
self['PidMode'] = pid_mode
if ipc_mode:
self['IpcMode'] = ipc_mode
if privileged:
self['Privileged'] = privileged
if oom_kill_disable:
self['OomKillDisable'] = oom_kill_disable
if oom_score_adj:
if version_lt(version, '1.22'):
raise host_config_version_error('oom_score_adj', '1.22')
if not isinstance(oom_score_adj, int):
raise host_config_type_error(
'oom_score_adj', oom_score_adj, 'int'
)
self['OomScoreAdj'] = oom_score_adj
if publish_all_ports:
self['PublishAllPorts'] = publish_all_ports
if read_only is not None:
self['ReadonlyRootfs'] = read_only
if dns_search:
self['DnsSearch'] = dns_search
if network_mode == 'host' and port_bindings:
raise host_config_incompatible_error(
'network_mode', 'host', 'port_bindings'
)
self['NetworkMode'] = network_mode or 'default'
if restart_policy:
if not isinstance(restart_policy, dict):
raise host_config_type_error(
'restart_policy', restart_policy, 'dict'
)
self['RestartPolicy'] = restart_policy
if cap_add:
self['CapAdd'] = cap_add
if cap_drop:
self['CapDrop'] = cap_drop
if devices:
self['Devices'] = parse_devices(devices)
if group_add:
self['GroupAdd'] = [str(grp) for grp in group_add]
if dns is not None:
self['Dns'] = dns
if dns_opt is not None:
self['DnsOptions'] = dns_opt
if security_opt is not None:
if not isinstance(security_opt, list):
raise host_config_type_error(
'security_opt', security_opt, 'list'
)
self['SecurityOpt'] = security_opt
if sysctls:
if not isinstance(sysctls, dict):
raise host_config_type_error('sysctls', sysctls, 'dict')
self['Sysctls'] = {}
for k, v in sysctls.items():
self['Sysctls'][k] = str(v)
if volumes_from is not None:
if isinstance(volumes_from, str):
volumes_from = volumes_from.split(',')
self['VolumesFrom'] = volumes_from
if binds is not None:
self['Binds'] = convert_volume_binds(binds)
if port_bindings is not None:
self['PortBindings'] = convert_port_bindings(port_bindings)
if extra_hosts is not None:
if isinstance(extra_hosts, dict):
extra_hosts = format_extra_hosts(extra_hosts)
self['ExtraHosts'] = extra_hosts
if links is not None:
self['Links'] = normalize_links(links)
if isinstance(lxc_conf, dict):
formatted = []
for k, v in lxc_conf.items():
formatted.append({'Key': k, 'Value': str(v)})
lxc_conf = formatted
if lxc_conf is not None:
self['LxcConf'] = lxc_conf
if cgroup_parent is not None:
self['CgroupParent'] = cgroup_parent
if ulimits is not None:
if not isinstance(ulimits, list):
raise host_config_type_error('ulimits', ulimits, 'list')
self['Ulimits'] = []
for lmt in ulimits:
if not isinstance(lmt, Ulimit):
lmt = Ulimit(**lmt)
self['Ulimits'].append(lmt)
if log_config is not None:
if not isinstance(log_config, LogConfig):
if not isinstance(log_config, dict):
raise host_config_type_error(
'log_config', log_config, 'LogConfig'
)
log_config = LogConfig(**log_config)
self['LogConfig'] = log_config
if cpu_quota:
if not isinstance(cpu_quota, int):
raise host_config_type_error('cpu_quota', cpu_quota, 'int')
self['CpuQuota'] = cpu_quota
if cpu_period:
if not isinstance(cpu_period, int):
raise host_config_type_error('cpu_period', cpu_period, 'int')
self['CpuPeriod'] = cpu_period
if cpu_shares:
if not isinstance(cpu_shares, int):
raise host_config_type_error('cpu_shares', cpu_shares, 'int')
self['CpuShares'] = cpu_shares
if cpuset_cpus:
self['CpusetCpus'] = cpuset_cpus
if cpuset_mems:
if not isinstance(cpuset_mems, str):
raise host_config_type_error(
'cpuset_mems', cpuset_mems, 'str'
)
self['CpusetMems'] = cpuset_mems
if cpu_rt_period:
if version_lt(version, '1.25'):
raise host_config_version_error('cpu_rt_period', '1.25')
if not isinstance(cpu_rt_period, int):
raise host_config_type_error(
'cpu_rt_period', cpu_rt_period, 'int'
)
self['CPURealtimePeriod'] = cpu_rt_period
if cpu_rt_runtime:
if version_lt(version, '1.25'):
raise host_config_version_error('cpu_rt_runtime', '1.25')
if not isinstance(cpu_rt_runtime, int):
raise host_config_type_error(
'cpu_rt_runtime', cpu_rt_runtime, 'int'
)
self['CPURealtimeRuntime'] = cpu_rt_runtime
if blkio_weight:
if not isinstance(blkio_weight, int):
raise host_config_type_error(
'blkio_weight', blkio_weight, 'int'
)
if version_lt(version, '1.22'):
raise host_config_version_error('blkio_weight', '1.22')
self["BlkioWeight"] = blkio_weight
if blkio_weight_device:
if not isinstance(blkio_weight_device, list):
raise host_config_type_error(
'blkio_weight_device', blkio_weight_device, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('blkio_weight_device', '1.22')
self["BlkioWeightDevice"] = blkio_weight_device
if device_read_bps:
if not isinstance(device_read_bps, list):
raise host_config_type_error(
'device_read_bps', device_read_bps, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_read_bps', '1.22')
self["BlkioDeviceReadBps"] = device_read_bps
if device_write_bps:
if not isinstance(device_write_bps, list):
raise host_config_type_error(
'device_write_bps', device_write_bps, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_write_bps', '1.22')
self["BlkioDeviceWriteBps"] = device_write_bps
if device_read_iops:
if not isinstance(device_read_iops, list):
raise host_config_type_error(
'device_read_iops', device_read_iops, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_read_iops', '1.22')
self["BlkioDeviceReadIOps"] = device_read_iops
if device_write_iops:
if not isinstance(device_write_iops, list):
raise host_config_type_error(
'device_write_iops', device_write_iops, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_write_iops', '1.22')
self["BlkioDeviceWriteIOps"] = device_write_iops
if tmpfs:
if version_lt(version, '1.22'):
raise host_config_version_error('tmpfs', '1.22')
self["Tmpfs"] = convert_tmpfs_mounts(tmpfs)
if userns_mode:
if version_lt(version, '1.23'):
raise host_config_version_error('userns_mode', '1.23')
if userns_mode != "host":
raise host_config_value_error("userns_mode", userns_mode)
self['UsernsMode'] = userns_mode
if uts_mode:
if uts_mode != "host":
raise host_config_value_error("uts_mode", uts_mode)
self['UTSMode'] = uts_mode
if pids_limit:
if not isinstance(pids_limit, int):
raise host_config_type_error('pids_limit', pids_limit, 'int')
if version_lt(version, '1.23'):
raise host_config_version_error('pids_limit', '1.23')
self["PidsLimit"] = pids_limit
if isolation:
if not isinstance(isolation, str):
raise host_config_type_error('isolation', isolation, 'string')
if version_lt(version, '1.24'):
raise host_config_version_error('isolation', '1.24')
self['Isolation'] = isolation
if auto_remove:
if version_lt(version, '1.25'):
raise host_config_version_error('auto_remove', '1.25')
self['AutoRemove'] = auto_remove
if storage_opt is not None:
if version_lt(version, '1.24'):
raise host_config_version_error('storage_opt', '1.24')
self['StorageOpt'] = storage_opt
if init is not None:
if version_lt(version, '1.25'):
raise host_config_version_error('init', '1.25')
self['Init'] = init
if init_path is not None:
if version_lt(version, '1.25'):
raise host_config_version_error('init_path', '1.25')
if version_gte(version, '1.29'):
# https://github.com/moby/moby/pull/32470
raise host_config_version_error('init_path', '1.29', False)
self['InitPath'] = init_path
if volume_driver is not None:
self['VolumeDriver'] = volume_driver
if cpu_count:
if not isinstance(cpu_count, int):
raise host_config_type_error('cpu_count', cpu_count, 'int')
if version_lt(version, '1.25'):
raise host_config_version_error('cpu_count', '1.25')
self['CpuCount'] = cpu_count
if cpu_percent:
if not isinstance(cpu_percent, int):
raise host_config_type_error('cpu_percent', cpu_percent, 'int')
if version_lt(version, '1.25'):
raise host_config_version_error('cpu_percent', '1.25')
self['CpuPercent'] = cpu_percent
if nano_cpus:
if not isinstance(nano_cpus, int):
raise host_config_type_error('nano_cpus', nano_cpus, 'int')
if version_lt(version, '1.25'):
raise host_config_version_error('nano_cpus', '1.25')
self['NanoCpus'] = nano_cpus
if runtime:
if version_lt(version, '1.25'):
raise host_config_version_error('runtime', '1.25')
self['Runtime'] = runtime
if mounts is not None:
if version_lt(version, '1.30'):
raise host_config_version_error('mounts', '1.30')
self['Mounts'] = mounts
if device_cgroup_rules is not None:
if version_lt(version, '1.28'):
raise host_config_version_error('device_cgroup_rules', '1.28')
if not isinstance(device_cgroup_rules, list):
raise host_config_type_error(
'device_cgroup_rules', device_cgroup_rules, 'list'
)
self['DeviceCgroupRules'] = device_cgroup_rules
if device_requests is not None:
if version_lt(version, '1.40'):
raise host_config_version_error('device_requests', '1.40')
if not isinstance(device_requests, list):
raise host_config_type_error(
'device_requests', device_requests, 'list'
)
self['DeviceRequests'] = []
for req in device_requests:
if not isinstance(req, DeviceRequest):
req = DeviceRequest(**req)
self['DeviceRequests'].append(req)
if cgroupns:
self['CgroupnsMode'] = cgroupns
def host_config_type_error(param, param_value, expected):
return TypeError(
f'Invalid type for {param} param: expected {expected} '
f'but found {type(param_value)}'
)
def host_config_version_error(param, version, less_than=True):
operator = '<' if less_than else '>'
return errors.InvalidVersion(
f'{param} param is not supported in API versions {operator} {version}',
)
def host_config_value_error(param, param_value):
return ValueError(f'Invalid value for {param} param: {param_value}')
def host_config_incompatible_error(param, param_value, incompatible_param):
return errors.InvalidArgument(
f'\"{param_value}\" {param} is incompatible with {incompatible_param}'
)
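
Every option above follows the same two-step gate: check the Python type, then reject the key outright when the negotiated API version predates it. A minimal standalone sketch of that pattern (the helper and its name are illustrative, not part of docker-py):

```python
# Standalone sketch of the version-then-type gating used by HostConfig.
def version_lt(v1, v2):
    # Simplified dotted-version comparison ('1.24' < '1.25').
    return tuple(map(int, v1.split('.'))) < tuple(map(int, v2.split('.')))

def gate_cpu_rt_period(api_version, cpu_rt_period):
    if version_lt(api_version, '1.25'):
        raise ValueError(
            'cpu_rt_period param is not supported in API versions < 1.25'
        )
    if not isinstance(cpu_rt_period, int):
        raise TypeError(
            f'Invalid type for cpu_rt_period param: expected int '
            f'but found {type(cpu_rt_period)}'
        )
    return {'CPURealtimePeriod': cpu_rt_period}

print(gate_cpu_rt_period('1.30', 1_000_000))  # {'CPURealtimePeriod': 1000000}
```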
class ContainerConfig(dict):
def __init__(
self, version, image, command, hostname=None, user=None, detach=False,
stdin_open=False, tty=False, ports=None, environment=None,
volumes=None, network_disabled=False, entrypoint=None,
working_dir=None, domainname=None, host_config=None, mac_address=None,
labels=None, stop_signal=None, networking_config=None,
healthcheck=None, stop_timeout=None, runtime=None
):
if stop_timeout is not None and version_lt(version, '1.25'):
raise errors.InvalidVersion(
'stop_timeout was only introduced in API version 1.25'
)
if healthcheck is not None:
if version_lt(version, '1.24'):
raise errors.InvalidVersion(
'Health options were only introduced in API version 1.24'
)
if version_lt(version, '1.29') and 'StartPeriod' in healthcheck:
raise errors.InvalidVersion(
'healthcheck start period was introduced in API '
'version 1.29'
)
if isinstance(command, str):
command = split_command(command)
if isinstance(entrypoint, str):
entrypoint = split_command(entrypoint)
if isinstance(environment, dict):
environment = format_environment(environment)
if isinstance(labels, list):
labels = {lbl: '' for lbl in labels}
if isinstance(ports, list):
exposed_ports = {}
for port_definition in ports:
port = port_definition
proto = 'tcp'
if isinstance(port_definition, tuple):
if len(port_definition) == 2:
proto = port_definition[1]
port = port_definition[0]
exposed_ports[f'{port}/{proto}'] = {}
ports = exposed_ports
if isinstance(volumes, str):
volumes = [volumes, ]
if isinstance(volumes, list):
volumes_dict = {}
for vol in volumes:
volumes_dict[vol] = {}
volumes = volumes_dict
if healthcheck and isinstance(healthcheck, dict):
healthcheck = Healthcheck(**healthcheck)
attach_stdin = False
attach_stdout = False
attach_stderr = False
stdin_once = False
if not detach:
attach_stdout = True
attach_stderr = True
if stdin_open:
attach_stdin = True
stdin_once = True
self.update({
'Hostname': hostname,
'Domainname': domainname,
'ExposedPorts': ports,
'User': str(user) if user is not None else None,
'Tty': tty,
'OpenStdin': stdin_open,
'StdinOnce': stdin_once,
'AttachStdin': attach_stdin,
'AttachStdout': attach_stdout,
'AttachStderr': attach_stderr,
'Env': environment,
'Cmd': command,
'Image': image,
'Volumes': volumes,
'NetworkDisabled': network_disabled,
'Entrypoint': entrypoint,
'WorkingDir': working_dir,
'HostConfig': host_config,
'NetworkingConfig': networking_config,
'MacAddress': mac_address,
'Labels': labels,
'StopSignal': stop_signal,
'Healthcheck': healthcheck,
'StopTimeout': stop_timeout,
'Runtime': runtime
})
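
Before the request is issued, the constructor normalizes several convenience forms: a port list becomes the ``ExposedPorts`` mapping and a volume list becomes a dict of empty objects. A sketch of the same transforms in isolation:

```python
# Reproduces the normalization above outside the class, for illustration.
ports = [1111, (2222, 'udp'), (3333,)]

exposed_ports = {}
for port_definition in ports:
    port, proto = port_definition, 'tcp'
    if isinstance(port_definition, tuple):
        if len(port_definition) == 2:
            proto = port_definition[1]
        port = port_definition[0]
    exposed_ports[f'{port}/{proto}'] = {}

print(exposed_ports)  # {'1111/tcp': {}, '2222/udp': {}, '3333/tcp': {}}

volumes = ['/var/data', '/var/log']
print({vol: {} for vol in volumes})  # {'/var/data': {}, '/var/log': {}}
```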

View File

@ -1,71 +0,0 @@
import socket
import urllib3
from ..errors import DockerException
class CancellableStream:
"""
Stream wrapper for real-time events, logs, etc. from the server.
Example:
>>> events = client.events()
>>> for event in events:
... print(event)
>>> # and cancel from another thread
>>> events.close()
"""
def __init__(self, stream, response):
self._stream = stream
self._response = response
def __iter__(self):
return self
def __next__(self):
try:
return next(self._stream)
except urllib3.exceptions.ProtocolError:
raise StopIteration from None
except OSError:
raise StopIteration from None
next = __next__
def close(self):
"""
Closes the event streaming.
"""
if not self._response.raw.closed:
# find the underlying socket object
# based on api.client._get_raw_response_socket
sock_fp = self._response.raw._fp.fp
if hasattr(sock_fp, 'raw'):
sock_raw = sock_fp.raw
if hasattr(sock_raw, 'sock'):
sock = sock_raw.sock
elif hasattr(sock_raw, '_sock'):
sock = sock_raw._sock
elif hasattr(sock_fp, 'channel'):
# We're working with a paramiko (SSH) channel, which doesn't
# support cancelable streams with the current implementation
raise DockerException(
'Cancellable streams not supported for the SSH protocol'
)
else:
sock = sock_fp._sock
if hasattr(urllib3.contrib, 'pyopenssl') and isinstance(
sock, urllib3.contrib.pyopenssl.WrappedSocket):
sock = sock.socket
sock.shutdown(socket.SHUT_RDWR)
sock.close()
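
As the docstring shows, ``close()`` is meant to be called from a thread other than the one iterating the stream. A sketch of that usage, assuming the docker SDK is installed and a daemon is reachable:

```python
import threading
import time

import docker  # assumes a reachable Docker daemon

client = docker.APIClient()
events = client.events()  # CancellableStream of daemon events

def consume():
    for event in events:
        print(event)

worker = threading.Thread(target=consume, daemon=True)
worker.start()
time.sleep(5)
events.close()  # shuts the socket down; the iterator then stops
```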

View File

@ -1,88 +0,0 @@
from .base import DictType
class Healthcheck(DictType):
"""
Defines a healthcheck configuration for a container or service.
Args:
test (:py:class:`list` or str): Test to perform to determine
container health. Possible values:
- Empty list: Inherit healthcheck from parent image
- ``["NONE"]``: Disable healthcheck
- ``["CMD", args...]``: exec arguments directly.
- ``["CMD-SHELL", command]``: Run command in the system's
default shell.
If a string is provided, it will be used as a ``CMD-SHELL``
command.
interval (int): The time to wait between checks in nanoseconds. It
should be 0 or at least 1000000 (1 ms).
timeout (int): The time to wait before considering the check to
have hung. It should be 0 or at least 1000000 (1 ms).
retries (int): The number of consecutive failures needed to
consider a container as unhealthy.
start_period (int): Start period for the container to
initialize before starting health-retries countdown in
nanoseconds. It should be 0 or at least 1000000 (1 ms).
"""
def __init__(self, **kwargs):
test = kwargs.get('test', kwargs.get('Test'))
if isinstance(test, str):
test = ["CMD-SHELL", test]
interval = kwargs.get('interval', kwargs.get('Interval'))
timeout = kwargs.get('timeout', kwargs.get('Timeout'))
retries = kwargs.get('retries', kwargs.get('Retries'))
start_period = kwargs.get('start_period', kwargs.get('StartPeriod'))
super().__init__({
'Test': test,
'Interval': interval,
'Timeout': timeout,
'Retries': retries,
'StartPeriod': start_period
})
@property
def test(self):
return self['Test']
@test.setter
def test(self, value):
if isinstance(value, str):
value = ["CMD-SHELL", value]
self['Test'] = value
@property
def interval(self):
return self['Interval']
@interval.setter
def interval(self, value):
self['Interval'] = value
@property
def timeout(self):
return self['Timeout']
@timeout.setter
def timeout(self, value):
self['Timeout'] = value
@property
def retries(self):
return self['Retries']
@retries.setter
def retries(self, value):
self['Retries'] = value
@property
def start_period(self):
return self['StartPeriod']
@start_period.setter
def start_period(self, value):
self['StartPeriod'] = value
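
All durations are expressed in nanoseconds, so values read most naturally as multiples of ``1_000_000_000``. A sketch using the ``docker.types.Healthcheck`` export:

```python
from docker.types import Healthcheck

hc = Healthcheck(
    test='curl -f http://localhost/ || exit 1',  # becomes ["CMD-SHELL", ...]
    interval=30 * 1_000_000_000,      # 30 s between checks
    timeout=10 * 1_000_000_000,       # 10 s before a check counts as hung
    retries=3,
    start_period=5 * 1_000_000_000,   # 5 s grace before retries count
)
print(hc.test)  # ['CMD-SHELL', 'curl -f http://localhost/ || exit 1']
```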

View File

@ -1,128 +0,0 @@
from .. import errors
from ..utils import normalize_links, version_lt
class EndpointConfig(dict):
def __init__(self, version, aliases=None, links=None, ipv4_address=None,
ipv6_address=None, link_local_ips=None, driver_opt=None,
mac_address=None):
if version_lt(version, '1.22'):
raise errors.InvalidVersion(
'Endpoint config is not supported for API version < 1.22'
)
if aliases:
self["Aliases"] = aliases
if links:
self["Links"] = normalize_links(links)
ipam_config = {}
if ipv4_address:
ipam_config['IPv4Address'] = ipv4_address
if ipv6_address:
ipam_config['IPv6Address'] = ipv6_address
if mac_address:
if version_lt(version, '1.25'):
raise errors.InvalidVersion(
'mac_address is not supported for API version < 1.25'
)
self['MacAddress'] = mac_address
if link_local_ips is not None:
if version_lt(version, '1.24'):
raise errors.InvalidVersion(
'link_local_ips is not supported for API version < 1.24'
)
ipam_config['LinkLocalIPs'] = link_local_ips
if ipam_config:
self['IPAMConfig'] = ipam_config
if driver_opt:
if version_lt(version, '1.32'):
raise errors.InvalidVersion(
'DriverOpts is not supported for API version < 1.32'
)
if not isinstance(driver_opt, dict):
raise TypeError('driver_opt must be a dictionary')
self['DriverOpts'] = driver_opt
class NetworkingConfig(dict):
def __init__(self, endpoints_config=None):
if endpoints_config:
self["EndpointsConfig"] = endpoints_config
class IPAMConfig(dict):
"""
Create an IPAM (IP Address Management) config dictionary to be used with
:py:meth:`~docker.api.network.NetworkApiMixin.create_network`.
Args:
driver (str): The IPAM driver to use. Defaults to ``default``.
pool_configs (:py:class:`list`): A list of pool configurations
(:py:class:`~docker.types.IPAMPool`). Defaults to empty list.
options (dict): Driver options as a key-value dictionary.
Defaults to `None`.
Example:
>>> ipam_config = docker.types.IPAMConfig(driver='default')
>>> network = client.create_network('network1', ipam=ipam_config)
"""
def __init__(self, driver='default', pool_configs=None, options=None):
self.update({
'Driver': driver,
'Config': pool_configs or []
})
if options:
if not isinstance(options, dict):
raise TypeError('IPAMConfig options must be a dictionary')
self['Options'] = options
class IPAMPool(dict):
"""
Create an IPAM pool config dictionary to be added to the
``pool_configs`` parameter of
:py:class:`~docker.types.IPAMConfig`.
Args:
subnet (str): Custom subnet for this IPAM pool using the CIDR
notation. Defaults to ``None``.
iprange (str): Custom IP range for endpoints in this IPAM pool using
the CIDR notation. Defaults to ``None``.
gateway (str): Custom IP address for the pool's gateway.
aux_addresses (dict): A dictionary of ``key -> ip_address``
relationships specifying auxiliary addresses that need to be
allocated by the IPAM driver.
Example:
>>> ipam_pool = docker.types.IPAMPool(
subnet='124.42.0.0/16',
iprange='124.42.0.0/24',
gateway='124.42.0.254',
aux_addresses={
'reserved1': '124.42.1.1'
}
)
>>> ipam_config = docker.types.IPAMConfig(
pool_configs=[ipam_pool])
"""
def __init__(self, subnet=None, iprange=None, gateway=None,
aux_addresses=None):
self.update({
'Subnet': subnet,
'IPRange': iprange,
'Gateway': gateway,
'AuxiliaryAddresses': aux_addresses
})

View File

@ -1,870 +0,0 @@
from .. import errors
from ..constants import IS_WINDOWS_PLATFORM
from ..utils import (
check_resource,
convert_service_networks,
format_environment,
format_extra_hosts,
parse_bytes,
split_command,
)
class TaskTemplate(dict):
"""
Describe the task specification to be used when creating or updating a
service.
Args:
container_spec (ContainerSpec): Container settings for containers
started as part of this task.
log_driver (DriverConfig): Log configuration for containers created as
part of the service.
resources (Resources): Resource requirements which apply to each
individual container created as part of the service.
restart_policy (RestartPolicy): Specification for the restart policy
which applies to containers created as part of this service.
placement (Placement): Placement instructions for the scheduler.
If a list is passed instead, it is assumed to be a list of
constraints as part of a :py:class:`Placement` object.
networks (:py:class:`list`): List of network names or IDs or
:py:class:`NetworkAttachmentConfig` to attach the service to.
force_update (int): A counter that triggers an update even if no
relevant parameters have been changed.
"""
def __init__(self, container_spec, resources=None, restart_policy=None,
placement=None, log_driver=None, networks=None,
force_update=None):
self['ContainerSpec'] = container_spec
if resources:
self['Resources'] = resources
if restart_policy:
self['RestartPolicy'] = restart_policy
if placement:
if isinstance(placement, list):
placement = Placement(constraints=placement)
self['Placement'] = placement
if log_driver:
self['LogDriver'] = log_driver
if networks:
self['Networks'] = convert_service_networks(networks)
if force_update is not None:
if not isinstance(force_update, int):
raise TypeError('force_update must be an integer')
self['ForceUpdate'] = force_update
@property
def container_spec(self):
return self.get('ContainerSpec')
@property
def resources(self):
return self.get('Resources')
@property
def restart_policy(self):
return self.get('RestartPolicy')
@property
def placement(self):
return self.get('Placement')
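
A sketch assembling the pieces above into a task template for a service (all values are placeholders):

```python
from docker.types import (
    ContainerSpec,
    DriverConfig,
    Resources,
    RestartPolicy,
    TaskTemplate,
)

template = TaskTemplate(
    container_spec=ContainerSpec('nginx:alpine', env={'MODE': 'prod'}),
    resources=Resources(mem_limit=256 * 1024 * 1024),          # 256 MiB
    restart_policy=RestartPolicy(condition='on-failure', max_attempts=3),
    log_driver=DriverConfig('json-file', options={'max-size': '10m'}),
    placement=['node.role == worker'],  # bare list -> Placement(constraints=...)
)
print(template['Placement'])  # {'Constraints': ['node.role == worker']}
```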
class ContainerSpec(dict):
"""
Describes the behavior of containers that are part of a task, and is used
when declaring a :py:class:`~docker.types.TaskTemplate`.
Args:
image (string): The image name to use for the container.
command (string or list): The command to be run in the image.
args (:py:class:`list`): Arguments to the command.
hostname (string): The hostname to set on the container.
env (dict): Environment variables.
workdir (string): The working directory for commands to run in.
user (string): The user inside the container.
labels (dict): A map of labels to associate with the service.
mounts (:py:class:`list`): A list of specifications for mounts to be
added to containers created as part of the service. See the
:py:class:`~docker.types.Mount` class for details.
stop_grace_period (int): Amount of time to wait for the container to
terminate before forcefully killing it.
secrets (:py:class:`list`): List of :py:class:`SecretReference` to be
made available inside the containers.
tty (boolean): Whether a pseudo-TTY should be allocated.
groups (:py:class:`list`): A list of additional groups that the
container process will run as.
open_stdin (boolean): Open ``stdin``
read_only (boolean): Mount the container's root filesystem as read
only.
stop_signal (string): Set signal to stop the service's containers
healthcheck (Healthcheck): Healthcheck
configuration for this service.
hosts (:py:class:`dict`): A set of host to IP mappings to add to
the container's ``hosts`` file.
dns_config (DNSConfig): Specification for DNS
related configurations in resolver configuration file.
configs (:py:class:`list`): List of :py:class:`ConfigReference` that
will be exposed to the service.
privileges (Privileges): Security options for the service's containers.
isolation (string): Isolation technology used by the service's
containers. Only used for Windows containers.
init (boolean): Run an init inside the container that forwards signals
and reaps processes.
cap_add (:py:class:`list`): A list of kernel capabilities to add to the
default set for the container.
cap_drop (:py:class:`list`): A list of kernel capabilities to drop from
the default set for the container.
sysctls (:py:class:`dict`): A dict of sysctl values to add to
the container
"""
def __init__(self, image, command=None, args=None, hostname=None, env=None,
workdir=None, user=None, labels=None, mounts=None,
stop_grace_period=None, secrets=None, tty=None, groups=None,
open_stdin=None, read_only=None, stop_signal=None,
healthcheck=None, hosts=None, dns_config=None, configs=None,
privileges=None, isolation=None, init=None, cap_add=None,
cap_drop=None, sysctls=None):
self['Image'] = image
if isinstance(command, str):
command = split_command(command)
self['Command'] = command
self['Args'] = args
if hostname is not None:
self['Hostname'] = hostname
if env is not None:
if isinstance(env, dict):
self['Env'] = format_environment(env)
else:
self['Env'] = env
if workdir is not None:
self['Dir'] = workdir
if user is not None:
self['User'] = user
if groups is not None:
self['Groups'] = groups
if stop_signal is not None:
self['StopSignal'] = stop_signal
if stop_grace_period is not None:
self['StopGracePeriod'] = stop_grace_period
if labels is not None:
self['Labels'] = labels
if hosts is not None:
self['Hosts'] = format_extra_hosts(hosts, task=True)
if mounts is not None:
parsed_mounts = []
for mount in mounts:
if isinstance(mount, str):
parsed_mounts.append(Mount.parse_mount_string(mount))
else:
# If mount already parsed
parsed_mounts.append(mount)
self['Mounts'] = parsed_mounts
if secrets is not None:
if not isinstance(secrets, list):
raise TypeError('secrets must be a list')
self['Secrets'] = secrets
if configs is not None:
if not isinstance(configs, list):
raise TypeError('configs must be a list')
self['Configs'] = configs
if dns_config is not None:
self['DNSConfig'] = dns_config
if privileges is not None:
self['Privileges'] = privileges
if healthcheck is not None:
self['Healthcheck'] = healthcheck
if tty is not None:
self['TTY'] = tty
if open_stdin is not None:
self['OpenStdin'] = open_stdin
if read_only is not None:
self['ReadOnly'] = read_only
if isolation is not None:
self['Isolation'] = isolation
if init is not None:
self['Init'] = init
if cap_add is not None:
if not isinstance(cap_add, list):
raise TypeError('cap_add must be a list')
self['CapabilityAdd'] = cap_add
if cap_drop is not None:
if not isinstance(cap_drop, list):
raise TypeError('cap_drop must be a list')
self['CapabilityDrop'] = cap_drop
if sysctls is not None:
if not isinstance(sysctls, dict):
raise TypeError('sysctls must be a dict')
self['Sysctls'] = sysctls
class Mount(dict):
"""
Describes a mounted folder's configuration inside a container. A list of
:py:class:`Mount` would be used as part of a
:py:class:`~docker.types.ContainerSpec`.
Args:
target (string): Container path.
source (string): Mount source (e.g. a volume name or a host path).
type (string): The mount type (``bind`` / ``volume`` / ``tmpfs`` /
``npipe``). Default: ``volume``.
read_only (bool): Whether the mount should be read-only.
consistency (string): The consistency requirement for the mount. One of
``default``, ``consistent``, ``cached``, ``delegated``.
propagation (string): A propagation mode with the value ``[r]private``,
``[r]shared``, or ``[r]slave``. Only valid for the ``bind`` type.
no_copy (bool): False if the volume should be populated with the data
from the target. Default: ``False``. Only valid for the ``volume``
type.
labels (dict): User-defined name and labels for the volume. Only valid
for the ``volume`` type.
driver_config (DriverConfig): Volume driver configuration. Only valid
for the ``volume`` type.
subpath (str): Path inside a volume to mount instead of the volume root.
tmpfs_size (int or string): The size for the tmpfs mount in bytes.
tmpfs_mode (int): The permission mode for the tmpfs mount.
"""
def __init__(self, target, source, type='volume', read_only=False,
consistency=None, propagation=None, no_copy=False,
labels=None, driver_config=None, tmpfs_size=None,
tmpfs_mode=None, subpath=None):
self['Target'] = target
self['Source'] = source
if type not in ('bind', 'volume', 'tmpfs', 'npipe'):
raise errors.InvalidArgument(
f'Unsupported mount type: "{type}"'
)
self['Type'] = type
self['ReadOnly'] = read_only
if consistency:
self['Consistency'] = consistency
if type == 'bind':
if propagation is not None:
self['BindOptions'] = {
'Propagation': propagation
}
if any([labels, driver_config, no_copy, tmpfs_size, tmpfs_mode, subpath]):
raise errors.InvalidArgument(
'Incompatible options have been provided for the bind '
'type mount.'
)
elif type == 'volume':
volume_opts = {}
if no_copy:
volume_opts['NoCopy'] = True
if labels:
volume_opts['Labels'] = labels
if driver_config:
volume_opts['DriverConfig'] = driver_config
if subpath:
volume_opts['Subpath'] = subpath
if volume_opts:
self['VolumeOptions'] = volume_opts
if any([propagation, tmpfs_size, tmpfs_mode]):
raise errors.InvalidArgument(
'Incompatible options have been provided for the volume '
'type mount.'
)
elif type == 'tmpfs':
tmpfs_opts = {}
if tmpfs_mode:
if not isinstance(tmpfs_mode, int):
raise errors.InvalidArgument(
'tmpfs_mode must be an integer'
)
tmpfs_opts['Mode'] = tmpfs_mode
if tmpfs_size:
tmpfs_opts['SizeBytes'] = parse_bytes(tmpfs_size)
if tmpfs_opts:
self['TmpfsOptions'] = tmpfs_opts
if any([propagation, labels, driver_config, no_copy]):
raise errors.InvalidArgument(
'Incompatible options have been provided for the tmpfs '
'type mount.'
)
@classmethod
def parse_mount_string(cls, string):
parts = string.split(':')
if len(parts) > 3:
raise errors.InvalidArgument(
f'Invalid mount format "{string}"'
)
if len(parts) == 1:
return cls(target=parts[0], source=None)
else:
target = parts[1]
source = parts[0]
mount_type = 'volume'
if source.startswith('/') or (
IS_WINDOWS_PLATFORM and source[0].isalpha() and
source[1] == ':'
):
# FIXME: That windows condition will fail earlier since we
# split on ':'. We should look into doing a smarter split
# if we detect we are on Windows.
mount_type = 'bind'
read_only = not (len(parts) == 2 or parts[2] == 'rw')
return cls(target, source, read_only=read_only, type=mount_type)
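
``parse_mount_string`` distinguishes named volumes from bind mounts by whether the source looks like a path. A sketch of the three accepted shapes:

```python
from docker.types import Mount

print(Mount.parse_mount_string('/data'))              # anonymous volume
print(Mount.parse_mount_string('cache_vol:/cache'))   # named volume
print(Mount.parse_mount_string('/srv/app:/app:ro'))   # bind mount, read-only
```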
class Resources(dict):
"""
Configures resource allocation for containers when made part of a
:py:class:`~docker.types.ContainerSpec`.
Args:
cpu_limit (int): CPU limit in units of 10^9 CPU shares.
mem_limit (int): Memory limit in Bytes.
cpu_reservation (int): CPU reservation in units of 10^9 CPU shares.
mem_reservation (int): Memory reservation in Bytes.
generic_resources (dict or :py:class:`list`): Node level generic
resources, for example a GPU, using the following format:
``{ resource_name: resource_value }``. Alternatively, a list of
resource specifications as defined by the Engine API.
"""
def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None,
mem_reservation=None, generic_resources=None):
limits = {}
reservation = {}
if cpu_limit is not None:
limits['NanoCPUs'] = cpu_limit
if mem_limit is not None:
limits['MemoryBytes'] = mem_limit
if cpu_reservation is not None:
reservation['NanoCPUs'] = cpu_reservation
if mem_reservation is not None:
reservation['MemoryBytes'] = mem_reservation
if generic_resources is not None:
reservation['GenericResources'] = (
_convert_generic_resources_dict(generic_resources)
)
if limits:
self['Limits'] = limits
if reservation:
self['Reservations'] = reservation
def _convert_generic_resources_dict(generic_resources):
if isinstance(generic_resources, list):
return generic_resources
if not isinstance(generic_resources, dict):
raise errors.InvalidArgument(
'generic_resources must be a dict or a list '
f'(found {type(generic_resources)})'
)
resources = []
for kind, value in generic_resources.items():
resource_type = None
if isinstance(value, int):
resource_type = 'DiscreteResourceSpec'
elif isinstance(value, str):
resource_type = 'NamedResourceSpec'
else:
kv = {kind: value}
raise errors.InvalidArgument(
f'Unsupported generic resource reservation type: {kv}'
)
resources.append({
resource_type: {'Kind': kind, 'Value': value}
})
return resources
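
A sketch of the dict shorthand and the list of resource specs it expands to (integers become ``DiscreteResourceSpec``, strings become ``NamedResourceSpec``):

```python
from docker.types import Resources

res = Resources(generic_resources={'GPU': 2, 'SSD': 'ssd-1'})
for spec in res['Reservations']['GenericResources']:
    print(spec)
# {'DiscreteResourceSpec': {'Kind': 'GPU', 'Value': 2}}
# {'NamedResourceSpec': {'Kind': 'SSD', 'Value': 'ssd-1'}}
```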
class UpdateConfig(dict):
"""
Used to specify the way container updates should be performed by a service.
Args:
parallelism (int): Maximum number of tasks to be updated in one
iteration (0 means unlimited parallelism). Default: 0.
delay (int): Amount of time between updates, in nanoseconds.
failure_action (string): Action to take if an updated task fails to
run, or stops running during the update. Acceptable values are
``continue``, ``pause``, as well as ``rollback`` since API v1.28.
Default: ``continue``
monitor (int): Amount of time to monitor each updated task for
failures, in nanoseconds.
max_failure_ratio (float): The fraction of tasks that may fail during
an update before the failure action is invoked, specified as a
floating point number between 0 and 1. Default: 0
order (string): Specifies the order of operations when rolling out an
updated task. Either ``start-first`` or ``stop-first`` are accepted.
"""
def __init__(self, parallelism=0, delay=None, failure_action='continue',
monitor=None, max_failure_ratio=None, order=None):
self['Parallelism'] = parallelism
if delay is not None:
self['Delay'] = delay
if failure_action not in ('pause', 'continue', 'rollback'):
raise errors.InvalidArgument(
'failure_action must be one of `pause`, `continue`, `rollback`'
)
self['FailureAction'] = failure_action
if monitor is not None:
if not isinstance(monitor, int):
raise TypeError('monitor must be an integer')
self['Monitor'] = monitor
if max_failure_ratio is not None:
if not isinstance(max_failure_ratio, (float, int)):
raise TypeError('max_failure_ratio must be a float')
if max_failure_ratio > 1 or max_failure_ratio < 0:
raise errors.InvalidArgument(
'max_failure_ratio must be a number between 0 and 1'
)
self['MaxFailureRatio'] = max_failure_ratio
if order is not None:
if order not in ('start-first', 'stop-first'):
raise errors.InvalidArgument(
'order must be either `start-first` or `stop-first`'
)
self['Order'] = order
class RollbackConfig(UpdateConfig):
"""
Used to specify the way container rollbacks should be performed by a
service
Args:
parallelism (int): Maximum number of tasks to be rolled back in one
iteration (0 means unlimited parallelism). Default: 0
delay (int): Amount of time between rollbacks, in nanoseconds.
failure_action (string): Action to take if a rolled back task fails to
run, or stops running during the rollback. Acceptable values are
``continue``, ``pause`` or ``rollback``.
Default: ``continue``
monitor (int): Amount of time to monitor each rolled back task for
failures, in nanoseconds.
max_failure_ratio (float): The fraction of tasks that may fail during
a rollback before the failure action is invoked, specified as a
floating point number between 0 and 1. Default: 0
order (string): Specifies the order of operations when rolling out a
rolled back task. Either ``start-first`` or ``stop-first`` are
accepted.
"""
pass
class RestartConditionTypesEnum:
_values = (
'none',
'on-failure',
'any',
)
NONE, ON_FAILURE, ANY = _values
class RestartPolicy(dict):
"""
Used when creating a :py:class:`~docker.types.ContainerSpec`,
dictates whether a container should restart after stopping or failing.
Args:
condition (string): Condition for restart (``none``, ``on-failure``,
or ``any``). Default: ``none``.
delay (int): Delay between restart attempts. Default: 0
max_attempts (int): Maximum attempts to restart a given container
before giving up. Default value is 0, which is ignored.
window (int): Time window used to evaluate the restart policy. Default
value is 0, which is unbounded.
"""
condition_types = RestartConditionTypesEnum
def __init__(self, condition=RestartConditionTypesEnum.NONE, delay=0,
max_attempts=0, window=0):
if condition not in self.condition_types._values:
raise TypeError(
f'Invalid RestartPolicy condition {condition}'
)
self['Condition'] = condition
self['Delay'] = delay
self['MaxAttempts'] = max_attempts
self['Window'] = window
class DriverConfig(dict):
"""
Indicates which driver to use, as well as its configuration. Can be used
as ``log_driver`` in a :py:class:`~docker.types.ContainerSpec`,
for the ``driver_config`` in a volume :py:class:`~docker.types.Mount`, or
as the driver object in
:py:meth:`create_secret`.
Args:
name (string): Name of the driver to use.
options (dict): Driver-specific options. Default: ``None``.
"""
def __init__(self, name, options=None):
self['Name'] = name
if options:
self['Options'] = options
class EndpointSpec(dict):
"""
Describes properties to access and load-balance a service.
Args:
mode (string): The mode of resolution to use for internal load
balancing between tasks (``'vip'`` or ``'dnsrr'``). Defaults to
``'vip'`` if not provided.
ports (dict): Exposed ports that this service is accessible on from the
outside, in the form of ``{ published_port: target_port }`` or
``{ published_port: <port_config_tuple> }``. Port config tuple format
is ``(target_port [, protocol [, publish_mode]])``.
Ports can only be provided if the ``vip`` resolution mode is used.
"""
def __init__(self, mode=None, ports=None):
if ports:
self['Ports'] = convert_service_ports(ports)
if mode:
self['Mode'] = mode
def convert_service_ports(ports):
if isinstance(ports, list):
return ports
if not isinstance(ports, dict):
raise TypeError(
'Invalid type for ports, expected dict or list'
)
result = []
for k, v in ports.items():
port_spec = {
'Protocol': 'tcp',
'PublishedPort': k
}
if isinstance(v, tuple):
port_spec['TargetPort'] = v[0]
if len(v) >= 2 and v[1] is not None:
port_spec['Protocol'] = v[1]
if len(v) == 3:
port_spec['PublishMode'] = v[2]
if len(v) > 3:
raise ValueError(
'Service port configuration can have at most 3 elements: '
'(target_port, protocol, mode)'
)
else:
port_spec['TargetPort'] = v
result.append(port_spec)
return result
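
A sketch of the dict shorthand accepted by ``EndpointSpec`` and the port specs it produces:

```python
from docker.types import EndpointSpec

spec = EndpointSpec(mode='vip', ports={8080: 80, 8443: (443, 'tcp', 'host')})
for port in spec['Ports']:
    print(port)
# {'Protocol': 'tcp', 'PublishedPort': 8080, 'TargetPort': 80}
# {'Protocol': 'tcp', 'PublishedPort': 8443, 'TargetPort': 443, 'PublishMode': 'host'}
```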
class ServiceMode(dict):
"""
Indicate whether a service or a job should be deployed as a replicated
or global service, and associated parameters
Args:
mode (string): Can be either ``replicated``, ``global``,
``replicated-job`` or ``global-job``
replicas (int): Number of replicas. For replicated services only.
concurrency (int): Number of concurrent jobs. For replicated job
services only.
"""
def __init__(self, mode, replicas=None, concurrency=None):
replicated_modes = ('replicated', 'replicated-job')
supported_modes = replicated_modes + ('global', 'global-job')
if mode not in supported_modes:
raise errors.InvalidArgument(
'mode must be either "replicated", "global", "replicated-job"'
' or "global-job"'
)
if mode not in replicated_modes:
if replicas is not None:
raise errors.InvalidArgument(
'replicas can only be used for "replicated" or'
' "replicated-job" mode'
)
if concurrency is not None:
raise errors.InvalidArgument(
'concurrency can only be used for "replicated-job" mode'
)
service_mode = self._convert_mode(mode)
self.mode = service_mode
self[service_mode] = {}
if replicas is not None:
if mode == 'replicated':
self[service_mode]['Replicas'] = replicas
if mode == 'replicated-job':
self[service_mode]['MaxConcurrent'] = concurrency or 1
self[service_mode]['TotalCompletions'] = replicas
@staticmethod
def _convert_mode(original_mode):
if original_mode == 'global-job':
return 'GlobalJob'
if original_mode == 'replicated-job':
return 'ReplicatedJob'
return original_mode
@property
def replicas(self):
if 'replicated' in self:
return self['replicated'].get('Replicas')
if 'ReplicatedJob' in self:
return self['ReplicatedJob'].get('TotalCompletions')
return None
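
A sketch of a replicated job, where ``replicas`` maps onto ``TotalCompletions`` and ``concurrency`` onto ``MaxConcurrent``:

```python
from docker.types import ServiceMode

job = ServiceMode('replicated-job', replicas=10, concurrency=2)
print(job)           # {'ReplicatedJob': {'MaxConcurrent': 2, 'TotalCompletions': 10}}
print(job.replicas)  # 10
```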
class SecretReference(dict):
"""
Secret reference to be used as part of a :py:class:`ContainerSpec`.
Describes how a secret is made accessible inside the service's
containers.
Args:
secret_id (string): Secret's ID
secret_name (string): Secret's name as defined at its creation.
filename (string): Name of the file containing the secret. Defaults
to the secret's name if not specified.
uid (string): UID of the secret file's owner. Default: 0
gid (string): GID of the secret file's group. Default: 0
mode (int): File access mode inside the container. Default: 0o444
"""
@check_resource('secret_id')
def __init__(self, secret_id, secret_name, filename=None, uid=None,
gid=None, mode=0o444):
self['SecretName'] = secret_name
self['SecretID'] = secret_id
self['File'] = {
'Name': filename or secret_name,
'UID': uid or '0',
'GID': gid or '0',
'Mode': mode
}
class ConfigReference(dict):
"""
Config reference to be used as part of a :py:class:`ContainerSpec`.
Describes how a config is made accessible inside the service's
containers.
Args:
config_id (string): Config's ID
config_name (string): Config's name as defined at its creation.
filename (string): Name of the file containing the config. Defaults
to the config's name if not specified.
uid (string): UID of the config file's owner. Default: 0
gid (string): GID of the config file's group. Default: 0
mode (int): File access mode inside the container. Default: 0o444
"""
@check_resource('config_id')
def __init__(self, config_id, config_name, filename=None, uid=None,
gid=None, mode=0o444):
self['ConfigName'] = config_name
self['ConfigID'] = config_id
self['File'] = {
'Name': filename or config_name,
'UID': uid or '0',
'GID': gid or '0',
'Mode': mode
}
class Placement(dict):
"""
Placement constraints to be used as part of a :py:class:`TaskTemplate`
Args:
constraints (:py:class:`list` of str): A list of constraints
preferences (:py:class:`list` of tuple): Preferences provide a way
to make the scheduler aware of factors such as topology. They
are provided in order from highest to lowest precedence and
are expressed as ``(strategy, descriptor)`` tuples. See
:py:class:`PlacementPreference` for details.
maxreplicas (int): Maximum number of replicas per node
platforms (:py:class:`list` of tuple): A list of platforms
expressed as ``(arch, os)`` tuples
"""
def __init__(self, constraints=None, preferences=None, platforms=None,
maxreplicas=None):
if constraints is not None:
self['Constraints'] = constraints
if preferences is not None:
self['Preferences'] = []
for pref in preferences:
if isinstance(pref, tuple):
pref = PlacementPreference(*pref)
self['Preferences'].append(pref)
if maxreplicas is not None:
self['MaxReplicas'] = maxreplicas
if platforms:
self['Platforms'] = []
for plat in platforms:
self['Platforms'].append({
'Architecture': plat[0], 'OS': plat[1]
})
class PlacementPreference(dict):
"""
Placement preference to be used as an element in the list of
preferences for :py:class:`Placement` objects.
Args:
strategy (string): The placement strategy to implement. Currently,
the only supported strategy is ``spread``.
descriptor (string): A label descriptor. For the spread strategy,
the scheduler will try to spread tasks evenly over groups of
nodes identified by this label.
"""
def __init__(self, strategy, descriptor):
if strategy != 'spread':
raise errors.InvalidArgument(
f'PlacementPreference strategy value is invalid ({strategy}): '
'must be "spread".'
)
self['Spread'] = {'SpreadDescriptor': descriptor}
class DNSConfig(dict):
"""
Specification for DNS related configurations in resolver configuration
file (``resolv.conf``). Part of a :py:class:`ContainerSpec` definition.
Args:
nameservers (:py:class:`list`): The IP addresses of the name
servers.
search (:py:class:`list`): A search list for host-name lookup.
options (:py:class:`list`): A list of internal resolver variables
to be modified (e.g., ``debug``, ``ndots:3``, etc.).
"""
def __init__(self, nameservers=None, search=None, options=None):
self['Nameservers'] = nameservers
self['Search'] = search
self['Options'] = options
class Privileges(dict):
r"""
Security options for a service's containers.
Part of a :py:class:`ContainerSpec` definition.
Args:
credentialspec_file (str): Load credential spec from this file.
The file is read by the daemon, and must be present in the
CredentialSpecs subdirectory in the docker data directory,
which defaults to ``C:\ProgramData\Docker\`` on Windows.
Can not be combined with credentialspec_registry.
credentialspec_registry (str): Load credential spec from this value
in the Windows registry. The specified registry value must be
located in: ``HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion
\Virtualization\Containers\CredentialSpecs``.
Can not be combined with credentialspec_file.
selinux_disable (boolean): Disable SELinux
selinux_user (string): SELinux user label
selinux_role (string): SELinux role label
selinux_type (string): SELinux type label
selinux_level (string): SELinux level label
"""
def __init__(self, credentialspec_file=None, credentialspec_registry=None,
selinux_disable=None, selinux_user=None, selinux_role=None,
selinux_type=None, selinux_level=None):
credential_spec = {}
if credentialspec_registry is not None:
credential_spec['Registry'] = credentialspec_registry
if credentialspec_file is not None:
credential_spec['File'] = credentialspec_file
if len(credential_spec) > 1:
raise errors.InvalidArgument(
'credentialspec_file and credentialspec_registry are mutually'
' exclusive'
)
selinux_context = {
'Disable': selinux_disable,
'User': selinux_user,
'Role': selinux_role,
'Type': selinux_type,
'Level': selinux_level,
}
if len(credential_spec) > 0:
self['CredentialSpec'] = credential_spec
if len(selinux_context) > 0:
self['SELinuxContext'] = selinux_context
class NetworkAttachmentConfig(dict):
"""
Network attachment options for a service.
Args:
target (str): The target network for attachment.
Can be a network name or ID.
aliases (:py:class:`list`): A list of discoverable alternate names
for the service.
options (:py:class:`dict`): Driver attachment options for the
network target.
"""
def __init__(self, target, aliases=None, options=None):
self['Target'] = target
self['Aliases'] = aliases
self['DriverOpts'] = options

View File

@ -1,119 +0,0 @@
from ..errors import InvalidVersion
from ..utils import version_lt
class SwarmSpec(dict):
"""
Describe a Swarm's configuration and options. Use
:py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec`
to instantiate.
"""
def __init__(self, version, task_history_retention_limit=None,
snapshot_interval=None, keep_old_snapshots=None,
log_entries_for_slow_followers=None, heartbeat_tick=None,
election_tick=None, dispatcher_heartbeat_period=None,
node_cert_expiry=None, external_cas=None, name=None,
labels=None, signing_ca_cert=None, signing_ca_key=None,
ca_force_rotate=None, autolock_managers=None,
log_driver=None):
if task_history_retention_limit is not None:
self['Orchestration'] = {
'TaskHistoryRetentionLimit': task_history_retention_limit
}
if any([snapshot_interval,
keep_old_snapshots,
log_entries_for_slow_followers,
heartbeat_tick,
election_tick]):
self['Raft'] = {
'SnapshotInterval': snapshot_interval,
'KeepOldSnapshots': keep_old_snapshots,
'LogEntriesForSlowFollowers': log_entries_for_slow_followers,
'HeartbeatTick': heartbeat_tick,
'ElectionTick': election_tick
}
if dispatcher_heartbeat_period:
self['Dispatcher'] = {
'HeartbeatPeriod': dispatcher_heartbeat_period
}
ca_config = {}
if node_cert_expiry is not None:
ca_config['NodeCertExpiry'] = node_cert_expiry
if external_cas:
if version_lt(version, '1.25'):
if len(external_cas) > 1:
raise InvalidVersion(
'Support for multiple external CAs is not available '
'for API version < 1.25'
)
ca_config['ExternalCA'] = external_cas[0]
else:
ca_config['ExternalCAs'] = external_cas
if signing_ca_key:
if version_lt(version, '1.30'):
raise InvalidVersion(
'signing_ca_key is not supported in API version < 1.30'
)
ca_config['SigningCAKey'] = signing_ca_key
if signing_ca_cert:
if version_lt(version, '1.30'):
raise InvalidVersion(
'signing_ca_cert is not supported in API version < 1.30'
)
ca_config['SigningCACert'] = signing_ca_cert
if ca_force_rotate is not None:
if version_lt(version, '1.30'):
raise InvalidVersion(
'force_rotate is not supported in API version < 1.30'
)
ca_config['ForceRotate'] = ca_force_rotate
if ca_config:
self['CAConfig'] = ca_config
if autolock_managers is not None:
if version_lt(version, '1.25'):
raise InvalidVersion(
'autolock_managers is not supported in API version < 1.25'
)
self['EncryptionConfig'] = {'AutoLockManagers': autolock_managers}
if log_driver is not None:
if version_lt(version, '1.25'):
raise InvalidVersion(
'log_driver is not supported in API version < 1.25'
)
self['TaskDefaults'] = {'LogDriver': log_driver}
if name is not None:
self['Name'] = name
if labels is not None:
self['Labels'] = labels
class SwarmExternalCA(dict):
"""
Configuration for forwarding signing requests to an external
certificate authority.
Args:
url (string): URL where certificate signing requests should be
sent.
protocol (string): Protocol for communication with the external CA.
options (dict): An object with key/value pairs that are interpreted
as protocol-specific options for the external CA driver.
ca_cert (string): The root CA certificate (in PEM format) this
external CA uses to issue TLS certificates (assumed to be to
the current swarm root CA certificate if not provided).
"""
def __init__(self, url, protocol=None, options=None, ca_cert=None):
self['URL'] = url
self['Protocol'] = protocol
self['Options'] = options
self['CACert'] = ca_cert

View File

@ -0,0 +1 @@
from .unixconn import UnixAdapter # flake8: noqa

View File

@ -0,0 +1,92 @@
# Copyright 2013 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import requests.adapters
import socket
if six.PY3:
import http.client as httplib
else:
import httplib
try:
import requests.packages.urllib3 as urllib3
except ImportError:
import urllib3
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class UnixHTTPConnection(httplib.HTTPConnection, object):
def __init__(self, base_url, unix_socket, timeout=60):
super(UnixHTTPConnection, self).__init__(
'localhost', timeout=timeout)
self.base_url = base_url
self.unix_socket = unix_socket
self.timeout = timeout
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
sock.connect(self.unix_socket)
self.sock = sock
class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, base_url, socket_path, timeout=60):
super(UnixHTTPConnectionPool, self).__init__(
'localhost', timeout=timeout)
self.base_url = base_url
self.socket_path = socket_path
self.timeout = timeout
def _new_conn(self):
return UnixHTTPConnection(self.base_url, self.socket_path,
self.timeout)
class UnixAdapter(requests.adapters.HTTPAdapter):
def __init__(self, socket_url, timeout=60):
socket_path = socket_url.replace('http+unix://', '')
if not socket_path.startswith('/'):
socket_path = '/' + socket_path
self.socket_path = socket_path
self.timeout = timeout
self.pools = RecentlyUsedContainer(10,
dispose_func=lambda p: p.close())
super(UnixAdapter, self).__init__()
def get_connection(self, url, proxies=None):
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
pool = UnixHTTPConnectionPool(
url, self.socket_path, self.timeout
)
self.pools[url] = pool
return pool
def request_url(self, request, proxies):
# The select_proxy utility in requests errors out when the provided URL
# doesn't have a hostname, like is the case when using a UNIX socket.
# Since proxies are an irrelevant notion in the case of UNIX sockets
# anyway, we simply return the path URL directly.
# See also: https://github.com/docker/docker-py/issues/811
return request.path_url
def close(self):
self.pools.clear()
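
The adapter hooks a custom URL scheme into ``requests``, which is how the client reaches the daemon over a UNIX socket. A sketch of using it directly, assuming the 1.8.0-era ``docker.unixconn`` module path and the default socket location:

```python
import requests

from docker.unixconn import UnixAdapter  # module path as of this release

session = requests.Session()
session.mount('http+unix://', UnixAdapter('http+unix:///var/run/docker.sock'))

# The hostname is ignored; request_url() sends only the path over the socket.
resp = session.get('http+unix://localhost/version')
print(resp.json()['ApiVersion'])
```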

View File

@ -1,28 +1,11 @@
-from .build import create_archive, exclude_paths, match_tag, mkbuildcontext, tar
-from .decorators import check_resource, minimum_version, update_headers
 from .utils import (
-    compare_version,
-    convert_filters,
-    convert_port_bindings,
-    convert_service_networks,
-    convert_volume_binds,
-    create_host_config,
-    create_ipam_config,
-    create_ipam_pool,
-    datetime_to_timestamp,
-    decode_json_header,
-    format_environment,
-    format_extra_hosts,
-    kwargs_from_env,
-    normalize_links,
-    parse_bytes,
-    parse_devices,
-    parse_env_file,
-    parse_host,
-    parse_repository_tag,
-    split_command,
-    version_gte,
-    version_lt,
-)
+    compare_version, convert_port_bindings, convert_volume_binds,
+    mkbuildcontext, tar, exclude_paths, parse_repository_tag, parse_host,
+    kwargs_from_env, convert_filters, datetime_to_timestamp, create_host_config,
+    create_container_config, parse_bytes, ping_registry, parse_env_file,
+    version_lt, version_gte, decode_json_header, split_command,
+    create_ipam_config, create_ipam_pool, parse_devices, normalize_links,
+) # flake8: noqa
+from .types import Ulimit, LogConfig # flake8: noqa
+from .decorators import check_resource, minimum_version, update_headers #flake8: noqa

View File

@ -1,260 +0,0 @@
import io
import os
import re
import tarfile
import tempfile
from ..constants import IS_WINDOWS_PLATFORM
from .fnmatch import fnmatch
_SEP = re.compile('/|\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')
_TAG = re.compile(
r"^[a-z0-9]+((\.|_|__|-+)[a-z0-9]+)*"
r"(?::[0-9]+)?(/[a-z0-9]+((\.|_|__|-+)[a-z0-9]+)*)*"
r"(:[a-zA-Z0-9_][a-zA-Z0-9._-]{0,127})?$"
)
def match_tag(tag: str) -> bool:
return bool(_TAG.match(tag))
def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
root = os.path.abspath(path)
exclude = exclude or []
dockerfile = dockerfile or (None, None)
extra_files = []
if dockerfile[1] is not None:
dockerignore_contents = '\n'.join(
(exclude or ['.dockerignore']) + [dockerfile[0]]
)
extra_files = [
('.dockerignore', dockerignore_contents),
dockerfile,
]
return create_archive(
files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile[0])),
root=root, fileobj=fileobj, gzip=gzip, extra_files=extra_files
)
def exclude_paths(root, patterns, dockerfile=None):
"""
Given a root directory path and a list of .dockerignore patterns, return
an iterator of all paths (both regular files and directories) in the root
directory that do *not* match any of the patterns.
All paths returned are relative to the root.
"""
if dockerfile is None:
dockerfile = 'Dockerfile'
patterns.append(f"!{dockerfile}")
pm = PatternMatcher(patterns)
return set(pm.walk(root))
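
A sketch of the resulting behavior: the ``!{dockerfile}`` pattern appended above re-includes the Dockerfile even when an ignore rule matches it (the module path ``docker.utils.build`` matches the current tree):

```python
import os
import tempfile

from docker.utils.build import exclude_paths

# Build a tiny context: Dockerfile, app.py, and a file we want ignored.
root = tempfile.mkdtemp()
for name in ('Dockerfile', 'app.py', 'secret.env'):
    open(os.path.join(root, name), 'w').close()

print(exclude_paths(root, ['*.env', 'Dockerfile']))
# {'app.py', 'Dockerfile'} -- the Dockerfile survives its own ignore rule
```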
def build_file_list(root):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(
longpath.replace(root, '', 1).lstrip('/')
)
return files
def create_archive(root, files=None, fileobj=None, gzip=False,
extra_files=None):
extra_files = extra_files or []
if not fileobj:
fileobj = tempfile.NamedTemporaryFile()
t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
if files is None:
files = build_file_list(root)
extra_names = {e[0] for e in extra_files}
for path in files:
if path in extra_names:
# Extra files override context files with the same name
continue
full_path = os.path.join(root, path)
i = t.gettarinfo(full_path, arcname=path)
if i is None:
# This happens when we encounter a socket file. We can safely
# ignore it and proceed.
continue
# Workaround https://bugs.python.org/issue32713
if i.mtime < 0 or i.mtime > 8**11 - 1:
i.mtime = int(i.mtime)
if IS_WINDOWS_PLATFORM:
# Windows doesn't keep track of the execute bit, so we make files
# and directories executable by default.
i.mode = i.mode & 0o755 | 0o111
if i.isfile():
try:
with open(full_path, 'rb') as f:
t.addfile(i, f)
except OSError as oe:
raise OSError(
f'Can not read file in context: {full_path}'
) from oe
else:
# Directories, FIFOs, symlinks... don't need to be read.
t.addfile(i, None)
for name, contents in extra_files:
info = tarfile.TarInfo(name)
contents_encoded = contents.encode('utf-8')
info.size = len(contents_encoded)
t.addfile(info, io.BytesIO(contents_encoded))
t.close()
fileobj.seek(0)
return fileobj
def mkbuildcontext(dockerfile):
f = tempfile.NamedTemporaryFile()
t = tarfile.open(mode='w', fileobj=f)
if isinstance(dockerfile, io.StringIO):
dfinfo = tarfile.TarInfo('Dockerfile')
raise TypeError('Please use io.BytesIO to create in-memory '
'Dockerfiles with Python 3')
elif isinstance(dockerfile, io.BytesIO):
dfinfo = tarfile.TarInfo('Dockerfile')
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
else:
dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')
t.addfile(dfinfo, dockerfile)
t.close()
f.seek(0)
return f
def split_path(p):
return [pt for pt in re.split(_SEP, p) if pt and pt != '.']
def normalize_slashes(p):
if IS_WINDOWS_PLATFORM:
return '/'.join(split_path(p))
return p
def walk(root, patterns, default=True):
pm = PatternMatcher(patterns)
return pm.walk(root)
# Heavily based on
# https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go
class PatternMatcher:
def __init__(self, patterns):
self.patterns = list(filter(
lambda p: p.dirs, [Pattern(p) for p in patterns]
))
self.patterns.append(Pattern('!.dockerignore'))
def matches(self, filepath):
matched = False
parent_path = os.path.dirname(filepath)
parent_path_dirs = split_path(parent_path)
for pattern in self.patterns:
negative = pattern.exclusion
match = pattern.match(filepath)
if not match and parent_path != '':
if len(pattern.dirs) <= len(parent_path_dirs):
match = pattern.match(
os.path.sep.join(parent_path_dirs[:len(pattern.dirs)])
)
if match:
matched = not negative
return matched
def walk(self, root):
def rec_walk(current_dir):
for f in os.listdir(current_dir):
fpath = os.path.join(
os.path.relpath(current_dir, root), f
)
if fpath.startswith(f".{os.path.sep}"):
fpath = fpath[2:]
match = self.matches(fpath)
if not match:
yield fpath
cur = os.path.join(root, fpath)
if not os.path.isdir(cur) or os.path.islink(cur):
continue
if match:
# If we want to skip this file and it's a directory
# then we should first check to see if there's an
# excludes pattern (e.g. !dir/file) that starts with this
# dir. If so then we can't skip this dir.
skip = True
for pat in self.patterns:
if not pat.exclusion:
continue
if pat.cleaned_pattern.startswith(
normalize_slashes(fpath)):
skip = False
break
if skip:
continue
yield from rec_walk(cur)
return rec_walk(root)
class Pattern:
def __init__(self, pattern_str):
self.exclusion = False
if pattern_str.startswith('!'):
self.exclusion = True
pattern_str = pattern_str[1:]
self.dirs = self.normalize(pattern_str)
self.cleaned_pattern = '/'.join(self.dirs)
@classmethod
def normalize(cls, p):
# Remove trailing spaces
p = p.strip()
# Leading and trailing slashes are not relevant. Yes,
# "foo.py/" must exclude the "foo.py" regular file. "."
# components are not relevant either, even if the whole
# pattern is only ".", as the Docker reference states: "For
# historical reasons, the pattern . is ignored."
# ".." component must be cleared with the potential previous
# component, regardless of whether it exists: "A preprocessing
# step [...] eliminates . and .. elements using Go's
# filepath.".
i = 0
split = split_path(p)
while i < len(split):
if split[i] == '..':
del split[i]
if i > 0:
del split[i - 1]
i -= 1
else:
i += 1
return split
def match(self, filepath):
return fnmatch(normalize_slashes(filepath), self.cleaned_pattern)

View File

@ -1,66 +0,0 @@
import json
import logging
import os
from ..constants import IS_WINDOWS_PLATFORM
DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
log = logging.getLogger(__name__)
def find_config_file(config_path=None):
paths = list(filter(None, [
config_path, # 1
config_path_from_environment(), # 2
os.path.join(home_dir(), DOCKER_CONFIG_FILENAME), # 3
os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4
]))
log.debug(f"Trying paths: {repr(paths)}")
for path in paths:
if os.path.exists(path):
log.debug(f"Found file at path: {path}")
return path
log.debug("No config file found")
return None
def config_path_from_environment():
config_dir = os.environ.get('DOCKER_CONFIG')
if not config_dir:
return None
return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))
def home_dir():
"""
Get the user's home directory, using the same logic as the Docker Engine
client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.
"""
if IS_WINDOWS_PLATFORM:
return os.environ.get('USERPROFILE', '')
else:
return os.path.expanduser('~')
def load_general_config(config_path=None):
config_file = find_config_file(config_path)
if not config_file:
return {}
try:
with open(config_file) as f:
return json.load(f)
except (OSError, ValueError) as e:
# In the case of a legacy `.dockercfg` file, we won't
# be able to load any JSON data.
log.debug(e)
log.debug("All parsing attempts failed - returning empty config")
return {}

View File

@ -4,21 +4,22 @@ from .. import errors
 from . import utils

-def check_resource(resource_name):
-    def decorator(f):
-        @functools.wraps(f)
-        def wrapped(self, resource_id=None, *args, **kwargs):
-            if resource_id is None and kwargs.get(resource_name):
-                resource_id = kwargs.pop(resource_name)
-            if isinstance(resource_id, dict):
-                resource_id = resource_id.get('Id', resource_id.get('ID'))
-            if not resource_id:
-                raise errors.NullResource(
-                    'Resource ID was not provided'
-                )
-            return f(self, resource_id, *args, **kwargs)
-        return wrapped
-    return decorator
+def check_resource(f):
+    @functools.wraps(f)
+    def wrapped(self, resource_id=None, *args, **kwargs):
+        if resource_id is None:
+            if kwargs.get('container'):
+                resource_id = kwargs.pop('container')
+            elif kwargs.get('image'):
+                resource_id = kwargs.pop('image')
+        if isinstance(resource_id, dict):
+            resource_id = resource_id.get('Id')
+        if not resource_id:
+            raise errors.NullResource(
+                'image or container param is undefined'
+            )
+        return f(self, resource_id, *args, **kwargs)
+    return wrapped

 def minimum_version(version):
@ -27,7 +28,9 @@ def minimum_version(version):
         def wrapper(self, *args, **kwargs):
             if utils.version_lt(self._version, version):
                 raise errors.InvalidVersion(
-                    f'{f.__name__} is not available for version < {version}',
+                    '{0} is not available for version < {1}'.format(
+                        f.__name__, version
+                    )
                 )
             return f(self, *args, **kwargs)
         return wrapper
@ -36,10 +39,10 @@ def minimum_version(version):

 def update_headers(f):
     def inner(self, *args, **kwargs):
-        if 'HttpHeaders' in self._general_configs:
-            if not kwargs.get('headers'):
-                kwargs['headers'] = self._general_configs['HttpHeaders']
+        if 'HttpHeaders' in self._auth_configs:
+            if 'headers' not in kwargs:
+                kwargs['headers'] = self._auth_configs['HttpHeaders']
             else:
-                kwargs['headers'].update(self._general_configs['HttpHeaders'])
+                kwargs['headers'].update(self._auth_configs['HttpHeaders'])
         return f(self, *args, **kwargs)
     return inner
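
The ``-`` side above (the current ``main`` code) parameterizes the decorator with the resource keyword's name instead of hard-coding ``container`` and ``image``. A condensed standalone sketch of applying that form, with ``ValueError`` standing in for ``errors.NullResource`` to keep it self-contained:

```python
import functools

def check_resource(resource_name):
    def decorator(f):
        @functools.wraps(f)
        def wrapped(self, resource_id=None, *args, **kwargs):
            if resource_id is None and kwargs.get(resource_name):
                resource_id = kwargs.pop(resource_name)
            if isinstance(resource_id, dict):
                resource_id = resource_id.get('Id', resource_id.get('ID'))
            if not resource_id:
                raise ValueError('Resource ID was not provided')
            return f(self, resource_id, *args, **kwargs)
        return wrapped
    return decorator

class Api:
    @check_resource('network')
    def inspect_network(self, net_id):
        return f'GET /networks/{net_id}'

print(Api().inspect_network(network={'Id': 'abc123'}))  # GET /networks/abc123
```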

View File

@ -1,115 +0,0 @@
"""Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case in account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""
import re
__all__ = ["fnmatch", "fnmatchcase", "translate"]
_cache = {}
_MAXCACHE = 100
def _purge():
"""Clear the pattern cache"""
_cache.clear()
def fnmatch(name, pat):
"""Test whether FILENAME matches PATTERN.
Patterns are Unix shell style:
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you don't want this, use fnmatchcase(FILENAME, PATTERN).
"""
name = name.lower()
pat = pat.lower()
return fnmatchcase(name, pat)
def fnmatchcase(name, pat):
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which doesn't case-normalize
its arguments.
"""
try:
re_pat = _cache[pat]
except KeyError:
res = translate(pat)
if len(_cache) >= _MAXCACHE:
_cache.clear()
_cache[pat] = re_pat = re.compile(res)
return re_pat.match(name) is not None
def translate(pat):
"""Translate a shell PATTERN to a regular expression.
There is no way to quote meta-characters.
"""
i, n = 0, len(pat)
res = '^'
while i < n:
c = pat[i]
i = i + 1
if c == '*':
if i < n and pat[i] == '*':
# is some flavor of "**"
i = i + 1
# Treat **/ as ** so eat the "/"
if i < n and pat[i] == '/':
i = i + 1
if i >= n:
# is "**EOF" - to align with .gitignore just accept all
res = f"{res}.*"
else:
# is "**"
# Note that this allows for any # of /'s (even 0) because
# the .* will eat everything, even /'s
res = f"{res}(.*/)?"
else:
# is "*" so map it to anything but "/"
res = f"{res}[^/]*"
elif c == '?':
# "?" is any char except "/"
res = f"{res}[^/]"
elif c == '[':
j = i
if j < n and pat[j] == '!':
j = j + 1
if j < n and pat[j] == ']':
j = j + 1
while j < n and pat[j] != ']':
j = j + 1
if j >= n:
res = f"{res}\\["
else:
stuff = pat[i:j].replace('\\', '\\\\')
i = j + 1
if stuff[0] == '!':
stuff = f"^{stuff[1:]}"
elif stuff[0] == '^':
stuff = f"\\{stuff}"
res = f'{res}[{stuff}]'
else:
res = res + re.escape(c)
return f"{res}$"

View File

@ -1,74 +0,0 @@
import json
import json.decoder
from ..errors import StreamParseError
json_decoder = json.JSONDecoder()
def stream_as_text(stream):
"""
Given a stream of bytes or text, if any of the items in the stream
are bytes, convert them to text.
This function can be removed once we return text streams
instead of byte streams.
"""
for data in stream:
if not isinstance(data, str):
data = data.decode('utf-8', 'replace')
yield data
def json_splitter(buffer):
"""Attempt to parse a json object from a buffer. If there is at least one
object, return it and the rest of the buffer, otherwise return None.
"""
buffer = buffer.strip()
try:
obj, index = json_decoder.raw_decode(buffer)
rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
return obj, rest
except ValueError:
return None
def json_stream(stream):
"""Given a stream of text, return a stream of json objects.
This handles streams which are inconsistently buffered (some entries may
be newline delimited, and others are not).
"""
return split_buffer(stream, json_splitter, json_decoder.decode)
def line_splitter(buffer, separator='\n'):
index = buffer.find(str(separator))
if index == -1:
return None
return buffer[:index + 1], buffer[index + 1:]
def split_buffer(stream, splitter=None, decoder=lambda a: a):
"""Given a generator which yields strings and a splitter function,
joins all input, splits on the separator and yields each chunk.
Unlike string.split(), each chunk includes the trailing
separator, except for the last one if none was found on the end
of the input.
"""
splitter = splitter or line_splitter
buffered = ''
for data in stream_as_text(stream):
buffered += data
while True:
buffer_split = splitter(buffered)
if buffer_split is None:
break
item, buffered = buffer_split
yield item
if buffered:
try:
yield decoder(buffered)
except Exception as e:
raise StreamParseError(e) from e
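
A short sketch of the splitter in action (module path docker.utils.json_stream assumed): json_stream re-chunks an inconsistently buffered byte stream into one decoded object per JSON document.

from docker.utils.json_stream import json_stream

chunks = [b'{"status": "pul', b'ling"}\n{"status":', b' "done"}']
for obj in json_stream(iter(chunks)):
    print(obj)
# {'status': 'pulling'}
# {'status': 'done'}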

View File

@ -1,83 +0,0 @@
import re
PORT_SPEC = re.compile(
"^" # Match full string
"(" # External part
r"(\[?(?P<host>[a-fA-F\d.:]+)\]?:)?" # Address
r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
")?"
r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
"(?P<proto>/(udp|tcp|sctp))?" # Protocol
"$" # Match full string
)
def add_port_mapping(port_bindings, internal_port, external):
if internal_port in port_bindings:
port_bindings[internal_port].append(external)
else:
port_bindings[internal_port] = [external]
def add_port(port_bindings, internal_port_range, external_range):
if external_range is None:
for internal_port in internal_port_range:
add_port_mapping(port_bindings, internal_port, None)
else:
ports = zip(internal_port_range, external_range)
for internal_port, external_port in ports:
add_port_mapping(port_bindings, internal_port, external_port)
def build_port_bindings(ports):
port_bindings = {}
for port in ports:
internal_port_range, external_range = split_port(port)
add_port(port_bindings, internal_port_range, external_range)
return port_bindings
def _raise_invalid_port(port):
raise ValueError('Invalid port "%s", should be '
'[[remote_ip:]remote_port[-remote_port]:]'
'port[/protocol]' % port)
def port_range(start, end, proto, randomly_available_port=False):
if not start:
return start
if not end:
return [start + proto]
if randomly_available_port:
return [f"{start}-{end}{proto}"]
return [str(port) + proto for port in range(int(start), int(end) + 1)]
def split_port(port):
if hasattr(port, 'legacy_repr'):
# This is the worst hack, but it prevents a bug in Compose 1.14.0
# https://github.com/docker/docker-py/issues/1668
# TODO: remove once fixed in Compose stable
port = port.legacy_repr()
port = str(port)
match = PORT_SPEC.match(port)
if match is None:
_raise_invalid_port(port)
parts = match.groupdict()
host = parts['host']
proto = parts['proto'] or ''
internal = port_range(parts['int'], parts['int_end'], proto)
external = port_range(
parts['ext'], parts['ext_end'], '', len(internal) == 1)
if host is None:
if external is not None and len(internal) != len(external):
raise ValueError('Port ranges don\'t match in length')
return internal, external
else:
if not external:
external = [None] * len(internal)
elif len(internal) != len(external):
raise ValueError('Port ranges don\'t match in length')
return internal, [(host, ext_port) for ext_port in external]
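
For reference, a few illustrative calls against the regex-based parser above (module path docker.utils.ports assumed); the return shape is (internal_ports, external_bindings):

from docker.utils.ports import split_port

print(split_port('8080:80'))
# (['80'], ['8080'])
print(split_port('127.0.0.1:8080:80/udp'))
# (['80/udp'], [('127.0.0.1', '8080')])
print(split_port('1000-1001:2000-2001'))
# (['2000', '2001'], ['1000', '1001'])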

View File

@ -0,0 +1,4 @@
from .ports import (
split_port,
build_port_bindings
) # flake8: noqa

View File

@ -0,0 +1,92 @@
def add_port_mapping(port_bindings, internal_port, external):
if internal_port in port_bindings:
port_bindings[internal_port].append(external)
else:
port_bindings[internal_port] = [external]
def add_port(port_bindings, internal_port_range, external_range):
if external_range is None:
for internal_port in internal_port_range:
add_port_mapping(port_bindings, internal_port, None)
else:
ports = zip(internal_port_range, external_range)
for internal_port, external_port in ports:
add_port_mapping(port_bindings, internal_port, external_port)
def build_port_bindings(ports):
port_bindings = {}
for port in ports:
internal_port_range, external_range = split_port(port)
add_port(port_bindings, internal_port_range, external_range)
return port_bindings
def to_port_range(port):
if not port:
return None
protocol = ""
if "/" in port:
parts = port.split("/")
if len(parts) != 2:
_raise_invalid_port(port)
port, protocol = parts
protocol = "/" + protocol
parts = str(port).split('-')
if len(parts) == 1:
return ["%s%s" % (port, protocol)]
if len(parts) == 2:
full_port_range = range(int(parts[0]), int(parts[1]) + 1)
return ["%s%s" % (p, protocol) for p in full_port_range]
raise ValueError('Invalid port range "%s", should be '
'port or startport-endport' % port)
def _raise_invalid_port(port):
raise ValueError('Invalid port "%s", should be '
'[[remote_ip:]remote_port[-remote_port]:]'
'port[/protocol]' % port)
def split_port(port):
parts = str(port).split(':')
if not 1 <= len(parts) <= 3:
_raise_invalid_port(port)
if len(parts) == 1:
internal_port, = parts
return to_port_range(internal_port), None
if len(parts) == 2:
external_port, internal_port = parts
internal_range = to_port_range(internal_port)
external_range = to_port_range(external_port)
if internal_range is None or external_range is None:
_raise_invalid_port(port)
if len(internal_range) != len(external_range):
raise ValueError('Port ranges don\'t match in length')
return internal_range, external_range
external_ip, external_port, internal_port = parts
internal_range = to_port_range(internal_port)
external_range = to_port_range(external_port)
if not external_range:
external_range = [None] * len(internal_range)
if len(internal_range) != len(external_range):
raise ValueError('Port ranges don\'t match in length')
return internal_range, [(external_ip, ex_port or None)
for ex_port in external_range]

View File

@ -1,77 +0,0 @@
from .utils import format_environment
class ProxyConfig(dict):
'''
Hold the client's proxy configuration
'''
@property
def http(self):
return self.get('http')
@property
def https(self):
return self.get('https')
@property
def ftp(self):
return self.get('ftp')
@property
def no_proxy(self):
return self.get('no_proxy')
@staticmethod
def from_dict(config):
'''
Instantiate a new ProxyConfig from a dictionary that represents a
client configuration, as described in `the documentation`_.
.. _the documentation:
https://docs.docker.com/network/proxy/#configure-the-docker-client
'''
return ProxyConfig(
http=config.get('httpProxy'),
https=config.get('httpsProxy'),
ftp=config.get('ftpProxy'),
no_proxy=config.get('noProxy'),
)
def get_environment(self):
'''
Return a dictionary representing the environment variables used to
set the proxy settings.
'''
env = {}
if self.http:
env['http_proxy'] = env['HTTP_PROXY'] = self.http
if self.https:
env['https_proxy'] = env['HTTPS_PROXY'] = self.https
if self.ftp:
env['ftp_proxy'] = env['FTP_PROXY'] = self.ftp
if self.no_proxy:
env['no_proxy'] = env['NO_PROXY'] = self.no_proxy
return env
def inject_proxy_environment(self, environment):
'''
Given a list of strings representing environment variables, prepend the
environment variables corresponding to the proxy settings.
'''
if not self:
return environment
proxy_env = format_environment(self.get_environment())
if not environment:
return proxy_env
# It is important to prepend our variables, because we want the
# variables defined in "environment" to take precedence.
return proxy_env + environment
def __str__(self):
return (
'ProxyConfig('
f'http={self.http}, https={self.https}, '
f'ftp={self.ftp}, no_proxy={self.no_proxy}'
')'
)
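
A hedged usage sketch of the round trip described in the docstrings above (module path docker.utils.proxy assumed; the proxy addresses are placeholders):

from docker.utils.proxy import ProxyConfig

cfg = ProxyConfig.from_dict({
    'httpProxy': 'http://proxy.example.com:3128',
    'noProxy': 'localhost,127.0.0.1',
})
print(cfg.get_environment())
# {'http_proxy': 'http://proxy.example.com:3128',
#  'HTTP_PROXY': 'http://proxy.example.com:3128',
#  'no_proxy': 'localhost,127.0.0.1',
#  'NO_PROXY': 'localhost,127.0.0.1'}

# Proxy variables are prepended, so entries already in the environment win:
print(cfg.inject_proxy_environment(['http_proxy=manual'])[-1])
# http_proxy=manual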

View File

@ -1,187 +0,0 @@
import errno
import os
import select
import socket as pysocket
import struct
try:
from ..transport import NpipeSocket
except ImportError:
NpipeSocket = type(None)
STDOUT = 1
STDERR = 2
class SocketError(Exception):
pass
# NpipeSockets have their own error types
# pywintypes.error: (109, 'ReadFile', 'The pipe has been ended.')
NPIPE_ENDED = 109
def read(socket, n=4096):
"""
Reads at most n bytes from socket
"""
recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
if not isinstance(socket, NpipeSocket):
if not hasattr(select, "poll"):
# Limited to 1024
select.select([socket], [], [])
else:
poll = select.poll()
poll.register(socket, select.POLLIN | select.POLLPRI)
poll.poll()
try:
if hasattr(socket, 'recv'):
return socket.recv(n)
if isinstance(socket, pysocket.SocketIO):
return socket.read(n)
return os.read(socket.fileno(), n)
except OSError as e:
if e.errno not in recoverable_errors:
raise
except Exception as e:
is_pipe_ended = (isinstance(socket, NpipeSocket) and
len(e.args) > 0 and
e.args[0] == NPIPE_ENDED)
if is_pipe_ended:
# npipes don't support duplex sockets, so we interpret
# a PIPE_ENDED error as a close operation (0-length read).
return ''
raise
def read_exactly(socket, n):
"""
Reads exactly n bytes from socket
Raises SocketError if there isn't enough data
"""
data = b""
while len(data) < n:
next_data = read(socket, n - len(data))
if not next_data:
raise SocketError("Unexpected EOF")
data += next_data
return data
def next_frame_header(socket):
"""
Returns the stream and size of the next frame of data waiting to be read
from socket, according to the protocol defined here:
https://docs.docker.com/engine/api/v1.24/#attach-to-a-container
"""
try:
data = read_exactly(socket, 8)
except SocketError:
return (-1, -1)
stream, actual = struct.unpack('>BxxxL', data)
return (stream, actual)
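
A small illustration of the 8-byte header layout parsed above: one byte for the stream ID, three padding bytes, then a big-endian 32-bit payload size:

import struct

header = struct.pack('>BxxxL', 2, 5)    # a STDERR frame with a 5-byte payload
print(struct.unpack('>BxxxL', header))  # (2, 5)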
def frames_iter(socket, tty):
"""
Return a generator of frames read from socket. A frame is a tuple where
the first item is the stream number and the second item is a chunk of data.
If the tty setting is enabled, the streams are multiplexed into the stdout
stream.
"""
if tty:
return ((STDOUT, frame) for frame in frames_iter_tty(socket))
else:
return frames_iter_no_tty(socket)
def frames_iter_no_tty(socket):
"""
Returns a generator of data read from the socket when the tty setting is
not enabled.
"""
while True:
(stream, n) = next_frame_header(socket)
if n < 0:
break
while n > 0:
result = read(socket, n)
if result is None:
continue
data_length = len(result)
if data_length == 0:
# We have reached EOF
return
n -= data_length
yield (stream, result)
def frames_iter_tty(socket):
"""
Return a generator of data read from the socket when the tty setting is
enabled.
"""
while True:
result = read(socket)
if len(result) == 0:
# We have reached EOF
return
yield result
def consume_socket_output(frames, demux=False):
"""
Iterate through frames read from the socket and return the result.
Args:
demux (bool):
If False, stdout and stderr are multiplexed, and the result is the
concatenation of all the frames. If True, the streams are
demultiplexed, and the result is a 2-tuple where each item is the
concatenation of frames belonging to the same stream.
"""
if demux is False:
# If the streams are multiplexed, the generator returns strings, that
# we just need to concatenate.
return b"".join(frames)
# If the streams are demultiplexed, the generator yields tuples
# (stdout, stderr)
out = [None, None]
for frame in frames:
# It is guaranteed that for each frame, one and only one stream
# is not None.
assert frame != (None, None)
if frame[0] is not None:
if out[0] is None:
out[0] = frame[0]
else:
out[0] += frame[0]
else:
if out[1] is None:
out[1] = frame[1]
else:
out[1] += frame[1]
return tuple(out)
def demux_adaptor(stream_id, data):
"""
Utility to demultiplex stdout and stderr when reading frames from the
socket.
"""
if stream_id == STDOUT:
return (data, None)
elif stream_id == STDERR:
return (None, data)
else:
raise ValueError(f'{stream_id} is not a valid stream')
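
To show how the last two helpers compose, a sketch with synthetic frames in place of a real socket (module path docker.utils.socket assumed):

from docker.utils.socket import (
    STDERR, STDOUT, consume_socket_output, demux_adaptor,
)

frames = [(STDOUT, b'out1 '), (STDERR, b'err1'), (STDOUT, b'out2')]
demuxed = (demux_adaptor(sid, data) for sid, data in frames)
print(consume_socket_output(demuxed, demux=True))
# (b'out1 out2', b'err1')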

docker/utils/types.py Normal file
View File

@ -0,0 +1,96 @@
import six
class LogConfigTypesEnum(object):
_values = (
'json-file',
'syslog',
'journald',
'gelf',
'fluentd',
'none'
)
JSON, SYSLOG, JOURNALD, GELF, FLUENTD, NONE = _values
class DictType(dict):
def __init__(self, init):
for k, v in six.iteritems(init):
self[k] = v
class LogConfig(DictType):
types = LogConfigTypesEnum
def __init__(self, **kwargs):
log_driver_type = kwargs.get('type', kwargs.get('Type'))
config = kwargs.get('config', kwargs.get('Config')) or {}
if config and not isinstance(config, dict):
raise ValueError("LogConfig.config must be a dictionary")
super(LogConfig, self).__init__({
'Type': log_driver_type,
'Config': config
})
@property
def type(self):
return self['Type']
@type.setter
def type(self, value):
self['Type'] = value
@property
def config(self):
return self['Config']
def set_config_value(self, key, value):
self.config[key] = value
def unset_config(self, key):
if key in self.config:
del self.config[key]
class Ulimit(DictType):
def __init__(self, **kwargs):
name = kwargs.get('name', kwargs.get('Name'))
soft = kwargs.get('soft', kwargs.get('Soft'))
hard = kwargs.get('hard', kwargs.get('Hard'))
if not isinstance(name, six.string_types):
raise ValueError("Ulimit.name must be a string")
if soft and not isinstance(soft, int):
raise ValueError("Ulimit.soft must be an integer")
if hard and not isinstance(hard, int):
raise ValueError("Ulimit.hard must be an integer")
super(Ulimit, self).__init__({
'Name': name,
'Soft': soft,
'Hard': hard
})
@property
def name(self):
return self['Name']
@name.setter
def name(self, value):
self['Name'] = value
@property
def soft(self):
return self.get('Soft')
@soft.setter
def soft(self, value):
self['Soft'] = value
@property
def hard(self):
return self.get('Hard')
@hard.setter
def hard(self, value):
self['Hard'] = value
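
For illustration, constructing the two typed dicts defined above (import path as of this release; the values are arbitrary):

from docker.utils.types import LogConfig, Ulimit

lc = LogConfig(type=LogConfig.types.JSON, config={'max-size': '1g'})
lc.set_config_value('max-file', '3')
print(lc)  # {'Type': 'json-file', 'Config': {'max-size': '1g', 'max-file': '3'}}

ulimit = Ulimit(name='nofile', soft=1024, hard=2048)
print(ulimit.name, ulimit.soft, ulimit.hard)  # nofile 1024 2048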

File diff suppressed because it is too large

View File

@ -1,8 +1,2 @@
try: version = "1.8.0-rc2"
from ._version import __version__ version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
except ImportError:
from importlib.metadata import PackageNotFoundError, version
try:
__version__ = version('docker')
except PackageNotFoundError:
__version__ = '0.0.0'

docs-requirements.txt Normal file
View File

@ -0,0 +1 @@
mkdocs==0.9

View File

@ -1,12 +0,0 @@
dl.hide-signature > dt {
display: none;
}
dl.field-list > dt {
/* prevent code blocks from forcing wrapping on the "Parameters" header */
word-break: initial;
}
code.literal{
hyphens: none;
}

View File

@ -1,2 +0,0 @@
{% extends "!page.html" %}
{% set css_files = css_files + ["_static/custom.css"] %}

docs/api.md Normal file

File diff suppressed because it is too large

View File

@ -1,158 +0,0 @@
Low-level API
=============
The main object-orientated API is built on top of :py:class:`APIClient`. Each method on :py:class:`APIClient` maps one-to-one with a REST API endpoint, and returns the response that the API responds with.
It's possible to use :py:class:`APIClient` directly. Some basic things (e.g. running a container) consist of several API calls and are complex to do with the low-level API, but it's useful if you need extra flexibility and power.
.. py:module:: docker.api
.. autoclass:: docker.api.client.APIClient
Configs
-------
.. py:module:: docker.api.config
.. rst-class:: hide-signature
.. autoclass:: ConfigApiMixin
:members:
:undoc-members:
Containers
----------
.. py:module:: docker.api.container
.. rst-class:: hide-signature
.. autoclass:: ContainerApiMixin
:members:
:undoc-members:
Images
------
.. py:module:: docker.api.image
.. rst-class:: hide-signature
.. autoclass:: ImageApiMixin
:members:
:undoc-members:
Building images
---------------
.. py:module:: docker.api.build
.. rst-class:: hide-signature
.. autoclass:: BuildApiMixin
:members:
:undoc-members:
Networks
--------
.. rst-class:: hide-signature
.. autoclass:: docker.api.network.NetworkApiMixin
:members:
:undoc-members:
Volumes
-------
.. py:module:: docker.api.volume
.. rst-class:: hide-signature
.. autoclass:: VolumeApiMixin
:members:
:undoc-members:
Executing commands in containers
--------------------------------
.. py:module:: docker.api.exec_api
.. rst-class:: hide-signature
.. autoclass:: ExecApiMixin
:members:
:undoc-members:
Swarms
------
.. py:module:: docker.api.swarm
.. rst-class:: hide-signature
.. autoclass:: SwarmApiMixin
:members:
:undoc-members:
Services
--------
.. py:module:: docker.api.service
.. rst-class:: hide-signature
.. autoclass:: ServiceApiMixin
:members:
:undoc-members:
Plugins
-------
.. py:module:: docker.api.plugin
.. rst-class:: hide-signature
.. autoclass:: PluginApiMixin
:members:
:undoc-members:
Secrets
-------
.. py:module:: docker.api.secret
.. rst-class:: hide-signature
.. autoclass:: SecretApiMixin
:members:
:undoc-members:
The Docker daemon
-----------------
.. py:module:: docker.api.daemon
.. rst-class:: hide-signature
.. autoclass:: DaemonApiMixin
:members:
:undoc-members:
Configuration types
-------------------
.. py:module:: docker.types
.. autoclass:: ConfigReference
.. autoclass:: ContainerSpec
.. autoclass:: DNSConfig
.. autoclass:: DriverConfig
.. autoclass:: EndpointSpec
.. autoclass:: Healthcheck
.. autoclass:: IPAMConfig
.. autoclass:: IPAMPool
.. autoclass:: LogConfig
.. autoclass:: Mount
.. autoclass:: NetworkAttachmentConfig
.. autoclass:: Placement
.. autoclass:: PlacementPreference
.. autoclass:: Privileges
.. autoclass:: Resources
.. autoclass:: RestartPolicy
.. autoclass:: RollbackConfig
.. autoclass:: SecretReference
.. autoclass:: ServiceMode
.. autoclass:: SwarmExternalCA
.. autoclass:: SwarmSpec(*args, **kwargs)
.. autoclass:: TaskTemplate
.. autoclass:: Ulimit
.. autoclass:: UpdateConfig

File diff suppressed because it is too large

docs/change_log.md Normal file
View File

@ -0,0 +1,899 @@
Change Log
==========
1.8.0
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.8.0+is%3Aclosed)
### Features
* Added `Client.update_container` method (Update resource configs of a
container)
* Added support for gzipped context in `Client.build`
* Added ability to specify IP address when connecting a container to a
network
* Added `tmpfs` support to `Client.create_host_config`
* Added support for the `changes` param in `Client.commit`
* Added support for the `follow` param in `Client.logs`
* Added support for the `check_duplicate` param in `Client.create_network`
* Added support for the `decode` param in `Client.push` and `Client.pull`
* Added `docker.from_env` shortcut function. Instantiates a client with
`kwargs_from_env`
* `kwargs_from_env` now supports an optional `environment` parameter.
If present, values will be fetched from this dictionary instead of
`os.environ`
### Bugfixes
* Fixed a bug where some environment variables specified through
`create_container` would be improperly formatted
* Fixed an issue where the default TLS version in TLSConfig would
break in some environments. `docker-py` now uses TLSv1 by default
This setting can be overridden using the `ssl_version` param in
`kwargs_from_env` or the `TLSConfig` constructor
* Fixed a bug where using the unix socket connection would raise
an error in some edge-case situations
* Fixed a bug where `tcp` hosts would fail to connect to TLS-enabled
endpoints.
### Miscellaneous
* Default API version is now 1.22 (introduced in Docker 1.10.0)
1.7.2
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.7.2+is%3Aclosed)
### Bugfixes
* Fixed a bug where TLS verification was improperly executed when providing
a custom CA certificate.
1.7.1
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.7.1+is%3Aclosed)
### Features
* Added support for `shm_size` in `Client.create_host_config`
### Bugfixes
* Fixed a bug where Dockerfile would sometimes be excluded from the build
context.
* Fixed a bug where a docker config file containing unknown keys would raise
an exception.
* Fixed an issue with SSL connections behaving improperly when pyOpenSSL
was installed in the same environment.
* Several TLS configuration improvements
1.7.0
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.7.0+is%3Aclosed)
### Features
* Added support for custom IPAM configuration in `Client.create_network`
* Added input support to `Client.exec_create`
* Added support for `stop_signal` in `Client.create_host_config`
* Added support for custom HTTP headers in Docker config file.
* Added support for unspecified transfer protocol in `base_url` when TLS is
enabled.
### Bugfixes
* Fixed a bug where the `filters` parameter in `Client.volumes` would not be
applied properly.
* Fixed a bug where memory limits would parse to incorrect values.
* Fixed a bug where the `devices` parameter in `Client.create_host_config`
would sometimes be misinterpreted.
* Fixed a bug where instantiating a `Client` object would sometimes crash if
`base_url` was unspecified.
* Fixed a bug where an error message related to TLS configuration would link
to a non-existent (outdated) docs page.
### Miscellaneous
* Processing of `.dockerignore` has been made significantly faster.
* Dropped explicit support for Python 3.2
1.6.0
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.6.0+is%3Aclosed)
### Features
* Added support for the `since` param in `Client.logs` (introduced in API
version 1.19)
* Added support for the `DOCKER_CONFIG` environment variable when looking up
auth config
* Added support for the `stream` param in `Client.stats` (when set to `False`,
allows user to retrieve a single snapshot instead of a constant data stream)
* Added support for the `mem_swappiness`, `oom_kill_disable` params
in `Client.create_host_config`
* Added support for build arguments in `Client.build` through the `buildargs`
param.
### Bugfixes
* Fixed a bug where streaming data over HTTPS would sometimes behave
incorrectly with Python 3.x
* Fixed a bug where commands containing unicode characters would be incorrectly
handled by `Client.create_container`.
* Fixed a bug where auth config credentials containing unicode characters would
cause failures when pushing / pulling images.
* Setting `tail=0` in `Client.logs` no longer shows past logs.
* Fixed a bug where `Client.pull` and `Client.push` couldn't handle image names
containing a dot.
### Miscellaneous
* Default API version is now 1.21 (introduced in Docker 1.9.0)
* Several test improvements and cleanup that should make the suite easier to
expand and maintain moving forward.
1.5.0
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.5.0+is%3Aclosed)
### Features
* Added support for the networking API introduced in Docker 1.9.0
(`Client.networks`, `Client.create_network`, `Client.remove_network`,
`Client.inspect_network`, `Client.connect_container_to_network`,
`Client.disconnect_container_from_network`).
* Added support for the volumes API introduced in Docker 1.9.0
(`Client.volumes`, `Client.create_volume`, `Client.inspect_volume`,
`Client.remove_volume`).
* Added support for the `group_add` parameter in `create_host_config`.
* Added support for the CPU CFS (`cpu_quota` and `cpu_period`) parameters
in `create_host_config`.
* Added support for the archive API endpoint (`Client.get_archive`,
`Client.put_archive`).
* Added support for `ps_args` parameter in `Client.top`.
### Bugfixes
* Fixed a bug where specifying volume binds with unicode characters would
fail.
* Fixed a bug where providing an explicit protocol in `Client.port` would fail
to yield the expected result.
* Fixed a bug where the priority protocol returned by `Client.port` would be UDP
instead of the expected TCP.
### Miscellaneous
* Broke up Client code into several files to facilitate maintenance and
contribution.
* Added contributing guidelines to the repository.
1.4.0
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.4.0+is%3Aclosed)
### Deprecation warning
* `docker.utils.create_host_config` is deprecated in favor of
`Client.create_host_config`.
### Features
* Added `utils.parse_env_file` to support env-files.
See [docs](http://docker-py.readthedocs.org/en/latest/api/#create_container)
for usage.
* Added support for arbitrary log drivers
* Added support for URL paths in the docker host URL (`base_url`)
* Drastically improved support for .dockerignore syntax
### Bugfixes
* Fixed a bug where exec_inspect would allow invocation when the API version
was too low.
* Fixed a bug where `docker.utils.ports.split_port` would break if an open
range was provided.
* Fixed a bug where invalid image IDs / container IDs could be provided to
bypass or reroute request URLs
* Default `base_url` now adapts depending on the OS (better Windows support)
* Fixed a bug where using an integer as the user param in
`Client.create_container` would result in a failure.
### Miscellaneous
* Docs fixes
* Integration tests are now run as part of our continuous integration.
* Updated dependency on `six` library
1.3.1
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.3.1+is%3Aclosed)
### Bugfixes
* Fixed a bug where empty chunks in streams were misinterpreted as EOF.
* `datetime` arguments passed to `Client.events` parameters `since` and
`until` are now always considered to be UTC.
* Fixed a bug with Docker 1.7.x where the wrong auth headers were being passed
in `Client.build`, failing builds that depended on private images.
* `Client.exec_create` can now retrieve the `Id` key from a dictionary for its
container param.
### Miscellaneous
* 404 API status now raises `docker.errors.NotFound`. This exception inherits
`APIError` which was used previously.
* Docs fixes
* Test fixes
1.3.0
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.3.0+is%3Aclosed)
### Deprecation warning
* As announced in the 1.2.0 release, `Client.execute` has been removed in favor
of `Client.exec_create` and `Client.exec_start`.
### Features
* `extra_hosts` parameter in host config can now also be provided as a list.
* Added support for `memory_limit` and `memswap_limit` in host config to
comply with recent deprecations.
* Added support for `volume_driver` in `Client.create_container`
* Added support for advanced modes in volume binds (using the `mode` key)
* Added support for `decode` in `Client.build` (decodes JSON stream on the fly)
* docker-py will now look for login configuration under the new config path,
and fall back to the old `~/.dockercfg` path if not present.
### Bugfixes
* Configuration file lookup now also work on platforms that don't define a
`$HOME` environment variable.
* Fixed an issue where pinging a v2 private registry wasn't working properly,
preventing users from pushing and pulling.
* `pull` parameter in `Client.build` now defaults to `False`. Fixes a bug where
the default options would try to force a pull of non-remote images.
* Fixed a bug where getting logs from tty-enabled containers wasn't working
properly with more recent versions of Docker
* `Client.push` and `Client.pull` will now raise exceptions if the HTTP
status indicates an error.
* Fixed a bug with adapter lookup when using the Unix socket adapter
(this affected some weird edge cases, see issue #647 for details)
* Fixed a bug where providing `timeout=None` to `Client.stop` would result
in an exception despite the usecase being valid.
* Added `git@` to the list of valid prefixes for remote build paths.
### Dependencies
* The websocket-client dependency has been updated to a more recent version.
This new version also supports Python 3.x, making `attach_socket` available
on those versions as well.
### Documentation
* Various fixes
1.2.3
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.2.3+is%3Aclosed)
### Deprecation warning
* Passing host config in the `Client.start` method is now deprecated. Please
use the `host_config` in `Client.create_container` instead.
### Features
* Added support for `privileged` param in `Client.exec_create`
(only available in API >= 1.19)
* Volume binds can now also be specified as a list of strings.
### Bugfixes
* Fixed a bug where the `read_only` param in host_config wasn't handled
properly.
* Fixed a bug in `Client.execute` (this method is still deprecated).
* The `cpuset` param in `Client.create_container` is also passed as
the `CpusetCpus` param (`Cpuset` deprecated in recent versions of the API)
* Fixed an issue with integration tests being run inside a container
(`make integration-test`)
* Fixed a bug where an empty string would be considered a valid container ID
or image ID.
* Fixed a bug in `Client.insert`
### Documentation
* Various fixes
1.2.2
-----
### Bugfixes
* Fixed a bug where parameters passed to `Client.exec_resize` would be ignored (#576)
* Fixed a bug where auth config wouldn't be resolved properly in `Client.pull` (#577)
1.2.1
-----
### Bugfixes
* Fixed a bug where the check_resource decorator would break with some
argument-passing methods. (#573)
1.2.0
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.2.0+is%3Aclosed)
### Deprecation warning
* `Client.execute` is being deprecated in favor of the more dev-friendly
`Client.exec_start` and `Client.exec_create`. **It will be removed in 1.3.0**
### Features
* Added `exec_create`, `exec_start`, `exec_inspect` and `exec_resize` to
client, accurately mirroring the
[Exec API](https://docs.docker.com/reference/api/docker_remote_api_v1.18/#exec-create)
* Added `auth_config` param to `Client.pull` (allows to use one-off credentials
for this pull request)
* Added support for `ipc_mode` in host config.
* Added support for the `log_config` param in host config.
* Added support for the `ulimit` param in host config.
* Added support for container resource limits in `Client.build`.
* When a resource identifier (image or container ID) is passed to a Client
method, we now check for `None` values to avoid crashing
(now raises `docker.errors.NullResource`)
* Added tools to parse port ranges inside the new `docker.utils.ports` package.
* Added a `version_info` attribute to the `docker` package.
### Bugfixes
* Fixed a bug in `Client.port` where absence of a certain key in the
container's JSON would raise an error (now just returns `None`)
* Fixed a bug with the `trunc` parameter in `Client.containers` having no
effect (moved functionality to the client)
* Several improvements have been made to the `Client.import_image` method.
* Fixed pushing / pulling to
[v2 registries](https://github.com/docker/distribution)
* Fixed a bug where passing a container dictionary to `Client.commit`
would fail
### Miscellaneous
* Default API version has been bumped to 1.18 (Docker Engine 1.6.0)
* Several testing coverage improvements
* Docs fixes and improvements
1.1.0
-----
### Features
* Added `dockerfile` param support to `Client.build` (mirrors
`docker build -f` behavior)
* Added the ability to specify `'auto'` as `version` in `Client.__init__`,
allowing the constructor to autodetect the daemon's API version.
### Bugfixes
* Fixed a bug where decoding a result stream using the `decode` parameter
would break when using Python 3.x
* Fixed a bug where some files in `.dockerignore` weren't being handled
properly
* Fixed `resolve_authconfig` issues by bringing it closer to Docker Engine's
behavior. This should fix all issues encountered with private registry auth
* Fixed an issue where passwords containing a colon weren't being handled
properly.
* Bumped `requests` version requirement, which should fix most of the SSL
issues encountered recently.
### Miscellaneous
* Several integration test improvements.
* Fixed some unclosed resources in unit tests.
* Several docs improvements.
1.0.0
-----
### Features
* Added new `Client.rename` method (`docker rename`)
* Added new `Client.stats` method (`docker stats`)
* Added `read_only` param support to `utils.create_host_config` and
`Client.start` (`docker run --read-only`)
* Added `pid_mode` param support to `utils.create_host_config` and
`Client.start` (`docker run --pid='host'`)
* Added `since`, `until` and `filters` params to `Client.events`.
* Added `decode` parameter to `Client.stats` and `Client.events` to decode
JSON objects on the fly (False by default).
### Bugfixes
* Fixed a bug that caused `Client.build` to crash when the provided source was
a remote source.
### Miscellaneous
* Default API version has been bumped to 1.17 (Docker Engine 1.5.0)
* `Client.timeout` is now a public attribute, and users are encouraged to use it
when request timeouts need to be changed at runtime.
* Added `Client.api_version` as a read-only property.
* The `memswap_limit` argument in `Client.create_container` now accepts string
type values similar to `mem_limit` ('6g', '120000k', etc.)
* Improved documentation
0.7.2
-----
### Features
* Added support for `mac_address` in `Client.create_container`
### Bugfixes
* Fixed a bug where streaming responses (`pull`, `push`, `logs`, etc.) were
unreliable (#300)
* Fixed a bug where resolve_authconfig wouldn't properly resolve configuration
for private repositories (#468)
* Fixed a bug where some errors wouldn't be properly constructed in
`client.py`, leading to unhelpful exceptions bubbling up (#466)
* Fixed a bug where `Client.build` would try to close context when externally
provided (`custom_context == True`) (#458)
* Fixed an issue in `create_host_config` where empty sequences wouldn't be
interpreted properly (#462)
### Miscellaneous
* Added `resolve_authconfig` tests.
0.7.1
-----
### Bugfixes
* `setup.py` now indicates a maximum version of requests to work around the
boot2docker / `assert_hostname` bug.
* Removed invalid exception when using the Registry Hub's FQDN when pulling.
* Fixed an issue where early HTTP errors weren't handled properly in streaming
responses.
* Fixed a bug where sockets would close unexpectedly using Python 3.x
* Various fixes for integration tests.
### Miscellaneous
* Small doc fixes
0.7.0
-----
### Breaking changes
* Passing `dns` or `volumes_from` in `Client.start` with API version < 1.10
will now raise an exception (previously only triggered a warning)
### Features
* Added support for `host_config` in `Client.create_container`
* Added utility method `docker.utils.create_host_config` to help build a
proper `HostConfig` dictionary.
* Added support for the `pull` parameter in `Client.build`
* Added support for the `forcerm` parameter in `Client.build`
* Added support for `extra_hosts` in `Client.start`
* Added support for a custom `timeout` in `Client.wait`
* Added support for custom `.dockercfg` loading in `Client.login`
(`dockercfg_path` argument)
### Bugfixes
* Fixed a bug where some output wouldn't be streamed properly in streaming
chunked responses
* Fixed a bug where the `devices` param didn't recognize the proper delimiter
* `Client.login` now properly expands the `registry` URL if provided.
* Fixed a bug where unicode characters in passed for `environment` in
`create_container` would break.
### Miscellaneous
* Several unit tests and integration tests improvements.
* `Client` constructor now enforces passing the `version` parameter as a
string.
* Build context files are now ordered by filename when creating the archive
(for consistency with docker mainline behavior)
0.6.0
-----
* **This version introduces breaking changes!**
### Breaking changes
* The default SSL protocol is now the highest TLS v1.x (was SSL v2.3 before)
(Poodle fix)
* The `history` command now returns a dict instead of a raw JSON string.
### Features
* Added the `execute` command.
* Added `pause` and `unpause` commands.
* Added support for the `cpuset` param in `create_container`
* Added support for host devices (`devices` param in `start`)
* Added support for the `tail` param in `logs`.
* Added support for the `filters` param in `images` and `containers`
* The `kwargs_from_env` method is now available in the `docker.utils`
module. This should make it easier for boot2docker user to connect
to their daemon.
### Bugfixes
* Fixed a bug where empty directories weren't correctly included when
providing a context to `Client.build`.
* Fixed a bug where UNIX socket connections weren't properly cleaned up,
causing `ResourceWarning`s to appear in some cases.
* Fixed a bug where docker-py would crash if the docker daemon was stopped
while reading a streaming response
* Fixed a bug with streaming responses in Python 3
* `remove_image` now supports a dict containing an `Id` key as its `id`
parameter (similar to other methods requiring a resource ID)
### Documentation
* Added new MkDocs documentation. Currently hosted on
[ReadTheDocs](http://docker-py.readthedocs.org/en/latest/)
### Miscellaneous
* Added tests to sdist
* Added a Makefile for running tests in Docker
* Updated Dockerfile
0.5.3
-----
* Fixed attaching when connecting to the daemon over a UNIX socket.
0.5.2
-----
* Fixed a bug where sockets were closed immediately when attaching over
TLS.
0.5.1
-----
* Added an `assert_hostname` option to `TLSConfig` which can be used to
disable verification of hostnames.
* Fixed SSL not working due to an incorrect version comparison
* Fixed streams not working on Windows
0.5.0
-----
* **This version introduces breaking changes!**
* Added `insecure_registry` parameter in `Client.push` and `Client.pull`.
*It defaults to False and code pushing to non-HTTPS private registries
might break as a result.*
* Added support for adding and dropping capabilities
* Added support for restart policy
* Added support for string values in `Client.create_container`'s `mem_limit`
* Added support for `.dockerignore` file in `Client.build`
### Bugfixes
* Fixed timeout behavior in `Client.stop`
### Miscellaneous
* `Client.create_container` provides better validation of the `volumes`
parameter
* Improved integration tests
0.4.0
-----
* **This version introduces breaking changes!**
* The `base_url` parameter in the `Client` constructor should now allow most
of the `DOCKER_HOST` environment values (except for the fd:// protocol)
* As a result, URLs that don't specify a port are now invalid (similar
to the official client's behavior)
* Added TLS support (see [documentation](https://github.com/dotcloud/docker-py#connection-to-daemon-using-https))
### Bugfixes
* Fixed an issue with `Client.build` streamed logs in Python 3
### Miscellaneous
* Added unit tests coverage
* Various integration tests fixes
0.3.2
-----
* Default API version is now 1.12 (support for docker 1.0)
* Added new methods `Client.get_image` and `Client.load_image`
(`docker save` and `docker load`)
* Added new method `Client.ping`
* Added new method `Client.resize`
* `Client.build` can now be provided with a custom context using the
`custom_context` parameter.
* Added support for `memswap_limit` parameter in `create_container`
* Added support for `force` parameter in `remove_container`
* Added support for `force` and `noprune` parameters in `remove_image`
* Added support for `timestamps` parameter in `logs`
* Added support for `dns_search` parameter in `start`
* Added support for `network_mode` parameter in `start`
* Added support for `size` parameter in `containers`
* Added support for `volumes_from` and `dns` parameters in `start`. As of
API version >= 1.10, these parameters no longer belong to `create_container`
* `Client.logs` now uses the logs endpoint when API version is sufficient
### Bugfixes
* Fixed a bug in pull where the `repo:tag` notation wasn't interpreted
properly
* Fixed a bug in streaming methods with python 3 (unicode, bytes/str related)
* Fixed a bug in `Client.start` where legacy notation for volumes wasn't
supported anymore.
### Miscellaneous
* The client now raises `DockerException`s when appropriate. You can import
`DockerException` (and its subclasses) from the `docker.errors` module to
catch them if needed.
* `docker.APIError` has been moved to the new `docker.errors` module as well.
* `Client.insert` is deprecated in API version > 1.11
* Improved integration tests should now run much faster.
* There is now a single source of truth for the docker-py version number.
0.3.1
-----
* Default API version is now 1.9
* Streaming responses no longer yield blank lines.
* `Client.create_container` now supports the `domainname` parameter.
* `volumes_from` parameter in `Client.create_container` now supports
iterables.
* Auth credentials are provided to the docker daemon when using `Client.build`
(new feature in API version 1.9)
### Bugfixes
* Various fixes for response streams (`logs`, `pull`, etc.).
* Fixed a bug with `Client.push` when using API version < 1.5
* Fixed a bug with API version checks.
### Miscellaneous
* `mock` has been removed from the runtime requirements.
* Added installation instructions in the README.
0.3.0
-----
* **This version introduces breaking changes!**
* Support for API version 1.7 through 1.9 (Docker 0.8.0+)
* Default API version is now 1.8
* The client has been updated to support Requests 2.x. `requests==2.2.1`
is now the recommended version.
* Links can now be specified as tuples in `Client.start` (see docs for
more information)
* Added support for various options in `Client.create_container`
(`network_disabled`, `cpu_shares`, `working_dir` and `entrypoint`)
* `Client.attach` has been reworked to work similarly to `Client.logs`
minus the historical data.
* Logs can now be streamed using the `stream` parameter.
* Added support for `tcp://` URLs as client `base_url`.
* Various auth improvements.
* Added support for custom `Client.build` timeout.
### Bugfixes
* Fixed a bug where determining the protocol of a private registry
would sometimes yield the wrong result.
* Fixed a bug where `Client.copy` wouldn't accept a dict as argument.
* Fixed several streaming bugs.
* Removed unused parameter in `Client.import_image`.
* The client's `base_url` now tolerates trailing slashes.
### Miscellaneous
* Updated integration tests
* Small doc fixes
0.2.3
-----
* Support for API version 1.6
* Added support for links
* Added support for global request timeout
* Added `signal` parameter in `Client.kill`
* Added support for `publish_all_ports` in `Client.start`
* `Client.pull`, `Client.push` and `Client.build` can be streamed now
* Added support for websockets in `Client.attach`
* Fixed ports for Docker 0.6.5+
* Added `Client.events` method (access to the `/events` endpoint)
* Changed the way the ports and volumes are provided in `Client.start` and
`Client.create_container` to make them simpler and more intuitive.
### Bugfixes
* Fixed a bug where private registries on HTTPS weren't handled properly
* Fixed a bug where auth would break with Python 3
### Miscellaneous
* Test improvements
* Slight doc improvements
0.2.2
-----
* Added support for the `rm` parameter in `Client.build`
* Added support for tarball imports in `Client.import_image` through `data`
parameter.
* The `command` parameter in `Client.create_container` is now optional (for
containers that include a default run command)
### Bugfixes
* Fixed Python 3 support
* Fixed a bug where anonymous push/pull would break when no authconfig is
present
* Fixed a bug where the `quiet` parameter wouldn't be taken into account in
`Client.containers`
* Fixed a bug where `Client.push` would break when pushing to private
registries.
* Removed unused `registry` parameter in `Client.pull`.
* Removed obsolete custom error message in `Client.create_container`.
### Miscellaneous
* docker-py is now unit-tested, and Travis-CI has been enabled on the
source repository.
0.2.1
-----
* Improvements to the `tox.ini` file
### Bugfixes
* Fixed a bug where the package would fail with an `ImportError` if requests
was installed using `apt-get`
* Fixed a bug where `Client.build` would fail if given a `path` parameter.
* Fixed several bugs in `Client.login`. It should now work with API versions
1.4, 1.5.
* Please note that `Client.login` currently doesn't write auth to the
`.dockercfg` file, thus **auth is not persistent when using this method.**
0.2.0
-----
* **This version introduces breaking changes!**
* `Client.kill`, `Client.remove_container`, `Client.remove_image`,
`Client.restart`, `Client.start`, `Client.stop` and `Client.wait` don't support
varargs anymore.
* Added commands `Client.top` and `Client.copy`
* Added `lxc_conf` parameter to `Client.start`
* Added support for authentication in `Client.pull` (API version >=1.5)
* Added support for privileged containers.
* Error management overhaul. The new version should be more consistent.
* All methods that expected a container ID as argument now also support a dict
containing an `Id` key.
* Added license header to python files.
* Several `README.md` updates.
### Bugfixes
* Fixed several bugs with auth config parsing.
* Fixed a bug in `Client.push` where it would raise an exception if
the auth config wasn't loaded.
* Fixed a bug in `Client.pull` where private registry images wouldn't be parsed
properly if it contained port information.
0.1.5
-----
* `Client.build` now uses tempfiles to store build context instead of storing
it in memory
* Added `nocache` option to `Client.build`
* `Client.remove_container` now raises an exception when trying to remove a
running container
* `Client.create_container` now accepts dicts for the `environment` parameter
### Bugfixes
* Fixed a bug in `Client.create_container` on Python 2.6 where unicode
commands would fail to be parsed
* Fixed a bug in `Client.build` where the `tag` parameter would not be taken
into account
0.1.4
-----
* Added support for API connection through UNIX socket (default for docker 0.5.2+)
0.1.3
-----
* The client now tries to load the auth config from `~/.dockercfg`. This is necessary to use the push command if API version is >1.0
0.1.2
-----
* Added a `quiet` parameter to `Client.build` (mirrors the `q` parameter in the API)
0.1.1
-----
* Fixed a bug where the build command would list tar contents before sending the request
* Fixed a bug in `Client.port`
0.1.0
-----
* **This version introduces breaking changes!**
* Switched to server side build system
* Removed the BuilderClient
* Added support for contextual builds
* Added support for remote URL builds
* Added python 3 support
* Added bind mounts support
* Added API version support
* Fixed a bug where `Client.port` would fail if provided with a port of type number
* Fixed a bug where `Client._post_json` wouldn't set the Content-Type header to `application/json`
0.0.6
-----
* Added support for custom loggers in `Client.build`
* Added `Client.attach` command
* Added support for `ADD` command in builder
* Fixed a bug in `Client.logs`
* Improved unit tests
0.0.5
-----
* Added tag support for the builder
* Use `shlex` to parse plain string commands when creating a container
* Fixed several bugs in the builder
* Fixed the `quiet` option in `Client.images`
* Unit tests
0.0.4
-----
* Improved error reporting
0.0.3
-----
* Fixed a bug in `Client.tag`
* Fixed a bug where generated images would be removed after a successful build
0.0.2
-----
* Implemented first version of the builder client

View File

@ -1,35 +0,0 @@
Client
======
.. py:module:: docker.client
Creating a client
-----------------
To communicate with the Docker daemon, you first need to instantiate a client. The easiest way to do that is by calling the function :py:func:`~docker.client.from_env`. It can also be configured manually by instantiating a :py:class:`~docker.client.DockerClient` class.
.. autofunction:: from_env()
Client reference
----------------
.. autoclass:: DockerClient()
.. autoattribute:: configs
.. autoattribute:: containers
.. autoattribute:: images
.. autoattribute:: networks
.. autoattribute:: nodes
.. autoattribute:: plugins
.. autoattribute:: secrets
.. autoattribute:: services
.. autoattribute:: swarm
.. autoattribute:: volumes
.. automethod:: close()
.. automethod:: df()
.. automethod:: events()
.. automethod:: info()
.. automethod:: login()
.. automethod:: ping()
.. automethod:: version()

View File

@ -1,361 +0,0 @@
#
# docker-sdk-python documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 14 15:48:58 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import datetime
import os
import sys
from importlib.metadata import version
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'myst_parser'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = {
'.rst': 'restructuredtext',
'.txt': 'markdown',
'.md': 'markdown',
}
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Docker SDK for Python'
year = datetime.datetime.now().year
copyright = f'{year} Docker Inc'
author = 'Docker Inc'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# see https://github.com/pypa/setuptools_scm#usage-from-sphinx
release = version('docker')
# for example take major/minor
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'description': 'A Python library for the Docker Engine API',
'fixed_sidebar': True,
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'docker-sdk-python v2.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'searchbox.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja',
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a JavaScript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'docker-sdk-pythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'docker-sdk-python.tex', 'docker-sdk-python Documentation',
'Docker Inc.', 'manual'),
]
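# Assuming a LaTeX toolchain is installed, ``sphinx-build -b latex`` (or the
# quickstart Makefile's ``make latexpdf`` target) renders this tuple into
# docker-sdk-python.tex and compiles it to a PDF.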
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ..., to help avoid clashes with
# user-added packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'docker-sdk-python', 'docker-sdk-python Documentation',
[author], 1)
]
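# ``sphinx-build -b man`` (or ``make man`` from the quickstart Makefile)
# would emit this as docker-sdk-python.1, i.e. a section 1 manual page.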
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'docker-sdk-python', 'docker-sdk-python Documentation',
 author, 'docker-sdk-python', 'A Python library for the Docker Engine API',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = False
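# With Google-style docstrings enabled, Napoleon converts sections such as
# Args/Returns into reST field lists. A minimal sketch of the docstring
# shape it parses (hypothetical method, not part of this file):
#
#     def create(self, image, command=None):
#         """Create a container without starting it.
#
#         Args:
#             image (str): The image to base the container on.
#             command (str): Optional command to run in the container.
#
#         Returns:
#             Container: The created container object.
#         """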
