mirror of https://github.com/docker/docker-py.git
Merge pull request #1186 from bfirsh/two-point-oh
A new user-focused API
This commit is contained in:
commit
8478491cf8
|
|
@ -10,7 +10,7 @@ dist
|
|||
html/*
|
||||
|
||||
# Compiled Documentation
|
||||
site/
|
||||
_build/
|
||||
README.rst
|
||||
|
||||
env/
|
||||
|
|
|
|||
|
|
@ -3,7 +3,6 @@ language: python
|
|||
python:
|
||||
- "3.5"
|
||||
env:
|
||||
- TOX_ENV=py26
|
||||
- TOX_ENV=py27
|
||||
- TOX_ENV=py33
|
||||
- TOX_ENV=py34
|
||||
|
|
|
|||
|
|
@ -1,5 +1,8 @@
|
|||
# Contributing guidelines
|
||||
|
||||
See the [Docker contributing guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md).
|
||||
The following is specific to docker-py.
|
||||
|
||||
Thank you for your interest in the project. We look forward to your
|
||||
contribution. In order to make the process as fast and streamlined as possible,
|
||||
here is a set of guidelines we recommend you follow.
|
||||
|
|
@ -100,3 +103,34 @@ here are the steps to get you started.
|
|||
5. Run `python setup.py develop` to install the dev version of the project
|
||||
and required dependencies. We recommend you do so inside a
|
||||
[virtual environment](http://docs.python-guide.org/en/latest/dev/virtualenvs)
|
||||
|
||||
## Running the tests & Code Quality
|
||||
|
||||
To get the source source code and run the unit tests, run:
|
||||
```
|
||||
$ git clone git://github.com/docker/docker-py.git
|
||||
$ cd docker-py
|
||||
$ pip install tox
|
||||
$ tox
|
||||
```
|
||||
|
||||
## Building the docs
|
||||
|
||||
```
|
||||
$ make docs
|
||||
$ open _build/index.html
|
||||
```
|
||||
|
||||
## Release Checklist
|
||||
|
||||
Before a new release, please go through the following checklist:
|
||||
|
||||
* Bump version in docker/version.py
|
||||
* Add a release note in docs/change_log.md
|
||||
* Git tag the version
|
||||
* Upload to pypi
|
||||
|
||||
## Vulnerability Reporting
|
||||
For any security issues, please do NOT file an issue or pull request on github!
|
||||
Please contact [security@docker.com](mailto:security@docker.com) or read [the
|
||||
Docker security page](https://www.docker.com/resources/security/).
|
||||
|
|
|
|||
|
|
@ -1,8 +1,11 @@
|
|||
FROM python:2.7
|
||||
FROM python:3.5
|
||||
|
||||
RUN mkdir /home/docker-py
|
||||
WORKDIR /home/docker-py
|
||||
|
||||
COPY requirements.txt /home/docker-py/requirements.txt
|
||||
RUN pip install -r requirements.txt
|
||||
|
||||
COPY docs-requirements.txt /home/docker-py/docs-requirements.txt
|
||||
RUN pip install -r docs-requirements.txt
|
||||
|
||||
|
|
|
|||
2
Makefile
2
Makefile
|
|
@ -74,7 +74,7 @@ flake8: build
|
|||
|
||||
.PHONY: docs
|
||||
docs: build-docs
|
||||
docker run -v `pwd`/docs:/home/docker-py/docs/ -p 8000:8000 docker-py-docs mkdocs serve -a 0.0.0.0:8000
|
||||
docker run --rm -it -v `pwd`:/home/docker-py docker-py-docs sphinx-build docs ./_build
|
||||
|
||||
.PHONY: shell
|
||||
shell: build
|
||||
|
|
|
|||
77
README.md
77
README.md
|
|
@ -1,26 +1,73 @@
|
|||
docker-py
|
||||
=========
|
||||
# Docker SDK for Python
|
||||
|
||||
[](https://travis-ci.org/docker/docker-py)
|
||||
|
||||
A Python library for the Docker Remote API. It does everything the `docker` command does, but from within Python – run containers, manage them, pull/push images, etc.
|
||||
A Python library for the Docker API. It lets you do anything the `docker` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc.
|
||||
|
||||
Installation
|
||||
------------
|
||||
## Installation
|
||||
|
||||
The latest stable version is always available on PyPi.
|
||||
The latest stable version [is available on PyPi](https://pypi.python.org/pypi/docker/). Either add `docker` to your `requirements.txt` file or install with pip:
|
||||
|
||||
pip install docker-py
|
||||
pip install docker
|
||||
|
||||
Documentation
|
||||
-------------
|
||||
## Usage
|
||||
|
||||
[](https://readthedocs.org/projects/docker-py/?badge=latest)
|
||||
Connect to Docker using the default socket or the configuration in your environment:
|
||||
|
||||
[Read the full documentation here](https://docker-py.readthedocs.io/en/latest/).
|
||||
The source is available in the `docs/` directory.
|
||||
```python
|
||||
import docker
|
||||
client = docker.from_env()
|
||||
```
|
||||
|
||||
You can run containers:
|
||||
|
||||
License
|
||||
-------
|
||||
Docker is licensed under the Apache License, Version 2.0. See LICENSE for full license text
|
||||
```python
|
||||
>>> client.containers.run("ubuntu", "echo hello world")
|
||||
'hello world\n'
|
||||
```
|
||||
|
||||
You can run containers in the background:
|
||||
|
||||
```python
|
||||
>>> client.containers.run("bfirsh/reticulate-splines", detach=True)
|
||||
<Container '45e6d2de7c54'>
|
||||
```
|
||||
|
||||
You can manage containers:
|
||||
|
||||
```python
|
||||
>>> client.containers.list()
|
||||
[<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
|
||||
|
||||
>>> container = client.containers.get('45e6d2de7c54')
|
||||
|
||||
>>> container.attrs['Config']['Image']
|
||||
"bfirsh/reticulate-splines"
|
||||
|
||||
>>> container.logs()
|
||||
"Reticulating spline 1...\n"
|
||||
|
||||
>>> container.stop()
|
||||
```
|
||||
|
||||
You can stream logs:
|
||||
|
||||
```python
|
||||
>>> for line in container.logs(stream=True):
|
||||
... print line.strip()
|
||||
Reticulating spline 2...
|
||||
Reticulating spline 3...
|
||||
...
|
||||
```
|
||||
|
||||
You can manage images:
|
||||
|
||||
```python
|
||||
>>> client.images.pull('nginx')
|
||||
<Image 'nginx'>
|
||||
|
||||
>>> client.images.list()
|
||||
[<Image 'ubuntu'>, <Image 'nginx'>, ...]
|
||||
```
|
||||
|
||||
[Read the full documentation](https://docs.docker.com/sdk/python/) to see everything you can do.
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
# flake8: noqa
|
||||
from .api import APIClient
|
||||
from .client import Client, from_env
|
||||
from .version import version, version_info
|
||||
|
||||
__version__ = version
|
||||
__title__ = 'docker-py'
|
||||
|
||||
from .client import Client, AutoVersionClient, from_env # flake8: noqa
|
||||
|
|
|
|||
|
|
@ -1,10 +1,2 @@
|
|||
# flake8: noqa
|
||||
from .build import BuildApiMixin
|
||||
from .container import ContainerApiMixin
|
||||
from .daemon import DaemonApiMixin
|
||||
from .exec_api import ExecApiMixin
|
||||
from .image import ImageApiMixin
|
||||
from .network import NetworkApiMixin
|
||||
from .service import ServiceApiMixin
|
||||
from .swarm import SwarmApiMixin
|
||||
from .volume import VolumeApiMixin
|
||||
from .client import APIClient
|
||||
|
|
|
|||
|
|
@ -19,6 +19,89 @@ class BuildApiMixin(object):
|
|||
forcerm=False, dockerfile=None, container_limits=None,
|
||||
decode=False, buildargs=None, gzip=False, shmsize=None,
|
||||
labels=None):
|
||||
"""
|
||||
Similar to the ``docker build`` command. Either ``path`` or ``fileobj``
|
||||
needs to be set. ``path`` can be a local path (to a directory
|
||||
containing a Dockerfile) or a remote URL. ``fileobj`` must be a
|
||||
readable file-like object to a Dockerfile.
|
||||
|
||||
If you have a tar file for the Docker build context (including a
|
||||
Dockerfile) already, pass a readable file-like object to ``fileobj``
|
||||
and also pass ``custom_context=True``. If the stream is compressed
|
||||
also, set ``encoding`` to the correct value (e.g ``gzip``).
|
||||
|
||||
Example:
|
||||
>>> from io import BytesIO
|
||||
>>> from docker import Client
|
||||
>>> dockerfile = '''
|
||||
... # Shared Volume
|
||||
... FROM busybox:buildroot-2014.02
|
||||
... VOLUME /data
|
||||
... CMD ["/bin/sh"]
|
||||
... '''
|
||||
>>> f = BytesIO(dockerfile.encode('utf-8'))
|
||||
>>> cli = Client(base_url='tcp://127.0.0.1:2375')
|
||||
>>> response = [line for line in cli.build(
|
||||
... fileobj=f, rm=True, tag='yourname/volume'
|
||||
... )]
|
||||
>>> response
|
||||
['{"stream":" ---\\u003e a9eb17255234\\n"}',
|
||||
'{"stream":"Step 1 : VOLUME /data\\n"}',
|
||||
'{"stream":" ---\\u003e Running in abdc1e6896c6\\n"}',
|
||||
'{"stream":" ---\\u003e 713bca62012e\\n"}',
|
||||
'{"stream":"Removing intermediate container abdc1e6896c6\\n"}',
|
||||
'{"stream":"Step 2 : CMD [\\"/bin/sh\\"]\\n"}',
|
||||
'{"stream":" ---\\u003e Running in dba30f2a1a7e\\n"}',
|
||||
'{"stream":" ---\\u003e 032b8b2855fc\\n"}',
|
||||
'{"stream":"Removing intermediate container dba30f2a1a7e\\n"}',
|
||||
'{"stream":"Successfully built 032b8b2855fc\\n"}']
|
||||
|
||||
Args:
|
||||
path (str): Path to the directory containing the Dockerfile
|
||||
fileobj: A file object to use as the Dockerfile. (Or a file-like
|
||||
object)
|
||||
tag (str): A tag to add to the final image
|
||||
quiet (bool): Whether to return the status
|
||||
nocache (bool): Don't use the cache when set to ``True``
|
||||
rm (bool): Remove intermediate containers. The ``docker build``
|
||||
command now defaults to ``--rm=true``, but we have kept the old
|
||||
default of `False` to preserve backward compatibility
|
||||
stream (bool): *Deprecated for API version > 1.8 (always True)*.
|
||||
Return a blocking generator you can iterate over to retrieve
|
||||
build output as it happens
|
||||
timeout (int): HTTP timeout
|
||||
custom_context (bool): Optional if using ``fileobj``
|
||||
encoding (str): The encoding for a stream. Set to ``gzip`` for
|
||||
compressing
|
||||
pull (bool): Downloads any updates to the FROM image in Dockerfiles
|
||||
forcerm (bool): Always remove intermediate containers, even after
|
||||
unsuccessful builds
|
||||
dockerfile (str): path within the build context to the Dockerfile
|
||||
buildargs (dict): A dictionary of build arguments
|
||||
container_limits (dict): A dictionary of limits applied to each
|
||||
container created by the build process. Valid keys:
|
||||
|
||||
- memory (int): set memory limit for build
|
||||
- memswap (int): Total memory (memory + swap), -1 to disable
|
||||
swap
|
||||
- cpushares (int): CPU shares (relative weight)
|
||||
- cpusetcpus (str): CPUs in which to allow execution, e.g.,
|
||||
``"0-3"``, ``"0,1"``
|
||||
decode (bool): If set to ``True``, the returned stream will be
|
||||
decoded into dicts on the fly. Default ``False``.
|
||||
shmsize (int): Size of `/dev/shm` in bytes. The size must be
|
||||
greater than 0. If omitted the system uses 64MB.
|
||||
labels (dict): A dictionary of labels to set on the image.
|
||||
|
||||
Returns:
|
||||
A generator for the build output.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
``TypeError``
|
||||
If neither ``path`` nor ``fileobj`` is specified.
|
||||
"""
|
||||
remote = context = None
|
||||
headers = {}
|
||||
container_limits = container_limits or {}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,432 @@
|
|||
import json
|
||||
import struct
|
||||
import warnings
|
||||
from functools import partial
|
||||
|
||||
import requests
|
||||
import requests.exceptions
|
||||
import six
|
||||
import websocket
|
||||
|
||||
from .build import BuildApiMixin
|
||||
from .container import ContainerApiMixin
|
||||
from .daemon import DaemonApiMixin
|
||||
from .exec_api import ExecApiMixin
|
||||
from .image import ImageApiMixin
|
||||
from .network import NetworkApiMixin
|
||||
from .service import ServiceApiMixin
|
||||
from .swarm import SwarmApiMixin
|
||||
from .volume import VolumeApiMixin
|
||||
from .. import auth, ssladapter
|
||||
from ..constants import (DEFAULT_TIMEOUT_SECONDS, DEFAULT_USER_AGENT,
|
||||
IS_WINDOWS_PLATFORM, DEFAULT_DOCKER_API_VERSION,
|
||||
STREAM_HEADER_SIZE_BYTES, DEFAULT_NUM_POOLS,
|
||||
MINIMUM_DOCKER_API_VERSION)
|
||||
from ..errors import (DockerException, TLSParameterError,
|
||||
create_api_error_from_http_exception)
|
||||
from ..tls import TLSConfig
|
||||
from ..transport import UnixAdapter
|
||||
from ..utils import utils, check_resource, update_headers
|
||||
from ..utils.socket import frames_iter
|
||||
try:
|
||||
from ..transport import NpipeAdapter
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
class APIClient(
|
||||
requests.Session,
|
||||
BuildApiMixin,
|
||||
ContainerApiMixin,
|
||||
DaemonApiMixin,
|
||||
ExecApiMixin,
|
||||
ImageApiMixin,
|
||||
NetworkApiMixin,
|
||||
ServiceApiMixin,
|
||||
SwarmApiMixin,
|
||||
VolumeApiMixin):
|
||||
"""
|
||||
A low-level client for the Docker Remote API.
|
||||
|
||||
Example:
|
||||
|
||||
>>> import docker
|
||||
>>> client = docker.APIClient(base_url='unix://var/run/docker.sock')
|
||||
>>> client.version()
|
||||
{u'ApiVersion': u'1.24',
|
||||
u'Arch': u'amd64',
|
||||
u'BuildTime': u'2016-09-27T23:38:15.810178467+00:00',
|
||||
u'Experimental': True,
|
||||
u'GitCommit': u'45bed2c',
|
||||
u'GoVersion': u'go1.6.3',
|
||||
u'KernelVersion': u'4.4.22-moby',
|
||||
u'Os': u'linux',
|
||||
u'Version': u'1.12.2-rc1'}
|
||||
|
||||
Args:
|
||||
base_url (str): URL to the Docker server. For example,
|
||||
``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
|
||||
version (str): The version of the API to use. Set to ``auto`` to
|
||||
automatically detect the server's version. Default: ``1.24``
|
||||
timeout (int): Default timeout for API calls, in seconds.
|
||||
tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
|
||||
``True`` to enable it with default options, or pass a
|
||||
:py:class:`~docker.tls.TLSConfig` object to use custom
|
||||
configuration.
|
||||
user_agent (str): Set a custom user agent for requests to the server.
|
||||
"""
|
||||
def __init__(self, base_url=None, version=None,
|
||||
timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
|
||||
user_agent=DEFAULT_USER_AGENT, num_pools=DEFAULT_NUM_POOLS):
|
||||
super(APIClient, self).__init__()
|
||||
|
||||
if tls and not base_url:
|
||||
raise TLSParameterError(
|
||||
'If using TLS, the base_url argument must be provided.'
|
||||
)
|
||||
|
||||
self.base_url = base_url
|
||||
self.timeout = timeout
|
||||
self.headers['User-Agent'] = user_agent
|
||||
|
||||
self._auth_configs = auth.load_config()
|
||||
|
||||
base_url = utils.parse_host(
|
||||
base_url, IS_WINDOWS_PLATFORM, tls=bool(tls)
|
||||
)
|
||||
if base_url.startswith('http+unix://'):
|
||||
self._custom_adapter = UnixAdapter(
|
||||
base_url, timeout, pool_connections=num_pools
|
||||
)
|
||||
self.mount('http+docker://', self._custom_adapter)
|
||||
self._unmount('http://', 'https://')
|
||||
self.base_url = 'http+docker://localunixsocket'
|
||||
elif base_url.startswith('npipe://'):
|
||||
if not IS_WINDOWS_PLATFORM:
|
||||
raise DockerException(
|
||||
'The npipe:// protocol is only supported on Windows'
|
||||
)
|
||||
try:
|
||||
self._custom_adapter = NpipeAdapter(
|
||||
base_url, timeout, pool_connections=num_pools
|
||||
)
|
||||
except NameError:
|
||||
raise DockerException(
|
||||
'Install pypiwin32 package to enable npipe:// support'
|
||||
)
|
||||
self.mount('http+docker://', self._custom_adapter)
|
||||
self.base_url = 'http+docker://localnpipe'
|
||||
else:
|
||||
# Use SSLAdapter for the ability to specify SSL version
|
||||
if isinstance(tls, TLSConfig):
|
||||
tls.configure_client(self)
|
||||
elif tls:
|
||||
self._custom_adapter = ssladapter.SSLAdapter(
|
||||
pool_connections=num_pools
|
||||
)
|
||||
self.mount('https://', self._custom_adapter)
|
||||
self.base_url = base_url
|
||||
|
||||
# version detection needs to be after unix adapter mounting
|
||||
if version is None:
|
||||
self._version = DEFAULT_DOCKER_API_VERSION
|
||||
elif isinstance(version, six.string_types):
|
||||
if version.lower() == 'auto':
|
||||
self._version = self._retrieve_server_version()
|
||||
else:
|
||||
self._version = version
|
||||
else:
|
||||
raise DockerException(
|
||||
'Version parameter must be a string or None. Found {0}'.format(
|
||||
type(version).__name__
|
||||
)
|
||||
)
|
||||
if utils.version_lt(self._version, MINIMUM_DOCKER_API_VERSION):
|
||||
warnings.warn(
|
||||
'The minimum API version supported is {}, but you are using '
|
||||
'version {}. It is recommended you either upgrade Docker '
|
||||
'Engine or use an older version of docker-py.'.format(
|
||||
MINIMUM_DOCKER_API_VERSION, self._version)
|
||||
)
|
||||
|
||||
def _retrieve_server_version(self):
|
||||
try:
|
||||
return self.version(api_version=False)["ApiVersion"]
|
||||
except KeyError:
|
||||
raise DockerException(
|
||||
'Invalid response from docker daemon: key "ApiVersion"'
|
||||
' is missing.'
|
||||
)
|
||||
except Exception as e:
|
||||
raise DockerException(
|
||||
'Error while fetching server API version: {0}'.format(e)
|
||||
)
|
||||
|
||||
def _set_request_timeout(self, kwargs):
|
||||
"""Prepare the kwargs for an HTTP request by inserting the timeout
|
||||
parameter, if not already present."""
|
||||
kwargs.setdefault('timeout', self.timeout)
|
||||
return kwargs
|
||||
|
||||
@update_headers
|
||||
def _post(self, url, **kwargs):
|
||||
return self.post(url, **self._set_request_timeout(kwargs))
|
||||
|
||||
@update_headers
|
||||
def _get(self, url, **kwargs):
|
||||
return self.get(url, **self._set_request_timeout(kwargs))
|
||||
|
||||
@update_headers
|
||||
def _put(self, url, **kwargs):
|
||||
return self.put(url, **self._set_request_timeout(kwargs))
|
||||
|
||||
@update_headers
|
||||
def _delete(self, url, **kwargs):
|
||||
return self.delete(url, **self._set_request_timeout(kwargs))
|
||||
|
||||
def _url(self, pathfmt, *args, **kwargs):
|
||||
for arg in args:
|
||||
if not isinstance(arg, six.string_types):
|
||||
raise ValueError(
|
||||
'Expected a string but found {0} ({1}) '
|
||||
'instead'.format(arg, type(arg))
|
||||
)
|
||||
|
||||
quote_f = partial(six.moves.urllib.parse.quote_plus, safe="/:")
|
||||
args = map(quote_f, args)
|
||||
|
||||
if kwargs.get('versioned_api', True):
|
||||
return '{0}/v{1}{2}'.format(
|
||||
self.base_url, self._version, pathfmt.format(*args)
|
||||
)
|
||||
else:
|
||||
return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
|
||||
|
||||
def _raise_for_status(self, response):
|
||||
"""Raises stored :class:`APIError`, if one occurred."""
|
||||
try:
|
||||
response.raise_for_status()
|
||||
except requests.exceptions.HTTPError as e:
|
||||
raise create_api_error_from_http_exception(e)
|
||||
|
||||
def _result(self, response, json=False, binary=False):
|
||||
assert not (json and binary)
|
||||
self._raise_for_status(response)
|
||||
|
||||
if json:
|
||||
return response.json()
|
||||
if binary:
|
||||
return response.content
|
||||
return response.text
|
||||
|
||||
def _post_json(self, url, data, **kwargs):
|
||||
# Go <1.1 can't unserialize null to a string
|
||||
# so we do this disgusting thing here.
|
||||
data2 = {}
|
||||
if data is not None:
|
||||
for k, v in six.iteritems(data):
|
||||
if v is not None:
|
||||
data2[k] = v
|
||||
|
||||
if 'headers' not in kwargs:
|
||||
kwargs['headers'] = {}
|
||||
kwargs['headers']['Content-Type'] = 'application/json'
|
||||
return self._post(url, data=json.dumps(data2), **kwargs)
|
||||
|
||||
def _attach_params(self, override=None):
|
||||
return override or {
|
||||
'stdout': 1,
|
||||
'stderr': 1,
|
||||
'stream': 1
|
||||
}
|
||||
|
||||
@check_resource
|
||||
def _attach_websocket(self, container, params=None):
|
||||
url = self._url("/containers/{0}/attach/ws", container)
|
||||
req = requests.Request("POST", url, params=self._attach_params(params))
|
||||
full_url = req.prepare().url
|
||||
full_url = full_url.replace("http://", "ws://", 1)
|
||||
full_url = full_url.replace("https://", "wss://", 1)
|
||||
return self._create_websocket_connection(full_url)
|
||||
|
||||
def _create_websocket_connection(self, url):
|
||||
return websocket.create_connection(url)
|
||||
|
||||
def _get_raw_response_socket(self, response):
|
||||
self._raise_for_status(response)
|
||||
if self.base_url == "http+docker://localnpipe":
|
||||
sock = response.raw._fp.fp.raw.sock
|
||||
elif six.PY3:
|
||||
sock = response.raw._fp.fp.raw
|
||||
if self.base_url.startswith("https://"):
|
||||
sock = sock._sock
|
||||
else:
|
||||
sock = response.raw._fp.fp._sock
|
||||
try:
|
||||
# Keep a reference to the response to stop it being garbage
|
||||
# collected. If the response is garbage collected, it will
|
||||
# close TLS sockets.
|
||||
sock._response = response
|
||||
except AttributeError:
|
||||
# UNIX sockets can't have attributes set on them, but that's
|
||||
# fine because we won't be doing TLS over them
|
||||
pass
|
||||
|
||||
return sock
|
||||
|
||||
def _stream_helper(self, response, decode=False):
|
||||
"""Generator for data coming from a chunked-encoded HTTP response."""
|
||||
if response.raw._fp.chunked:
|
||||
reader = response.raw
|
||||
while not reader.closed:
|
||||
# this read call will block until we get a chunk
|
||||
data = reader.read(1)
|
||||
if not data:
|
||||
break
|
||||
if reader._fp.chunk_left:
|
||||
data += reader.read(reader._fp.chunk_left)
|
||||
if decode:
|
||||
if six.PY3:
|
||||
data = data.decode('utf-8')
|
||||
# remove the trailing newline
|
||||
data = data.strip()
|
||||
# split the data at any newlines
|
||||
data_list = data.split("\r\n")
|
||||
# load and yield each line seperately
|
||||
for data in data_list:
|
||||
data = json.loads(data)
|
||||
yield data
|
||||
else:
|
||||
yield data
|
||||
else:
|
||||
# Response isn't chunked, meaning we probably
|
||||
# encountered an error immediately
|
||||
yield self._result(response, json=decode)
|
||||
|
||||
def _multiplexed_buffer_helper(self, response):
|
||||
"""A generator of multiplexed data blocks read from a buffered
|
||||
response."""
|
||||
buf = self._result(response, binary=True)
|
||||
walker = 0
|
||||
while True:
|
||||
if len(buf[walker:]) < 8:
|
||||
break
|
||||
_, length = struct.unpack_from('>BxxxL', buf[walker:])
|
||||
start = walker + STREAM_HEADER_SIZE_BYTES
|
||||
end = start + length
|
||||
walker = end
|
||||
yield buf[start:end]
|
||||
|
||||
def _multiplexed_response_stream_helper(self, response):
|
||||
"""A generator of multiplexed data blocks coming from a response
|
||||
stream."""
|
||||
|
||||
# Disable timeout on the underlying socket to prevent
|
||||
# Read timed out(s) for long running processes
|
||||
socket = self._get_raw_response_socket(response)
|
||||
self._disable_socket_timeout(socket)
|
||||
|
||||
while True:
|
||||
header = response.raw.read(STREAM_HEADER_SIZE_BYTES)
|
||||
if not header:
|
||||
break
|
||||
_, length = struct.unpack('>BxxxL', header)
|
||||
if not length:
|
||||
continue
|
||||
data = response.raw.read(length)
|
||||
if not data:
|
||||
break
|
||||
yield data
|
||||
|
||||
def _stream_raw_result_old(self, response):
|
||||
''' Stream raw output for API versions below 1.6 '''
|
||||
self._raise_for_status(response)
|
||||
for line in response.iter_lines(chunk_size=1,
|
||||
decode_unicode=True):
|
||||
# filter out keep-alive new lines
|
||||
if line:
|
||||
yield line
|
||||
|
||||
def _stream_raw_result(self, response):
|
||||
''' Stream result for TTY-enabled container above API 1.6 '''
|
||||
self._raise_for_status(response)
|
||||
for out in response.iter_content(chunk_size=1, decode_unicode=True):
|
||||
yield out
|
||||
|
||||
def _read_from_socket(self, response, stream):
|
||||
socket = self._get_raw_response_socket(response)
|
||||
|
||||
if stream:
|
||||
return frames_iter(socket)
|
||||
else:
|
||||
return six.binary_type().join(frames_iter(socket))
|
||||
|
||||
def _disable_socket_timeout(self, socket):
|
||||
""" Depending on the combination of python version and whether we're
|
||||
connecting over http or https, we might need to access _sock, which
|
||||
may or may not exist; or we may need to just settimeout on socket
|
||||
itself, which also may or may not have settimeout on it. To avoid
|
||||
missing the correct one, we try both.
|
||||
|
||||
We also do not want to set the timeout if it is already disabled, as
|
||||
you run the risk of changing a socket that was non-blocking to
|
||||
blocking, for example when using gevent.
|
||||
"""
|
||||
sockets = [socket, getattr(socket, '_sock', None)]
|
||||
|
||||
for s in sockets:
|
||||
if not hasattr(s, 'settimeout'):
|
||||
continue
|
||||
|
||||
timeout = -1
|
||||
|
||||
if hasattr(s, 'gettimeout'):
|
||||
timeout = s.gettimeout()
|
||||
|
||||
# Don't change the timeout if it is already disabled.
|
||||
if timeout is None or timeout == 0.0:
|
||||
continue
|
||||
|
||||
s.settimeout(None)
|
||||
|
||||
def _get_result(self, container, stream, res):
|
||||
cont = self.inspect_container(container)
|
||||
return self._get_result_tty(stream, res, cont['Config']['Tty'])
|
||||
|
||||
def _get_result_tty(self, stream, res, is_tty):
|
||||
# Stream multi-plexing was only introduced in API v1.6. Anything
|
||||
# before that needs old-style streaming.
|
||||
if utils.compare_version('1.6', self._version) < 0:
|
||||
return self._stream_raw_result_old(res)
|
||||
|
||||
# We should also use raw streaming (without keep-alives)
|
||||
# if we're dealing with a tty-enabled container.
|
||||
if is_tty:
|
||||
return self._stream_raw_result(res) if stream else \
|
||||
self._result(res, binary=True)
|
||||
|
||||
self._raise_for_status(res)
|
||||
sep = six.binary_type()
|
||||
if stream:
|
||||
return self._multiplexed_response_stream_helper(res)
|
||||
else:
|
||||
return sep.join(
|
||||
[x for x in self._multiplexed_buffer_helper(res)]
|
||||
)
|
||||
|
||||
def _unmount(self, *args):
|
||||
for proto in args:
|
||||
self.adapters.pop(proto)
|
||||
|
||||
def get_adapter(self, url):
|
||||
try:
|
||||
return super(APIClient, self).get_adapter(url)
|
||||
except requests.exceptions.InvalidSchema as e:
|
||||
if self._custom_adapter:
|
||||
return self._custom_adapter
|
||||
else:
|
||||
raise e
|
||||
|
||||
@property
|
||||
def api_version(self):
|
||||
return self._version
|
||||
|
|
@ -11,6 +11,30 @@ class ContainerApiMixin(object):
|
|||
@utils.check_resource
|
||||
def attach(self, container, stdout=True, stderr=True,
|
||||
stream=False, logs=False):
|
||||
"""
|
||||
Attach to a container.
|
||||
|
||||
The ``.logs()`` function is a wrapper around this method, which you can
|
||||
use instead if you want to fetch/stream container output without first
|
||||
retrieving the entire backlog.
|
||||
|
||||
Args:
|
||||
container (str): The container to attach to.
|
||||
stdout (bool): Include stdout.
|
||||
stderr (bool): Include stderr.
|
||||
stream (bool): Return container output progressively as an iterator
|
||||
of strings, rather than a single string.
|
||||
logs (bool): Include the container's previous output.
|
||||
|
||||
Returns:
|
||||
By default, the container's output as a single string.
|
||||
|
||||
If ``stream=True``, an iterator of output strings.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
params = {
|
||||
'logs': logs and 1 or 0,
|
||||
'stdout': stdout and 1 or 0,
|
||||
|
|
@ -30,6 +54,20 @@ class ContainerApiMixin(object):
|
|||
|
||||
@utils.check_resource
|
||||
def attach_socket(self, container, params=None, ws=False):
|
||||
"""
|
||||
Like ``attach``, but returns the underlying socket-like object for the
|
||||
HTTP request.
|
||||
|
||||
Args:
|
||||
container (str): The container to attach to.
|
||||
params (dict): Dictionary of request parameters (e.g. ``stdout``,
|
||||
``stderr``, ``stream``).
|
||||
ws (bool): Use websockets instead of raw HTTP.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
if params is None:
|
||||
params = {
|
||||
'stdout': 1,
|
||||
|
|
@ -56,6 +94,26 @@ class ContainerApiMixin(object):
|
|||
@utils.check_resource
|
||||
def commit(self, container, repository=None, tag=None, message=None,
|
||||
author=None, changes=None, conf=None):
|
||||
"""
|
||||
Commit a container to an image. Similar to the ``docker commit``
|
||||
command.
|
||||
|
||||
Args:
|
||||
container (str): The image hash of the container
|
||||
repository (str): The repository to push the image to
|
||||
tag (str): The tag to push
|
||||
message (str): A commit message
|
||||
author (str): The name of the author
|
||||
changes (str): Dockerfile instructions to apply while committing
|
||||
conf (dict): The configuration for the container. See the
|
||||
`Remote API documentation
|
||||
<https://docs.docker.com/reference/api/docker_remote_api/>`_
|
||||
for full details.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
params = {
|
||||
'container': container,
|
||||
'repo': repository,
|
||||
|
|
@ -71,6 +129,50 @@ class ContainerApiMixin(object):
|
|||
def containers(self, quiet=False, all=False, trunc=False, latest=False,
|
||||
since=None, before=None, limit=-1, size=False,
|
||||
filters=None):
|
||||
"""
|
||||
List containers. Similar to the ``docker ps`` command.
|
||||
|
||||
Args:
|
||||
quiet (bool): Only display numeric Ids
|
||||
all (bool): Show all containers. Only running containers are shown
|
||||
by default trunc (bool): Truncate output
|
||||
latest (bool): Show only the latest created container, include
|
||||
non-running ones.
|
||||
since (str): Show only containers created since Id or Name, include
|
||||
non-running ones
|
||||
before (str): Show only container created before Id or Name,
|
||||
include non-running ones
|
||||
limit (int): Show `limit` last created containers, include
|
||||
non-running ones
|
||||
size (bool): Display sizes
|
||||
filters (dict): Filters to be processed on the image list.
|
||||
Available filters:
|
||||
|
||||
- `exited` (int): Only containers with specified exit code
|
||||
- `status` (str): One of ``restarting``, ``running``,
|
||||
``paused``, ``exited``
|
||||
- `label` (str): format either ``"key"`` or ``"key=value"``
|
||||
- `id` (str): The id of the container.
|
||||
- `name` (str): The name of the container.
|
||||
- `ancestor` (str): Filter by container ancestor. Format of
|
||||
``<image-name>[:tag]``, ``<image-id>``, or
|
||||
``<image@digest>``.
|
||||
- `before` (str): Only containers created before a particular
|
||||
container. Give the container name or id.
|
||||
- `since` (str): Only containers created after a particular
|
||||
container. Give container name or id.
|
||||
|
||||
A comprehensive list can be found in the documentation for
|
||||
`docker ps
|
||||
<https://docs.docker.com/engine/reference/commandline/ps>`_.
|
||||
|
||||
Returns:
|
||||
A list of dicts, one per container
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
params = {
|
||||
'limit': 1 if latest else limit,
|
||||
'all': 1 if all else 0,
|
||||
|
|
@ -93,6 +195,24 @@ class ContainerApiMixin(object):
|
|||
|
||||
@utils.check_resource
|
||||
def copy(self, container, resource):
|
||||
"""
|
||||
Identical to the ``docker cp`` command. Get files/folders from the
|
||||
container.
|
||||
|
||||
**Deprecated for API version >= 1.20.** Use
|
||||
:py:meth:`~ContainerApiMixin.get_archive` instead.
|
||||
|
||||
Args:
|
||||
container (str): The container to copy from
|
||||
resource (str): The path within the container
|
||||
|
||||
Returns:
|
||||
The contents of the file as a string
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
if utils.version_gte(self._version, '1.20'):
|
||||
warnings.warn(
|
||||
'Client.copy() is deprecated for API version >= 1.20, '
|
||||
|
|
@ -117,7 +237,190 @@ class ContainerApiMixin(object):
|
|||
mac_address=None, labels=None, volume_driver=None,
|
||||
stop_signal=None, networking_config=None,
|
||||
healthcheck=None):
|
||||
"""
|
||||
Creates a container. Parameters are similar to those for the ``docker
|
||||
run`` command except it doesn't support the attach options (``-a``).
|
||||
|
||||
The arguments that are passed directly to this function are
|
||||
host-independent configuration options. Host-specific configuration
|
||||
is passed with the `host_config` argument. You'll normally want to
|
||||
use this method in combination with the :py:meth:`create_host_config`
|
||||
method to generate ``host_config``.
|
||||
|
||||
**Port bindings**
|
||||
|
||||
Port binding is done in two parts: first, provide a list of ports to
|
||||
open inside the container with the ``ports`` parameter, then declare
|
||||
bindings with the ``host_config`` parameter. For example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
container_id = cli.create_container(
|
||||
'busybox', 'ls', ports=[1111, 2222],
|
||||
host_config=cli.create_host_config(port_bindings={
|
||||
1111: 4567,
|
||||
2222: None
|
||||
})
|
||||
)
|
||||
|
||||
|
||||
You can limit the host address on which the port will be exposed like
|
||||
such:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
cli.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)})
|
||||
|
||||
Or without host port assignment:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
cli.create_host_config(port_bindings={1111: ('127.0.0.1',)})
|
||||
|
||||
If you wish to use UDP instead of TCP (default), you need to declare
|
||||
ports as such in both the config and host config:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
container_id = cli.create_container(
|
||||
'busybox', 'ls', ports=[(1111, 'udp'), 2222],
|
||||
host_config=cli.create_host_config(port_bindings={
|
||||
'1111/udp': 4567, 2222: None
|
||||
})
|
||||
)
|
||||
|
||||
To bind multiple host ports to a single container port, use the
|
||||
following syntax:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
cli.create_host_config(port_bindings={
|
||||
1111: [1234, 4567]
|
||||
})
|
||||
|
||||
You can also bind multiple IPs to a single container port:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
cli.create_host_config(port_bindings={
|
||||
1111: [
|
||||
('192.168.0.100', 1234),
|
||||
('192.168.0.101', 1234)
|
||||
]
|
||||
})
|
||||
|
||||
**Using volumes**
|
||||
|
||||
Volume declaration is done in two parts. Provide a list of mountpoints
|
||||
to the with the ``volumes`` parameter, and declare mappings in the
|
||||
``host_config`` section.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
container_id = cli.create_container(
|
||||
'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
|
||||
host_config=cli.create_host_config(binds={
|
||||
'/home/user1/': {
|
||||
'bind': '/mnt/vol2',
|
||||
'mode': 'rw',
|
||||
},
|
||||
'/var/www': {
|
||||
'bind': '/mnt/vol1',
|
||||
'mode': 'ro',
|
||||
}
|
||||
})
|
||||
)
|
||||
|
||||
You can alternatively specify binds as a list. This code is equivalent
|
||||
to the example above:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
container_id = cli.create_container(
|
||||
'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
|
||||
host_config=cli.create_host_config(binds=[
|
||||
'/home/user1/:/mnt/vol2',
|
||||
'/var/www:/mnt/vol1:ro',
|
||||
])
|
||||
)
|
||||
|
||||
**Networking**
|
||||
|
||||
You can specify networks to connect the container to by using the
|
||||
``networking_config`` parameter. At the time of creation, you can
|
||||
only connect a container to a single networking, but you
|
||||
can create more connections by using
|
||||
:py:meth:`~connect_container_to_network`.
|
||||
|
||||
For example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
networking_config = docker_client.create_networking_config({
|
||||
'network1': docker_client.create_endpoint_config(
|
||||
ipv4_address='172.28.0.124',
|
||||
aliases=['foo', 'bar'],
|
||||
links=['container2']
|
||||
)
|
||||
})
|
||||
|
||||
ctnr = docker_client.create_container(
|
||||
img, command, networking_config=networking_config
|
||||
)
|
||||
|
||||
Args:
|
||||
image (str): The image to run
|
||||
command (str or list): The command to be run in the container
|
||||
hostname (str): Optional hostname for the container
|
||||
user (str or int): Username or UID
|
||||
detach (bool): Detached mode: run container in the background and
|
||||
return container ID
|
||||
stdin_open (bool): Keep STDIN open even if not attached
|
||||
tty (bool): Allocate a pseudo-TTY
|
||||
mem_limit (float or str): Memory limit. Accepts float values (which
|
||||
represent the memory limit of the created container in bytes)
|
||||
or a string with a units identification char (``100000b``,
|
||||
``1000k``, ``128m``, ``1g``). If a string is specified without
|
||||
a units character, bytes are assumed as an intended unit.
|
||||
ports (list of ints): A list of port numbers
|
||||
environment (dict or list): A dictionary or a list of strings in
|
||||
the following format ``["PASSWORD=xxx"]`` or
|
||||
``{"PASSWORD": "xxx"}``.
|
||||
dns (list): DNS name servers. Deprecated since API version 1.10.
|
||||
Use ``host_config`` instead.
|
||||
dns_opt (list): Additional options to be added to the container's
|
||||
``resolv.conf`` file
|
||||
volumes (str or list):
|
||||
volumes_from (list): List of container names or Ids to get
|
||||
volumes from.
|
||||
network_disabled (bool): Disable networking
|
||||
name (str): A name for the container
|
||||
entrypoint (str or list): An entrypoint
|
||||
working_dir (str): Path to the working directory
|
||||
domainname (str or list): Set custom DNS search domains
|
||||
memswap_limit (int):
|
||||
host_config (dict): A dictionary created with
|
||||
:py:meth:`create_host_config`.
|
||||
mac_address (str): The Mac Address to assign the container
|
||||
labels (dict or list): A dictionary of name-value labels (e.g.
|
||||
``{"label1": "value1", "label2": "value2"}``) or a list of
|
||||
names of labels to set with empty values (e.g.
|
||||
``["label1", "label2"]``)
|
||||
volume_driver (str): The name of a volume driver/plugin.
|
||||
stop_signal (str): The stop signal to use to stop the container
|
||||
(e.g. ``SIGINT``).
|
||||
networking_config (dict): A networking configuration generated
|
||||
by :py:meth:`create_networking_config`.
|
||||
|
||||
Returns:
|
||||
A dictionary with an image 'Id' key and a 'Warnings' key.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.ImageNotFound`
|
||||
If the specified image does not exist.
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
if isinstance(volumes, six.string_types):
|
||||
volumes = [volumes, ]
|
||||
|
||||
|
|
@ -147,6 +450,130 @@ class ContainerApiMixin(object):
|
|||
return self._result(res, True)
|
||||
|
||||
def create_host_config(self, *args, **kwargs):
|
||||
"""
|
||||
Create a dictionary for the ``host_config`` argument to
|
||||
:py:meth:`create_container`.
|
||||
|
||||
Args:
|
||||
binds (dict): Volumes to bind. See :py:meth:`create_container`
|
||||
for more information.
|
||||
blkio_weight_device: Block IO weight (relative device weight) in
|
||||
the form of: ``[{"Path": "device_path", "Weight": weight}]``.
|
||||
blkio_weight: Block IO weight (relative weight), accepts a weight
|
||||
value between 10 and 1000.
|
||||
cap_add (list of str): Add kernel capabilities. For example,
|
||||
``["SYS_ADMIN", "MKNOD"]``.
|
||||
cap_drop (list of str): Drop kernel capabilities.
|
||||
cpu_group (int): The length of a CPU period in microseconds.
|
||||
cpu_period (int): Microseconds of CPU time that the container can
|
||||
get in a CPU period.
|
||||
cpu_shares (int): CPU shares (relative weight).
|
||||
cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
|
||||
``0,1``).
|
||||
device_read_bps: Limit read rate (bytes per second) from a device
|
||||
in the form of: `[{"Path": "device_path", "Rate": rate}]`
|
||||
device_read_iops: Limit read rate (IO per second) from a device.
|
||||
device_write_bps: Limit write rate (bytes per second) from a
|
||||
device.
|
||||
device_write_iops: Limit write rate (IO per second) from a device.
|
||||
devices (list): Expose host devices to the container, as a list
|
||||
of strings in the form
|
||||
``<path_on_host>:<path_in_container>:<cgroup_permissions>``.
|
||||
|
||||
For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
|
||||
to have read-write access to the host's ``/dev/sda`` via a
|
||||
node named ``/dev/xvda`` inside the container.
|
||||
dns (list): Set custom DNS servers.
|
||||
dns_search (list): DNS search domains.
|
||||
extra_hosts (dict): Addtional hostnames to resolve inside the
|
||||
container, as a mapping of hostname to IP address.
|
||||
group_add (list): List of additional group names and/or IDs that
|
||||
the container process will run as.
|
||||
ipc_mode (str): Set the IPC mode for the container.
|
||||
isolation (str): Isolation technology to use. Default: `None`.
|
||||
links (dict or list of tuples): Either a dictionary mapping name
|
||||
to alias or as a list of ``(name, alias)`` tuples.
|
||||
log_config (dict): Logging configuration, as a dictionary with
|
||||
keys:
|
||||
|
||||
- ``type`` The logging driver name.
|
||||
- ``config`` A dictionary of configuration for the logging
|
||||
driver.
|
||||
|
||||
lxc_conf (dict): LXC config.
|
||||
mem_limit (float or str): Memory limit. Accepts float values
|
||||
(which represent the memory limit of the created container in
|
||||
bytes) or a string with a units identification char
|
||||
(``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
|
||||
specified without a units character, bytes are assumed as an
|
||||
mem_swappiness (int): Tune a container's memory swappiness
|
||||
behavior. Accepts number between 0 and 100.
|
||||
memswap_limit (str or int): Maximum amount of memory + swap a
|
||||
container is allowed to consume.
|
||||
network_mode (str): One of:
|
||||
|
||||
- ``bridge`` Create a new network stack for the container on
|
||||
on the bridge network.
|
||||
- ``none`` No networking for this container.
|
||||
- ``container:<name|id>`` Reuse another container's network
|
||||
stack.
|
||||
- ``host`` Use the host network stack.
|
||||
oom_kill_disable (bool): Whether to disable OOM killer.
|
||||
oom_score_adj (int): An integer value containing the score given
|
||||
to the container in order to tune OOM killer preferences.
|
||||
pid_mode (str): If set to ``host``, use the host PID namespace
|
||||
inside the container.
|
||||
pids_limit (int): Tune a container's pids limit. Set ``-1`` for
|
||||
unlimited.
|
||||
port_bindings (dict): See :py:meth:`create_container`
|
||||
for more information.
|
||||
privileged (bool): Give extended privileges to this container.
|
||||
publish_all_ports (bool): Publish all ports to the host.
|
||||
read_only (bool): Mount the container's root filesystem as read
|
||||
only.
|
||||
restart_policy (dict): Restart the container when it exits.
|
||||
Configured as a dictionary with keys:
|
||||
|
||||
- ``Name`` One of ``on-failure``, or ``always``.
|
||||
- ``MaximumRetryCount`` Number of times to restart the
|
||||
container on failure.
|
||||
security_opt (list): A list of string values to customize labels
|
||||
for MLS systems, such as SELinux.
|
||||
shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
|
||||
sysctls (dict): Kernel parameters to set in the container.
|
||||
tmpfs (dict): Temporary filesystems to mount, as a dictionary
|
||||
mapping a path inside the container to options for that path.
|
||||
|
||||
For example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
{
|
||||
'/mnt/vol2': '',
|
||||
'/mnt/vol1': 'size=3G,uid=1000'
|
||||
}
|
||||
|
||||
ulimits (list): Ulimits to set inside the container, as a list of
|
||||
dicts.
|
||||
userns_mode (str): Sets the user namespace mode for the container
|
||||
when user namespace remapping option is enabled. Supported
|
||||
values are: ``host``
|
||||
volumes_from (list): List of container names or IDs to get
|
||||
volumes from.
|
||||
|
||||
|
||||
Returns:
|
||||
(dict) A dictionary which can be passed to the ``host_config``
|
||||
argument to :py:meth:`create_container`.
|
||||
|
||||
Example:
|
||||
|
||||
>>> cli.create_host_config(privileged=True, cap_drop=['MKNOD'],
|
||||
volumes_from=['nostalgic_newton'])
|
||||
{'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True,
|
||||
'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False}
|
||||
|
||||
"""
|
||||
if not kwargs:
|
||||
kwargs = {}
|
||||
if 'version' in kwargs:
|
||||
|
|
@ -158,19 +585,98 @@ class ContainerApiMixin(object):
|
|||
return utils.create_host_config(*args, **kwargs)
|
||||
|
||||
def create_networking_config(self, *args, **kwargs):
|
||||
"""
|
||||
Create a networking config dictionary to be used as the
|
||||
``networking_config`` parameter in :py:meth:`create_container`.
|
||||
|
||||
Args:
|
||||
endpoints_config (dict): A dictionary mapping network names to
|
||||
endpoint configurations generated by
|
||||
:py:meth:`create_endpoint_config`.
|
||||
|
||||
Returns:
|
||||
(dict) A networking config.
|
||||
|
||||
Example:
|
||||
|
||||
>>> docker_client.create_network('network1')
|
||||
>>> networking_config = docker_client.create_networking_config({
|
||||
'network1': docker_client.create_endpoint_config()
|
||||
})
|
||||
>>> container = docker_client.create_container(
|
||||
img, command, networking_config=networking_config
|
||||
)
|
||||
|
||||
"""
|
||||
return create_networking_config(*args, **kwargs)
|
||||
|
||||
def create_endpoint_config(self, *args, **kwargs):
|
||||
"""
|
||||
Create an endpoint config dictionary to be used with
|
||||
:py:meth:`create_networking_config`.
|
||||
|
||||
Args:
|
||||
aliases (list): A list of aliases for this endpoint. Names in
|
||||
that list can be used within the network to reach the
|
||||
container. Defaults to ``None``.
|
||||
links (list): A list of links for this endpoint. Containers
|
||||
declared in this list will be linked to this container.
|
||||
Defaults to ``None``.
|
||||
ipv4_address (str): The IP address of this container on the
|
||||
network, using the IPv4 protocol. Defaults to ``None``.
|
||||
ipv6_address (str): The IP address of this container on the
|
||||
network, using the IPv6 protocol. Defaults to ``None``.
|
||||
link_local_ips (list): A list of link-local (IPv4/IPv6)
|
||||
addresses.
|
||||
|
||||
Returns:
|
||||
(dict) An endpoint config.
|
||||
|
||||
Example:
|
||||
|
||||
>>> endpoint_config = client.create_endpoint_config(
|
||||
aliases=['web', 'app'],
|
||||
links=['app_db'],
|
||||
ipv4_address='132.65.0.123'
|
||||
)
|
||||
|
||||
"""
|
||||
return create_endpoint_config(self._version, *args, **kwargs)
|
||||
|
||||
@utils.check_resource
|
||||
def diff(self, container):
|
||||
"""
|
||||
Inspect changes on a container's filesystem.
|
||||
|
||||
Args:
|
||||
container (str): The container to diff
|
||||
|
||||
Returns:
|
||||
(str)
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self._result(
|
||||
self._get(self._url("/containers/{0}/changes", container)), True
|
||||
)
|
||||
|
||||
@utils.check_resource
|
||||
def export(self, container):
|
||||
"""
|
||||
Export the contents of a filesystem as a tar archive.
|
||||
|
||||
Args:
|
||||
container (str): The container to export
|
||||
|
||||
Returns:
|
||||
(str): The filesystem tar archive
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
res = self._get(
|
||||
self._url("/containers/{0}/export", container), stream=True
|
||||
)
|
||||
|
|
@ -180,6 +686,22 @@ class ContainerApiMixin(object):
|
|||
@utils.check_resource
|
||||
@utils.minimum_version('1.20')
|
||||
def get_archive(self, container, path):
|
||||
"""
|
||||
Retrieve a file or folder from a container in the form of a tar
|
||||
archive.
|
||||
|
||||
Args:
|
||||
container (str): The container where the file is located
|
||||
path (str): Path to the file or folder to retrieve
|
||||
|
||||
Returns:
|
||||
(tuple): First element is a raw tar data stream. Second element is
|
||||
a dict containing ``stat`` information on the specified ``path``.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
params = {
|
||||
'path': path
|
||||
}
|
||||
|
|
@ -194,12 +716,37 @@ class ContainerApiMixin(object):
|
|||
|
||||
@utils.check_resource
|
||||
def inspect_container(self, container):
|
||||
"""
|
||||
Identical to the `docker inspect` command, but only for containers.
|
||||
|
||||
Args:
|
||||
container (str): The container to inspect
|
||||
|
||||
Returns:
|
||||
(dict): Similar to the output of `docker inspect`, but as a
|
||||
single dict
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self._result(
|
||||
self._get(self._url("/containers/{0}/json", container)), True
|
||||
)
|
||||
|
||||
@utils.check_resource
|
||||
def kill(self, container, signal=None):
|
||||
"""
|
||||
Kill a container or send a signal to a container.
|
||||
|
||||
Args:
|
||||
container (str): The container to kill
|
||||
signal (str or int): The signal to send. Defaults to ``SIGKILL``
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
url = self._url("/containers/{0}/kill", container)
|
||||
params = {}
|
||||
if signal is not None:
|
||||
|
|
@ -213,6 +760,32 @@ class ContainerApiMixin(object):
|
|||
@utils.check_resource
|
||||
def logs(self, container, stdout=True, stderr=True, stream=False,
|
||||
timestamps=False, tail='all', since=None, follow=None):
|
||||
"""
|
||||
Get logs from a container. Similar to the ``docker logs`` command.
|
||||
|
||||
The ``stream`` parameter makes the ``logs`` function return a blocking
|
||||
generator you can iterate over to retrieve log output as it happens.
|
||||
|
||||
Args:
|
||||
container (str): The container to get logs from
|
||||
stdout (bool): Get ``STDOUT``
|
||||
stderr (bool): Get ``STDERR``
|
||||
stream (bool): Stream the response
|
||||
timestamps (bool): Show timestamps
|
||||
tail (str or int): Output specified number of lines at the end of
|
||||
logs. Either an integer of number of lines or the string
|
||||
``all``. Default ``all``
|
||||
since (datetime or int): Show logs since a given datetime or
|
||||
integer epoch (in seconds)
|
||||
follow (bool): Follow log output
|
||||
|
||||
Returns:
|
||||
(generator or str)
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
if utils.compare_version('1.11', self._version) >= 0:
|
||||
if follow is None:
|
||||
follow = stream
|
||||
|
|
@ -249,12 +822,48 @@ class ContainerApiMixin(object):
|
|||
|
||||
@utils.check_resource
|
||||
def pause(self, container):
|
||||
"""
|
||||
Pauses all processes within a container.
|
||||
|
||||
Args:
|
||||
container (str): The container to pause
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
url = self._url('/containers/{0}/pause', container)
|
||||
res = self._post(url)
|
||||
self._raise_for_status(res)
|
||||
|
||||
@utils.check_resource
|
||||
def port(self, container, private_port):
|
||||
"""
|
||||
Lookup the public-facing port that is NAT-ed to ``private_port``.
|
||||
Identical to the ``docker port`` command.
|
||||
|
||||
Args:
|
||||
container (str): The container to look up
|
||||
private_port (int): The private port to inspect
|
||||
|
||||
Returns:
|
||||
(list of dict): The mapping for the host ports
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
|
||||
Example:
|
||||
.. code-block:: bash
|
||||
|
||||
$ docker run -d -p 80:80 ubuntu:14.04 /bin/sleep 30
|
||||
7174d6347063a83f412fad6124c99cffd25ffe1a0807eb4b7f9cec76ac8cb43b
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
>>> cli.port('7174d6347063', 80)
|
||||
[{'HostIp': '0.0.0.0', 'HostPort': '80'}]
|
||||
"""
|
||||
res = self._get(self._url("/containers/{0}/json", container))
|
||||
self._raise_for_status(res)
|
||||
json_ = res.json()
|
||||
|
|
@ -279,6 +888,26 @@ class ContainerApiMixin(object):
|
|||
@utils.check_resource
|
||||
@utils.minimum_version('1.20')
|
||||
def put_archive(self, container, path, data):
|
||||
"""
|
||||
Insert a file or folder in an existing container using a tar archive as
|
||||
source.
|
||||
|
||||
Args:
|
||||
container (str): The container where the file(s) will be extracted
|
||||
path (str): Path inside the container where the file(s) will be
|
||||
extracted. Must exist.
|
||||
data (bytes): tar data to be extracted
|
||||
|
||||
Returns:
|
||||
(bool): True if the call succeeds.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
|
||||
Raises:
|
||||
:py:class:`~docker.errors.APIError` If an error occurs.
|
||||
"""
|
||||
params = {'path': path}
|
||||
url = self._url('/containers/{0}/archive', container)
|
||||
res = self._put(url, params=params, data=data)
|
||||
|
|
@ -287,6 +916,21 @@ class ContainerApiMixin(object):
|
|||
|
||||
@utils.check_resource
|
||||
def remove_container(self, container, v=False, link=False, force=False):
|
||||
"""
|
||||
Remove a container. Similar to the ``docker rm`` command.
|
||||
|
||||
Args:
|
||||
container (str): The container to remove
|
||||
v (bool): Remove the volumes associated with the container
|
||||
link (bool): Remove the specified link and not the underlying
|
||||
container
|
||||
force (bool): Force the removal of a running container (uses
|
||||
``SIGKILL``)
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
params = {'v': v, 'link': link, 'force': force}
|
||||
res = self._delete(
|
||||
self._url("/containers/{0}", container), params=params
|
||||
|
|
@ -296,6 +940,17 @@ class ContainerApiMixin(object):
|
|||
@utils.minimum_version('1.17')
|
||||
@utils.check_resource
|
||||
def rename(self, container, name):
|
||||
"""
|
||||
Rename a container. Similar to the ``docker rename`` command.
|
||||
|
||||
Args:
|
||||
container (str): ID of the container to rename
|
||||
name (str): New name for the container
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
url = self._url("/containers/{0}/rename", container)
|
||||
params = {'name': name}
|
||||
res = self._post(url, params=params)
|
||||
|
|
@ -303,6 +958,18 @@ class ContainerApiMixin(object):
|
|||
|
||||
@utils.check_resource
|
||||
def resize(self, container, height, width):
|
||||
"""
|
||||
Resize the tty session.
|
||||
|
||||
Args:
|
||||
container (str or dict): The container to resize
|
||||
height (int): Height of tty session
|
||||
width (int): Width of tty session
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
params = {'h': height, 'w': width}
|
||||
url = self._url("/containers/{0}/resize", container)
|
||||
res = self._post(url, params=params)
|
||||
|
|
@ -310,6 +977,20 @@ class ContainerApiMixin(object):
|
|||
|
||||
@utils.check_resource
|
||||
def restart(self, container, timeout=10):
|
||||
"""
|
||||
Restart a container. Similar to the ``docker restart`` command.
|
||||
|
||||
Args:
|
||||
container (str or dict): The container to restart. If a dict, the
|
||||
``Id`` key is used.
|
||||
timeout (int): Number of seconds to try to stop for before killing
|
||||
the container. Once killed it will then be restarted. Default
|
||||
is 10 seconds.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
params = {'t': timeout}
|
||||
url = self._url("/containers/{0}/restart", container)
|
||||
res = self._post(url, params=params)
|
||||
|
|
@ -322,7 +1003,28 @@ class ContainerApiMixin(object):
|
|||
restart_policy=None, cap_add=None, cap_drop=None, devices=None,
|
||||
extra_hosts=None, read_only=None, pid_mode=None, ipc_mode=None,
|
||||
security_opt=None, ulimits=None):
|
||||
"""
|
||||
Start a container. Similar to the ``docker start`` command, but
|
||||
doesn't support attach options.
|
||||
|
||||
**Deprecation warning:** For API version > 1.15, it is highly
|
||||
recommended to provide host config options in the ``host_config``
|
||||
parameter of :py:meth:`~ContainerApiMixin.create_container`.
|
||||
|
||||
Args:
|
||||
container (str): The container to start
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
|
||||
Example:
|
||||
|
||||
>>> container = cli.create_container(
|
||||
... image='busybox:latest',
|
||||
... command='/bin/sleep 30')
|
||||
>>> cli.start(container=container.get('Id'))
|
||||
"""
|
||||
if utils.compare_version('1.10', self._version) < 0:
|
||||
if dns is not None:
|
||||
raise errors.InvalidVersion(
|
||||
|
|
@ -386,6 +1088,22 @@ class ContainerApiMixin(object):
|
|||
@utils.minimum_version('1.17')
|
||||
@utils.check_resource
|
||||
def stats(self, container, decode=None, stream=True):
|
||||
"""
|
||||
Stream statistics for a specific container. Similar to the
|
||||
``docker stats`` command.
|
||||
|
||||
Args:
|
||||
container (str): The container to stream statistics from
|
||||
decode (bool): If set to true, stream will be decoded into dicts
|
||||
on the fly. False by default.
|
||||
stream (bool): If set to false, only the current stats will be
|
||||
returned instead of a stream. True by default.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
|
||||
"""
|
||||
url = self._url("/containers/{0}/stats", container)
|
||||
if stream:
|
||||
return self._stream_helper(self._get(url, stream=True),
|
||||
|
|
@ -396,6 +1114,18 @@ class ContainerApiMixin(object):
|
|||
|
||||
@utils.check_resource
|
||||
def stop(self, container, timeout=10):
|
||||
"""
|
||||
Stops a container. Similar to the ``docker stop`` command.
|
||||
|
||||
Args:
|
||||
container (str): The container to stop
|
||||
timeout (int): Timeout in seconds to wait for the container to
|
||||
stop before sending a ``SIGKILL``. Default: 10
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
params = {'t': timeout}
|
||||
url = self._url("/containers/{0}/stop", container)
|
||||
|
||||
|
|
@ -405,6 +1135,20 @@ class ContainerApiMixin(object):
|
|||
|
||||
@utils.check_resource
|
||||
def top(self, container, ps_args=None):
|
||||
"""
|
||||
Display the running processes of a container.
|
||||
|
||||
Args:
|
||||
container (str): The container to inspect
|
||||
ps_args (str): An optional arguments passed to ps (e.g. ``aux``)
|
||||
|
||||
Returns:
|
||||
(str): The output of the top
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
u = self._url("/containers/{0}/top", container)
|
||||
params = {}
|
||||
if ps_args is not None:
|
||||
|
|
@ -413,6 +1157,12 @@ class ContainerApiMixin(object):
|
|||
|
||||
@utils.check_resource
|
||||
def unpause(self, container):
|
||||
"""
|
||||
Unpause all processes within a container.
|
||||
|
||||
Args:
|
||||
container (str): The container to unpause
|
||||
"""
|
||||
url = self._url('/containers/{0}/unpause', container)
|
||||
res = self._post(url)
|
||||
self._raise_for_status(res)
|
||||
|
|
@ -425,6 +1175,31 @@ class ContainerApiMixin(object):
|
|||
mem_reservation=None, memswap_limit=None, kernel_memory=None,
|
||||
restart_policy=None
|
||||
):
|
||||
"""
|
||||
Update resource configs of one or more containers.
|
||||
|
||||
Args:
|
||||
container (str): The container to inspect
|
||||
blkio_weight (int): Block IO (relative weight), between 10 and 1000
|
||||
cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
|
||||
cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
|
||||
cpu_shares (int): CPU shares (relative weight)
|
||||
cpuset_cpus (str): CPUs in which to allow execution
|
||||
cpuset_mems (str): MEMs in which to allow execution
|
||||
mem_limit (int or str): Memory limit
|
||||
mem_reservation (int or str): Memory soft limit
|
||||
memswap_limit (int or str): Total memory (memory + swap), -1 to
|
||||
disable swap
|
||||
kernel_memory (int or str): Kernel memory limit
|
||||
restart_policy (dict): Restart policy dictionary
|
||||
|
||||
Returns:
|
||||
(dict): Dictionary containing a ``Warnings`` key.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
url = self._url('/containers/{0}/update', container)
|
||||
data = {}
|
||||
if blkio_weight:
|
||||
|
|
@ -460,6 +1235,25 @@ class ContainerApiMixin(object):
|
|||
|
||||
@utils.check_resource
|
||||
def wait(self, container, timeout=None):
|
||||
"""
|
||||
Block until a container stops, then return its exit code. Similar to
|
||||
the ``docker wait`` command.
|
||||
|
||||
Args:
|
||||
container (str or dict): The container to wait on. If a dict, the
|
||||
``Id`` key is used.
|
||||
timeout (int): Request timeout
|
||||
|
||||
Returns:
|
||||
(int): The exit code of the container. Returns ``-1`` if the API
|
||||
responds without a ``StatusCode`` attribute.
|
||||
|
||||
Raises:
|
||||
:py:class:`requests.exceptions.ReadTimeout`
|
||||
If the timeout is exceeded.
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
url = self._url("/containers/{0}/wait", container)
|
||||
res = self._post(url, timeout=timeout)
|
||||
self._raise_for_status(res)
|
||||
|
|
|
|||
|
|
@ -2,13 +2,42 @@ import os
|
|||
import warnings
|
||||
from datetime import datetime
|
||||
|
||||
from ..auth import auth
|
||||
from .. import auth, utils
|
||||
from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING
|
||||
from ..utils import utils
|
||||
|
||||
|
||||
class DaemonApiMixin(object):
|
||||
def events(self, since=None, until=None, filters=None, decode=None):
|
||||
"""
|
||||
Get real-time events from the server. Similar to the ``docker events``
|
||||
command.
|
||||
|
||||
Args:
|
||||
since (UTC datetime or int): Get events from this point
|
||||
until (UTC datetime or int): Get events until this point
|
||||
filters (dict): Filter the events by event time, container or image
|
||||
decode (bool): If set to true, stream will be decoded into dicts on
|
||||
the fly. False by default.
|
||||
|
||||
Returns:
|
||||
(generator): A blocking generator you can iterate over to retrieve
|
||||
events as they happen.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
|
||||
Example:
|
||||
|
||||
>>> for event in client.events()
|
||||
... print event
|
||||
{u'from': u'image/with:tag',
|
||||
u'id': u'container-id',
|
||||
u'status': u'start',
|
||||
u'time': 1423339459}
|
||||
...
|
||||
"""
|
||||
|
||||
if isinstance(since, datetime):
|
||||
since = utils.datetime_to_timestamp(since)
|
||||
|
||||
|
|
@ -30,10 +59,42 @@ class DaemonApiMixin(object):
|
|||
)
|
||||
|
||||
def info(self):
|
||||
"""
|
||||
Display system-wide information. Identical to the ``docker info``
|
||||
command.
|
||||
|
||||
Returns:
|
||||
(dict): The info as a dict
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self._result(self._get(self._url("/info")), True)
|
||||
|
||||
def login(self, username, password=None, email=None, registry=None,
|
||||
reauth=False, insecure_registry=False, dockercfg_path=None):
|
||||
"""
|
||||
Authenticate with a registry. Similar to the ``docker login`` command.
|
||||
|
||||
Args:
|
||||
username (str): The registry username
|
||||
password (str): The plaintext password
|
||||
email (str): The email for the registry account
|
||||
registry (str): URL to the registry. E.g.
|
||||
``https://index.docker.io/v1/``
|
||||
reauth (bool): Whether refresh existing authentication on the
|
||||
Docker server.
|
||||
dockercfg_path (str): Use a custom path for the ``.dockercfg`` file
|
||||
(default ``$HOME/.dockercfg``)
|
||||
|
||||
Returns:
|
||||
(dict): The response from the login request
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
if insecure_registry:
|
||||
warnings.warn(
|
||||
INSECURE_REGISTRY_DEPRECATION_WARNING.format('login()'),
|
||||
|
|
@ -69,8 +130,30 @@ class DaemonApiMixin(object):
|
|||
return self._result(response, json=True)
|
||||
|
||||
def ping(self):
|
||||
return self._result(self._get(self._url('/_ping')))
|
||||
"""
|
||||
Checks the server is responsive. An exception will be raised if it
|
||||
isn't responding.
|
||||
|
||||
Returns:
|
||||
(bool) The response from the server.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self._result(self._get(self._url('/_ping'))) == 'OK'
|
||||
|
||||
def version(self, api_version=True):
|
||||
"""
|
||||
Returns version information from the server. Similar to the ``docker
|
||||
version`` command.
|
||||
|
||||
Returns:
|
||||
(dict): The server version information
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
url = self._url("/version", versioned_api=api_version)
|
||||
return self._result(self._get(url), json=True)
|
||||
|
|
|
|||
|
|
@ -9,6 +9,28 @@ class ExecApiMixin(object):
|
|||
@utils.check_resource
|
||||
def exec_create(self, container, cmd, stdout=True, stderr=True,
|
||||
stdin=False, tty=False, privileged=False, user=''):
|
||||
"""
|
||||
Sets up an exec instance in a running container.
|
||||
|
||||
Args:
|
||||
container (str): Target container where exec instance will be
|
||||
created
|
||||
cmd (str or list): Command to be executed
|
||||
stdout (bool): Attach to stdout. Default: ``True``
|
||||
stderr (bool): Attach to stderr. Default: ``True``
|
||||
stdin (bool): Attach to stdin. Default: ``False``
|
||||
tty (bool): Allocate a pseudo-TTY. Default: False
|
||||
privileged (bool): Run as privileged.
|
||||
user (str): User to execute command as. Default: root
|
||||
|
||||
Returns:
|
||||
(dict): A dictionary with an exec ``Id`` key.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
|
||||
if privileged and utils.compare_version('1.19', self._version) < 0:
|
||||
raise errors.InvalidVersion(
|
||||
'Privileged exec is not supported in API < 1.19'
|
||||
|
|
@ -37,6 +59,19 @@ class ExecApiMixin(object):
|
|||
|
||||
@utils.minimum_version('1.16')
|
||||
def exec_inspect(self, exec_id):
|
||||
"""
|
||||
Return low-level information about an exec command.
|
||||
|
||||
Args:
|
||||
exec_id (str): ID of the exec instance
|
||||
|
||||
Returns:
|
||||
(dict): Dictionary of values returned by the endpoint.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
if isinstance(exec_id, dict):
|
||||
exec_id = exec_id.get('Id')
|
||||
res = self._get(self._url("/exec/{0}/json", exec_id))
|
||||
|
|
@ -44,6 +79,15 @@ class ExecApiMixin(object):
|
|||
|
||||
@utils.minimum_version('1.15')
|
||||
def exec_resize(self, exec_id, height=None, width=None):
|
||||
"""
|
||||
Resize the tty session used by the specified exec command.
|
||||
|
||||
Args:
|
||||
exec_id (str): ID of the exec instance
|
||||
height (int): Height of tty session
|
||||
width (int): Width of tty session
|
||||
"""
|
||||
|
||||
if isinstance(exec_id, dict):
|
||||
exec_id = exec_id.get('Id')
|
||||
|
||||
|
|
@ -55,6 +99,24 @@ class ExecApiMixin(object):
|
|||
@utils.minimum_version('1.15')
|
||||
def exec_start(self, exec_id, detach=False, tty=False, stream=False,
|
||||
socket=False):
|
||||
"""
|
||||
Start a previously set up exec instance.
|
||||
|
||||
Args:
|
||||
exec_id (str): ID of the exec instance
|
||||
detach (bool): If true, detach from the exec command.
|
||||
Default: False
|
||||
tty (bool): Allocate a pseudo-TTY. Default: False
|
||||
stream (bool): Stream response data. Default: False
|
||||
|
||||
Returns:
|
||||
(generator or str): If ``stream=True``, a generator yielding
|
||||
response chunks. A string containing response data otherwise.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
# we want opened socket if socket == True
|
||||
if isinstance(exec_id, dict):
|
||||
exec_id = exec_id.get('Id')
|
||||
|
|
|
|||
|
|
@ -1,12 +1,11 @@
|
|||
import logging
|
||||
import os
|
||||
import six
|
||||
import warnings
|
||||
|
||||
from ..auth import auth
|
||||
import six
|
||||
|
||||
from .. import auth, errors, utils
|
||||
from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING
|
||||
from .. import utils
|
||||
from .. import errors
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
|
@ -15,17 +14,71 @@ class ImageApiMixin(object):
|
|||
|
||||
@utils.check_resource
|
||||
def get_image(self, image):
|
||||
"""
|
||||
Get a tarball of an image. Similar to the ``docker save`` command.
|
||||
|
||||
Args:
|
||||
image (str): Image name to get
|
||||
|
||||
Returns:
|
||||
(urllib3.response.HTTPResponse object): The response from the
|
||||
daemon.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
|
||||
Example:
|
||||
|
||||
>>> image = cli.get_image("fedora:latest")
|
||||
>>> f = open('/tmp/fedora-latest.tar', 'w')
|
||||
>>> f.write(image.data)
|
||||
>>> f.close()
|
||||
"""
|
||||
res = self._get(self._url("/images/{0}/get", image), stream=True)
|
||||
self._raise_for_status(res)
|
||||
return res.raw
|
||||
|
||||
@utils.check_resource
|
||||
def history(self, image):
|
||||
"""
|
||||
Show the history of an image.
|
||||
|
||||
Args:
|
||||
image (str): The image to show history for
|
||||
|
||||
Returns:
|
||||
(str): The history of the image
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
res = self._get(self._url("/images/{0}/history", image))
|
||||
return self._result(res, True)
|
||||
|
||||
def images(self, name=None, quiet=False, all=False, viz=False,
|
||||
filters=None):
|
||||
"""
|
||||
List images. Similar to the ``docker images`` command.
|
||||
|
||||
Args:
|
||||
name (str): Only show images belonging to the repository ``name``
|
||||
quiet (bool): Only return numeric IDs as a list.
|
||||
all (bool): Show intermediate image layers. By default, these are
|
||||
filtered out.
|
||||
filters (dict): Filters to be processed on the image list.
|
||||
Available filters:
|
||||
- ``dangling`` (bool)
|
||||
- ``label`` (str): format either ``key`` or ``key=value``
|
||||
|
||||
Returns:
|
||||
(dict or list): A list if ``quiet=True``, otherwise a dict.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
if viz:
|
||||
if utils.compare_version('1.7', self._version) >= 0:
|
||||
raise Exception('Viz output is not supported in API >= 1.7!')
|
||||
|
|
@ -45,6 +98,25 @@ class ImageApiMixin(object):
|
|||
|
||||
def import_image(self, src=None, repository=None, tag=None, image=None,
|
||||
changes=None, stream_src=False):
|
||||
"""
|
||||
Import an image. Similar to the ``docker import`` command.
|
||||
|
||||
If ``src`` is a string or unicode string, it will first be treated as a
|
||||
path to a tarball on the local system. If there is an error reading
|
||||
from that file, ``src`` will be treated as a URL instead to fetch the
|
||||
image from. You can also pass an open file handle as ``src``, in which
|
||||
case the data will be read from that file.
|
||||
|
||||
If ``src`` is unset but ``image`` is set, the ``image`` parameter will
|
||||
be taken as the name of an existing image to import from.
|
||||
|
||||
Args:
|
||||
src (str or file): Path to tarfile, URL, or file-like object
|
||||
repository (str): The repository to create
|
||||
tag (str): The tag to apply
|
||||
image (str): Use another image like the ``FROM`` Dockerfile
|
||||
parameter
|
||||
"""
|
||||
if not (src or image):
|
||||
raise errors.DockerException(
|
||||
'Must specify src or image to import from'
|
||||
|
|
@ -78,6 +150,16 @@ class ImageApiMixin(object):
|
|||
|
||||
def import_image_from_data(self, data, repository=None, tag=None,
|
||||
changes=None):
|
||||
"""
|
||||
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but
|
||||
allows importing in-memory bytes data.
|
||||
|
||||
Args:
|
||||
data (bytes collection): Bytes collection containing valid tar data
|
||||
repository (str): The repository to create
|
||||
tag (str): The tag to apply
|
||||
"""
|
||||
|
||||
u = self._url('/images/create')
|
||||
params = _import_image_params(
|
||||
repository, tag, src='-', changes=changes
|
||||
|
|
@ -91,6 +173,19 @@ class ImageApiMixin(object):
|
|||
|
||||
def import_image_from_file(self, filename, repository=None, tag=None,
|
||||
changes=None):
|
||||
"""
|
||||
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
|
||||
supports importing from a tar file on disk.
|
||||
|
||||
Args:
|
||||
filename (str): Full path to a tar file.
|
||||
repository (str): The repository to create
|
||||
tag (str): The tag to apply
|
||||
|
||||
Raises:
|
||||
IOError: File does not exist.
|
||||
"""
|
||||
|
||||
return self.import_image(
|
||||
src=filename, repository=repository, tag=tag, changes=changes
|
||||
)
|
||||
|
|
@ -104,12 +199,31 @@ class ImageApiMixin(object):
|
|||
|
||||
def import_image_from_url(self, url, repository=None, tag=None,
|
||||
changes=None):
|
||||
"""
|
||||
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
|
||||
supports importing from a URL.
|
||||
|
||||
Args:
|
||||
url (str): A URL pointing to a tar file.
|
||||
repository (str): The repository to create
|
||||
tag (str): The tag to apply
|
||||
"""
|
||||
return self.import_image(
|
||||
src=url, repository=repository, tag=tag, changes=changes
|
||||
)
|
||||
|
||||
def import_image_from_image(self, image, repository=None, tag=None,
|
||||
changes=None):
|
||||
"""
|
||||
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
|
||||
supports importing from another image, like the ``FROM`` Dockerfile
|
||||
parameter.
|
||||
|
||||
Args:
|
||||
image (str): Image name to import from
|
||||
repository (str): The repository to create
|
||||
tag (str): The tag to apply
|
||||
"""
|
||||
return self.import_image(
|
||||
image=image, repository=repository, tag=tag, changes=changes
|
||||
)
|
||||
|
|
@ -129,16 +243,75 @@ class ImageApiMixin(object):
|
|||
|
||||
@utils.check_resource
|
||||
def inspect_image(self, image):
|
||||
"""
|
||||
Get detailed information about an image. Similar to the ``docker
|
||||
inspect`` command, but only for containers.
|
||||
|
||||
Args:
|
||||
container (str): The container to inspect
|
||||
|
||||
Returns:
|
||||
(dict): Similar to the output of ``docker inspect``, but as a
|
||||
single dict
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self._result(
|
||||
self._get(self._url("/images/{0}/json", image)), True
|
||||
)
|
||||
|
||||
def load_image(self, data):
|
||||
"""
|
||||
Load an image that was previously saved using
|
||||
:py:meth:`~docker.api.image.ImageApiMixin.get_image` (or ``docker
|
||||
save``). Similar to ``docker load``.
|
||||
|
||||
Args:
|
||||
data (binary): Image data to be loaded.
|
||||
"""
|
||||
res = self._post(self._url("/images/load"), data=data)
|
||||
self._raise_for_status(res)
|
||||
|
||||
def pull(self, repository, tag=None, stream=False,
|
||||
insecure_registry=False, auth_config=None, decode=False):
|
||||
"""
|
||||
Pulls an image. Similar to the ``docker pull`` command.
|
||||
|
||||
Args:
|
||||
repository (str): The repository to pull
|
||||
tag (str): The tag to pull
|
||||
stream (bool): Stream the output as a generator
|
||||
insecure_registry (bool): Use an insecure registry
|
||||
auth_config (dict): Override the credentials that
|
||||
:py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
|
||||
this request. ``auth_config`` should contain the ``username``
|
||||
and ``password`` keys to be valid.
|
||||
|
||||
Returns:
|
||||
(generator or str): The output
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
|
||||
Example:
|
||||
|
||||
>>> for line in cli.pull('busybox', stream=True):
|
||||
... print(json.dumps(json.loads(line), indent=4))
|
||||
{
|
||||
"status": "Pulling image (latest) from busybox",
|
||||
"progressDetail": {},
|
||||
"id": "e72ac664f4f0"
|
||||
}
|
||||
{
|
||||
"status": "Pulling image (latest) from busybox, endpoint: ...",
|
||||
"progressDetail": {},
|
||||
"id": "e72ac664f4f0"
|
||||
}
|
||||
|
||||
"""
|
||||
if insecure_registry:
|
||||
warnings.warn(
|
||||
INSECURE_REGISTRY_DEPRECATION_WARNING.format('pull()'),
|
||||
|
|
@ -178,6 +351,38 @@ class ImageApiMixin(object):
|
|||
|
||||
def push(self, repository, tag=None, stream=False,
|
||||
insecure_registry=False, auth_config=None, decode=False):
|
||||
"""
|
||||
Push an image or a repository to the registry. Similar to the ``docker
|
||||
push`` command.
|
||||
|
||||
Args:
|
||||
repository (str): The repository to push to
|
||||
tag (str): An optional tag to push
|
||||
stream (bool): Stream the output as a blocking generator
|
||||
insecure_registry (bool): Use ``http://`` to connect to the
|
||||
registry
|
||||
auth_config (dict): Override the credentials that
|
||||
:py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
|
||||
this request. ``auth_config`` should contain the ``username``
|
||||
and ``password`` keys to be valid.
|
||||
|
||||
Returns:
|
||||
(generator or str): The output from the server.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
|
||||
Example:
|
||||
>>> for line in cli.push('yourname/app', stream=True):
|
||||
... print line
|
||||
{"status":"Pushing repository yourname/app (1 tags)"}
|
||||
{"status":"Pushing","progressDetail":{},"id":"511136ea3c5a"}
|
||||
{"status":"Image already pushed, skipping","progressDetail":{},
|
||||
"id":"511136ea3c5a"}
|
||||
...
|
||||
|
||||
"""
|
||||
if insecure_registry:
|
||||
warnings.warn(
|
||||
INSECURE_REGISTRY_DEPRECATION_WARNING.format('push()'),
|
||||
|
|
@ -215,11 +420,33 @@ class ImageApiMixin(object):
|
|||
|
||||
@utils.check_resource
|
||||
def remove_image(self, image, force=False, noprune=False):
|
||||
"""
|
||||
Remove an image. Similar to the ``docker rmi`` command.
|
||||
|
||||
Args:
|
||||
image (str): The image to remove
|
||||
force (bool): Force removal of the image
|
||||
noprune (bool): Do not delete untagged parents
|
||||
"""
|
||||
params = {'force': force, 'noprune': noprune}
|
||||
res = self._delete(self._url("/images/{0}", image), params=params)
|
||||
self._raise_for_status(res)
|
||||
|
||||
def search(self, term):
|
||||
"""
|
||||
Search for images on Docker Hub. Similar to the ``docker search``
|
||||
command.
|
||||
|
||||
Args:
|
||||
term (str): A term to search for.
|
||||
|
||||
Returns:
|
||||
(list of dicts): The response of the search.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self._result(
|
||||
self._get(self._url("/images/search"), params={'term': term}),
|
||||
True
|
||||
|
|
@ -227,6 +454,22 @@ class ImageApiMixin(object):
|
|||
|
||||
@utils.check_resource
|
||||
def tag(self, image, repository, tag=None, force=False):
|
||||
"""
|
||||
Tag an image into a repository. Similar to the ``docker tag`` command.
|
||||
|
||||
Args:
|
||||
image (str): The image to tag
|
||||
repository (str): The repository to set for the tag
|
||||
tag (str): The tag name
|
||||
force (bool): Force
|
||||
|
||||
Returns:
|
||||
(bool): ``True`` if successful
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
params = {
|
||||
'tag': tag,
|
||||
'repo': repository,
|
||||
|
|
|
|||
|
|
@ -8,6 +8,21 @@ from ..utils import version_lt
|
|||
class NetworkApiMixin(object):
|
||||
@minimum_version('1.21')
|
||||
def networks(self, names=None, ids=None):
|
||||
"""
|
||||
List networks. Similar to the ``docker networks ls`` command.
|
||||
|
||||
Args:
|
||||
names (list): List of names to filter by
|
||||
ids (list): List of ids to filter by
|
||||
|
||||
Returns:
|
||||
(dict): List of network objects.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
|
||||
filters = {}
|
||||
if names:
|
||||
filters['name'] = names
|
||||
|
|
@ -24,6 +39,51 @@ class NetworkApiMixin(object):
|
|||
def create_network(self, name, driver=None, options=None, ipam=None,
|
||||
check_duplicate=None, internal=False, labels=None,
|
||||
enable_ipv6=False):
|
||||
"""
|
||||
Create a network. Similar to the ``docker network create``.
|
||||
|
||||
Args:
|
||||
name (str): Name of the network
|
||||
driver (str): Name of the driver used to create the network
|
||||
options (dict): Driver options as a key-value dictionary
|
||||
ipam (dict): Optional custom IP scheme for the network.
|
||||
Created with :py:meth:`~docker.utils.create_ipam_config`.
|
||||
check_duplicate (bool): Request daemon to check for networks with
|
||||
same name. Default: ``True``.
|
||||
internal (bool): Restrict external access to the network. Default
|
||||
``False``.
|
||||
labels (dict): Map of labels to set on the network. Default
|
||||
``None``.
|
||||
enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
|
||||
|
||||
Returns:
|
||||
(dict): The created network reference object
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
|
||||
Example:
|
||||
A network using the bridge driver:
|
||||
|
||||
>>> client.create_network("network1", driver="bridge")
|
||||
|
||||
You can also create more advanced networks with custom IPAM
|
||||
configurations. For example, setting the subnet to
|
||||
``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
>>> ipam_pool = docker.utils.create_ipam_pool(
|
||||
subnet='192.168.52.0/24',
|
||||
gateway='192.168.52.254'
|
||||
)
|
||||
>>> ipam_config = docker.utils.create_ipam_config(
|
||||
pool_configs=[ipam_pool]
|
||||
)
|
||||
>>> docker_client.create_network("network1", driver="bridge",
|
||||
ipam=ipam_config)
|
||||
"""
|
||||
if options is not None and not isinstance(options, dict):
|
||||
raise TypeError('options must be a dictionary')
|
||||
|
||||
|
|
@ -63,12 +123,24 @@ class NetworkApiMixin(object):
|
|||
|
||||
@minimum_version('1.21')
|
||||
def remove_network(self, net_id):
|
||||
"""
|
||||
Remove a network. Similar to the ``docker network rm`` command.
|
||||
|
||||
Args:
|
||||
net_id (str): The network's id
|
||||
"""
|
||||
url = self._url("/networks/{0}", net_id)
|
||||
res = self._delete(url)
|
||||
self._raise_for_status(res)
|
||||
|
||||
@minimum_version('1.21')
|
||||
def inspect_network(self, net_id):
|
||||
"""
|
||||
Get detailed information about a network.
|
||||
|
||||
Args:
|
||||
net_id (str): ID of network
|
||||
"""
|
||||
url = self._url("/networks/{0}", net_id)
|
||||
res = self._get(url)
|
||||
return self._result(res, json=True)
|
||||
|
|
@ -79,6 +151,24 @@ class NetworkApiMixin(object):
|
|||
ipv4_address=None, ipv6_address=None,
|
||||
aliases=None, links=None,
|
||||
link_local_ips=None):
|
||||
"""
|
||||
Connect a container to a network.
|
||||
|
||||
Args:
|
||||
container (str): container-id/name to be connected to the network
|
||||
net_id (str): network id
|
||||
aliases (list): A list of aliases for this endpoint. Names in that
|
||||
list can be used within the network to reach the container.
|
||||
Defaults to ``None``.
|
||||
links (list): A list of links for this endpoint. Containers
|
||||
declared in this list will be linkedto this container.
|
||||
Defaults to ``None``.
|
||||
ipv4_address (str): The IP address of this container on the
|
||||
network, using the IPv4 protocol. Defaults to ``None``.
|
||||
ipv6_address (str): The IP address of this container on the
|
||||
network, using the IPv6 protocol. Defaults to ``None``.
|
||||
link_local_ips (list): A list of link-local (IPv4/IPv6) addresses.
|
||||
"""
|
||||
data = {
|
||||
"Container": container,
|
||||
"EndpointConfig": self.create_endpoint_config(
|
||||
|
|
@ -95,6 +185,16 @@ class NetworkApiMixin(object):
|
|||
@minimum_version('1.21')
|
||||
def disconnect_container_from_network(self, container, net_id,
|
||||
force=False):
|
||||
"""
|
||||
Disconnect a container from a network.
|
||||
|
||||
Args:
|
||||
container (str): container ID or name to be disconnected from the
|
||||
network
|
||||
net_id (str): network ID
|
||||
force (bool): Force the container to disconnect from a network.
|
||||
Default: ``False``
|
||||
"""
|
||||
data = {"Container": container}
|
||||
if force:
|
||||
if version_lt(self._version, '1.22'):
|
||||
|
|
|
|||
|
|
@ -1,8 +1,5 @@
|
|||
import warnings
|
||||
|
||||
from .. import errors
|
||||
from .. import utils
|
||||
from ..auth import auth
|
||||
from .. import auth, errors, utils
|
||||
|
||||
|
||||
class ServiceApiMixin(object):
|
||||
|
|
@ -12,6 +9,32 @@ class ServiceApiMixin(object):
|
|||
update_config=None, networks=None, endpoint_config=None,
|
||||
endpoint_spec=None
|
||||
):
|
||||
"""
|
||||
Create a service.
|
||||
|
||||
Args:
|
||||
task_template (dict): Specification of the task to start as part
|
||||
of the new service.
|
||||
name (string): User-defined name for the service. Optional.
|
||||
labels (dict): A map of labels to associate with the service.
|
||||
Optional.
|
||||
mode (string): Scheduling mode for the service (``replicated`` or
|
||||
``global``). Defaults to ``replicated``.
|
||||
update_config (dict): Specification for the update strategy of the
|
||||
service. Default: ``None``
|
||||
networks (list): List of network names or IDs to attach the
|
||||
service to. Default: ``None``.
|
||||
endpoint_config (dict): Properties that can be configured to
|
||||
access and load balance a service. Default: ``None``.
|
||||
|
||||
Returns:
|
||||
A dictionary containing an ``ID`` key for the newly created
|
||||
service.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
if endpoint_config is not None:
|
||||
warnings.warn(
|
||||
'endpoint_config has been renamed to endpoint_spec.',
|
||||
|
|
@ -46,18 +69,58 @@ class ServiceApiMixin(object):
|
|||
@utils.minimum_version('1.24')
|
||||
@utils.check_resource
|
||||
def inspect_service(self, service):
|
||||
"""
|
||||
Return information about a service.
|
||||
|
||||
Args:
|
||||
service (str): Service name or ID
|
||||
|
||||
Returns:
|
||||
``True`` if successful.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
url = self._url('/services/{0}', service)
|
||||
return self._result(self._get(url), True)
|
||||
|
||||
@utils.minimum_version('1.24')
|
||||
@utils.check_resource
|
||||
def inspect_task(self, task):
|
||||
"""
|
||||
Retrieve information about a task.
|
||||
|
||||
Args:
|
||||
task (str): Task ID
|
||||
|
||||
Returns:
|
||||
(dict): Information about the task.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
url = self._url('/tasks/{0}', task)
|
||||
return self._result(self._get(url), True)
|
||||
|
||||
@utils.minimum_version('1.24')
|
||||
@utils.check_resource
|
||||
def remove_service(self, service):
|
||||
"""
|
||||
Stop and remove a service.
|
||||
|
||||
Args:
|
||||
service (str): Service name or ID
|
||||
|
||||
Returns:
|
||||
``True`` if successful.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
|
||||
url = self._url('/services/{0}', service)
|
||||
resp = self._delete(url)
|
||||
self._raise_for_status(resp)
|
||||
|
|
@ -65,6 +128,20 @@ class ServiceApiMixin(object):
|
|||
|
||||
@utils.minimum_version('1.24')
|
||||
def services(self, filters=None):
|
||||
"""
|
||||
List services.
|
||||
|
||||
Args:
|
||||
filters (dict): Filters to process on the nodes list. Valid
|
||||
filters: ``id`` and ``name``. Default: ``None``.
|
||||
|
||||
Returns:
|
||||
A list of dictionaries containing data about each service.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
params = {
|
||||
'filters': utils.convert_filters(filters) if filters else None
|
||||
}
|
||||
|
|
@ -73,6 +150,22 @@ class ServiceApiMixin(object):
|
|||
|
||||
@utils.minimum_version('1.24')
|
||||
def tasks(self, filters=None):
|
||||
"""
|
||||
Retrieve a list of tasks.
|
||||
|
||||
Args:
|
||||
filters (dict): A map of filters to process on the tasks list.
|
||||
Valid filters: ``id``, ``name``, ``service``, ``node``,
|
||||
``label`` and ``desired-state``.
|
||||
|
||||
Returns:
|
||||
(list): List of task dictionaries.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
|
||||
params = {
|
||||
'filters': utils.convert_filters(filters) if filters else None
|
||||
}
|
||||
|
|
@ -85,7 +178,37 @@ class ServiceApiMixin(object):
|
|||
labels=None, mode=None, update_config=None,
|
||||
networks=None, endpoint_config=None,
|
||||
endpoint_spec=None):
|
||||
"""
|
||||
Update a service.
|
||||
|
||||
Args:
|
||||
service (string): A service identifier (either its name or service
|
||||
ID).
|
||||
version (int): The version number of the service object being
|
||||
updated. This is required to avoid conflicting writes.
|
||||
task_template (dict): Specification of the updated task to start
|
||||
as part of the service. See the [TaskTemplate
|
||||
class](#TaskTemplate) for details.
|
||||
name (string): New name for the service. Optional.
|
||||
labels (dict): A map of labels to associate with the service.
|
||||
Optional.
|
||||
mode (string): Scheduling mode for the service (``replicated`` or
|
||||
``global``). Defaults to ``replicated``.
|
||||
update_config (dict): Specification for the update strategy of the
|
||||
service. See the [UpdateConfig class](#UpdateConfig) for
|
||||
details. Default: ``None``.
|
||||
networks (list): List of network names or IDs to attach the
|
||||
service to. Default: ``None``.
|
||||
endpoint_config (dict): Properties that can be configured to
|
||||
access and load balance a service. Default: ``None``.
|
||||
|
||||
Returns:
|
||||
``True`` if successful.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
if endpoint_config is not None:
|
||||
warnings.warn(
|
||||
'endpoint_config has been renamed to endpoint_spec.',
|
||||
|
|
|
|||
|
|
@ -7,11 +7,87 @@ log = logging.getLogger(__name__)
|
|||
class SwarmApiMixin(object):
|
||||
|
||||
def create_swarm_spec(self, *args, **kwargs):
|
||||
"""
|
||||
Create a ``docker.types.SwarmSpec`` instance that can be used as the
|
||||
``swarm_spec`` argument in
|
||||
:py:meth:`~docker.api.swarm.SwarmApiMixin.init_swarm`.
|
||||
|
||||
Args:
|
||||
task_history_retention_limit (int): Maximum number of tasks
|
||||
history stored.
|
||||
snapshot_interval (int): Number of logs entries between snapshot.
|
||||
keep_old_snapshots (int): Number of snapshots to keep beyond the
|
||||
current snapshot.
|
||||
log_entries_for_slow_followers (int): Number of log entries to
|
||||
keep around to sync up slow followers after a snapshot is
|
||||
created.
|
||||
heartbeat_tick (int): Amount of ticks (in seconds) between each
|
||||
heartbeat.
|
||||
election_tick (int): Amount of ticks (in seconds) needed without a
|
||||
leader to trigger a new election.
|
||||
dispatcher_heartbeat_period (int): The delay for an agent to send
|
||||
a heartbeat to the dispatcher.
|
||||
node_cert_expiry (int): Automatic expiry for nodes certificates.
|
||||
external_ca (dict): Configuration for forwarding signing requests
|
||||
to an external certificate authority. Use
|
||||
``docker.types.SwarmExternalCA``.
|
||||
name (string): Swarm's name
|
||||
|
||||
Returns:
|
||||
``docker.types.SwarmSpec`` instance.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
|
||||
Example:
|
||||
|
||||
>>> spec = client.create_swarm_spec(
|
||||
snapshot_interval=5000, log_entries_for_slow_followers=1200
|
||||
)
|
||||
>>> client.init_swarm(
|
||||
advertise_addr='eth0', listen_addr='0.0.0.0:5000',
|
||||
force_new_cluster=False, swarm_spec=spec
|
||||
)
|
||||
"""
|
||||
return utils.SwarmSpec(*args, **kwargs)
|
||||
|
||||
@utils.minimum_version('1.24')
|
||||
def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
|
||||
force_new_cluster=False, swarm_spec=None):
|
||||
"""
|
||||
Initialize a new Swarm using the current connected engine as the first
|
||||
node.
|
||||
|
||||
Args:
|
||||
advertise_addr (string): Externally reachable address advertised
|
||||
to other nodes. This can either be an address/port combination
|
||||
in the form ``192.168.1.1:4567``, or an interface followed by a
|
||||
port number, like ``eth0:4567``. If the port number is omitted,
|
||||
the port number from the listen address is used. If
|
||||
``advertise_addr`` is not specified, it will be automatically
|
||||
detected when possible. Default: None
|
||||
listen_addr (string): Listen address used for inter-manager
|
||||
communication, as well as determining the networking interface
|
||||
used for the VXLAN Tunnel Endpoint (VTEP). This can either be
|
||||
an address/port combination in the form ``192.168.1.1:4567``,
|
||||
or an interface followed by a port number, like ``eth0:4567``.
|
||||
If the port number is omitted, the default swarm listening port
|
||||
is used. Default: '0.0.0.0:2377'
|
||||
force_new_cluster (bool): Force creating a new Swarm, even if
|
||||
already part of one. Default: False
|
||||
swarm_spec (dict): Configuration settings of the new Swarm. Use
|
||||
``Client.create_swarm_spec`` to generate a valid
|
||||
configuration. Default: None
|
||||
|
||||
Returns:
|
||||
``True`` if successful.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
|
||||
url = self._url('/swarm/init')
|
||||
if swarm_spec is not None and not isinstance(swarm_spec, dict):
|
||||
raise TypeError('swarm_spec must be a dictionary')
|
||||
|
|
@ -27,18 +103,67 @@ class SwarmApiMixin(object):
|
|||
|
||||
@utils.minimum_version('1.24')
|
||||
def inspect_swarm(self):
|
||||
"""
|
||||
Retrieve low-level information about the current swarm.
|
||||
|
||||
Returns:
|
||||
A dictionary containing data about the swarm.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
url = self._url('/swarm')
|
||||
return self._result(self._get(url), True)
|
||||
|
||||
@utils.check_resource
|
||||
@utils.minimum_version('1.24')
|
||||
def inspect_node(self, node_id):
|
||||
"""
|
||||
Retrieve low-level information about a swarm node
|
||||
|
||||
Args:
|
||||
node_id (string): ID of the node to be inspected.
|
||||
|
||||
Returns:
|
||||
A dictionary containing data about this node.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
url = self._url('/nodes/{0}', node_id)
|
||||
return self._result(self._get(url), True)
|
||||
|
||||
@utils.minimum_version('1.24')
|
||||
def join_swarm(self, remote_addrs, join_token, listen_addr=None,
|
||||
advertise_addr=None):
|
||||
"""
|
||||
Make this Engine join a swarm that has already been created.
|
||||
|
||||
Args:
|
||||
remote_addrs (list): Addresses of one or more manager nodes already
|
||||
participating in the Swarm to join.
|
||||
join_token (string): Secret token for joining this Swarm.
|
||||
listen_addr (string): Listen address used for inter-manager
|
||||
communication if the node gets promoted to manager, as well as
|
||||
determining the networking interface used for the VXLAN Tunnel
|
||||
Endpoint (VTEP). Default: ``None``
|
||||
advertise_addr (string): Externally reachable address advertised
|
||||
to other nodes. This can either be an address/port combination
|
||||
in the form ``192.168.1.1:4567``, or an interface followed by a
|
||||
port number, like ``eth0:4567``. If the port number is omitted,
|
||||
the port number from the listen address is used. If
|
||||
AdvertiseAddr is not specified, it will be automatically
|
||||
detected when possible. Default: ``None``
|
||||
|
||||
Returns:
|
||||
``True`` if the request went through.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
data = {
|
||||
"RemoteAddrs": remote_addrs,
|
||||
"ListenAddr": listen_addr,
|
||||
|
|
@ -52,6 +177,20 @@ class SwarmApiMixin(object):
|
|||
|
||||
@utils.minimum_version('1.24')
|
||||
def leave_swarm(self, force=False):
|
||||
"""
|
||||
Leave a swarm.
|
||||
|
||||
Args:
|
||||
force (bool): Leave the swarm even if this node is a manager.
|
||||
Default: ``False``
|
||||
|
||||
Returns:
|
||||
``True`` if the request went through.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
url = self._url('/swarm/leave')
|
||||
response = self._post(url, params={'force': force})
|
||||
# Ignore "this node is not part of a swarm" error
|
||||
|
|
@ -62,6 +201,21 @@ class SwarmApiMixin(object):
|
|||
|
||||
@utils.minimum_version('1.24')
|
||||
def nodes(self, filters=None):
|
||||
"""
|
||||
List swarm nodes.
|
||||
|
||||
Args:
|
||||
filters (dict): Filters to process on the nodes list. Valid
|
||||
filters: ``id``, ``name``, ``membership`` and ``role``.
|
||||
Default: ``None``
|
||||
|
||||
Returns:
|
||||
A list of dictionaries containing data about each swarm node.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
url = self._url('/nodes')
|
||||
params = {}
|
||||
if filters:
|
||||
|
|
@ -71,6 +225,34 @@ class SwarmApiMixin(object):
|
|||
|
||||
@utils.minimum_version('1.24')
|
||||
def update_node(self, node_id, version, node_spec=None):
|
||||
"""
|
||||
Update the Node's configuration
|
||||
|
||||
Args:
|
||||
|
||||
version (int): The version number of the node object being
|
||||
updated. This is required to avoid conflicting writes.
|
||||
node_spec (dict): Configuration settings to update. Any values
|
||||
not provided will be removed. Default: ``None``
|
||||
|
||||
Returns:
|
||||
`True` if the request went through.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
|
||||
Example:
|
||||
|
||||
>>> node_spec = {'Availability': 'active',
|
||||
'Name': 'node-name',
|
||||
'Role': 'manager',
|
||||
'Labels': {'foo': 'bar'}
|
||||
}
|
||||
>>> client.update_node(node_id='24ifsmvkjbyhk', version=8,
|
||||
node_spec=node_spec)
|
||||
|
||||
"""
|
||||
url = self._url('/nodes/{0}/update?version={1}', node_id, str(version))
|
||||
res = self._post_json(url, data=node_spec)
|
||||
self._raise_for_status(res)
|
||||
|
|
@ -79,6 +261,28 @@ class SwarmApiMixin(object):
|
|||
@utils.minimum_version('1.24')
|
||||
def update_swarm(self, version, swarm_spec=None, rotate_worker_token=False,
|
||||
rotate_manager_token=False):
|
||||
"""
|
||||
Update the Swarm's configuration
|
||||
|
||||
Args:
|
||||
version (int): The version number of the swarm object being
|
||||
updated. This is required to avoid conflicting writes.
|
||||
swarm_spec (dict): Configuration settings to update. Use
|
||||
:py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec` to
|
||||
generate a valid configuration. Default: ``None``.
|
||||
rotate_worker_token (bool): Rotate the worker join token. Default:
|
||||
``False``.
|
||||
rotate_manager_token (bool): Rotate the manager join token.
|
||||
Default: ``False``.
|
||||
|
||||
Returns:
|
||||
``True`` if the request went through.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
|
||||
url = self._url('/swarm/update')
|
||||
response = self._post_json(url, data=swarm_spec, params={
|
||||
'rotateWorkerToken': rotate_worker_token,
|
||||
|
|
|
|||
|
|
@ -5,6 +5,32 @@ from .. import utils
|
|||
class VolumeApiMixin(object):
|
||||
@utils.minimum_version('1.21')
|
||||
def volumes(self, filters=None):
|
||||
"""
|
||||
List volumes currently registered by the docker daemon. Similar to the
|
||||
``docker volume ls`` command.
|
||||
|
||||
Args:
|
||||
filters (dict): Server-side list filtering options.
|
||||
|
||||
Returns:
|
||||
(dict): Dictionary with list of volume objects as value of the
|
||||
``Volumes`` key.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
|
||||
Example:
|
||||
|
||||
>>> cli.volumes()
|
||||
{u'Volumes': [{u'Driver': u'local',
|
||||
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
|
||||
u'Name': u'foobar'},
|
||||
{u'Driver': u'local',
|
||||
u'Mountpoint': u'/var/lib/docker/volumes/baz/_data',
|
||||
u'Name': u'baz'}]}
|
||||
"""
|
||||
|
||||
params = {
|
||||
'filters': utils.convert_filters(filters) if filters else None
|
||||
}
|
||||
|
|
@ -13,6 +39,34 @@ class VolumeApiMixin(object):
|
|||
|
||||
@utils.minimum_version('1.21')
|
||||
def create_volume(self, name, driver=None, driver_opts=None, labels=None):
|
||||
"""
|
||||
Create and register a named volume
|
||||
|
||||
Args:
|
||||
name (str): Name of the volume
|
||||
driver (str): Name of the driver used to create the volume
|
||||
driver_opts (dict): Driver options as a key-value dictionary
|
||||
labels (dict): Labels to set on the volume
|
||||
|
||||
Returns:
|
||||
(dict): The created volume reference object
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
|
||||
Example:
|
||||
|
||||
>>> volume = cli.create_volume(name='foobar', driver='local',
|
||||
driver_opts={'foo': 'bar', 'baz': 'false'},
|
||||
labels={"key": "value"})
|
||||
>>> print(volume)
|
||||
{u'Driver': u'local',
|
||||
u'Labels': {u'key': u'value'},
|
||||
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
|
||||
u'Name': u'foobar'}
|
||||
|
||||
"""
|
||||
url = self._url('/volumes/create')
|
||||
if driver_opts is not None and not isinstance(driver_opts, dict):
|
||||
raise TypeError('driver_opts must be a dictionary')
|
||||
|
|
@ -36,11 +90,42 @@ class VolumeApiMixin(object):
|
|||
|
||||
@utils.minimum_version('1.21')
|
||||
def inspect_volume(self, name):
|
||||
"""
|
||||
Retrieve volume info by name.
|
||||
|
||||
Args:
|
||||
name (str): volume name
|
||||
|
||||
Returns:
|
||||
(dict): Volume information dictionary
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
|
||||
Example:
|
||||
|
||||
>>> cli.inspect_volume('foobar')
|
||||
{u'Driver': u'local',
|
||||
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
|
||||
u'Name': u'foobar'}
|
||||
|
||||
"""
|
||||
url = self._url('/volumes/{0}', name)
|
||||
return self._result(self._get(url), True)
|
||||
|
||||
@utils.minimum_version('1.21')
|
||||
def remove_volume(self, name):
|
||||
"""
|
||||
Remove a volume. Similar to the ``docker volume rm`` command.
|
||||
|
||||
Args:
|
||||
name (str): The volume's name
|
||||
|
||||
Raises:
|
||||
|
||||
``docker.errors.APIError``: If volume failed to remove.
|
||||
"""
|
||||
url = self._url('/volumes/{0}', name)
|
||||
resp = self._delete(url)
|
||||
self._raise_for_status(resp)
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@ import os
|
|||
import dockerpycreds
|
||||
import six
|
||||
|
||||
from .. import errors
|
||||
from . import errors
|
||||
|
||||
INDEX_NAME = 'docker.io'
|
||||
INDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)
|
||||
|
|
@ -1,8 +0,0 @@
|
|||
from .auth import (
|
||||
INDEX_NAME,
|
||||
INDEX_URL,
|
||||
encode_header,
|
||||
load_config,
|
||||
resolve_authconfig,
|
||||
resolve_repository_name,
|
||||
) # flake8: noqa
|
||||
539
docker/client.py
539
docker/client.py
|
|
@ -1,408 +1,167 @@
|
|||
import json
|
||||
import struct
|
||||
from functools import partial
|
||||
|
||||
import requests
|
||||
import requests.exceptions
|
||||
import six
|
||||
import websocket
|
||||
from .api.client import APIClient
|
||||
from .models.containers import ContainerCollection
|
||||
from .models.images import ImageCollection
|
||||
from .models.networks import NetworkCollection
|
||||
from .models.nodes import NodeCollection
|
||||
from .models.services import ServiceCollection
|
||||
from .models.swarm import Swarm
|
||||
from .models.volumes import VolumeCollection
|
||||
from .utils import kwargs_from_env
|
||||
|
||||
|
||||
from . import api
|
||||
from . import constants
|
||||
from . import errors
|
||||
from .auth import auth
|
||||
from .ssladapter import ssladapter
|
||||
from .tls import TLSConfig
|
||||
from .transport import UnixAdapter
|
||||
from .utils import utils, check_resource, update_headers, kwargs_from_env
|
||||
from .utils.socket import frames_iter
|
||||
try:
|
||||
from .transport import NpipeAdapter
|
||||
except ImportError:
|
||||
pass
|
||||
class Client(object):
|
||||
"""
|
||||
A client for communicating with a Docker server.
|
||||
|
||||
Example:
|
||||
|
||||
def from_env(**kwargs):
|
||||
return Client.from_env(**kwargs)
|
||||
>>> import docker
|
||||
>>> client = Client(base_url='unix://var/run/docker.sock')
|
||||
|
||||
|
||||
class Client(
|
||||
requests.Session,
|
||||
api.BuildApiMixin,
|
||||
api.ContainerApiMixin,
|
||||
api.DaemonApiMixin,
|
||||
api.ExecApiMixin,
|
||||
api.ImageApiMixin,
|
||||
api.NetworkApiMixin,
|
||||
api.ServiceApiMixin,
|
||||
api.SwarmApiMixin,
|
||||
api.VolumeApiMixin):
|
||||
def __init__(self, base_url=None, version=None,
|
||||
timeout=constants.DEFAULT_TIMEOUT_SECONDS, tls=False,
|
||||
user_agent=constants.DEFAULT_USER_AGENT,
|
||||
num_pools=constants.DEFAULT_NUM_POOLS):
|
||||
super(Client, self).__init__()
|
||||
|
||||
if tls and not base_url:
|
||||
raise errors.TLSParameterError(
|
||||
'If using TLS, the base_url argument must be provided.'
|
||||
)
|
||||
|
||||
self.base_url = base_url
|
||||
self.timeout = timeout
|
||||
self.headers['User-Agent'] = user_agent
|
||||
|
||||
self._auth_configs = auth.load_config()
|
||||
|
||||
base_url = utils.parse_host(
|
||||
base_url, constants.IS_WINDOWS_PLATFORM, tls=bool(tls)
|
||||
)
|
||||
if base_url.startswith('http+unix://'):
|
||||
self._custom_adapter = UnixAdapter(
|
||||
base_url, timeout, pool_connections=num_pools
|
||||
)
|
||||
self.mount('http+docker://', self._custom_adapter)
|
||||
self._unmount('http://', 'https://')
|
||||
self.base_url = 'http+docker://localunixsocket'
|
||||
elif base_url.startswith('npipe://'):
|
||||
if not constants.IS_WINDOWS_PLATFORM:
|
||||
raise errors.DockerException(
|
||||
'The npipe:// protocol is only supported on Windows'
|
||||
)
|
||||
try:
|
||||
self._custom_adapter = NpipeAdapter(
|
||||
base_url, timeout, pool_connections=num_pools
|
||||
)
|
||||
except NameError:
|
||||
raise errors.DockerException(
|
||||
'Install pypiwin32 package to enable npipe:// support'
|
||||
)
|
||||
self.mount('http+docker://', self._custom_adapter)
|
||||
self.base_url = 'http+docker://localnpipe'
|
||||
else:
|
||||
# Use SSLAdapter for the ability to specify SSL version
|
||||
if isinstance(tls, TLSConfig):
|
||||
tls.configure_client(self)
|
||||
elif tls:
|
||||
self._custom_adapter = ssladapter.SSLAdapter(
|
||||
pool_connections=num_pools
|
||||
)
|
||||
self.mount('https://', self._custom_adapter)
|
||||
self.base_url = base_url
|
||||
|
||||
# version detection needs to be after unix adapter mounting
|
||||
if version is None:
|
||||
self._version = constants.DEFAULT_DOCKER_API_VERSION
|
||||
elif isinstance(version, six.string_types):
|
||||
if version.lower() == 'auto':
|
||||
self._version = self._retrieve_server_version()
|
||||
else:
|
||||
self._version = version
|
||||
else:
|
||||
raise errors.DockerException(
|
||||
'Version parameter must be a string or None. Found {0}'.format(
|
||||
type(version).__name__
|
||||
)
|
||||
)
|
||||
Args:
|
||||
base_url (str): URL to the Docker server. For example,
|
||||
``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
|
||||
version (str): The version of the API to use. Set to ``auto`` to
|
||||
automatically detect the server's version. Default: ``1.24``
|
||||
timeout (int): Default timeout for API calls, in seconds.
|
||||
tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
|
||||
``True`` to enable it with default options, or pass a
|
||||
:py:class:`~docker.tls.TLSConfig` object to use custom
|
||||
configuration.
|
||||
user_agent (str): Set a custom user agent for requests to the server.
|
||||
"""
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.api = APIClient(*args, **kwargs)
|
||||
|
||||
@classmethod
|
||||
def from_env(cls, **kwargs):
|
||||
"""
|
||||
Return a client configured from environment variables.
|
||||
|
||||
The environment variables used are the same as those used by the
|
||||
Docker command-line client. They are:
|
||||
|
||||
.. envvar:: DOCKER_HOST
|
||||
|
||||
The URL to the Docker host.
|
||||
|
||||
.. envvar:: DOCKER_TLS_VERIFY
|
||||
|
||||
Verify the host against a CA certificate.
|
||||
|
||||
.. envvar:: DOCKER_CERT_PATH
|
||||
|
||||
A path to a directory containing TLS certificates to use when
|
||||
connecting to the Docker host.
|
||||
|
||||
Args:
|
||||
version (str): The version of the API to use. Set to ``auto`` to
|
||||
automatically detect the server's version. Default: ``1.24``
|
||||
timeout (int): Default timeout for API calls, in seconds.
|
||||
ssl_version (int): A valid `SSL version`_.
|
||||
assert_hostname (bool): Verify the hostname of the server.
|
||||
environment (dict): The environment to read environment variables
|
||||
from. Default: the value of ``os.environ``
|
||||
|
||||
Example:
|
||||
|
||||
>>> import docker
|
||||
>>> client = docker.from_env()
|
||||
|
||||
.. _`SSL version`:
|
||||
https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
|
||||
"""
|
||||
timeout = kwargs.pop('timeout', None)
|
||||
version = kwargs.pop('version', None)
|
||||
return cls(timeout=timeout, version=version,
|
||||
**kwargs_from_env(**kwargs))
|
||||
|
||||
def _retrieve_server_version(self):
|
||||
try:
|
||||
return self.version(api_version=False)["ApiVersion"]
|
||||
except KeyError:
|
||||
raise errors.DockerException(
|
||||
'Invalid response from docker daemon: key "ApiVersion"'
|
||||
' is missing.'
|
||||
)
|
||||
except Exception as e:
|
||||
raise errors.DockerException(
|
||||
'Error while fetching server API version: {0}'.format(e)
|
||||
)
|
||||
|
||||
def _set_request_timeout(self, kwargs):
|
||||
"""Prepare the kwargs for an HTTP request by inserting the timeout
|
||||
parameter, if not already present."""
|
||||
kwargs.setdefault('timeout', self.timeout)
|
||||
return kwargs
|
||||
|
||||
@update_headers
|
||||
def _post(self, url, **kwargs):
|
||||
return self.post(url, **self._set_request_timeout(kwargs))
|
||||
|
||||
@update_headers
|
||||
def _get(self, url, **kwargs):
|
||||
return self.get(url, **self._set_request_timeout(kwargs))
|
||||
|
||||
@update_headers
|
||||
def _put(self, url, **kwargs):
|
||||
return self.put(url, **self._set_request_timeout(kwargs))
|
||||
|
||||
@update_headers
|
||||
def _delete(self, url, **kwargs):
|
||||
return self.delete(url, **self._set_request_timeout(kwargs))
|
||||
|
||||
def _url(self, pathfmt, *args, **kwargs):
|
||||
for arg in args:
|
||||
if not isinstance(arg, six.string_types):
|
||||
raise ValueError(
|
||||
'Expected a string but found {0} ({1}) '
|
||||
'instead'.format(arg, type(arg))
|
||||
)
|
||||
|
||||
quote_f = partial(six.moves.urllib.parse.quote_plus, safe="/:")
|
||||
args = map(quote_f, args)
|
||||
|
||||
if kwargs.get('versioned_api', True):
|
||||
return '{0}/v{1}{2}'.format(
|
||||
self.base_url, self._version, pathfmt.format(*args)
|
||||
)
|
||||
else:
|
||||
return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
|
||||
|
||||
def _raise_for_status(self, response, explanation=None):
|
||||
"""Raises stored :class:`APIError`, if one occurred."""
|
||||
try:
|
||||
response.raise_for_status()
|
||||
except requests.exceptions.HTTPError as e:
|
||||
if e.response.status_code == 404:
|
||||
raise errors.NotFound(e, response, explanation=explanation)
|
||||
raise errors.APIError(e, response, explanation=explanation)
|
||||
|
||||
def _result(self, response, json=False, binary=False):
|
||||
assert not (json and binary)
|
||||
self._raise_for_status(response)
|
||||
|
||||
if json:
|
||||
return response.json()
|
||||
if binary:
|
||||
return response.content
|
||||
return response.text
|
||||
|
||||
def _post_json(self, url, data, **kwargs):
|
||||
# Go <1.1 can't unserialize null to a string
|
||||
# so we do this disgusting thing here.
|
||||
data2 = {}
|
||||
if data is not None:
|
||||
for k, v in six.iteritems(data):
|
||||
if v is not None:
|
||||
data2[k] = v
|
||||
|
||||
if 'headers' not in kwargs:
|
||||
kwargs['headers'] = {}
|
||||
kwargs['headers']['Content-Type'] = 'application/json'
|
||||
return self._post(url, data=json.dumps(data2), **kwargs)
|
||||
|
||||
def _attach_params(self, override=None):
|
||||
return override or {
|
||||
'stdout': 1,
|
||||
'stderr': 1,
|
||||
'stream': 1
|
||||
}
|
||||
|
||||
@check_resource
|
||||
def _attach_websocket(self, container, params=None):
|
||||
url = self._url("/containers/{0}/attach/ws", container)
|
||||
req = requests.Request("POST", url, params=self._attach_params(params))
|
||||
full_url = req.prepare().url
|
||||
full_url = full_url.replace("http://", "ws://", 1)
|
||||
full_url = full_url.replace("https://", "wss://", 1)
|
||||
return self._create_websocket_connection(full_url)
|
||||
|
||||
def _create_websocket_connection(self, url):
|
||||
return websocket.create_connection(url)
|
||||
|
||||
def _get_raw_response_socket(self, response):
|
||||
self._raise_for_status(response)
|
||||
if self.base_url == "http+docker://localnpipe":
|
||||
sock = response.raw._fp.fp.raw.sock
|
||||
elif six.PY3:
|
||||
sock = response.raw._fp.fp.raw
|
||||
if self.base_url.startswith("https://"):
|
||||
sock = sock._sock
|
||||
else:
|
||||
sock = response.raw._fp.fp._sock
|
||||
try:
|
||||
# Keep a reference to the response to stop it being garbage
|
||||
# collected. If the response is garbage collected, it will
|
||||
# close TLS sockets.
|
||||
sock._response = response
|
||||
except AttributeError:
|
||||
# UNIX sockets can't have attributes set on them, but that's
|
||||
# fine because we won't be doing TLS over them
|
||||
pass
|
||||
|
||||
return sock
|
||||
|
||||
def _stream_helper(self, response, decode=False):
|
||||
"""Generator for data coming from a chunked-encoded HTTP response."""
|
||||
if response.raw._fp.chunked:
|
||||
reader = response.raw
|
||||
while not reader.closed:
|
||||
# this read call will block until we get a chunk
|
||||
data = reader.read(1)
|
||||
if not data:
|
||||
break
|
||||
if reader._fp.chunk_left:
|
||||
data += reader.read(reader._fp.chunk_left)
|
||||
if decode:
|
||||
if six.PY3:
|
||||
data = data.decode('utf-8')
|
||||
# remove the trailing newline
|
||||
data = data.strip()
|
||||
# split the data at any newlines
|
||||
data_list = data.split("\r\n")
|
||||
# load and yield each line seperately
|
||||
for data in data_list:
|
||||
data = json.loads(data)
|
||||
yield data
|
||||
else:
|
||||
yield data
|
||||
else:
|
||||
# Response isn't chunked, meaning we probably
|
||||
# encountered an error immediately
|
||||
yield self._result(response, json=decode)
|
||||
|
||||
def _multiplexed_buffer_helper(self, response):
|
||||
"""A generator of multiplexed data blocks read from a buffered
|
||||
response."""
|
||||
buf = self._result(response, binary=True)
|
||||
walker = 0
|
||||
while True:
|
||||
if len(buf[walker:]) < 8:
|
||||
break
|
||||
_, length = struct.unpack_from('>BxxxL', buf[walker:])
|
||||
start = walker + constants.STREAM_HEADER_SIZE_BYTES
|
||||
end = start + length
|
||||
walker = end
|
||||
yield buf[start:end]
|
||||
|
||||
def _multiplexed_response_stream_helper(self, response):
|
||||
"""A generator of multiplexed data blocks coming from a response
|
||||
stream."""
|
||||
|
||||
# Disable timeout on the underlying socket to prevent
|
||||
# Read timed out(s) for long running processes
|
||||
socket = self._get_raw_response_socket(response)
|
||||
self._disable_socket_timeout(socket)
|
||||
|
||||
while True:
|
||||
header = response.raw.read(constants.STREAM_HEADER_SIZE_BYTES)
|
||||
if not header:
|
||||
break
|
||||
_, length = struct.unpack('>BxxxL', header)
|
||||
if not length:
|
||||
continue
|
||||
data = response.raw.read(length)
|
||||
if not data:
|
||||
break
|
||||
yield data
|
||||
|
||||
def _stream_raw_result_old(self, response):
|
||||
''' Stream raw output for API versions below 1.6 '''
|
||||
self._raise_for_status(response)
|
||||
for line in response.iter_lines(chunk_size=1,
|
||||
decode_unicode=True):
|
||||
# filter out keep-alive new lines
|
||||
if line:
|
||||
yield line
|
||||
|
||||
def _stream_raw_result(self, response):
|
||||
''' Stream result for TTY-enabled container above API 1.6 '''
|
||||
self._raise_for_status(response)
|
||||
for out in response.iter_content(chunk_size=1, decode_unicode=True):
|
||||
yield out
|
||||
|
||||
def _read_from_socket(self, response, stream):
|
||||
socket = self._get_raw_response_socket(response)
|
||||
|
||||
if stream:
|
||||
return frames_iter(socket)
|
||||
else:
|
||||
return six.binary_type().join(frames_iter(socket))
|
||||
|
||||
def _disable_socket_timeout(self, socket):
|
||||
""" Depending on the combination of python version and whether we're
|
||||
connecting over http or https, we might need to access _sock, which
|
||||
may or may not exist; or we may need to just settimeout on socket
|
||||
itself, which also may or may not have settimeout on it. To avoid
|
||||
missing the correct one, we try both.
|
||||
|
||||
We also do not want to set the timeout if it is already disabled, as
|
||||
you run the risk of changing a socket that was non-blocking to
|
||||
blocking, for example when using gevent.
|
||||
# Resources
|
||||
@property
|
||||
def containers(self):
|
||||
"""
|
||||
sockets = [socket, getattr(socket, '_sock', None)]
|
||||
|
||||
for s in sockets:
|
||||
if not hasattr(s, 'settimeout'):
|
||||
continue
|
||||
|
||||
timeout = -1
|
||||
|
||||
if hasattr(s, 'gettimeout'):
|
||||
timeout = s.gettimeout()
|
||||
|
||||
# Don't change the timeout if it is already disabled.
|
||||
if timeout is None or timeout == 0.0:
|
||||
continue
|
||||
|
||||
s.settimeout(None)
|
||||
|
||||
def _get_result(self, container, stream, res):
|
||||
cont = self.inspect_container(container)
|
||||
return self._get_result_tty(stream, res, cont['Config']['Tty'])
|
||||
|
||||
def _get_result_tty(self, stream, res, is_tty):
|
||||
# Stream multi-plexing was only introduced in API v1.6. Anything
|
||||
# before that needs old-style streaming.
|
||||
if utils.compare_version('1.6', self._version) < 0:
|
||||
return self._stream_raw_result_old(res)
|
||||
|
||||
# We should also use raw streaming (without keep-alives)
|
||||
# if we're dealing with a tty-enabled container.
|
||||
if is_tty:
|
||||
return self._stream_raw_result(res) if stream else \
|
||||
self._result(res, binary=True)
|
||||
|
||||
self._raise_for_status(res)
|
||||
sep = six.binary_type()
|
||||
if stream:
|
||||
return self._multiplexed_response_stream_helper(res)
|
||||
else:
|
||||
return sep.join(
|
||||
[x for x in self._multiplexed_buffer_helper(res)]
|
||||
)
|
||||
|
||||
def _unmount(self, *args):
|
||||
for proto in args:
|
||||
self.adapters.pop(proto)
|
||||
|
||||
def get_adapter(self, url):
|
||||
try:
|
||||
return super(Client, self).get_adapter(url)
|
||||
except requests.exceptions.InvalidSchema as e:
|
||||
if self._custom_adapter:
|
||||
return self._custom_adapter
|
||||
else:
|
||||
raise e
|
||||
An object for managing containers on the server. See the
|
||||
:doc:`containers documentation <containers>` for full details.
|
||||
"""
|
||||
return ContainerCollection(client=self)
|
||||
|
||||
@property
|
||||
def api_version(self):
|
||||
return self._version
|
||||
def images(self):
|
||||
"""
|
||||
An object for managing images on the server. See the
|
||||
:doc:`images documentation <images>` for full details.
|
||||
"""
|
||||
return ImageCollection(client=self)
|
||||
|
||||
@property
|
||||
def networks(self):
|
||||
"""
|
||||
An object for managing networks on the server. See the
|
||||
:doc:`networks documentation <networks>` for full details.
|
||||
"""
|
||||
return NetworkCollection(client=self)
|
||||
|
||||
@property
|
||||
def nodes(self):
|
||||
"""
|
||||
An object for managing nodes on the server. See the
|
||||
:doc:`nodes documentation <nodes>` for full details.
|
||||
"""
|
||||
return NodeCollection(client=self)
|
||||
|
||||
@property
|
||||
def services(self):
|
||||
"""
|
||||
An object for managing services on the server. See the
|
||||
:doc:`services documentation <services>` for full details.
|
||||
"""
|
||||
return ServiceCollection(client=self)
|
||||
|
||||
@property
|
||||
def swarm(self):
|
||||
"""
|
||||
An object for managing a swarm on the server. See the
|
||||
:doc:`swarm documentation <swarm>` for full details.
|
||||
"""
|
||||
return Swarm(client=self)
|
||||
|
||||
@property
|
||||
def volumes(self):
|
||||
"""
|
||||
An object for managing volumes on the server. See the
|
||||
:doc:`volumes documentation <volumes>` for full details.
|
||||
"""
|
||||
return VolumeCollection(client=self)
|
||||
|
||||
# Top-level methods
|
||||
def events(self, *args, **kwargs):
|
||||
return self.api.events(*args, **kwargs)
|
||||
events.__doc__ = APIClient.events.__doc__
|
||||
|
||||
def info(self, *args, **kwargs):
|
||||
return self.api.info(*args, **kwargs)
|
||||
info.__doc__ = APIClient.info.__doc__
|
||||
|
||||
def login(self, *args, **kwargs):
|
||||
return self.api.login(*args, **kwargs)
|
||||
login.__doc__ = APIClient.login.__doc__
|
||||
|
||||
def ping(self, *args, **kwargs):
|
||||
return self.api.ping(*args, **kwargs)
|
||||
ping.__doc__ = APIClient.ping.__doc__
|
||||
|
||||
def version(self, *args, **kwargs):
|
||||
return self.api.version(*args, **kwargs)
|
||||
version.__doc__ = APIClient.version.__doc__
|
||||
|
||||
def __getattr__(self, name):
|
||||
s = ["'Client' object has no attribute '{}'".format(name)]
|
||||
# If a user calls a method on APIClient, they
|
||||
if hasattr(APIClient, name):
|
||||
s.append("In docker-py 2.0, this method is now on the object "
|
||||
"APIClient. See the low-level API section of the "
|
||||
"documentation for more details.".format(name))
|
||||
raise AttributeError(' '.join(s))
|
||||
|
||||
|
||||
class AutoVersionClient(Client):
|
||||
def __init__(self, *args, **kwargs):
|
||||
if 'version' in kwargs and kwargs['version']:
|
||||
raise errors.DockerException(
|
||||
'Can not specify version for AutoVersionClient'
|
||||
)
|
||||
kwargs['version'] = 'auto'
|
||||
super(AutoVersionClient, self).__init__(*args, **kwargs)
|
||||
from_env = Client.from_env
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@ import sys
|
|||
from .version import version
|
||||
|
||||
DEFAULT_DOCKER_API_VERSION = '1.24'
|
||||
MINIMUM_DOCKER_API_VERSION = '1.24'
|
||||
DEFAULT_TIMEOUT_SECONDS = 60
|
||||
STREAM_HEADER_SIZE_BYTES = 8
|
||||
CONTAINER_LIMITS_KEYS = [
|
||||
|
|
|
|||
|
|
@ -1,21 +1,44 @@
|
|||
import requests
|
||||
|
||||
|
||||
class APIError(requests.exceptions.HTTPError):
|
||||
def __init__(self, message, response, explanation=None):
|
||||
class DockerException(Exception):
|
||||
"""
|
||||
A base class from which all other exceptions inherit.
|
||||
|
||||
If you want to catch all errors that the Docker SDK might raise,
|
||||
catch this base exception.
|
||||
"""
|
||||
|
||||
|
||||
def create_api_error_from_http_exception(e):
|
||||
"""
|
||||
Create a suitable APIError from requests.exceptions.HTTPError.
|
||||
"""
|
||||
response = e.response
|
||||
try:
|
||||
explanation = response.json()['message']
|
||||
except ValueError:
|
||||
explanation = response.content.strip()
|
||||
cls = APIError
|
||||
if response.status_code == 404:
|
||||
if explanation and 'No such image' in str(explanation):
|
||||
cls = ImageNotFound
|
||||
else:
|
||||
cls = NotFound
|
||||
raise cls(e, response=response, explanation=explanation)
|
||||
|
||||
|
||||
class APIError(requests.exceptions.HTTPError, DockerException):
|
||||
"""
|
||||
An HTTP error from the API.
|
||||
"""
|
||||
def __init__(self, message, response=None, explanation=None):
|
||||
# requests 1.2 supports response as a keyword argument, but
|
||||
# requests 1.1 doesn't
|
||||
super(APIError, self).__init__(message)
|
||||
self.response = response
|
||||
|
||||
self.explanation = explanation
|
||||
|
||||
if self.explanation is None and response.content:
|
||||
try:
|
||||
self.explanation = response.json()['message']
|
||||
except ValueError:
|
||||
self.explanation = response.content.strip()
|
||||
|
||||
def __str__(self):
|
||||
message = super(APIError, self).__str__()
|
||||
|
||||
|
|
@ -32,21 +55,30 @@ class APIError(requests.exceptions.HTTPError):
|
|||
|
||||
return message
|
||||
|
||||
@property
|
||||
def status_code(self):
|
||||
if self.response:
|
||||
return self.response.status_code
|
||||
|
||||
def is_client_error(self):
|
||||
return 400 <= self.response.status_code < 500
|
||||
if self.status_code is None:
|
||||
return False
|
||||
return 400 <= self.status_code < 500
|
||||
|
||||
def is_server_error(self):
|
||||
return 500 <= self.response.status_code < 600
|
||||
|
||||
|
||||
class DockerException(Exception):
|
||||
pass
|
||||
if self.status_code is None:
|
||||
return False
|
||||
return 500 <= self.status_code < 600
|
||||
|
||||
|
||||
class NotFound(APIError):
|
||||
pass
|
||||
|
||||
|
||||
class ImageNotFound(NotFound):
|
||||
pass
|
||||
|
||||
|
||||
class InvalidVersion(DockerException):
|
||||
pass
|
||||
|
||||
|
|
@ -76,3 +108,38 @@ class TLSParameterError(DockerException):
|
|||
|
||||
class NullResource(DockerException, ValueError):
|
||||
pass
|
||||
|
||||
|
||||
class ContainerError(DockerException):
    """
    Represents a container that has exited with a non-zero exit code.

    Attributes:
        container: The container object that failed.
        exit_status (int): The container's exit code.
        command: The command that was run.
        image: The image the container was created from.
        stderr: Captured error output, included in the message.
    """
    def __init__(self, container, exit_status, command, image, stderr):
        self.container = container
        self.exit_status = exit_status
        self.command = command
        self.image = image
        self.stderr = stderr

        # Build a human-readable summary for the exception message.
        message = (
            "Command '{}' in image '{}' returned non-zero exit status {}: "
            "{}"
        ).format(command, image, exit_status, stderr)
        super(ContainerError, self).__init__(message)
|
||||
|
||||
|
||||
class StreamParseError(RuntimeError):
    """
    Raised when a chunk in a stream of data from the daemon cannot be
    parsed.

    Args:
        reason: Description of why parsing failed; also exposed as
            :attr:`msg` for backward compatibility.
    """
    def __init__(self, reason):
        # Previously super().__init__ was never called, so str(err) was
        # empty and err.args was () — pass the reason through so the
        # exception formats and pickles like a normal RuntimeError.
        super(StreamParseError, self).__init__(reason)
        self.msg = reason  # kept for backward compatibility
|
||||
|
||||
|
||||
class BuildError(Exception):
    """
    Raised when an image build fails.
    """
    pass
|
||||
|
||||
|
||||
def create_unexpected_kwargs_error(name, kwargs):
    """
    Build a ``TypeError`` that mirrors Python's own message for a call to
    ``name`` made with unexpected keyword arguments.

    Args:
        name (str): Name of the function that was called.
        kwargs (dict): The unexpected keyword arguments (only the keys are
            used; they are reported sorted for deterministic output).

    Returns:
        TypeError: The exception object, ready to be raised by the caller.
    """
    unexpected = ", ".join("'{}'".format(key) for key in sorted(kwargs))
    if len(kwargs) == 1:
        phrase = "got an unexpected keyword argument "
    else:
        phrase = "got unexpected keyword arguments "
    return TypeError("{}() {}{}".format(name, phrase, unexpected))
|
||||
|
|
|
|||
|
|
@ -0,0 +1,883 @@
|
|||
import copy
|
||||
|
||||
from ..errors import (ContainerError, ImageNotFound,
|
||||
create_unexpected_kwargs_error)
|
||||
from ..utils import create_host_config
|
||||
from .images import Image
|
||||
from .resource import Collection, Model
|
||||
|
||||
|
||||
class Container(Model):
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
"""
|
||||
The name of the container.
|
||||
"""
|
||||
if self.attrs.get('Name') is not None:
|
||||
return self.attrs['Name'].lstrip('/')
|
||||
|
||||
@property
|
||||
def status(self):
|
||||
"""
|
||||
The status of the container. For example, ``running``, or ``exited``.
|
||||
"""
|
||||
return self.attrs['State']['Status']
|
||||
|
||||
def attach(self, **kwargs):
|
||||
"""
|
||||
Attach to this container.
|
||||
|
||||
:py:meth:`logs` is a wrapper around this method, which you can
|
||||
use instead if you want to fetch/stream container output without first
|
||||
retrieving the entire backlog.
|
||||
|
||||
Args:
|
||||
stdout (bool): Include stdout.
|
||||
stderr (bool): Include stderr.
|
||||
stream (bool): Return container output progressively as an iterator
|
||||
of strings, rather than a single string.
|
||||
logs (bool): Include the container's previous output.
|
||||
|
||||
Returns:
|
||||
By default, the container's output as a single string.
|
||||
|
||||
If ``stream=True``, an iterator of output strings.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self.client.api.attach(self.id, **kwargs)
|
||||
|
||||
def attach_socket(self, **kwargs):
|
||||
"""
|
||||
Like :py:meth:`attach`, but returns the underlying socket-like object
|
||||
for the HTTP request.
|
||||
|
||||
Args:
|
||||
params (dict): Dictionary of request parameters (e.g. ``stdout``,
|
||||
``stderr``, ``stream``).
|
||||
ws (bool): Use websockets instead of raw HTTP.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self.client.api.attach_socket(self.id, **kwargs)
|
||||
|
||||
def commit(self, repository=None, tag=None, **kwargs):
|
||||
"""
|
||||
Commit a container to an image. Similar to the ``docker commit``
|
||||
command.
|
||||
|
||||
Args:
|
||||
repository (str): The repository to push the image to
|
||||
tag (str): The tag to push
|
||||
message (str): A commit message
|
||||
author (str): The name of the author
|
||||
changes (str): Dockerfile instructions to apply while committing
|
||||
conf (dict): The configuration for the container. See the
|
||||
`Remote API documentation
|
||||
<https://docs.docker.com/reference/api/docker_remote_api/>`_
|
||||
for full details.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
|
||||
resp = self.client.api.commit(self.id, repository=repository, tag=tag,
|
||||
**kwargs)
|
||||
return self.client.images.get(resp['Id'])
|
||||
|
||||
def diff(self):
|
||||
"""
|
||||
Inspect changes on a container's filesystem.
|
||||
|
||||
Returns:
|
||||
(str)
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self.client.api.diff(self.id)
|
||||
|
||||
def exec_run(self, cmd, stdout=True, stderr=True, stdin=False, tty=False,
|
||||
privileged=False, user='', detach=False, stream=False,
|
||||
socket=False):
|
||||
"""
|
||||
Run a command inside this container. Similar to
|
||||
``docker exec``.
|
||||
|
||||
Args:
|
||||
cmd (str or list): Command to be executed
|
||||
stdout (bool): Attach to stdout. Default: ``True``
|
||||
stderr (bool): Attach to stderr. Default: ``True``
|
||||
stdin (bool): Attach to stdin. Default: ``False``
|
||||
tty (bool): Allocate a pseudo-TTY. Default: False
|
||||
privileged (bool): Run as privileged.
|
||||
user (str): User to execute command as. Default: root
|
||||
detach (bool): If true, detach from the exec command.
|
||||
Default: False
|
||||
tty (bool): Allocate a pseudo-TTY. Default: False
|
||||
stream (bool): Stream response data. Default: False
|
||||
|
||||
Returns:
|
||||
(generator or str): If ``stream=True``, a generator yielding
|
||||
response chunks. A string containing response data otherwise.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
resp = self.client.api.exec_create(
|
||||
self.id, cmd, stdout=stdout, stderr=stderr, stdin=stdin, tty=tty,
|
||||
privileged=privileged, user=user
|
||||
)
|
||||
return self.client.api.exec_start(
|
||||
resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket
|
||||
)
|
||||
|
||||
def export(self):
|
||||
"""
|
||||
Export the contents of the container's filesystem as a tar archive.
|
||||
|
||||
Returns:
|
||||
(str): The filesystem tar archive
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self.client.api.export(self.id)
|
||||
|
||||
def get_archive(self, path):
|
||||
"""
|
||||
Retrieve a file or folder from the container in the form of a tar
|
||||
archive.
|
||||
|
||||
Args:
|
||||
path (str): Path to the file or folder to retrieve
|
||||
|
||||
Returns:
|
||||
(tuple): First element is a raw tar data stream. Second element is
|
||||
a dict containing ``stat`` information on the specified ``path``.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self.client.api.get_archive(self.id, path)
|
||||
|
||||
def kill(self, signal=None):
|
||||
"""
|
||||
Kill or send a signal to the container.
|
||||
|
||||
Args:
|
||||
signal (str or int): The signal to send. Defaults to ``SIGKILL``
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
|
||||
return self.client.api.kill(self.id, signal=signal)
|
||||
|
||||
def logs(self, **kwargs):
|
||||
"""
|
||||
Get logs from this container. Similar to the ``docker logs`` command.
|
||||
|
||||
The ``stream`` parameter makes the ``logs`` function return a blocking
|
||||
generator you can iterate over to retrieve log output as it happens.
|
||||
|
||||
Args:
|
||||
stdout (bool): Get ``STDOUT``
|
||||
stderr (bool): Get ``STDERR``
|
||||
stream (bool): Stream the response
|
||||
timestamps (bool): Show timestamps
|
||||
tail (str or int): Output specified number of lines at the end of
|
||||
logs. Either an integer of number of lines or the string
|
||||
``all``. Default ``all``
|
||||
since (datetime or int): Show logs since a given datetime or
|
||||
integer epoch (in seconds)
|
||||
follow (bool): Follow log output
|
||||
|
||||
Returns:
|
||||
(generator or str): Logs from the container.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self.client.api.logs(self.id, **kwargs)
|
||||
|
||||
def pause(self):
|
||||
"""
|
||||
Pauses all processes within this container.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self.client.api.pause(self.id)
|
||||
|
||||
def put_archive(self, path, data):
|
||||
"""
|
||||
Insert a file or folder in this container using a tar archive as
|
||||
source.
|
||||
|
||||
Args:
|
||||
path (str): Path inside the container where the file(s) will be
|
||||
extracted. Must exist.
|
||||
data (bytes): tar data to be extracted
|
||||
|
||||
Returns:
|
||||
(bool): True if the call succeeds.
|
||||
|
||||
Raises:
|
||||
:py:class:`~docker.errors.APIError` If an error occurs.
|
||||
"""
|
||||
return self.client.api.put_archive(self.id, path, data)
|
||||
|
||||
def remove(self, **kwargs):
|
||||
"""
|
||||
Remove this container. Similar to the ``docker rm`` command.
|
||||
|
||||
Args:
|
||||
v (bool): Remove the volumes associated with the container
|
||||
link (bool): Remove the specified link and not the underlying
|
||||
container
|
||||
force (bool): Force the removal of a running container (uses
|
||||
``SIGKILL``)
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self.client.api.remove_container(self.id, **kwargs)
|
||||
|
||||
def rename(self, name):
|
||||
"""
|
||||
Rename this container. Similar to the ``docker rename`` command.
|
||||
|
||||
Args:
|
||||
name (str): New name for the container
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self.client.api.rename(self.id, name)
|
||||
|
||||
def resize(self, height, width):
|
||||
"""
|
||||
Resize the tty session.
|
||||
|
||||
Args:
|
||||
height (int): Height of tty session
|
||||
width (int): Width of tty session
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self.client.api.resize(self.id, height, width)
|
||||
|
||||
def restart(self, **kwargs):
|
||||
"""
|
||||
Restart this container. Similar to the ``docker restart`` command.
|
||||
|
||||
Args:
|
||||
timeout (int): Number of seconds to try to stop for before killing
|
||||
the container. Once killed it will then be restarted. Default
|
||||
is 10 seconds.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self.client.api.restart(self.id, **kwargs)
|
||||
|
||||
def start(self, **kwargs):
|
||||
"""
|
||||
Start this container. Similar to the ``docker start`` command, but
|
||||
doesn't support attach options.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self.client.api.start(self.id, **kwargs)
|
||||
|
||||
def stats(self, **kwargs):
|
||||
"""
|
||||
Stream statistics for this container. Similar to the
|
||||
``docker stats`` command.
|
||||
|
||||
Args:
|
||||
decode (bool): If set to true, stream will be decoded into dicts
|
||||
on the fly. False by default.
|
||||
stream (bool): If set to false, only the current stats will be
|
||||
returned instead of a stream. True by default.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self.client.api.stats(self.id, **kwargs)
|
||||
|
||||
def stop(self, **kwargs):
|
||||
"""
|
||||
Stops a container. Similar to the ``docker stop`` command.
|
||||
|
||||
Args:
|
||||
timeout (int): Timeout in seconds to wait for the container to
|
||||
stop before sending a ``SIGKILL``. Default: 10
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self.client.api.stop(self.id, **kwargs)
|
||||
|
||||
def top(self, **kwargs):
|
||||
"""
|
||||
Display the running processes of the container.
|
||||
|
||||
Args:
|
||||
ps_args (str): An optional arguments passed to ps (e.g. ``aux``)
|
||||
|
||||
Returns:
|
||||
(str): The output of the top
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self.client.api.top(self.id, **kwargs)
|
||||
|
||||
def unpause(self):
|
||||
"""
|
||||
Unpause all processes within the container.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self.client.api.unpause(self.id)
|
||||
|
||||
def update(self, **kwargs):
|
||||
"""
|
||||
Update resource configuration of the containers.
|
||||
|
||||
Args:
|
||||
blkio_weight (int): Block IO (relative weight), between 10 and 1000
|
||||
cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
|
||||
cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
|
||||
cpu_shares (int): CPU shares (relative weight)
|
||||
cpuset_cpus (str): CPUs in which to allow execution
|
||||
cpuset_mems (str): MEMs in which to allow execution
|
||||
mem_limit (int or str): Memory limit
|
||||
mem_reservation (int or str): Memory soft limit
|
||||
memswap_limit (int or str): Total memory (memory + swap), -1 to
|
||||
disable swap
|
||||
kernel_memory (int or str): Kernel memory limit
|
||||
restart_policy (dict): Restart policy dictionary
|
||||
|
||||
Returns:
|
||||
(dict): Dictionary containing a ``Warnings`` key.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self.client.api.update_container(self.id, **kwargs)
|
||||
|
||||
def wait(self, **kwargs):
|
||||
"""
|
||||
Block until the container stops, then return its exit code. Similar to
|
||||
the ``docker wait`` command.
|
||||
|
||||
Args:
|
||||
timeout (int): Request timeout
|
||||
|
||||
Returns:
|
||||
(int): The exit code of the container. Returns ``-1`` if the API
|
||||
responds without a ``StatusCode`` attribute.
|
||||
|
||||
Raises:
|
||||
:py:class:`requests.exceptions.ReadTimeout`
|
||||
If the timeout is exceeded.
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self.client.api.wait(self.id, **kwargs)
|
||||
|
||||
|
||||
class ContainerCollection(Collection):
|
||||
model = Container
|
||||
|
||||
def run(self, image, command=None, stdout=True, stderr=False,
|
||||
remove=False, **kwargs):
|
||||
"""
|
||||
Run a container. By default, it will wait for the container to finish
|
||||
and return its logs, similar to ``docker run``.
|
||||
|
||||
If the ``detach`` argument is ``True``, it will start the container
|
||||
and immediately return a :py:class:`Container` object, similar to
|
||||
``docker run -d``.
|
||||
|
||||
Example:
|
||||
Run a container and get its output:
|
||||
|
||||
>>> import docker
|
||||
>>> client = docker.from_env()
|
||||
>>> client.containers.run('alpine', 'echo hello world')
|
||||
b'hello world\\n'
|
||||
|
||||
Run a container and detach:
|
||||
|
||||
>>> container = client.containers.run('bfirsh/reticulate-splines',
|
||||
detach=True)
|
||||
>>> container.logs()
|
||||
'Reticulating spline 1...\\nReticulating spline 2...\\n'
|
||||
|
||||
Args:
|
||||
image (str): The image to run.
|
||||
command (str or list): The command to run in the container.
|
||||
blkio_weight_device: Block IO weight (relative device weight) in
|
||||
the form of: ``[{"Path": "device_path", "Weight": weight}]``.
|
||||
blkio_weight: Block IO weight (relative weight), accepts a weight
|
||||
value between 10 and 1000.
|
||||
cap_add (list of str): Add kernel capabilities. For example,
|
||||
``["SYS_ADMIN", "MKNOD"]``.
|
||||
cap_drop (list of str): Drop kernel capabilities.
|
||||
cpu_group (int): The length of a CPU period in microseconds.
|
||||
cpu_period (int): Microseconds of CPU time that the container can
|
||||
get in a CPU period.
|
||||
cpu_shares (int): CPU shares (relative weight).
|
||||
cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
|
||||
``0,1``).
|
||||
detach (bool): Run container in the background and return a
|
||||
:py:class:`Container` object.
|
||||
device_read_bps: Limit read rate (bytes per second) from a device
|
||||
in the form of: `[{"Path": "device_path", "Rate": rate}]`
|
||||
device_read_iops: Limit read rate (IO per second) from a device.
|
||||
device_write_bps: Limit write rate (bytes per second) from a
|
||||
device.
|
||||
device_write_iops: Limit write rate (IO per second) from a device.
|
||||
devices (list): Expose host devices to the container, as a list
|
||||
of strings in the form
|
||||
``<path_on_host>:<path_in_container>:<cgroup_permissions>``.
|
||||
|
||||
For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
|
||||
to have read-write access to the host's ``/dev/sda`` via a
|
||||
node named ``/dev/xvda`` inside the container.
|
||||
dns (list): Set custom DNS servers.
|
||||
dns_opt (list): Additional options to be added to the container's
|
||||
``resolv.conf`` file.
|
||||
dns_search (list): DNS search domains.
|
||||
domainname (str or list): Set custom DNS search domains.
|
||||
entrypoint (str or list): The entrypoint for the container.
|
||||
environment (dict or list): Environment variables to set inside
|
||||
the container, as a dictionary or a list of strings in the
|
||||
format ``["SOMEVARIABLE=xxx"]``.
|
||||
extra_hosts (dict): Addtional hostnames to resolve inside the
|
||||
container, as a mapping of hostname to IP address.
|
||||
group_add (list): List of additional group names and/or IDs that
|
||||
the container process will run as.
|
||||
hostname (str): Optional hostname for the container.
|
||||
ipc_mode (str): Set the IPC mode for the container.
|
||||
isolation (str): Isolation technology to use. Default: `None`.
|
||||
labels (dict or list): A dictionary of name-value labels (e.g.
|
||||
``{"label1": "value1", "label2": "value2"}``) or a list of
|
||||
names of labels to set with empty values (e.g.
|
||||
``["label1", "label2"]``)
|
||||
links (dict or list of tuples): Either a dictionary mapping name
|
||||
to alias or as a list of ``(name, alias)`` tuples.
|
||||
log_config (dict): Logging configuration, as a dictionary with
|
||||
keys:
|
||||
|
||||
- ``type`` The logging driver name.
|
||||
- ``config`` A dictionary of configuration for the logging
|
||||
driver.
|
||||
|
||||
mac_address (str): MAC address to assign to the container.
|
||||
mem_limit (float or str): Memory limit. Accepts float values
|
||||
(which represent the memory limit of the created container in
|
||||
bytes) or a string with a units identification char
|
||||
(``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
|
||||
specified without a units character, bytes are assumed as an
|
||||
intended unit.
|
||||
mem_limit (str or int): Maximum amount of memory container is
|
||||
allowed to consume. (e.g. ``1G``).
|
||||
mem_swappiness (int): Tune a container's memory swappiness
|
||||
behavior. Accepts number between 0 and 100.
|
||||
memswap_limit (str or int): Maximum amount of memory + swap a
|
||||
container is allowed to consume.
|
||||
networks (list): A list of network names to connect this
|
||||
container to.
|
||||
name (str): The name for this container.
|
||||
network_disabled (bool): Disable networking.
|
||||
network_mode (str): One of:
|
||||
|
||||
- ``bridge`` Create a new network stack for the container on
|
||||
on the bridge network.
|
||||
- ``none`` No networking for this container.
|
||||
- ``container:<name|id>`` Reuse another container's network
|
||||
stack.
|
||||
- ``host`` Use the host network stack.
|
||||
oom_kill_disable (bool): Whether to disable OOM killer.
|
||||
oom_score_adj (int): An integer value containing the score given
|
||||
to the container in order to tune OOM killer preferences.
|
||||
pid_mode (str): If set to ``host``, use the host PID namespace
|
||||
inside the container.
|
||||
pids_limit (int): Tune a container's pids limit. Set ``-1`` for
|
||||
unlimited.
|
||||
ports (dict): Ports to bind inside the container.
|
||||
|
||||
The keys of the dictionary are the ports to bind inside the
|
||||
container, either as an integer or a string in the form
|
||||
``port/protocol``, where the protocol is either ``tcp`` or
|
||||
``udp``.
|
||||
|
||||
The values of the dictionary are the corresponding ports to
|
||||
open on the host, which can be either:
|
||||
|
||||
- The port number, as an integer. For example,
|
||||
``{'2222/tcp': 3333}`` will expose port 2222 inside the
|
||||
container as port 3333 on the host.
|
||||
- ``None``, to assign a random host port. For example,
|
||||
``{'2222/tcp': None}``.
|
||||
- A tuple of ``(address, port)`` if you want to specify the
|
||||
host interface. For example,
|
||||
``{'1111/tcp': ('127.0.0.1', 1111)}``.
|
||||
- A list of integers, if you want to bind multiple host ports
|
||||
to a single container port. For example,
|
||||
``{'1111/tcp': [1234, 4567]}``.
|
||||
|
||||
privileged (bool): Give extended privileges to this container.
|
||||
publish_all_ports (bool): Publish all ports to the host.
|
||||
read_only (bool): Mount the container's root filesystem as read
|
||||
only.
|
||||
remove (bool): Remove the container when it has finished running.
|
||||
Default: ``False``.
|
||||
restart_policy (dict): Restart the container when it exits.
|
||||
Configured as a dictionary with keys:
|
||||
|
||||
- ``Name`` One of ``on-failure``, or ``always``.
|
||||
- ``MaximumRetryCount`` Number of times to restart the
|
||||
container on failure.
|
||||
|
||||
For example:
|
||||
``{"Name": "on-failure", "MaximumRetryCount": 5}``
|
||||
|
||||
security_opt (list): A list of string values to customize labels
|
||||
for MLS systems, such as SELinux.
|
||||
shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
|
||||
stdin_open (bool): Keep ``STDIN`` open even if not attached.
|
||||
stdout (bool): Return logs from ``STDOUT`` when ``detach=False``.
|
||||
Default: ``True``.
|
||||
stdout (bool): Return logs from ``STDERR`` when ``detach=False``.
|
||||
Default: ``False``.
|
||||
stop_signal (str): The stop signal to use to stop the container
|
||||
(e.g. ``SIGINT``).
|
||||
sysctls (dict): Kernel parameters to set in the container.
|
||||
tmpfs (dict): Temporary filesystems to mount, as a dictionary
|
||||
mapping a path inside the container to options for that path.
|
||||
|
||||
For example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
{
|
||||
'/mnt/vol2': '',
|
||||
'/mnt/vol1': 'size=3G,uid=1000'
|
||||
}
|
||||
|
||||
tty (bool): Allocate a pseudo-TTY.
|
||||
ulimits (list): Ulimits to set inside the container, as a list of
|
||||
dicts.
|
||||
user (str or int): Username or UID to run commands as inside the
|
||||
container.
|
||||
userns_mode (str): Sets the user namespace mode for the container
|
||||
when user namespace remapping option is enabled. Supported
|
||||
values are: ``host``
|
||||
volume_driver (str): The name of a volume driver/plugin.
|
||||
volumes (dict or list): A dictionary to configure volumes mounted
|
||||
inside the container. The key is either the host path or a
|
||||
volume name, and the value is a dictionary with the keys:
|
||||
|
||||
- ``bind`` The path to mount the volume inside the container
|
||||
- ``mode`` Either ``rw`` to mount the volume read/write, or
|
||||
``ro`` to mount it read-only.
|
||||
|
||||
For example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
{'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'},
|
||||
'/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}}
|
||||
|
||||
volumes_from (list): List of container names or IDs to get
|
||||
volumes from.
|
||||
working_dir (str): Path to the working directory.
|
||||
|
||||
Returns:
|
||||
The container logs, either ``STDOUT``, ``STDERR``, or both,
|
||||
depending on the value of the ``stdout`` and ``stderr`` arguments.
|
||||
|
||||
If ``detach`` is ``True``, a :py:class:`Container` object is
|
||||
returned instead.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.ContainerError`
|
||||
If the container exits with a non-zero exit code and
|
||||
``detach`` is ``False``.
|
||||
:py:class:`docker.errors.ImageNotFound`
|
||||
If the specified image does not exist.
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
if isinstance(image, Image):
|
||||
image = image.id
|
||||
detach = kwargs.pop("detach", False)
|
||||
if detach and remove:
|
||||
raise RuntimeError("The options 'detach' and 'remove' cannot be "
|
||||
"used together.")
|
||||
|
||||
try:
|
||||
container = self.create(image=image, command=command,
|
||||
detach=detach, **kwargs)
|
||||
except ImageNotFound:
|
||||
self.client.images.pull(image)
|
||||
container = self.create(image=image, command=command,
|
||||
detach=detach, **kwargs)
|
||||
|
||||
container.start()
|
||||
|
||||
if detach:
|
||||
return container
|
||||
|
||||
exit_status = container.wait()
|
||||
if exit_status != 0:
|
||||
stdout = False
|
||||
stderr = True
|
||||
out = container.logs(stdout=stdout, stderr=stderr)
|
||||
if remove:
|
||||
container.remove()
|
||||
if exit_status != 0:
|
||||
raise ContainerError(container, exit_status, command, image, out)
|
||||
return out
|
||||
|
||||
def create(self, image, command=None, **kwargs):
|
||||
"""
|
||||
Create a container without starting it. Similar to ``docker create``.
|
||||
|
||||
Takes the same arguments as :py:meth:`run`, except for ``stdout``,
|
||||
``stderr``, and ``remove``.
|
||||
|
||||
Returns:
|
||||
A :py:class:`Container` object.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.ImageNotFound`
|
||||
If the specified image does not exist.
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
if isinstance(image, Image):
|
||||
image = image.id
|
||||
kwargs['image'] = image
|
||||
kwargs['command'] = command
|
||||
kwargs['version'] = self.client.api._version
|
||||
create_kwargs = _create_container_args(kwargs)
|
||||
resp = self.client.api.create_container(**create_kwargs)
|
||||
return self.get(resp['Id'])
|
||||
|
||||
def get(self, container_id):
|
||||
"""
|
||||
Get a container by name or ID.
|
||||
|
||||
Args:
|
||||
container_id (str): Container name or ID.
|
||||
|
||||
Returns:
|
||||
A :py:class:`Container` object.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.NotFound`
|
||||
If the container does not exist.
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
resp = self.client.api.inspect_container(container_id)
|
||||
return self.prepare_model(resp)
|
||||
|
||||
def list(self, all=False, before=None, filters=None, limit=-1, since=None):
|
||||
"""
|
||||
List containers. Similar to the ``docker ps`` command.
|
||||
|
||||
Args:
|
||||
all (bool): Show all containers. Only running containers are shown
|
||||
by default trunc (bool): Truncate output
|
||||
since (str): Show only containers created since Id or Name, include
|
||||
non-running ones
|
||||
before (str): Show only container created before Id or Name,
|
||||
include non-running ones
|
||||
limit (int): Show `limit` last created containers, include
|
||||
non-running ones
|
||||
filters (dict): Filters to be processed on the image list.
|
||||
Available filters:
|
||||
|
||||
- `exited` (int): Only containers with specified exit code
|
||||
- `status` (str): One of ``restarting``, ``running``,
|
||||
``paused``, ``exited``
|
||||
- `label` (str): format either ``"key"`` or ``"key=value"``
|
||||
- `id` (str): The id of the container.
|
||||
- `name` (str): The name of the container.
|
||||
- `ancestor` (str): Filter by container ancestor. Format of
|
||||
``<image-name>[:tag]``, ``<image-id>``, or
|
||||
``<image@digest>``.
|
||||
- `before` (str): Only containers created before a particular
|
||||
container. Give the container name or id.
|
||||
- `since` (str): Only containers created after a particular
|
||||
container. Give container name or id.
|
||||
|
||||
A comprehensive list can be found in the documentation for
|
||||
`docker ps
|
||||
<https://docs.docker.com/engine/reference/commandline/ps>`_.
|
||||
|
||||
Returns:
|
||||
(list of :py:class:`Container`)
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
resp = self.client.api.containers(all=all, before=before,
|
||||
filters=filters, limit=limit,
|
||||
since=since)
|
||||
return [self.get(r['Id']) for r in resp]
|
||||
|
||||
|
||||
# kwargs to copy straight from run to create
|
||||
RUN_CREATE_KWARGS = [
|
||||
'command',
|
||||
'detach',
|
||||
'domainname',
|
||||
'entrypoint',
|
||||
'environment',
|
||||
'healthcheck',
|
||||
'hostname',
|
||||
'image',
|
||||
'labels',
|
||||
'mac_address',
|
||||
'name',
|
||||
'network_disabled',
|
||||
'stdin_open',
|
||||
'stop_signal',
|
||||
'tty',
|
||||
'user',
|
||||
'volume_driver',
|
||||
'working_dir',
|
||||
]
|
||||
|
||||
# kwargs to copy straight from run to host_config
|
||||
RUN_HOST_CONFIG_KWARGS = [
|
||||
'blkio_weight_device',
|
||||
'blkio_weight',
|
||||
'cap_add',
|
||||
'cap_drop',
|
||||
'cgroup_parent',
|
||||
'cpu_period',
|
||||
'cpu_quota',
|
||||
'cpu_shares',
|
||||
'cpuset_cpus',
|
||||
'device_read_bps',
|
||||
'device_read_iops',
|
||||
'device_write_bps',
|
||||
'device_write_iops',
|
||||
'devices',
|
||||
'dns_opt',
|
||||
'dns_search',
|
||||
'dns',
|
||||
'extra_hosts',
|
||||
'group_add',
|
||||
'ipc_mode',
|
||||
'isolation',
|
||||
'kernel_memory',
|
||||
'links',
|
||||
'log_config',
|
||||
'lxc_conf',
|
||||
'mem_limit',
|
||||
'mem_reservation',
|
||||
'mem_swappiness',
|
||||
'memswap_limit',
|
||||
'network_mode',
|
||||
'oom_kill_disable',
|
||||
'oom_score_adj',
|
||||
'pid_mode',
|
||||
'pids_limit',
|
||||
'privileged',
|
||||
'publish_all_ports',
|
||||
'read_only',
|
||||
'restart_policy',
|
||||
'security_opt',
|
||||
'shm_size',
|
||||
'sysctls',
|
||||
'tmpfs',
|
||||
'ulimits',
|
||||
'userns_mode',
|
||||
'version',
|
||||
'volumes_from',
|
||||
]
|
||||
|
||||
|
||||
def _create_container_args(kwargs):
    """
    Convert arguments to create() to arguments to create_container().

    Splits the mixed keyword set into the arguments ``create_container``
    accepts directly and those that belong in its host config, then fills
    in the fields (``ports``, ``volumes``) that ``create_host_config``
    must process first. Raises a ``TypeError`` (via
    ``create_unexpected_kwargs_error``) for anything left over.
    """
    create_kwargs = {}
    host_config_kwargs = {}

    # Classify every known kwarg in a single pass; the two name sets are
    # disjoint, so this matches the original two-pass split exactly.
    for key in list(kwargs):
        if key in RUN_CREATE_KWARGS:
            create_kwargs[key] = kwargs.pop(key)
        elif key in RUN_HOST_CONFIG_KWARGS:
            host_config_kwargs[key] = kwargs.pop(key)

    # Process kwargs which are split over both create and host_config
    ports = kwargs.pop('ports', {})
    if ports:
        host_config_kwargs['port_bindings'] = ports

    volumes = kwargs.pop('volumes', {})
    if volumes:
        host_config_kwargs['binds'] = volumes

    networks = kwargs.pop('networks', [])
    if networks:
        # Each named network maps to a None (default) endpoint config.
        create_kwargs['networking_config'] = dict.fromkeys(networks)

    # All kwargs should have been consumed by this point, so raise
    # error if any are left
    if kwargs:
        raise create_unexpected_kwargs_error('run', kwargs)

    create_kwargs['host_config'] = create_host_config(**host_config_kwargs)

    # Fill in any kwargs which need processing by create_host_config first
    port_bindings = create_kwargs['host_config'].get('PortBindings')
    if port_bindings:
        # sort to make consistent for tests
        create_kwargs['ports'] = [
            tuple(binding.split('/', 1))
            for binding in sorted(port_bindings)
        ]
    binds = create_kwargs['host_config'].get('Binds')
    if binds:
        create_kwargs['volumes'] = [bind.split(':')[0] for bind in binds]
    return create_kwargs
|
||||
|
|
@ -0,0 +1,269 @@
|
|||
import re
|
||||
|
||||
import six
|
||||
|
||||
from ..api import APIClient
|
||||
from ..errors import BuildError
|
||||
from ..utils.json_stream import json_stream
|
||||
from .resource import Collection, Model
|
||||
|
||||
|
||||
class Image(Model):
    """
    An image on the server.
    """
    def __repr__(self):
        return "<%s: '%s'>" % (self.__class__.__name__, "', '".join(self.tags))

    @property
    def short_id(self):
        """
        The ID of the image truncated to 10 characters, plus the ``sha256:``
        prefix.
        """
        if self.id.startswith('sha256:'):
            # 'sha256:' is 7 characters; keep it plus 10 hex digits.
            return self.id[:17]
        return self.id[:10]

    @property
    def tags(self):
        """
        The image's tags.
        """
        # '<none>:<none>' is the placeholder for untagged images.
        return [
            tag for tag in self.attrs.get('RepoTags', [])
            if tag != '<none>:<none>'
        ]

    def history(self):
        """
        Show the history of an image.

        Returns:
            (str): The history of the image.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        return self.client.api.history(self.id)

    def save(self):
        """
        Get a tarball of an image. Similar to the ``docker save`` command.

        Returns:
            (urllib3.response.HTTPResponse object): The response from the
            daemon.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:

            >>> image = cli.get("fedora:latest")
            >>> resp = image.save()
            >>> f = open('/tmp/fedora-latest.tar', 'w')
            >>> f.write(resp.data)
            >>> f.close()
        """
        return self.client.api.get_image(self.id)

    def tag(self, repository, tag=None, **kwargs):
        """
        Tag this image into a repository. Similar to the ``docker tag``
        command.

        Args:
            repository (str): The repository to set for the tag
            tag (str): The tag name
            force (bool): Force

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Returns:
            (bool): ``True`` if successful
        """
        # BUG FIX: the result of the API call was previously discarded,
        # so this method always returned None despite the documented
        # ``(bool)`` return value.
        return self.client.api.tag(self.id, repository, tag=tag, **kwargs)
|
||||
|
||||
|
||||
class ImageCollection(Collection):
    model = Image

    def build(self, **kwargs):
        """
        Build an image and return it. Similar to the ``docker build``
        command. Either ``path`` or ``fileobj`` must be set.

        If you have a tar file for the Docker build context (including a
        Dockerfile) already, pass a readable file-like object to ``fileobj``
        and also pass ``custom_context=True``. If the stream is compressed
        also, set ``encoding`` to the correct value (e.g ``gzip``).

        If you want to get the raw output of the build, use the
        :py:meth:`~docker.api.build.BuildApiMixin.build` method in the
        low-level API.

        Args:
            path (str): Path to the directory containing the Dockerfile
            fileobj: A file object to use as the Dockerfile. (Or a file-like
                object)
            tag (str): A tag to add to the final image
            quiet (bool): Whether to return the status
            nocache (bool): Don't use the cache when set to ``True``
            rm (bool): Remove intermediate containers. The ``docker build``
                command now defaults to ``--rm=true``, but we have kept the old
                default of `False` to preserve backward compatibility
            stream (bool): *Deprecated for API version > 1.8 (always True)*.
                Return a blocking generator you can iterate over to retrieve
                build output as it happens
            timeout (int): HTTP timeout
            custom_context (bool): Optional if using ``fileobj``
            encoding (str): The encoding for a stream. Set to ``gzip`` for
                compressing
            pull (bool): Downloads any updates to the FROM image in Dockerfiles
            forcerm (bool): Always remove intermediate containers, even after
                unsuccessful builds
            dockerfile (str): path within the build context to the Dockerfile
            buildargs (dict): A dictionary of build arguments
            container_limits (dict): A dictionary of limits applied to each
                container created by the build process. Valid keys:

                - memory (int): set memory limit for build
                - memswap (int): Total memory (memory + swap), -1 to disable
                  swap
                - cpushares (int): CPU shares (relative weight)
                - cpusetcpus (str): CPUs in which to allow execution, e.g.,
                  ``"0-3"``, ``"0,1"``
            decode (bool): If set to ``True``, the returned stream will be
                decoded into dicts on the fly. Default ``False``.

        Returns:
            (:py:class:`Image`): The built image.

        Raises:
            :py:class:`docker.errors.BuildError`
                If there is an error during the build.
            :py:class:`docker.errors.APIError`
                If the server returns any other error.
            ``TypeError``
                If neither ``path`` nor ``fileobj`` is specified.
        """
        resp = self.client.api.build(**kwargs)
        if isinstance(resp, six.string_types):
            return self.get(resp)
        events = list(json_stream(resp))
        if not events:
            # BUG FIX: this previously *returned* a BuildError instance
            # instead of raising it, silently handing callers an exception
            # object in place of an Image.
            raise BuildError('Unknown')
        event = events[-1]
        if 'stream' in event:
            match = re.search(r'Successfully built ([0-9a-f]+)',
                              event.get('stream', ''))
            if match:
                image_id = match.group(1)
                return self.get(image_id)

        raise BuildError(event.get('error') or event)

    def get(self, name):
        """
        Gets an image.

        Args:
            name (str): The name of the image.

        Returns:
            (:py:class:`Image`): The image.

        Raises:
            :py:class:`docker.errors.ImageNotFound` If the image does not
                exist.
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        return self.prepare_model(self.client.api.inspect_image(name))

    def list(self, name=None, all=False, filters=None):
        """
        List images on the server.

        Args:
            name (str): Only show images belonging to the repository ``name``
            all (bool): Show intermediate image layers. By default, these are
                filtered out.
            filters (dict): Filters to be processed on the image list.
                Available filters:
                - ``dangling`` (bool)
                - ``label`` (str): format either ``key`` or ``key=value``

        Returns:
            (list of :py:class:`Image`): The images.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        resp = self.client.api.images(name=name, all=all, filters=filters)
        return [self.prepare_model(r) for r in resp]

    def load(self, data):
        """
        Load an image that was previously saved using
        :py:meth:`~docker.models.images.Image.save` (or ``docker save``).
        Similar to ``docker load``.

        Args:
            data (binary): Image data to be loaded.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        return self.client.api.load_image(data)

    def pull(self, name, **kwargs):
        """
        Pull an image of the given name and return it. Similar to the
        ``docker pull`` command.

        If you want to get the raw pull output, use the
        :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the
        low-level API.

        Args:
            name (str): The repository to pull
            tag (str): The tag to pull
            insecure_registry (bool): Use an insecure registry
            auth_config (dict): Override the credentials that
                :py:meth:`~docker.client.Client.login` has set for
                this request. ``auth_config`` should contain the ``username``
                and ``password`` keys to be valid.

        Returns:
            (:py:class:`Image`): The image that has been pulled.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:

            >>> image = client.images.pull('busybox')
        """
        self.client.api.pull(name, **kwargs)
        return self.get(name)

    def push(self, repository, tag=None, **kwargs):
        return self.client.api.push(repository, tag=tag, **kwargs)
    push.__doc__ = APIClient.push.__doc__

    def remove(self, *args, **kwargs):
        self.client.api.remove_image(*args, **kwargs)
    remove.__doc__ = APIClient.remove_image.__doc__

    def search(self, *args, **kwargs):
        return self.client.api.search(*args, **kwargs)
    search.__doc__ = APIClient.search.__doc__
|
||||
|
|
@ -0,0 +1,181 @@
|
|||
from .containers import Container
|
||||
from .resource import Model, Collection
|
||||
|
||||
|
||||
class Network(Model):
    """
    A Docker network.
    """
    @property
    def name(self):
        """
        The name of the network.
        """
        return self.attrs.get('Name')

    @property
    def containers(self):
        """
        The containers that are connected to the network, as a list of
        :py:class:`~docker.models.containers.Container` objects.
        """
        return [
            self.client.containers.get(cid) for cid in
            self.attrs.get('Containers', {}).keys()
        ]

    def connect(self, container, *args, **kwargs):
        """
        Connect a container to this network.

        Args:
            container (str): Container to connect to this network, as either
                an ID, name, or :py:class:`~docker.models.containers.Container`
                object.
            aliases (list): A list of aliases for this endpoint. Names in that
                list can be used within the network to reach the container.
                Defaults to ``None``.
            links (list): A list of links for this endpoint. Containers
                declared in this list will be linked to this container.
                Defaults to ``None``.
            ipv4_address (str): The IP address of this container on the
                network, using the IPv4 protocol. Defaults to ``None``.
            ipv6_address (str): The IP address of this container on the
                network, using the IPv6 protocol. Defaults to ``None``.
            link_local_ips (list): A list of link-local (IPv4/IPv6) addresses.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        if isinstance(container, Container):
            container = container.id
        # BUG FIX: the documented endpoint options (aliases, links,
        # ipv4_address, ...) were previously neither accepted nor
        # forwarded; pass them through to the low-level API.
        return self.client.api.connect_container_to_network(
            container, self.id, *args, **kwargs
        )

    def disconnect(self, container, *args, **kwargs):
        """
        Disconnect a container from this network.

        Args:
            container (str): Container to disconnect from this network, as
                either an ID, name, or
                :py:class:`~docker.models.containers.Container` object.
            force (bool): Force the container to disconnect from a network.
                Default: ``False``

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        if isinstance(container, Container):
            container = container.id
        # BUG FIX: forward the documented ``force`` option (and any other
        # options) to the low-level API instead of dropping it.
        return self.client.api.disconnect_container_from_network(
            container, self.id, *args, **kwargs
        )

    def remove(self):
        """
        Remove this network.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        return self.client.api.remove_network(self.id)
|
||||
|
||||
|
||||
class NetworkCollection(Collection):
    """
    Networks on the Docker server.
    """
    model = Network

    def create(self, name, *args, **kwargs):
        """
        Create a network. Similar to the ``docker network create``.

        Args:
            name (str): Name of the network
            driver (str): Name of the driver used to create the network
            options (dict): Driver options as a key-value dictionary
            ipam (dict): Optional custom IP scheme for the network.
                Created with :py:meth:`~docker.utils.create_ipam_config`.
            check_duplicate (bool): Request daemon to check for networks with
                same name. Default: ``True``.
            internal (bool): Restrict external access to the network. Default
                ``False``.
            labels (dict): Map of labels to set on the network. Default
                ``None``.
            enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.

        Returns:
            (:py:class:`Network`): The network that was created.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:
            A network using the bridge driver:

                >>> client.networks.create("network1", driver="bridge")

            You can also create more advanced networks with custom IPAM
            configurations. For example, setting the subnet to
            ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.

            .. code-block:: python

                >>> ipam_pool = docker.utils.create_ipam_pool(
                    subnet='192.168.52.0/24',
                    gateway='192.168.52.254'
                )
                >>> ipam_config = docker.utils.create_ipam_config(
                    pool_configs=[ipam_pool]
                )
                >>> client.networks.create(
                    "network1",
                    driver="bridge",
                    ipam=ipam_config
                )

        """
        created = self.client.api.create_network(name, *args, **kwargs)
        # The create endpoint only returns the new network's ID; fetch the
        # full record so callers get a fully-populated model.
        return self.get(created['Id'])

    def get(self, network_id):
        """
        Get a network by its ID.

        Args:
            network_id (str): The ID of the network.

        Returns:
            (:py:class:`Network`) The network.

        Raises:
            :py:class:`docker.errors.NotFound`
                If the network does not exist.

            :py:class:`docker.errors.APIError`
                If the server returns an error.

        """
        raw = self.client.api.inspect_network(network_id)
        return self.prepare_model(raw)

    def list(self, *args, **kwargs):
        """
        List networks. Similar to the ``docker networks ls`` command.

        Args:
            names (list): List of names to filter by.
            ids (list): List of ids to filter by.

        Returns:
            (list of :py:class:`Network`) The networks on the server.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        return [
            self.prepare_model(raw)
            for raw in self.client.api.networks(*args, **kwargs)
        ]
|
||||
|
|
@ -0,0 +1,88 @@
|
|||
from .resource import Model, Collection
|
||||
|
||||
|
||||
class Node(Model):
    """A node in a swarm."""
    # Swarm API objects use 'ID' (uppercase) rather than 'Id'.
    id_attribute = 'ID'

    @property
    def version(self):
        """
        The version number of the node. If this is not the same as the
        server, the :py:meth:`update` function will not work and you will
        need to call :py:meth:`reload` before calling it again.
        """
        return self.attrs.get('Version').get('Index')

    def update(self, node_spec):
        """
        Update the node's configuration.

        Args:
            node_spec (dict): Configuration settings to update. Any values
                not provided will be removed. Default: ``None``

        Returns:
            `True` if the request went through.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:

            >>> node_spec = {'Availability': 'active',
                         'Name': 'node-name',
                         'Role': 'manager',
                         'Labels': {'foo': 'bar'}
                        }
            >>> node.update(node_spec)

        """
        # The version is passed so the server can reject stale updates;
        # call reload() first if this node was fetched a while ago.
        return self.client.api.update_node(self.id, self.version, node_spec)
|
||||
|
||||
|
||||
class NodeCollection(Collection):
    """Nodes on the Docker server."""
    model = Node

    def get(self, node_id):
        """
        Get a node.

        Args:
            node_id (string): ID of the node to be inspected.

        Returns:
            A :py:class:`Node` object.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        raw = self.client.api.inspect_node(node_id)
        return self.prepare_model(raw)

    def list(self, *args, **kwargs):
        """
        List swarm nodes.

        Args:
            filters (dict): Filters to process on the nodes list. Valid
                filters: ``id``, ``name``, ``membership`` and ``role``.
                Default: ``None``

        Returns:
            A list of :py:class:`Node` objects.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:

            >>> client.nodes.list(filters={'role': 'manager'})
        """
        raw_nodes = self.client.api.nodes(*args, **kwargs)
        return [self.prepare_model(raw) for raw in raw_nodes]
|
||||
|
|
@ -0,0 +1,84 @@
|
|||
|
||||
class Model(object):
    """
    A base class for representing a single object on the server.
    """
    id_attribute = 'Id'

    def __init__(self, attrs=None, client=None, collection=None):
        #: A client pointing at the server that this object is on.
        self.client = client

        #: The collection that this model is part of.
        self.collection = collection

        #: The raw representation of this object from the API
        self.attrs = attrs
        if self.attrs is None:
            self.attrs = {}

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.short_id)

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.id == other.id

    def __hash__(self):
        # BUG FIX: defining __eq__ without __hash__ makes instances
        # unhashable under Python 3 (they could not be put in sets or
        # used as dict keys). Hash on the same identity __eq__ compares.
        return hash("%s:%s" % (self.__class__.__name__, self.id))

    @property
    def id(self):
        """
        The ID of the object.
        """
        return self.attrs.get(self.id_attribute)

    @property
    def short_id(self):
        """
        The ID of the object, truncated to 10 characters.
        """
        return self.id[:10]

    def reload(self):
        """
        Load this object from the server again and update ``attrs`` with the
        new data.
        """
        new_model = self.collection.get(self.id)
        self.attrs = new_model.attrs
|
||||
|
||||
|
||||
class Collection(object):
    """
    A base class for representing all objects of a particular type on the
    server.
    """

    #: The type of object this collection represents, set by subclasses
    model = None

    def __init__(self, client=None):
        #: The client pointing at the server that this collection of objects
        #: is on.
        self.client = client

    def list(self):
        raise NotImplementedError

    def get(self, key):
        raise NotImplementedError

    def create(self, attrs=None):
        raise NotImplementedError

    def prepare_model(self, attrs):
        """
        Create a model from a set of attributes.
        """
        # An existing model is adopted in place; a raw dict from the API
        # is wrapped in a fresh model instance.
        if isinstance(attrs, Model):
            attrs.client = self.client
            attrs.collection = self
            return attrs
        if isinstance(attrs, dict):
            return self.model(
                attrs=attrs, client=self.client, collection=self
            )
        raise Exception(
            "Can't create %s from %s" % (self.model.__name__, attrs)
        )
|
||||
|
|
@ -0,0 +1,240 @@
|
|||
import copy
|
||||
from docker.errors import create_unexpected_kwargs_error
|
||||
from docker.types import TaskTemplate, ContainerSpec
|
||||
from .resource import Model, Collection
|
||||
|
||||
|
||||
class Service(Model):
    """A service."""
    # Swarm API objects use 'ID' (uppercase) rather than 'Id'.
    id_attribute = 'ID'

    @property
    def name(self):
        """The service's name."""
        return self.attrs['Spec']['Name']

    @property
    def version(self):
        """
        The version number of the service. If this is not the same as the
        server, the :py:meth:`update` function will not work and you will
        need to call :py:meth:`reload` before calling it again.
        """
        return self.attrs.get('Version').get('Index')

    def remove(self):
        """
        Stop and remove the service.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        return self.client.api.remove_service(self.id)

    def tasks(self, filters=None):
        """
        List the tasks in this service.

        Args:
            filters (dict): A map of filters to process on the tasks list.
                Valid filters: ``id``, ``name``, ``node``,
                ``label``, and ``desired-state``.

        Returns:
            (list): List of task dictionaries.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        if filters is None:
            filters = {}
        # Restrict the query to tasks belonging to this service.
        filters['service'] = self.id
        return self.client.api.tasks(filters=filters)

    def update(self, **kwargs):
        """
        Update a service's configuration. Similar to the ``docker service
        update`` command.

        Takes the same parameters as :py:meth:`~ServiceCollection.create`.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        # Image is required, so if it hasn't been set, use current image
        if 'image' not in kwargs:
            current_spec = self.attrs['Spec']['TaskTemplate']['ContainerSpec']
            kwargs['image'] = current_spec['Image']

        update_kwargs = _get_create_service_kwargs('update', kwargs)

        return self.client.api.update_service(
            self.id, self.version, **update_kwargs
        )
|
||||
|
||||
|
||||
class ServiceCollection(Collection):
    """Services on the Docker server."""
    model = Service

    def create(self, image, command=None, **kwargs):
        """
        Create a service. Similar to the ``docker service create`` command.

        Args:
            image (str): The image name to use for the containers.
            command (list of str or str): Command to run.
            args (list of str): Arguments to the command.
            constraints (list of str): Placement constraints.
            container_labels (dict): Labels to apply to the container.
            endpoint_spec (dict): Properties that can be configured to
                access and load balance a service. Default: ``None``.
            env (list of str): Environment variables, in the form
                ``KEY=val``.
            labels (dict): Labels to apply to the service.
            log_driver (str): Log driver to use for containers.
            log_driver_options (dict): Log driver options.
            mode (string): Scheduling mode for the service (``replicated`` or
                ``global``). Defaults to ``replicated``.
            mounts (list of str): Mounts for the containers, in the form
                ``source:target:options``, where options is either
                ``ro`` or ``rw``.
            name (str): Name to give to the service.
            networks (list): List of network names or IDs to attach the
                service to. Default: ``None``.
            resources (dict): Resource limits and reservations. For the
                format, see the Remote API documentation.
            restart_policy (dict): Restart policy for containers. For the
                format, see the Remote API documentation.
            stop_grace_period (int): Amount of time to wait for
                containers to terminate before forcefully killing them.
            update_config (dict): Specification for the update strategy of the
                service. Default: ``None``
            user (str): User to run commands as.
            workdir (str): Working directory for commands to run.

        Returns:
            (:py:class:`Service`) The created service.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        kwargs.update(image=image, command=command)
        create_kwargs = _get_create_service_kwargs('create', kwargs)
        service_id = self.client.api.create_service(**create_kwargs)
        return self.get(service_id)

    def get(self, service_id):
        """
        Get a service.

        Args:
            service_id (str): The ID of the service.

        Returns:
            (:py:class:`Service`): The service.

        Raises:
            :py:class:`docker.errors.NotFound`
                If the service does not exist.
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        raw = self.client.api.inspect_service(service_id)
        return self.prepare_model(raw)

    def list(self, **kwargs):
        """
        List services.

        Args:
            filters (dict): Filters to process on the nodes list. Valid
                filters: ``id`` and ``name``. Default: ``None``.

        Returns:
            (list of :py:class:`Service`): The services.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        return [
            self.prepare_model(raw)
            for raw in self.client.api.services(**kwargs)
        ]
|
||||
|
||||
|
||||
# The three lists below drive _get_create_service_kwargs(): each
# user-facing keyword argument is routed to the API object it belongs to.

# kwargs to copy straight over to ContainerSpec
CONTAINER_SPEC_KWARGS = [
    'image',
    'command',
    'args',
    'env',
    'workdir',
    'user',
    'labels',
    'mounts',
    'stop_grace_period',
]

# kwargs to copy straight over to TaskTemplate
TASK_TEMPLATE_KWARGS = [
    'resources',
    'restart_policy',
]

# kwargs to copy straight over to create_service
CREATE_SERVICE_KWARGS = [
    'name',
    'labels',
    'mode',
    'update_config',
    'networks',
    'endpoint_spec',
]
|
||||
|
||||
|
||||
def _get_create_service_kwargs(func_name, kwargs):
    """
    Route the user-facing keyword arguments of ServiceCollection.create /
    Service.update into the keyword arguments expected by the low-level
    create_service / update_service API calls.
    """
    # Copy over things which can be copied directly.
    # Keys are pop()ed so only unrecognised ones remain at the end.
    create_kwargs = {
        key: kwargs.pop(key) for key in list(kwargs)
        if key in CREATE_SERVICE_KWARGS
    }
    container_spec_kwargs = {
        key: kwargs.pop(key) for key in list(kwargs)
        if key in CONTAINER_SPEC_KWARGS
    }
    task_template_kwargs = {
        key: kwargs.pop(key) for key in list(kwargs)
        if key in TASK_TEMPLATE_KWARGS
    }

    # 'labels' is taken by the service itself, so container labels use a
    # separate argument name.
    if 'container_labels' in kwargs:
        container_spec_kwargs['labels'] = kwargs.pop('container_labels')

    if 'constraints' in kwargs:
        task_template_kwargs['placement'] = {
            'Constraints': kwargs.pop('constraints')
        }

    if 'log_driver' in kwargs:
        task_template_kwargs['log_driver'] = {
            'Name': kwargs.pop('log_driver'),
            'Options': kwargs.pop('log_driver_options', {})
        }

    # All kwargs should have been consumed by this point, so raise
    # error if any are left
    if kwargs:
        raise create_unexpected_kwargs_error(func_name, kwargs)

    task_template_kwargs['container_spec'] = ContainerSpec(
        **container_spec_kwargs
    )
    create_kwargs['task_template'] = TaskTemplate(**task_template_kwargs)
    return create_kwargs
|
||||
|
|
@ -0,0 +1,146 @@
|
|||
from docker.api import APIClient
|
||||
from docker.errors import APIError
|
||||
from docker.types import SwarmSpec
|
||||
from .resource import Model
|
||||
|
||||
|
||||
class Swarm(Model):
|
||||
"""
|
||||
The server's Swarm state. This a singleton that must be reloaded to get
|
||||
the current state of the Swarm.
|
||||
"""
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(Swarm, self).__init__(*args, **kwargs)
|
||||
if self.client:
|
||||
try:
|
||||
self.reload()
|
||||
except APIError as e:
|
||||
if e.response.status_code != 406:
|
||||
raise
|
||||
|
||||
@property
|
||||
def version(self):
|
||||
"""
|
||||
The version number of the swarm. If this is not the same as the
|
||||
server, the :py:meth:`update` function will not work and you will
|
||||
need to call :py:meth:`reload` before calling it again.
|
||||
"""
|
||||
return self.attrs.get('Version').get('Index')
|
||||
|
||||
def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
|
||||
force_new_cluster=False, swarm_spec=None, **kwargs):
|
||||
"""
|
||||
Initialize a new swarm on this Engine.
|
||||
|
||||
Args:
|
||||
advertise_addr (str): Externally reachable address advertised to
|
||||
other nodes. This can either be an address/port combination in
|
||||
the form ``192.168.1.1:4567``, or an interface followed by a
|
||||
port number, like ``eth0:4567``. If the port number is omitted,
|
||||
the port number from the listen address is used.
|
||||
|
||||
If not specified, it will be automatically detected when
|
||||
possible.
|
||||
listen_addr (str): Listen address used for inter-manager
|
||||
communication, as well as determining the networking interface
|
||||
used for the VXLAN Tunnel Endpoint (VTEP). This can either be
|
||||
an address/port combination in the form ``192.168.1.1:4567``,
|
||||
or an interface followed by a port number, like ``eth0:4567``.
|
||||
If the port number is omitted, the default swarm listening port
|
||||
is used. Default: ``0.0.0.0:2377``
|
||||
force_new_cluster (bool): Force creating a new Swarm, even if
|
||||
already part of one. Default: False
|
||||
task_history_retention_limit (int): Maximum number of tasks
|
||||
history stored.
|
||||
snapshot_interval (int): Number of logs entries between snapshot.
|
||||
keep_old_snapshots (int): Number of snapshots to keep beyond the
|
||||
current snapshot.
|
||||
log_entries_for_slow_followers (int): Number of log entries to
|
||||
keep around to sync up slow followers after a snapshot is
|
||||
created.
|
||||
heartbeat_tick (int): Amount of ticks (in seconds) between each
|
||||
heartbeat.
|
||||
election_tick (int): Amount of ticks (in seconds) needed without a
|
||||
leader to trigger a new election.
|
||||
dispatcher_heartbeat_period (int): The delay for an agent to send
|
||||
a heartbeat to the dispatcher.
|
||||
node_cert_expiry (int): Automatic expiry for nodes certificates.
|
||||
external_ca (dict): Configuration for forwarding signing requests
|
||||
to an external certificate authority. Use
|
||||
``docker.types.SwarmExternalCA``.
|
||||
name (string): Swarm's name
|
||||
|
||||
Returns:
|
||||
``True`` if the request went through.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
|
||||
Example:
|
||||
|
||||
>>> client.swarm.init(
|
||||
advertise_addr='eth0', listen_addr='0.0.0.0:5000',
|
||||
force_new_cluster=False, snapshot_interval=5000,
|
||||
log_entries_for_slow_followers=1200
|
||||
)
|
||||
|
||||
"""
|
||||
init_kwargs = {}
|
||||
for arg in ['advertise_addr', 'listen_addr', 'force_new_cluster']:
|
||||
if arg in kwargs:
|
||||
init_kwargs[arg] = kwargs[arg]
|
||||
del kwargs[arg]
|
||||
init_kwargs['swarm_spec'] = SwarmSpec(**kwargs)
|
||||
self.client.api.init_swarm(**init_kwargs)
|
||||
self.reload()
|
||||
|
||||
def join(self, *args, **kwargs):
|
||||
return self.client.api.join_swarm(*args, **kwargs)
|
||||
join.__doc__ = APIClient.join_swarm.__doc__
|
||||
|
||||
def leave(self, *args, **kwargs):
|
||||
return self.client.api.leave_swarm(*args, **kwargs)
|
||||
leave.__doc__ = APIClient.leave_swarm.__doc__
|
||||
|
||||
def reload(self):
|
||||
"""
|
||||
Inspect the swarm on the server and store the response in
|
||||
:py:attr:`attrs`.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
self.attrs = self.client.api.inspect_swarm()
|
||||
|
||||
def update(self, rotate_worker_token=False, rotate_manager_token=False,
           **kwargs):
    """
    Update the swarm's configuration.

    Accepts the same keyword arguments as :py:meth:`init`, except
    ``advertise_addr``, ``listen_addr`` and ``force_new_cluster``.
    In addition, it takes these arguments:

    Args:
        rotate_worker_token (bool): Rotate the worker join token.
            Default: ``False``.
        rotate_manager_token (bool): Rotate the manager join token.
            Default: ``False``.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.

    """
    # The endpoint appears to require a certificate expiry value; when
    # the caller leaves it unset, fall back to 7776000000000000 ns
    # (90 days), matching the daemon default.
    if kwargs.get('node_cert_expiry') is None:
        kwargs['node_cert_expiry'] = 7776000000000000

    spec = SwarmSpec(**kwargs)
    return self.client.api.update_swarm(
        version=self.version,
        swarm_spec=spec,
        rotate_worker_token=rotate_worker_token,
        rotate_manager_token=rotate_manager_token
    )
|
||||
|
|
@ -0,0 +1,84 @@
|
|||
from .resource import Model, Collection
|
||||
|
||||
|
||||
class Volume(Model):
    """A named data volume on the Docker server."""

    # Volumes are identified by their name rather than a hash ID.
    id_attribute = 'Name'

    @property
    def name(self):
        """The name under which the volume is registered."""
        return self.attrs['Name']

    def remove(self):
        """Delete this volume from the server."""
        volume_name = self.id
        return self.client.api.remove_volume(volume_name)
|
||||
|
||||
|
||||
class VolumeCollection(Collection):
    """Volumes on the Docker server."""
    model = Volume

    def create(self, name, **kwargs):
        """
        Create a new volume.

        Args:
            name (str): Name of the volume.
            driver (str): Name of the driver used to create the volume.
            driver_opts (dict): Driver options as a key-value dictionary.
            labels (dict): Labels to set on the volume.

        Returns:
            (:py:class:`Volume`): The volume created.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:

            >>> volume = client.volumes.create(name='foobar', driver='local',
                    driver_opts={'foo': 'bar', 'baz': 'false'},
                    labels={"key": "value"})

        """
        created = self.client.api.create_volume(name, **kwargs)
        return self.prepare_model(created)

    def get(self, volume_id):
        """
        Get a single volume by name.

        Args:
            volume_id (str): Volume name.

        Returns:
            (:py:class:`Volume`): The volume.

        Raises:
            :py:class:`docker.errors.NotFound`
                If the volume does not exist.
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        attrs = self.client.api.inspect_volume(volume_id)
        return self.prepare_model(attrs)

    def list(self, **kwargs):
        """
        List volumes. Similar to the ``docker volume ls`` command.

        Args:
            filters (dict): Server-side list filtering options.

        Returns:
            (list of :py:class:`Volume`): The volumes.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        resp = self.client.api.volumes(**kwargs)
        # The endpoint may report no volumes as a missing or null key.
        raw_volumes = resp.get('Volumes') or []
        return [self.prepare_model(item) for item in raw_volumes]
|
||||
|
|
@ -1 +0,0 @@
|
|||
from .ssladapter import SSLAdapter # flake8: noqa
|
||||
|
|
@ -1,11 +1,24 @@
|
|||
import os
|
||||
import ssl
|
||||
|
||||
from . import errors
|
||||
from .ssladapter import ssladapter
|
||||
from . import errors, ssladapter
|
||||
|
||||
|
||||
class TLSConfig(object):
|
||||
"""
|
||||
TLS configuration.
|
||||
|
||||
Args:
|
||||
client_cert (tuple of str): Path to client cert, path to client key.
|
||||
ca_cert (str): Path to CA cert file.
|
||||
verify (bool or str): This can be ``False`` or a path to a CA cert
|
||||
file.
|
||||
ssl_version (int): A valid `SSL version`_.
|
||||
assert_hostname (bool): Verify the hostname of the server.
|
||||
|
||||
.. _`SSL version`:
|
||||
https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
|
||||
"""
|
||||
cert = None
|
||||
ca_cert = None
|
||||
verify = None
|
||||
|
|
@ -58,6 +71,9 @@ class TLSConfig(object):
|
|||
)
|
||||
|
||||
def configure_client(self, client):
|
||||
"""
|
||||
Configure a client with these TLS options.
|
||||
"""
|
||||
client.ssl_version = self.ssl_version
|
||||
|
||||
if self.verify and self.ca_cert:
|
||||
|
|
|
|||
|
|
@ -0,0 +1,79 @@
|
|||
from __future__ import absolute_import
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import json
|
||||
import json.decoder
|
||||
|
||||
import six
|
||||
|
||||
from ..errors import StreamParseError
|
||||
|
||||
|
||||
json_decoder = json.JSONDecoder()
|
||||
|
||||
|
||||
def stream_as_text(stream):
    """Yield every item of *stream* as text.

    Byte items are decoded as UTF-8 with undecodable bytes replaced;
    text items pass through untouched. This can be removed once
    docker-py returns text streams instead of byte streams.
    """
    for chunk in stream:
        if isinstance(chunk, six.text_type):
            yield chunk
        else:
            yield chunk.decode('utf-8', 'replace')
|
||||
|
||||
|
||||
def json_splitter(buffer):
    """Try to parse one JSON object off the front of *buffer*.

    Returns a ``(parsed_object, remaining_text)`` tuple when at least one
    complete object is present, otherwise ``None``.
    """
    stripped = buffer.strip()
    try:
        obj, end = json_decoder.raw_decode(stripped)
    except ValueError:
        return None
    # Skip any whitespace separating this object from the next chunk.
    remainder = stripped[json.decoder.WHITESPACE.match(stripped, end).end():]
    return obj, remainder
|
||||
|
||||
|
||||
def json_stream(stream):
    """Turn a stream of text into a stream of parsed JSON objects.

    Tolerates inconsistently buffered input (some entries newline
    delimited, others not).
    """
    return split_buffer(
        stream, splitter=json_splitter, decoder=json_decoder.decode
    )
|
||||
|
||||
|
||||
def line_splitter(buffer, separator=u'\n'):
    """Split one separator-terminated chunk off the front of *buffer*.

    Returns ``(chunk_with_separator, rest)``, or ``None`` when the
    separator does not occur in *buffer*.
    """
    pos = buffer.find(six.text_type(separator))
    if pos < 0:
        return None
    cut = pos + 1
    return buffer[:cut], buffer[cut:]
|
||||
|
||||
|
||||
def split_buffer(stream, splitter=None, decoder=lambda a: a):
    """Join a string stream, re-split it with *splitter*, and yield chunks.

    Unlike ``str.split()``, every yielded chunk keeps its trailing
    separator — except a final chunk that ended without one, which is
    passed through *decoder* before being yielded.

    Raises:
        StreamParseError: if *decoder* fails on the trailing chunk.
    """
    split = splitter or line_splitter
    pending = six.text_type('')

    for text in stream_as_text(stream):
        pending += text
        # Drain every complete chunk currently sitting in the buffer.
        while True:
            result = split(pending)
            if result is None:
                break
            item, pending = result
            yield item

    if pending:
        try:
            yield decoder(pending)
        except Exception as e:
            raise StreamParseError(e)
|
||||
|
|
@ -1,4 +0,0 @@
|
|||
from .ports import (
|
||||
split_port,
|
||||
build_port_bindings
|
||||
) # flake8: noqa
|
||||
|
|
@ -39,6 +39,38 @@ BYTE_UNITS = {
|
|||
|
||||
def create_ipam_pool(subnet=None, iprange=None, gateway=None,
|
||||
aux_addresses=None):
|
||||
"""
|
||||
Create an IPAM pool config dictionary to be added to the
|
||||
``pool_configs`` parameter of
|
||||
:py:meth:`~docker.utils.create_ipam_config`.
|
||||
|
||||
Args:
|
||||
|
||||
subnet (str): Custom subnet for this IPAM pool using the CIDR
|
||||
notation. Defaults to ``None``.
|
||||
iprange (str): Custom IP range for endpoints in this IPAM pool using
|
||||
the CIDR notation. Defaults to ``None``.
|
||||
gateway (str): Custom IP address for the pool's gateway.
|
||||
aux_addresses (dict): A dictionary of ``key -> ip_address``
|
||||
relationships specifying auxiliary addresses that need to be
|
||||
allocated by the IPAM driver.
|
||||
|
||||
Returns:
|
||||
(dict) An IPAM pool config
|
||||
|
||||
Example:
|
||||
|
||||
>>> ipam_pool = docker.utils.create_ipam_pool(
|
||||
subnet='124.42.0.0/16',
|
||||
iprange='124.42.0.0/24',
|
||||
gateway='124.42.0.254',
|
||||
aux_addresses={
|
||||
'reserved1': '124.42.1.1'
|
||||
}
|
||||
)
|
||||
>>> ipam_config = docker.utils.create_ipam_config(
|
||||
pool_configs=[ipam_pool])
|
||||
"""
|
||||
return {
|
||||
'Subnet': subnet,
|
||||
'IPRange': iprange,
|
||||
|
|
@ -48,6 +80,25 @@ def create_ipam_pool(subnet=None, iprange=None, gateway=None,
|
|||
|
||||
|
||||
def create_ipam_config(driver='default', pool_configs=None):
|
||||
"""
|
||||
Create an IPAM (IP Address Management) config dictionary to be used with
|
||||
:py:meth:`~docker.api.network.NetworkApiMixin.create_network`.
|
||||
|
||||
Args:
|
||||
driver (str): The IPAM driver to use. Defaults to ``default``.
|
||||
pool_configs (list): A list of pool configuration dictionaries as
|
||||
created by :py:meth:`~docker.utils.create_ipam_pool`. Defaults to
|
||||
empty list.
|
||||
|
||||
Returns:
|
||||
(dict) An IPAM config.
|
||||
|
||||
Example:
|
||||
|
||||
>>> ipam_config = docker.utils.create_ipam_config(driver='default')
|
||||
>>> network = client.create_network('network1', ipam=ipam_config)
|
||||
|
||||
"""
|
||||
return {
|
||||
'Driver': driver,
|
||||
'Config': pool_configs or []
|
||||
|
|
|
|||
|
|
@ -1 +1,2 @@
|
|||
mkdocs==0.15.3
|
||||
recommonmark==0.4.0
|
||||
Sphinx==1.4.6
|
||||
|
|
|
|||
|
|
@ -0,0 +1,3 @@
|
|||
dl.hide-signature > dt {
|
||||
display: none;
|
||||
}
|
||||
|
|
@ -0,0 +1,2 @@
|
|||
{% extends "!page.html" %}
|
||||
{% set css_files = css_files + ["_static/custom.css"] %}
|
||||
1237
docs/api.md
1237
docs/api.md
File diff suppressed because it is too large
Load Diff
|
|
@ -0,0 +1,109 @@
|
|||
Low-level API
|
||||
=============
|
||||
|
||||
The main object-oriented API is built on top of :py:class:`APIClient`. Each method on :py:class:`APIClient` maps one-to-one with a REST API endpoint, and returns the response that the API responds with.
|
||||
|
||||
It's possible to use :py:class:`APIClient` directly. Some basic things (e.g. running a container) consist of several API calls and are complex to do with the low-level API, but it's useful if you need extra flexibility and power.
|
||||
|
||||
.. py:module:: docker.api
|
||||
|
||||
.. autoclass:: docker.api.client.APIClient
|
||||
|
||||
Containers
|
||||
----------
|
||||
|
||||
.. py:module:: docker.api.container
|
||||
|
||||
.. rst-class:: hide-signature
|
||||
.. autoclass:: ContainerApiMixin
|
||||
:members:
|
||||
:undoc-members:
|
||||
|
||||
.. py:module:: docker.api.image
|
||||
|
||||
Images
|
||||
------
|
||||
|
||||
.. py:module:: docker.api.image
|
||||
|
||||
.. rst-class:: hide-signature
|
||||
.. autoclass:: ImageApiMixin
|
||||
:members:
|
||||
:undoc-members:
|
||||
|
||||
Building images
|
||||
---------------
|
||||
|
||||
.. py:module:: docker.api.build
|
||||
|
||||
.. rst-class:: hide-signature
|
||||
.. autoclass:: BuildApiMixin
|
||||
:members:
|
||||
:undoc-members:
|
||||
|
||||
Networks
|
||||
--------
|
||||
|
||||
.. rst-class:: hide-signature
|
||||
.. autoclass:: docker.api.network.NetworkApiMixin
|
||||
:members:
|
||||
:undoc-members:
|
||||
|
||||
Utilities
|
||||
~~~~~~~~~
|
||||
|
||||
These functions are available under ``docker.utils`` to create arguments
|
||||
for :py:meth:`create_network`:
|
||||
|
||||
.. autofunction:: docker.utils.create_ipam_config
|
||||
.. autofunction:: docker.utils.create_ipam_pool
|
||||
|
||||
Volumes
|
||||
-------
|
||||
|
||||
.. py:module:: docker.api.volume
|
||||
|
||||
.. rst-class:: hide-signature
|
||||
.. autoclass:: VolumeApiMixin
|
||||
:members:
|
||||
:undoc-members:
|
||||
|
||||
Executing commands in containers
|
||||
--------------------------------
|
||||
|
||||
.. py:module:: docker.api.exec_api
|
||||
|
||||
.. rst-class:: hide-signature
|
||||
.. autoclass:: ExecApiMixin
|
||||
:members:
|
||||
:undoc-members:
|
||||
|
||||
Swarms
|
||||
------
|
||||
|
||||
.. py:module:: docker.api.swarm
|
||||
|
||||
.. rst-class:: hide-signature
|
||||
.. autoclass:: SwarmApiMixin
|
||||
:members:
|
||||
:undoc-members:
|
||||
|
||||
Services
|
||||
--------
|
||||
|
||||
.. py:module:: docker.api.service
|
||||
|
||||
.. rst-class:: hide-signature
|
||||
.. autoclass:: ServiceApiMixin
|
||||
:members:
|
||||
:undoc-members:
|
||||
|
||||
The Docker daemon
|
||||
-----------------
|
||||
|
||||
.. py:module:: docker.api.daemon
|
||||
|
||||
.. rst-class:: hide-signature
|
||||
.. autoclass:: DaemonApiMixin
|
||||
:members:
|
||||
:undoc-members:
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
Change Log
|
||||
Change log
|
||||
==========
|
||||
|
||||
1.10.3
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
Client
|
||||
======
|
||||
.. py:module:: docker.client
|
||||
|
||||
|
||||
Creating a client
|
||||
-----------------
|
||||
|
||||
To communicate with the Docker daemon, you first need to instantiate a client. The easiest way to do that is by calling the function :py:func:`~docker.client.from_env`. It can also be configured manually by instantiating a :py:class:`~docker.client.Client` class.
|
||||
|
||||
.. autofunction:: from_env()
|
||||
|
||||
Client reference
|
||||
----------------
|
||||
|
||||
.. autoclass:: Client()
|
||||
|
||||
.. autoattribute:: containers
|
||||
.. autoattribute:: images
|
||||
.. autoattribute:: networks
|
||||
.. autoattribute:: nodes
|
||||
.. autoattribute:: services
|
||||
.. autoattribute:: swarm
|
||||
.. autoattribute:: volumes
|
||||
|
||||
.. automethod:: events()
|
||||
.. automethod:: info()
|
||||
.. automethod:: login()
|
||||
.. automethod:: ping()
|
||||
.. automethod:: version()
|
||||
|
|
@ -0,0 +1,365 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# docker-sdk-python documentation build configuration file, created by
|
||||
# sphinx-quickstart on Wed Sep 14 15:48:58 2016.
|
||||
#
|
||||
# This file is execfile()d with the current directory set to its
|
||||
# containing dir.
|
||||
#
|
||||
# Note that not all possible configuration values are present in this
|
||||
# autogenerated file.
|
||||
#
|
||||
# All configuration values have a default; values that are commented out
|
||||
# serve to show the default.
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
#
|
||||
import datetime
|
||||
import os
|
||||
import sys
|
||||
sys.path.insert(0, os.path.abspath('..'))
|
||||
|
||||
|
||||
# -- General configuration ------------------------------------------------
|
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here.
|
||||
#
|
||||
# needs_sphinx = '1.0'
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||
# ones.
|
||||
extensions = [
|
||||
'sphinx.ext.autodoc',
|
||||
'sphinx.ext.napoleon',
|
||||
]
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
|
||||
|
||||
from recommonmark.parser import CommonMarkParser
|
||||
|
||||
source_parsers = {
|
||||
'.md': CommonMarkParser,
|
||||
}
|
||||
|
||||
# The suffix(es) of source filenames.
|
||||
# You can specify multiple suffix as a list of string:
|
||||
#
|
||||
source_suffix = ['.rst', '.md']
|
||||
# source_suffix = '.md'
|
||||
|
||||
# The encoding of source files.
|
||||
#
|
||||
# source_encoding = 'utf-8-sig'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'Docker SDK for Python'
|
||||
year = datetime.datetime.now().year
|
||||
copyright = u'%d Docker Inc' % year
|
||||
author = u'Docker Inc'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
version = u'2.0'
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = u'2.0'
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
#
|
||||
# This is also used if you do content translation via gettext catalogs.
|
||||
# Usually you set "language" from the command line for these cases.
|
||||
language = None
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to some
|
||||
# non-false value, then it is used:
|
||||
#
|
||||
# today = ''
|
||||
#
|
||||
# Else, today_fmt is used as the format for a strftime call.
|
||||
#
|
||||
# today_fmt = '%B %d, %Y'
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
# This patterns also effect to html_static_path and html_extra_path
|
||||
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
|
||||
|
||||
# The reST default role (used for this markup: `text`) to use for all
|
||||
# documents.
|
||||
#
|
||||
# default_role = None
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
#
|
||||
# add_function_parentheses = True
|
||||
|
||||
# If true, the current module name will be prepended to all description
|
||||
# unit titles (such as .. function::).
|
||||
#
|
||||
add_module_names = False
|
||||
|
||||
# If true, sectionauthor and moduleauthor directives will be shown in the
|
||||
# output. They are ignored by default.
|
||||
#
|
||||
# show_authors = False
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
|
||||
# A list of ignored prefixes for module index sorting.
|
||||
# modindex_common_prefix = []
|
||||
|
||||
# If true, keep warnings as "system message" paragraphs in the built documents.
|
||||
# keep_warnings = False
|
||||
|
||||
# If true, `todo` and `todoList` produce output, else they produce nothing.
|
||||
todo_include_todos = False
|
||||
|
||||
|
||||
# -- Options for HTML output ----------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
#
|
||||
html_theme = 'alabaster'
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
#
|
||||
html_theme_options = {
|
||||
'description': 'A Python library for the Docker Remote API',
|
||||
'fixed_sidebar': True,
|
||||
}
|
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory.
|
||||
# html_theme_path = []
|
||||
|
||||
# The name for this set of Sphinx documents.
|
||||
# "<project> v<release> documentation" by default.
|
||||
#
|
||||
# html_title = u'docker-sdk-python v2.0'
|
||||
|
||||
# A shorter title for the navigation bar. Default is the same as html_title.
|
||||
#
|
||||
# html_short_title = None
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top
|
||||
# of the sidebar.
|
||||
#
|
||||
# html_logo = None
|
||||
|
||||
# The name of an image file (relative to this directory) to use as a favicon of
|
||||
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
||||
# pixels large.
|
||||
#
|
||||
# html_favicon = None
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ['_static']
|
||||
|
||||
# Add any extra paths that contain custom files (such as robots.txt or
|
||||
# .htaccess) here, relative to this directory. These files are copied
|
||||
# directly to the root of the documentation.
|
||||
#
|
||||
# html_extra_path = []
|
||||
|
||||
# If not None, a 'Last updated on:' timestamp is inserted at every page
|
||||
# bottom, using the given strftime format.
|
||||
# The empty string is equivalent to '%b %d, %Y'.
|
||||
#
|
||||
# html_last_updated_fmt = None
|
||||
|
||||
# If true, SmartyPants will be used to convert quotes and dashes to
|
||||
# typographically correct entities.
|
||||
#
|
||||
# html_use_smartypants = True
|
||||
|
||||
# Custom sidebar templates, maps document names to template names.
|
||||
#
|
||||
html_sidebars = {
|
||||
'**': [
|
||||
'about.html',
|
||||
'navigation.html',
|
||||
'searchbox.html',
|
||||
]
|
||||
}
|
||||
|
||||
# Additional templates that should be rendered to pages, maps page names to
|
||||
# template names.
|
||||
#
|
||||
# html_additional_pages = {}
|
||||
|
||||
# If false, no module index is generated.
|
||||
#
|
||||
# html_domain_indices = True
|
||||
|
||||
# If false, no index is generated.
|
||||
#
|
||||
# html_use_index = True
|
||||
|
||||
# If true, the index is split into individual pages for each letter.
|
||||
#
|
||||
# html_split_index = False
|
||||
|
||||
# If true, links to the reST sources are added to the pages.
|
||||
#
|
||||
# html_show_sourcelink = True
|
||||
|
||||
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
|
||||
#
|
||||
# html_show_sphinx = True
|
||||
|
||||
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
|
||||
#
|
||||
# html_show_copyright = True
|
||||
|
||||
# If true, an OpenSearch description file will be output, and all pages will
|
||||
# contain a <link> tag referring to it. The value of this option must be the
|
||||
# base URL from which the finished HTML is served.
|
||||
#
|
||||
# html_use_opensearch = ''
|
||||
|
||||
# This is the file name suffix for HTML files (e.g. ".xhtml").
|
||||
# html_file_suffix = None
|
||||
|
||||
# Language to be used for generating the HTML full-text search index.
|
||||
# Sphinx supports the following languages:
|
||||
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
|
||||
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
|
||||
#
|
||||
# html_search_language = 'en'
|
||||
|
||||
# A dictionary with options for the search language support, empty by default.
|
||||
# 'ja' uses this config value.
|
||||
# 'zh' users can customize the `jieba` dictionary path.
|
||||
#
|
||||
# html_search_options = {'type': 'default'}
|
||||
|
||||
# The name of a javascript file (relative to the configuration directory) that
|
||||
# implements a search results scorer. If empty, the default will be used.
|
||||
#
|
||||
# html_search_scorer = 'scorer.js'
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'docker-sdk-pythondoc'
|
||||
|
||||
# -- Options for LaTeX output ---------------------------------------------
|
||||
|
||||
latex_elements = {
|
||||
# The paper size ('letterpaper' or 'a4paper').
|
||||
#
|
||||
# 'papersize': 'letterpaper',
|
||||
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
#
|
||||
# 'pointsize': '10pt',
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#
|
||||
# 'preamble': '',
|
||||
|
||||
# Latex figure (float) alignment
|
||||
#
|
||||
# 'figure_align': 'htbp',
|
||||
}
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title,
|
||||
# author, documentclass [howto, manual, or own class]).
|
||||
latex_documents = [
|
||||
(master_doc, 'docker-sdk-python.tex', u'docker-sdk-python Documentation',
|
||||
u'Docker Inc.', 'manual'),
|
||||
]
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top of
|
||||
# the title page.
|
||||
#
|
||||
# latex_logo = None
|
||||
|
||||
# For "manual" documents, if this is true, then toplevel headings are parts,
|
||||
# not chapters.
|
||||
#
|
||||
# latex_use_parts = False
|
||||
|
||||
# If true, show page references after internal links.
|
||||
#
|
||||
# latex_show_pagerefs = False
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
#
|
||||
# latex_show_urls = False
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
#
|
||||
# latex_appendices = []
|
||||
|
||||
# If false, will not define \strong, \code, \titleref, \crossref ... but only
|
||||
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
|
||||
# packages.
|
||||
#
|
||||
# latex_keep_old_macro_names = True
|
||||
|
||||
# If false, no module index is generated.
|
||||
#
|
||||
# latex_domain_indices = True
|
||||
|
||||
|
||||
# -- Options for manual page output ---------------------------------------
|
||||
|
||||
# One entry per manual page. List of tuples
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
|
||||
(master_doc, 'docker-sdk-python', u'docker-sdk-python Documentation',
|
||||
[author], 1)
|
||||
]
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
#
|
||||
# man_show_urls = False
|
||||
|
||||
|
||||
# -- Options for Texinfo output -------------------------------------------
|
||||
|
||||
# Grouping the document tree into Texinfo files. List of tuples
|
||||
# (source start file, target name, title, author,
|
||||
# dir menu entry, description, category)
|
||||
texinfo_documents = [
|
||||
(master_doc, 'docker-sdk-python', u'docker-sdk-python Documentation',
|
||||
author, 'docker-sdk-python', 'One line description of project.',
|
||||
'Miscellaneous'),
|
||||
]
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
#
|
||||
# texinfo_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
#
|
||||
# texinfo_domain_indices = True
|
||||
|
||||
# How to display URL addresses: 'footnote', 'no', or 'inline'.
|
||||
#
|
||||
# texinfo_show_urls = 'footnote'
|
||||
|
||||
# If true, do not generate a @detailmenu in the "Top" node's menu.
|
||||
#
|
||||
# texinfo_no_detailmenu = False
|
||||
|
||||
|
||||
# Napoleon settings
|
||||
napoleon_google_docstring = True
|
||||
napoleon_numpy_docstring = False
|
||||
|
|
@ -0,0 +1,51 @@
|
|||
Containers
|
||||
==========
|
||||
|
||||
.. py:module:: docker.models.containers
|
||||
|
||||
Run and manage containers on the server.
|
||||
|
||||
Methods available on ``client.containers``:
|
||||
|
||||
.. rst-class:: hide-signature
|
||||
.. autoclass:: ContainerCollection
|
||||
|
||||
.. automethod:: run(image, command=None, **kwargs)
|
||||
.. automethod:: create(image, command=None, **kwargs)
|
||||
.. automethod:: get(id_or_name)
|
||||
.. automethod:: list(**kwargs)
|
||||
|
||||
Container objects
|
||||
-----------------
|
||||
|
||||
.. autoclass:: Container()
|
||||
|
||||
.. autoattribute:: id
|
||||
.. autoattribute:: short_id
|
||||
.. autoattribute:: name
|
||||
.. autoattribute:: status
|
||||
.. py:attribute:: attrs
|
||||
|
||||
The raw representation of this object from the server.
|
||||
|
||||
.. automethod:: attach
|
||||
.. automethod:: attach_socket
|
||||
.. automethod:: commit
|
||||
.. automethod:: diff
|
||||
.. automethod:: exec_run
|
||||
.. automethod:: export
|
||||
.. automethod:: get_archive
|
||||
.. automethod:: kill
|
||||
.. automethod:: logs
|
||||
.. automethod:: pause
|
||||
.. automethod:: put_archive
|
||||
.. automethod:: remove
|
||||
.. automethod:: rename
|
||||
.. automethod:: resize
|
||||
.. automethod:: restart
|
||||
.. automethod:: start
|
||||
.. automethod:: stats
|
||||
.. automethod:: stop
|
||||
.. automethod:: top
|
||||
.. automethod:: unpause
|
||||
.. automethod:: update
|
||||
|
|
@ -1,36 +0,0 @@
|
|||
# Contributing
|
||||
See the [Docker contributing guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md).
|
||||
The following is specific to docker-py.
|
||||
|
||||
## Running the tests & Code Quality
|
||||
|
||||
|
||||
To get the source code and run the unit tests, run:
|
||||
```
|
||||
$ git clone git://github.com/docker/docker-py.git
|
||||
$ cd docker-py
|
||||
$ pip install tox
|
||||
$ tox
|
||||
```
|
||||
|
||||
## Building the docs
|
||||
Docs are built with [MkDocs](http://www.mkdocs.org/). For development, you can
|
||||
run the following in the project directory:
|
||||
```
|
||||
$ pip install -r docs-requirements.txt
|
||||
$ mkdocs serve
|
||||
```
|
||||
|
||||
## Release Checklist
|
||||
|
||||
Before a new release, please go through the following checklist:
|
||||
|
||||
* Bump version in docker/version.py
|
||||
* Add a release note in docs/change_log.md
|
||||
* Git tag the version
|
||||
* Upload to pypi
|
||||
|
||||
## Vulnerability Reporting
|
||||
For any security issues, please do NOT file an issue or pull request on github!
|
||||
Please contact [security@docker.com](mailto:security@docker.com) or read [the
|
||||
Docker security page](https://www.docker.com/resources/security/).
|
||||
|
|
@ -1,29 +0,0 @@
|
|||
# Access to devices on the host
|
||||
|
||||
If you need to directly expose some host devices to a container, you can use
|
||||
the devices parameter in the `host_config` param in `Client.create_container`
|
||||
as shown below:
|
||||
|
||||
```python
|
||||
cli.create_container(
|
||||
'busybox', 'true', host_config=cli.create_host_config(devices=[
|
||||
'/dev/sda:/dev/xvda:rwm'
|
||||
])
|
||||
)
|
||||
```
|
||||
|
||||
Each string is a single mapping using the following format:
|
||||
`<path_on_host>:<path_in_container>:<cgroup_permissions>`
|
||||
The above example allows the container to have read-write access to
|
||||
the host's `/dev/sda` via a node named `/dev/xvda` inside the container.
|
||||
|
||||
As a more verbose alternative, each host device definition can be specified as
|
||||
a dictionary with the following keys:
|
||||
|
||||
```python
|
||||
{
|
||||
'PathOnHost': '/dev/sda1',
|
||||
'PathInContainer': '/dev/xvda',
|
||||
'CgroupPermissions': 'rwm'
|
||||
}
|
||||
```
|
||||
|
|
@ -1,142 +0,0 @@
|
|||
# HostConfig object
|
||||
|
||||
The Docker Remote API introduced [support for HostConfig in version 1.15](http://docs.docker.com/reference/api/docker_remote_api_v1.15/#create-a-container).
|
||||
This object contains all the parameters you could previously pass to `Client.start`.
|
||||
*It is highly recommended that users pass the HostConfig in the `host_config`*
|
||||
*param of `Client.create_container` instead of `Client.start`*
|
||||
|
||||
## HostConfig helper
|
||||
|
||||
### Client.create_host_config
|
||||
|
||||
Creates a HostConfig dictionary to be used with `Client.create_container`.
|
||||
|
||||
`binds` allows to bind a directory in the host to the container. See [Using
|
||||
volumes](volumes.md) for more information.
|
||||
|
||||
`port_bindings` exposes container ports to the host.
|
||||
See [Port bindings](port-bindings.md) for more information.
|
||||
|
||||
`lxc_conf` allows to pass LXC configuration options using a dictionary.
|
||||
|
||||
`privileged` starts the container in privileged mode.
|
||||
|
||||
[Links](http://docs.docker.io/en/latest/use/working_with_links_names/) can be
|
||||
specified with the `links` argument. They can either be specified as a
|
||||
dictionary mapping name to alias or as a list of `(name, alias)` tuples.
|
||||
|
||||
`dns` and `volumes_from` are only available if they are used with version v1.10
|
||||
of docker remote API. Otherwise they are ignored.
|
||||
|
||||
`network_mode` is available since v1.11 and sets the Network mode for the
|
||||
container ('bridge': creates a new network stack for the container on the
|
||||
Docker bridge, 'none': no networking for this container, 'container:[name|id]':
|
||||
reuses another container network stack, 'host': use the host network stack
|
||||
inside the container or any name that identifies an existing Docker network).
|
||||
|
||||
`restart_policy` is available since v1.2.0 and sets the container's *RestartPolicy*
|
||||
which defines the conditions under which a container should be restarted upon exit.
|
||||
If no *RestartPolicy* is defined, the container will not be restarted when it exits.
|
||||
The *RestartPolicy* is specified as a dict. For example, if the container
|
||||
should always be restarted:
|
||||
```python
|
||||
{
|
||||
"MaximumRetryCount": 0,
|
||||
"Name": "always"
|
||||
}
|
||||
```
|
||||
|
||||
It is possible to restart the container only on failure as well as limit the number
|
||||
of restarts. For example:
|
||||
```python
|
||||
{
|
||||
"MaximumRetryCount": 5,
|
||||
"Name": "on-failure"
|
||||
}
|
||||
```
|
||||
|
||||
`cap_add` and `cap_drop` are available since v1.2.0 and can be used to add or
|
||||
drop certain capabilities. The user may specify the capabilities as an array
|
||||
for example:
|
||||
```python
|
||||
[
|
||||
"SYS_ADMIN",
|
||||
"MKNOD"
|
||||
]
|
||||
```
|
||||
|
||||
|
||||
**Params**
|
||||
|
||||
* binds: Volumes to bind. See [Using volumes](volumes.md) for more information.
|
||||
* port_bindings (dict): Port bindings. See [Port bindings](port-bindings.md)
|
||||
for more information.
|
||||
* lxc_conf (dict): LXC config
|
||||
* oom_kill_disable (bool): Whether to disable OOM killer
|
||||
* oom_score_adj (int): An integer value containing the score given to the
|
||||
container in order to tune OOM killer preferences
|
||||
* publish_all_ports (bool): Whether to publish all ports to the host
|
||||
* links (dict or list of tuples): either as a dictionary mapping name to alias
|
||||
or as a list of `(name, alias)` tuples
|
||||
* privileged (bool): Give extended privileges to this container
|
||||
* dns (list): Set custom DNS servers
|
||||
* dns_search (list): DNS search domains
|
||||
* volumes_from (str or list): List of container names or Ids to get volumes
|
||||
from. Optionally a single string joining container id's with commas
|
||||
* network_mode (str): One of `['bridge', 'none', 'container:<name|id>', 'host']`
|
||||
* restart_policy (dict): "Name" param must be one of
|
||||
`['on-failure', 'always']`
|
||||
* cap_add (list of str): Add kernel capabilities
|
||||
* cap_drop (list of str): Drop kernel capabilities
|
||||
* extra_hosts (dict): custom host-to-IP mappings (host:ip)
|
||||
* read_only (bool): mount the container's root filesystem as read only
|
||||
* pid_mode (str): if set to "host", use the host PID namespace inside the
|
||||
container
|
||||
* ipc_mode (str): Set the IPC mode for the container
|
||||
* security_opt (list): A list of string values to customize labels for MLS
|
||||
systems, such as SELinux.
|
||||
* ulimits (list): A list of dicts or `docker.utils.Ulimit` objects. A list
|
||||
of ulimits to be set in the container.
|
||||
* log_config (`docker.utils.LogConfig` or dict): Logging configuration to
|
||||
container
|
||||
* mem_limit (str or int): Maximum amount of memory container is allowed to
|
||||
consume. (e.g. `'1G'`)
|
||||
* memswap_limit (str or int): Maximum amount of memory + swap a container is
|
||||
allowed to consume.
|
||||
* mem_swappiness (int): Tune a container's memory swappiness behavior.
|
||||
Accepts number between 0 and 100.
|
||||
* shm_size (str or int): Size of /dev/shm. (e.g. `'1G'`)
|
||||
* cpu_group (int): The length of a CPU period in microseconds.
|
||||
* cpu_period (int): Microseconds of CPU time that the container can get in a
|
||||
CPU period.
|
||||
* cpu_shares (int): CPU shares (relative weight)
|
||||
* cpuset_cpus (str): CPUs in which to allow execution (0-3, 0,1)
|
||||
* blkio_weight: Block IO weight (relative weight), accepts a weight value
|
||||
between 10 and 1000.
|
||||
* blkio_weight_device: Block IO weight (relative device weight) in the form of:
|
||||
`[{"Path": "device_path", "Weight": weight}]`
|
||||
* device_read_bps: Limit read rate (bytes per second) from a device in the
|
||||
form of: `[{"Path": "device_path", "Rate": rate}]`
|
||||
* device_write_bps: Limit write rate (bytes per second) from a device.
|
||||
* device_read_iops: Limit read rate (IO per second) from a device.
|
||||
* device_write_iops: Limit write rate (IO per second) from a device.
|
||||
* group_add (list): List of additional group names and/or IDs that the
|
||||
container process will run as.
|
||||
* devices (list): Host device bindings. See [host devices](host-devices.md)
|
||||
for more information.
|
||||
* tmpfs: Temporary filesystems to mount. See [Using tmpfs](tmpfs.md) for more
|
||||
information.
|
||||
* sysctls (dict): Kernel parameters to set in the container.
|
||||
* userns_mode (str): Sets the user namespace mode for the container when user
|
||||
namespace remapping option is enabled. Supported values are: `host`
|
||||
* pids_limit (int): Tune a container’s pids limit. Set -1 for unlimited.
|
||||
* isolation (str): Isolation technology to use. Default: `None`.
|
||||
|
||||
**Returns** (dict) HostConfig dictionary
|
||||
|
||||
```python
|
||||
>>> from docker import Client
|
||||
>>> cli = Client()
|
||||
>>> cli.create_host_config(privileged=True, cap_drop=['MKNOD'], volumes_from=['nostalgic_newton'])
|
||||
{'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True, 'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False}
|
||||
```
|
||||
|
|
@ -0,0 +1,39 @@
|
|||
Images
|
||||
======
|
||||
|
||||
.. py:module:: docker.models.images
|
||||
|
||||
Manage images on the server.
|
||||
|
||||
Methods available on ``client.images``:
|
||||
|
||||
.. rst-class:: hide-signature
|
||||
.. py:class:: ImageCollection
|
||||
|
||||
.. automethod:: build
|
||||
.. automethod:: get
|
||||
.. automethod:: list(**kwargs)
|
||||
.. automethod:: load
|
||||
.. automethod:: pull
|
||||
.. automethod:: push
|
||||
.. automethod:: remove
|
||||
.. automethod:: search
|
||||
|
||||
|
||||
Image objects
|
||||
-------------
|
||||
|
||||
.. autoclass:: Image()
|
||||
|
||||
.. autoattribute:: id
|
||||
.. autoattribute:: short_id
|
||||
.. autoattribute:: tags
|
||||
.. py:attribute:: attrs
|
||||
|
||||
The raw representation of this object from the server.
|
||||
|
||||
|
||||
.. automethod:: history
|
||||
.. automethod:: reload
|
||||
.. automethod:: save
|
||||
.. automethod:: tag
|
||||
|
|
@ -1,15 +0,0 @@
|
|||
# docker-py documentation
|
||||
|
||||
An API client for docker written in Python
|
||||
|
||||
## Installation
|
||||
|
||||
Our latest stable is always available on PyPi.
|
||||
|
||||
pip install docker-py
|
||||
|
||||
## Documentation
|
||||
Full documentation is available in the `/docs/` directory.
|
||||
|
||||
## License
|
||||
Docker is licensed under the Apache License, Version 2.0. See LICENSE for full license text
|
||||
|
|
@ -0,0 +1,93 @@
|
|||
Docker SDK for Python
|
||||
=====================
|
||||
|
||||
A Python library for the Docker Remote API. It lets you do anything the ``docker`` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc.
|
||||
|
||||
For more information about the Remote API, `see its documentation <https://docs.docker.com/engine/reference/api/docker_remote_api/>`_.
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
The latest stable version `is available on PyPi <https://pypi.python.org/pypi/docker/>`_. Either add ``docker`` to your ``requirements.txt`` file or install with pip::
|
||||
|
||||
pip install docker
|
||||
|
||||
Getting started
|
||||
---------------
|
||||
|
||||
To talk to a Docker daemon, you first need to instantiate a client. You can use :py:func:`~docker.client.from_env` to connect using the default socket or the configuration in your environment:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import docker
|
||||
client = docker.from_env()
|
||||
|
||||
You can now run containers:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
>>> client.containers.run("ubuntu", "echo hello world")
|
||||
'hello world\n'
|
||||
|
||||
You can run containers in the background:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
>>> client.containers.run("bfirsh/reticulate-splines", detach=True)
|
||||
<Container '45e6d2de7c54'>
|
||||
|
||||
You can manage containers:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
>>> client.containers.list()
|
||||
[<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
|
||||
|
||||
>>> container = client.containers.get('45e6d2de7c54')
|
||||
|
||||
>>> container.attrs['Config']['Image']
|
||||
"bfirsh/reticulate-splines"
|
||||
|
||||
>>> container.logs()
|
||||
"Reticulating spline 1...\n"
|
||||
|
||||
>>> container.stop()
|
||||
|
||||
You can stream logs:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
>>> for line in container.logs(stream=True):
|
||||
... print line.strip()
|
||||
Reticulating spline 2...
|
||||
Reticulating spline 3...
|
||||
...
|
||||
|
||||
You can manage images:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
>>> client.images.pull('nginx')
|
||||
<Image 'nginx'>
|
||||
|
||||
>>> client.images.list()
|
||||
[<Image 'ubuntu'>, <Image 'nginx'>, ...]
|
||||
|
||||
That's just a taster of what you can do with the Docker SDK for Python. For more, :doc:`take a look at the reference <client>`.
|
||||
|
||||
.. toctree::
|
||||
:hidden:
|
||||
:maxdepth: 2
|
||||
|
||||
Home <index>
|
||||
client
|
||||
containers
|
||||
images
|
||||
networks
|
||||
nodes
|
||||
services
|
||||
swarm
|
||||
volumes
|
||||
api
|
||||
tls
|
||||
change-log
|
||||
|
|
@ -1,26 +0,0 @@
|
|||
# Using with Docker Toolbox and Machine
|
||||
|
||||
In development, Docker recommends using
|
||||
[Docker Toolbox](https://www.docker.com/products/docker-toolbox) to set up
|
||||
Docker. It includes a tool called Machine which will create a VM running
|
||||
Docker Engine and point your shell at it using environment variables.
|
||||
|
||||
To configure docker-py with these environment variables:
|
||||
|
||||
First use Machine to set up the environment variables:
|
||||
```bash
|
||||
$ eval "$(docker-machine env)"
|
||||
```
|
||||
|
||||
You can then use docker-py like this:
|
||||
```python
|
||||
import docker
|
||||
client = docker.from_env(assert_hostname=False)
|
||||
print client.version()
|
||||
```
|
||||
|
||||
**Note:** This snippet is disabling TLS hostname checking with
|
||||
`assert_hostname=False`. Machine provides us with the exact certificate
|
||||
the server is using so this is safe. If you are not using Machine and verifying
|
||||
the host against a certificate authority, you'll want to enable hostname
|
||||
verification.
|
||||
177
docs/networks.md
177
docs/networks.md
|
|
@ -1,177 +0,0 @@
|
|||
# Using Networks
|
||||
|
||||
## Network creation
|
||||
|
||||
With the release of Docker 1.9 you can now manage custom networks.
|
||||
|
||||
|
||||
Here you can see how to create a network named `network1` using
|
||||
the `bridge` driver
|
||||
|
||||
```python
|
||||
docker_client.create_network("network1", driver="bridge")
|
||||
```
|
||||
|
||||
You can also create more advanced networks with custom IPAM configurations.
|
||||
For example, setting the subnet to `192.168.52.0/24` and gateway address
|
||||
to `192.168.52.254`
|
||||
|
||||
```python
|
||||
ipam_pool = docker.utils.create_ipam_pool(
|
||||
subnet='192.168.52.0/24',
|
||||
gateway='192.168.52.254'
|
||||
)
|
||||
ipam_config = docker.utils.create_ipam_config(
|
||||
pool_configs=[ipam_pool]
|
||||
)
|
||||
|
||||
docker_client.create_network("network1", driver="bridge", ipam=ipam_config)
|
||||
```
|
||||
|
||||
By default, when you connect a container to an overlay network, Docker also
|
||||
connects a bridge network to it to provide external connectivity. If you want
|
||||
to create an externally isolated overlay network, with Docker 1.10 you can
|
||||
create an internal network.
|
||||
|
||||
```python
|
||||
|
||||
docker_client.create_network("network1", driver="bridge", internal=True)
|
||||
```
|
||||
|
||||
## Container network configuration
|
||||
|
||||
In order to specify which network a container will be connected to, and
|
||||
additional configuration, use the `networking_config` parameter in
|
||||
`Client.create_container`. Note that at the time of creation, you can
|
||||
only connect a container to a single network. Later on, you may create more
|
||||
connections using `Client.connect_container_to_network`.
|
||||
|
||||
|
||||
```python
|
||||
networking_config = docker_client.create_networking_config({
|
||||
'network1': docker_client.create_endpoint_config(
|
||||
ipv4_address='172.28.0.124',
|
||||
aliases=['foo', 'bar'],
|
||||
links=['container2']
|
||||
)
|
||||
})
|
||||
|
||||
ctnr = docker_client.create_container(
|
||||
img, command, networking_config=networking_config
|
||||
)
|
||||
|
||||
```
|
||||
|
||||
## Network API documentation
|
||||
|
||||
### Client.create_networking_config
|
||||
|
||||
Create a networking config dictionary to be used as the `networking_config`
|
||||
parameter in `Client.create_container`
|
||||
|
||||
**Params**:
|
||||
|
||||
* endpoints_config (dict): A dictionary of `network_name -> endpoint_config`
|
||||
relationships. Values should be endpoint config dictionaries created by
|
||||
`Client.create_endpoint_config`. Defaults to `None` (default config).
|
||||
|
||||
**Returns** A networking config dictionary.
|
||||
|
||||
```python
|
||||
|
||||
docker_client.create_network('network1')
|
||||
|
||||
networking_config = docker_client.create_networking_config({
|
||||
'network1': docker_client.create_endpoint_config()
|
||||
})
|
||||
|
||||
container = docker_client.create_container(
|
||||
img, command, networking_config=networking_config
|
||||
)
|
||||
```
|
||||
|
||||
|
||||
### Client.create_endpoint_config
|
||||
|
||||
Create an endpoint config dictionary to be used with
|
||||
`Client.create_networking_config`.
|
||||
|
||||
**Params**:
|
||||
|
||||
* aliases (list): A list of aliases for this endpoint. Names in that list can
|
||||
be used within the network to reach the container. Defaults to `None`.
|
||||
* links (list): A list of links for this endpoint. Containers declared in this
|
||||
list will be [linked](https://docs.docker.com/engine/userguide/networking/work-with-networks/#linking-containers-in-user-defined-networks)
|
||||
to this container. Defaults to `None`.
|
||||
* ipv4_address (str): The IP address of this container on the network,
|
||||
using the IPv4 protocol. Defaults to `None`.
|
||||
* ipv6_address (str): The IP address of this container on the network,
|
||||
using the IPv6 protocol. Defaults to `None`.
|
||||
* link_local_ips (list): A list of link-local (IPv4/IPv6) addresses.
|
||||
|
||||
**Returns** An endpoint config dictionary.
|
||||
|
||||
```python
|
||||
endpoint_config = docker_client.create_endpoint_config(
|
||||
aliases=['web', 'app'],
|
||||
links=['app_db'],
|
||||
ipv4_address='132.65.0.123'
|
||||
)
|
||||
|
||||
docker_client.create_network('network1')
|
||||
networking_config = docker_client.create_networking_config({
|
||||
'network1': endpoint_config
|
||||
})
|
||||
container = docker_client.create_container(
|
||||
img, command, networking_config=networking_config
|
||||
)
|
||||
```
|
||||
### docker.utils.create_ipam_config
|
||||
|
||||
Create an IPAM (IP Address Management) config dictionary to be used with
|
||||
`Client.create_network`.
|
||||
|
||||
|
||||
**Params**:
|
||||
|
||||
* driver (str): The IPAM driver to use. Defaults to `'default'`.
|
||||
* pool_configs (list): A list of pool configuration dictionaries as created
|
||||
by `docker.utils.create_ipam_pool`. Defaults to empty list.
|
||||
|
||||
**Returns** An IPAM config dictionary
|
||||
|
||||
```python
|
||||
ipam_config = docker.utils.create_ipam_config(driver='default')
|
||||
network = docker_client.create_network('network1', ipam=ipam_config)
|
||||
```
|
||||
|
||||
### docker.utils.create_ipam_pool
|
||||
|
||||
Create an IPAM pool config dictionary to be added to the `pool_configs` param
|
||||
in `docker.utils.create_ipam_config`.
|
||||
|
||||
**Params**:
|
||||
|
||||
* subnet (str): Custom subnet for this IPAM pool using the CIDR notation.
|
||||
Defaults to `None`.
|
||||
* iprange (str): Custom IP range for endpoints in this IPAM pool using the
|
||||
CIDR notation. Defaults to `None`.
|
||||
* gateway (str): Custom IP address for the pool's gateway.
|
||||
* aux_addresses (dict): A dictionary of `key -> ip_address` relationships
|
||||
specifying auxiliary addresses that need to be allocated by the
|
||||
IPAM driver.
|
||||
|
||||
**Returns** An IPAM pool config dictionary
|
||||
|
||||
```python
|
||||
ipam_pool = docker.utils.create_ipam_pool(
|
||||
subnet='124.42.0.0/16',
|
||||
iprange='124.42.0.0/24',
|
||||
gateway='124.42.0.254',
|
||||
aux_addresses={
|
||||
'reserved1': '124.42.1.1'
|
||||
}
|
||||
)
|
||||
ipam_config = docker.utils.create_ipam_config(pool_configs=[ipam_pool])
|
||||
network = docker_client.create_network('network1', ipam=ipam_config)
|
||||
```
|
||||
|
|
@ -0,0 +1,33 @@
|
|||
Networks
|
||||
========
|
||||
|
||||
.. py:module:: docker.models.networks
|
||||
|
||||
Create and manage networks on the server. For more information about networks, `see the Engine documentation <https://docs.docker.com/engine/userguide/networking/>`_.
|
||||
|
||||
Methods available on ``client.networks``:
|
||||
|
||||
.. rst-class:: hide-signature
|
||||
.. py:class:: NetworkCollection
|
||||
|
||||
.. automethod:: create
|
||||
.. automethod:: get
|
||||
.. automethod:: list
|
||||
|
||||
Network objects
|
||||
-----------------
|
||||
|
||||
.. autoclass:: Network()
|
||||
|
||||
.. autoattribute:: id
|
||||
.. autoattribute:: short_id
|
||||
.. autoattribute:: name
|
||||
.. autoattribute:: containers
|
||||
.. py:attribute:: attrs
|
||||
|
||||
The raw representation of this object from the server.
|
||||
|
||||
.. automethod:: connect
|
||||
.. automethod:: disconnect
|
||||
.. automethod:: reload
|
||||
.. automethod:: remove
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
Nodes
|
||||
=====
|
||||
|
||||
.. py:module:: docker.models.nodes
|
||||
|
||||
Get and list nodes in a swarm. Before you can use these methods, you first need to :doc:`join or initialize a swarm <swarm>`.
|
||||
|
||||
Methods available on ``client.nodes``:
|
||||
|
||||
.. rst-class:: hide-signature
|
||||
.. py:class:: NodeCollection
|
||||
|
||||
.. automethod:: get(id_or_name)
|
||||
.. automethod:: list(**kwargs)
|
||||
|
||||
Node objects
|
||||
------------
|
||||
|
||||
.. autoclass:: Node()
|
||||
|
||||
.. autoattribute:: id
|
||||
.. autoattribute:: short_id
|
||||
.. py:attribute:: attrs
|
||||
|
||||
The raw representation of this object from the server.
|
||||
|
||||
.. autoattribute:: version
|
||||
|
||||
.. automethod:: reload
|
||||
.. automethod:: update
|
||||
|
|
@ -1,58 +0,0 @@
|
|||
# Port bindings
|
||||
Port binding is done in two parts: firstly, by providing a list of ports to
|
||||
open inside the container in the `Client().create_container()` method.
|
||||
Bindings are declared in the `host_config` parameter.
|
||||
|
||||
```python
|
||||
container_id = cli.create_container(
|
||||
'busybox', 'ls', ports=[1111, 2222],
|
||||
host_config=cli.create_host_config(port_bindings={
|
||||
1111: 4567,
|
||||
2222: None
|
||||
})
|
||||
)
|
||||
```
|
||||
|
||||
|
||||
You can limit the host address on which the port will be exposed like such:
|
||||
|
||||
```python
|
||||
cli.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)})
|
||||
```
|
||||
|
||||
Or without host port assignment:
|
||||
|
||||
```python
|
||||
cli.create_host_config(port_bindings={1111: ('127.0.0.1',)})
|
||||
```
|
||||
|
||||
If you wish to use UDP instead of TCP (default), you need to declare ports
|
||||
as such in both the config and host config:
|
||||
|
||||
```python
|
||||
container_id = cli.create_container(
|
||||
'busybox', 'ls', ports=[(1111, 'udp'), 2222],
|
||||
host_config=cli.create_host_config(port_bindings={
|
||||
'1111/udp': 4567, 2222: None
|
||||
})
|
||||
)
|
||||
```
|
||||
|
||||
To bind multiple host ports to a single container port, use the following syntax:
|
||||
|
||||
```python
|
||||
cli.create_host_config(port_bindings={
|
||||
1111: [1234, 4567]
|
||||
})
|
||||
```
|
||||
|
||||
You can also bind multiple IPs to a single container port:
|
||||
|
||||
```python
|
||||
cli.create_host_config(port_bindings={
|
||||
1111: [
|
||||
('192.168.0.100', 1234),
|
||||
('192.168.0.101', 1234)
|
||||
]
|
||||
})
|
||||
```
|
||||
|
|
@ -0,0 +1,36 @@
|
|||
Services
|
||||
========
|
||||
|
||||
.. py:module:: docker.models.services
|
||||
|
||||
Manage services on a swarm. For more information about services, `see the Engine documentation <https://docs.docker.com/engine/swarm/services/>`_.
|
||||
|
||||
Before you can use any of these methods, you first need to :doc:`join or initialize a swarm <swarm>`.
|
||||
|
||||
Methods available on ``client.services``:
|
||||
|
||||
.. rst-class:: hide-signature
|
||||
.. py:class:: ServiceCollection
|
||||
|
||||
.. automethod:: create
|
||||
.. automethod:: get
|
||||
.. automethod:: list
|
||||
|
||||
Service objects
|
||||
---------------
|
||||
|
||||
.. autoclass:: Service()
|
||||
|
||||
.. autoattribute:: id
|
||||
.. autoattribute:: short_id
|
||||
.. autoattribute:: name
|
||||
.. autoattribute:: version
|
||||
.. py:attribute:: attrs
|
||||
|
||||
The raw representation of this object from the server.
|
||||
|
||||
|
||||
.. automethod:: reload
|
||||
.. automethod:: remove
|
||||
.. automethod:: tasks
|
||||
.. automethod:: update
|
||||
274
docs/swarm.md
274
docs/swarm.md
|
|
@ -1,274 +0,0 @@
|
|||
# Swarm management
|
||||
|
||||
Starting with Engine version 1.12 (API 1.24), it is possible to manage the
|
||||
engine's associated Swarm cluster using the API.
|
||||
|
||||
## Initializing a new Swarm
|
||||
|
||||
You can initialize a new Swarm by calling `Client.init_swarm`. An advertising
|
||||
address needs to be provided, usually simply by indicating which network
|
||||
interface needs to be used. Advanced options are provided using the
|
||||
`swarm_spec` parameter, which can easily be created using
|
||||
`Client.create_swarm_spec`.
|
||||
|
||||
```python
|
||||
spec = client.create_swarm_spec(
|
||||
snapshot_interval=5000, log_entries_for_slow_followers=1200
|
||||
)
|
||||
client.init_swarm(
|
||||
advertise_addr='eth0', listen_addr='0.0.0.0:5000', force_new_cluster=False,
|
||||
swarm_spec=spec
|
||||
)
|
||||
```
|
||||
|
||||
## Joining an existing Swarm
|
||||
|
||||
If you're looking to have the engine your client is connected to join an
|
||||
existing Swarm, this can be accomplished by using the `Client.join_swarm`
|
||||
method. You will need to provide a list of at least one remote address
|
||||
corresponding to other machines already part of the swarm as well as the
|
||||
`join_token`. In most cases, a `listen_addr` and `advertise_addr` for your
|
||||
node are also required.
|
||||
|
||||
```python
|
||||
client.join_swarm(
|
||||
remote_addrs=['192.168.14.221:2377'], join_token='SWMTKN-1-redacted',
|
||||
listen_addr='0.0.0.0:5000', advertise_addr='eth0:5000'
|
||||
)
|
||||
```
|
||||
|
||||
## Leaving the Swarm
|
||||
|
||||
To leave the swarm you are currently a member of, simply use
|
||||
`Client.leave_swarm`. Note that if your engine is the Swarm's manager,
|
||||
you will need to specify `force=True` to be able to leave.
|
||||
|
||||
```python
|
||||
client.leave_swarm(force=False)
|
||||
```
|
||||
|
||||
## Retrieving Swarm status
|
||||
|
||||
You can retrieve information about your current Swarm status by calling
|
||||
`Client.inspect_swarm`. This method takes no arguments.
|
||||
|
||||
```python
|
||||
client.inspect_swarm()
|
||||
```
|
||||
|
||||
## Listing Swarm nodes
|
||||
|
||||
List all nodes that are part of the current Swarm using `Client.nodes`.
|
||||
The `filters` argument allows you to filter the results.
|
||||
|
||||
```python
|
||||
client.nodes(filters={'role': 'manager'})
|
||||
```
|
||||
|
||||
## Swarm API documentation
|
||||
|
||||
### Client.init_swarm
|
||||
|
||||
Initialize a new Swarm using the current connected engine as the first node.
|
||||
|
||||
**Params:**
|
||||
|
||||
* advertise_addr (string): Externally reachable address advertised to other
|
||||
nodes. This can either be an address/port combination in the form
|
||||
`192.168.1.1:4567`, or an interface followed by a port number, like
|
||||
`eth0:4567`. If the port number is omitted, the port number from the listen
|
||||
address is used. If `advertise_addr` is not specified, it will be
|
||||
automatically detected when possible. Default: None
|
||||
* listen_addr (string): Listen address used for inter-manager communication,
|
||||
as well as determining the networking interface used for the VXLAN Tunnel
|
||||
Endpoint (VTEP). This can either be an address/port combination in the form
|
||||
`192.168.1.1:4567`, or an interface followed by a port number, like
|
||||
`eth0:4567`. If the port number is omitted, the default swarm listening port
|
||||
is used. Default: '0.0.0.0:2377'
|
||||
* force_new_cluster (bool): Force creating a new Swarm, even if already part of
|
||||
one. Default: False
|
||||
* swarm_spec (dict): Configuration settings of the new Swarm. Use
|
||||
`Client.create_swarm_spec` to generate a valid configuration. Default: None
|
||||
|
||||
**Returns:** `True` if the request went through. Raises an `APIError` if it
|
||||
fails.
|
||||
|
||||
#### Client.create_swarm_spec
|
||||
|
||||
Create a `docker.types.SwarmSpec` instance that can be used as the `swarm_spec`
|
||||
argument in `Client.init_swarm`.
|
||||
|
||||
**Params:**
|
||||
|
||||
* task_history_retention_limit (int): Maximum number of tasks history stored.
|
||||
* snapshot_interval (int): Number of logs entries between snapshot.
|
||||
* keep_old_snapshots (int): Number of snapshots to keep beyond the current
|
||||
snapshot.
|
||||
* log_entries_for_slow_followers (int): Number of log entries to keep around
|
||||
to sync up slow followers after a snapshot is created.
|
||||
* heartbeat_tick (int): Amount of ticks (in seconds) between each heartbeat.
|
||||
* election_tick (int): Amount of ticks (in seconds) needed without a leader to
|
||||
trigger a new election.
|
||||
* dispatcher_heartbeat_period (int): The delay for an agent to send a
|
||||
heartbeat to the dispatcher.
|
||||
* node_cert_expiry (int): Automatic expiry for nodes certificates.
|
||||
* external_ca (dict): Configuration for forwarding signing requests to an
|
||||
external certificate authority. Use `docker.types.SwarmExternalCA`.
|
||||
* name (string): Swarm's name
|
||||
|
||||
**Returns:** `docker.types.SwarmSpec` instance.
|
||||
|
||||
#### docker.types.SwarmExternalCA
|
||||
|
||||
Create a configuration dictionary for the `external_ca` argument in a
|
||||
`SwarmSpec`.
|
||||
|
||||
**Params:**
|
||||
|
||||
* protocol (string): Protocol for communication with the external CA (currently
|
||||
only “cfssl” is supported).
|
||||
* url (string): URL where certificate signing requests should be sent.
|
||||
* options (dict): An object with key/value pairs that are interpreted as
|
||||
protocol-specific options for the external CA driver.
|
||||
|
||||
### Client.inspect_node
|
||||
|
||||
Retrieve low-level information about a Swarm node
|
||||
|
||||
**Params:**
|
||||
|
||||
* node_id (string): ID of the node to be inspected.
|
||||
|
||||
**Returns:** A dictionary containing data about this node. See sample below.
|
||||
|
||||
```python
|
||||
{u'CreatedAt': u'2016-08-11T23:28:39.695834296Z',
|
||||
u'Description': {u'Engine': {u'EngineVersion': u'1.12.0',
|
||||
u'Plugins': [{u'Name': u'bridge', u'Type': u'Network'},
|
||||
{u'Name': u'host', u'Type': u'Network'},
|
||||
{u'Name': u'null', u'Type': u'Network'},
|
||||
{u'Name': u'overlay', u'Type': u'Network'},
|
||||
{u'Name': u'local', u'Type': u'Volume'}]},
|
||||
u'Hostname': u'dockerserv-1.local.net',
|
||||
u'Platform': {u'Architecture': u'x86_64', u'OS': u'linux'},
|
||||
u'Resources': {u'MemoryBytes': 8052109312, u'NanoCPUs': 4000000000}},
|
||||
u'ID': u'1kqami616p23dz4hd7km35w63',
|
||||
u'ManagerStatus': {u'Addr': u'10.0.131.127:2377',
|
||||
u'Leader': True,
|
||||
u'Reachability': u'reachable'},
|
||||
u'Spec': {u'Availability': u'active', u'Role': u'manager'},
|
||||
u'Status': {u'State': u'ready'},
|
||||
u'UpdatedAt': u'2016-08-11T23:28:39.979829529Z',
|
||||
u'Version': {u'Index': 9}}
|
||||
```
|
||||
|
||||
### Client.inspect_swarm
|
||||
|
||||
Retrieve information about the current Swarm.
|
||||
|
||||
**Returns:** A dictionary containing information about the Swarm. See sample
|
||||
below.
|
||||
|
||||
```python
|
||||
{u'CreatedAt': u'2016-08-04T21:26:18.779800579Z',
|
||||
u'ID': u'8hk6e9wh4iq214qtbgvbp84a9',
|
||||
u'JoinTokens': {u'Manager': u'SWMTKN-1-redacted-1',
|
||||
u'Worker': u'SWMTKN-1-redacted-2'},
|
||||
u'Spec': {u'CAConfig': {u'NodeCertExpiry': 7776000000000000},
|
||||
u'Dispatcher': {u'HeartbeatPeriod': 5000000000},
|
||||
u'Name': u'default',
|
||||
u'Orchestration': {u'TaskHistoryRetentionLimit': 10},
|
||||
u'Raft': {u'ElectionTick': 3,
|
||||
u'HeartbeatTick': 1,
|
||||
u'LogEntriesForSlowFollowers': 500,
|
||||
u'SnapshotInterval': 10000},
|
||||
u'TaskDefaults': {}},
|
||||
u'UpdatedAt': u'2016-08-04T21:26:19.391623265Z',
|
||||
u'Version': {u'Index': 11}}
|
||||
```
|
||||
|
||||
### Client.join_swarm
|
||||
|
||||
Join an existing Swarm.
|
||||
|
||||
**Params:**
|
||||
|
||||
* remote_addrs (list): Addresses of one or more manager nodes already
|
||||
participating in the Swarm to join.
|
||||
* join_token (string): Secret token for joining this Swarm.
|
||||
* listen_addr (string): Listen address used for inter-manager communication
|
||||
if the node gets promoted to manager, as well as determining the networking
|
||||
interface used for the VXLAN Tunnel Endpoint (VTEP). Default: `None`
|
||||
* advertise_addr (string): Externally reachable address advertised to other
|
||||
nodes. This can either be an address/port combination in the form
|
||||
`192.168.1.1:4567`, or an interface followed by a port number, like
|
||||
`eth0:4567`. If the port number is omitted, the port number from the listen
|
||||
address is used. If AdvertiseAddr is not specified, it will be automatically
|
||||
detected when possible. Default: `None`
|
||||
|
||||
**Returns:** `True` if the request went through. Raises an `APIError` if it
|
||||
fails.
|
||||
|
||||
### Client.leave_swarm
|
||||
|
||||
Leave a Swarm.
|
||||
|
||||
**Params:**
|
||||
|
||||
* force (bool): Leave the Swarm even if this node is a manager.
|
||||
Default: `False`
|
||||
|
||||
**Returns:** `True` if the request went through. Raises an `APIError` if it
|
||||
fails.
|
||||
|
||||
### Client.nodes
|
||||
|
||||
List Swarm nodes
|
||||
|
||||
**Params:**
|
||||
|
||||
* filters (dict): Filters to process on the nodes list. Valid filters:
|
||||
`id`, `name`, `membership` and `role`. Default: `None`
|
||||
|
||||
**Returns:** A list of dictionaries containing data about each swarm node.
|
||||
|
||||
### Client.update_node
|
||||
|
||||
Update the Node's configuration
|
||||
|
||||
**Params:**
|
||||
|
||||
* version (int): The version number of the node object being updated. This
|
||||
is required to avoid conflicting writes.
|
||||
* node_spec (dict): Configuration settings to update. Any values not provided
|
||||
will be removed. See the official [Docker API documentation](https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/update-a-node) for more details.
|
||||
Default: `None`.
|
||||
|
||||
**Returns:** `True` if the request went through. Raises an `APIError` if it
|
||||
fails.
|
||||
|
||||
```python
|
||||
node_spec = {'Availability': 'active',
|
||||
'Name': 'node-name',
|
||||
'Role': 'manager',
|
||||
'Labels': {'foo': 'bar'}
|
||||
}
|
||||
client.update_node(node_id='24ifsmvkjbyhk', version=8, node_spec=node_spec)
|
||||
```
|
||||
|
||||
### Client.update_swarm
|
||||
|
||||
Update the Swarm's configuration
|
||||
|
||||
**Params:**
|
||||
|
||||
* version (int): The version number of the swarm object being updated. This
|
||||
is required to avoid conflicting writes.
|
||||
* swarm_spec (dict): Configuration settings to update. Use
|
||||
`Client.create_swarm_spec` to generate a valid configuration.
|
||||
Default: `None`.
|
||||
* rotate_worker_token (bool): Rotate the worker join token. Default: `False`.
|
||||
* rotate_manager_token (bool): Rotate the manager join token. Default: `False`.
|
||||
|
||||
**Returns:** `True` if the request went through. Raises an `APIError` if it
|
||||
fails.
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
Swarm
|
||||
=====
|
||||
|
||||
.. py:module:: docker.models.swarm
|
||||
|
||||
Manage `Docker Engine's swarm mode <https://docs.docker.com/engine/swarm/>`_.
|
||||
|
||||
To use any swarm methods, you first need to make the Engine part of a swarm. This can be done by either initializing a new swarm with :py:meth:`~Swarm.init`, or joining an existing swarm with :py:meth:`~Swarm.join`.
|
||||
|
||||
These methods are available on ``client.swarm``:
|
||||
|
||||
.. rst-class:: hide-signature
|
||||
.. py:class:: Swarm
|
||||
|
||||
.. automethod:: init()
|
||||
.. automethod:: join()
|
||||
.. automethod:: leave()
|
||||
.. automethod:: update()
|
||||
.. automethod:: reload()
|
||||
|
||||
.. autoattribute:: version
|
||||
.. py:attribute:: attrs
|
||||
|
||||
The raw representation of this object from the server.
|
||||
86
docs/tls.md
86
docs/tls.md
|
|
@ -1,86 +0,0 @@
|
|||
## Connection to daemon using HTTPS
|
||||
|
||||
**Note:** *These instructions are docker-py specific. Please refer to
|
||||
[http://docs.docker.com/articles/https/](http://docs.docker.com/articles/https/)
|
||||
first.*
|
||||
|
||||
## TLSConfig
|
||||
|
||||
**Params**:
|
||||
|
||||
* client_cert (tuple of str): Path to client cert, path to client key
|
||||
* ca_cert (str): Path to CA cert file
|
||||
* verify (bool or str): This can be `False` or a path to a CA Cert file
|
||||
* ssl_version (int): A valid [SSL version](
|
||||
https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1)
|
||||
* assert_hostname (bool): Verify hostname of docker daemon
|
||||
|
||||
### configure_client
|
||||
|
||||
**Params**:
|
||||
|
||||
* client: ([Client](api.md#client-api)): A client to apply this config to
|
||||
|
||||
|
||||
## Authenticate server based on public/default CA pool
|
||||
|
||||
```python
|
||||
client = docker.Client(base_url='<https_url>', tls=True)
|
||||
```
|
||||
|
||||
Equivalent CLI options:
|
||||
```bash
|
||||
docker --tls ...
|
||||
```
|
||||
|
||||
If you want to use TLS but don't want to verify the server certificate
|
||||
(for example when testing with a self-signed certificate):
|
||||
|
||||
```python
|
||||
tls_config = docker.tls.TLSConfig(verify=False)
|
||||
client = docker.Client(base_url='<https_url>', tls=tls_config)
|
||||
```
|
||||
|
||||
## Authenticate server based on given CA
|
||||
|
||||
```python
|
||||
tls_config = docker.tls.TLSConfig(ca_cert='/path/to/ca.pem')
|
||||
client = docker.Client(base_url='<https_url>', tls=tls_config)
|
||||
```
|
||||
|
||||
Equivalent CLI options:
|
||||
```bash
|
||||
docker --tlsverify --tlscacert /path/to/ca.pem ...
|
||||
```
|
||||
|
||||
## Authenticate with client certificate, do not authenticate server based on given CA
|
||||
|
||||
```python
|
||||
tls_config = docker.tls.TLSConfig(
|
||||
client_cert=('/path/to/client-cert.pem', '/path/to/client-key.pem')
|
||||
)
|
||||
client = docker.Client(base_url='<https_url>', tls=tls_config)
|
||||
```
|
||||
|
||||
Equivalent CLI options:
|
||||
```bash
|
||||
docker --tls --tlscert /path/to/client-cert.pem --tlskey /path/to/client-key.pem ...
|
||||
```
|
||||
|
||||
## Authenticate with client certificate, authenticate server based on given CA
|
||||
|
||||
```python
|
||||
tls_config = docker.tls.TLSConfig(
|
||||
client_cert=('/path/to/client-cert.pem', '/path/to/client-key.pem'),
|
||||
verify='/path/to/ca.pem'
|
||||
)
|
||||
client = docker.Client(base_url='<https_url>', tls=tls_config)
|
||||
```
|
||||
|
||||
Equivalent CLI options:
|
||||
```bash
|
||||
docker --tlsverify \
|
||||
--tlscert /path/to/client-cert.pem \
|
||||
--tlskey /path/to/client-key.pem \
|
||||
--tlscacert /path/to/ca.pem ...
|
||||
```
|
||||
|
|
@ -0,0 +1,37 @@
|
|||
Using TLS
|
||||
=========
|
||||
|
||||
.. py:module:: docker.tls
|
||||
|
||||
Both the main :py:class:`~docker.client.Client` and low-level
|
||||
:py:class:`~docker.api.client.APIClient` can connect to the Docker daemon with TLS.
|
||||
|
||||
This is all configured automatically for you if you're using :py:func:`~docker.client.from_env`, but if you need some extra control it is possible to configure it manually by using a :py:class:`TLSConfig` object.
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
||||
For example, to check the server against a specific CA certificate:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
tls_config = docker.tls.TLSConfig(ca_cert='/path/to/ca.pem')
|
||||
client = docker.Client(base_url='<https_url>', tls=tls_config)
|
||||
|
||||
This is the equivalent of ``docker --tlsverify --tlscacert /path/to/ca.pem ...``.
|
||||
|
||||
To authenticate with client certs:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
tls_config = docker.tls.TLSConfig(
|
||||
client_cert=('/path/to/client-cert.pem', '/path/to/client-key.pem')
|
||||
)
|
||||
client = docker.Client(base_url='<https_url>', tls=tls_config)
|
||||
|
||||
This is the equivalent of ``docker --tls --tlscert /path/to/client-cert.pem --tlskey /path/to/client-key.pem ...``.
|
||||
|
||||
Reference
|
||||
---------
|
||||
|
||||
.. autoclass:: TLSConfig()
|
||||
|
|
@ -1,33 +0,0 @@
|
|||
# Using tmpfs
|
||||
|
||||
When creating a container, you can specify paths to be mounted with tmpfs using
|
||||
the `tmpfs` argument to `create_host_config`, similarly to the `--tmpfs`
|
||||
argument to `docker run`.
|
||||
|
||||
This capability is supported in Docker Engine 1.10 and up.
|
||||
|
||||
`tmpfs` can be either a list or a dictionary. If it's a list, each item is a
|
||||
string specifying the path and (optionally) any configuration for the mount:
|
||||
|
||||
```python
|
||||
client.create_container(
|
||||
'busybox', 'ls',
|
||||
host_config=client.create_host_config(tmpfs=[
|
||||
'/mnt/vol2',
|
||||
'/mnt/vol1:size=3G,uid=1000'
|
||||
])
|
||||
)
|
||||
```
|
||||
|
||||
Alternatively, if it's a dictionary, each key is a path and each value contains
|
||||
the mount options:
|
||||
|
||||
```python
|
||||
client.create_container(
|
||||
'busybox', 'ls',
|
||||
host_config=client.create_host_config(tmpfs={
|
||||
'/mnt/vol2': '',
|
||||
'/mnt/vol1': 'size=3G,uid=1000'
|
||||
})
|
||||
)
|
||||
```
|
||||
|
|
@ -1,34 +0,0 @@
|
|||
# Using volumes
|
||||
|
||||
Volume declaration is done in two parts. Provide a list of mountpoints to
|
||||
the `Client().create_container()` method, and declare mappings in the
|
||||
`host_config` section.
|
||||
|
||||
```python
|
||||
container_id = cli.create_container(
|
||||
'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
|
||||
host_config=cli.create_host_config(binds={
|
||||
'/home/user1/': {
|
||||
'bind': '/mnt/vol2',
|
||||
'mode': 'rw',
|
||||
},
|
||||
'/var/www': {
|
||||
'bind': '/mnt/vol1',
|
||||
'mode': 'ro',
|
||||
}
|
||||
})
|
||||
)
|
||||
```
|
||||
|
||||
You can alternatively specify binds as a list. This code is equivalent to the
|
||||
example above:
|
||||
|
||||
```python
|
||||
container_id = cli.create_container(
|
||||
'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
|
||||
host_config=cli.create_host_config(binds=[
|
||||
'/home/user1/:/mnt/vol2',
|
||||
'/var/www:/mnt/vol1:ro',
|
||||
])
|
||||
)
|
||||
```
|
||||
|
|
@ -0,0 +1,31 @@
|
|||
Volumes
|
||||
=======
|
||||
|
||||
.. py:module:: docker.models.volumes
|
||||
|
||||
Manage volumes on the server.
|
||||
|
||||
Methods available on ``client.volumes``:
|
||||
|
||||
.. rst-class:: hide-signature
|
||||
.. py:class:: VolumeCollection
|
||||
|
||||
.. automethod:: create
|
||||
.. automethod:: get
|
||||
.. automethod:: list
|
||||
|
||||
Volume objects
|
||||
--------------
|
||||
|
||||
.. autoclass:: Volume()
|
||||
|
||||
.. autoattribute:: id
|
||||
.. autoattribute:: short_id
|
||||
.. autoattribute:: name
|
||||
.. py:attribute:: attrs
|
||||
|
||||
The raw representation of this object from the server.
|
||||
|
||||
|
||||
.. automethod:: reload
|
||||
.. automethod:: remove
|
||||
21
mkdocs.yml
21
mkdocs.yml
|
|
@ -1,21 +0,0 @@
|
|||
site_name: docker-py Documentation
|
||||
site_description: An API client for Docker written in Python
|
||||
site_favicon: favicon_whale.png
|
||||
site_url: https://docker-py.readthedocs.io
|
||||
repo_url: https://github.com/docker/docker-py/
|
||||
theme: readthedocs
|
||||
pages:
|
||||
- Home: index.md
|
||||
- Client API: api.md
|
||||
- Port Bindings: port-bindings.md
|
||||
- Using Volumes: volumes.md
|
||||
- Using TLS: tls.md
|
||||
- Host devices: host-devices.md
|
||||
- Host configuration: hostconfig.md
|
||||
- Network configuration: networks.md
|
||||
- Swarm management: swarm.md
|
||||
- Swarm services: services.md
|
||||
- Using tmpfs: tmpfs.md
|
||||
- Using with Docker Machine: machine.md
|
||||
- Change Log: change_log.md
|
||||
- Contributing: contributing.md
|
||||
4
setup.py
4
setup.py
|
|
@ -49,8 +49,7 @@ setup(
|
|||
long_description=long_description,
|
||||
url='https://github.com/docker/docker-py/',
|
||||
packages=[
|
||||
'docker', 'docker.api', 'docker.auth', 'docker.transport',
|
||||
'docker.utils', 'docker.utils.ports', 'docker.ssladapter',
|
||||
'docker', 'docker.api', 'docker.transport', 'docker.utils',
|
||||
'docker.types',
|
||||
],
|
||||
install_requires=requirements,
|
||||
|
|
@ -65,7 +64,6 @@ setup(
|
|||
'Operating System :: OS Independent',
|
||||
'Programming Language :: Python',
|
||||
'Programming Language :: Python :: 2',
|
||||
'Programming Language :: Python :: 2.6',
|
||||
'Programming Language :: Python :: 2.7',
|
||||
'Programming Language :: Python :: 3',
|
||||
'Programming Language :: Python :: 3.3',
|
||||
|
|
|
|||
|
|
@ -1,36 +0,0 @@
|
|||
import sys
|
||||
import unittest
|
||||
|
||||
import six
|
||||
|
||||
|
||||
class BaseTestCase(unittest.TestCase):
|
||||
def assertIn(self, object, collection):
|
||||
if six.PY2 and sys.version_info[1] <= 6:
|
||||
return self.assertTrue(object in collection)
|
||||
return super(BaseTestCase, self).assertIn(object, collection)
|
||||
|
||||
|
||||
class Cleanup(object):
|
||||
if sys.version_info < (2, 7):
|
||||
# Provide a basic implementation of addCleanup for Python < 2.7
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(Cleanup, self).__init__(*args, **kwargs)
|
||||
self._cleanups = []
|
||||
|
||||
def tearDown(self):
|
||||
super(Cleanup, self).tearDown()
|
||||
ok = True
|
||||
while self._cleanups:
|
||||
fn, args, kwargs = self._cleanups.pop(-1)
|
||||
try:
|
||||
fn(*args, **kwargs)
|
||||
except KeyboardInterrupt:
|
||||
raise
|
||||
except:
|
||||
ok = False
|
||||
if not ok:
|
||||
raise
|
||||
|
||||
def addCleanup(self, function, *args, **kwargs):
|
||||
self._cleanups.append((function, args, kwargs))
|
||||
|
|
@ -1,5 +1,6 @@
|
|||
import os
|
||||
import os.path
|
||||
import random
|
||||
import tarfile
|
||||
import tempfile
|
||||
import time
|
||||
|
|
@ -56,3 +57,20 @@ def wait_on_condition(condition, delay=0.1, timeout=40):
|
|||
if time.time() - start_time > timeout:
|
||||
raise AssertionError("Timeout: %s" % condition)
|
||||
time.sleep(delay)
|
||||
|
||||
|
||||
def random_name():
|
||||
return u'dockerpytest_{0:x}'.format(random.getrandbits(64))
|
||||
|
||||
|
||||
def force_leave_swarm(client):
|
||||
"""Actually force leave a Swarm. There seems to be a bug in Swarm that
|
||||
occasionally throws "context deadline exceeded" errors when leaving."""
|
||||
while True:
|
||||
try:
|
||||
return client.swarm.leave(force=True)
|
||||
except docker.errors.APIError as e:
|
||||
if e.explanation == "context deadline exceeded":
|
||||
continue
|
||||
else:
|
||||
raise
|
||||
|
|
|
|||
|
|
@ -8,10 +8,10 @@ import six
|
|||
from docker import errors
|
||||
|
||||
from ..helpers import requires_api_version
|
||||
from .base import BaseIntegrationTest
|
||||
from .base import BaseAPIIntegrationTest
|
||||
|
||||
|
||||
class BuildTest(BaseIntegrationTest):
|
||||
class BuildTest(BaseAPIIntegrationTest):
|
||||
def test_build_streaming(self):
|
||||
script = io.BytesIO('\n'.join([
|
||||
'FROM busybox',
|
||||
|
|
@ -8,10 +8,10 @@ import warnings
|
|||
import docker
|
||||
from docker.utils import kwargs_from_env
|
||||
|
||||
from .base import BaseIntegrationTest, BUSYBOX
|
||||
from .base import BaseAPIIntegrationTest, BUSYBOX
|
||||
|
||||
|
||||
class InformationTest(BaseIntegrationTest):
|
||||
class InformationTest(BaseAPIIntegrationTest):
|
||||
def test_version(self):
|
||||
res = self.client.version()
|
||||
self.assertIn('GoVersion', res)
|
||||
|
|
@ -25,7 +25,7 @@ class InformationTest(BaseIntegrationTest):
|
|||
self.assertIn('Debug', res)
|
||||
|
||||
def test_search(self):
|
||||
client = docker.from_env(timeout=10)
|
||||
client = docker.APIClient(timeout=10, **kwargs_from_env())
|
||||
res = client.search('busybox')
|
||||
self.assertTrue(len(res) >= 1)
|
||||
base_img = [x for x in res if x['name'] == 'busybox']
|
||||
|
|
@ -33,7 +33,7 @@ class InformationTest(BaseIntegrationTest):
|
|||
self.assertIn('description', base_img[0])
|
||||
|
||||
|
||||
class LinkTest(BaseIntegrationTest):
|
||||
class LinkTest(BaseAPIIntegrationTest):
|
||||
def test_remove_link(self):
|
||||
# Create containers
|
||||
container1 = self.client.create_container(
|
||||
|
|
@ -75,7 +75,7 @@ class LinkTest(BaseIntegrationTest):
|
|||
self.assertEqual(len(retrieved), 2)
|
||||
|
||||
|
||||
class LoadConfigTest(BaseIntegrationTest):
|
||||
class LoadConfigTest(BaseAPIIntegrationTest):
|
||||
def test_load_legacy_config(self):
|
||||
folder = tempfile.mkdtemp()
|
||||
self.tmp_folders.append(folder)
|
||||
|
|
@ -114,7 +114,7 @@ class LoadConfigTest(BaseIntegrationTest):
|
|||
|
||||
class AutoDetectVersionTest(unittest.TestCase):
|
||||
def test_client_init(self):
|
||||
client = docker.from_env(version='auto')
|
||||
client = docker.APIClient(version='auto', **kwargs_from_env())
|
||||
client_version = client._version
|
||||
api_version = client.version(api_version=False)['ApiVersion']
|
||||
self.assertEqual(client_version, api_version)
|
||||
|
|
@ -122,22 +122,11 @@ class AutoDetectVersionTest(unittest.TestCase):
|
|||
self.assertEqual(client_version, api_version_2)
|
||||
client.close()
|
||||
|
||||
def test_auto_client(self):
|
||||
client = docker.AutoVersionClient(**kwargs_from_env())
|
||||
client_version = client._version
|
||||
api_version = client.version(api_version=False)['ApiVersion']
|
||||
self.assertEqual(client_version, api_version)
|
||||
api_version_2 = client.version()['ApiVersion']
|
||||
self.assertEqual(client_version, api_version_2)
|
||||
client.close()
|
||||
with self.assertRaises(docker.errors.DockerException):
|
||||
docker.AutoVersionClient(version='1.11', **kwargs_from_env())
|
||||
|
||||
|
||||
class ConnectionTimeoutTest(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.timeout = 0.5
|
||||
self.client = docker.client.Client(base_url='http://192.168.10.2:4243',
|
||||
self.client = docker.api.APIClient(base_url='http://192.168.10.2:4243',
|
||||
timeout=self.timeout)
|
||||
|
||||
def test_timeout(self):
|
||||
|
|
@ -166,7 +155,7 @@ class UnixconnTest(unittest.TestCase):
|
|||
with warnings.catch_warnings(record=True) as w:
|
||||
warnings.simplefilter('always')
|
||||
|
||||
client = docker.from_env()
|
||||
client = docker.APIClient(**kwargs_from_env())
|
||||
client.images()
|
||||
client.close()
|
||||
del client
|
||||
|
|
@ -11,10 +11,10 @@ import six
|
|||
|
||||
from ..helpers import requires_api_version
|
||||
from .. import helpers
|
||||
from .base import BaseIntegrationTest, BUSYBOX
|
||||
from .base import BaseAPIIntegrationTest, BUSYBOX
|
||||
|
||||
|
||||
class ListContainersTest(BaseIntegrationTest):
|
||||
class ListContainersTest(BaseAPIIntegrationTest):
|
||||
def test_list_containers(self):
|
||||
res0 = self.client.containers(all=True)
|
||||
size = len(res0)
|
||||
|
|
@ -34,7 +34,7 @@ class ListContainersTest(BaseIntegrationTest):
|
|||
self.assertIn('Status', retrieved)
|
||||
|
||||
|
||||
class CreateContainerTest(BaseIntegrationTest):
|
||||
class CreateContainerTest(BaseAPIIntegrationTest):
|
||||
|
||||
def test_create(self):
|
||||
res = self.client.create_container(BUSYBOX, 'true')
|
||||
|
|
@ -409,7 +409,7 @@ class CreateContainerTest(BaseIntegrationTest):
|
|||
assert config['HostConfig']['Isolation'] == 'default'
|
||||
|
||||
|
||||
class VolumeBindTest(BaseIntegrationTest):
|
||||
class VolumeBindTest(BaseAPIIntegrationTest):
|
||||
def setUp(self):
|
||||
super(VolumeBindTest, self).setUp()
|
||||
|
||||
|
|
@ -504,7 +504,7 @@ class VolumeBindTest(BaseIntegrationTest):
|
|||
|
||||
|
||||
@requires_api_version('1.20')
|
||||
class ArchiveTest(BaseIntegrationTest):
|
||||
class ArchiveTest(BaseAPIIntegrationTest):
|
||||
def test_get_file_archive_from_container(self):
|
||||
data = 'The Maid and the Pocket Watch of Blood'
|
||||
ctnr = self.client.create_container(
|
||||
|
|
@ -584,7 +584,7 @@ class ArchiveTest(BaseIntegrationTest):
|
|||
self.assertIn('bar/', results)
|
||||
|
||||
|
||||
class RenameContainerTest(BaseIntegrationTest):
|
||||
class RenameContainerTest(BaseAPIIntegrationTest):
|
||||
def test_rename_container(self):
|
||||
version = self.client.version()['Version']
|
||||
name = 'hong_meiling'
|
||||
|
|
@ -600,7 +600,7 @@ class RenameContainerTest(BaseIntegrationTest):
|
|||
self.assertEqual('/{0}'.format(name), inspect['Name'])
|
||||
|
||||
|
||||
class StartContainerTest(BaseIntegrationTest):
|
||||
class StartContainerTest(BaseAPIIntegrationTest):
|
||||
def test_start_container(self):
|
||||
res = self.client.create_container(BUSYBOX, 'true')
|
||||
self.assertIn('Id', res)
|
||||
|
|
@ -654,7 +654,7 @@ class StartContainerTest(BaseIntegrationTest):
|
|||
self.assertEqual(exitcode, 0, msg=cmd)
|
||||
|
||||
|
||||
class WaitTest(BaseIntegrationTest):
|
||||
class WaitTest(BaseAPIIntegrationTest):
|
||||
def test_wait(self):
|
||||
res = self.client.create_container(BUSYBOX, ['sleep', '3'])
|
||||
id = res['Id']
|
||||
|
|
@ -682,7 +682,7 @@ class WaitTest(BaseIntegrationTest):
|
|||
self.assertEqual(inspect['State']['ExitCode'], exitcode)
|
||||
|
||||
|
||||
class LogsTest(BaseIntegrationTest):
|
||||
class LogsTest(BaseAPIIntegrationTest):
|
||||
def test_logs(self):
|
||||
snippet = 'Flowering Nights (Sakuya Iyazoi)'
|
||||
container = self.client.create_container(
|
||||
|
|
@ -754,7 +754,7 @@ Line2'''
|
|||
self.assertEqual(logs, ''.encode(encoding='ascii'))
|
||||
|
||||
|
||||
class DiffTest(BaseIntegrationTest):
|
||||
class DiffTest(BaseAPIIntegrationTest):
|
||||
def test_diff(self):
|
||||
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
|
||||
id = container['Id']
|
||||
|
|
@ -782,7 +782,7 @@ class DiffTest(BaseIntegrationTest):
|
|||
self.assertEqual(test_diff[0]['Kind'], 1)
|
||||
|
||||
|
||||
class StopTest(BaseIntegrationTest):
|
||||
class StopTest(BaseAPIIntegrationTest):
|
||||
def test_stop(self):
|
||||
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
|
||||
id = container['Id']
|
||||
|
|
@ -809,7 +809,7 @@ class StopTest(BaseIntegrationTest):
|
|||
self.assertEqual(state['Running'], False)
|
||||
|
||||
|
||||
class KillTest(BaseIntegrationTest):
|
||||
class KillTest(BaseAPIIntegrationTest):
|
||||
def test_kill(self):
|
||||
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
|
||||
id = container['Id']
|
||||
|
|
@ -886,7 +886,7 @@ class KillTest(BaseIntegrationTest):
|
|||
self.assertEqual(state['Running'], False, state)
|
||||
|
||||
|
||||
class PortTest(BaseIntegrationTest):
|
||||
class PortTest(BaseAPIIntegrationTest):
|
||||
def test_port(self):
|
||||
|
||||
port_bindings = {
|
||||
|
|
@ -917,7 +917,7 @@ class PortTest(BaseIntegrationTest):
|
|||
self.client.kill(id)
|
||||
|
||||
|
||||
class ContainerTopTest(BaseIntegrationTest):
|
||||
class ContainerTopTest(BaseAPIIntegrationTest):
|
||||
def test_top(self):
|
||||
container = self.client.create_container(
|
||||
BUSYBOX, ['sleep', '60']
|
||||
|
|
@ -957,7 +957,7 @@ class ContainerTopTest(BaseIntegrationTest):
|
|||
self.assertEqual(res['Processes'][0][10], 'sleep 60')
|
||||
|
||||
|
||||
class RestartContainerTest(BaseIntegrationTest):
|
||||
class RestartContainerTest(BaseAPIIntegrationTest):
|
||||
def test_restart(self):
|
||||
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
|
||||
id = container['Id']
|
||||
|
|
@ -998,7 +998,7 @@ class RestartContainerTest(BaseIntegrationTest):
|
|||
self.client.kill(id)
|
||||
|
||||
|
||||
class RemoveContainerTest(BaseIntegrationTest):
|
||||
class RemoveContainerTest(BaseAPIIntegrationTest):
|
||||
def test_remove(self):
|
||||
container = self.client.create_container(BUSYBOX, ['true'])
|
||||
id = container['Id']
|
||||
|
|
@ -1020,7 +1020,7 @@ class RemoveContainerTest(BaseIntegrationTest):
|
|||
self.assertEqual(len(res), 0)
|
||||
|
||||
|
||||
class AttachContainerTest(BaseIntegrationTest):
|
||||
class AttachContainerTest(BaseAPIIntegrationTest):
|
||||
def test_run_container_streaming(self):
|
||||
container = self.client.create_container(BUSYBOX, '/bin/sh',
|
||||
detach=True, stdin_open=True)
|
||||
|
|
@ -1051,7 +1051,7 @@ class AttachContainerTest(BaseIntegrationTest):
|
|||
self.assertEqual(data.decode('utf-8'), line)
|
||||
|
||||
|
||||
class PauseTest(BaseIntegrationTest):
|
||||
class PauseTest(BaseAPIIntegrationTest):
|
||||
def test_pause_unpause(self):
|
||||
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
|
||||
id = container['Id']
|
||||
|
|
@ -1080,7 +1080,7 @@ class PauseTest(BaseIntegrationTest):
|
|||
self.assertEqual(state['Paused'], False)
|
||||
|
||||
|
||||
class GetContainerStatsTest(BaseIntegrationTest):
|
||||
class GetContainerStatsTest(BaseAPIIntegrationTest):
|
||||
@requires_api_version('1.19')
|
||||
def test_get_container_stats_no_stream(self):
|
||||
container = self.client.create_container(
|
||||
|
|
@ -1111,7 +1111,7 @@ class GetContainerStatsTest(BaseIntegrationTest):
|
|||
self.assertIn(key, chunk)
|
||||
|
||||
|
||||
class ContainerUpdateTest(BaseIntegrationTest):
|
||||
class ContainerUpdateTest(BaseAPIIntegrationTest):
|
||||
@requires_api_version('1.22')
|
||||
def test_update_container(self):
|
||||
old_mem_limit = 400 * 1024 * 1024
|
||||
|
|
@ -1158,7 +1158,7 @@ class ContainerUpdateTest(BaseIntegrationTest):
|
|||
)
|
||||
|
||||
|
||||
class ContainerCPUTest(BaseIntegrationTest):
|
||||
class ContainerCPUTest(BaseAPIIntegrationTest):
|
||||
@requires_api_version('1.18')
|
||||
def test_container_cpu_shares(self):
|
||||
cpu_shares = 512
|
||||
|
|
@ -1,10 +1,10 @@
|
|||
from docker.utils.socket import next_frame_size
|
||||
from docker.utils.socket import read_exactly
|
||||
|
||||
from .base import BaseIntegrationTest, BUSYBOX
|
||||
from .base import BaseAPIIntegrationTest, BUSYBOX
|
||||
|
||||
|
||||
class ExecTest(BaseIntegrationTest):
|
||||
class ExecTest(BaseAPIIntegrationTest):
|
||||
def test_execute_command(self):
|
||||
container = self.client.create_container(BUSYBOX, 'cat',
|
||||
detach=True, stdin_open=True)
|
||||
|
|
@ -1,5 +1,4 @@
|
|||
from .base import BaseIntegrationTest
|
||||
from .base import BUSYBOX
|
||||
from .base import BaseAPIIntegrationTest, BUSYBOX
|
||||
from .. import helpers
|
||||
|
||||
SECOND = 1000000000
|
||||
|
|
@ -12,7 +11,7 @@ def wait_on_health_status(client, container, status):
|
|||
return helpers.wait_on_condition(condition)
|
||||
|
||||
|
||||
class HealthcheckTest(BaseIntegrationTest):
|
||||
class HealthcheckTest(BaseAPIIntegrationTest):
|
||||
|
||||
@helpers.requires_api_version('1.24')
|
||||
def test_healthcheck_shell_command(self):
|
||||
|
|
@ -14,10 +14,10 @@ from six.moves import socketserver
|
|||
|
||||
import docker
|
||||
|
||||
from .base import BaseIntegrationTest, BUSYBOX
|
||||
from .base import BaseAPIIntegrationTest, BUSYBOX
|
||||
|
||||
|
||||
class ListImagesTest(BaseIntegrationTest):
|
||||
class ListImagesTest(BaseAPIIntegrationTest):
|
||||
def test_images(self):
|
||||
res1 = self.client.images(all=True)
|
||||
self.assertIn('Id', res1[0])
|
||||
|
|
@ -35,7 +35,7 @@ class ListImagesTest(BaseIntegrationTest):
|
|||
self.assertEqual(type(res1[0]), six.text_type)
|
||||
|
||||
|
||||
class PullImageTest(BaseIntegrationTest):
|
||||
class PullImageTest(BaseAPIIntegrationTest):
|
||||
def test_pull(self):
|
||||
try:
|
||||
self.client.remove_image('hello-world')
|
||||
|
|
@ -66,7 +66,7 @@ class PullImageTest(BaseIntegrationTest):
|
|||
self.assertIn('Id', img_info)
|
||||
|
||||
|
||||
class CommitTest(BaseIntegrationTest):
|
||||
class CommitTest(BaseAPIIntegrationTest):
|
||||
def test_commit(self):
|
||||
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
|
||||
id = container['Id']
|
||||
|
|
@ -101,7 +101,7 @@ class CommitTest(BaseIntegrationTest):
|
|||
assert img['Config']['Cmd'] == ['bash']
|
||||
|
||||
|
||||
class RemoveImageTest(BaseIntegrationTest):
|
||||
class RemoveImageTest(BaseAPIIntegrationTest):
|
||||
def test_remove(self):
|
||||
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
|
||||
id = container['Id']
|
||||
|
|
@ -117,7 +117,7 @@ class RemoveImageTest(BaseIntegrationTest):
|
|||
self.assertEqual(len(res), 0)
|
||||
|
||||
|
||||
class ImportImageTest(BaseIntegrationTest):
|
||||
class ImportImageTest(BaseAPIIntegrationTest):
|
||||
'''Base class for `docker import` test cases.'''
|
||||
|
||||
TAR_SIZE = 512 * 1024
|
||||
|
|
@ -1,17 +1,15 @@
|
|||
import random
|
||||
|
||||
import docker
|
||||
from docker.utils import create_ipam_config
|
||||
from docker.utils import create_ipam_pool
|
||||
import pytest
|
||||
|
||||
from ..helpers import requires_api_version
|
||||
from .base import BaseIntegrationTest
|
||||
from ..helpers import random_name, requires_api_version
|
||||
from .base import BaseAPIIntegrationTest
|
||||
|
||||
|
||||
class TestNetworks(BaseIntegrationTest):
|
||||
class TestNetworks(BaseAPIIntegrationTest):
|
||||
def create_network(self, *args, **kwargs):
|
||||
net_name = u'dockerpy{}'.format(random.getrandbits(24))[:14]
|
||||
net_name = random_name()
|
||||
net_id = self.client.create_network(net_name, *args, **kwargs)['Id']
|
||||
self.tmp_networks.append(net_id)
|
||||
return (net_name, net_id)
|
||||
|
|
@ -84,10 +82,8 @@ class TestNetworks(BaseIntegrationTest):
|
|||
|
||||
@requires_api_version('1.21')
|
||||
def test_create_network_with_host_driver_fails(self):
|
||||
net_name = 'dockerpy{}'.format(random.getrandbits(24))[:14]
|
||||
|
||||
with pytest.raises(docker.errors.APIError):
|
||||
self.client.create_network(net_name, driver='host')
|
||||
self.client.create_network(random_name(), driver='host')
|
||||
|
||||
@requires_api_version('1.21')
|
||||
def test_remove_network(self):
|
||||
|
|
@ -3,10 +3,10 @@ import random
|
|||
import docker
|
||||
|
||||
from ..helpers import requires_api_version
|
||||
from .base import BaseIntegrationTest
|
||||
from .base import BaseAPIIntegrationTest
|
||||
|
||||
|
||||
class ServiceTest(BaseIntegrationTest):
|
||||
class ServiceTest(BaseAPIIntegrationTest):
|
||||
def setUp(self):
|
||||
super(ServiceTest, self).setUp()
|
||||
self.client.leave_swarm(force=True)
|
||||
|
|
@ -3,10 +3,10 @@ import docker
|
|||
import pytest
|
||||
|
||||
from ..helpers import requires_api_version
|
||||
from .base import BaseIntegrationTest
|
||||
from .base import BaseAPIIntegrationTest
|
||||
|
||||
|
||||
class SwarmTest(BaseIntegrationTest):
|
||||
class SwarmTest(BaseAPIIntegrationTest):
|
||||
def setUp(self):
|
||||
super(SwarmTest, self).setUp()
|
||||
self.client.leave_swarm(force=True)
|
||||
|
|
@ -2,11 +2,11 @@ import docker
|
|||
import pytest
|
||||
|
||||
from ..helpers import requires_api_version
|
||||
from .base import BaseIntegrationTest
|
||||
from .base import BaseAPIIntegrationTest
|
||||
|
||||
|
||||
@requires_api_version('1.21')
|
||||
class TestVolumes(BaseIntegrationTest):
|
||||
class TestVolumes(BaseAPIIntegrationTest):
|
||||
def test_create_volume(self):
|
||||
name = 'perfectcherryblossom'
|
||||
self.tmp_volumes.append(name)
|
||||
|
|
@ -2,6 +2,7 @@ import shutil
|
|||
import unittest
|
||||
|
||||
import docker
|
||||
from docker.utils import kwargs_from_env
|
||||
import six
|
||||
|
||||
|
||||
|
|
@ -10,20 +11,14 @@ BUSYBOX = 'busybox:buildroot-2014.02'
|
|||
|
||||
class BaseIntegrationTest(unittest.TestCase):
|
||||
"""
|
||||
A base class for integration test cases.
|
||||
|
||||
It sets up a Docker client and cleans up the Docker server after itself.
|
||||
A base class for integration test cases. It cleans up the Docker server
|
||||
after itself.
|
||||
"""
|
||||
tmp_imgs = []
|
||||
tmp_containers = []
|
||||
tmp_folders = []
|
||||
tmp_volumes = []
|
||||
|
||||
def setUp(self):
|
||||
if six.PY2:
|
||||
self.assertRegex = self.assertRegexpMatches
|
||||
self.assertCountEqual = self.assertItemsEqual
|
||||
self.client = docker.from_env(timeout=60)
|
||||
self.tmp_imgs = []
|
||||
self.tmp_containers = []
|
||||
self.tmp_folders = []
|
||||
|
|
@ -31,32 +26,41 @@ class BaseIntegrationTest(unittest.TestCase):
|
|||
self.tmp_networks = []
|
||||
|
||||
def tearDown(self):
|
||||
client = docker.from_env()
|
||||
for img in self.tmp_imgs:
|
||||
try:
|
||||
self.client.remove_image(img)
|
||||
client.api.remove_image(img)
|
||||
except docker.errors.APIError:
|
||||
pass
|
||||
for container in self.tmp_containers:
|
||||
try:
|
||||
self.client.stop(container, timeout=1)
|
||||
self.client.remove_container(container)
|
||||
client.api.remove_container(container, force=True)
|
||||
except docker.errors.APIError:
|
||||
pass
|
||||
for network in self.tmp_networks:
|
||||
try:
|
||||
self.client.remove_network(network)
|
||||
client.api.remove_network(network)
|
||||
except docker.errors.APIError:
|
||||
pass
|
||||
for volume in self.tmp_volumes:
|
||||
try:
|
||||
client.api.remove_volume(volume)
|
||||
except docker.errors.APIError:
|
||||
pass
|
||||
|
||||
for folder in self.tmp_folders:
|
||||
shutil.rmtree(folder)
|
||||
|
||||
for volume in self.tmp_volumes:
|
||||
try:
|
||||
self.client.remove_volume(volume)
|
||||
except docker.errors.APIError:
|
||||
pass
|
||||
|
||||
self.client.close()
|
||||
class BaseAPIIntegrationTest(BaseIntegrationTest):
|
||||
"""
|
||||
A test case for `APIClient` integration tests. It sets up an `APIClient`
|
||||
as `self.client`.
|
||||
"""
|
||||
|
||||
def setUp(self):
|
||||
super(BaseAPIIntegrationTest, self).setUp()
|
||||
self.client = docker.APIClient(timeout=60, **kwargs_from_env())
|
||||
|
||||
def run_container(self, *args, **kwargs):
|
||||
container = self.client.create_container(*args, **kwargs)
|
||||
|
|
|
|||
|
|
@ -0,0 +1,20 @@
|
|||
import unittest
|
||||
|
||||
import docker
|
||||
|
||||
|
||||
class ClientTest(unittest.TestCase):
|
||||
|
||||
def test_info(self):
|
||||
client = docker.from_env()
|
||||
info = client.info()
|
||||
assert 'ID' in info
|
||||
assert 'Name' in info
|
||||
|
||||
def test_ping(self):
|
||||
client = docker.from_env()
|
||||
assert client.ping() is True
|
||||
|
||||
def test_version(self):
|
||||
client = docker.from_env()
|
||||
assert 'Version' in client.version()
|
||||
|
|
@ -4,6 +4,7 @@ import sys
|
|||
import warnings
|
||||
|
||||
import docker.errors
|
||||
from docker.utils import kwargs_from_env
|
||||
import pytest
|
||||
|
||||
from .base import BUSYBOX
|
||||
|
|
@ -12,7 +13,7 @@ from .base import BUSYBOX
|
|||
@pytest.fixture(autouse=True, scope='session')
|
||||
def setup_test_session():
|
||||
warnings.simplefilter('error')
|
||||
c = docker.from_env()
|
||||
c = docker.APIClient(**kwargs_from_env())
|
||||
try:
|
||||
c.inspect_image(BUSYBOX)
|
||||
except docker.errors.NotFound:
|
||||
|
|
|
|||
|
|
@ -1,8 +1,8 @@
|
|||
from docker.errors import APIError
|
||||
from .base import BaseIntegrationTest, BUSYBOX
|
||||
from .base import BaseAPIIntegrationTest, BUSYBOX
|
||||
|
||||
|
||||
class ErrorsTest(BaseIntegrationTest):
|
||||
class ErrorsTest(BaseAPIIntegrationTest):
|
||||
def test_api_error_parses_json(self):
|
||||
container = self.client.create_container(BUSYBOX, ['sleep', '10'])
|
||||
self.client.start(container['Id'])
|
||||
|
|
|
|||
|
|
@ -0,0 +1,204 @@
|
|||
import docker
|
||||
from .base import BaseIntegrationTest
|
||||
|
||||
|
||||
class ContainerCollectionTest(BaseIntegrationTest):
|
||||
|
||||
def test_run(self):
|
||||
client = docker.from_env()
|
||||
self.assertEqual(
|
||||
client.containers.run("alpine", "echo hello world", remove=True),
|
||||
b'hello world\n'
|
||||
)
|
||||
|
||||
def test_run_detach(self):
|
||||
client = docker.from_env()
|
||||
container = client.containers.run("alpine", "sleep 300", detach=True)
|
||||
self.tmp_containers.append(container.id)
|
||||
assert container.attrs['Config']['Image'] == "alpine"
|
||||
assert container.attrs['Config']['Cmd'] == ['sleep', '300']
|
||||
|
||||
def test_run_with_error(self):
|
||||
client = docker.from_env()
|
||||
with self.assertRaises(docker.errors.ContainerError) as cm:
|
||||
client.containers.run("alpine", "cat /test", remove=True)
|
||||
assert cm.exception.exit_status == 1
|
||||
assert "cat /test" in str(cm.exception)
|
||||
assert "alpine" in str(cm.exception)
|
||||
assert "No such file or directory" in str(cm.exception)
|
||||
|
||||
def test_run_with_image_that_does_not_exist(self):
|
||||
client = docker.from_env()
|
||||
with self.assertRaises(docker.errors.ImageNotFound):
|
||||
client.containers.run("dockerpytest_does_not_exist")
|
||||
|
||||
def test_get(self):
|
||||
client = docker.from_env()
|
||||
container = client.containers.run("alpine", "sleep 300", detach=True)
|
||||
self.tmp_containers.append(container.id)
|
||||
assert client.containers.get(container.id).attrs[
|
||||
'Config']['Image'] == "alpine"
|
||||
|
||||
def test_list(self):
|
||||
client = docker.from_env()
|
||||
container_id = client.containers.run(
|
||||
"alpine", "sleep 300", detach=True).id
|
||||
self.tmp_containers.append(container_id)
|
||||
containers = [c for c in client.containers.list() if c.id ==
|
||||
container_id]
|
||||
assert len(containers) == 1
|
||||
|
||||
container = containers[0]
|
||||
assert container.attrs['Config']['Image'] == 'alpine'
|
||||
|
||||
container.kill()
|
||||
container.remove()
|
||||
assert container_id not in [c.id for c in client.containers.list()]
|
||||
|
||||
|
||||
class ContainerTest(BaseIntegrationTest):
|
||||
|
||||
def test_commit(self):
|
||||
client = docker.from_env()
|
||||
container = client.containers.run(
|
||||
"alpine", "sh -c 'echo \"hello\" > /test'",
|
||||
detach=True
|
||||
)
|
||||
self.tmp_containers.append(container.id)
|
||||
container.wait()
|
||||
image = container.commit()
|
||||
self.assertEqual(
|
||||
client.containers.run(image.id, "cat /test", remove=True),
|
||||
b"hello\n"
|
||||
)
|
||||
|
||||
def test_diff(self):
|
||||
client = docker.from_env()
|
||||
container = client.containers.run("alpine", "touch /test", detach=True)
|
||||
self.tmp_containers.append(container.id)
|
||||
container.wait()
|
||||
assert container.diff() == [{'Path': '/test', 'Kind': 1}]
|
||||
|
||||
def test_exec_run(self):
|
||||
client = docker.from_env()
|
||||
container = client.containers.run(
|
||||
"alpine", "sh -c 'echo \"hello\" > /test; sleep 60'", detach=True
|
||||
)
|
||||
self.tmp_containers.append(container.id)
|
||||
assert container.exec_run("cat /test") == b"hello\n"
|
||||
|
||||
def test_kill(self):
|
||||
client = docker.from_env()
|
||||
container = client.containers.run("alpine", "sleep 300", detach=True)
|
||||
self.tmp_containers.append(container.id)
|
||||
while container.status != 'running':
|
||||
container.reload()
|
||||
assert container.status == 'running'
|
||||
container.kill()
|
||||
container.reload()
|
||||
assert container.status == 'exited'
|
||||
|
||||
def test_logs(self):
|
||||
client = docker.from_env()
|
||||
container = client.containers.run("alpine", "echo hello world",
|
||||
detach=True)
|
||||
self.tmp_containers.append(container.id)
|
||||
container.wait()
|
||||
assert container.logs() == b"hello world\n"
|
||||
|
||||
def test_pause(self):
|
||||
client = docker.from_env()
|
||||
container = client.containers.run("alpine", "sleep 300", detach=True)
|
||||
self.tmp_containers.append(container.id)
|
||||
container.pause()
|
||||
container.reload()
|
||||
assert container.status == "paused"
|
||||
container.unpause()
|
||||
container.reload()
|
||||
assert container.status == "running"
|
||||
|
||||
def test_remove(self):
|
||||
client = docker.from_env()
|
||||
container = client.containers.run("alpine", "echo hello", detach=True)
|
||||
self.tmp_containers.append(container.id)
|
||||
assert container.id in [c.id for c in client.containers.list(all=True)]
|
||||
container.wait()
|
||||
container.remove()
|
||||
containers = client.containers.list(all=True)
|
||||
assert container.id not in [c.id for c in containers]
|
||||
|
||||
def test_rename(self):
|
||||
client = docker.from_env()
|
||||
container = client.containers.run("alpine", "echo hello", name="test1",
|
||||
detach=True)
|
||||
self.tmp_containers.append(container.id)
|
||||
assert container.name == "test1"
|
||||
container.rename("test2")
|
||||
container.reload()
|
||||
assert container.name == "test2"
|
||||
|
||||
def test_restart(self):
|
||||
client = docker.from_env()
|
||||
container = client.containers.run("alpine", "sleep 100", detach=True)
|
||||
self.tmp_containers.append(container.id)
|
||||
first_started_at = container.attrs['State']['StartedAt']
|
||||
container.restart()
|
||||
container.reload()
|
||||
second_started_at = container.attrs['State']['StartedAt']
|
||||
assert first_started_at != second_started_at
|
||||
|
||||
def test_start(self):
|
||||
client = docker.from_env()
|
||||
container = client.containers.create("alpine", "sleep 50", detach=True)
|
||||
self.tmp_containers.append(container.id)
|
||||
assert container.status == "created"
|
||||
container.start()
|
||||
container.reload()
|
||||
assert container.status == "running"
|
||||
|
||||
def test_stats(self):
|
||||
client = docker.from_env()
|
||||
container = client.containers.run("alpine", "sleep 100", detach=True)
|
||||
self.tmp_containers.append(container.id)
|
||||
stats = container.stats(stream=False)
|
||||
for key in ['read', 'networks', 'precpu_stats', 'cpu_stats',
|
||||
'memory_stats', 'blkio_stats']:
|
||||
assert key in stats
|
||||
|
||||
def test_stop(self):
|
||||
client = docker.from_env()
|
||||
container = client.containers.run("alpine", "top", detach=True)
|
||||
self.tmp_containers.append(container.id)
|
||||
assert container.status in ("running", "created")
|
||||
container.stop(timeout=2)
|
||||
container.reload()
|
||||
assert container.status == "exited"
|
||||
|
||||
def test_top(self):
|
||||
client = docker.from_env()
|
||||
container = client.containers.run("alpine", "sleep 60", detach=True)
|
||||
self.tmp_containers.append(container.id)
|
||||
top = container.top()
|
||||
assert len(top['Processes']) == 1
|
||||
assert 'sleep 60' in top['Processes'][0]
|
||||
|
||||
def test_update(self):
|
||||
client = docker.from_env()
|
||||
container = client.containers.run("alpine", "sleep 60", detach=True,
|
||||
cpu_shares=2)
|
||||
self.tmp_containers.append(container.id)
|
||||
assert container.attrs['HostConfig']['CpuShares'] == 2
|
||||
container.update(cpu_shares=3)
|
||||
container.reload()
|
||||
assert container.attrs['HostConfig']['CpuShares'] == 3
|
||||
|
||||
def test_wait(self):
|
||||
client = docker.from_env()
|
||||
container = client.containers.run("alpine", "sh -c 'exit 0'",
|
||||
detach=True)
|
||||
self.tmp_containers.append(container.id)
|
||||
assert container.wait() == 0
|
||||
container = client.containers.run("alpine", "sh -c 'exit 1'",
|
||||
detach=True)
|
||||
self.tmp_containers.append(container.id)
|
||||
assert container.wait() == 1
|
||||
|
|
@ -0,0 +1,67 @@
|
|||
import io
|
||||
import docker
|
||||
from .base import BaseIntegrationTest
|
||||
|
||||
|
||||
class ImageCollectionTest(BaseIntegrationTest):
|
||||
|
||||
def test_build(self):
|
||||
client = docker.from_env()
|
||||
image = client.images.build(fileobj=io.BytesIO(
|
||||
"FROM alpine\n"
|
||||
"CMD echo hello world".encode('ascii')
|
||||
))
|
||||
self.tmp_imgs.append(image.id)
|
||||
assert client.containers.run(image) == b"hello world\n"
|
||||
|
||||
def test_build_with_error(self):
|
||||
client = docker.from_env()
|
||||
with self.assertRaises(docker.errors.BuildError) as cm:
|
||||
client.images.build(fileobj=io.BytesIO(
|
||||
"FROM alpine\n"
|
||||
"NOTADOCKERFILECOMMAND".encode('ascii')
|
||||
))
|
||||
assert str(cm.exception) == ("Unknown instruction: "
|
||||
"NOTADOCKERFILECOMMAND")
|
||||
|
||||
def test_list(self):
|
||||
client = docker.from_env()
|
||||
image = client.images.pull('alpine:latest')
|
||||
assert image.id in get_ids(client.images.list())
|
||||
|
||||
def test_list_with_repository(self):
|
||||
client = docker.from_env()
|
||||
image = client.images.pull('alpine:latest')
|
||||
assert image.id in get_ids(client.images.list('alpine'))
|
||||
assert image.id in get_ids(client.images.list('alpine:latest'))
|
||||
|
||||
def test_pull(self):
|
||||
client = docker.from_env()
|
||||
image = client.images.pull('alpine:latest')
|
||||
assert 'alpine:latest' in image.attrs['RepoTags']
|
||||
|
||||
|
||||
class ImageTest(BaseIntegrationTest):
|
||||
|
||||
def test_tag_and_remove(self):
|
||||
repo = 'dockersdk.tests.images.test_tag'
|
||||
tag = 'some-tag'
|
||||
identifier = '{}:{}'.format(repo, tag)
|
||||
|
||||
client = docker.from_env()
|
||||
image = client.images.pull('alpine:latest')
|
||||
|
||||
image.tag(repo, tag)
|
||||
self.tmp_imgs.append(identifier)
|
||||
assert image.id in get_ids(client.images.list(repo))
|
||||
assert image.id in get_ids(client.images.list(identifier))
|
||||
|
||||
client.images.remove(identifier)
|
||||
assert image.id not in get_ids(client.images.list(repo))
|
||||
assert image.id not in get_ids(client.images.list(identifier))
|
||||
|
||||
assert image.id in get_ids(client.images.list('alpine:latest'))
|
||||
|
||||
|
||||
def get_ids(images):
|
||||
return [i.id for i in images]
|
||||
|
|
@ -0,0 +1,64 @@
|
|||
import docker
|
||||
from .. import helpers
|
||||
from .base import BaseIntegrationTest
|
||||
|
||||
|
||||
class ImageCollectionTest(BaseIntegrationTest):
|
||||
|
||||
def test_create(self):
|
||||
client = docker.from_env()
|
||||
name = helpers.random_name()
|
||||
network = client.networks.create(name, labels={'foo': 'bar'})
|
||||
self.tmp_networks.append(network.id)
|
||||
assert network.name == name
|
||||
assert network.attrs['Labels']['foo'] == "bar"
|
||||
|
||||
def test_get(self):
|
||||
client = docker.from_env()
|
||||
name = helpers.random_name()
|
||||
network_id = client.networks.create(name).id
|
||||
self.tmp_networks.append(network_id)
|
||||
network = client.networks.get(network_id)
|
||||
assert network.name == name
|
||||
|
||||
def test_list_remove(self):
|
||||
client = docker.from_env()
|
||||
name = helpers.random_name()
|
||||
network = client.networks.create(name)
|
||||
self.tmp_networks.append(network.id)
|
||||
assert network.id in [n.id for n in client.networks.list()]
|
||||
assert network.id not in [
|
||||
n.id for n in
|
||||
client.networks.list(ids=["fdhjklfdfdshjkfds"])
|
||||
]
|
||||
assert network.id in [
|
||||
n.id for n in
|
||||
client.networks.list(ids=[network.id])
|
||||
]
|
||||
assert network.id not in [
|
||||
n.id for n in
|
||||
client.networks.list(names=["fdshjklfdsjhkl"])
|
||||
]
|
||||
assert network.id in [
|
||||
n.id for n in
|
||||
client.networks.list(names=[name])
|
||||
]
|
||||
network.remove()
|
||||
assert network.id not in [n.id for n in client.networks.list()]
|
||||
|
||||
|
||||
class ImageTest(BaseIntegrationTest):
|
||||
|
||||
def test_connect_disconnect(self):
|
||||
client = docker.from_env()
|
||||
network = client.networks.create(helpers.random_name())
|
||||
self.tmp_networks.append(network.id)
|
||||
container = client.containers.create("alpine", "sleep 300")
|
||||
self.tmp_containers.append(container.id)
|
||||
assert network.containers == []
|
||||
network.connect(container)
|
||||
container.start()
|
||||
assert client.networks.get(network.id).containers == [container]
|
||||
network.disconnect(container)
|
||||
assert network.containers == []
|
||||
assert client.networks.get(network.id).containers == []
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
import unittest
|
||||
import docker
|
||||
from .. import helpers
|
||||
|
||||
|
||||
class NodesTest(unittest.TestCase):
|
||||
def setUp(self):
|
||||
helpers.force_leave_swarm(docker.from_env())
|
||||
|
||||
def tearDown(self):
|
||||
helpers.force_leave_swarm(docker.from_env())
|
||||
|
||||
def test_list_get_update(self):
|
||||
client = docker.from_env()
|
||||
client.swarm.init()
|
||||
nodes = client.nodes.list()
|
||||
assert len(nodes) == 1
|
||||
assert nodes[0].attrs['Spec']['Role'] == 'manager'
|
||||
|
||||
node = client.nodes.get(nodes[0].id)
|
||||
assert node.id == nodes[0].id
|
||||
assert node.attrs['Spec']['Role'] == 'manager'
|
||||
assert node.version > 0
|
||||
|
||||
node = client.nodes.list()[0]
|
||||
assert not node.attrs['Spec'].get('Labels')
|
||||
node.update({
|
||||
'Availability': 'active',
|
||||
'Name': 'node-name',
|
||||
'Role': 'manager',
|
||||
'Labels': {'foo': 'bar'}
|
||||
})
|
||||
node.reload()
|
||||
assert node.attrs['Spec']['Labels'] == {'foo': 'bar'}
|
||||
|
|
@ -0,0 +1,16 @@
|
|||
import docker
|
||||
from .base import BaseIntegrationTest
|
||||
|
||||
|
||||
class ModelTest(BaseIntegrationTest):
|
||||
|
||||
def test_reload(self):
|
||||
client = docker.from_env()
|
||||
container = client.containers.run("alpine", "sleep 300", detach=True)
|
||||
self.tmp_containers.append(container.id)
|
||||
first_started_at = container.attrs['State']['StartedAt']
|
||||
container.kill()
|
||||
container.start()
|
||||
assert container.attrs['State']['StartedAt'] == first_started_at
|
||||
container.reload()
|
||||
assert container.attrs['State']['StartedAt'] != first_started_at
|
||||
|
|
@ -0,0 +1,100 @@
|
|||
import unittest
|
||||
import docker
|
||||
from .. import helpers
|
||||
|
||||
|
||||
class ServiceTest(unittest.TestCase):
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
client = docker.from_env()
|
||||
helpers.force_leave_swarm(client)
|
||||
client.swarm.init()
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
helpers.force_leave_swarm(docker.from_env())
|
||||
|
||||
def test_create(self):
|
||||
client = docker.from_env()
|
||||
name = helpers.random_name()
|
||||
service = client.services.create(
|
||||
# create arguments
|
||||
name=name,
|
||||
labels={'foo': 'bar'},
|
||||
# ContainerSpec arguments
|
||||
image="alpine",
|
||||
command="sleep 300",
|
||||
container_labels={'container': 'label'}
|
||||
)
|
||||
assert service.name == name
|
||||
assert service.attrs['Spec']['Labels']['foo'] == 'bar'
|
||||
container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
|
||||
assert container_spec['Image'] == "alpine"
|
||||
assert container_spec['Labels'] == {'container': 'label'}
|
||||
|
||||
def test_get(self):
|
||||
client = docker.from_env()
|
||||
name = helpers.random_name()
|
||||
service = client.services.create(
|
||||
name=name,
|
||||
image="alpine",
|
||||
command="sleep 300"
|
||||
)
|
||||
service = client.services.get(service.id)
|
||||
assert service.name == name
|
||||
|
||||
def test_list_remove(self):
|
||||
client = docker.from_env()
|
||||
service = client.services.create(
|
||||
name=helpers.random_name(),
|
||||
image="alpine",
|
||||
command="sleep 300"
|
||||
)
|
||||
assert service in client.services.list()
|
||||
service.remove()
|
||||
assert service not in client.services.list()
|
||||
|
||||
def test_tasks(self):
|
||||
client = docker.from_env()
|
||||
service1 = client.services.create(
|
||||
name=helpers.random_name(),
|
||||
image="alpine",
|
||||
command="sleep 300"
|
||||
)
|
||||
service2 = client.services.create(
|
||||
name=helpers.random_name(),
|
||||
image="alpine",
|
||||
command="sleep 300"
|
||||
)
|
||||
tasks = []
|
||||
while len(tasks) == 0:
|
||||
tasks = service1.tasks()
|
||||
assert len(tasks) == 1
|
||||
assert tasks[0]['ServiceID'] == service1.id
|
||||
|
||||
tasks = []
|
||||
while len(tasks) == 0:
|
||||
tasks = service2.tasks()
|
||||
assert len(tasks) == 1
|
||||
assert tasks[0]['ServiceID'] == service2.id
|
||||
|
||||
def test_update(self):
|
||||
client = docker.from_env()
|
||||
service = client.services.create(
|
||||
# create arguments
|
||||
name=helpers.random_name(),
|
||||
# ContainerSpec arguments
|
||||
image="alpine",
|
||||
command="sleep 300"
|
||||
)
|
||||
new_name = helpers.random_name()
|
||||
service.update(
|
||||
# create argument
|
||||
name=new_name,
|
||||
# ContainerSpec argument
|
||||
command="sleep 600"
|
||||
)
|
||||
service.reload()
|
||||
assert service.name == new_name
|
||||
container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
|
||||
assert container_spec['Command'] == ["sleep", "600"]
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
import unittest
|
||||
import docker
|
||||
from .. import helpers
|
||||
|
||||
|
||||
class SwarmTest(unittest.TestCase):
|
||||
def setUp(self):
|
||||
helpers.force_leave_swarm(docker.from_env())
|
||||
|
||||
def tearDown(self):
|
||||
helpers.force_leave_swarm(docker.from_env())
|
||||
|
||||
def test_init_update_leave(self):
|
||||
client = docker.from_env()
|
||||
client.swarm.init(snapshot_interval=5000)
|
||||
assert client.swarm.attrs['Spec']['Raft']['SnapshotInterval'] == 5000
|
||||
client.swarm.update(snapshot_interval=10000)
|
||||
assert client.swarm.attrs['Spec']['Raft']['SnapshotInterval'] == 10000
|
||||
assert client.swarm.leave(force=True)
|
||||
with self.assertRaises(docker.errors.APIError) as cm:
|
||||
client.swarm.reload()
|
||||
assert cm.exception.response.status_code == 406
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
import docker
|
||||
from .base import BaseIntegrationTest
|
||||
|
||||
|
||||
class VolumesTest(BaseIntegrationTest):
|
||||
def test_create_get(self):
|
||||
client = docker.from_env()
|
||||
volume = client.volumes.create(
|
||||
'dockerpytest_1',
|
||||
driver='local',
|
||||
labels={'labelkey': 'labelvalue'}
|
||||
)
|
||||
self.tmp_volumes.append(volume.id)
|
||||
assert volume.id
|
||||
assert volume.name == 'dockerpytest_1'
|
||||
assert volume.attrs['Labels'] == {'labelkey': 'labelvalue'}
|
||||
|
||||
volume = client.volumes.get(volume.id)
|
||||
assert volume.name == 'dockerpytest_1'
|
||||
|
||||
def test_list_remove(self):
|
||||
client = docker.from_env()
|
||||
volume = client.volumes.create('dockerpytest_1')
|
||||
self.tmp_volumes.append(volume.id)
|
||||
assert volume in client.volumes.list()
|
||||
assert volume in client.volumes.list(filters={'name': 'dockerpytest_'})
|
||||
assert volume not in client.volumes.list(filters={'name': 'foobar'})
|
||||
|
||||
volume.remove()
|
||||
assert volume not in client.volumes.list()
|
||||
|
|
@ -4,10 +4,10 @@ import random
|
|||
import docker
|
||||
import six
|
||||
|
||||
from .base import BaseIntegrationTest, BUSYBOX
|
||||
from .base import BaseAPIIntegrationTest, BUSYBOX
|
||||
|
||||
|
||||
class TestRegressions(BaseIntegrationTest):
|
||||
class TestRegressions(BaseAPIIntegrationTest):
|
||||
def test_443_handle_nonchunked_response_in_stream(self):
|
||||
dfile = io.BytesIO()
|
||||
with self.assertRaises(docker.errors.APIError) as exc:
|
||||
|
|
|
|||
|
|
@ -4,10 +4,10 @@ import io
|
|||
import docker
|
||||
from docker import auth
|
||||
|
||||
from .api_test import DockerClientTest, fake_request, url_prefix
|
||||
from .api_test import BaseAPIClientTest, fake_request, url_prefix
|
||||
|
||||
|
||||
class BuildTest(DockerClientTest):
|
||||
class BuildTest(BaseAPIClientTest):
|
||||
def test_build_container(self):
|
||||
script = io.BytesIO('\n'.join([
|
||||
'FROM busybox',
|
||||
|
|
@ -11,7 +11,7 @@ import six
|
|||
from . import fake_api
|
||||
from ..helpers import requires_api_version
|
||||
from .api_test import (
|
||||
DockerClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
|
||||
BaseAPIClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
|
||||
fake_inspect_container
|
||||
)
|
||||
|
||||
|
|
@ -25,7 +25,7 @@ def fake_inspect_container_tty(self, container):
|
|||
return fake_inspect_container(self, container, tty=True)
|
||||
|
||||
|
||||
class StartContainerTest(DockerClientTest):
|
||||
class StartContainerTest(BaseAPIClientTest):
|
||||
def test_start_container(self):
|
||||
self.client.start(fake_api.FAKE_CONTAINER_ID)
|
||||
|
||||
|
|
@ -168,7 +168,7 @@ class StartContainerTest(DockerClientTest):
|
|||
)
|
||||
|
||||
|
||||
class CreateContainerTest(DockerClientTest):
|
||||
class CreateContainerTest(BaseAPIClientTest):
|
||||
def test_create_container(self):
|
||||
self.client.create_container('busybox', 'true')
|
||||
|
||||
|
|
@ -1180,7 +1180,7 @@ class CreateContainerTest(DockerClientTest):
|
|||
self.assertEqual(json.loads(args[1]['data'])['Env'], expected)
|
||||
|
||||
|
||||
class ContainerTest(DockerClientTest):
|
||||
class ContainerTest(BaseAPIClientTest):
|
||||
def test_list_containers(self):
|
||||
self.client.containers(all=True)
|
||||
|
||||
|
|
@ -1244,7 +1244,7 @@ class ContainerTest(DockerClientTest):
|
|||
)
|
||||
|
||||
def test_logs(self):
|
||||
with mock.patch('docker.Client.inspect_container',
|
||||
with mock.patch('docker.api.client.APIClient.inspect_container',
|
||||
fake_inspect_container):
|
||||
logs = self.client.logs(fake_api.FAKE_CONTAINER_ID)
|
||||
|
||||
|
|
@ -1263,7 +1263,7 @@ class ContainerTest(DockerClientTest):
|
|||
)
|
||||
|
||||
def test_logs_with_dict_instead_of_id(self):
|
||||
with mock.patch('docker.Client.inspect_container',
|
||||
with mock.patch('docker.api.client.APIClient.inspect_container',
|
||||
fake_inspect_container):
|
||||
logs = self.client.logs({'Id': fake_api.FAKE_CONTAINER_ID})
|
||||
|
||||
|
|
@ -1282,7 +1282,7 @@ class ContainerTest(DockerClientTest):
|
|||
)
|
||||
|
||||
def test_log_streaming(self):
|
||||
with mock.patch('docker.Client.inspect_container',
|
||||
with mock.patch('docker.api.client.APIClient.inspect_container',
|
||||
fake_inspect_container):
|
||||
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True,
|
||||
follow=False)
|
||||
|
|
@ -1297,7 +1297,7 @@ class ContainerTest(DockerClientTest):
|
|||
)
|
||||
|
||||
def test_log_following(self):
|
||||
with mock.patch('docker.Client.inspect_container',
|
||||
with mock.patch('docker.api.client.APIClient.inspect_container',
|
||||
fake_inspect_container):
|
||||
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
|
||||
follow=True)
|
||||
|
|
@ -1312,7 +1312,7 @@ class ContainerTest(DockerClientTest):
|
|||
)
|
||||
|
||||
def test_log_following_backwards(self):
|
||||
with mock.patch('docker.Client.inspect_container',
|
||||
with mock.patch('docker.api.client.APIClient.inspect_container',
|
||||
fake_inspect_container):
|
||||
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True)
|
||||
|
||||
|
|
@ -1326,7 +1326,7 @@ class ContainerTest(DockerClientTest):
|
|||
)
|
||||
|
||||
def test_log_streaming_and_following(self):
|
||||
with mock.patch('docker.Client.inspect_container',
|
||||
with mock.patch('docker.api.client.APIClient.inspect_container',
|
||||
fake_inspect_container):
|
||||
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True,
|
||||
follow=True)
|
||||
|
|
@ -1342,7 +1342,7 @@ class ContainerTest(DockerClientTest):
|
|||
|
||||
def test_log_tail(self):
|
||||
|
||||
with mock.patch('docker.Client.inspect_container',
|
||||
with mock.patch('docker.api.client.APIClient.inspect_container',
|
||||
fake_inspect_container):
|
||||
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
|
||||
follow=False, tail=10)
|
||||
|
|
@ -1358,7 +1358,7 @@ class ContainerTest(DockerClientTest):
|
|||
|
||||
def test_log_since(self):
|
||||
ts = 809222400
|
||||
with mock.patch('docker.Client.inspect_container',
|
||||
with mock.patch('docker.api.client.APIClient.inspect_container',
|
||||
fake_inspect_container):
|
||||
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
|
||||
follow=False, since=ts)
|
||||
|
|
@ -1375,7 +1375,7 @@ class ContainerTest(DockerClientTest):
|
|||
def test_log_since_with_datetime(self):
|
||||
ts = 809222400
|
||||
time = datetime.datetime.utcfromtimestamp(ts)
|
||||
with mock.patch('docker.Client.inspect_container',
|
||||
with mock.patch('docker.api.client.APIClient.inspect_container',
|
||||
fake_inspect_container):
|
||||
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
|
||||
follow=False, since=time)
|
||||
|
|
@ -1391,9 +1391,9 @@ class ContainerTest(DockerClientTest):
|
|||
|
||||
def test_log_tty(self):
|
||||
m = mock.Mock()
|
||||
with mock.patch('docker.Client.inspect_container',
|
||||
with mock.patch('docker.api.client.APIClient.inspect_container',
|
||||
fake_inspect_container_tty):
|
||||
with mock.patch('docker.Client._stream_raw_result',
|
||||
with mock.patch('docker.api.client.APIClient._stream_raw_result',
|
||||
m):
|
||||
self.client.logs(fake_api.FAKE_CONTAINER_ID,
|
||||
follow=True, stream=True)
|
||||
|
|
@ -2,11 +2,11 @@ import json
|
|||
|
||||
from . import fake_api
|
||||
from .api_test import (
|
||||
DockerClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
|
||||
BaseAPIClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
|
||||
)
|
||||
|
||||
|
||||
class ExecTest(DockerClientTest):
|
||||
class ExecTest(BaseAPIClientTest):
|
||||
def test_exec_create(self):
|
||||
self.client.exec_create(fake_api.FAKE_CONTAINER_ID, ['ls', '-1'])
|
||||
|
||||
|
|
@ -4,7 +4,7 @@ import pytest
|
|||
from . import fake_api
|
||||
from docker import auth
|
||||
from .api_test import (
|
||||
DockerClientTest, fake_request, DEFAULT_TIMEOUT_SECONDS, url_prefix,
|
||||
BaseAPIClientTest, fake_request, DEFAULT_TIMEOUT_SECONDS, url_prefix,
|
||||
fake_resolve_authconfig
|
||||
)
|
||||
|
||||
|
|
@ -14,7 +14,7 @@ except ImportError:
|
|||
import mock
|
||||
|
||||
|
||||
class ImageTest(DockerClientTest):
|
||||
class ImageTest(BaseAPIClientTest):
|
||||
def test_image_viz(self):
|
||||
with pytest.raises(Exception):
|
||||
self.client.images('busybox', viz=True)
|
||||
|
|
@ -228,7 +228,7 @@ class ImageTest(DockerClientTest):
|
|||
)
|
||||
|
||||
def test_push_image(self):
|
||||
with mock.patch('docker.auth.auth.resolve_authconfig',
|
||||
with mock.patch('docker.auth.resolve_authconfig',
|
||||
fake_resolve_authconfig):
|
||||
self.client.push(fake_api.FAKE_IMAGE_NAME)
|
||||
|
||||
|
|
@ -245,7 +245,7 @@ class ImageTest(DockerClientTest):
|
|||
)
|
||||
|
||||
def test_push_image_with_tag(self):
|
||||
with mock.patch('docker.auth.auth.resolve_authconfig',
|
||||
with mock.patch('docker.auth.resolve_authconfig',
|
||||
fake_resolve_authconfig):
|
||||
self.client.push(
|
||||
fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME
|
||||
|
|
@ -289,7 +289,7 @@ class ImageTest(DockerClientTest):
|
|||
)
|
||||
|
||||
def test_push_image_stream(self):
|
||||
with mock.patch('docker.auth.auth.resolve_authconfig',
|
||||
with mock.patch('docker.auth.resolve_authconfig',
|
||||
fake_resolve_authconfig):
|
||||
self.client.push(fake_api.FAKE_IMAGE_NAME, stream=True)
|
||||
|
||||
|
|
@ -2,8 +2,8 @@ import json
|
|||
|
||||
import six
|
||||
|
||||
from .api_test import BaseAPIClientTest, url_prefix, response
|
||||
from ..helpers import requires_api_version
|
||||
from .api_test import DockerClientTest, url_prefix, response
|
||||
from docker.utils import create_ipam_config, create_ipam_pool
|
||||
|
||||
try:
|
||||
|
|
@ -12,7 +12,7 @@ except ImportError:
|
|||
import mock
|
||||
|
||||
|
||||
class NetworkTest(DockerClientTest):
|
||||
class NetworkTest(BaseAPIClientTest):
|
||||
@requires_api_version('1.21')
|
||||
def test_list_networks(self):
|
||||
networks = [
|
||||
|
|
@ -33,7 +33,7 @@ class NetworkTest(DockerClientTest):
|
|||
get = mock.Mock(return_value=response(
|
||||
status_code=200, content=json.dumps(networks).encode('utf-8')))
|
||||
|
||||
with mock.patch('docker.Client.get', get):
|
||||
with mock.patch('docker.api.client.APIClient.get', get):
|
||||
self.assertEqual(self.client.networks(), networks)
|
||||
|
||||
self.assertEqual(get.call_args[0][0], url_prefix + 'networks')
|
||||
|
|
@ -59,7 +59,7 @@ class NetworkTest(DockerClientTest):
|
|||
network_response = response(status_code=200, content=network_data)
|
||||
post = mock.Mock(return_value=network_response)
|
||||
|
||||
with mock.patch('docker.Client.post', post):
|
||||
with mock.patch('docker.api.client.APIClient.post', post):
|
||||
result = self.client.create_network('foo')
|
||||
self.assertEqual(result, network_data)
|
||||
|
||||
|
|
@ -109,7 +109,7 @@ class NetworkTest(DockerClientTest):
|
|||
network_id = 'abc12345'
|
||||
delete = mock.Mock(return_value=response(status_code=200))
|
||||
|
||||
with mock.patch('docker.Client.delete', delete):
|
||||
with mock.patch('docker.api.client.APIClient.delete', delete):
|
||||
self.client.remove_network(network_id)
|
||||
|
||||
args = delete.call_args
|
||||
|
|
@ -130,7 +130,7 @@ class NetworkTest(DockerClientTest):
|
|||
network_response = response(status_code=200, content=network_data)
|
||||
get = mock.Mock(return_value=network_response)
|
||||
|
||||
with mock.patch('docker.Client.get', get):
|
||||
with mock.patch('docker.api.client.APIClient.get', get):
|
||||
result = self.client.inspect_network(network_id)
|
||||
self.assertEqual(result, network_data)
|
||||
|
||||
|
|
@ -145,7 +145,7 @@ class NetworkTest(DockerClientTest):
|
|||
|
||||
post = mock.Mock(return_value=response(status_code=201))
|
||||
|
||||
with mock.patch('docker.Client.post', post):
|
||||
with mock.patch('docker.api.client.APIClient.post', post):
|
||||
self.client.connect_container_to_network(
|
||||
{'Id': container_id},
|
||||
network_id,
|
||||
|
|
@ -174,7 +174,7 @@ class NetworkTest(DockerClientTest):
|
|||
|
||||
post = mock.Mock(return_value=response(status_code=201))
|
||||
|
||||
with mock.patch('docker.Client.post', post):
|
||||
with mock.patch('docker.api.client.APIClient.post', post):
|
||||
self.client.disconnect_container_from_network(
|
||||
{'Id': container_id}, network_id)
|
||||
|
||||
|
|
@ -1,21 +1,21 @@
|
|||
import datetime
|
||||
import json
|
||||
import io
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import socket
|
||||
import sys
|
||||
import tempfile
|
||||
import threading
|
||||
import time
|
||||
import io
|
||||
import unittest
|
||||
|
||||
import docker
|
||||
from docker.api import APIClient
|
||||
import requests
|
||||
from requests.packages import urllib3
|
||||
import six
|
||||
|
||||
from .. import base
|
||||
from . import fake_api
|
||||
|
||||
import pytest
|
||||
|
|
@ -93,15 +93,18 @@ url_prefix = '{0}v{1}/'.format(
|
|||
docker.constants.DEFAULT_DOCKER_API_VERSION)
|
||||
|
||||
|
||||
class DockerClientTest(base.Cleanup, base.BaseTestCase):
|
||||
class BaseAPIClientTest(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.patcher = mock.patch.multiple(
|
||||
'docker.Client', get=fake_get, post=fake_post, put=fake_put,
|
||||
'docker.api.client.APIClient',
|
||||
get=fake_get,
|
||||
post=fake_post,
|
||||
put=fake_put,
|
||||
delete=fake_delete,
|
||||
_read_from_socket=fake_read_from_socket
|
||||
)
|
||||
self.patcher.start()
|
||||
self.client = docker.Client()
|
||||
self.client = APIClient()
|
||||
# Force-clear authconfig to avoid tampering with the tests
|
||||
self.client._cfg = {'Configs': {}}
|
||||
|
||||
|
|
@ -109,11 +112,6 @@ class DockerClientTest(base.Cleanup, base.BaseTestCase):
|
|||
self.client.close()
|
||||
self.patcher.stop()
|
||||
|
||||
def assertIn(self, object, collection):
|
||||
if six.PY2 and sys.version_info[1] <= 6:
|
||||
return self.assertTrue(object in collection)
|
||||
return super(DockerClientTest, self).assertIn(object, collection)
|
||||
|
||||
def base_create_payload(self, img='busybox', cmd=None):
|
||||
if not cmd:
|
||||
cmd = ['true']
|
||||
|
|
@ -125,10 +123,10 @@ class DockerClientTest(base.Cleanup, base.BaseTestCase):
|
|||
}
|
||||
|
||||
|
||||
class DockerApiTest(DockerClientTest):
|
||||
class DockerApiTest(BaseAPIClientTest):
|
||||
def test_ctor(self):
|
||||
with pytest.raises(docker.errors.DockerException) as excinfo:
|
||||
docker.Client(version=1.12)
|
||||
APIClient(version=1.12)
|
||||
|
||||
self.assertEqual(
|
||||
str(excinfo.value),
|
||||
|
|
@ -195,7 +193,7 @@ class DockerApiTest(DockerClientTest):
|
|||
)
|
||||
|
||||
def test_retrieve_server_version(self):
|
||||
client = docker.Client(version="auto")
|
||||
client = APIClient(version="auto")
|
||||
self.assertTrue(isinstance(client._version, six.string_types))
|
||||
self.assertFalse(client._version == "auto")
|
||||
client.close()
|
||||
|
|
@ -275,27 +273,27 @@ class DockerApiTest(DockerClientTest):
|
|||
return socket_adapter.socket_path
|
||||
|
||||
def test_url_compatibility_unix(self):
|
||||
c = docker.Client(base_url="unix://socket")
|
||||
c = APIClient(base_url="unix://socket")
|
||||
|
||||
assert self._socket_path_for_client_session(c) == '/socket'
|
||||
|
||||
def test_url_compatibility_unix_triple_slash(self):
|
||||
c = docker.Client(base_url="unix:///socket")
|
||||
c = APIClient(base_url="unix:///socket")
|
||||
|
||||
assert self._socket_path_for_client_session(c) == '/socket'
|
||||
|
||||
def test_url_compatibility_http_unix_triple_slash(self):
|
||||
c = docker.Client(base_url="http+unix:///socket")
|
||||
c = APIClient(base_url="http+unix:///socket")
|
||||
|
||||
assert self._socket_path_for_client_session(c) == '/socket'
|
||||
|
||||
def test_url_compatibility_http(self):
|
||||
c = docker.Client(base_url="http://hostname:1234")
|
||||
c = APIClient(base_url="http://hostname:1234")
|
||||
|
||||
assert c.base_url == "http://hostname:1234"
|
||||
|
||||
def test_url_compatibility_tcp(self):
|
||||
c = docker.Client(base_url="tcp://hostname:1234")
|
||||
c = APIClient(base_url="tcp://hostname:1234")
|
||||
|
||||
assert c.base_url == "http://hostname:1234"
|
||||
|
||||
|
|
@ -356,7 +354,7 @@ class DockerApiTest(DockerClientTest):
|
|||
self.assertEqual(result, content)
|
||||
|
||||
|
||||
class StreamTest(base.Cleanup, base.BaseTestCase):
|
||||
class StreamTest(unittest.TestCase):
|
||||
def setUp(self):
|
||||
socket_dir = tempfile.mkdtemp()
|
||||
self.build_context = tempfile.mkdtemp()
|
||||
|
|
@ -441,7 +439,7 @@ class StreamTest(base.Cleanup, base.BaseTestCase):
|
|||
b'\r\n'
|
||||
) + b'\r\n'.join(lines)
|
||||
|
||||
with docker.Client(base_url="http+unix://" + self.socket_file) \
|
||||
with APIClient(base_url="http+unix://" + self.socket_file) \
|
||||
as client:
|
||||
for i in range(5):
|
||||
try:
|
||||
|
|
@ -458,10 +456,10 @@ class StreamTest(base.Cleanup, base.BaseTestCase):
|
|||
str(i).encode() for i in range(50)])
|
||||
|
||||
|
||||
class UserAgentTest(base.BaseTestCase):
|
||||
class UserAgentTest(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.patcher = mock.patch.object(
|
||||
docker.Client,
|
||||
APIClient,
|
||||
'send',
|
||||
return_value=fake_resp("GET", "%s/version" % fake_api.prefix)
|
||||
)
|
||||
|
|
@ -471,7 +469,7 @@ class UserAgentTest(base.BaseTestCase):
|
|||
self.patcher.stop()
|
||||
|
||||
def test_default_user_agent(self):
|
||||
client = docker.Client()
|
||||
client = APIClient()
|
||||
client.version()
|
||||
|
||||
self.assertEqual(self.mock_send.call_count, 1)
|
||||
|
|
@ -480,9 +478,53 @@ class UserAgentTest(base.BaseTestCase):
|
|||
self.assertEqual(headers['User-Agent'], expected)
|
||||
|
||||
def test_custom_user_agent(self):
|
||||
client = docker.Client(user_agent='foo/bar')
|
||||
client = APIClient(user_agent='foo/bar')
|
||||
client.version()
|
||||
|
||||
self.assertEqual(self.mock_send.call_count, 1)
|
||||
headers = self.mock_send.call_args[0][0].headers
|
||||
self.assertEqual(headers['User-Agent'], 'foo/bar')
|
||||
|
||||
|
||||
class DisableSocketTest(unittest.TestCase):
|
||||
class DummySocket(object):
|
||||
def __init__(self, timeout=60):
|
||||
self.timeout = timeout
|
||||
|
||||
def settimeout(self, timeout):
|
||||
self.timeout = timeout
|
||||
|
||||
def gettimeout(self):
|
||||
return self.timeout
|
||||
|
||||
def setUp(self):
|
||||
self.client = APIClient()
|
||||
|
||||
def test_disable_socket_timeout(self):
|
||||
"""Test that the timeout is disabled on a generic socket object."""
|
||||
socket = self.DummySocket()
|
||||
|
||||
self.client._disable_socket_timeout(socket)
|
||||
|
||||
self.assertEqual(socket.timeout, None)
|
||||
|
||||
def test_disable_socket_timeout2(self):
|
||||
"""Test that the timeouts are disabled on a generic socket object
|
||||
and it's _sock object if present."""
|
||||
socket = self.DummySocket()
|
||||
socket._sock = self.DummySocket()
|
||||
|
||||
self.client._disable_socket_timeout(socket)
|
||||
|
||||
self.assertEqual(socket.timeout, None)
|
||||
self.assertEqual(socket._sock.timeout, None)
|
||||
|
||||
def test_disable_socket_timout_non_blocking(self):
|
||||
"""Test that a non-blocking socket does not get set to blocking."""
|
||||
socket = self.DummySocket()
|
||||
socket._sock = self.DummySocket(0.0)
|
||||
|
||||
self.client._disable_socket_timeout(socket)
|
||||
|
||||
self.assertEqual(socket.timeout, None)
|
||||
self.assertEqual(socket._sock.timeout, 0.0)
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue