Mirror of https://github.com/docker/docker-py.git, commit 706e2cad65.
docker/api/build.py

@@ -18,7 +18,7 @@ class BuildApiMixin(object):
               custom_context=False, encoding=None, pull=False,
               forcerm=False, dockerfile=None, container_limits=None,
               decode=False, buildargs=None, gzip=False, shmsize=None,
-              labels=None, cache_from=None):
+              labels=None, cache_from=None, target=None, network_mode=None):
         """
         Similar to the ``docker build`` command. Either ``path`` or ``fileobj``
         needs to be set. ``path`` can be a local path (to a directory
@@ -88,12 +88,16 @@ class BuildApiMixin(object):
             - cpusetcpus (str): CPUs in which to allow execution, e.g.,
               ``"0-3"``, ``"0,1"``
             decode (bool): If set to ``True``, the returned stream will be
-                decoded into dicts on the fly. Default ``False``.
+                decoded into dicts on the fly. Default ``False``
             shmsize (int): Size of `/dev/shm` in bytes. The size must be
-                greater than 0. If omitted the system uses 64MB.
-            labels (dict): A dictionary of labels to set on the image.
+                greater than 0. If omitted the system uses 64MB
+            labels (dict): A dictionary of labels to set on the image
             cache_from (list): A list of images used for build cache
-                resolution.
+                resolution
+            target (str): Name of the build-stage to build in a multi-stage
+                Dockerfile
+            network_mode (str): networking mode for the run commands during
+                build

         Returns:
             A generator for the build output.
@@ -198,6 +202,22 @@ class BuildApiMixin(object):
                 'cache_from was only introduced in API version 1.25'
             )

+        if target:
+            if utils.version_gte(self._version, '1.29'):
+                params.update({'target': target})
+            else:
+                raise errors.InvalidVersion(
+                    'target was only introduced in API version 1.29'
+                )
+
+        if network_mode:
+            if utils.version_gte(self._version, '1.25'):
+                params.update({'networkmode': network_mode})
+            else:
+                raise errors.InvalidVersion(
+                    'network_mode was only introduced in API version 1.25'
+                )
+
         if context is not None:
             headers = {'Content-Type': 'application/tar'}
             if encoding:

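The two new parameters mirror ``docker build --target`` and ``--network``. A minimal usage sketch against a daemon speaking API 1.29 or later; the path, tag and stage name are placeholders:

    import docker

    client = docker.APIClient(version='auto')
    build_output = client.build(
        path='./app',            # directory containing the Dockerfile (placeholder)
        tag='app:dev',
        target='builder',        # stop at the named multi-stage build stage
        network_mode='none',     # run build-time RUN commands without network access
        decode=True,             # yield dicts instead of raw JSON lines
    )
    for chunk in build_output:
        if 'stream' in chunk:
            print(chunk['stream'], end='')
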
docker/api/client.py

@@ -83,6 +83,12 @@ class APIClient(
         configuration.
         user_agent (str): Set a custom user agent for requests to the server.
     """

+    __attrs__ = requests.Session.__attrs__ + ['_auth_configs',
+                                              '_version',
+                                              'base_url',
+                                              'timeout']
+
     def __init__(self, base_url=None, version=None,
                  timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
                  user_agent=DEFAULT_USER_AGENT, num_pools=DEFAULT_NUM_POOLS):
@@ -248,7 +254,7 @@ class APIClient(
             'stream': 1
         }

-    @check_resource
+    @check_resource('container')
     def _attach_websocket(self, container, params=None):
         url = self._url("/containers/{0}/attach/ws", container)
         req = requests.Request("POST", url, params=self._attach_params(params))

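Listing the client's state in ``__attrs__`` is what lets ``requests.Session`` copy it during pickling, which is the "APIClient instances can now be pickled" item in the 2.4.0 change log. A quick sketch of the round trip:

    import pickle

    import docker

    client = docker.APIClient(base_url='unix://var/run/docker.sock')
    blob = pickle.dumps(client)      # works now that __attrs__ lists the client state
    restored = pickle.loads(blob)
    print(restored.timeout)          # 60, the default timeout carried across the round trip
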
@ -10,7 +10,7 @@ from ..types import (
|
|||
|
||||
|
||||
class ContainerApiMixin(object):
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
def attach(self, container, stdout=True, stderr=True,
|
||||
stream=False, logs=False):
|
||||
"""
|
||||
|
@ -54,7 +54,7 @@ class ContainerApiMixin(object):
|
|||
|
||||
return self._read_from_socket(response, stream)
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
def attach_socket(self, container, params=None, ws=False):
|
||||
"""
|
||||
Like ``attach``, but returns the underlying socket-like object for the
|
||||
|
@ -93,7 +93,7 @@ class ContainerApiMixin(object):
|
|||
)
|
||||
)
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
def commit(self, container, repository=None, tag=None, message=None,
|
||||
author=None, changes=None, conf=None):
|
||||
"""
|
||||
|
@ -195,7 +195,7 @@ class ContainerApiMixin(object):
|
|||
x['Id'] = x['Id'][:12]
|
||||
return res
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
def copy(self, container, resource):
|
||||
"""
|
||||
Identical to the ``docker cp`` command. Get files/folders from the
|
||||
|
@@ -238,7 +238,7 @@ class ContainerApiMixin(object):
                          memswap_limit=None, cpuset=None, host_config=None,
                          mac_address=None, labels=None, volume_driver=None,
                          stop_signal=None, networking_config=None,
-                         healthcheck=None, stop_timeout=None):
+                         healthcheck=None, stop_timeout=None, runtime=None):
         """
         Creates a container. Parameters are similar to those for the ``docker
         run`` command except it doesn't support the attach options (``-a``).
@@ -391,8 +391,6 @@ class ContainerApiMixin(object):
                 ``{"PASSWORD": "xxx"}``.
             dns (:py:class:`list`): DNS name servers. Deprecated since API
                 version 1.10. Use ``host_config`` instead.
-            dns_opt (:py:class:`list`): Additional options to be added to the
-                container's ``resolv.conf`` file
             volumes (str or list): List of paths inside the container to use
                 as volumes.
             volumes_from (:py:class:`list`): List of container names or Ids to
@@ -417,6 +415,9 @@ class ContainerApiMixin(object):
                 Default: 10
             networking_config (dict): A networking configuration generated
                 by :py:meth:`create_networking_config`.
+            runtime (str): Runtime to use with this container.
             healthcheck (dict): Specify a test to perform to check that the
                 container is healthy.

         Returns:
             A dictionary with an image 'Id' key and a 'Warnings' key.
@@ -441,7 +442,7 @@ class ContainerApiMixin(object):
             network_disabled, entrypoint, cpu_shares, working_dir, domainname,
             memswap_limit, cpuset, host_config, mac_address, labels,
             volume_driver, stop_signal, networking_config, healthcheck,
-            stop_timeout
+            stop_timeout, runtime
         )
         return self.create_container_from_config(config, name)

@@ -495,6 +496,8 @@ class ContainerApiMixin(object):
                 to have read-write access to the host's ``/dev/sda`` via a
                 node named ``/dev/xvda`` inside the container.
             dns (:py:class:`list`): Set custom DNS servers.
+            dns_opt (:py:class:`list`): Additional options to be added to the
+                container's ``resolv.conf`` file
             dns_search (:py:class:`list`): DNS search domains.
             extra_hosts (dict): Addtional hostnames to resolve inside the
                 container, as a mapping of hostname to IP address.
@@ -576,6 +579,7 @@ class ContainerApiMixin(object):
                 values are: ``host``
             volumes_from (:py:class:`list`): List of container names or IDs to
                 get volumes from.
+            runtime (str): Runtime to use with this container.


         Returns:

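The new ``runtime`` argument is forwarded to ``HostConfig.Runtime``, so a container can be pinned to an alternative OCI runtime at creation time (API 1.25 and later). A short sketch mirroring the integration test added in this commit; ``runc`` is the stock runtime and any other value must already be registered with the daemon:

    import docker

    client = docker.APIClient(version='auto')
    container = client.create_container(
        'busybox', ['echo', 'hello'],
        runtime='runc',     # requires API version >= 1.25
    )
    client.start(container)
    print(client.inspect_container(container)['HostConfig']['Runtime'])  # 'runc'
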
@ -659,7 +663,7 @@ class ContainerApiMixin(object):
|
|||
"""
|
||||
return EndpointConfig(self._version, *args, **kwargs)
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
def diff(self, container):
|
||||
"""
|
||||
Inspect changes on a container's filesystem.
|
||||
|
@ -678,7 +682,7 @@ class ContainerApiMixin(object):
|
|||
self._get(self._url("/containers/{0}/changes", container)), True
|
||||
)
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
def export(self, container):
|
||||
"""
|
||||
Export the contents of a filesystem as a tar archive.
|
||||
|
@ -699,7 +703,7 @@ class ContainerApiMixin(object):
|
|||
self._raise_for_status(res)
|
||||
return res.raw
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
@utils.minimum_version('1.20')
|
||||
def get_archive(self, container, path):
|
||||
"""
|
||||
|
@ -730,7 +734,7 @@ class ContainerApiMixin(object):
|
|||
utils.decode_json_header(encoded_stat) if encoded_stat else None
|
||||
)
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
def inspect_container(self, container):
|
||||
"""
|
||||
Identical to the `docker inspect` command, but only for containers.
|
||||
|
@ -750,7 +754,7 @@ class ContainerApiMixin(object):
|
|||
self._get(self._url("/containers/{0}/json", container)), True
|
||||
)
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
def kill(self, container, signal=None):
|
||||
"""
|
||||
Kill a container or send a signal to a container.
|
||||
|
@ -773,7 +777,7 @@ class ContainerApiMixin(object):
|
|||
|
||||
self._raise_for_status(res)
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
def logs(self, container, stdout=True, stderr=True, stream=False,
|
||||
timestamps=False, tail='all', since=None, follow=None):
|
||||
"""
|
||||
|
@@ -825,6 +829,11 @@ class ContainerApiMixin(object):
                 params['since'] = utils.datetime_to_timestamp(since)
             elif (isinstance(since, int) and since > 0):
                 params['since'] = since
+            else:
+                raise errors.InvalidArgument(
+                    'since value should be datetime or int, not {}'.
+                    format(type(since))
+                )
         url = self._url("/containers/{0}/logs", container)
         res = self._get(url, params=params, stream=stream)
         return self._get_result(container, stream, res)

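With this change an unsupported ``since`` type now fails loudly instead of being silently dropped. A sketch of the two accepted forms; the container name is a placeholder:

    from datetime import datetime, timedelta

    import docker

    client = docker.APIClient(version='auto')
    # Both of these are accepted: a datetime or a positive Unix timestamp.
    client.logs('my-container', since=datetime.utcnow() - timedelta(minutes=5))
    client.logs('my-container', since=1500000000)
    # Any other type now raises docker.errors.InvalidArgument.
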
@ -836,7 +845,7 @@ class ContainerApiMixin(object):
|
|||
logs=True
|
||||
)
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
def pause(self, container):
|
||||
"""
|
||||
Pauses all processes within a container.
|
||||
|
@ -852,7 +861,7 @@ class ContainerApiMixin(object):
|
|||
res = self._post(url)
|
||||
self._raise_for_status(res)
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
def port(self, container, private_port):
|
||||
"""
|
||||
Lookup the public-facing port that is NAT-ed to ``private_port``.
|
||||
|
@ -901,7 +910,7 @@ class ContainerApiMixin(object):
|
|||
|
||||
return h_ports
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
@utils.minimum_version('1.20')
|
||||
def put_archive(self, container, path, data):
|
||||
"""
|
||||
|
@ -949,7 +958,7 @@ class ContainerApiMixin(object):
|
|||
url = self._url('/containers/prune')
|
||||
return self._result(self._post(url, params=params), True)
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
def remove_container(self, container, v=False, link=False, force=False):
|
||||
"""
|
||||
Remove a container. Similar to the ``docker rm`` command.
|
||||
|
@ -973,7 +982,7 @@ class ContainerApiMixin(object):
|
|||
self._raise_for_status(res)
|
||||
|
||||
@utils.minimum_version('1.17')
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
def rename(self, container, name):
|
||||
"""
|
||||
Rename a container. Similar to the ``docker rename`` command.
|
||||
|
@ -991,7 +1000,7 @@ class ContainerApiMixin(object):
|
|||
res = self._post(url, params=params)
|
||||
self._raise_for_status(res)
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
def resize(self, container, height, width):
|
||||
"""
|
||||
Resize the tty session.
|
||||
|
@ -1010,7 +1019,7 @@ class ContainerApiMixin(object):
|
|||
res = self._post(url, params=params)
|
||||
self._raise_for_status(res)
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
def restart(self, container, timeout=10):
|
||||
"""
|
||||
Restart a container. Similar to the ``docker restart`` command.
|
||||
|
@ -1031,7 +1040,7 @@ class ContainerApiMixin(object):
|
|||
res = self._post(url, params=params)
|
||||
self._raise_for_status(res)
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
def start(self, container, *args, **kwargs):
|
||||
"""
|
||||
Start a container. Similar to the ``docker start`` command, but
|
||||
|
@ -1070,7 +1079,7 @@ class ContainerApiMixin(object):
|
|||
self._raise_for_status(res)
|
||||
|
||||
@utils.minimum_version('1.17')
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
def stats(self, container, decode=None, stream=True):
|
||||
"""
|
||||
Stream statistics for a specific container. Similar to the
|
||||
|
@ -1096,7 +1105,7 @@ class ContainerApiMixin(object):
|
|||
return self._result(self._get(url, params={'stream': False}),
|
||||
json=True)
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
def stop(self, container, timeout=10):
|
||||
"""
|
||||
Stops a container. Similar to the ``docker stop`` command.
|
||||
|
@ -1117,7 +1126,7 @@ class ContainerApiMixin(object):
|
|||
timeout=(timeout + (self.timeout or 0)))
|
||||
self._raise_for_status(res)
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
def top(self, container, ps_args=None):
|
||||
"""
|
||||
Display the running processes of a container.
|
||||
|
@ -1139,7 +1148,7 @@ class ContainerApiMixin(object):
|
|||
params['ps_args'] = ps_args
|
||||
return self._result(self._get(u, params=params), True)
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
def unpause(self, container):
|
||||
"""
|
||||
Unpause all processes within a container.
|
||||
|
@ -1152,7 +1161,7 @@ class ContainerApiMixin(object):
|
|||
self._raise_for_status(res)
|
||||
|
||||
@utils.minimum_version('1.22')
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
def update_container(
|
||||
self, container, blkio_weight=None, cpu_period=None, cpu_quota=None,
|
||||
cpu_shares=None, cpuset_cpus=None, cpuset_mems=None, mem_limit=None,
|
||||
|
@ -1217,7 +1226,7 @@ class ContainerApiMixin(object):
|
|||
res = self._post_json(url, data=data)
|
||||
return self._result(res, True)
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
def wait(self, container, timeout=None):
|
||||
"""
|
||||
Block until a container stops, then return its exit code. Similar to
|
||||
|
|
|
@ -6,7 +6,7 @@ from .. import utils
|
|||
|
||||
class ExecApiMixin(object):
|
||||
@utils.minimum_version('1.15')
|
||||
@utils.check_resource
|
||||
@utils.check_resource('container')
|
||||
def exec_create(self, container, cmd, stdout=True, stderr=True,
|
||||
stdin=False, tty=False, privileged=False, user='',
|
||||
environment=None):
|
||||
|
@ -110,7 +110,7 @@ class ExecApiMixin(object):
|
|||
self._raise_for_status(res)
|
||||
|
||||
@utils.minimum_version('1.15')
|
||||
@utils.check_resource
|
||||
@utils.check_resource('exec_id')
|
||||
def exec_start(self, exec_id, detach=False, tty=False, stream=False,
|
||||
socket=False):
|
||||
"""
|
||||
|
|
|
@ -12,7 +12,7 @@ log = logging.getLogger(__name__)
|
|||
|
||||
class ImageApiMixin(object):
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('image')
|
||||
def get_image(self, image):
|
||||
"""
|
||||
Get a tarball of an image. Similar to the ``docker save`` command.
|
||||
|
@ -39,7 +39,7 @@ class ImageApiMixin(object):
|
|||
self._raise_for_status(res)
|
||||
return res.raw
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('image')
|
||||
def history(self, image):
|
||||
"""
|
||||
Show the history of an image.
|
||||
|
@ -228,7 +228,7 @@ class ImageApiMixin(object):
|
|||
image=image, repository=repository, tag=tag, changes=changes
|
||||
)
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('image')
|
||||
def insert(self, image, url, path):
|
||||
if utils.compare_version('1.12', self._version) >= 0:
|
||||
raise errors.DeprecatedMethod(
|
||||
|
@ -241,7 +241,7 @@ class ImageApiMixin(object):
|
|||
}
|
||||
return self._result(self._post(api_url, params=params))
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('image')
|
||||
def inspect_image(self, image):
|
||||
"""
|
||||
Get detailed information about an image. Similar to the ``docker
|
||||
|
@ -443,7 +443,7 @@ class ImageApiMixin(object):
|
|||
|
||||
return self._result(response)
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('image')
|
||||
def remove_image(self, image, force=False, noprune=False):
|
||||
"""
|
||||
Remove an image. Similar to the ``docker rmi`` command.
|
||||
|
@ -477,7 +477,7 @@ class ImageApiMixin(object):
|
|||
True
|
||||
)
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('image')
|
||||
def tag(self, image, repository, tag=None, force=False):
|
||||
"""
|
||||
Tag an image into a repository. Similar to the ``docker tag`` command.
|
||||
|
|
|
docker/api/network.py

@@ -41,7 +41,8 @@ class NetworkApiMixin(object):
     @minimum_version('1.21')
     def create_network(self, name, driver=None, options=None, ipam=None,
                        check_duplicate=None, internal=False, labels=None,
-                       enable_ipv6=False, attachable=None, scope=None):
+                       enable_ipv6=False, attachable=None, scope=None,
+                       ingress=None):
         """
         Create a network. Similar to the ``docker network create``.

@@ -60,6 +61,8 @@ class NetworkApiMixin(object):
             attachable (bool): If enabled, and the network is in the global
                 scope, non-service containers on worker nodes will be able to
                 connect to the network.
+            ingress (bool): If set, create an ingress network which provides
+                the routing-mesh in swarm mode.

         Returns:
             (dict): The created network reference object
@@ -129,6 +132,14 @@ class NetworkApiMixin(object):
                 )
             data['Attachable'] = attachable

+        if ingress is not None:
+            if version_lt(self._version, '1.29'):
+                raise InvalidVersion(
+                    'ingress is not supported in API version < 1.29'
+                )
+
+            data['Ingress'] = ingress
+
         url = self._url("/networks/create")
         res = self._post_json(url, data=data)
         return self._result(res, json=True)

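A minimal sketch of creating a replacement routing-mesh network with the new flag, mirroring the integration test further down. It assumes the daemon is already a swarm manager and that the stock ``ingress`` network has been removed first; the network name is a placeholder:

    import docker

    client = docker.APIClient(version='auto')
    net = client.create_network(
        'my-ingress', driver='overlay', ingress=True,   # requires API >= 1.29
    )
    print(client.inspect_network(net['Id'])['Ingress'])  # True
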
@ -156,6 +167,7 @@ class NetworkApiMixin(object):
|
|||
return self._result(self._post(url, params=params), True)
|
||||
|
||||
@minimum_version('1.21')
|
||||
@check_resource('net_id')
|
||||
def remove_network(self, net_id):
|
||||
"""
|
||||
Remove a network. Similar to the ``docker network rm`` command.
|
||||
|
@ -168,6 +180,7 @@ class NetworkApiMixin(object):
|
|||
self._raise_for_status(res)
|
||||
|
||||
@minimum_version('1.21')
|
||||
@check_resource('net_id')
|
||||
def inspect_network(self, net_id, verbose=None):
|
||||
"""
|
||||
Get detailed information about a network.
|
||||
|
@ -187,7 +200,7 @@ class NetworkApiMixin(object):
|
|||
res = self._get(url, params=params)
|
||||
return self._result(res, json=True)
|
||||
|
||||
@check_resource
|
||||
@check_resource('image')
|
||||
@minimum_version('1.21')
|
||||
def connect_container_to_network(self, container, net_id,
|
||||
ipv4_address=None, ipv6_address=None,
|
||||
|
@ -224,7 +237,7 @@ class NetworkApiMixin(object):
|
|||
res = self._post_json(url, data=data)
|
||||
self._raise_for_status(res)
|
||||
|
||||
@check_resource
|
||||
@check_resource('image')
|
||||
@minimum_version('1.21')
|
||||
def disconnect_container_from_network(self, container, net_id,
|
||||
force=False):
|
||||
|
|
|
@ -5,7 +5,7 @@ from .. import auth, utils
|
|||
|
||||
class PluginApiMixin(object):
|
||||
@utils.minimum_version('1.25')
|
||||
@utils.check_resource
|
||||
@utils.check_resource('name')
|
||||
def configure_plugin(self, name, options):
|
||||
"""
|
||||
Configure a plugin.
|
||||
|
@ -171,7 +171,7 @@ class PluginApiMixin(object):
|
|||
return self._result(self._get(url, params=params), True)
|
||||
|
||||
@utils.minimum_version('1.25')
|
||||
@utils.check_resource
|
||||
@utils.check_resource('name')
|
||||
def push_plugin(self, name):
|
||||
"""
|
||||
Push a plugin to the registry.
|
||||
|
@ -195,7 +195,7 @@ class PluginApiMixin(object):
|
|||
return self._stream_helper(res, decode=True)
|
||||
|
||||
@utils.minimum_version('1.25')
|
||||
@utils.check_resource
|
||||
@utils.check_resource('name')
|
||||
def remove_plugin(self, name, force=False):
|
||||
"""
|
||||
Remove an installed plugin.
|
||||
|
@ -215,7 +215,7 @@ class PluginApiMixin(object):
|
|||
return True
|
||||
|
||||
@utils.minimum_version('1.26')
|
||||
@utils.check_resource
|
||||
@utils.check_resource('name')
|
||||
def upgrade_plugin(self, name, remote, privileges):
|
||||
"""
|
||||
Upgrade an installed plugin.
|
||||
|
|
|
@ -36,7 +36,7 @@ class SecretApiMixin(object):
|
|||
)
|
||||
|
||||
@utils.minimum_version('1.25')
|
||||
@utils.check_resource
|
||||
@utils.check_resource('id')
|
||||
def inspect_secret(self, id):
|
||||
"""
|
||||
Retrieve secret metadata
|
||||
|
@ -54,7 +54,7 @@ class SecretApiMixin(object):
|
|||
return self._result(self._get(url), True)
|
||||
|
||||
@utils.minimum_version('1.25')
|
||||
@utils.check_resource
|
||||
@utils.check_resource('id')
|
||||
def remove_secret(self, id):
|
||||
"""
|
||||
Remove a secret
|
||||
|
|
|
docker/api/service.py

@@ -3,6 +3,48 @@ from .. import auth, errors, utils
 from ..types import ServiceMode


+def _check_api_features(version, task_template, update_config):
+    if update_config is not None:
+        if utils.version_lt(version, '1.25'):
+            if 'MaxFailureRatio' in update_config:
+                raise errors.InvalidVersion(
+                    'UpdateConfig.max_failure_ratio is not supported in'
+                    ' API version < 1.25'
+                )
+            if 'Monitor' in update_config:
+                raise errors.InvalidVersion(
+                    'UpdateConfig.monitor is not supported in'
+                    ' API version < 1.25'
+                )
+
+    if task_template is not None:
+        if 'ForceUpdate' in task_template and utils.version_lt(
+                version, '1.25'):
+            raise errors.InvalidVersion(
+                'force_update is not supported in API version < 1.25'
+            )
+
+        if task_template.get('Placement'):
+            if utils.version_lt(version, '1.30'):
+                if task_template['Placement'].get('Platforms'):
+                    raise errors.InvalidVersion(
+                        'Placement.platforms is not supported in'
+                        ' API version < 1.30'
+                    )
+
+            if utils.version_lt(version, '1.27'):
+                if task_template['Placement'].get('Preferences'):
+                    raise errors.InvalidVersion(
+                        'Placement.preferences is not supported in'
+                        ' API version < 1.27'
+                    )
+        if task_template.container_spec.get('TTY'):
+            if utils.version_lt(version, '1.25'):
+                raise errors.InvalidVersion(
+                    'ContainerSpec.TTY is not supported in API version < 1.25'
+                )
+
+
 class ServiceApiMixin(object):
     @utils.minimum_version('1.24')
     def create_service(

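This helper centralizes the version guards that were previously duplicated inside ``create_service`` and ``update_service`` (the duplicated blocks are removed in the hunks below), and it also covers the new ``tty`` and placement options. A hedged sketch of how the check surfaces to a caller; the service and image names are illustrative:

    import docker
    from docker import types

    client = docker.APIClient(version='auto')
    spec = types.ContainerSpec('busybox', ['top'], tty=True)
    template = types.TaskTemplate(spec)
    # Against a daemon that reports API < 1.25 this now fails up front with
    # docker.errors.InvalidVersion instead of an opaque server-side error.
    service_id = client.create_service(template, name='tty-demo')
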
@ -43,6 +85,8 @@ class ServiceApiMixin(object):
|
|||
)
|
||||
endpoint_spec = endpoint_config
|
||||
|
||||
_check_api_features(self._version, task_template, update_config)
|
||||
|
||||
url = self._url('/services/create')
|
||||
headers = {}
|
||||
image = task_template.get('ContainerSpec', {}).get('Image', None)
|
||||
|
@ -67,17 +111,6 @@ class ServiceApiMixin(object):
|
|||
}
|
||||
|
||||
if update_config is not None:
|
||||
if utils.version_lt(self._version, '1.25'):
|
||||
if 'MaxFailureRatio' in update_config:
|
||||
raise errors.InvalidVersion(
|
||||
'UpdateConfig.max_failure_ratio is not supported in'
|
||||
' API version < 1.25'
|
||||
)
|
||||
if 'Monitor' in update_config:
|
||||
raise errors.InvalidVersion(
|
||||
'UpdateConfig.monitor is not supported in'
|
||||
' API version < 1.25'
|
||||
)
|
||||
data['UpdateConfig'] = update_config
|
||||
|
||||
return self._result(
|
||||
|
@ -85,7 +118,7 @@ class ServiceApiMixin(object):
|
|||
)
|
||||
|
||||
@utils.minimum_version('1.24')
|
||||
@utils.check_resource
|
||||
@utils.check_resource('service')
|
||||
def inspect_service(self, service):
|
||||
"""
|
||||
Return information about a service.
|
||||
|
@ -104,7 +137,7 @@ class ServiceApiMixin(object):
|
|||
return self._result(self._get(url), True)
|
||||
|
||||
@utils.minimum_version('1.24')
|
||||
@utils.check_resource
|
||||
@utils.check_resource('task')
|
||||
def inspect_task(self, task):
|
||||
"""
|
||||
Retrieve information about a task.
|
||||
|
@ -123,7 +156,7 @@ class ServiceApiMixin(object):
|
|||
return self._result(self._get(url), True)
|
||||
|
||||
@utils.minimum_version('1.24')
|
||||
@utils.check_resource
|
||||
@utils.check_resource('service')
|
||||
def remove_service(self, service):
|
||||
"""
|
||||
Stop and remove a service.
|
||||
|
@ -167,7 +200,7 @@ class ServiceApiMixin(object):
|
|||
return self._result(self._get(url, params=params), True)
|
||||
|
||||
@utils.minimum_version('1.25')
|
||||
@utils.check_resource
|
||||
@utils.check_resource('service')
|
||||
def service_logs(self, service, details=False, follow=False, stdout=False,
|
||||
stderr=False, since=0, timestamps=False, tail='all',
|
||||
is_tty=None):
|
||||
|
@ -241,7 +274,7 @@ class ServiceApiMixin(object):
|
|||
return self._result(self._get(url, params=params), True)
|
||||
|
||||
@utils.minimum_version('1.24')
|
||||
@utils.check_resource
|
||||
@utils.check_resource('service')
|
||||
def update_service(self, service, version, task_template=None, name=None,
|
||||
labels=None, mode=None, update_config=None,
|
||||
networks=None, endpoint_config=None,
|
||||
|
@ -282,6 +315,8 @@ class ServiceApiMixin(object):
|
|||
)
|
||||
endpoint_spec = endpoint_config
|
||||
|
||||
_check_api_features(self._version, task_template, update_config)
|
||||
|
||||
url = self._url('/services/{0}/update', service)
|
||||
data = {}
|
||||
headers = {}
|
||||
|
@ -294,12 +329,6 @@ class ServiceApiMixin(object):
|
|||
mode = ServiceMode(mode)
|
||||
data['Mode'] = mode
|
||||
if task_template is not None:
|
||||
if 'ForceUpdate' in task_template and utils.version_lt(
|
||||
self._version, '1.25'):
|
||||
raise errors.InvalidVersion(
|
||||
'force_update is not supported in API version < 1.25'
|
||||
)
|
||||
|
||||
image = task_template.get('ContainerSpec', {}).get('Image', None)
|
||||
if image is not None:
|
||||
registry, repo_name = auth.resolve_repository_name(image)
|
||||
|
@ -308,17 +337,6 @@ class ServiceApiMixin(object):
|
|||
headers['X-Registry-Auth'] = auth_header
|
||||
data['TaskTemplate'] = task_template
|
||||
if update_config is not None:
|
||||
if utils.version_lt(self._version, '1.25'):
|
||||
if 'MaxFailureRatio' in update_config:
|
||||
raise errors.InvalidVersion(
|
||||
'UpdateConfig.max_failure_ratio is not supported in'
|
||||
' API version < 1.25'
|
||||
)
|
||||
if 'Monitor' in update_config:
|
||||
raise errors.InvalidVersion(
|
||||
'UpdateConfig.monitor is not supported in'
|
||||
' API version < 1.25'
|
||||
)
|
||||
data['UpdateConfig'] = update_config
|
||||
|
||||
if networks is not None:
|
||||
|
|
|
@ -117,7 +117,7 @@ class SwarmApiMixin(object):
|
|||
url = self._url('/swarm')
|
||||
return self._result(self._get(url), True)
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('node_id')
|
||||
@utils.minimum_version('1.24')
|
||||
def inspect_node(self, node_id):
|
||||
"""
|
||||
|
@ -228,7 +228,7 @@ class SwarmApiMixin(object):
|
|||
|
||||
return self._result(self._get(url, params=params), True)
|
||||
|
||||
@utils.check_resource
|
||||
@utils.check_resource('node_id')
|
||||
@utils.minimum_version('1.24')
|
||||
def remove_node(self, node_id, force=False):
|
||||
"""
|
||||
|
|
|
docker/auth.py

@@ -70,6 +70,15 @@ def split_repo_name(repo_name):
     return tuple(parts)


+def get_credential_store(authconfig, registry):
+    if not registry or registry == INDEX_NAME:
+        registry = 'https://index.docker.io/v1/'
+
+    return authconfig.get('credHelpers', {}).get(registry) or authconfig.get(
+        'credsStore'
+    )
+
+
 def resolve_authconfig(authconfig, registry=None):
     """
     Returns the authentication data from the given auth configuration for a
@@ -77,13 +86,17 @@ def resolve_authconfig(authconfig, registry=None):
     with full URLs are stripped down to hostnames before checking for a match.
     Returns None if no match was found.
     """
-    if 'credsStore' in authconfig:
-        log.debug(
-            'Using credentials store "{0}"'.format(authconfig['credsStore'])
-        )
-        return _resolve_authconfig_credstore(
-            authconfig, registry, authconfig['credsStore']
-        )
+    if 'credHelpers' in authconfig or 'credsStore' in authconfig:
+        store_name = get_credential_store(authconfig, registry)
+        if store_name is not None:
+            log.debug(
+                'Using credentials store "{0}"'.format(store_name)
+            )
+            return _resolve_authconfig_credstore(
+                authconfig, registry, store_name
+            )
+
     # Default to the public index server
     registry = resolve_index_name(registry) if registry else INDEX_NAME
     log.debug("Looking for auth entry for {0}".format(repr(registry)))
@@ -274,6 +287,9 @@ def load_config(config_path=None):
         if data.get('credsStore'):
             log.debug("Found 'credsStore' section")
             res.update({'credsStore': data['credsStore']})
+        if data.get('credHelpers'):
+            log.debug("Found 'credHelpers' section")
+            res.update({'credHelpers': data['credHelpers']})
         if res:
             return res
         else:

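``get_credential_store`` maps a registry to the helper configured under ``credHelpers`` and falls back to the global ``credsStore``. A small sketch with an illustrative configuration; the registry name and helper names are placeholders:

    from docker.auth import get_credential_store

    authconfig = {
        'credHelpers': {'registry.example.com': 'ecr-login'},
        'credsStore': 'osxkeychain',
    }
    # A per-registry helper wins...
    print(get_credential_store(authconfig, 'registry.example.com'))  # 'ecr-login'
    # ...and any other registry falls back to the global credsStore.
    print(get_credential_store(authconfig, 'quay.io'))               # 'osxkeychain'
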
docker/client.py

@@ -1,4 +1,5 @@
 from .api.client import APIClient
+from .constants import DEFAULT_TIMEOUT_SECONDS
 from .models.containers import ContainerCollection
 from .models.images import ImageCollection
 from .models.networks import NetworkCollection
@@ -73,7 +74,7 @@ class DockerClient(object):
         .. _`SSL version`:
             https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
         """
-        timeout = kwargs.pop('timeout', None)
+        timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT_SECONDS)
         version = kwargs.pop('version', None)
         return cls(timeout=timeout, version=version,
                    **kwargs_from_env(**kwargs))
@@ -119,6 +120,7 @@ class DockerClient(object):
         """
         return PluginCollection(client=self)

+    @property
     def secrets(self):
         """
         An object for managing secrets on the server. See the

@ -516,6 +516,8 @@ class ContainerCollection(Collection):
|
|||
container, as a mapping of hostname to IP address.
|
||||
group_add (:py:class:`list`): List of additional group names and/or
|
||||
IDs that the container process will run as.
|
||||
healthcheck (dict): Specify a test to perform to check that the
|
||||
container is healthy.
|
||||
hostname (str): Optional hostname for the container.
|
||||
init (bool): Run an init inside the container that forwards
|
||||
signals and reaps processes
|
||||
|
@ -659,6 +661,7 @@ class ContainerCollection(Collection):
|
|||
volumes_from (:py:class:`list`): List of container names or IDs to
|
||||
get volumes from.
|
||||
working_dir (str): Path to the working directory.
|
||||
runtime (str): Runtime to use with this container.
|
||||
|
||||
Returns:
|
||||
The container logs, either ``STDOUT``, ``STDERR``, or both,
|
||||
|
@ -885,6 +888,7 @@ RUN_HOST_CONFIG_KWARGS = [
|
|||
'userns_mode',
|
||||
'version',
|
||||
'volumes_from',
|
||||
'runtime'
|
||||
]
|
||||
|
||||
|
||||
|
|
|
docker/models/images.py

@@ -96,7 +96,7 @@ class Image(Model):
         Returns:
             (bool): ``True`` if successful
         """
-        self.client.api.tag(self.id, repository, tag=tag, **kwargs)
+        return self.client.api.tag(self.id, repository, tag=tag, **kwargs)


 class ImageCollection(Collection):

@ -126,9 +126,6 @@ class ImageCollection(Collection):
|
|||
rm (bool): Remove intermediate containers. The ``docker build``
|
||||
command now defaults to ``--rm=true``, but we have kept the old
|
||||
default of `False` to preserve backward compatibility
|
||||
stream (bool): *Deprecated for API version > 1.8 (always True)*.
|
||||
Return a blocking generator you can iterate over to retrieve
|
||||
build output as it happens
|
||||
timeout (int): HTTP timeout
|
||||
custom_context (bool): Optional if using ``fileobj``
|
||||
encoding (str): The encoding for a stream. Set to ``gzip`` for
|
||||
|
@ -147,10 +144,15 @@ class ImageCollection(Collection):
|
|||
- cpushares (int): CPU shares (relative weight)
|
||||
- cpusetcpus (str): CPUs in which to allow execution, e.g.,
|
||||
``"0-3"``, ``"0,1"``
|
||||
decode (bool): If set to ``True``, the returned stream will be
|
||||
decoded into dicts on the fly. Default ``False``.
|
||||
shmsize (int): Size of `/dev/shm` in bytes. The size must be
|
||||
greater than 0. If omitted the system uses 64MB
|
||||
labels (dict): A dictionary of labels to set on the image
|
||||
cache_from (list): A list of images used for build cache
|
||||
resolution.
|
||||
resolution
|
||||
target (str): Name of the build-stage to build in a multi-stage
|
||||
Dockerfile
|
||||
network_mode (str): networking mode for the run commands during
|
||||
build
|
||||
|
||||
Returns:
|
||||
(:py:class:`Image`): The built image.
|
||||
|
@@ -167,19 +169,20 @@ class ImageCollection(Collection):
         if isinstance(resp, six.string_types):
             return self.get(resp)
         last_event = None
+        image_id = None
         for chunk in json_stream(resp):
             if 'error' in chunk:
                 raise BuildError(chunk['error'])
             if 'stream' in chunk:
                 match = re.search(
-                    r'(Successfully built |sha256:)([0-9a-f]+)',
+                    r'(^Successfully built |sha256:)([0-9a-f]+)$',
                     chunk['stream']
                 )
                 if match:
                     image_id = match.group(2)
-                    return self.get(image_id)
             last_event = chunk
+
+        if image_id:
+            return self.get(image_id)
         raise BuildError(last_event or 'Unknown')

     def get(self, name):

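At the model layer this fix means ``DockerClient.images.build`` returns the image that was actually produced, rather than the first ID that happened to appear in the stream. A hedged sketch; the path and tag are placeholders:

    import docker

    client = docker.from_env()
    image = client.images.build(path='./app', tag='app:dev', rm=True)
    print(image.id)    # the sha256 ID parsed from the final build output
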
@ -111,6 +111,8 @@ class NetworkCollection(Collection):
|
|||
labels (dict): Map of labels to set on the network. Default
|
||||
``None``.
|
||||
enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
|
||||
ingress (bool): If set, create an ingress network which provides
|
||||
the routing-mesh in swarm mode.
|
||||
|
||||
Returns:
|
||||
(:py:class:`Network`): The network that was created.
|
||||
|
|
|
@ -146,6 +146,7 @@ class ServiceCollection(Collection):
|
|||
of the service. Default: ``None``
|
||||
user (str): User to run commands as.
|
||||
workdir (str): Working directory for commands to run.
|
||||
tty (boolean): Whether a pseudo-TTY should be allocated.
|
||||
|
||||
Returns:
|
||||
(:py:class:`Service`) The created service.
|
||||
|
@ -212,6 +213,7 @@ CONTAINER_SPEC_KWARGS = [
|
|||
'mounts',
|
||||
'stop_grace_period',
|
||||
'secrets',
|
||||
'tty'
|
||||
]
|
||||
|
||||
# kwargs to copy straight over to TaskTemplate
|
||||
|
|
|
@ -69,6 +69,11 @@ class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
|
|||
|
||||
|
||||
class NpipeAdapter(requests.adapters.HTTPAdapter):
|
||||
|
||||
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['npipe_path',
|
||||
'pools',
|
||||
'timeout']
|
||||
|
||||
def __init__(self, base_url, timeout=60,
|
||||
pool_connections=constants.DEFAULT_NUM_POOLS):
|
||||
self.npipe_path = base_url.replace('npipe://', '')
|
||||
|
|
|
@ -25,6 +25,10 @@ if sys.version_info[0] < 3 or sys.version_info[1] < 5:
|
|||
class SSLAdapter(HTTPAdapter):
|
||||
'''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
|
||||
|
||||
__attrs__ = HTTPAdapter.__attrs__ + ['assert_fingerprint',
|
||||
'assert_hostname',
|
||||
'ssl_version']
|
||||
|
||||
def __init__(self, ssl_version=None, assert_hostname=None,
|
||||
assert_fingerprint=None, **kwargs):
|
||||
self.ssl_version = ssl_version
|
||||
|
|
|
@ -50,6 +50,11 @@ class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
|
|||
|
||||
|
||||
class UnixAdapter(requests.adapters.HTTPAdapter):
|
||||
|
||||
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools',
|
||||
'socket_path',
|
||||
'timeout']
|
||||
|
||||
def __init__(self, socket_url, timeout=60,
|
||||
pool_connections=constants.DEFAULT_NUM_POOLS):
|
||||
socket_path = socket_url.replace('http+unix://', '')
|
||||
|
|
|
@ -3,7 +3,7 @@ from .containers import ContainerConfig, HostConfig, LogConfig, Ulimit
|
|||
from .healthcheck import Healthcheck
|
||||
from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig
|
||||
from .services import (
|
||||
ContainerSpec, DriverConfig, EndpointSpec, Mount, Resources, RestartPolicy,
|
||||
SecretReference, ServiceMode, TaskTemplate, UpdateConfig
|
||||
ContainerSpec, DriverConfig, EndpointSpec, Mount, Placement, Resources,
|
||||
RestartPolicy, SecretReference, ServiceMode, TaskTemplate, UpdateConfig
|
||||
)
|
||||
from .swarm import SwarmSpec, SwarmExternalCA
|
||||
|
|
|
@ -120,7 +120,7 @@ class HostConfig(dict):
|
|||
isolation=None, auto_remove=False, storage_opt=None,
|
||||
init=None, init_path=None, volume_driver=None,
|
||||
cpu_count=None, cpu_percent=None, nano_cpus=None,
|
||||
cpuset_mems=None):
|
||||
cpuset_mems=None, runtime=None):
|
||||
|
||||
if mem_limit is not None:
|
||||
self['Memory'] = parse_bytes(mem_limit)
|
||||
|
@ -466,13 +466,18 @@ class HostConfig(dict):
|
|||
self['CpuPercent'] = cpu_percent
|
||||
|
||||
if nano_cpus:
|
||||
if not isinstance(nano_cpus, int):
|
||||
if not isinstance(nano_cpus, six.integer_types):
|
||||
raise host_config_type_error('nano_cpus', nano_cpus, 'int')
|
||||
if version_lt(version, '1.25'):
|
||||
raise host_config_version_error('nano_cpus', '1.25')
|
||||
|
||||
self['NanoCpus'] = nano_cpus
|
||||
|
||||
if runtime:
|
||||
if version_lt(version, '1.25'):
|
||||
raise host_config_version_error('runtime', '1.25')
|
||||
self['Runtime'] = runtime
|
||||
|
||||
|
||||
def host_config_type_error(param, param_value, expected):
|
||||
error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
|
||||
|
@ -499,7 +504,7 @@ class ContainerConfig(dict):
|
|||
working_dir=None, domainname=None, memswap_limit=None, cpuset=None,
|
||||
host_config=None, mac_address=None, labels=None, volume_driver=None,
|
||||
stop_signal=None, networking_config=None, healthcheck=None,
|
||||
stop_timeout=None
|
||||
stop_timeout=None, runtime=None
|
||||
):
|
||||
if version_gte(version, '1.10'):
|
||||
message = ('{0!r} parameter has no effect on create_container().'
|
||||
|
@ -560,10 +565,17 @@ class ContainerConfig(dict):
|
|||
'stop_timeout was only introduced in API version 1.25'
|
||||
)
|
||||
|
||||
if healthcheck is not None and version_lt(version, '1.24'):
|
||||
raise errors.InvalidVersion(
|
||||
'Health options were only introduced in API version 1.24'
|
||||
)
|
||||
if healthcheck is not None:
|
||||
if version_lt(version, '1.24'):
|
||||
raise errors.InvalidVersion(
|
||||
'Health options were only introduced in API version 1.24'
|
||||
)
|
||||
|
||||
if version_lt(version, '1.29') and 'StartPeriod' in healthcheck:
|
||||
raise errors.InvalidVersion(
|
||||
'healthcheck start period was introduced in API '
|
||||
'version 1.29'
|
||||
)
|
||||
|
||||
if isinstance(command, six.string_types):
|
||||
command = split_command(command)
|
||||
|
@ -659,5 +671,6 @@ class ContainerConfig(dict):
|
|||
'VolumeDriver': volume_driver,
|
||||
'StopSignal': stop_signal,
|
||||
'Healthcheck': healthcheck,
|
||||
'StopTimeout': stop_timeout
|
||||
'StopTimeout': stop_timeout,
|
||||
'Runtime': runtime
|
||||
})
|
||||
|
|
|
docker/types/healthcheck.py

@@ -12,12 +12,14 @@ class Healthcheck(DictType):
         interval = kwargs.get('interval', kwargs.get('Interval'))
         timeout = kwargs.get('timeout', kwargs.get('Timeout'))
         retries = kwargs.get('retries', kwargs.get('Retries'))
+        start_period = kwargs.get('start_period', kwargs.get('StartPeriod'))

         super(Healthcheck, self).__init__({
             'Test': test,
             'Interval': interval,
             'Timeout': timeout,
-            'Retries': retries
+            'Retries': retries,
+            'StartPeriod': start_period
         })

     @property
@@ -51,3 +53,11 @@ class Healthcheck(DictType):
     @retries.setter
     def retries(self, value):
         self['Retries'] = value
+
+    @property
+    def start_period(self):
+        return self['StartPeriod']
+
+    @start_period.setter
+    def start_period(self, value):
+        self['StartPeriod'] = value

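``start_period`` gives a container a grace window before failing probes count against ``retries`` (API 1.29 and later). A minimal sketch using the dict form accepted by ``create_container``; durations are nanoseconds, as in the integration test added below:

    import docker

    SECOND = 1000000000    # healthcheck durations are expressed in nanoseconds

    client = docker.APIClient(version='auto')
    container = client.create_container(
        'busybox', 'top',
        healthcheck=dict(
            test='true',
            interval=1 * SECOND,
            timeout=1 * SECOND,
            retries=1,
            start_period=3 * SECOND,   # failures in the first 3 seconds are ignored
        ),
    )
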
docker/types/services.py

@@ -20,7 +20,9 @@ class TaskTemplate(dict):
             individual container created as part of the service.
         restart_policy (RestartPolicy): Specification for the restart policy
             which applies to containers created as part of this service.
-        placement (:py:class:`list`): A list of constraints.
+        placement (Placement): Placement instructions for the scheduler.
+            If a list is passed instead, it is assumed to be a list of
+            constraints as part of a :py:class:`Placement` object.
         force_update (int): A counter that triggers an update even if no
             relevant parameters have been changed.
     """
@@ -33,7 +35,7 @@ class TaskTemplate(dict):
             self['RestartPolicy'] = restart_policy
         if placement:
             if isinstance(placement, list):
-                placement = {'Constraints': placement}
+                placement = Placement(constraints=placement)
             self['Placement'] = placement
         if log_driver:
             self['LogDriver'] = log_driver

@ -82,10 +84,11 @@ class ContainerSpec(dict):
|
|||
terminate before forcefully killing it.
|
||||
secrets (list of py:class:`SecretReference`): List of secrets to be
|
||||
made available inside the containers.
|
||||
tty (boolean): Whether a pseudo-TTY should be allocated.
|
||||
"""
|
||||
def __init__(self, image, command=None, args=None, hostname=None, env=None,
|
||||
workdir=None, user=None, labels=None, mounts=None,
|
||||
stop_grace_period=None, secrets=None):
|
||||
stop_grace_period=None, secrets=None, tty=None):
|
||||
self['Image'] = image
|
||||
|
||||
if isinstance(command, six.string_types):
|
||||
|
@ -123,6 +126,9 @@ class ContainerSpec(dict):
|
|||
raise TypeError('secrets must be a list')
|
||||
self['Secrets'] = secrets
|
||||
|
||||
if tty is not None:
|
||||
self['TTY'] = tty
|
||||
|
||||
|
||||
class Mount(dict):
|
||||
"""
|
||||
|
@ -441,7 +447,7 @@ class SecretReference(dict):
|
|||
gid (string): GID of the secret file's group. Default: 0
|
||||
mode (int): File access mode inside the container. Default: 0o444
|
||||
"""
|
||||
@check_resource
|
||||
@check_resource('secret_id')
|
||||
def __init__(self, secret_id, secret_name, filename=None, uid=None,
|
||||
gid=None, mode=0o444):
|
||||
self['SecretName'] = secret_name
|
||||
|
@@ -452,3 +458,28 @@ class SecretReference(dict):
             'GID': gid or '0',
             'Mode': mode
         }
+
+
+class Placement(dict):
+    """
+    Placement constraints to be used as part of a :py:class:`TaskTemplate`
+
+    Args:
+        constraints (list): A list of constraints
+        preferences (list): Preferences provide a way to make the
+            scheduler aware of factors such as topology. They are provided
+            in order from highest to lowest precedence.
+        platforms (list): A list of platforms expressed as ``(arch, os)``
+            tuples
+    """
+    def __init__(self, constraints=None, preferences=None, platforms=None):
+        if constraints is not None:
+            self['Constraints'] = constraints
+        if preferences is not None:
+            self['Preferences'] = preferences
+        if platforms:
+            self['Platforms'] = []
+            for plat in platforms:
+                self['Platforms'].append({
+                    'Architecture': plat[0], 'OS': plat[1]
+                })

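A short sketch tying ``Placement`` and ``TaskTemplate`` together; the constraint, preference and service names are placeholders, and the comments note the minimum API versions enforced by ``_check_api_features``:

    import docker
    from docker import types

    client = docker.APIClient(version='auto')
    spec = types.ContainerSpec('busybox', ['true'])
    placement = types.Placement(
        constraints=['node.role==worker'],                                  # classic constraints
        preferences=[{'Spread': {'SpreadDescriptor': 'node.labels.zone'}}],  # API >= 1.27
        platforms=[('x86_64', 'linux')],                                     # API >= 1.30
    )
    template = types.TaskTemplate(spec, placement=placement)
    client.create_service(template, name='placed-service')
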
docker/utils/decorators.py

@@ -4,22 +4,21 @@ from .. import errors
 from . import utils


-def check_resource(f):
-    @functools.wraps(f)
-    def wrapped(self, resource_id=None, *args, **kwargs):
-        if resource_id is None:
-            if kwargs.get('container'):
-                resource_id = kwargs.pop('container')
-            elif kwargs.get('image'):
-                resource_id = kwargs.pop('image')
-        if isinstance(resource_id, dict):
-            resource_id = resource_id.get('Id', resource_id.get('ID'))
-        if not resource_id:
-            raise errors.NullResource(
-                'Resource ID was not provided'
-            )
-        return f(self, resource_id, *args, **kwargs)
-    return wrapped
+def check_resource(resource_name):
+    def decorator(f):
+        @functools.wraps(f)
+        def wrapped(self, resource_id=None, *args, **kwargs):
+            if resource_id is None and kwargs.get(resource_name):
+                resource_id = kwargs.pop(resource_name)
+            if isinstance(resource_id, dict):
+                resource_id = resource_id.get('Id', resource_id.get('ID'))
+            if not resource_id:
+                raise errors.NullResource(
+                    'Resource ID was not provided'
+                )
+            return f(self, resource_id, *args, **kwargs)
+        return wrapped
+    return decorator


 def minimum_version(version):

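The decorator is now a factory that names the keyword argument carrying the resource, which is why every ``check_resource`` usage in the hunks above gained an explicit argument, and why keyword-passed resource IDs no longer trigger spurious ``NullResource`` errors. A small sketch of the pattern; ``DemoApiMixin`` and ``restart_demo`` are illustrative names, not part of the library:

    from docker.utils.decorators import check_resource

    class DemoApiMixin(object):
        @check_resource('container')
        def restart_demo(self, container, timeout=10):
            # ``container`` may arrive positionally, as a keyword, or as a dict
            # with an 'Id' key; the decorator normalizes it to the plain ID.
            return container

    print(DemoApiMixin().restart_demo(container={'Id': 'abc123'}))  # 'abc123'
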
docker/utils/ports.py

@@ -1,3 +1,16 @@
+import re
+
+PORT_SPEC = re.compile(
+    "^"  # Match full string
+    "("  # External part
+    "((?P<host>[a-fA-F\d.:]+):)?"  # Address
+    "(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:"  # External range
+    ")?"
+    "(?P<int>[\d]+)(-(?P<int_end>[\d]+))?"  # Internal range
+    "(?P<proto>/(udp|tcp))?"  # Protocol
+    "$"  # Match full string
+)
+
+
 def add_port_mapping(port_bindings, internal_port, external):
     if internal_port in port_bindings:

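The single regex is what lets a port specification carry an optional bind address (including IPv6) in one pass; the remainder of this file's diff rewrites ``split_port`` on top of it. A rough sketch of the resulting behaviour, under my reading of the new code:

    from docker.utils.ports import split_port

    # Returns (internal container ports, external bindings).
    print(split_port('8080'))                      # (['8080'], None)
    print(split_port('127.0.0.1:1000:2000/udp'))   # (['2000/udp'], [('127.0.0.1', '1000')])
    print(split_port('::1:80:8080'))               # (['8080'], [('::1', '80')])
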
@ -24,81 +37,41 @@ def build_port_bindings(ports):
|
|||
return port_bindings
|
||||
|
||||
|
||||
def to_port_range(port, randomly_available_port=False):
|
||||
if not port:
|
||||
return None
|
||||
|
||||
protocol = ""
|
||||
if "/" in port:
|
||||
parts = port.split("/")
|
||||
if len(parts) != 2:
|
||||
_raise_invalid_port(port)
|
||||
|
||||
port, protocol = parts
|
||||
protocol = "/" + protocol
|
||||
|
||||
if randomly_available_port:
|
||||
return ["%s%s" % (port, protocol)]
|
||||
|
||||
parts = str(port).split('-')
|
||||
|
||||
if len(parts) == 1:
|
||||
return ["%s%s" % (port, protocol)]
|
||||
|
||||
if len(parts) == 2:
|
||||
full_port_range = range(int(parts[0]), int(parts[1]) + 1)
|
||||
return ["%s%s" % (p, protocol) for p in full_port_range]
|
||||
|
||||
raise ValueError('Invalid port range "%s", should be '
|
||||
'port or startport-endport' % port)
|
||||
|
||||
|
||||
def _raise_invalid_port(port):
|
||||
raise ValueError('Invalid port "%s", should be '
|
||||
'[[remote_ip:]remote_port[-remote_port]:]'
|
||||
'port[/protocol]' % port)
|
||||
|
||||
|
||||
def port_range(start, end, proto, randomly_available_port=False):
|
||||
if not start:
|
||||
return start
|
||||
if not end:
|
||||
return [start + proto]
|
||||
if randomly_available_port:
|
||||
return ['{}-{}'.format(start, end) + proto]
|
||||
return [str(port) + proto for port in range(int(start), int(end) + 1)]
|
||||
|
||||
|
||||
def split_port(port):
|
||||
parts = str(port).split(':')
|
||||
|
||||
if not 1 <= len(parts) <= 3:
|
||||
match = PORT_SPEC.match(port)
|
||||
if match is None:
|
||||
_raise_invalid_port(port)
|
||||
parts = match.groupdict()
|
||||
|
||||
if len(parts) == 1:
|
||||
internal_port, = parts
|
||||
if not internal_port:
|
||||
_raise_invalid_port(port)
|
||||
return to_port_range(internal_port), None
|
||||
if len(parts) == 2:
|
||||
external_port, internal_port = parts
|
||||
host = parts['host']
|
||||
proto = parts['proto'] or ''
|
||||
internal = port_range(parts['int'], parts['int_end'], proto)
|
||||
external = port_range(
|
||||
parts['ext'], parts['ext_end'], '', len(internal) == 1)
|
||||
|
||||
internal_range = to_port_range(internal_port)
|
||||
if internal_range is None:
|
||||
_raise_invalid_port(port)
|
||||
|
||||
external_range = to_port_range(external_port, len(internal_range) == 1)
|
||||
if external_range is None:
|
||||
_raise_invalid_port(port)
|
||||
|
||||
if len(internal_range) != len(external_range):
|
||||
if host is None:
|
||||
if external is not None and len(internal) != len(external):
|
||||
raise ValueError('Port ranges don\'t match in length')
|
||||
|
||||
return internal_range, external_range
|
||||
|
||||
external_ip, external_port, internal_port = parts
|
||||
|
||||
if not internal_port:
|
||||
_raise_invalid_port(port)
|
||||
|
||||
internal_range = to_port_range(internal_port)
|
||||
external_range = to_port_range(external_port, len(internal_range) == 1)
|
||||
|
||||
if not external_range:
|
||||
external_range = [None] * len(internal_range)
|
||||
|
||||
if len(internal_range) != len(external_range):
|
||||
raise ValueError('Port ranges don\'t match in length')
|
||||
|
||||
return internal_range, [(external_ip, ex_port or None)
|
||||
for ex_port in external_range]
|
||||
return internal, external
|
||||
else:
|
||||
if not external:
|
||||
external = [None] * len(internal)
|
||||
elif len(internal) != len(external):
|
||||
raise ValueError('Port ranges don\'t match in length')
|
||||
return internal, [(host, ext_port) for ext_port in external]
|
||||
|
|
|
docker/version.py

@@ -1,2 +1,2 @@
-version = "2.3.0"
+version = "2.4.0"
 version_info = tuple([int(d) for d in version.split("-")[0].split(".")])

docs/api.rst

@@ -128,6 +128,7 @@ Configuration types
 .. autoclass:: DriverConfig
 .. autoclass:: EndpointSpec
 .. autoclass:: Mount
+.. autoclass:: Placement
 .. autoclass:: Resources
 .. autoclass:: RestartPolicy
 .. autoclass:: SecretReference

docs/change-log.md

@@ -1,6 +1,44 @@
 Change log
 ==========

+2.4.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/33?closed=1)
+
+### Features
+
+* Added support for the `target` and `network_mode` parameters in
+  `APIClient.build` and `DockerClient.images.build`.
+* Added support for the `runtime` parameter in `APIClient.create_container`
+  and `DockerClient.containers.run`.
+* Added support for the `ingress` parameter in `APIClient.create_network` and
+  `DockerClient.networks.create`.
+* Added support for `placement` configuration in `docker.types.TaskTemplate`.
+* Added support for `tty` configuration in `docker.types.ContainerSpec`.
+* Added support for `start_period` configuration in `docker.types.Healthcheck`.
+* The `credHelpers` section in Docker's configuration file is now recognized.
+* Port specifications including IPv6 endpoints are now supported.
+
+### Bugfixes
+
+* Fixed a bug where instantiating a `DockerClient` using `docker.from_env`
+  wouldn't correctly set the default timeout value.
+* Fixed a bug where `DockerClient.secrets` was not accessible as a property.
+* Fixed a bug where `DockerClient.build` would sometimes return the wrong
+  image.
+* Fixed a bug where values for `HostConfig.nano_cpus` exceeding 2^32 would
+  raise a type error.
+* `Image.tag` now properly returns `True` when the operation is successful.
+* `APIClient.logs` and `Container.logs` now raise an exception if the `since`
+  argument uses an unsupported type instead of ignoring the value.
+* Fixed a bug where some methods would raise a `NullResource` exception when
+  the resource ID was provided using a keyword argument.
+
+### Miscellaneous
+
+* `APIClient` instances can now be pickled.
+
 2.3.0
 -----

setup.py

@@ -20,7 +20,7 @@ ROOT_DIR = os.path.dirname(__file__)
 SOURCE_DIR = os.path.join(ROOT_DIR)

 requirements = [
-    'requests >= 2.5.2, != 2.11.0, != 2.12.2',
+    'requests >= 2.5.2, != 2.11.0, != 2.12.2, != 2.18.0',
     'six >= 1.4.0',
     'websocket-client >= 0.32.0',
     'docker-pycreds >= 0.2.1'

|
|||
|
||||
from docker import errors
|
||||
|
||||
import pytest
|
||||
import six
|
||||
|
||||
from .base import BaseAPIIntegrationTest
|
||||
|
@ -189,6 +190,60 @@ class BuildTest(BaseAPIIntegrationTest):
|
|||
counter += 1
|
||||
assert counter == 0
|
||||
|
||||
@requires_api_version('1.29')
|
||||
def test_build_container_with_target(self):
|
||||
script = io.BytesIO('\n'.join([
|
||||
'FROM busybox as first',
|
||||
'RUN mkdir -p /tmp/test',
|
||||
'RUN touch /tmp/silence.tar.gz',
|
||||
'FROM alpine:latest',
|
||||
'WORKDIR /root/'
|
||||
'COPY --from=first /tmp/silence.tar.gz .',
|
||||
'ONBUILD RUN echo "This should not be in the final image"'
|
||||
]).encode('ascii'))
|
||||
|
||||
stream = self.client.build(
|
||||
fileobj=script, target='first', tag='build1'
|
||||
)
|
||||
self.tmp_imgs.append('build1')
|
||||
for chunk in stream:
|
||||
pass
|
||||
|
||||
info = self.client.inspect_image('build1')
|
||||
self.assertEqual(info['Config']['OnBuild'], [])
|
||||
|
||||
@requires_api_version('1.25')
|
||||
def test_build_with_network_mode(self):
|
||||
script = io.BytesIO('\n'.join([
|
||||
'FROM busybox',
|
||||
'RUN wget http://google.com'
|
||||
]).encode('ascii'))
|
||||
|
||||
stream = self.client.build(
|
||||
fileobj=script, network_mode='bridge',
|
||||
tag='dockerpytest_bridgebuild'
|
||||
)
|
||||
|
||||
self.tmp_imgs.append('dockerpytest_bridgebuild')
|
||||
for chunk in stream:
|
||||
pass
|
||||
|
||||
assert self.client.inspect_image('dockerpytest_bridgebuild')
|
||||
|
||||
script.seek(0)
|
||||
stream = self.client.build(
|
||||
fileobj=script, network_mode='none',
|
||||
tag='dockerpytest_nonebuild', nocache=True, decode=True
|
||||
)
|
||||
|
||||
self.tmp_imgs.append('dockerpytest_nonebuild')
|
||||
logs = [chunk for chunk in stream]
|
||||
assert 'errorDetail' in logs[-1]
|
||||
assert logs[-1]['errorDetail']['code'] == 1
|
||||
|
||||
with pytest.raises(errors.NotFound):
|
||||
self.client.inspect_image('dockerpytest_nonebuild')
|
||||
|
||||
def test_build_stderr_data(self):
|
||||
control_chars = ['\x1b[91m', '\x1b[0m']
|
||||
snippet = 'Ancient Temple (Mystic Oriental Dream ~ Ancient Temple)'
|
||||
|
|
|
@ -1139,7 +1139,9 @@ class PauseTest(BaseAPIIntegrationTest):
|
|||
class PruneTest(BaseAPIIntegrationTest):
|
||||
@requires_api_version('1.25')
|
||||
def test_prune_containers(self):
|
||||
container1 = self.client.create_container(BUSYBOX, ['echo', 'hello'])
|
||||
container1 = self.client.create_container(
|
||||
BUSYBOX, ['sh', '-c', 'echo hello > /data.txt']
|
||||
)
|
||||
container2 = self.client.create_container(BUSYBOX, ['sleep', '9999'])
|
||||
self.client.start(container1)
|
||||
self.client.start(container2)
|
||||
|
@ -1255,6 +1257,15 @@ class ContainerCPUTest(BaseAPIIntegrationTest):
|
|||
inspect_data = self.client.inspect_container(container)
|
||||
self.assertEqual(inspect_data['HostConfig']['CpusetCpus'], cpuset_cpus)
|
||||
|
||||
@requires_api_version('1.25')
|
||||
def test_create_with_runtime(self):
|
||||
container = self.client.create_container(
|
||||
BUSYBOX, ['echo', 'test'], runtime='runc'
|
||||
)
|
||||
self.tmp_containers.append(container['Id'])
|
||||
config = self.client.inspect_container(container)
|
||||
assert config['HostConfig']['Runtime'] == 'runc'
|
||||
|
||||
|
||||
class LinkTest(BaseAPIIntegrationTest):
|
||||
def test_remove_link(self):
|
||||
|
|
|
@ -28,8 +28,8 @@ class HealthcheckTest(BaseAPIIntegrationTest):
|
|||
container = self.client.create_container(
|
||||
BUSYBOX, 'top', healthcheck=dict(
|
||||
test="true",
|
||||
interval=1*SECOND,
|
||||
timeout=1*SECOND,
|
||||
interval=1 * SECOND,
|
||||
timeout=1 * SECOND,
|
||||
retries=1,
|
||||
))
|
||||
self.tmp_containers.append(container)
|
||||
|
@ -41,10 +41,27 @@ class HealthcheckTest(BaseAPIIntegrationTest):
|
|||
container = self.client.create_container(
|
||||
BUSYBOX, 'top', healthcheck=dict(
|
||||
test="false",
|
||||
interval=1*SECOND,
|
||||
timeout=1*SECOND,
|
||||
interval=1 * SECOND,
|
||||
timeout=1 * SECOND,
|
||||
retries=1,
|
||||
))
|
||||
self.tmp_containers.append(container)
|
||||
self.client.start(container)
|
||||
wait_on_health_status(self.client, container, "unhealthy")
|
||||
|
||||
@helpers.requires_api_version('1.29')
|
||||
def test_healthcheck_start_period(self):
|
||||
container = self.client.create_container(
|
||||
BUSYBOX, 'top', healthcheck=dict(
|
||||
test="echo 'x' >> /counter.txt && "
|
||||
"test `cat /counter.txt | wc -l` -ge 3",
|
||||
interval=1 * SECOND,
|
||||
timeout=1 * SECOND,
|
||||
retries=1,
|
||||
start_period=3 * SECOND
|
||||
)
|
||||
)
|
||||
|
||||
self.tmp_containers.append(container)
|
||||
self.client.start(container)
|
||||
wait_on_health_status(self.client, container, "healthy")
|
||||
|
|
|
@ -42,7 +42,7 @@ class PullImageTest(BaseAPIIntegrationTest):
            self.client.remove_image('hello-world')
        except docker.errors.APIError:
            pass
        res = self.client.pull('hello-world')
        res = self.client.pull('hello-world', tag='latest')
        self.tmp_imgs.append('hello-world')
        self.assertEqual(type(res), six.text_type)
        self.assertGreaterEqual(
@ -56,7 +56,8 @@ class PullImageTest(BaseAPIIntegrationTest):
            self.client.remove_image('hello-world')
        except docker.errors.APIError:
            pass
        stream = self.client.pull('hello-world', stream=True, decode=True)
        stream = self.client.pull(
            'hello-world', tag='latest', stream=True, decode=True)
        self.tmp_imgs.append('hello-world')
        for chunk in stream:
            assert isinstance(chunk, dict)
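
# Passing tag='latest' explicitly, as the two pull hunks above now do,
# presumably keeps the tests from pulling every tag of the repository, since
# the Engine pulls all tags when an image is requested without one. A minimal
# sketch of a streamed, decoded pull against a running daemon:
import docker

client = docker.APIClient()
for chunk in client.pull('hello-world', tag='latest', stream=True, decode=True):
    print(chunk.get('status', ''))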
@ -300,7 +301,7 @@ class PruneImagesTest(BaseAPIIntegrationTest):
        ctnr = self.client.create_container(BUSYBOX, ['sleep', '9999'])
        self.tmp_containers.append(ctnr)

        self.client.pull('hello-world')
        self.client.pull('hello-world', tag='latest')
        self.tmp_imgs.append('hello-world')
        img_id = self.client.inspect_image('hello-world')['Id']
        result = self.client.prune_images()
@ -452,6 +452,14 @@ class TestNetworks(BaseAPIIntegrationTest):
        net = self.client.inspect_network(net_id)
        assert net['Attachable'] is True

    @requires_api_version('1.29')
    def test_create_network_ingress(self):
        assert self.client.init_swarm('eth0')
        self.client.remove_network('ingress')
        _, net_id = self.create_network(driver='overlay', ingress=True)
        net = self.client.inspect_network(net_id)
        assert net['Ingress'] is True

    @requires_api_version('1.25')
    def test_prune_networks(self):
        net_name, _ = self.create_network()
@ -270,6 +270,49 @@ class ServiceTest(BaseAPIIntegrationTest):
        assert (svc_info['Spec']['TaskTemplate']['Placement'] ==
                {'Constraints': ['node.id=={}'.format(node_id)]})

    def test_create_service_with_placement_object(self):
        node_id = self.client.nodes()[0]['ID']
        container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
        placemt = docker.types.Placement(
            constraints=['node.id=={}'.format(node_id)]
        )
        task_tmpl = docker.types.TaskTemplate(
            container_spec, placement=placemt
        )
        name = self.get_service_name()
        svc_id = self.client.create_service(task_tmpl, name=name)
        svc_info = self.client.inspect_service(svc_id)
        assert 'Placement' in svc_info['Spec']['TaskTemplate']
        assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt

    @requires_api_version('1.30')
    def test_create_service_with_placement_platform(self):
        container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
        placemt = docker.types.Placement(platforms=[('x86_64', 'linux')])
        task_tmpl = docker.types.TaskTemplate(
            container_spec, placement=placemt
        )
        name = self.get_service_name()
        svc_id = self.client.create_service(task_tmpl, name=name)
        svc_info = self.client.inspect_service(svc_id)
        assert 'Placement' in svc_info['Spec']['TaskTemplate']
        assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt

    @requires_api_version('1.27')
    def test_create_service_with_placement_preferences(self):
        container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
        placemt = docker.types.Placement(preferences=[
            {'Spread': {'SpreadDescriptor': 'com.dockerpy.test'}}
        ])
        task_tmpl = docker.types.TaskTemplate(
            container_spec, placement=placemt
        )
        name = self.get_service_name()
        svc_id = self.client.create_service(task_tmpl, name=name)
        svc_info = self.client.inspect_service(svc_id)
        assert 'Placement' in svc_info['Spec']['TaskTemplate']
        assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt

    def test_create_service_with_endpoint_spec(self):
        container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
        task_tmpl = docker.types.TaskTemplate(container_spec)
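
# docker.types.Placement wraps the three scheduling hints exercised above:
# constraints, preferences (API 1.27+) and platforms (API 1.30+). A minimal
# sketch combining them for a single service; the constraint, spread label and
# service name are illustrative values, and the client is assumed to point at
# a swarm manager.
import docker

client = docker.APIClient(version='1.30')
spec = docker.types.ContainerSpec('busybox', ['true'])
placement = docker.types.Placement(
    constraints=['node.role==worker'],
    preferences=[{'Spread': {'SpreadDescriptor': 'node.labels.zone'}}],
    platforms=[('x86_64', 'linux')],
)
task = docker.types.TaskTemplate(spec, placement=placement)
svc_id = client.create_service(task, name='placement-example')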
@ -316,6 +359,23 @@ class ServiceTest(BaseAPIIntegrationTest):
        assert 'Env' in con_spec
        assert con_spec['Env'] == ['DOCKER_PY_TEST=1']

    @requires_api_version('1.25')
    def test_create_service_with_tty(self):
        container_spec = docker.types.ContainerSpec(
            BUSYBOX, ['true'], tty=True
        )
        task_tmpl = docker.types.TaskTemplate(
            container_spec,
        )
        name = self.get_service_name()
        svc_id = self.client.create_service(task_tmpl, name=name)
        svc_info = self.client.inspect_service(svc_id)
        assert 'TaskTemplate' in svc_info['Spec']
        assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
        con_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
        assert 'TTY' in con_spec
        assert con_spec['TTY'] is True

    def test_create_service_global_mode(self):
        container_spec = docker.types.ContainerSpec(
            BUSYBOX, ['echo', 'hello']
@ -173,4 +173,4 @@ class SwarmTest(BaseAPIIntegrationTest):
        with pytest.raises(docker.errors.APIError) as e:
            self.client.remove_node(node_id, True)

        assert e.value.response.status_code == 500
        assert e.value.response.status_code >= 400
@ -39,6 +39,17 @@ class ImageCollectionTest(BaseIntegrationTest):
        self.tmp_imgs.append(image.id)
        assert client.containers.run(image) == b"hello world\n"

    def test_build_with_success_build_output(self):
        client = docker.from_env(version=TEST_API_VERSION)
        image = client.images.build(
            tag='dup-txt-tag', fileobj=io.BytesIO(
                "FROM alpine\n"
                "CMD echo Successfully built abcd1234".encode('ascii')
            )
        )
        self.tmp_imgs.append(image.id)
        assert client.containers.run(image) == b"Successfully built abcd1234\n"

    def test_list(self):
        client = docker.from_env(version=TEST_API_VERSION)
        image = client.images.pull('alpine:latest')
@ -71,7 +82,8 @@ class ImageTest(BaseIntegrationTest):
        client = docker.from_env(version=TEST_API_VERSION)
        image = client.images.pull('alpine:latest')

        image.tag(repo, tag)
        result = image.tag(repo, tag)
        assert result is True
        self.tmp_imgs.append(identifier)
        assert image.id in get_ids(client.images.list(repo))
        assert image.id in get_ids(client.images.list(identifier))
@ -1421,6 +1421,13 @@ class ContainerTest(BaseAPIClientTest):
            stream=False
        )

    def test_log_since_with_invalid_value_raises_error(self):
        with mock.patch('docker.api.client.APIClient.inspect_container',
                        fake_inspect_container):
            with self.assertRaises(docker.errors.InvalidArgument):
                self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
                                 follow=False, since=42.42)

    def test_log_tty(self):
        m = mock.Mock()
        with mock.patch('docker.api.client.APIClient.inspect_container',
@ -272,6 +272,57 @@ class ResolveAuthTest(unittest.TestCase):
        )


class CredStoreTest(unittest.TestCase):
    def test_get_credential_store(self):
        auth_config = {
            'credHelpers': {
                'registry1.io': 'truesecret',
                'registry2.io': 'powerlock'
            },
            'credsStore': 'blackbox',
        }

        assert auth.get_credential_store(
            auth_config, 'registry1.io'
        ) == 'truesecret'
        assert auth.get_credential_store(
            auth_config, 'registry2.io'
        ) == 'powerlock'
        assert auth.get_credential_store(
            auth_config, 'registry3.io'
        ) == 'blackbox'

    def test_get_credential_store_no_default(self):
        auth_config = {
            'credHelpers': {
                'registry1.io': 'truesecret',
                'registry2.io': 'powerlock'
            },
        }
        assert auth.get_credential_store(
            auth_config, 'registry2.io'
        ) == 'powerlock'
        assert auth.get_credential_store(
            auth_config, 'registry3.io'
        ) is None

    def test_get_credential_store_default_index(self):
        auth_config = {
            'credHelpers': {
                'https://index.docker.io/v1/': 'powerlock'
            },
            'credsStore': 'truesecret'
        }

        assert auth.get_credential_store(auth_config, None) == 'powerlock'
        assert auth.get_credential_store(
            auth_config, 'docker.io'
        ) == 'powerlock'
        assert auth.get_credential_store(
            auth_config, 'images.io'
        ) == 'truesecret'


class FindConfigFileTest(unittest.TestCase):
    def tmpdir(self, name):
        tmpdir = ensuretemp(name)
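
# get_credential_store picks the helper used for a registry: a per-registry
# entry in credHelpers wins over the credsStore default, and passing no
# registry (or docker.io) resolves to the official index URL first, as the
# tests above check. A minimal sketch of the lookup; the registry and helper
# names are arbitrary examples.
from docker import auth

auth_config = {
    'credHelpers': {'registry1.io': 'truesecret'},
    'credsStore': 'blackbox',
}
print(auth.get_credential_store(auth_config, 'registry1.io'))  # truesecret
print(auth.get_credential_store(auth_config, 'other.io'))      # blackbox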
@ -1,6 +1,9 @@
import datetime
import docker
from docker.utils import kwargs_from_env
from docker.constants import (
    DEFAULT_DOCKER_API_VERSION, DEFAULT_TIMEOUT_SECONDS
)
import os
import unittest

@ -96,3 +99,13 @@ class FromEnvTest(unittest.TestCase):
        client = docker.from_env(version='2.32')
        self.assertEqual(client.api.base_url, "https://192.168.59.103:2376")
        self.assertEqual(client.api._version, '2.32')

    def test_from_env_without_version_uses_default(self):
        client = docker.from_env()

        self.assertEqual(client.api._version, DEFAULT_DOCKER_API_VERSION)

    def test_from_env_without_timeout_uses_default(self):
        client = docker.from_env()

        self.assertEqual(client.api.timeout, DEFAULT_TIMEOUT_SECONDS)
@ -552,6 +552,12 @@ class PortsTest(unittest.TestCase):
        self.assertEqual(external_port,
                         [("127.0.0.1", "1000"), ("127.0.0.1", "1001")])

    def test_split_port_with_ipv6_address(self):
        internal_port, external_port = split_port(
            "2001:abcd:ef00::2:1000:2000")
        self.assertEqual(internal_port, ["2000"])
        self.assertEqual(external_port, [("2001:abcd:ef00::2", "1000")])

    def test_split_port_invalid(self):
        self.assertRaises(ValueError,
                          lambda: split_port("0.0.0.0:1000:2000:tcp"))
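
# split_port turns a port mapping string into (container_ports, host_bindings);
# with an IPv6 host address the last two colon-separated fields are still read
# as the host port and container port, as the new test above checks. A minimal
# sketch using the same docker.utils import these tests rely on:
from docker.utils import split_port

internal, external = split_port("2001:abcd:ef00::2:1000:2000")
print(internal)  # ['2000']
print(external)  # [('2001:abcd:ef00::2', '1000')]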