Mirror of https://github.com/docker/docker-py.git
Commit: 5d42ab81c3
@@ -4,8 +4,6 @@ matrix:
   include:
     - python: 2.7
       env: TOXENV=py27
-    - python: 3.4
-      env: TOXENV=py34
     - python: 3.5
       env: TOXENV=py35
     - python: 3.6
@@ -24,8 +24,8 @@ def buildImages = { ->
     imageNamePy2 = "${imageNameBase}:py2-${gitCommit()}"
     imageNamePy3 = "${imageNameBase}:py3-${gitCommit()}"
 
-    buildImage(imageNamePy2, ".", "py2.7")
-    buildImage(imageNamePy3, "-f Dockerfile-py3 .", "py3.6")
+    buildImage(imageNamePy2, "-f tests/Dockerfile --build-arg PYTHON_VERSION=2.7 .", "py2.7")
+    buildImage(imageNamePy3, "-f tests/Dockerfile --build-arg PYTHON_VERSION=3.6 .", "py3.6")
   }
 }
@@ -6,3 +6,4 @@ include LICENSE
 recursive-include tests *.py
 recursive-include tests/unit/testdata *
 recursive-include tests/integration/testdata *
+recursive-include tests/gpg-keys *
Makefile
@@ -8,11 +8,11 @@ clean:
 
 .PHONY: build
 build:
-	docker build -t docker-sdk-python .
+	docker build -t docker-sdk-python -f tests/Dockerfile --build-arg PYTHON_VERSION=2.7 .
 
 .PHONY: build-py3
 build-py3:
-	docker build -t docker-sdk-python3 -f Dockerfile-py3 .
+	docker build -t docker-sdk-python3 -f tests/Dockerfile .
 
 .PHONY: build-docs
 build-docs:

@@ -39,10 +39,10 @@ integration-test: build
 
 .PHONY: integration-test-py3
 integration-test-py3: build-py3
-	docker run -t --rm -v /var/run/docker.sock:/var/run/docker.sock docker-sdk-python3 py.test tests/integration/${file}
+	docker run -t --rm -v /var/run/docker.sock:/var/run/docker.sock docker-sdk-python3 py.test -v tests/integration/${file}
 
 TEST_API_VERSION ?= 1.35
-TEST_ENGINE_VERSION ?= 17.12.0-ce
+TEST_ENGINE_VERSION ?= 18.09.5
 
 .PHONY: setup-network
 setup-network:
@@ -20,7 +20,7 @@ class BuildApiMixin(object):
               decode=False, buildargs=None, gzip=False, shmsize=None,
               labels=None, cache_from=None, target=None, network_mode=None,
               squash=None, extra_hosts=None, platform=None, isolation=None,
-              use_config_proxy=False):
+              use_config_proxy=True):
        """
        Similar to the ``docker build`` command. Either ``path`` or ``fileobj``
        needs to be set. ``path`` can be a local path (to a directory

@@ -121,6 +121,7 @@ class BuildApiMixin(object):
        remote = context = None
        headers = {}
        container_limits = container_limits or {}
+       buildargs = buildargs or {}
        if path is None and fileobj is None:
            raise TypeError("Either path or fileobj needs to be provided.")
        if gzip and encoding is not None:
@@ -82,7 +82,7 @@ class APIClient(
        base_url (str): URL to the Docker server. For example,
            ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
        version (str): The version of the API to use. Set to ``auto`` to
-           automatically detect the server's version. Default: ``1.30``
+           automatically detect the server's version. Default: ``1.35``
        timeout (int): Default timeout for API calls, in seconds.
        tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
            ``True`` to enable it with default options, or pass a
@@ -1,13 +1,15 @@
-import six
 from datetime import datetime
 
+import six
+
 from .. import errors
 from .. import utils
 from ..constants import DEFAULT_DATA_CHUNK_SIZE
-from ..types import (
-    CancellableStream, ContainerConfig, EndpointConfig, HostConfig,
-    NetworkingConfig
-)
+from ..types import CancellableStream
+from ..types import ContainerConfig
+from ..types import EndpointConfig
+from ..types import HostConfig
+from ..types import NetworkingConfig
 
 
 class ContainerApiMixin(object):

@@ -222,7 +224,7 @@ class ContainerApiMixin(object):
               mac_address=None, labels=None, stop_signal=None,
               networking_config=None, healthcheck=None,
               stop_timeout=None, runtime=None,
-              use_config_proxy=False):
+              use_config_proxy=True):
        """
        Creates a container. Parameters are similar to those for the ``docker
        run`` command except it doesn't support the attach options (``-a``).

@@ -414,7 +416,7 @@ class ContainerApiMixin(object):
        if use_config_proxy:
            environment = self._proxy_configs.inject_proxy_environment(
                environment
-           )
+           ) or None
 
        config = self.create_container_config(
            image, command, hostname, user, detach, stdin_open, tty,

@@ -487,7 +489,6 @@ class ContainerApiMixin(object):
                IDs that the container process will run as.
            init (bool): Run an init inside the container that forwards
                signals and reaps processes
-           init_path (str): Path to the docker-init binary
            ipc_mode (str): Set the IPC mode for the container.
            isolation (str): Isolation technology to use. Default: ``None``.
            links (dict): Mapping of links using the

@@ -512,7 +513,7 @@ class ContainerApiMixin(object):
            network_mode (str): One of:
 
                - ``bridge`` Create a new network stack for the container on
-                 on the bridge network.
+                 the bridge network.
                - ``none`` No networking for this container.
                - ``container:<name|id>`` Reuse another container's network
                  stack.
@@ -915,9 +916,10 @@ class ContainerApiMixin(object):
        if '/' in private_port:
            return port_settings.get(private_port)
 
-       h_ports = port_settings.get(private_port + '/tcp')
-       if h_ports is None:
-           h_ports = port_settings.get(private_port + '/udp')
+       for protocol in ['tcp', 'udp', 'sctp']:
+           h_ports = port_settings.get(private_port + '/' + protocol)
+           if h_ports:
+               break
 
        return h_ports
 
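The loop above makes ``APIClient.port`` probe ``tcp``, ``udp``, and now ``sctp`` when the private port is given without a protocol. A minimal sketch of the resulting behavior, assuming a local daemon; the image and binding below are illustrative and not part of the commit:

```python
import docker

client = docker.APIClient(base_url='unix:///var/run/docker.sock')
container = client.create_container(
    'alpine', ['sleep', '60'], ports=[(9999, 'sctp')],
    host_config=client.create_host_config(port_bindings={'9999/sctp': 9999}),
)
client.start(container)

# port() now tries 9999/tcp, then 9999/udp, then 9999/sctp until one matches
print(client.port(container, 9999))
# e.g. [{'HostIp': '0.0.0.0', 'HostPort': '9999'}]
```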
@@ -137,7 +137,8 @@ class ExecApiMixin(object):
            (generator or str or tuple): If ``stream=True``, a generator
            yielding response chunks. If ``socket=True``, a socket object for
            the connection. A string containing response data otherwise. If
-           ``demux=True``, stdout and stderr are separated.
+           ``demux=True``, a tuple with two elements of type byte: stdout and
+           stderr.
 
        Raises:
            :py:class:`docker.errors.APIError`
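The reworded return type is easiest to see with ``demux=True``, which hands back a ``(stdout, stderr)`` tuple instead of a single interleaved byte string. A short sketch against a disposable container:

```python
import docker

client = docker.APIClient(base_url='unix:///var/run/docker.sock')
container = client.create_container('alpine', 'sleep 60', detach=True)
client.start(container)

exec_id = client.exec_create(
    container, 'sh -c "echo to-stdout; echo to-stderr >&2"'
)
# With demux=True the result is a (stdout, stderr) tuple of bytes
stdout, stderr = client.exec_start(exec_id, demux=True)
assert stdout == b'to-stdout\n'
assert stderr == b'to-stderr\n'
```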
@@ -247,12 +247,15 @@ class ImageApiMixin(object):
 
    @utils.minimum_version('1.30')
    @utils.check_resource('image')
-   def inspect_distribution(self, image):
+   def inspect_distribution(self, image, auth_config=None):
        """
        Get image digest and platform information by contacting the registry.
 
        Args:
            image (str): The image name to inspect
+           auth_config (dict): Override the credentials that are found in the
+               config for this request. ``auth_config`` should contain the
+               ``username`` and ``password`` keys to be valid.
 
        Returns:
            (dict): A dict containing distribution data

@@ -261,9 +264,21 @@ class ImageApiMixin(object):
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
+       registry, _ = auth.resolve_repository_name(image)
+
+       headers = {}
+       if auth_config is None:
+           header = auth.get_config_header(self, registry)
+           if header:
+               headers['X-Registry-Auth'] = header
+       else:
+           log.debug('Sending supplied auth config')
+           headers['X-Registry-Auth'] = auth.encode_header(auth_config)
+
+       url = self._url("/distribution/{0}/json", image)
+
        return self._result(
-           self._get(self._url("/distribution/{0}/json", image)), True
+           self._get(url, headers=headers), True
        )
 
    def load_image(self, data, quiet=None):
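Because the registry is now resolved from the image reference and the ``X-Registry-Auth`` header attached, distribution data for private images can be fetched with explicit credentials. A sketch, assuming ``client`` is an ``APIClient`` as above; the registry, image, and credentials are placeholders:

```python
data = client.inspect_distribution(
    'registry.example.com/private/app:latest',
    auth_config={'username': 'user', 'password': 'secret'},
)
print(data['Descriptor']['digest'])  # digest reported by the registry
```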
@@ -336,10 +351,9 @@ class ImageApiMixin(object):
            tag (str): The tag to pull
            stream (bool): Stream the output as a generator. Make sure to
                consume the generator, otherwise pull might get cancelled.
-           auth_config (dict): Override the credentials that
-               :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
-               this request. ``auth_config`` should contain the ``username``
-               and ``password`` keys to be valid.
+           auth_config (dict): Override the credentials that are found in the
+               config for this request. ``auth_config`` should contain the
+               ``username`` and ``password`` keys to be valid.
            decode (bool): Decode the JSON data from the server into dicts.
                Only applies with ``stream=True``
            platform (str): Platform in the format ``os[/arch[/variant]]``

@@ -414,10 +428,9 @@ class ImageApiMixin(object):
            repository (str): The repository to push to
            tag (str): An optional tag to push
            stream (bool): Stream the output as a blocking generator
-           auth_config (dict): Override the credentials that
-               :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
-               this request. ``auth_config`` should contain the ``username``
-               and ``password`` keys to be valid.
+           auth_config (dict): Override the credentials that are found in the
+               config for this request. ``auth_config`` should contain the
+               ``username`` and ``password`` keys to be valid.
            decode (bool): Decode the JSON data from the server into dicts.
                Only applies with ``stream=True``
 
@@ -88,6 +88,10 @@ def _check_api_features(version, task_template, update_config, endpoint_spec,
        if container_spec.get('Isolation') is not None:
            raise_version_error('ContainerSpec.isolation', '1.35')
 
+   if utils.version_lt(version, '1.38'):
+       if container_spec.get('Init') is not None:
+           raise_version_error('ContainerSpec.init', '1.38')
+
    if task_template.get('Resources'):
        if utils.version_lt(version, '1.32'):
            if task_template['Resources'].get('GenericResources'):

@@ -387,7 +391,7 @@ class ServiceApiMixin(object):
            current specification of the service. Default: ``False``
 
        Returns:
-           ``True`` if successful.
+           A dictionary containing a ``Warnings`` key.
 
        Raises:
            :py:class:`docker.errors.APIError`

@@ -471,5 +475,4 @@ class ServiceApiMixin(object):
        resp = self._post_json(
            url, data=data, params={'version': version}, headers=headers
        )
-       self._raise_for_status(resp)
-       return True
+       return self._result(resp, json=True)
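Callers that previously checked for a literal ``True`` from ``update_service`` should now read the returned body, which surfaces the API's ``Warnings`` key. A sketch under the assumption that ``client``, ``service_id``, ``current_version``, and ``task_template`` already exist:

```python
result = client.update_service(
    service_id, version=current_version, task_template=task_template
)
# The response body is now returned as a dict
for warning in result.get('Warnings') or []:
    print('service update warning:', warning)
```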
@@ -1,5 +1,6 @@
 import logging
 from six.moves import http_client
+from ..constants import DEFAULT_SWARM_ADDR_POOL, DEFAULT_SWARM_SUBNET_SIZE
 from .. import errors
 from .. import types
 from .. import utils

@@ -82,7 +83,9 @@ class SwarmApiMixin(object):
 
    @utils.minimum_version('1.24')
    def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
-                  force_new_cluster=False, swarm_spec=None):
+                  force_new_cluster=False, swarm_spec=None,
+                  default_addr_pool=None, subnet_size=None,
+                  data_path_addr=None):
        """
        Initialize a new Swarm using the current connected engine as the first
        node.

@@ -107,9 +110,17 @@ class SwarmApiMixin(object):
            swarm_spec (dict): Configuration settings of the new Swarm. Use
                ``APIClient.create_swarm_spec`` to generate a valid
                configuration. Default: None
+           default_addr_pool (list of strings): Default Address Pool specifies
+               default subnet pools for global scope networks. Each pool
+               should be specified as a CIDR block, like '10.0.0.0/8'.
+               Default: None
+           subnet_size (int): SubnetSize specifies the subnet size of the
+               networks created from the default subnet pool. Default: None
+           data_path_addr (string): Address or interface to use for data path
+               traffic. For example, 192.168.1.1, or an interface, like eth0.
 
        Returns:
-           ``True`` if successful.
+           (str): The ID of the created node.
 
        Raises:
            :py:class:`docker.errors.APIError`
@@ -119,15 +130,44 @@ class SwarmApiMixin(object):
        url = self._url('/swarm/init')
        if swarm_spec is not None and not isinstance(swarm_spec, dict):
            raise TypeError('swarm_spec must be a dictionary')
 
+       if default_addr_pool is not None:
+           if utils.version_lt(self._version, '1.39'):
+               raise errors.InvalidVersion(
+                   'Address pool is only available for API version >= 1.39'
+               )
+           # subnet_size becomes 0 if not set with default_addr_pool
+           if subnet_size is None:
+               subnet_size = DEFAULT_SWARM_SUBNET_SIZE
+
+       if subnet_size is not None:
+           if utils.version_lt(self._version, '1.39'):
+               raise errors.InvalidVersion(
+                   'Subnet size is only available for API version >= 1.39'
+               )
+           # subnet_size is ignored if set without default_addr_pool
+           if default_addr_pool is None:
+               default_addr_pool = DEFAULT_SWARM_ADDR_POOL
+
        data = {
            'AdvertiseAddr': advertise_addr,
            'ListenAddr': listen_addr,
+           'DefaultAddrPool': default_addr_pool,
+           'SubnetSize': subnet_size,
            'ForceNewCluster': force_new_cluster,
            'Spec': swarm_spec,
        }
+
+       if data_path_addr is not None:
+           if utils.version_lt(self._version, '1.30'):
+               raise errors.InvalidVersion(
+                   'Data address path is only available for '
+                   'API version >= 1.30'
+               )
+           data['DataPathAddr'] = data_path_addr
+
        response = self._post_json(url, data=data)
-       self._raise_for_status(response)
-       return True
+       return self._result(response, json=True)
 
    @utils.minimum_version('1.24')
    def inspect_swarm(self):
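Putting the new parameters together: initializing a swarm with a custom address pool now also hands back the node ID of the first manager. A minimal sketch; the addresses are examples only:

```python
import docker

client = docker.APIClient(base_url='unix:///var/run/docker.sock')
node_id = client.init_swarm(
    advertise_addr='eth0',
    default_addr_pool=['10.20.0.0/16'],  # requires API >= 1.39
    subnet_size=24,
    data_path_addr='eth0',               # requires API >= 1.30
)
print('first manager node:', node_id)
```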
@@ -165,7 +205,7 @@ class SwarmApiMixin(object):
 
    @utils.minimum_version('1.24')
    def join_swarm(self, remote_addrs, join_token, listen_addr='0.0.0.0:2377',
-                  advertise_addr=None):
+                  advertise_addr=None, data_path_addr=None):
        """
        Make this Engine join a swarm that has already been created.
 

@@ -176,7 +216,7 @@ class SwarmApiMixin(object):
            listen_addr (string): Listen address used for inter-manager
                communication if the node gets promoted to manager, as well as
                determining the networking interface used for the VXLAN Tunnel
-               Endpoint (VTEP). Default: ``None``
+               Endpoint (VTEP). Default: ``'0.0.0.0:2377'``
            advertise_addr (string): Externally reachable address advertised
                to other nodes. This can either be an address/port combination
                in the form ``192.168.1.1:4567``, or an interface followed by a

@@ -184,6 +224,8 @@ class SwarmApiMixin(object):
                the port number from the listen address is used. If
                AdvertiseAddr is not specified, it will be automatically
                detected when possible. Default: ``None``
+           data_path_addr (string): Address or interface to use for data path
+               traffic. For example, 192.168.1.1, or an interface, like eth0.
 
        Returns:
            ``True`` if the request went through.

@@ -193,11 +235,20 @@ class SwarmApiMixin(object):
                If the server returns an error.
        """
        data = {
-           "RemoteAddrs": remote_addrs,
-           "ListenAddr": listen_addr,
-           "JoinToken": join_token,
-           "AdvertiseAddr": advertise_addr,
+           'RemoteAddrs': remote_addrs,
+           'ListenAddr': listen_addr,
+           'JoinToken': join_token,
+           'AdvertiseAddr': advertise_addr,
        }
+
+       if data_path_addr is not None:
+           if utils.version_lt(self._version, '1.30'):
+               raise errors.InvalidVersion(
+                   'Data address path is only available for '
+                   'API version >= 1.30'
+               )
+           data['DataPathAddr'] = data_path_addr
+
        url = self._url('/swarm/join')
        response = self._post_json(url, data=data)
        self._raise_for_status(response)
@@ -355,8 +406,10 @@ class SwarmApiMixin(object):
        return True
 
    @utils.minimum_version('1.24')
-   def update_swarm(self, version, swarm_spec=None, rotate_worker_token=False,
-                    rotate_manager_token=False):
+   def update_swarm(self, version, swarm_spec=None,
+                    rotate_worker_token=False,
+                    rotate_manager_token=False,
+                    rotate_manager_unlock_key=False):
        """
        Update the Swarm's configuration
 

@@ -370,6 +423,8 @@ class SwarmApiMixin(object):
                ``False``.
            rotate_manager_token (bool): Rotate the manager join token.
                Default: ``False``.
+           rotate_manager_unlock_key (bool): Rotate the manager unlock key.
+               Default: ``False``.
 
        Returns:
            ``True`` if the request went through.

@@ -378,12 +433,20 @@ class SwarmApiMixin(object):
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
 
        url = self._url('/swarm/update')
-       response = self._post_json(url, data=swarm_spec, params={
+       params = {
            'rotateWorkerToken': rotate_worker_token,
            'rotateManagerToken': rotate_manager_token,
            'version': version
-       })
+       }
+       if rotate_manager_unlock_key:
+           if utils.version_lt(self._version, '1.25'):
+               raise errors.InvalidVersion(
+                   'Rotate manager unlock key '
+                   'is only available for API version >= 1.25'
+               )
+           params['rotateManagerUnlockKey'] = rotate_manager_unlock_key
+
+       response = self._post_json(url, data=swarm_spec, params=params)
        self._raise_for_status(response)
        return True
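Rotating the unlock key goes through the same endpoint as the token rotations, gated on API 1.25. A sketch, assuming ``client`` is an ``APIClient`` connected to a swarm manager:

```python
swarm_info = client.inspect_swarm()
client.update_swarm(
    version=swarm_info['Version']['Index'],
    rotate_worker_token=True,
    rotate_manager_unlock_key=True,  # raises InvalidVersion below API 1.25
)
```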
@@ -2,9 +2,9 @@ import base64
 import json
 import logging
 
-import dockerpycreds
 import six
 
+from . import credentials
 from . import errors
 from .utils import config
 

@@ -273,17 +273,17 @@ class AuthConfig(dict):
                'Password': data['Secret'],
            })
            return res
-       except dockerpycreds.CredentialsNotFound:
+       except credentials.CredentialsNotFound:
            log.debug('No entry found')
            return None
-       except dockerpycreds.StoreError as e:
+       except credentials.StoreError as e:
            raise errors.DockerException(
                'Credentials store error: {0}'.format(repr(e))
            )
 
    def _get_store_instance(self, name):
        if name not in self._stores:
-           self._stores[name] = dockerpycreds.Store(
+           self._stores[name] = credentials.Store(
                name, environment=self._credstore_env
            )
        return self._stores[name]
@@ -26,7 +26,7 @@ class DockerClient(object):
        base_url (str): URL to the Docker server. For example,
            ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
        version (str): The version of the API to use. Set to ``auto`` to
-           automatically detect the server's version. Default: ``1.30``
+           automatically detect the server's version. Default: ``1.35``
        timeout (int): Default timeout for API calls, in seconds.
        tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
            ``True`` to enable it with default options, or pass a

@@ -62,7 +62,7 @@ class DockerClient(object):
 
        Args:
            version (str): The version of the API to use. Set to ``auto`` to
-               automatically detect the server's version. Default: ``1.30``
+               automatically detect the server's version. Default: ``1.35``
            timeout (int): Default timeout for API calls, in seconds.
            ssl_version (int): A valid `SSL version`_.
            assert_hostname (bool): Verify the hostname of the server.
@@ -25,3 +25,6 @@ DEFAULT_NUM_POOLS = 25
 DEFAULT_NUM_POOLS_SSH = 9
 
 DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
+
+DEFAULT_SWARM_ADDR_POOL = ['10.0.0.0/8']
+DEFAULT_SWARM_SUBNET_SIZE = 24
@@ -0,0 +1,4 @@
+# flake8: noqa
+from .store import Store
+from .errors import StoreError, CredentialsNotFound
+from .constants import *

@@ -0,0 +1,4 @@
+PROGRAM_PREFIX = 'docker-credential-'
+DEFAULT_LINUX_STORE = 'secretservice'
+DEFAULT_OSX_STORE = 'osxkeychain'
+DEFAULT_WIN32_STORE = 'wincred'

@@ -0,0 +1,25 @@
+class StoreError(RuntimeError):
+    pass
+
+
+class CredentialsNotFound(StoreError):
+    pass
+
+
+class InitializationError(StoreError):
+    pass
+
+
+def process_store_error(cpe, program):
+    message = cpe.output.decode('utf-8')
+    if 'credentials not found in native keychain' in message:
+        return CredentialsNotFound(
+            'No matching credentials in {}'.format(
+                program
+            )
+        )
+    return StoreError(
+        'Credentials store {} exited with "{}".'.format(
+            program, cpe.output.decode('utf-8').strip()
+        )
+    )
@@ -0,0 +1,107 @@
+import json
+import os
+import subprocess
+
+import six
+
+from . import constants
+from . import errors
+from .utils import create_environment_dict
+from .utils import find_executable
+
+
+class Store(object):
+    def __init__(self, program, environment=None):
+        """ Create a store object that acts as an interface to
+            perform the basic operations for storing, retrieving
+            and erasing credentials using `program`.
+        """
+        self.program = constants.PROGRAM_PREFIX + program
+        self.exe = find_executable(self.program)
+        self.environment = environment
+        if self.exe is None:
+            raise errors.InitializationError(
+                '{} not installed or not available in PATH'.format(
+                    self.program
+                )
+            )
+
+    def get(self, server):
+        """ Retrieve credentials for `server`. If no credentials are found,
+            a `StoreError` will be raised.
+        """
+        if not isinstance(server, six.binary_type):
+            server = server.encode('utf-8')
+        data = self._execute('get', server)
+        result = json.loads(data.decode('utf-8'))
+
+        # docker-credential-pass will return an object for inexistent servers
+        # whereas other helpers will exit with returncode != 0. For
+        # consistency, if no significant data is returned,
+        # raise CredentialsNotFound
+        if result['Username'] == '' and result['Secret'] == '':
+            raise errors.CredentialsNotFound(
+                'No matching credentials in {}'.format(self.program)
+            )
+
+        return result
+
+    def store(self, server, username, secret):
+        """ Store credentials for `server`. Raises a `StoreError` if an error
+            occurs.
+        """
+        data_input = json.dumps({
+            'ServerURL': server,
+            'Username': username,
+            'Secret': secret
+        }).encode('utf-8')
+        return self._execute('store', data_input)
+
+    def erase(self, server):
+        """ Erase credentials for `server`. Raises a `StoreError` if an error
+            occurs.
+        """
+        if not isinstance(server, six.binary_type):
+            server = server.encode('utf-8')
+        self._execute('erase', server)
+
+    def list(self):
+        """ List stored credentials. Requires v0.4.0+ of the helper.
+        """
+        data = self._execute('list', None)
+        return json.loads(data.decode('utf-8'))
+
+    def _execute(self, subcmd, data_input):
+        output = None
+        env = create_environment_dict(self.environment)
+        try:
+            if six.PY3:
+                output = subprocess.check_output(
+                    [self.exe, subcmd], input=data_input, env=env,
+                )
+            else:
+                process = subprocess.Popen(
+                    [self.exe, subcmd], stdin=subprocess.PIPE,
+                    stdout=subprocess.PIPE, env=env,
+                )
+                output, err = process.communicate(data_input)
+                if process.returncode != 0:
+                    raise subprocess.CalledProcessError(
+                        returncode=process.returncode, cmd='', output=output
+                    )
+        except subprocess.CalledProcessError as e:
+            raise errors.process_store_error(e, self.program)
+        except OSError as e:
+            if e.errno == os.errno.ENOENT:
+                raise errors.StoreError(
+                    '{} not installed or not available in PATH'.format(
+                        self.program
+                    )
+                )
+            else:
+                raise errors.StoreError(
+                    'Unexpected OS error "{}", errno={}'.format(
+                        e.strerror, e.errno
+                    )
+                )
+        return output
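The vendored module keeps the same interface as the old ``dockerpycreds`` package, so it can be exercised directly against an installed helper. A sketch assuming ``docker-credential-pass`` is on ``PATH``; the server URL and credentials are placeholders:

```python
from docker.credentials import CredentialsNotFound, Store

store = Store('pass')  # resolves docker-credential-pass via find_executable
store.store('https://registry.example.com', 'user', 'secret')
try:
    creds = store.get('https://registry.example.com')
    print(creds['Username'], creds['Secret'])
except CredentialsNotFound:
    print('no credentials stored for that server')
```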
@@ -0,0 +1,38 @@
+import distutils.spawn
+import os
+import sys
+
+
+def find_executable(executable, path=None):
+    """
+    As distutils.spawn.find_executable, but on Windows, look up
+    every extension declared in PATHEXT instead of just `.exe`
+    """
+    if sys.platform != 'win32':
+        return distutils.spawn.find_executable(executable, path)
+
+    if path is None:
+        path = os.environ['PATH']
+
+    paths = path.split(os.pathsep)
+    extensions = os.environ.get('PATHEXT', '.exe').split(os.pathsep)
+    base, ext = os.path.splitext(executable)
+
+    if not os.path.isfile(executable):
+        for p in paths:
+            for ext in extensions:
+                f = os.path.join(p, base + ext)
+                if os.path.isfile(f):
+                    return f
+        return None
+    else:
+        return executable
+
+
+def create_environment_dict(overrides):
+    """
+    Create and return a copy of os.environ with the specified overrides
+    """
+    result = os.environ.copy()
+    result.update(overrides or {})
+    return result
@@ -62,6 +62,13 @@ class Container(Model):
            return self.attrs['State']['Status']
        return self.attrs['State']
 
+   @property
+   def ports(self):
+       """
+       The ports that the container exposes as a dictionary.
+       """
+       return self.attrs.get('NetworkSettings', {}).get('Ports', {})
+
    def attach(self, **kwargs):
        """
        Attach to this container.
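The new property is a thin view over ``NetworkSettings.Ports``. For a container started with a published port (image and ports here are illustrative):

```python
import docker

client = docker.from_env()
container = client.containers.run(
    'nginx:alpine', detach=True, ports={'80/tcp': 8080}
)
container.reload()  # refresh attrs so the port map is populated
print(container.ports)
# e.g. {'80/tcp': [{'HostIp': '0.0.0.0', 'HostPort': '8080'}]}
```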
@@ -172,10 +179,11 @@ class Container(Model):
            (ExecResult): A tuple of (exit_code, output)
                exit_code: (int):
                    Exit code for the executed command or ``None`` if
-                   either ``stream```or ``socket`` is ``True``.
-               output: (generator or bytes):
+                   either ``stream`` or ``socket`` is ``True``.
+               output: (generator, bytes, or tuple):
                    If ``stream=True``, a generator yielding response chunks.
                    If ``socket=True``, a socket object for the connection.
+                   If ``demux=True``, a tuple of two bytes: stdout and stderr.
                    A bytestring containing response data otherwise.
 
        Raises:
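At the model level the same ``demux`` flag shows up on ``exec_run``. A sketch with a disposable container:

```python
import docker

client = docker.from_env()
container = client.containers.run('alpine', 'sleep 60', detach=True)

exit_code, (stdout, stderr) = container.exec_run(
    'sh -c "echo out; echo err >&2"', demux=True
)
assert stdout == b'out\n' and stderr == b'err\n'
```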
@@ -540,12 +548,15 @@ class ContainerCollection(Collection):
            cap_add (list of str): Add kernel capabilities. For example,
                ``["SYS_ADMIN", "MKNOD"]``.
            cap_drop (list of str): Drop kernel capabilities.
            cgroup_parent (str): Override the default parent cgroup.
+           cpu_count (int): Number of usable CPUs (Windows only).
+           cpu_percent (int): Usable percentage of the available CPUs
+               (Windows only).
            cpu_period (int): The length of a CPU period in microseconds.
            cpu_quota (int): Microseconds of CPU time that the container can
                get in a CPU period.
            cpu_rt_period (int): Limit CPU real-time period in microseconds.
            cpu_rt_runtime (int): Limit CPU real-time runtime in microseconds.
            cpu_shares (int): CPU shares (relative weight).
            cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
                ``0,1``).

@@ -589,6 +600,7 @@ class ContainerCollection(Collection):
            init_path (str): Path to the docker-init binary
            ipc_mode (str): Set the IPC mode for the container.
            isolation (str): Isolation technology to use. Default: `None`.
+           kernel_memory (int or str): Kernel memory limit
            labels (dict or list): A dictionary of name-value labels (e.g.
                ``{"label1": "value1", "label2": "value2"}``) or a list of
                names of labels to set with empty values (e.g.

@@ -598,6 +610,7 @@ class ContainerCollection(Collection):
                Containers declared in this dict will be linked to the new
                container using the provided alias. Default: ``None``.
            log_config (LogConfig): Logging configuration.
+           lxc_conf (dict): LXC config.
            mac_address (str): MAC address to assign to the container.
            mem_limit (int or str): Memory limit. Accepts float values
                (which represent the memory limit of the created container in

@@ -605,6 +618,7 @@ class ContainerCollection(Collection):
                (``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
                specified without a units character, bytes are assumed as an
                intended unit.
+           mem_reservation (int or str): Memory soft limit
            mem_swappiness (int): Tune a container's memory swappiness
                behavior. Accepts number between 0 and 100.
            memswap_limit (str or int): Maximum amount of memory + swap a

@@ -643,8 +657,8 @@ class ContainerCollection(Collection):
 
                The keys of the dictionary are the ports to bind inside the
                container, either as an integer or a string in the form
-               ``port/protocol``, where the protocol is either ``tcp`` or
-               ``udp``.
+               ``port/protocol``, where the protocol is either ``tcp``,
+               ``udp``, or ``sctp``.
 
                The values of the dictionary are the corresponding ports to
                open on the host, which can be either:

@@ -718,6 +732,10 @@ class ContainerCollection(Collection):
            userns_mode (str): Sets the user namespace mode for the container
                when user namespace remapping option is enabled. Supported
                values are: ``host``
+           uts_mode (str): Sets the UTS namespace mode for the container.
+               Supported values are: ``host``
+           version (str): The version of the API to use. Set to ``auto`` to
+               automatically detect the server's version. Default: ``1.35``
            volume_driver (str): The name of a volume driver/plugin.
            volumes (dict or list): A dictionary to configure volumes mounted
                inside the container. The key is either the host path or a

@@ -953,7 +971,6 @@ RUN_CREATE_KWARGS = [
    'tty',
    'use_config_proxy',
    'user',
-   'volume_driver',
    'working_dir',
 ]
 

@@ -1017,6 +1034,7 @@ RUN_HOST_CONFIG_KWARGS = [
    'userns_mode',
    'uts_mode',
    'version',
+   'volume_driver',
    'volumes_from',
    'runtime'
 ]
@@ -315,22 +315,26 @@ class ImageCollection(Collection):
        """
        return self.prepare_model(self.client.api.inspect_image(name))
 
-   def get_registry_data(self, name):
+   def get_registry_data(self, name, auth_config=None):
        """
        Gets the registry data for an image.
 
        Args:
            name (str): The name of the image.
+           auth_config (dict): Override the credentials that are found in the
+               config for this request. ``auth_config`` should contain the
+               ``username`` and ``password`` keys to be valid.
 
        Returns:
            (:py:class:`RegistryData`): The data object.
 
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        return RegistryData(
            image_name=name,
-           attrs=self.client.api.inspect_distribution(name),
+           attrs=self.client.api.inspect_distribution(name, auth_config),
            client=self.client,
            collection=self,
        )
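The model method forwards the same credentials, so registry data for a private image can be fetched without a prior ``login``. A sketch assuming ``client = docker.from_env()``; the names and credentials are placeholders:

```python
registry_data = client.images.get_registry_data(
    'registry.example.com/private/app:latest',
    auth_config={'username': 'user', 'password': 'secret'},
)
print(registry_data.id)  # the image digest
```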
@@ -404,10 +408,9 @@ class ImageCollection(Collection):
        Args:
            repository (str): The repository to pull
            tag (str): The tag to pull
-           auth_config (dict): Override the credentials that
-               :py:meth:`~docker.client.DockerClient.login` has set for
-               this request. ``auth_config`` should contain the ``username``
-               and ``password`` keys to be valid.
+           auth_config (dict): Override the credentials that are found in the
+               config for this request. ``auth_config`` should contain the
+               ``username`` and ``password`` keys to be valid.
            platform (str): Platform in the format ``os[/arch[/variant]]``
 
        Returns:

@@ -165,6 +165,8 @@ class ServiceCollection(Collection):
            env (list of str): Environment variables, in the form
                ``KEY=val``.
            hostname (string): Hostname to set on the container.
+           init (boolean): Run an init inside the container that forwards
+               signals and reaps processes
            isolation (string): Isolation technology used by the service's
                containers. Only used for Windows containers.
            labels (dict): Labels to apply to the service.

@@ -280,6 +282,7 @@ CONTAINER_SPEC_KWARGS = [
    'hostname',
    'hosts',
    'image',
+   'init',
    'isolation',
    'labels',
    'mounts',
@@ -34,7 +34,8 @@ class Swarm(Model):
    get_unlock_key.__doc__ = APIClient.get_unlock_key.__doc__
 
    def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
-            force_new_cluster=False, **kwargs):
+            force_new_cluster=False, default_addr_pool=None,
+            subnet_size=None, data_path_addr=None, **kwargs):
        """
        Initialize a new swarm on this Engine.
 

@@ -56,6 +57,14 @@ class Swarm(Model):
                is used. Default: ``0.0.0.0:2377``
            force_new_cluster (bool): Force creating a new Swarm, even if
                already part of one. Default: False
+           default_addr_pool (list of str): Default Address Pool specifies
+               default subnet pools for global scope networks. Each pool
+               should be specified as a CIDR block, like '10.0.0.0/8'.
+               Default: None
+           subnet_size (int): SubnetSize specifies the subnet size of the
+               networks created from the default subnet pool. Default: None
+           data_path_addr (string): Address or interface to use for data path
+               traffic. For example, 192.168.1.1, or an interface, like eth0.
            task_history_retention_limit (int): Maximum number of tasks
                history stored.
            snapshot_interval (int): Number of logs entries between snapshot.

@@ -89,7 +98,7 @@ class Swarm(Model):
                created in the orchestrator.
 
        Returns:
-           ``True`` if the request went through.
+           (str): The ID of the created node.
 
        Raises:
            :py:class:`docker.errors.APIError`

@@ -99,7 +108,8 @@ class Swarm(Model):
 
            >>> client.swarm.init(
                advertise_addr='eth0', listen_addr='0.0.0.0:5000',
-               force_new_cluster=False, snapshot_interval=5000,
+               force_new_cluster=False, default_addr_pool=['10.20.0.0/16'],
+               subnet_size=24, snapshot_interval=5000,
                log_entries_for_slow_followers=1200
            )
 
@@ -107,12 +117,15 @@ class Swarm(Model):
        init_kwargs = {
            'advertise_addr': advertise_addr,
            'listen_addr': listen_addr,
-           'force_new_cluster': force_new_cluster
+           'force_new_cluster': force_new_cluster,
+           'default_addr_pool': default_addr_pool,
+           'subnet_size': subnet_size,
+           'data_path_addr': data_path_addr,
        }
        init_kwargs['swarm_spec'] = self.client.api.create_swarm_spec(**kwargs)
-       self.client.api.init_swarm(**init_kwargs)
+       node_id = self.client.api.init_swarm(**init_kwargs)
        self.reload()
-       return True
+       return node_id
 
    def join(self, *args, **kwargs):
        return self.client.api.join_swarm(*args, **kwargs)
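The high-level call now mirrors the low-level one, returning the node ID instead of ``True``. A sketch with illustrative addresses:

```python
import docker

client = docker.from_env()
node_id = client.swarm.init(
    advertise_addr='eth0',
    default_addr_pool=['10.20.0.0/16'],
    subnet_size=24,
)
print(client.nodes.get(node_id).attrs['Spec']['Role'])  # 'manager'
```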
@@ -138,7 +151,7 @@ class Swarm(Model):
    unlock.__doc__ = APIClient.unlock_swarm.__doc__
 
    def update(self, rotate_worker_token=False, rotate_manager_token=False,
-              **kwargs):
+              rotate_manager_unlock_key=False, **kwargs):
        """
        Update the swarm's configuration.
 

@@ -151,7 +164,8 @@ class Swarm(Model):
                ``False``.
            rotate_manager_token (bool): Rotate the manager join token.
                Default: ``False``.
-
+           rotate_manager_unlock_key (bool): Rotate the manager unlock key.
+               Default: ``False``.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

@@ -165,5 +179,6 @@ class Swarm(Model):
            version=self.version,
            swarm_spec=self.client.api.create_swarm_spec(**kwargs),
            rotate_worker_token=rotate_worker_token,
-           rotate_manager_token=rotate_manager_token
+           rotate_manager_token=rotate_manager_token,
+           rotate_manager_unlock_key=rotate_manager_unlock_key
        )
@@ -110,13 +110,15 @@ class ContainerSpec(dict):
        privileges (Privileges): Security options for the service's containers.
        isolation (string): Isolation technology used by the service's
            containers. Only used for Windows containers.
+       init (boolean): Run an init inside the container that forwards signals
+           and reaps processes.
    """
    def __init__(self, image, command=None, args=None, hostname=None, env=None,
                 workdir=None, user=None, labels=None, mounts=None,
                 stop_grace_period=None, secrets=None, tty=None, groups=None,
                 open_stdin=None, read_only=None, stop_signal=None,
                 healthcheck=None, hosts=None, dns_config=None, configs=None,
-                privileges=None, isolation=None):
+                privileges=None, isolation=None, init=None):
        self['Image'] = image
 
        if isinstance(command, six.string_types):

@@ -183,6 +185,9 @@ class ContainerSpec(dict):
        if isolation is not None:
            self['Isolation'] = isolation
 
+       if init is not None:
+           self['Init'] = init
+
 
 class Mount(dict):
    """
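Because ``init`` threads through ``ContainerSpec``, services can now request an init process, subject to the API 1.38 gate added in ``_check_api_features`` above. A sketch:

```python
import docker

client = docker.from_env()
service = client.services.create(
    'nginx:alpine', name='web', init=True  # rejected below API version 1.38
)
```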
@@ -692,7 +697,7 @@ class PlacementPreference(dict):
                'PlacementPreference strategy value is invalid ({}):'
                ' must be "spread".'.format(strategy)
            )
-       self['SpreadOver'] = descriptor
+       self['Spread'] = {'SpreadDescriptor': descriptor}
 
 
 class DNSConfig(dict):
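With the corrected serialization, the type now emits the nested structure the engine expects:

```python
from docker.types import PlacementPreference

pref = PlacementPreference('spread', 'node.labels.datacenter')
print(pref)  # {'Spread': {'SpreadDescriptor': 'node.labels.datacenter'}}
```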
@@ -7,7 +7,7 @@ PORT_SPEC = re.compile(
    r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:"    # External range
    ")?"
    r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?"     # Internal range
-   "(?P<proto>/(udp|tcp))?"                    # Protocol
+   "(?P<proto>/(udp|tcp|sctp))?"               # Protocol
    "$"                                         # Match full string
 )
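The widened regex means the port-spec helpers accept SCTP end to end. A quick check with ``split_port`` (output shapes follow the existing tcp/udp behavior):

```python
from docker.utils.ports import split_port

internal, external = split_port('1234:5678/sctp')
print(internal)  # ['5678/sctp']
print(external)  # ['1234']
```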
@@ -1,2 +1,2 @@
-version = "3.7.2"
+version = "4.0.0"
 version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
@@ -1,6 +1,43 @@
 Change log
 ==========
 
+4.0.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/57?closed=1)
+
+### Breaking changes
+
+- Support for Python 3.3 and Python 3.4 has been dropped
+- `APIClient.update_service`, `APIClient.init_swarm`, and
+  `DockerClient.swarm.init` now return a `dict` from the API's response body
+- In `APIClient.build` and `DockerClient.images.build`, the `use_config_proxy`
+  parameter now defaults to True
+- `init_path` is no longer a valid parameter for `HostConfig`
+
+### Features
+
+- It is now possible to provide `SCTP` ports for port mappings
+- `ContainerSpec`s now support the `init` parameter
+- `DockerClient.swarm.init` and `APIClient.init_swarm` now support the
+  `data_path_addr` parameter
+- `APIClient.update_swarm` and `DockerClient.swarm.update` now support the
+  `rotate_manager_unlock_key` parameter
+- `APIClient.update_service` returns the API's response body as a `dict`
+- `APIClient.init_swarm` and `DockerClient.swarm.init` now return the API's
+  response body as a `dict`
+
+### Bugfixes
+
+- Fixed `PlacementPreference` instances to produce a valid API type
+- Fixed a bug where not setting a value for `buildargs` in `build` could cause
+  the library to attempt accessing attributes of a `None` value
+- Fixed a bug where setting the `volume_driver` parameter in
+  `DockerClient.containers.create` would result in an error
+- `APIClient.inspect_distribution` now correctly sets the authentication
+  headers on the request, allowing it to be used with private repositories.
+  This change also applies to `DockerClient.get_registry_data`
+
 3.7.2
 -----
 
@@ -2,9 +2,7 @@ appdirs==1.4.3
 asn1crypto==0.22.0
 backports.ssl-match-hostname==3.5.0.1
 cffi==1.10.0
-cryptography==1.9; python_version == '3.3'
-cryptography==2.3; python_version > '3.3'
-docker-pycreds==0.4.0
+cryptography==2.3
 enum34==1.1.6
 idna==2.5
 ipaddress==1.0.18

@@ -17,5 +15,5 @@ pypiwin32==219; sys_platform == 'win32' and python_version < '3.6'
 pypiwin32==223; sys_platform == 'win32' and python_version >= '3.6'
 requests==2.20.0
 six==1.10.0
+urllib3==1.24.3
 websocket-client==0.40.0
-urllib3==1.21.1; python_version == '3.3'
@@ -26,8 +26,8 @@ class Version(namedtuple('_Version', 'major minor patch stage edition')):
            edition = stage
            stage = None
        elif '-' in stage:
-           edition, stage = stage.split('-')
-       major, minor, patch = version.split('.', 3)
+           edition, stage = stage.split('-', 1)
+       major, minor, patch = version.split('.', 2)
        return cls(major, minor, patch, stage, edition)
 
    @property

@@ -63,7 +63,7 @@ def main():
    res = requests.get(url)
    content = res.text
    versions = [Version.parse(v) for v in re.findall(
-       r'"docker-([0-9]+\.[0-9]+\.[0-9]+)-?.*tgz"', content
+       r'"docker-([0-9]+\.[0-9]+\.[0-9]+-?.*)\.tgz"', content
    )]
    sorted_versions = sorted(
        versions, reverse=True, key=operator.attrgetter('order')
@@ -72,5 +72,6 @@ def main():
    results.add(str(latest))
    print(' '.join(results))
 
+
 if __name__ == '__main__':
    main()
setup.py
@@ -4,7 +4,8 @@ from __future__ import print_function
 import codecs
 import os
 
-from setuptools import setup, find_packages
+from setuptools import find_packages
+from setuptools import setup
 
 ROOT_DIR = os.path.dirname(__file__)
 SOURCE_DIR = os.path.join(ROOT_DIR)

@@ -12,7 +13,6 @@ SOURCE_DIR = os.path.join(ROOT_DIR)
 requirements = [
     'six >= 1.4.0',
     'websocket-client >= 0.32.0',
-    'docker-pycreds >= 0.4.0',
     'requests >= 2.14.2, != 2.18.0',
 ]
 

@@ -29,9 +29,6 @@ extras_require = {
     ':sys_platform == "win32" and python_version < "3.6"': 'pypiwin32==219',
     ':sys_platform == "win32" and python_version >= "3.6"': 'pypiwin32==223',
 
-    # urllib3 drops support for Python 3.3 in 1.23
-    ':python_version == "3.3"': 'urllib3 < 1.23',
-
     # If using docker-py over TLS, highly recommend this option is
     # pip-installed or pinned.

@@ -75,7 +72,7 @@ setup(
     install_requires=requirements,
     tests_require=test_requirements,
     extras_require=extras_require,
-    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*',
+    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
     zip_safe=False,
     test_suite='tests',
     classifiers=[

@@ -87,8 +84,6 @@ setup(
         'Programming Language :: Python :: 2',
         'Programming Language :: Python :: 2.7',
         'Programming Language :: Python :: 3',
-        'Programming Language :: Python :: 3.3',
-        'Programming Language :: Python :: 3.4',
         'Programming Language :: Python :: 3.5',
         'Programming Language :: Python :: 3.6',
         'Programming Language :: Python :: 3.7',
@@ -1,9 +1,6 @@
 coverage==4.5.2
-flake8==3.6.0; python_version != '3.3'
-flake8==3.4.1; python_version == '3.3'
+flake8==3.6.0
 mock==1.0.1
-pytest==2.9.1; python_version == '3.3'
-pytest==4.1.0; python_version != '3.3'
-pytest-cov==2.6.1; python_version != '3.3'
-pytest-cov==2.5.1; python_version == '3.3'
+pytest==4.1.0
+pytest-cov==2.6.1
 pytest-timeout==1.3.3
@@ -0,0 +1,28 @@
+ARG PYTHON_VERSION=3.6
+FROM python:$PYTHON_VERSION-jessie
+RUN apt-get update && apt-get -y install \
+    gnupg2 \
+    pass \
+    curl
+
+COPY ./tests/gpg-keys /gpg-keys
+RUN gpg2 --import gpg-keys/secret
+RUN gpg2 --import-ownertrust gpg-keys/ownertrust
+RUN yes | pass init $(gpg2 --no-auto-check-trustdb --list-secret-keys | grep ^sec | cut -d/ -f2 | cut -d" " -f1)
+RUN gpg2 --check-trustdb
+ARG CREDSTORE_VERSION=v0.6.0
+RUN curl -sSL -o /opt/docker-credential-pass.tar.gz \
+    https://github.com/docker/docker-credential-helpers/releases/download/$CREDSTORE_VERSION/docker-credential-pass-$CREDSTORE_VERSION-amd64.tar.gz && \
+    tar -xf /opt/docker-credential-pass.tar.gz -O > /usr/local/bin/docker-credential-pass && \
+    rm -rf /opt/docker-credential-pass.tar.gz && \
+    chmod +x /usr/local/bin/docker-credential-pass
+
+WORKDIR /src
+COPY requirements.txt /src/requirements.txt
+RUN pip install -r requirements.txt
+
+COPY test-requirements.txt /src/test-requirements.txt
+RUN pip install -r test-requirements.txt
+
+COPY . /src
+RUN pip install .
@@ -0,0 +1,3 @@
+# List of assigned trustvalues, created Wed 25 Apr 2018 01:28:17 PM PDT
+# (Use "gpg --import-ownertrust" to restore them)
+9781B87DAB042E6FD51388A5464ED987A7B21401:6:

Binary file not shown.
@@ -2,16 +2,16 @@ import functools
 import os
 import os.path
 import random
+import re
+import socket
 import tarfile
 import tempfile
 import time
-import re
-import six
-import socket
 
 import docker
+import paramiko
 import pytest
+import six
 
 
 def make_tree(dirs, files):

@@ -119,13 +119,18 @@ def assert_cat_socket_detached_with_keys(sock, inputs):
    # If we're using a Unix socket, the sock.send call will fail with a
    # BrokenPipeError ; INET sockets will just stop receiving / sending data
    # but will not raise an error
-   if getattr(sock, 'family', -9) == getattr(socket, 'AF_UNIX', -1):
-       with pytest.raises(socket.error):
-           sock.sendall(b'make sure the socket is closed\n')
-   elif isinstance(sock, paramiko.Channel):
+   if isinstance(sock, paramiko.Channel):
        with pytest.raises(OSError):
            sock.sendall(b'make sure the socket is closed\n')
    else:
+       if getattr(sock, 'family', -9) == getattr(socket, 'AF_UNIX', -1):
+           # We do not want to use pytest.raises here because future versions
+           # of the daemon no longer cause this to raise an error.
+           try:
+               sock.sendall(b'make sure the socket is closed\n')
+           except socket.error:
+               return
+
        sock.sendall(b"make sure the socket is closed\n")
        data = sock.recv(128)
    # New in 18.06: error message is broadcast over the socket when reading
|
|||
|
|
@ -5,21 +5,20 @@ import tempfile
|
|||
import threading
|
||||
from datetime import datetime
|
||||
|
||||
import docker
|
||||
from docker.constants import IS_WINDOWS_PLATFORM
|
||||
from docker.utils.socket import next_frame_header
|
||||
from docker.utils.socket import read_exactly
|
||||
|
||||
import pytest
|
||||
|
||||
import requests
|
||||
import six
|
||||
|
||||
from .base import BUSYBOX, BaseAPIIntegrationTest
|
||||
import docker
|
||||
from .. import helpers
|
||||
from ..helpers import (
|
||||
requires_api_version, ctrl_with, assert_cat_socket_detached_with_keys
|
||||
)
|
||||
from ..helpers import assert_cat_socket_detached_with_keys
|
||||
from ..helpers import ctrl_with
|
||||
from ..helpers import requires_api_version
|
||||
from .base import BaseAPIIntegrationTest
|
||||
from .base import BUSYBOX
|
||||
from docker.constants import IS_WINDOWS_PLATFORM
|
||||
from docker.utils.socket import next_frame_header
|
||||
from docker.utils.socket import read_exactly
|
||||
|
||||
|
||||
class ListContainersTest(BaseAPIIntegrationTest):
|
||||
|
|
@@ -38,7 +37,7 @@ class ListContainersTest(BaseAPIIntegrationTest):
        assert 'Command' in retrieved
        assert retrieved['Command'] == six.text_type('true')
        assert 'Image' in retrieved
-       assert re.search(r'busybox:.*', retrieved['Image'])
+       assert re.search(r'alpine:.*', retrieved['Image'])
        assert 'Status' in retrieved
 
 

@@ -368,10 +367,9 @@ class CreateContainerTest(BaseAPIIntegrationTest):
        )
        self.tmp_containers.append(container['Id'])
        config = self.client.inspect_container(container['Id'])
-       assert (
-           sorted(config['Config']['Env']) ==
-           sorted(['Foo', 'Other=one', 'Blank='])
-       )
+       assert 'Foo' in config['Config']['Env']
+       assert 'Other=one' in config['Config']['Env']
+       assert 'Blank=' in config['Config']['Env']
 
    @requires_api_version('1.22')
    def test_create_with_tmpfs(self):

@@ -448,19 +446,6 @@ class CreateContainerTest(BaseAPIIntegrationTest):
        config = self.client.inspect_container(ctnr)
        assert config['HostConfig']['Init'] is True
 
-   @pytest.mark.xfail(True, reason='init-path removed in 17.05.0')
-   @requires_api_version('1.25')
-   def test_create_with_init_path(self):
-       ctnr = self.client.create_container(
-           BUSYBOX, 'true',
-           host_config=self.client.create_host_config(
-               init_path="/usr/libexec/docker-init"
-           )
-       )
-       self.tmp_containers.append(ctnr['Id'])
-       config = self.client.inspect_container(ctnr)
-       assert config['HostConfig']['InitPath'] == "/usr/libexec/docker-init"
-
    @requires_api_version('1.24')
    @pytest.mark.xfail(not os.path.exists('/sys/fs/cgroup/cpu.rt_runtime_us'),
                       reason='CONFIG_RT_GROUP_SCHED isn\'t enabled')
@@ -1082,11 +1067,17 @@ class PortTest(BaseAPIIntegrationTest):
    def test_port(self):
        port_bindings = {
            '1111': ('127.0.0.1', '4567'),
-           '2222': ('127.0.0.1', '4568')
+           '2222': ('127.0.0.1', '4568'),
+           '3333/udp': ('127.0.0.1', '4569'),
        }
+       ports = [
+           1111,
+           2222,
+           (3333, 'udp'),
+       ]
+
        container = self.client.create_container(
-           BUSYBOX, ['sleep', '60'], ports=list(port_bindings.keys()),
+           BUSYBOX, ['sleep', '60'], ports=ports,
            host_config=self.client.create_host_config(
                port_bindings=port_bindings, network_mode='bridge'
            )

@@ -1097,13 +1088,15 @@ class PortTest(BaseAPIIntegrationTest):
 
        # Call the port function on each binding and compare expected vs actual
        for port in port_bindings:
+           port, _, protocol = port.partition('/')
            actual_bindings = self.client.port(container, port)
            port_binding = actual_bindings.pop()
 
            ip, host_port = port_binding['HostIp'], port_binding['HostPort']
 
-           assert ip == port_bindings[port][0]
-           assert host_port == port_bindings[port][1]
+           port_binding = port if not protocol else port + "/" + protocol
+           assert ip == port_bindings[port_binding][0]
+           assert host_port == port_bindings[port_binding][1]
 
        self.client.kill(id)
@@ -1168,10 +1161,10 @@ class RestartContainerTest(BaseAPIIntegrationTest):
    def test_restart_with_low_timeout(self):
        container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
        self.client.start(container)
-       self.client.timeout = 1
-       self.client.restart(container, timeout=3)
+       self.client.timeout = 3
+       self.client.restart(container, timeout=1)
        self.client.timeout = None
-       self.client.restart(container, timeout=3)
+       self.client.restart(container, timeout=1)
        self.client.kill(container)
 
    def test_restart_with_dict_instead_of_id(self):

@@ -1256,7 +1249,7 @@ class AttachContainerTest(BaseAPIIntegrationTest):
        output = self.client.attach(container, stream=False, logs=True)
        assert output == 'hello\n'.encode(encoding='ascii')
 
-   @pytest.mark.timeout(5)
+   @pytest.mark.timeout(10)
    @pytest.mark.skipif(os.environ.get('DOCKER_HOST', '').startswith('ssh://'),
                        reason='No cancellable streams over SSH')
    @pytest.mark.xfail(condition=os.environ.get('DOCKER_TLS_VERIFY') or

@@ -1264,14 +1257,14 @@ class AttachContainerTest(BaseAPIIntegrationTest):
                       reason='Flaky test on TLS')
    def test_attach_stream_and_cancel(self):
        container = self.client.create_container(
-           BUSYBOX, 'sh -c "echo hello && sleep 60"',
+           BUSYBOX, 'sh -c "sleep 2 && echo hello && sleep 60"',
            tty=True
        )
        self.tmp_containers.append(container)
        self.client.start(container)
        output = self.client.attach(container, stream=True, logs=True)
 
-       threading.Timer(1, output.close).start()
+       threading.Timer(3, output.close).start()
 
        lines = []
        for line in output:
@@ -1,11 +1,11 @@
+from ..helpers import assert_cat_socket_detached_with_keys
+from ..helpers import ctrl_with
+from ..helpers import requires_api_version
+from .base import BaseAPIIntegrationTest
+from .base import BUSYBOX
+from docker.utils.proxy import ProxyConfig
 from docker.utils.socket import next_frame_header
 from docker.utils.socket import read_exactly
-from docker.utils.proxy import ProxyConfig
-
-from .base import BaseAPIIntegrationTest, BUSYBOX
-from ..helpers import (
-    requires_api_version, ctrl_with, assert_cat_socket_detached_with_keys
-)
 
 
 class ExecTest(BaseAPIIntegrationTest):

@@ -17,7 +17,6 @@ class ExecTest(BaseAPIIntegrationTest):
 
        container = self.client.create_container(
            BUSYBOX, 'cat', detach=True, stdin_open=True,
-           use_config_proxy=True,
        )
        self.client.start(container)
        self.tmp_containers.append(container)

@@ -81,11 +80,11 @@ class ExecTest(BaseAPIIntegrationTest):
        self.client.start(id)
        self.tmp_containers.append(id)
 
-       res = self.client.exec_create(id, 'whoami', user='default')
+       res = self.client.exec_create(id, 'whoami', user='postgres')
        assert 'Id' in res
 
        exec_log = self.client.exec_start(res)
-       assert exec_log == b'default\n'
+       assert exec_log == b'postgres\n'
 
    def test_exec_command_as_root(self):
        container = self.client.create_container(BUSYBOX, 'cat',
@@ -115,75 +114,6 @@ class ExecTest(BaseAPIIntegrationTest):
            res += chunk
        assert res == b'hello\nworld\n'
 
-   def test_exec_command_demux(self):
-       container = self.client.create_container(
-           BUSYBOX, 'cat', detach=True, stdin_open=True)
-       id = container['Id']
-       self.client.start(id)
-       self.tmp_containers.append(id)
-
-       script = ' ; '.join([
-           # Write something on stdout
-           'echo hello out',
-           # Busybox's sleep does not handle sub-second times.
-           # This loops takes ~0.3 second to execute on my machine.
-           'for i in $(seq 1 50000); do echo $i>/dev/null; done',
-           # Write something on stderr
-           'echo hello err >&2'])
-       cmd = 'sh -c "{}"'.format(script)
-
-       # tty=False, stream=False, demux=False
-       res = self.client.exec_create(id, cmd)
-       exec_log = self.client.exec_start(res)
-       assert exec_log == b'hello out\nhello err\n'
-
-       # tty=False, stream=True, demux=False
-       res = self.client.exec_create(id, cmd)
-       exec_log = self.client.exec_start(res, stream=True)
-       assert next(exec_log) == b'hello out\n'
-       assert next(exec_log) == b'hello err\n'
-       with self.assertRaises(StopIteration):
-           next(exec_log)
-
-       # tty=False, stream=False, demux=True
-       res = self.client.exec_create(id, cmd)
-       exec_log = self.client.exec_start(res, demux=True)
-       assert exec_log == (b'hello out\n', b'hello err\n')
-
-       # tty=False, stream=True, demux=True
-       res = self.client.exec_create(id, cmd)
-       exec_log = self.client.exec_start(res, demux=True, stream=True)
-       assert next(exec_log) == (b'hello out\n', None)
-       assert next(exec_log) == (None, b'hello err\n')
-       with self.assertRaises(StopIteration):
-           next(exec_log)
-
-       # tty=True, stream=False, demux=False
-       res = self.client.exec_create(id, cmd, tty=True)
-       exec_log = self.client.exec_start(res)
-       assert exec_log == b'hello out\r\nhello err\r\n'
-
-       # tty=True, stream=True, demux=False
-       res = self.client.exec_create(id, cmd, tty=True)
-       exec_log = self.client.exec_start(res, stream=True)
-       assert next(exec_log) == b'hello out\r\n'
-       assert next(exec_log) == b'hello err\r\n'
-       with self.assertRaises(StopIteration):
-           next(exec_log)
-
-       # tty=True, stream=False, demux=True
-       res = self.client.exec_create(id, cmd, tty=True)
-       exec_log = self.client.exec_start(res, demux=True)
-       assert exec_log == (b'hello out\r\nhello err\r\n', None)
-
-       # tty=True, stream=True, demux=True
-       res = self.client.exec_create(id, cmd, tty=True)
-       exec_log = self.client.exec_start(res, demux=True, stream=True)
-       assert next(exec_log) == (b'hello out\r\n', None)
-       assert next(exec_log) == (b'hello err\r\n', None)
-       with self.assertRaises(StopIteration):
-           next(exec_log)
-
    def test_exec_start_socket(self):
        container = self.client.create_container(BUSYBOX, 'cat',
                                                 detach=True, stdin_open=True)
|
@@ -257,9 +187,9 @@ class ExecTest(BaseAPIIntegrationTest):
         self.tmp_containers.append(container)
         self.client.start(container)

-        res = self.client.exec_create(container, 'pwd', workdir='/var/www')
+        res = self.client.exec_create(container, 'pwd', workdir='/var/opt')
         exec_log = self.client.exec_start(res)
-        assert exec_log == b'/var/www\n'
+        assert exec_log == b'/var/opt\n'

     def test_detach_with_default(self):
         container = self.client.create_container(
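For orientation, a minimal sketch of the exec_create/exec_start call pattern this test exercises on recent API versions; the container name 'c1' is an assumption, and a daemon must be reachable:

    import docker

    client = docker.APIClient(version='auto')
    # Run `pwd` inside the running container 'c1', starting in /var/opt
    exec_id = client.exec_create('c1', 'pwd', workdir='/var/opt')
    print(client.exec_start(exec_id))  # b'/var/opt\n'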
@@ -313,3 +243,88 @@ class ExecTest(BaseAPIIntegrationTest):
         self.addCleanup(sock.close)

         assert_cat_socket_detached_with_keys(sock, [ctrl_with('x')])
+
+
+class ExecDemuxTest(BaseAPIIntegrationTest):
+    cmd = 'sh -c "{}"'.format(' ; '.join([
+        # Write something on stdout
+        'echo hello out',
+        # Pause between the writes so they arrive in separate frames
+        'sleep 0.5',
+        # Write something on stderr
+        'echo hello err >&2'])
+    )
+
+    def setUp(self):
+        super(ExecDemuxTest, self).setUp()
+        self.container = self.client.create_container(
+            BUSYBOX, 'cat', detach=True, stdin_open=True
+        )
+        self.client.start(self.container)
+        self.tmp_containers.append(self.container)
+
+    def test_exec_command_no_stream_no_demux(self):
+        # tty=False, stream=False, demux=False
+        res = self.client.exec_create(self.container, self.cmd)
+        exec_log = self.client.exec_start(res)
+        assert b'hello out\n' in exec_log
+        assert b'hello err\n' in exec_log
+
+    def test_exec_command_stream_no_demux(self):
+        # tty=False, stream=True, demux=False
+        res = self.client.exec_create(self.container, self.cmd)
+        exec_log = list(self.client.exec_start(res, stream=True))
+        assert len(exec_log) == 2
+        assert b'hello out\n' in exec_log
+        assert b'hello err\n' in exec_log
+
+    def test_exec_command_no_stream_demux(self):
+        # tty=False, stream=False, demux=True
+        res = self.client.exec_create(self.container, self.cmd)
+        exec_log = self.client.exec_start(res, demux=True)
+        assert exec_log == (b'hello out\n', b'hello err\n')
+
+    def test_exec_command_stream_demux(self):
+        # tty=False, stream=True, demux=True
+        res = self.client.exec_create(self.container, self.cmd)
+        exec_log = list(self.client.exec_start(res, demux=True, stream=True))
+        assert len(exec_log) == 2
+        assert (b'hello out\n', None) in exec_log
+        assert (None, b'hello err\n') in exec_log
+
+    def test_exec_command_tty_no_stream_no_demux(self):
+        # tty=True, stream=False, demux=False
+        res = self.client.exec_create(self.container, self.cmd, tty=True)
+        exec_log = self.client.exec_start(res)
+        assert exec_log == b'hello out\r\nhello err\r\n'
+
+    def test_exec_command_tty_stream_no_demux(self):
+        # tty=True, stream=True, demux=False
+        res = self.client.exec_create(self.container, self.cmd, tty=True)
+        exec_log = list(self.client.exec_start(res, stream=True))
+        assert b'hello out\r\n' in exec_log
+        if len(exec_log) == 2:
+            assert b'hello err\r\n' in exec_log
+        else:
+            assert len(exec_log) == 3
+            assert b'hello err' in exec_log
+            assert b'\r\n' in exec_log
+
+    def test_exec_command_tty_no_stream_demux(self):
+        # tty=True, stream=False, demux=True
+        res = self.client.exec_create(self.container, self.cmd, tty=True)
+        exec_log = self.client.exec_start(res, demux=True)
+        assert exec_log == (b'hello out\r\nhello err\r\n', None)
+
+    def test_exec_command_tty_stream_demux(self):
+        # tty=True, stream=True, demux=True
+        res = self.client.exec_create(self.container, self.cmd, tty=True)
+        exec_log = list(self.client.exec_start(res, demux=True, stream=True))
+        assert (b'hello out\r\n', None) in exec_log
+        if len(exec_log) == 2:
+            assert (b'hello err\r\n', None) in exec_log
+        else:
+            assert len(exec_log) == 3
+            assert (b'hello err', None) in exec_log
+            assert (b'\r\n', None) in exec_log
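For reference, a minimal sketch of the demux/stream output shapes this new class pins down. It assumes a reachable daemon and a running container 'c1'; note an exec instance can only be started once, hence one exec_create per call:

    import docker

    client = docker.APIClient(version='auto')
    cmd = 'sh -c "echo out; sleep 0.5; echo err >&2"'

    # Buffered, interleaved bytes
    res = client.exec_create('c1', cmd)
    print(client.exec_start(res))              # b'out\nerr\n'

    # Buffered, split into a (stdout, stderr) tuple
    res = client.exec_create('c1', cmd)
    print(client.exec_start(res, demux=True))  # (b'out\n', b'err\n')

    # Streamed and split per frame; the stream not written is None
    res = client.exec_create('c1', cmd)
    for out, err in client.exec_start(res, demux=True, stream=True):
        print(out, err)  # (b'out\n', None), then (None, b'err\n')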
@@ -427,6 +427,21 @@ class ServiceTest(BaseAPIIntegrationTest):
         assert 'Placement' in svc_info['Spec']['TaskTemplate']
         assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt

+    @requires_api_version('1.27')
+    def test_create_service_with_placement_preferences_tuple(self):
+        container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+        placemt = docker.types.Placement(preferences=(
+            ('spread', 'com.dockerpy.test'),
+        ))
+        task_tmpl = docker.types.TaskTemplate(
+            container_spec, placement=placemt
+        )
+        name = self.get_service_name()
+        svc_id = self.client.create_service(task_tmpl, name=name)
+        svc_info = self.client.inspect_service(svc_id)
+        assert 'Placement' in svc_info['Spec']['TaskTemplate']
+        assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
+
     def test_create_service_with_endpoint_spec(self):
         container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
         task_tmpl = docker.types.TaskTemplate(container_spec)
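A hedged usage sketch of the tuple form being tested here, where each preference is a ('strategy', 'descriptor') pair; it assumes a swarm manager, and the label name is illustrative:

    import docker

    client = docker.APIClient(version='auto')
    spec = docker.types.ContainerSpec('busybox', ['true'])
    placement = docker.types.Placement(
        preferences=[('spread', 'node.labels.zone')]  # spread tasks over the label
    )
    task = docker.types.TaskTemplate(spec, placement=placement)
    client.create_service(task, name='spread-example')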
@@ -835,6 +850,20 @@ class ServiceTest(BaseAPIIntegrationTest):
         )
         assert privileges['SELinuxContext']['Disable'] is True

+    @requires_api_version('1.38')
+    def test_create_service_with_init(self):
+        container_spec = docker.types.ContainerSpec(
+            'busybox', ['sleep', '999'], init=True
+        )
+        task_tmpl = docker.types.TaskTemplate(container_spec)
+        name = self.get_service_name()
+        svc_id = self.client.create_service(task_tmpl, name=name)
+        svc_info = self.client.inspect_service(svc_id)
+        assert 'Init' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+        assert (
+            svc_info['Spec']['TaskTemplate']['ContainerSpec']['Init'] is True
+        )
+
     @requires_api_version('1.25')
     def test_update_service_with_defaults_name(self):
         container_spec = docker.types.ContainerSpec(
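A short sketch of the new ContainerSpec init flag (API >= 1.38), which runs each service task under an init process; assumes a swarm manager:

    import docker

    client = docker.APIClient(version='auto')
    spec = docker.types.ContainerSpec('busybox', ['sleep', '999'], init=True)
    task = docker.types.TaskTemplate(spec)
    client.create_service(task, name='init-example')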
@@ -35,6 +35,35 @@ class SwarmTest(BaseAPIIntegrationTest):
         version_2 = self.client.inspect_swarm()['Version']['Index']
         assert version_2 != version_1

+    @requires_api_version('1.39')
+    def test_init_swarm_custom_addr_pool_defaults(self):
+        assert self.init_swarm()
+        results = self.client.inspect_swarm()
+        assert set(results['DefaultAddrPool']) == {'10.0.0.0/8'}
+        assert results['SubnetSize'] == 24
+
+    @requires_api_version('1.39')
+    def test_init_swarm_custom_addr_pool_only_pool(self):
+        assert self.init_swarm(default_addr_pool=['2.0.0.0/16'])
+        results = self.client.inspect_swarm()
+        assert set(results['DefaultAddrPool']) == {'2.0.0.0/16'}
+        assert results['SubnetSize'] == 24
+
+    @requires_api_version('1.39')
+    def test_init_swarm_custom_addr_pool_only_subnet_size(self):
+        assert self.init_swarm(subnet_size=26)
+        results = self.client.inspect_swarm()
+        assert set(results['DefaultAddrPool']) == {'10.0.0.0/8'}
+        assert results['SubnetSize'] == 26
+
+    @requires_api_version('1.39')
+    def test_init_swarm_custom_addr_pool_both_args(self):
+        assert self.init_swarm(default_addr_pool=['2.0.0.0/16', '3.0.0.0/16'],
+                               subnet_size=28)
+        results = self.client.inspect_swarm()
+        assert set(results['DefaultAddrPool']) == {'2.0.0.0/16', '3.0.0.0/16'}
+        assert results['SubnetSize'] == 28
+
     @requires_api_version('1.24')
     def test_init_already_in_cluster(self):
         assert self.init_swarm()
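A sketch of the new init_swarm arguments these tests cover (API >= 1.39); assumes the node is not already part of a swarm:

    import docker

    client = docker.APIClient(version='auto')
    client.init_swarm(
        default_addr_pool=['2.0.0.0/16', '3.0.0.0/16'],  # pools for overlay subnets
        subnet_size=28,                                  # CIDR size carved per network
    )
    info = client.inspect_swarm()
    print(info['DefaultAddrPool'], info['SubnetSize'])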
@@ -157,12 +186,14 @@ class SwarmTest(BaseAPIIntegrationTest):

     @requires_api_version('1.24')
     def test_inspect_node(self):
-        assert self.init_swarm()
+        node_id = self.init_swarm()
+        assert node_id
         nodes_list = self.client.nodes()
         assert len(nodes_list) == 1
         node = nodes_list[0]
         node_data = self.client.inspect_node(node['ID'])
         assert node['ID'] == node_data['ID']
+        assert node_id == node['ID']
         assert node['Version'] == node_data['Version']

     @requires_api_version('1.24')
@@ -204,3 +235,20 @@ class SwarmTest(BaseAPIIntegrationTest):
             self.client.remove_node(node_id, True)

         assert e.value.response.status_code >= 400
+
+    @requires_api_version('1.25')
+    def test_rotate_manager_unlock_key(self):
+        spec = self.client.create_swarm_spec(autolock_managers=True)
+        assert self.init_swarm(swarm_spec=spec)
+        swarm_info = self.client.inspect_swarm()
+        key_1 = self.client.get_unlock_key()
+        assert self.client.update_swarm(
+            version=swarm_info['Version']['Index'],
+            rotate_manager_unlock_key=True
+        )
+        key_2 = self.client.get_unlock_key()
+        assert key_1['UnlockKey'] != key_2['UnlockKey']
+
+    @requires_api_version('1.30')
+    def test_init_swarm_data_path_addr(self):
+        assert self.init_swarm(data_path_addr='eth0')
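For reference, the rotation flow the new test asserts, as a standalone sketch (assumes no swarm is active on the node yet):

    import docker

    client = docker.APIClient(version='auto')
    spec = client.create_swarm_spec(autolock_managers=True)
    client.init_swarm(swarm_spec=spec)

    old_key = client.get_unlock_key()['UnlockKey']
    version = client.inspect_swarm()['Version']['Index']
    client.update_swarm(version=version, rotate_manager_unlock_key=True)
    assert client.get_unlock_key()['UnlockKey'] != old_key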
@@ -3,11 +3,10 @@ import shutil
 import unittest

 import docker
-from .. import helpers
 from docker.utils import kwargs_from_env

-BUSYBOX = 'busybox:buildroot-2014.02'
+from .. import helpers
+
+BUSYBOX = 'alpine:3.9.3'  # FIXME: this should probably be renamed
 TEST_API_VERSION = os.environ.get('DOCKER_TEST_API_VERSION')

@@ -0,0 +1,12 @@
+#!/usr/bin/sh
+haveged
+gpg --batch --gen-key <<-EOF
+%echo Generating a standard key
+Key-Type: DSA
+Key-Length: 1024
+Subkey-Type: ELG-E
+Subkey-Length: 1024
+Name-Real: Sakuya Izayoi
+Name-Email: sakuya@gensokyo.jp
+Expire-Date: 0
+EOF
@@ -0,0 +1,87 @@
+import os
+import random
+import sys
+
+import pytest
+import six
+from distutils.spawn import find_executable
+
+from docker.credentials import (
+    CredentialsNotFound, Store, StoreError, DEFAULT_LINUX_STORE,
+    DEFAULT_OSX_STORE
+)
+
+
+class TestStore(object):
+    def teardown_method(self):
+        for server in self.tmp_keys:
+            try:
+                self.store.erase(server)
+            except StoreError:
+                pass
+
+    def setup_method(self):
+        self.tmp_keys = []
+        if sys.platform.startswith('linux'):
+            if find_executable('docker-credential-' + DEFAULT_LINUX_STORE):
+                self.store = Store(DEFAULT_LINUX_STORE)
+            elif find_executable('docker-credential-pass'):
+                self.store = Store('pass')
+            else:
+                raise Exception('No supported docker-credential store in PATH')
+        elif sys.platform.startswith('darwin'):
+            self.store = Store(DEFAULT_OSX_STORE)
+
+    def get_random_servername(self):
+        res = 'pycreds_test_{:x}'.format(random.getrandbits(32))
+        self.tmp_keys.append(res)
+        return res
+
+    def test_store_and_get(self):
+        key = self.get_random_servername()
+        self.store.store(server=key, username='user', secret='pass')
+        data = self.store.get(key)
+        assert data == {
+            'ServerURL': key,
+            'Username': 'user',
+            'Secret': 'pass'
+        }
+
+    def test_get_nonexistent(self):
+        key = self.get_random_servername()
+        with pytest.raises(CredentialsNotFound):
+            self.store.get(key)
+
+    def test_store_and_erase(self):
+        key = self.get_random_servername()
+        self.store.store(server=key, username='user', secret='pass')
+        self.store.erase(key)
+        with pytest.raises(CredentialsNotFound):
+            self.store.get(key)
+
+    def test_unicode_strings(self):
+        key = self.get_random_servername()
+        key = six.u(key)
+        self.store.store(server=key, username='user', secret='pass')
+        data = self.store.get(key)
+        assert data
+        self.store.erase(key)
+        with pytest.raises(CredentialsNotFound):
+            self.store.get(key)
+
+    def test_list(self):
+        names = (self.get_random_servername(), self.get_random_servername())
+        self.store.store(names[0], username='sakuya', secret='izayoi')
+        self.store.store(names[1], username='reimu', secret='hakurei')
+        data = self.store.list()
+        assert names[0] in data
+        assert data[names[0]] == 'sakuya'
+        assert names[1] in data
+        assert data[names[1]] == 'reimu'
+
+    def test_execute_with_env_override(self):
+        self.store.exe = 'env'
+        self.store.environment = {'FOO': 'bar'}
+        data = self.store._execute('--null', '')
+        assert b'\0FOO=bar\0' in data
+        assert 'FOO' not in os.environ
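For orientation, the docker.credentials.Store wrapper under test shells out to a docker-credential-* helper binary. A sketch of direct use; the 'secretservice' helper name is an assumption, substitute whichever helper is on PATH:

    from docker.credentials import Store

    store = Store('secretservice')  # invokes docker-credential-secretservice
    store.store(server='https://registry.example.com',
                username='user', secret='s3cret')
    print(store.get('https://registry.example.com'))
    # {'ServerURL': 'https://registry.example.com',
    #  'Username': 'user', 'Secret': 's3cret'}
    store.erase('https://registry.example.com')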
@@ -0,0 +1,22 @@
+import os
+
+from docker.credentials.utils import create_environment_dict
+
+try:
+    from unittest import mock
+except ImportError:
+    import mock
+
+
+@mock.patch.dict(os.environ)
+def test_create_environment_dict():
+    base = {'FOO': 'bar', 'BAZ': 'foobar'}
+    os.environ = base
+    assert create_environment_dict({'FOO': 'baz'}) == {
+        'FOO': 'baz', 'BAZ': 'foobar',
+    }
+    assert create_environment_dict({'HELLO': 'world'}) == {
+        'FOO': 'bar', 'BAZ': 'foobar', 'HELLO': 'world',
+    }
+
+    assert os.environ == base
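As the test shows, create_environment_dict merges overrides onto a copy of os.environ without mutating the process environment; a minimal sketch:

    import os
    from docker.credentials.utils import create_environment_dict

    env = create_environment_dict({'FOO': 'baz'})
    assert env == dict(os.environ, FOO='baz')  # merged copy; os.environ untouched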
@@ -2,10 +2,13 @@ import os
 import tempfile
 import threading

-import docker
 import pytest
-from .base import BaseIntegrationTest, TEST_API_VERSION
-from ..helpers import random_name, requires_api_version

+import docker
+from ..helpers import random_name
+from ..helpers import requires_api_version
+from .base import BaseIntegrationTest
+from .base import TEST_API_VERSION


 class ContainerCollectionTest(BaseIntegrationTest):
@@ -123,7 +126,9 @@ class ContainerCollectionTest(BaseIntegrationTest):
     def test_run_with_auto_remove(self):
         client = docker.from_env(version=TEST_API_VERSION)
         out = client.containers.run(
-            'alpine', 'echo hello', auto_remove=True
+            # sleep(2) to allow any communication with the container
+            # before it gets removed by the host.
+            'alpine', 'sh -c "echo hello && sleep 2"', auto_remove=True
         )
         assert out == b'hello\n'
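The added sleep matters because with auto_remove=True the daemon deletes the container as soon as its process exits, which can race the client still reading output; a sketch of the call shape:

    import docker

    client = docker.from_env()
    out = client.containers.run(
        'alpine', 'sh -c "echo hello && sleep 2"',
        auto_remove=True,  # daemon removes the container when it exits
    )
    print(out)  # b'hello\n'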
@@ -132,7 +137,10 @@ class ContainerCollectionTest(BaseIntegrationTest):
         client = docker.from_env(version=TEST_API_VERSION)
         with pytest.raises(docker.errors.ContainerError) as e:
             client.containers.run(
-                'alpine', 'sh -c ">&2 echo error && exit 1"', auto_remove=True
+                # sleep(2) to allow any communication with the container
+                # before it gets removed by the host.
+                'alpine', 'sh -c ">&2 echo error && sleep 2 && exit 1"',
+                auto_remove=True
             )
         assert e.value.exit_status == 1
         assert e.value.stderr is None
@@ -169,9 +177,7 @@ class ContainerCollectionTest(BaseIntegrationTest):
             ftp='sakuya.jp:4967'
         )

-        out = client.containers.run(
-            'alpine', 'sh -c "env"', use_config_proxy=True
-        )
+        out = client.containers.run('alpine', 'sh -c "env"')

         assert b'FTP_PROXY=sakuya.jp:4967\n' in out
         assert b'ftp_proxy=sakuya.jp:4967\n' in out
@@ -341,6 +347,66 @@ class ContainerTest(BaseIntegrationTest):
                     'memory_stats', 'blkio_stats']:
             assert key in stats

+    def test_ports_target_none(self):
+        client = docker.from_env(version=TEST_API_VERSION)
+        ports = None
+        target_ports = {'2222/tcp': ports}
+        container = client.containers.run(
+            "alpine", "sleep 100", detach=True,
+            ports=target_ports
+        )
+        self.tmp_containers.append(container.id)
+        container.reload()  # required to get auto-assigned ports
+        actual_ports = container.ports
+        assert sorted(target_ports.keys()) == sorted(actual_ports.keys())
+        for target_client, target_host in target_ports.items():
+            for actual_port in actual_ports[target_client]:
+                actual_keys = sorted(actual_port.keys())
+                assert sorted(['HostIp', 'HostPort']) == actual_keys
+                assert target_host is ports
+                assert int(actual_port['HostPort']) > 0
+        client.close()
+
+    def test_ports_target_tuple(self):
+        client = docker.from_env(version=TEST_API_VERSION)
+        ports = ('127.0.0.1', 1111)
+        target_ports = {'2222/tcp': ports}
+        container = client.containers.run(
+            "alpine", "sleep 100", detach=True,
+            ports=target_ports
+        )
+        self.tmp_containers.append(container.id)
+        container.reload()  # required to get auto-assigned ports
+        actual_ports = container.ports
+        assert sorted(target_ports.keys()) == sorted(actual_ports.keys())
+        for target_client, target_host in target_ports.items():
+            for actual_port in actual_ports[target_client]:
+                actual_keys = sorted(actual_port.keys())
+                assert sorted(['HostIp', 'HostPort']) == actual_keys
+                assert target_host == ports
+                assert int(actual_port['HostPort']) > 0
+        client.close()
+
+    def test_ports_target_list(self):
+        client = docker.from_env(version=TEST_API_VERSION)
+        ports = [1234, 4567]
+        target_ports = {'2222/tcp': ports}
+        container = client.containers.run(
+            "alpine", "sleep 100", detach=True,
+            ports=target_ports
+        )
+        self.tmp_containers.append(container.id)
+        container.reload()  # required to get auto-assigned ports
+        actual_ports = container.ports
+        assert sorted(target_ports.keys()) == sorted(actual_ports.keys())
+        for target_client, target_host in target_ports.items():
+            for actual_port in actual_ports[target_client]:
+                actual_keys = sorted(actual_port.keys())
+                assert sorted(['HostIp', 'HostPort']) == actual_keys
+                assert target_host == ports
+                assert int(actual_port['HostPort']) > 0
+        client.close()
+
     def test_stop(self):
         client = docker.from_env(version=TEST_API_VERSION)
         container = client.containers.run("alpine", "top", detach=True)
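The three tests above pin down the host-port forms accepted by the ports= argument; a compact sketch (assumes the 'alpine' image is available locally):

    import docker

    client = docker.from_env()
    container = client.containers.run(
        'alpine', 'sleep 100', detach=True,
        # None -> random host port; ('127.0.0.1', 1111) -> one bound address;
        # [1234, 4567] -> several host ports for the same container port
        ports={'2222/tcp': None},
    )
    container.reload()  # refresh attrs so auto-assigned ports show up
    print(container.ports['2222/tcp'])
    # e.g. [{'HostIp': '0.0.0.0', 'HostPort': '32768'}]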
@@ -378,3 +444,13 @@ class ContainerTest(BaseIntegrationTest):
                                               detach=True)
         self.tmp_containers.append(container.id)
         assert container.wait()['StatusCode'] == 1
+
+    def test_create_with_volume_driver(self):
+        client = docker.from_env(version=TEST_API_VERSION)
+        container = client.containers.create(
+            'alpine',
+            'sleep 300',
+            volume_driver='foo'
+        )
+        self.tmp_containers.append(container.id)
+        assert container.attrs['HostConfig']['VolumeDriver'] == 'foo'
@@ -31,3 +31,15 @@ class SwarmTest(unittest.TestCase):
             cm.value.response.status_code == 406 or
             cm.value.response.status_code == 503
         )
+
+    def test_join_on_already_joined_swarm(self):
+        client = docker.from_env(version=TEST_API_VERSION)
+        client.swarm.init()
+        join_token = client.swarm.attrs['JoinTokens']['Manager']
+        with pytest.raises(docker.errors.APIError) as cm:
+            client.swarm.join(
+                remote_addrs=['127.0.0.1'],
+                join_token=join_token,
+            )
+        assert cm.value.response.status_code == 503
+        assert 'This node is already part of a swarm.' in cm.value.explanation
@@ -9,8 +9,7 @@ import shutil
 import tempfile
 import unittest

-from docker import auth, errors
-import dockerpycreds
+from docker import auth, credentials, errors
 import pytest

 try:
@@ -661,7 +660,7 @@ class CredstoreTest(unittest.TestCase):
         }


-class InMemoryStore(dockerpycreds.Store):
+class InMemoryStore(credentials.Store):
     def __init__(self, *args, **kwargs):
         self.__store = {}
@@ -669,7 +668,7 @@ class InMemoryStore(dockerpycreds.Store):
         try:
             return self.__store[server]
         except KeyError:
-            raise dockerpycreds.errors.CredentialsNotFound()
+            raise credentials.errors.CredentialsNotFound()

     def store(self, server, username, secret):
         self.__store[server] = {
@@ -176,6 +176,7 @@ class ContainerCollectionTest(unittest.TestCase):
                 'Ulimits': [{"Name": "nofile", "Soft": 1024, "Hard": 2048}],
                 'UsernsMode': 'host',
                 'UTSMode': 'host',
+                'VolumeDriver': 'some_driver',
                 'VolumesFrom': ['container'],
             },
             healthcheck={'test': 'true'},
@@ -190,7 +191,6 @@ class ContainerCollectionTest(unittest.TestCase):
             stop_signal=9,
             tty=True,
             user='bob',
-            volume_driver='some_driver',
             volumes=[
                 '/mnt/vol2',
                 '/mnt/vol1',
@@ -495,9 +495,12 @@ class PortsTest(unittest.TestCase):
         assert external_port == [("127.0.0.1", "1000")]

     def test_split_port_with_protocol(self):
-        internal_port, external_port = split_port("127.0.0.1:1000:2000/udp")
-        assert internal_port == ["2000/udp"]
-        assert external_port == [("127.0.0.1", "1000")]
+        for protocol in ['tcp', 'udp', 'sctp']:
+            internal_port, external_port = split_port(
+                "127.0.0.1:1000:2000/" + protocol
+            )
+            assert internal_port == ["2000/" + protocol]
+            assert external_port == [("127.0.0.1", "1000")]

     def test_split_port_with_host_ip_no_port(self):
         internal_port, external_port = split_port("127.0.0.1::2000")
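For reference, split_port parses "host_ip:host_port:container_port/proto" strings into internal and external parts, now covering sctp as well; a sketch of the looped cases:

    from docker.utils.ports import split_port

    for proto in ('tcp', 'udp', 'sctp'):
        internal, external = split_port('127.0.0.1:1000:2000/' + proto)
        assert internal == ['2000/' + proto]
        assert external == [('127.0.0.1', '1000')]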
@@ -550,6 +553,10 @@ class PortsTest(unittest.TestCase):
         with pytest.raises(ValueError):
             split_port("0.0.0.0:1000:2000:tcp")

+    def test_split_port_invalid_protocol(self):
+        with pytest.raises(ValueError):
+            split_port("0.0.0.0:1000:2000/ftp")
+
     def test_non_matching_length_port_ranges(self):
         with pytest.raises(ValueError):
             split_port("0.0.0.0:1000-1010:2000-2002/tcp")
tox.ini

@@ -1,5 +1,5 @@
 [tox]
-envlist = py27, py34, py35, py36, py37, flake8
+envlist = py27, py35, py36, py37, flake8
 skipsdist=True

 [testenv]
@@ -1 +0,0 @@
--r requirements.txt