From 86d1b8fb83fe1e74109cdd25847208e0538ebf5d Mon Sep 17 00:00:00 2001 From: Massimiliano Pippi Date: Mon, 6 Jun 2016 19:29:09 -0400 Subject: [PATCH 01/83] invoke self._result with json=True if decode=True Signed-off-by: Massimiliano Pippi --- docker/client.py | 2 +- tests/unit/api_test.py | 42 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 42 insertions(+), 2 deletions(-) diff --git a/docker/client.py b/docker/client.py index de3cb3ca..a7e84a7d 100644 --- a/docker/client.py +++ b/docker/client.py @@ -235,7 +235,7 @@ class Client( else: # Response isn't chunked, meaning we probably # encountered an error immediately - yield self._result(response) + yield self._result(response, json=decode) def _multiplexed_buffer_helper(self, response): """A generator of multiplexed data blocks read from a buffered diff --git a/tests/unit/api_test.py b/tests/unit/api_test.py index 23fd1913..263cd693 100644 --- a/tests/unit/api_test.py +++ b/tests/unit/api_test.py @@ -22,9 +22,11 @@ import sys import tempfile import threading import time +import io import docker import requests +from requests.packages import urllib3 import six from .. 
import base @@ -42,7 +44,7 @@ DEFAULT_TIMEOUT_SECONDS = docker.constants.DEFAULT_TIMEOUT_SECONDS def response(status_code=200, content='', headers=None, reason=None, elapsed=0, - request=None): + request=None, raw=None): res = requests.Response() res.status_code = status_code if not isinstance(content, six.binary_type): @@ -52,6 +54,7 @@ def response(status_code=200, content='', headers=None, reason=None, elapsed=0, res.reason = reason res.elapsed = datetime.timedelta(elapsed) res.request = request + res.raw = raw return res @@ -317,6 +320,43 @@ class DockerApiTest(DockerClientTest): TypeError, self.client.create_host_config, security_opt='wrong' ) + def test_stream_helper_decoding(self): + status_code, content = fake_api.fake_responses[url_prefix + 'events']() + content_str = json.dumps(content) + if six.PY3: + content_str = content_str.encode('utf-8') + body = io.BytesIO(content_str) + + # mock a stream interface + raw_resp = urllib3.HTTPResponse(body=body) + setattr(raw_resp._fp, 'chunked', True) + setattr(raw_resp._fp, 'chunk_left', len(body.getvalue())-1) + + # pass `decode=False` to the helper + raw_resp._fp.seek(0) + resp = response(status_code=status_code, content=content, raw=raw_resp) + result = next(self.client._stream_helper(resp)) + self.assertEqual(result, content_str) + + # pass `decode=True` to the helper + raw_resp._fp.seek(0) + resp = response(status_code=status_code, content=content, raw=raw_resp) + result = next(self.client._stream_helper(resp, decode=True)) + self.assertEqual(result, content) + + # non-chunked response, pass `decode=False` to the helper + setattr(raw_resp._fp, 'chunked', False) + raw_resp._fp.seek(0) + resp = response(status_code=status_code, content=content, raw=raw_resp) + result = next(self.client._stream_helper(resp)) + self.assertEqual(result, content_str.decode('utf-8')) + + # non-chunked response, pass `decode=True` to the helper + raw_resp._fp.seek(0) + resp = response(status_code=status_code, content=content, 
raw=raw_resp) + result = next(self.client._stream_helper(resp, decode=True)) + self.assertEqual(result, content) + class StreamTest(base.Cleanup, base.BaseTestCase): def setUp(self): From 7d9bb6d209480ac8a48d1361ea08456542f5f865 Mon Sep 17 00:00:00 2001 From: Aiden Luo Date: Fri, 17 Jun 2016 11:20:39 +0800 Subject: [PATCH 02/83] fix #1094, support PidsLimit in host config Signed-off-by: Aiden Luo --- docker/utils/utils.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/docker/utils/utils.py b/docker/utils/utils.py index 2ef8ef0d..8cf21277 100644 --- a/docker/utils/utils.py +++ b/docker/utils/utils.py @@ -620,7 +620,7 @@ def create_host_config(binds=None, port_bindings=None, lxc_conf=None, device_write_bps=None, device_read_iops=None, device_write_iops=None, oom_kill_disable=False, shm_size=None, version=None, tmpfs=None, - oom_score_adj=None): + oom_score_adj=None, pids_limit=None,): host_config = {} @@ -853,6 +853,13 @@ def create_host_config(binds=None, port_bindings=None, lxc_conf=None, raise host_config_version_error('tmpfs', '1.22') host_config["Tmpfs"] = convert_tmpfs_mounts(tmpfs) + if pids_limit: + if not isinstance(pids_limit, int): + raise host_config_type_error('pids_limit', pids_limit, 'int') + if version_lt(version, '1.23'): + raise host_config_version_error('pids_limit', '1.23') + host_config["PidsLimit"] = pids_limit + return host_config From 0de366da3de451939ee05ef636506333e4f1ca70 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Fri, 24 Jun 2016 15:17:58 -0700 Subject: [PATCH 03/83] Add support for link-local IPs in endpoint config Signed-off-by: Joffrey F --- docker/api/network.py | 5 +++-- docker/utils/utils.py | 10 +++++++++- docs/api.md | 10 ++++++++++ docs/networks.md | 1 + 4 files changed, 23 insertions(+), 3 deletions(-) diff --git a/docker/api/network.py b/docker/api/network.py index a35f0a40..34cd8987 100644 --- a/docker/api/network.py +++ b/docker/api/network.py @@ -60,12 +60,13 @@ class NetworkApiMixin(object): 
@minimum_version('1.21') def connect_container_to_network(self, container, net_id, ipv4_address=None, ipv6_address=None, - aliases=None, links=None): + aliases=None, links=None, + link_local_ips=None): data = { "Container": container, "EndpointConfig": self.create_endpoint_config( aliases=aliases, links=links, ipv4_address=ipv4_address, - ipv6_address=ipv6_address + ipv6_address=ipv6_address, link_local_ips=link_local_ips ), } diff --git a/docker/utils/utils.py b/docker/utils/utils.py index 2ef8ef0d..b38cda47 100644 --- a/docker/utils/utils.py +++ b/docker/utils/utils.py @@ -873,7 +873,8 @@ def create_networking_config(endpoints_config=None): def create_endpoint_config(version, aliases=None, links=None, - ipv4_address=None, ipv6_address=None): + ipv4_address=None, ipv6_address=None, + link_local_ips=None): if version_lt(version, '1.22'): raise errors.InvalidVersion( 'Endpoint config is not supported for API version < 1.22' @@ -896,6 +897,13 @@ def create_endpoint_config(version, aliases=None, links=None, if ipam_config: endpoint_config['IPAMConfig'] = ipam_config + if link_local_ips is not None: + if version_lt(version, '1.24'): + raise errors.InvalidVersion( + 'link_local_ips is not supported for API version < 1.24' + ) + endpoint_config['LinkLocalIPs'] = link_local_ips + return endpoint_config diff --git a/docs/api.md b/docs/api.md index 51b6e271..5b8ef22b 100644 --- a/docs/api.md +++ b/docs/api.md @@ -179,6 +179,16 @@ Connect a container to a network. * container (str): container-id/name to be connected to the network * net_id (str): network id +* aliases (list): A list of aliases for this endpoint. Names in that list can + be used within the network to reach the container. Defaults to `None`. +* links (list): A list of links for this endpoint. Containers declared in this + list will be [linked](https://docs.docker.com/engine/userguide/networking/work-with-networks/#linking-containers-in-user-defined-networks) + to this container. Defaults to `None`. 
+* ipv4_address (str): The IP address of this container on the network, + using the IPv4 protocol. Defaults to `None`. +* ipv6_address (str): The IP address of this container on the network, + using the IPv6 protocol. Defaults to `None`. +* link_local_ips (list): A list of link-local (IPv4/IPv6) addresses. ## copy Identical to the `docker cp` command. Get files/folders from the container. diff --git a/docs/networks.md b/docs/networks.md index ec45e1c5..fb0e9f42 100644 --- a/docs/networks.md +++ b/docs/networks.md @@ -107,6 +107,7 @@ Create an endpoint config dictionary to be used with using the IPv4 protocol. Defaults to `None`. * ipv6_address (str): The IP address of this container on the network, using the IPv6 protocol. Defaults to `None`. +* link_local_ips (list): A list of link-local (IPv4/IPv6) addresses. **Returns** An endpoint config dictionary. From 5480493662df912f13b2d31ee217c425bef003e0 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Mon, 27 Jun 2016 18:00:55 -0700 Subject: [PATCH 04/83] signal in Client.kill can be a string containing the signal's name Signed-off-by: Joffrey F --- docker/api/container.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docker/api/container.py b/docker/api/container.py index b591b173..9cc14dbd 100644 --- a/docker/api/container.py +++ b/docker/api/container.py @@ -187,7 +187,9 @@ class ContainerApiMixin(object): url = self._url("/containers/{0}/kill", container) params = {} if signal is not None: - params['signal'] = int(signal) + if not isinstance(signal, six.string_types): + signal = int(signal) + params['signal'] = signal res = self._post(url, params=params) self._raise_for_status(res) From 1132368be19e39cbf2c3ab0ee073949ebb434815 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Tue, 28 Jun 2016 11:05:01 -0700 Subject: [PATCH 05/83] Fix network aliases test with Engine 1.12 Signed-off-by: Joffrey F --- tests/integration/network_test.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 
deletions(-) diff --git a/tests/integration/network_test.py b/tests/integration/network_test.py index 26d27a5a..f719fea4 100644 --- a/tests/integration/network_test.py +++ b/tests/integration/network_test.py @@ -138,9 +138,11 @@ class TestNetworks(helpers.BaseTestCase): self.client.connect_container_to_network( container, net_id, aliases=['foo', 'bar']) container_data = self.client.inspect_container(container) - self.assertEqual( - container_data['NetworkSettings']['Networks'][net_name]['Aliases'], - ['foo', 'bar']) + aliases = ( + container_data['NetworkSettings']['Networks'][net_name]['Aliases'] + ) + assert 'foo' in aliases + assert 'bar' in aliases @requires_api_version('1.21') def test_connect_on_container_create(self): @@ -183,10 +185,11 @@ class TestNetworks(helpers.BaseTestCase): self.client.start(container) container_data = self.client.inspect_container(container) - self.assertEqual( - container_data['NetworkSettings']['Networks'][net_name]['Aliases'], - ['foo', 'bar'] + aliases = ( + container_data['NetworkSettings']['Networks'][net_name]['Aliases'] ) + assert 'foo' in aliases + assert 'bar' in aliases @requires_api_version('1.22') def test_create_with_ipv4_address(self): From d96d848bb44a958941043d808efc78a39e27ca33 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Tue, 28 Jun 2016 11:55:13 -0700 Subject: [PATCH 06/83] Add integration tests for different types of kill signals Signed-off-by: Joffrey F --- tests/integration/container_test.py | 30 +++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/tests/integration/container_test.py b/tests/integration/container_test.py index 940e5b83..56b648a3 100644 --- a/tests/integration/container_test.py +++ b/tests/integration/container_test.py @@ -840,6 +840,36 @@ class KillTest(helpers.BaseTestCase): self.assertIn('Running', state) self.assertEqual(state['Running'], False, state) + def test_kill_with_signal_name(self): + id = self.client.create_container(BUSYBOX, ['sleep', '60']) + 
self.client.start(id) + self.tmp_containers.append(id) + self.client.kill(id, signal='SIGKILL') + exitcode = self.client.wait(id) + self.assertNotEqual(exitcode, 0) + container_info = self.client.inspect_container(id) + self.assertIn('State', container_info) + state = container_info['State'] + self.assertIn('ExitCode', state) + self.assertNotEqual(state['ExitCode'], 0) + self.assertIn('Running', state) + self.assertEqual(state['Running'], False, state) + + def test_kill_with_signal_integer(self): + id = self.client.create_container(BUSYBOX, ['sleep', '60']) + self.client.start(id) + self.tmp_containers.append(id) + self.client.kill(id, signal=9) + exitcode = self.client.wait(id) + self.assertNotEqual(exitcode, 0) + container_info = self.client.inspect_container(id) + self.assertIn('State', container_info) + state = container_info['State'] + self.assertIn('ExitCode', state) + self.assertNotEqual(state['ExitCode'], 0) + self.assertIn('Running', state) + self.assertEqual(state['Running'], False, state) + class PortTest(helpers.BaseTestCase): def test_port(self): From b5d3556bce4190c9092551a441c1725f36176aec Mon Sep 17 00:00:00 2001 From: Srikalyan Swayampakula Date: Wed, 29 Jun 2016 21:00:19 -0700 Subject: [PATCH 07/83] Added support for user namespace. 
Signed-off-by: Srikalyan Swayampakula --- docker/utils/utils.py | 10 +++++++++- docs/hostconfig.md | 2 ++ tests/unit/utils_test.py | 11 ++++++++++- 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/docker/utils/utils.py b/docker/utils/utils.py index b38cda47..6d9e7218 100644 --- a/docker/utils/utils.py +++ b/docker/utils/utils.py @@ -620,7 +620,7 @@ def create_host_config(binds=None, port_bindings=None, lxc_conf=None, device_write_bps=None, device_read_iops=None, device_write_iops=None, oom_kill_disable=False, shm_size=None, version=None, tmpfs=None, - oom_score_adj=None): + oom_score_adj=None, userns_mode=None): host_config = {} @@ -853,6 +853,14 @@ def create_host_config(binds=None, port_bindings=None, lxc_conf=None, raise host_config_version_error('tmpfs', '1.22') host_config["Tmpfs"] = convert_tmpfs_mounts(tmpfs) + if userns_mode: + if version_lt(version, '1.23'): + raise host_config_version_error('userns_mode', '1.23') + + if userns_mode != "host": + raise host_config_value_error("userns_mode", userns_mode) + host_config['UsernsMode'] = userns_mode + return host_config diff --git a/docs/hostconfig.md b/docs/hostconfig.md index c1e23533..e996a75b 100644 --- a/docs/hostconfig.md +++ b/docs/hostconfig.md @@ -123,6 +123,8 @@ for example: for more information. * tmpfs: Temporary filesystems to mouunt. See [Using tmpfs](tmpfs.md) for more information. +* userns_mode: Sets the user namespace mode for the container when user namespace remapping option + is enabled. 
supported values are: host **Returns** (dict) HostConfig dictionary diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py index 128778f1..61d87b73 100644 --- a/tests/unit/utils_test.py +++ b/tests/unit/utils_test.py @@ -98,6 +98,16 @@ class HostConfigTest(base.BaseTestCase): InvalidVersion, lambda: create_host_config(version='1.18.3', oom_kill_disable=True)) + def test_create_host_config_with_userns_mode(self): + config = create_host_config(version='1.23', userns_mode='host') + self.assertEqual(config.get('UsernsMode'), 'host') + self.assertRaises( + InvalidVersion, lambda: create_host_config(version='1.22', + userns_mode='host')) + self.assertRaises( + ValueError, lambda: create_host_config(version='1.23', + userns_mode='host12')) + def test_create_host_config_with_oom_score_adj(self): config = create_host_config(version='1.22', oom_score_adj=100) self.assertEqual(config.get('OomScoreAdj'), 100) @@ -602,7 +612,6 @@ class UtilsTest(base.BaseTestCase): class SplitCommandTest(base.BaseTestCase): - def test_split_command_with_unicode(self): self.assertEqual(split_command(u'echo μμ'), ['echo', 'μμ']) From 6d347cd8948d83545379703bbb501d45f203c63b Mon Sep 17 00:00:00 2001 From: Faylixe Date: Fri, 8 Jul 2016 10:41:39 +0200 Subject: [PATCH 08/83] Update api.md Added ``buildargs`` parameter to ``build`` documentation Signed-off-by: Faylixe --- docs/api.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/api.md b/docs/api.md index 5b8ef22b..07d75088 100644 --- a/docs/api.md +++ b/docs/api.md @@ -46,6 +46,8 @@ already, pass a readable file-like object to `fileobj` and also pass `custom_context=True`. If the stream is compressed also, set `encoding` to the correct value (e.g `gzip`). +Build argument can also be pass a a Python dict through ``buildargs`` parameter. + **Params**: * path (str): Path to the directory containing the Dockerfile @@ -65,6 +67,7 @@ correct value (e.g `gzip`). 
* pull (bool): Downloads any updates to the FROM image in Dockerfiles * forcerm (bool): Always remove intermediate containers, even after unsuccessful builds * dockerfile (str): path within the build context to the Dockerfile +* buildargs (dict): A dictionary of build arguments * container_limits (dict): A dictionary of limits applied to each container created by the build process. Valid keys: - memory (int): set memory limit for build From c8c6f0073ee0d8e3de9e6f3c13e9eb805661949e Mon Sep 17 00:00:00 2001 From: Faylixe Date: Fri, 8 Jul 2016 21:43:23 +0200 Subject: [PATCH 09/83] Removed superfluous sentence MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Faylixe Signed-off-by: Félix Voituret --- docs/api.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/api.md b/docs/api.md index 07d75088..4d2fe103 100644 --- a/docs/api.md +++ b/docs/api.md @@ -46,8 +46,6 @@ already, pass a readable file-like object to `fileobj` and also pass `custom_context=True`. If the stream is compressed also, set `encoding` to the correct value (e.g `gzip`). -Build argument can also be pass a a Python dict through ``buildargs`` parameter. - **Params**: * path (str): Path to the directory containing the Dockerfile From 66e7af93532890dcb8e43d8a701ca3b3eae51d4e Mon Sep 17 00:00:00 2001 From: Justin Michalicek Date: Wed, 6 Jul 2016 16:10:16 -0400 Subject: [PATCH 10/83] Pass X-Registry-Auth when building an image * Initialize headers variable in BuildApiMixin.build() as a dict rather than as None. This way the correct object gets passed to _set_auth_headers() even if no headers were set in build() * Changing object from None to {} in BuildApiMixin._set_auth_headers() removed because it changes the object reference, so has no effect on calling code. 
Signed-off-by: Justin Michalicek --- docker/api/build.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/api/build.py b/docker/api/build.py index 971a50ed..74037167 100644 --- a/docker/api/build.py +++ b/docker/api/build.py @@ -18,7 +18,8 @@ class BuildApiMixin(object): custom_context=False, encoding=None, pull=False, forcerm=False, dockerfile=None, container_limits=None, decode=False, buildargs=None, gzip=False): - remote = context = headers = None + remote = context = None + headers = {} container_limits = container_limits or {} if path is None and fileobj is None: raise TypeError("Either path or fileobj needs to be provided.") @@ -134,8 +135,7 @@ class BuildApiMixin(object): ', '.join(repr(k) for k in self._auth_configs.keys()) ) ) - if headers is None: - headers = {} + if utils.compare_version('1.19', self._version) >= 0: headers['X-Registry-Config'] = auth.encode_header( self._auth_configs From f7807bdb52513dcbdab486a633ad5edeefa09e66 Mon Sep 17 00:00:00 2001 From: Justin Michalicek Date: Thu, 7 Jul 2016 17:20:31 -0400 Subject: [PATCH 11/83] Update build unit tests * Test that the request from build when the client has auth configs contains the correct X-Registry-Config header * Test that BuildApiMixin._set_auth_headers() updates the passed in headers dict with auth data from the client * Test that BuildApiMixin._set_auth_headers() leaves headers dict intact when there is no _auth_config on the client. 
Signed-off-by: Justin Michalicek --- tests/unit/build_test.py | 61 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 60 insertions(+), 1 deletion(-) diff --git a/tests/unit/build_test.py b/tests/unit/build_test.py index 414153ed..8bd626bd 100644 --- a/tests/unit/build_test.py +++ b/tests/unit/build_test.py @@ -2,8 +2,9 @@ import gzip import io import docker +from docker import auth -from .api_test import DockerClientTest +from .api_test import DockerClientTest, fake_request, url_prefix class BuildTest(DockerClientTest): @@ -83,8 +84,25 @@ class BuildTest(DockerClientTest): } } + expected_params = {'t': None, 'q': False, 'dockerfile': None, + 'rm': False, 'nocache': False, 'pull': False, + 'forcerm': False, + 'remote': 'https://github.com/docker-library/mongo'} + expected_headers = { + 'X-Registry-Config': auth.encode_header(self.client._auth_configs)} + self.client.build(path='https://github.com/docker-library/mongo') + fake_request.assert_called_with( + 'POST', + url_prefix + 'build', + stream=True, + data=None, + headers=expected_headers, + params=expected_params, + timeout=None + ) + def test_build_container_with_named_dockerfile(self): self.client.build('.', dockerfile='nameddockerfile') @@ -103,3 +121,44 @@ class BuildTest(DockerClientTest): 'foo': 'bar' }) ) + + def test__set_auth_headers_with_empty_dict_and_auth_configs(self): + self.client._auth_configs = { + 'https://example.com': { + 'user': 'example', + 'password': 'example', + 'email': 'example@example.com' + } + } + + headers = {} + expected_headers = { + 'X-Registry-Config': auth.encode_header(self.client._auth_configs)} + self.client._set_auth_headers(headers) + self.assertEqual(headers, expected_headers) + + def test__set_auth_headers_with_dict_and_auth_configs(self): + self.client._auth_configs = { + 'https://example.com': { + 'user': 'example', + 'password': 'example', + 'email': 'example@example.com' + } + } + + headers = {'foo': 'bar'} + expected_headers = { + 'foo': 'bar', + 
'X-Registry-Config': auth.encode_header(self.client._auth_configs)} + + self.client._set_auth_headers(headers) + self.assertEqual(headers, expected_headers) + + def test__set_auth_headers_with_dict_and_no_auth_configs(self): + headers = {'foo': 'bar'} + expected_headers = { + 'foo': 'bar' + } + + self.client._set_auth_headers(headers) + self.assertEqual(headers, expected_headers) From e8ea79dfdb7b722801113131bfe90e88c141dc09 Mon Sep 17 00:00:00 2001 From: Justin Michalicek Date: Tue, 12 Jul 2016 10:04:37 -0400 Subject: [PATCH 12/83] Change double underscore in test case names for _set_auth_headers * Change test__set_auth_headers_* methods to test_set_auth_headers_* Signed-off-by: Justin Michalicek --- tests/unit/build_test.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/unit/build_test.py b/tests/unit/build_test.py index 8bd626bd..b2705eb2 100644 --- a/tests/unit/build_test.py +++ b/tests/unit/build_test.py @@ -122,7 +122,7 @@ class BuildTest(DockerClientTest): }) ) - def test__set_auth_headers_with_empty_dict_and_auth_configs(self): + def test_set_auth_headers_with_empty_dict_and_auth_configs(self): self.client._auth_configs = { 'https://example.com': { 'user': 'example', @@ -137,7 +137,7 @@ class BuildTest(DockerClientTest): self.client._set_auth_headers(headers) self.assertEqual(headers, expected_headers) - def test__set_auth_headers_with_dict_and_auth_configs(self): + def test_set_auth_headers_with_dict_and_auth_configs(self): self.client._auth_configs = { 'https://example.com': { 'user': 'example', @@ -154,7 +154,7 @@ class BuildTest(DockerClientTest): self.client._set_auth_headers(headers) self.assertEqual(headers, expected_headers) - def test__set_auth_headers_with_dict_and_no_auth_configs(self): + def test_set_auth_headers_with_dict_and_no_auth_configs(self): headers = {'foo': 'bar'} expected_headers = { 'foo': 'bar' From 8f8a3d0ed2dd1f1bc3ae68c2af186070dc3007a8 Mon Sep 17 00:00:00 2001 From: Tomas Tomecek Date: Thu, 30 Jun 
2016 10:26:30 +0200 Subject: [PATCH 13/83] volumes,create: support adding labels Fixes #1102 Signed-off-by: Tomas Tomecek --- docker/api/volume.py | 13 ++++++++++++- docs/api.md | 11 +++++++++-- tests/unit/fake_api.py | 5 ++++- tests/unit/volume_test.py | 16 ++++++++++++++++ 4 files changed, 41 insertions(+), 4 deletions(-) diff --git a/docker/api/volume.py b/docker/api/volume.py index bb8b39b3..afc72cbb 100644 --- a/docker/api/volume.py +++ b/docker/api/volume.py @@ -1,3 +1,4 @@ +from .. import errors from .. import utils @@ -11,7 +12,7 @@ class VolumeApiMixin(object): return self._result(self._get(url, params=params), True) @utils.minimum_version('1.21') - def create_volume(self, name, driver=None, driver_opts=None): + def create_volume(self, name, driver=None, driver_opts=None, labels=None): url = self._url('/volumes/create') if driver_opts is not None and not isinstance(driver_opts, dict): raise TypeError('driver_opts must be a dictionary') @@ -21,6 +22,16 @@ class VolumeApiMixin(object): 'Driver': driver, 'DriverOpts': driver_opts, } + + if labels is not None: + if utils.compare_version('1.23', self._version) < 0: + raise errors.InvalidVersion( + 'volume labels were introduced in API 1.23' + ) + if not isinstance(labels, dict): + raise TypeError('labels must be a dictionary') + data["Labels"] = labels + return self._result(self._post_json(url, data=data), True) @utils.minimum_version('1.21') diff --git a/docs/api.md b/docs/api.md index 4d2fe103..41c5e6cf 100644 --- a/docs/api.md +++ b/docs/api.md @@ -310,6 +310,7 @@ Create and register a named volume * name (str): Name of the volume * driver (str): Name of the driver used to create the volume * driver_opts (dict): Driver options as a key-value dictionary +* labels (dict): Labels to set on the volume **Returns** (dict): The created volume reference object @@ -317,10 +318,16 @@ Create and register a named volume >>> from docker import Client >>> cli = Client() >>> volume = cli.create_volume( - name='foobar', 
driver='local', driver_opts={'foo': 'bar', 'baz': 'false'} + name='foobar', driver='local', driver_opts={'foo': 'bar', 'baz': 'false'}, + labels={"key": "value"} ) >>> print(volume) -{u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data', u'Driver': u'local', u'Name': u'foobar'} +{ + u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data', + u'Driver': u'local', + u'Name': u'foobar', + u'Labels': {u'key': u'value'} +} ``` ## diff diff --git a/tests/unit/fake_api.py b/tests/unit/fake_api.py index 99525956..835d73f2 100644 --- a/tests/unit/fake_api.py +++ b/tests/unit/fake_api.py @@ -433,7 +433,10 @@ def get_fake_volume(): response = { 'Name': 'perfectcherryblossom', 'Driver': 'local', - 'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom' + 'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom', + 'Labels': { + 'com.example.some-label': 'some-value' + } } return status_code, response diff --git a/tests/unit/volume_test.py b/tests/unit/volume_test.py index 5b1823a4..136d11af 100644 --- a/tests/unit/volume_test.py +++ b/tests/unit/volume_test.py @@ -43,6 +43,22 @@ class VolumeTest(DockerClientTest): self.assertEqual(args[0][1], url_prefix + 'volumes/create') self.assertEqual(json.loads(args[1]['data']), {'Name': name}) + @base.requires_api_version('1.23') + def test_create_volume_with_labels(self): + name = 'perfectcherryblossom' + result = self.client.create_volume(name, labels={ + 'com.example.some-label': 'some-value'}) + self.assertEqual( + result["Labels"], + {'com.example.some-label': 'some-value'} + ) + + @base.requires_api_version('1.23') + def test_create_volume_with_invalid_labels(self): + name = 'perfectcherryblossom' + with pytest.raises(TypeError): + self.client.create_volume(name, labels=1) + @base.requires_api_version('1.21') def test_create_volume_with_driver(self): name = 'perfectcherryblossom' From 6dec639a1ada87f924bc5230718386b8a8a98206 Mon Sep 17 00:00:00 2001 From: David Gageot Date: Fri, 8 Jul 2016 12:11:40 +0200 Subject: [PATCH 
14/83] Add hijack hints for attach api calls Signed-off-by: David Gageot --- docker/api/container.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/docker/api/container.py b/docker/api/container.py index 9cc14dbd..eec25802 100644 --- a/docker/api/container.py +++ b/docker/api/container.py @@ -15,10 +15,16 @@ class ContainerApiMixin(object): 'logs': logs and 1 or 0, 'stdout': stdout and 1 or 0, 'stderr': stderr and 1 or 0, - 'stream': stream and 1 or 0, + 'stream': stream and 1 or 0 } + + headers = { + 'Connection': 'Upgrade', + 'Upgrade': 'tcp' + } + u = self._url("/containers/{0}/attach", container) - response = self._post(u, params=params, stream=stream) + response = self._post(u, headers=headers, params=params, stream=stream) return self._get_result(container, stream, response) From 5464cf2bea9f232923c74cec17ef59b43b3613ef Mon Sep 17 00:00:00 2001 From: David Gageot Date: Fri, 8 Jul 2016 12:12:23 +0200 Subject: [PATCH 15/83] Add hijack hints for non-detached exec api calls Signed-off-by: David Gageot --- docker/api/exec_api.py | 10 +++++++++- tests/unit/exec_test.py | 32 ++++++++++++++++++++++++++++++-- 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/docker/api/exec_api.py b/docker/api/exec_api.py index f0e4afa6..ad2cd331 100644 --- a/docker/api/exec_api.py +++ b/docker/api/exec_api.py @@ -66,8 +66,16 @@ class ExecApiMixin(object): 'Detach': detach } + headers = {} if detach else { + 'Connection': 'Upgrade', + 'Upgrade': 'tcp' + } + res = self._post_json( - self._url('/exec/{0}/start', exec_id), data=data, stream=stream + self._url('/exec/{0}/start', exec_id), + headers=headers, + data=data, + stream=stream ) if socket: diff --git a/tests/unit/exec_test.py b/tests/unit/exec_test.py index 3007799c..6ba2a3dd 100644 --- a/tests/unit/exec_test.py +++ b/tests/unit/exec_test.py @@ -51,8 +51,36 @@ class ExecTest(DockerClientTest): } ) - self.assertEqual(args[1]['headers'], - {'Content-Type': 'application/json'}) + 
self.assertEqual( + args[1]['headers'], { + 'Content-Type': 'application/json', + 'Connection': 'Upgrade', + 'Upgrade': 'tcp' + } + ) + + def test_exec_start_detached(self): + self.client.exec_start(fake_api.FAKE_EXEC_ID, detach=True) + + args = fake_request.call_args + self.assertEqual( + args[0][1], url_prefix + 'exec/{0}/start'.format( + fake_api.FAKE_EXEC_ID + ) + ) + + self.assertEqual( + json.loads(args[1]['data']), { + 'Tty': False, + 'Detach': True + } + ) + + self.assertEqual( + args[1]['headers'], { + 'Content-Type': 'application/json' + } + ) def test_exec_inspect(self): self.client.exec_inspect(fake_api.FAKE_EXEC_ID) From 76ed9c37cdd532a0efa0b07b2f23d024dd8a3ab4 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Fri, 8 Jul 2016 15:58:50 -0700 Subject: [PATCH 16/83] Read from socket after sending TCP upgrade headers. Signed-off-by: Joffrey F --- docker/api/container.py | 15 ++++++++++--- docker/api/exec_api.py | 6 ++--- docker/client.py | 50 +++++++++++++++++++++++++++++++++++++++++ tests/helpers.py | 4 ++-- 4 files changed, 66 insertions(+), 9 deletions(-) diff --git a/docker/api/container.py b/docker/api/container.py index eec25802..b8507d85 100644 --- a/docker/api/container.py +++ b/docker/api/container.py @@ -26,7 +26,7 @@ class ContainerApiMixin(object): u = self._url("/containers/{0}/attach", container) response = self._post(u, headers=headers, params=params, stream=stream) - return self._get_result(container, stream, response) + return self._read_from_socket(response, stream) @utils.check_resource def attach_socket(self, container, params=None, ws=False): @@ -40,9 +40,18 @@ class ContainerApiMixin(object): if ws: return self._attach_websocket(container, params) + headers = { + 'Connection': 'Upgrade', + 'Upgrade': 'tcp' + } + u = self._url("/containers/{0}/attach", container) - return self._get_raw_response_socket(self.post( - u, None, params=self._attach_params(params), stream=True)) + return self._get_raw_response_socket( + self.post( + u, None, 
params=self._attach_params(params), stream=True, + headers=headers + ) + ) @utils.check_resource def commit(self, container, repository=None, tag=None, message=None, diff --git a/docker/api/exec_api.py b/docker/api/exec_api.py index ad2cd331..6e499960 100644 --- a/docker/api/exec_api.py +++ b/docker/api/exec_api.py @@ -56,8 +56,6 @@ class ExecApiMixin(object): def exec_start(self, exec_id, detach=False, tty=False, stream=False, socket=False): # we want opened socket if socket == True - if socket: - stream = True if isinstance(exec_id, dict): exec_id = exec_id.get('Id') @@ -75,9 +73,9 @@ class ExecApiMixin(object): self._url('/exec/{0}/start', exec_id), headers=headers, data=data, - stream=stream + stream=True ) if socket: return self._get_raw_response_socket(res) - return self._get_result_tty(stream, res, tty) + return self._read_from_socket(res, stream) diff --git a/docker/client.py b/docker/client.py index b96a78ce..dbbfb06c 100644 --- a/docker/client.py +++ b/docker/client.py @@ -12,7 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import errno import json +import os +import select import struct import requests @@ -305,6 +308,53 @@ class Client( for out in response.iter_content(chunk_size=1, decode_unicode=True): yield out + def _read_from_socket(self, response, stream): + def read_socket(socket, n=4096): + recoverable_errors = ( + errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK + ) + + # wait for data to become available + select.select([socket], [], []) + + try: + if hasattr(socket, 'recv'): + return socket.recv(n) + return os.read(socket.fileno(), n) + except EnvironmentError as e: + if e.errno not in recoverable_errors: + raise + + def next_packet_size(socket): + data = six.binary_type() + while len(data) < 8: + next_data = read_socket(socket, 8 - len(data)) + if not next_data: + return 0 + data = data + next_data + + if data is None: + return 0 + + if len(data) == 8: + _, actual = struct.unpack('>BxxxL', data) + return actual + + def read_loop(socket): + n = next_packet_size(socket) + while n > 0: + yield read_socket(socket, n) + n = next_packet_size(socket) + + socket = self._get_raw_response_socket(response) + if stream: + return read_loop(socket) + else: + data = six.binary_type() + for d in read_loop(socket): + data += d + return data + def _disable_socket_timeout(self, socket): """ Depending on the combination of python version and whether we're connecting over http or https, we might need to access _sock, which diff --git a/tests/helpers.py b/tests/helpers.py index 21036ace..70be803c 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -54,7 +54,7 @@ def exec_driver_is_native(): c = docker_client() EXEC_DRIVER = c.info()['ExecutionDriver'] c.close() - return EXEC_DRIVER.startswith('native') + return EXEC_DRIVER.startswith('native') or EXEC_DRIVER == '' def docker_client(**kwargs): @@ -105,7 +105,7 @@ def read_data(socket, packet_size): while len(data) < packet_size: next_data = read_socket(socket, packet_size - len(data)) if not next_data: - assert False, "Failed trying to read 
in the dataz" + assert False, "Failed trying to read in the data" data += next_data return data From e64ba8f2b96ad1bbdaed8a65e538b5ab6129f0ba Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Fri, 8 Jul 2016 16:21:35 -0700 Subject: [PATCH 17/83] Mock read_from_socket method Signed-off-by: Joffrey F --- tests/unit/api_test.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/unit/api_test.py b/tests/unit/api_test.py index 23fd1913..34bf14f6 100644 --- a/tests/unit/api_test.py +++ b/tests/unit/api_test.py @@ -93,6 +93,10 @@ def fake_put(self, url, *args, **kwargs): def fake_delete(self, url, *args, **kwargs): return fake_request('DELETE', url, *args, **kwargs) + +def fake_read_from_socket(self, response, stream): + return six.binary_type() + url_base = 'http+docker://localunixsocket/' url_prefix = '{0}v{1}/'.format( url_base, @@ -103,7 +107,8 @@ class DockerClientTest(base.Cleanup, base.BaseTestCase): def setUp(self): self.patcher = mock.patch.multiple( 'docker.Client', get=fake_get, post=fake_post, put=fake_put, - delete=fake_delete + delete=fake_delete, + _read_from_socket=fake_read_from_socket ) self.patcher.start() self.client = docker.Client() From 73f06e3335bc9d2bc5569dc9bdfeeab2a78fcdb8 Mon Sep 17 00:00:00 2001 From: Aanand Prasad Date: Tue, 12 Jul 2016 16:23:13 -0400 Subject: [PATCH 18/83] Move socket-reading test helpers into docker.utils.socket Signed-off-by: Aanand Prasad --- docker/utils/socket.py | 49 +++++++++++++++++++++++++++++ tests/helpers.py | 46 --------------------------- tests/integration/container_test.py | 6 ++-- tests/integration/exec_test.py | 7 +++-- 4 files changed, 58 insertions(+), 50 deletions(-) create mode 100644 docker/utils/socket.py diff --git a/docker/utils/socket.py b/docker/utils/socket.py new file mode 100644 index 00000000..f81d2f5d --- /dev/null +++ b/docker/utils/socket.py @@ -0,0 +1,49 @@ +import errno +import os +import select +import struct + +import six + + +def read_socket(socket, n=4096): + """ 
Code stolen from dockerpty to read the socket """ + recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK) + + # wait for data to become available + select.select([socket], [], []) + + try: + if hasattr(socket, 'recv'): + return socket.recv(n) + return os.read(socket.fileno(), n) + except EnvironmentError as e: + if e.errno not in recoverable_errors: + raise + + +def next_packet_size(socket): + """ Code stolen from dockerpty to get the next packet size """ + data = six.binary_type() + while len(data) < 8: + next_data = read_socket(socket, 8 - len(data)) + if not next_data: + return 0 + data = data + next_data + + if data is None: + return 0 + + if len(data) == 8: + _, actual = struct.unpack('>BxxxL', data) + return actual + + +def read_data(socket, packet_size): + data = six.binary_type() + while len(data) < packet_size: + next_data = read_socket(socket, packet_size - len(data)) + if not next_data: + assert False, "Failed trying to read in the data" + data += next_data + return data diff --git a/tests/helpers.py b/tests/helpers.py index 70be803c..94ea3887 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -1,9 +1,6 @@ -import errno import os import os.path -import select import shutil -import struct import tarfile import tempfile import unittest @@ -67,49 +64,6 @@ def docker_client_kwargs(**kwargs): return client_kwargs -def read_socket(socket, n=4096): - """ Code stolen from dockerpty to read the socket """ - recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK) - - # wait for data to become available - select.select([socket], [], []) - - try: - if hasattr(socket, 'recv'): - return socket.recv(n) - return os.read(socket.fileno(), n) - except EnvironmentError as e: - if e.errno not in recoverable_errors: - raise - - -def next_packet_size(socket): - """ Code stolen from dockerpty to get the next packet size """ - data = six.binary_type() - while len(data) < 8: - next_data = read_socket(socket, 8 - len(data)) - if not next_data: - 
return 0 - data = data + next_data - - if data is None: - return 0 - - if len(data) == 8: - _, actual = struct.unpack('>BxxxL', data) - return actual - - -def read_data(socket, packet_size): - data = six.binary_type() - while len(data) < packet_size: - next_data = read_socket(socket, packet_size - len(data)) - if not next_data: - assert False, "Failed trying to read in the data" - data += next_data - return data - - class BaseTestCase(unittest.TestCase): tmp_imgs = [] tmp_containers = [] diff --git a/tests/integration/container_test.py b/tests/integration/container_test.py index 56b648a3..594aaa32 100644 --- a/tests/integration/container_test.py +++ b/tests/integration/container_test.py @@ -3,6 +3,8 @@ import signal import tempfile import docker +from docker.utils.socket import next_packet_size +from docker.utils.socket import read_data import pytest import six @@ -1025,9 +1027,9 @@ class AttachContainerTest(helpers.BaseTestCase): self.client.start(ident) - next_size = helpers.next_packet_size(pty_stdout) + next_size = next_packet_size(pty_stdout) self.assertEqual(next_size, len(line)) - data = helpers.read_data(pty_stdout, next_size) + data = read_data(pty_stdout, next_size) self.assertEqual(data.decode('utf-8'), line) diff --git a/tests/integration/exec_test.py b/tests/integration/exec_test.py index 9f548080..d0c8c9bf 100644 --- a/tests/integration/exec_test.py +++ b/tests/integration/exec_test.py @@ -1,5 +1,8 @@ import pytest +from docker.utils.socket import next_packet_size +from docker.utils.socket import read_data + from .. 
import helpers BUSYBOX = helpers.BUSYBOX @@ -107,9 +110,9 @@ class ExecTest(helpers.BaseTestCase): socket = self.client.exec_start(exec_id, socket=True) self.addCleanup(socket.close) - next_size = helpers.next_packet_size(socket) + next_size = next_packet_size(socket) self.assertEqual(next_size, len(line)) - data = helpers.read_data(socket, next_size) + data = read_data(socket, next_size) self.assertEqual(data.decode('utf-8'), line) def test_exec_inspect(self): From b100666a3c4ce97d9957003a0cd77741058ef752 Mon Sep 17 00:00:00 2001 From: Aanand Prasad Date: Wed, 13 Jul 2016 15:45:37 -0400 Subject: [PATCH 19/83] Remove duplicated methods from container.py Signed-off-by: Aanand Prasad --- docker/client.py | 32 +------------------------------- 1 file changed, 1 insertion(+), 31 deletions(-) diff --git a/docker/client.py b/docker/client.py index dbbfb06c..53cc5c29 100644 --- a/docker/client.py +++ b/docker/client.py @@ -32,6 +32,7 @@ from .ssladapter import ssladapter from .tls import TLSConfig from .transport import UnixAdapter from .utils import utils, check_resource, update_headers, kwargs_from_env +from .utils.socket import read_socket, next_packet_size try: from .transport import NpipeAdapter except ImportError: @@ -309,37 +310,6 @@ class Client( yield out def _read_from_socket(self, response, stream): - def read_socket(socket, n=4096): - recoverable_errors = ( - errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK - ) - - # wait for data to become available - select.select([socket], [], []) - - try: - if hasattr(socket, 'recv'): - return socket.recv(n) - return os.read(socket.fileno(), n) - except EnvironmentError as e: - if e.errno not in recoverable_errors: - raise - - def next_packet_size(socket): - data = six.binary_type() - while len(data) < 8: - next_data = read_socket(socket, 8 - len(data)) - if not next_data: - return 0 - data = data + next_data - - if data is None: - return 0 - - if len(data) == 8: - _, actual = struct.unpack('>BxxxL', data) - return actual - 
def read_loop(socket): n = next_packet_size(socket) while n > 0: From 43158cfe3fd9299c4c47536cefd4d683d627d6a1 Mon Sep 17 00:00:00 2001 From: Aanand Prasad Date: Wed, 13 Jul 2016 16:06:41 -0400 Subject: [PATCH 20/83] Move read_loop() into docker.utils.socket.read_iter() Signed-off-by: Aanand Prasad --- docker/client.py | 16 ++++------------ docker/utils/socket.py | 7 +++++++ 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/docker/client.py b/docker/client.py index 53cc5c29..7df587c7 100644 --- a/docker/client.py +++ b/docker/client.py @@ -32,7 +32,7 @@ from .ssladapter import ssladapter from .tls import TLSConfig from .transport import UnixAdapter from .utils import utils, check_resource, update_headers, kwargs_from_env -from .utils.socket import read_socket, next_packet_size +from .utils.socket import read_socket, next_packet_size, read_iter try: from .transport import NpipeAdapter except ImportError: @@ -310,20 +310,12 @@ class Client( yield out def _read_from_socket(self, response, stream): - def read_loop(socket): - n = next_packet_size(socket) - while n > 0: - yield read_socket(socket, n) - n = next_packet_size(socket) - socket = self._get_raw_response_socket(response) + if stream: - return read_loop(socket) + return read_iter(socket) else: - data = six.binary_type() - for d in read_loop(socket): - data += d - return data + return six.binary_type().join(read_iter(socket)) def _disable_socket_timeout(self, socket): """ Depending on the combination of python version and whether we're diff --git a/docker/utils/socket.py b/docker/utils/socket.py index f81d2f5d..2fb1180d 100644 --- a/docker/utils/socket.py +++ b/docker/utils/socket.py @@ -47,3 +47,10 @@ def read_data(socket, packet_size): assert False, "Failed trying to read in the data" data += next_data return data + + +def read_iter(socket): + n = next_packet_size(socket) + while n > 0: + yield read_socket(socket, n) + n = next_packet_size(socket) From 3e2f4a61424c434949a4a080657506ee4eaaa776 Mon 
Sep 17 00:00:00 2001 From: Aanand Prasad Date: Wed, 13 Jul 2016 16:50:16 -0400 Subject: [PATCH 21/83] Refactors - `read_data()` raises an exception instead of asserting `False` - `next_packet_size()` uses `read_data()` - Renamed `packet_size` arg to `n` for consistency Signed-off-by: Aanand Prasad --- docker/utils/socket.py | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/docker/utils/socket.py b/docker/utils/socket.py index 2fb1180d..fbbf1e62 100644 --- a/docker/utils/socket.py +++ b/docker/utils/socket.py @@ -6,6 +6,10 @@ import struct import six +class SocketError(Exception): + pass + + def read_socket(socket, n=4096): """ Code stolen from dockerpty to read the socket """ recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK) @@ -24,27 +28,22 @@ def read_socket(socket, n=4096): def next_packet_size(socket): """ Code stolen from dockerpty to get the next packet size """ - data = six.binary_type() - while len(data) < 8: - next_data = read_socket(socket, 8 - len(data)) - if not next_data: - return 0 - data = data + next_data - if data is None: + try: + data = read_data(socket, 8) + except SocketError: return 0 - if len(data) == 8: - _, actual = struct.unpack('>BxxxL', data) - return actual + _, actual = struct.unpack('>BxxxL', data) + return actual -def read_data(socket, packet_size): +def read_data(socket, n): data = six.binary_type() - while len(data) < packet_size: - next_data = read_socket(socket, packet_size - len(data)) + while len(data) < n: + next_data = read_socket(socket, n - len(data)) if not next_data: - assert False, "Failed trying to read in the data" + raise SocketError("Unexpected EOF") data += next_data return data From ce2b60ecf6aa56de73ee3b8ab6c67ee10905e0ac Mon Sep 17 00:00:00 2001 From: Aanand Prasad Date: Wed, 13 Jul 2016 16:52:15 -0400 Subject: [PATCH 22/83] Document all socket utility methods Signed-off-by: Aanand Prasad --- docker/utils/socket.py | 16 ++++++++++++++-- 1 file 
changed, 14 insertions(+), 2 deletions(-) diff --git a/docker/utils/socket.py b/docker/utils/socket.py index fbbf1e62..47b2320f 100644 --- a/docker/utils/socket.py +++ b/docker/utils/socket.py @@ -11,7 +11,9 @@ class SocketError(Exception): def read_socket(socket, n=4096): - """ Code stolen from dockerpty to read the socket """ + """ + Reads at most n bytes from socket + """ recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK) # wait for data to become available @@ -27,8 +29,12 @@ def read_socket(socket, n=4096): def next_packet_size(socket): - """ Code stolen from dockerpty to get the next packet size """ + """ + Returns the size of the next frame of data waiting to be read from socket, + according to the protocol defined here: + https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container + """ try: data = read_data(socket, 8) except SocketError: @@ -39,6 +45,9 @@ def next_packet_size(socket): def read_data(socket, n): + """ + Reads exactly n bytes from socket + """ data = six.binary_type() while len(data) < n: next_data = read_socket(socket, n - len(data)) @@ -49,6 +58,9 @@ def read_data(socket, n): def read_iter(socket): + """ + Returns a generator of frames read from socket + """ n = next_packet_size(socket) while n > 0: yield read_socket(socket, n) From 472a7ffce8032a92dbcc264b9178a3e92695f8ae Mon Sep 17 00:00:00 2001 From: Aanand Prasad Date: Wed, 13 Jul 2016 16:52:58 -0400 Subject: [PATCH 23/83] Remove unused imports Signed-off-by: Aanand Prasad --- docker/client.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/docker/client.py b/docker/client.py index 7df587c7..e131829f 100644 --- a/docker/client.py +++ b/docker/client.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import errno import json -import os -import select import struct import requests @@ -32,7 +29,7 @@ from .ssladapter import ssladapter from .tls import TLSConfig from .transport import UnixAdapter from .utils import utils, check_resource, update_headers, kwargs_from_env -from .utils.socket import read_socket, next_packet_size, read_iter +from .utils.socket import read_iter try: from .transport import NpipeAdapter except ImportError: From 456bfa1c1d2bbca68eb91343d210f55f645c5a33 Mon Sep 17 00:00:00 2001 From: Aanand Prasad Date: Wed, 13 Jul 2016 16:53:41 -0400 Subject: [PATCH 24/83] Reorder socket.py methods Signed-off-by: Aanand Prasad --- docker/utils/socket.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/docker/utils/socket.py b/docker/utils/socket.py index 47b2320f..0174d5f5 100644 --- a/docker/utils/socket.py +++ b/docker/utils/socket.py @@ -28,6 +28,19 @@ def read_socket(socket, n=4096): raise +def read_data(socket, n): + """ + Reads exactly n bytes from socket + """ + data = six.binary_type() + while len(data) < n: + next_data = read_socket(socket, n - len(data)) + if not next_data: + raise SocketError("Unexpected EOF") + data += next_data + return data + + def next_packet_size(socket): """ Returns the size of the next frame of data waiting to be read from socket, @@ -44,19 +57,6 @@ def next_packet_size(socket): return actual -def read_data(socket, n): - """ - Reads exactly n bytes from socket - """ - data = six.binary_type() - while len(data) < n: - next_data = read_socket(socket, n - len(data)) - if not next_data: - raise SocketError("Unexpected EOF") - data += next_data - return data - - def read_iter(socket): """ Returns a generator of frames read from socket From 9fb2caecb9e58b287fc56f84a8135848e30e1e01 Mon Sep 17 00:00:00 2001 From: Aanand Prasad Date: Wed, 13 Jul 2016 16:54:37 -0400 Subject: [PATCH 25/83] Rename next_packet_size to next_frame_size Signed-off-by: Aanand Prasad --- docker/utils/socket.py 
| 6 +++--- tests/integration/container_test.py | 4 ++-- tests/integration/exec_test.py | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docker/utils/socket.py b/docker/utils/socket.py index 0174d5f5..fb099b3e 100644 --- a/docker/utils/socket.py +++ b/docker/utils/socket.py @@ -41,7 +41,7 @@ def read_data(socket, n): return data -def next_packet_size(socket): +def next_frame_size(socket): """ Returns the size of the next frame of data waiting to be read from socket, according to the protocol defined here: @@ -61,7 +61,7 @@ def read_iter(socket): """ Returns a generator of frames read from socket """ - n = next_packet_size(socket) + n = next_frame_size(socket) while n > 0: yield read_socket(socket, n) - n = next_packet_size(socket) + n = next_frame_size(socket) diff --git a/tests/integration/container_test.py b/tests/integration/container_test.py index 594aaa32..b2f0e511 100644 --- a/tests/integration/container_test.py +++ b/tests/integration/container_test.py @@ -3,7 +3,7 @@ import signal import tempfile import docker -from docker.utils.socket import next_packet_size +from docker.utils.socket import next_frame_size from docker.utils.socket import read_data import pytest import six @@ -1027,7 +1027,7 @@ class AttachContainerTest(helpers.BaseTestCase): self.client.start(ident) - next_size = next_packet_size(pty_stdout) + next_size = next_frame_size(pty_stdout) self.assertEqual(next_size, len(line)) data = read_data(pty_stdout, next_size) self.assertEqual(data.decode('utf-8'), line) diff --git a/tests/integration/exec_test.py b/tests/integration/exec_test.py index d0c8c9bf..2debe306 100644 --- a/tests/integration/exec_test.py +++ b/tests/integration/exec_test.py @@ -1,6 +1,6 @@ import pytest -from docker.utils.socket import next_packet_size +from docker.utils.socket import next_frame_size from docker.utils.socket import read_data from .. 
import helpers @@ -110,7 +110,7 @@ class ExecTest(helpers.BaseTestCase): socket = self.client.exec_start(exec_id, socket=True) self.addCleanup(socket.close) - next_size = next_packet_size(socket) + next_size = next_frame_size(socket) self.assertEqual(next_size, len(line)) data = read_data(socket, next_size) self.assertEqual(data.decode('utf-8'), line) From 69832627f89bfc7810aebbe6560a1d8f8d19feb4 Mon Sep 17 00:00:00 2001 From: Aanand Prasad Date: Wed, 13 Jul 2016 17:41:59 -0400 Subject: [PATCH 26/83] Rename read_iter() to frames_iter() This makes it more clearly high-level and distinct from the raw data-reading functions Signed-off-by: Aanand Prasad --- docker/client.py | 6 +++--- docker/utils/socket.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/client.py b/docker/client.py index e131829f..6ca9e57a 100644 --- a/docker/client.py +++ b/docker/client.py @@ -29,7 +29,7 @@ from .ssladapter import ssladapter from .tls import TLSConfig from .transport import UnixAdapter from .utils import utils, check_resource, update_headers, kwargs_from_env -from .utils.socket import read_iter +from .utils.socket import frames_iter try: from .transport import NpipeAdapter except ImportError: @@ -310,9 +310,9 @@ class Client( socket = self._get_raw_response_socket(response) if stream: - return read_iter(socket) + return frames_iter(socket) else: - return six.binary_type().join(read_iter(socket)) + return six.binary_type().join(frames_iter(socket)) def _disable_socket_timeout(self, socket): """ Depending on the combination of python version and whether we're diff --git a/docker/utils/socket.py b/docker/utils/socket.py index fb099b3e..610271de 100644 --- a/docker/utils/socket.py +++ b/docker/utils/socket.py @@ -57,7 +57,7 @@ def next_frame_size(socket): return actual -def read_iter(socket): +def frames_iter(socket): """ Returns a generator of frames read from socket """ From 267021e4535606165887760b88e75ad79542ae99 Mon Sep 17 00:00:00 2001 From: Aanand 
Prasad Date: Wed, 13 Jul 2016 18:58:57 -0400 Subject: [PATCH 27/83] Rename read methods for clarity read_socket() is now just read(), because its behaviour is consistent with `os.read` et al. Signed-off-by: Aanand Prasad --- docker/utils/socket.py | 11 ++++++----- tests/integration/container_test.py | 4 ++-- tests/integration/exec_test.py | 4 ++-- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/docker/utils/socket.py b/docker/utils/socket.py index 610271de..ed343507 100644 --- a/docker/utils/socket.py +++ b/docker/utils/socket.py @@ -10,7 +10,7 @@ class SocketError(Exception): pass -def read_socket(socket, n=4096): +def read(socket, n=4096): """ Reads at most n bytes from socket """ @@ -28,13 +28,14 @@ def read_socket(socket, n=4096): raise -def read_data(socket, n): +def read_exactly(socket, n): """ Reads exactly n bytes from socket + Raises SocketError if there isn't enough data """ data = six.binary_type() while len(data) < n: - next_data = read_socket(socket, n - len(data)) + next_data = read(socket, n - len(data)) if not next_data: raise SocketError("Unexpected EOF") data += next_data @@ -49,7 +50,7 @@ def next_frame_size(socket): https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container """ try: - data = read_data(socket, 8) + data = read_exactly(socket, 8) except SocketError: return 0 @@ -63,5 +64,5 @@ def frames_iter(socket): """ n = next_frame_size(socket) while n > 0: - yield read_socket(socket, n) + yield read(socket, n) n = next_frame_size(socket) diff --git a/tests/integration/container_test.py b/tests/integration/container_test.py index b2f0e511..61b33983 100644 --- a/tests/integration/container_test.py +++ b/tests/integration/container_test.py @@ -4,7 +4,7 @@ import tempfile import docker from docker.utils.socket import next_frame_size -from docker.utils.socket import read_data +from docker.utils.socket import read_exactly import pytest import six @@ -1029,7 +1029,7 @@ class 
AttachContainerTest(helpers.BaseTestCase): next_size = next_frame_size(pty_stdout) self.assertEqual(next_size, len(line)) - data = read_data(pty_stdout, next_size) + data = read_exactly(pty_stdout, next_size) self.assertEqual(data.decode('utf-8'), line) diff --git a/tests/integration/exec_test.py b/tests/integration/exec_test.py index 2debe306..8bf2762a 100644 --- a/tests/integration/exec_test.py +++ b/tests/integration/exec_test.py @@ -1,7 +1,7 @@ import pytest from docker.utils.socket import next_frame_size -from docker.utils.socket import read_data +from docker.utils.socket import read_exactly from .. import helpers @@ -112,7 +112,7 @@ class ExecTest(helpers.BaseTestCase): next_size = next_frame_size(socket) self.assertEqual(next_size, len(line)) - data = read_data(socket, next_size) + data = read_exactly(socket, next_size) self.assertEqual(data.decode('utf-8'), line) def test_exec_inspect(self): From bd73225e14265dae4e2de1b15ad4a0c7fbc3e5ba Mon Sep 17 00:00:00 2001 From: Ben Firshman Date: Fri, 8 Jul 2016 13:50:50 +0100 Subject: [PATCH 28/83] Set custom user agent on client Signed-off-by: Ben Firshman --- docker/client.py | 4 +++- docker/constants.py | 3 +++ docs/api.md | 1 + tests/unit/api_test.py | 30 ++++++++++++++++++++++++++++++ 4 files changed, 37 insertions(+), 1 deletion(-) diff --git a/docker/client.py b/docker/client.py index 6ca9e57a..c3e5874e 100644 --- a/docker/client.py +++ b/docker/client.py @@ -50,7 +50,8 @@ class Client( api.VolumeApiMixin, api.NetworkApiMixin): def __init__(self, base_url=None, version=None, - timeout=constants.DEFAULT_TIMEOUT_SECONDS, tls=False): + timeout=constants.DEFAULT_TIMEOUT_SECONDS, tls=False, + user_agent=constants.DEFAULT_USER_AGENT): super(Client, self).__init__() if tls and not base_url: @@ -60,6 +61,7 @@ class Client( self.base_url = base_url self.timeout = timeout + self.headers['User-Agent'] = user_agent self._auth_configs = auth.load_config() diff --git a/docker/constants.py b/docker/constants.py index 
0388f705..904d50ea 100644 --- a/docker/constants.py +++ b/docker/constants.py @@ -1,4 +1,5 @@ import sys +from .version import version DEFAULT_DOCKER_API_VERSION = '1.22' DEFAULT_TIMEOUT_SECONDS = 60 @@ -12,3 +13,5 @@ INSECURE_REGISTRY_DEPRECATION_WARNING = \ 'is deprecated and non-functional. Please remove it.' IS_WINDOWS_PLATFORM = (sys.platform == 'win32') + +DEFAULT_USER_AGENT = "docker-py/{0}".format(version) diff --git a/docs/api.md b/docs/api.md index 41c5e6cf..e058deb7 100644 --- a/docs/api.md +++ b/docs/api.md @@ -16,6 +16,7 @@ is hosted. to use the API version provided by the server. * timeout (int): The HTTP request timeout, in seconds. * tls (bool or [TLSConfig](tls.md#TLSConfig)): Equivalent CLI options: `docker --tls ...` +* user_agent (str): Set a custom user agent for requests to the server. **** diff --git a/tests/unit/api_test.py b/tests/unit/api_test.py index 34bf14f6..696c0739 100644 --- a/tests/unit/api_test.py +++ b/tests/unit/api_test.py @@ -420,3 +420,33 @@ class StreamTest(base.Cleanup, base.BaseTestCase): self.assertEqual(list(stream), [ str(i).encode() for i in range(50)]) + + +class UserAgentTest(base.BaseTestCase): + def setUp(self): + self.patcher = mock.patch.object( + docker.Client, + 'send', + return_value=fake_resp("GET", "%s/version" % fake_api.prefix) + ) + self.mock_send = self.patcher.start() + + def tearDown(self): + self.patcher.stop() + + def test_default_user_agent(self): + client = docker.Client() + client.version() + + self.assertEqual(self.mock_send.call_count, 1) + headers = self.mock_send.call_args[0][0].headers + expected = 'docker-py/%s' % docker.__version__ + self.assertEqual(headers['User-Agent'], expected) + + def test_custom_user_agent(self): + client = docker.Client(user_agent='foo/bar') + client.version() + + self.assertEqual(self.mock_send.call_count, 1) + headers = self.mock_send.call_args[0][0].headers + self.assertEqual(headers['User-Agent'], 'foo/bar') From 9b63bed6a0b5185b043e85df8c49d86d2c048aa1 Mon Sep 
17 00:00:00 2001 From: Keerthan Reddy Mala Date: Thu, 14 Jul 2016 22:43:33 -0600 Subject: [PATCH 29/83] Add optional auth config to docker push Signed-off-by: Keerthan Reddy Mala --- docker/api/image.py | 27 +++++++++++++++++---------- docs/api.md | 2 ++ 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/docker/api/image.py b/docker/api/image.py index 3e66347e..2bdbce83 100644 --- a/docker/api/image.py +++ b/docker/api/image.py @@ -205,7 +205,7 @@ class ImageApiMixin(object): return self._result(response) def push(self, repository, tag=None, stream=False, - insecure_registry=False, decode=False): + insecure_registry=False, auth_config=None, decode=False): if insecure_registry: warnings.warn( INSECURE_REGISTRY_DEPRECATION_WARNING.format('push()'), @@ -224,15 +224,22 @@ class ImageApiMixin(object): if utils.compare_version('1.5', self._version) >= 0: # If we don't have any auth data so far, try reloading the config # file one more time in case anything showed up in there. - if not self._auth_configs: - self._auth_configs = auth.load_config() - authcfg = auth.resolve_authconfig(self._auth_configs, registry) - - # Do not fail here if no authentication exists for this specific - # registry as we can have a readonly pull. Just put the header if - # we can. - if authcfg: - headers['X-Registry-Auth'] = auth.encode_header(authcfg) + if auth_config is None: + log.debug('Looking for auth config') + if not self._auth_configs: + log.debug( + "No auth config in memory - loading from filesystem" + ) + self._auth_configs = auth.load_config() + authcfg = auth.resolve_authconfig(self._auth_configs, registry) + # Do not fail here if no authentication exists for this + # specific registry as we can have a readonly pull. Just + # put the header if we can. 
+ if authcfg: + headers['X-Registry-Auth'] = auth.encode_header(authcfg) + else: + log.debug('Sending supplied auth config') + headers['X-Registry-Auth'] = auth.encode_header(auth_config) response = self._post_json( u, None, headers=headers, stream=stream, params=params diff --git a/docs/api.md b/docs/api.md index e058deb7..9b3a7265 100644 --- a/docs/api.md +++ b/docs/api.md @@ -801,6 +801,8 @@ command. * tag (str): An optional tag to push * stream (bool): Stream the output as a blocking generator * insecure_registry (bool): Use `http://` to connect to the registry +* auth_config (dict): Override the credentials that Client.login has set for this request + `auth_config` should contain the `username` and `password` keys to be valid. **Returns** (generator or str): The output of the upload From 1294d3c4103fc33949edc146be9bc91fd1a05c4d Mon Sep 17 00:00:00 2001 From: Keerthan Reddy Mala Date: Thu, 21 Jul 2016 11:01:03 -0600 Subject: [PATCH 30/83] Add unit tests Signed-off-by: Keerthan Reddy Mala --- tests/unit/image_test.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/tests/unit/image_test.py b/tests/unit/image_test.py index 8fd894cc..b2b1dd6d 100644 --- a/tests/unit/image_test.py +++ b/tests/unit/image_test.py @@ -2,6 +2,7 @@ import docker import pytest from . 
import fake_api +from docker import auth from .api_test import ( DockerClientTest, fake_request, DEFAULT_TIMEOUT_SECONDS, url_prefix, fake_resolve_authconfig @@ -262,6 +263,31 @@ class ImageTest(DockerClientTest): timeout=DEFAULT_TIMEOUT_SECONDS ) + def test_push_image_with_auth(self): + auth_config = { + 'username': "test_user", + 'password': "test_password", + 'serveraddress': "test_server", + } + encoded_auth = auth.encode_header(auth_config) + self.client.push( + fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME, + auth_config=auth_config + ) + + fake_request.assert_called_with( + 'POST', + url_prefix + 'images/test_image/push', + params={ + 'tag': fake_api.FAKE_TAG_NAME, + }, + data='{}', + headers={'Content-Type': 'application/json', + 'X-Registry-Auth': encoded_auth}, + stream=False, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + def test_push_image_stream(self): with mock.patch('docker.auth.auth.resolve_authconfig', fake_resolve_authconfig): From cea73760863182035ddbf1c336b388df283c7431 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Mon, 25 Jul 2016 15:04:04 -0700 Subject: [PATCH 31/83] Send LinkLocalIPs as part of IPAMConfig dictionary Signed-off-by: Joffrey F --- docker/utils/utils.py | 8 ++++---- tests/integration/network_test.py | 21 +++++++++++++++++++++ 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/docker/utils/utils.py b/docker/utils/utils.py index b38cda47..4e48fc7e 100644 --- a/docker/utils/utils.py +++ b/docker/utils/utils.py @@ -894,15 +894,15 @@ def create_endpoint_config(version, aliases=None, links=None, if ipv6_address: ipam_config['IPv6Address'] = ipv6_address - if ipam_config: - endpoint_config['IPAMConfig'] = ipam_config - if link_local_ips is not None: if version_lt(version, '1.24'): raise errors.InvalidVersion( 'link_local_ips is not supported for API version < 1.24' ) - endpoint_config['LinkLocalIPs'] = link_local_ips + ipam_config['LinkLocalIPs'] = link_local_ips + + if ipam_config: + endpoint_config['IPAMConfig'] = 
ipam_config return endpoint_config diff --git a/tests/integration/network_test.py b/tests/integration/network_test.py index f719fea4..27e1b14d 100644 --- a/tests/integration/network_test.py +++ b/tests/integration/network_test.py @@ -249,6 +249,27 @@ class TestNetworks(helpers.BaseTestCase): '2001:389::f00d' ) + @requires_api_version('1.24') + def test_create_with_linklocal_ips(self): + container = self.client.create_container( + 'busybox', 'top', + networking_config=self.client.create_networking_config( + { + 'bridge': self.client.create_endpoint_config( + link_local_ips=['169.254.8.8'] + ) + } + ), + host_config=self.client.create_host_config(network_mode='bridge') + ) + self.tmp_containers.append(container) + self.client.start(container) + container_data = self.client.inspect_container(container) + net_cfg = container_data['NetworkSettings']['Networks']['bridge'] + assert 'IPAMConfig' in net_cfg + assert 'LinkLocalIPs' in net_cfg['IPAMConfig'] + assert net_cfg['IPAMConfig']['LinkLocalIPs'] == ['169.254.8.8'] + @requires_api_version('1.22') def test_create_with_links(self): net_name, net_id = self.create_network() From 0e68b0a42989fa8f072acde2f947055ae7d64d7a Mon Sep 17 00:00:00 2001 From: Aanand Prasad Date: Mon, 25 Jul 2016 14:08:25 +0100 Subject: [PATCH 32/83] Default to npipe address on Windows Signed-off-by: Aanand Prasad --- docker/utils/utils.py | 4 +++- tests/unit/utils_test.py | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/docker/utils/utils.py b/docker/utils/utils.py index 4e48fc7e..4d218692 100644 --- a/docker/utils/utils.py +++ b/docker/utils/utils.py @@ -36,6 +36,8 @@ from .types import Ulimit, LogConfig DEFAULT_HTTP_HOST = "127.0.0.1" DEFAULT_UNIX_SOCKET = "http+unix://var/run/docker.sock" +DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine' + BYTE_UNITS = { 'b': 1, 'k': 1024, @@ -390,7 +392,7 @@ def parse_host(addr, is_win32=False, tls=False): path = '' if not addr and is_win32: - addr = '{0}:{1}'.format(DEFAULT_HTTP_HOST, 
2375) + addr = DEFAULT_NPIPE if not addr or addr.strip() == 'unix://': return DEFAULT_UNIX_SOCKET diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py index 128778f1..68484fe5 100644 --- a/tests/unit/utils_test.py +++ b/tests/unit/utils_test.py @@ -419,11 +419,11 @@ class ParseHostTest(base.BaseTestCase): def test_parse_host_empty_value(self): unix_socket = 'http+unix://var/run/docker.sock' - tcp_port = 'http://127.0.0.1:2375' + npipe = 'npipe:////./pipe/docker_engine' for val in [None, '']: assert parse_host(val, is_win32=False) == unix_socket - assert parse_host(val, is_win32=True) == tcp_port + assert parse_host(val, is_win32=True) == npipe def test_parse_host_tls(self): host_value = 'myhost.docker.net:3348' From 2d3bda84de39a75e560fc79512143d43e5d61226 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Tue, 26 Jul 2016 15:48:29 -0700 Subject: [PATCH 33/83] dev version Signed-off-by: Joffrey F --- docker/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/version.py b/docker/version.py index 95405c74..dea7b7cb 100644 --- a/docker/version.py +++ b/docker/version.py @@ -1,2 +1,2 @@ -version = "1.9.0" +version = "1.10.0-dev" version_info = tuple([int(d) for d in version.split("-")[0].split(".")]) From 723d144db528ff8defce7c6172ab11a4aa67f54c Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Wed, 27 Jul 2016 18:42:14 -0700 Subject: [PATCH 34/83] Add support for IPv6 docker host connections. 
Signed-off-by: Joffrey F --- docker/utils/utils.py | 38 ++++++++++++++++++-------------------- tests/unit/utils_test.py | 10 +++++++++- 2 files changed, 27 insertions(+), 21 deletions(-) diff --git a/docker/utils/utils.py b/docker/utils/utils.py index 4d218692..1cfc8acc 100644 --- a/docker/utils/utils.py +++ b/docker/utils/utils.py @@ -22,8 +22,8 @@ import tarfile import tempfile import warnings from distutils.version import StrictVersion -from fnmatch import fnmatch from datetime import datetime +from fnmatch import fnmatch import requests import six @@ -33,6 +33,10 @@ from .. import errors from .. import tls from .types import Ulimit, LogConfig +if six.PY2: + from urllib import splitnport +else: + from urllib.parse import splitnport DEFAULT_HTTP_HOST = "127.0.0.1" DEFAULT_UNIX_SOCKET = "http+unix://var/run/docker.sock" @@ -387,7 +391,6 @@ def parse_repository_tag(repo_name): # Protocol translation: tcp -> http, unix -> http+unix def parse_host(addr, is_win32=False, tls=False): proto = "http+unix" - host = DEFAULT_HTTP_HOST port = None path = '' @@ -427,32 +430,27 @@ def parse_host(addr, is_win32=False, tls=False): ) proto = "https" if tls else "http" - if proto != "http+unix" and ":" in addr: - host_parts = addr.split(':') - if len(host_parts) != 2: - raise errors.DockerException( - "Invalid bind address format: {0}".format(addr) - ) - if host_parts[0]: - host = host_parts[0] + if proto in ("http", "https"): + address_parts = addr.split('/', 1) + host = address_parts[0] + if len(address_parts) == 2: + path = '/' + address_parts[1] + host, port = splitnport(host) - port = host_parts[1] - if '/' in port: - port, path = port.split('/', 1) - path = '/{0}'.format(path) - try: - port = int(port) - except Exception: + if port is None: raise errors.DockerException( "Invalid port: {0}".format(addr) ) - elif proto in ("http", "https") and ':' not in addr: - raise errors.DockerException( - "Bind address needs a port: {0}".format(addr)) + if not host: + host = 
DEFAULT_HTTP_HOST else: host = addr + if proto in ("http", "https") and port == -1: + raise errors.DockerException( + "Bind address needs a port: {0}".format(addr)) + if proto == "http+unix" or proto == 'npipe': return "{0}://{1}".format(proto, host) return "{0}://{1}:{2}{3}".format(proto, host, port, path) diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py index 68484fe5..0f7a58c9 100644 --- a/tests/unit/utils_test.py +++ b/tests/unit/utils_test.py @@ -404,10 +404,18 @@ class ParseHostTest(base.BaseTestCase): 'https://kokia.jp:2375': 'https://kokia.jp:2375', 'unix:///var/run/docker.sock': 'http+unix:///var/run/docker.sock', 'unix://': 'http+unix://var/run/docker.sock', + '12.234.45.127:2375/docker/engine': ( + 'http://12.234.45.127:2375/docker/engine' + ), 'somehost.net:80/service/swarm': ( 'http://somehost.net:80/service/swarm' ), 'npipe:////./pipe/docker_engine': 'npipe:////./pipe/docker_engine', + '[fd12::82d1]:2375': 'http://[fd12::82d1]:2375', + 'https://[fd12:5672::12aa]:1090': 'https://[fd12:5672::12aa]:1090', + '[fd12::82d1]:2375/docker/engine': ( + 'http://[fd12::82d1]:2375/docker/engine' + ), } for host in invalid_hosts: @@ -415,7 +423,7 @@ class ParseHostTest(base.BaseTestCase): parse_host(host, None) for host, expected in valid_hosts.items(): - self.assertEqual(parse_host(host, None), expected, msg=host) + assert parse_host(host, None) == expected def test_parse_host_empty_value(self): unix_socket = 'http+unix://var/run/docker.sock' From f006da6a43e39a1126ccca1651e9ff4abda41aaa Mon Sep 17 00:00:00 2001 From: Aanand Prasad Date: Mon, 1 Aug 2016 13:51:58 +0100 Subject: [PATCH 35/83] More explicit debug for config path logic Signed-off-by: Aanand Prasad --- docker/auth/auth.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/docker/auth/auth.py b/docker/auth/auth.py index d23e6f3c..b61a8d09 100644 --- a/docker/auth/auth.py +++ b/docker/auth/auth.py @@ -160,18 +160,24 @@ def find_config_file(config_path=None): 
os.path.basename(DOCKER_CONFIG_FILENAME) ) if os.environ.get('DOCKER_CONFIG') else None - paths = [ + paths = filter(None, [ config_path, # 1 environment_path, # 2 os.path.join(os.path.expanduser('~'), DOCKER_CONFIG_FILENAME), # 3 os.path.join( os.path.expanduser('~'), LEGACY_DOCKER_CONFIG_FILENAME ) # 4 - ] + ]) + + log.debug("Trying paths: {0}".format(repr(paths))) for path in paths: - if path and os.path.exists(path): + if os.path.exists(path): + log.debug("Found file at path: {0}".format(path)) return path + + log.debug("No config file found") + return None @@ -186,7 +192,6 @@ def load_config(config_path=None): config_file = find_config_file(config_path) if not config_file: - log.debug("File doesn't exist") return {} try: From f8b843b127a99dc329b9da7da4bedc050be36ebf Mon Sep 17 00:00:00 2001 From: Tristan Escalada Date: Thu, 26 May 2016 21:56:34 -0400 Subject: [PATCH 36/83] 1059-Fixing a bug with multiple json objects This splits the text by CRLF and then json.loads each part independently instead of attempting the parse the whole string. 
Signed-off-by: Tristan Escalada --- docker/client.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/docker/client.py b/docker/client.py index c3e5874e..de67dbe4 100644 --- a/docker/client.py +++ b/docker/client.py @@ -251,8 +251,16 @@ class Client( if decode: if six.PY3: data = data.decode('utf-8') - data = json.loads(data) - yield data + # remove the trailing newline + data = data.strip() + # split the data at any newlines + data_list = data.split("\r\n") + # load and yield each line separately + for data in data_list: + data = json.loads(data) + yield data + else: + yield data else: # Response isn't chunked, meaning we probably # encountered an error immediately From dec29e1c10be3fbba239891e3bf47dc6b40ee567 Mon Sep 17 00:00:00 2001 From: Jari Takkala Date: Thu, 28 Jul 2016 22:57:35 -0400 Subject: [PATCH 37/83] Add support for sysctl when creating container Closes #1144 Signed-off-by: Jari Takkala --- docker/utils/utils.py | 9 ++++++++- docs/hostconfig.md | 1 + tests/unit/container_test.py | 27 +++++++++++++++++++++++++++ 3 files changed, 36 insertions(+), 1 deletion(-) diff --git a/docker/utils/utils.py b/docker/utils/utils.py index 1cfc8acc..00a7af14 100644 --- a/docker/utils/utils.py +++ b/docker/utils/utils.py @@ -619,7 +619,7 @@ def create_host_config(binds=None, port_bindings=None, lxc_conf=None, blkio_weight_device=None, device_read_bps=None, device_write_bps=None, device_read_iops=None, device_write_iops=None, oom_kill_disable=False, - shm_size=None, version=None, tmpfs=None, + shm_size=None, sysctls=None, version=None, tmpfs=None, oom_score_adj=None): host_config = {} @@ -725,6 +725,13 @@ def create_host_config(binds=None, port_bindings=None, lxc_conf=None, host_config['SecurityOpt'] = security_opt + if sysctls: + if not isinstance(sysctls, dict): + raise host_config_type_error('sysctls', sysctls, 'dict') + host_config['Sysctls'] = {} + for k, v in six.iteritems(sysctls): + host_config['Sysctls'][k] = six.text_type(v) + if
volumes_from is not None: if isinstance(volumes_from, six.string_types): volumes_from = volumes_from.split(',') diff --git a/docs/hostconfig.md b/docs/hostconfig.md index c1e23533..01c4625f 100644 --- a/docs/hostconfig.md +++ b/docs/hostconfig.md @@ -123,6 +123,7 @@ for example: for more information. * tmpfs: Temporary filesystems to mouunt. See [Using tmpfs](tmpfs.md) for more information. +* sysctls (dict): Kernel parameters to set in the container. **Returns** (dict) HostConfig dictionary diff --git a/tests/unit/container_test.py b/tests/unit/container_test.py index 2a72c179..4c94c844 100644 --- a/tests/unit/container_test.py +++ b/tests/unit/container_test.py @@ -1074,6 +1074,33 @@ class CreateContainerTest(DockerClientTest): DEFAULT_TIMEOUT_SECONDS ) + @requires_api_version('1.24') + def test_create_container_with_sysctl(self): + self.client.create_container( + 'busybox', 'true', + host_config=self.client.create_host_config( + sysctls={ + 'net.core.somaxconn': 1024, + 'net.ipv4.tcp_syncookies': '0', + } + ) + ) + + args = fake_request.call_args + self.assertEqual(args[0][1], url_prefix + 'containers/create') + expected_payload = self.base_create_payload() + expected_payload['HostConfig'] = self.client.create_host_config() + expected_payload['HostConfig']['Sysctls'] = { + 'net.core.somaxconn': '1024', 'net.ipv4.tcp_syncookies': '0', + } + self.assertEqual(json.loads(args[1]['data']), expected_payload) + self.assertEqual( + args[1]['headers'], {'Content-Type': 'application/json'} + ) + self.assertEqual( + args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS + ) + class ContainerTest(DockerClientTest): def test_list_containers(self): From ae7cb4b99f45ec88616da5c4c04129a78a8c0c46 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Tue, 2 Aug 2016 17:25:50 -0700 Subject: [PATCH 38/83] Avoid crashing in update_headers decorator when headers kwarg is None Signed-off-by: Joffrey F --- docker/utils/decorators.py | 2 +- tests/unit/utils_test.py | 35 
++++++++++++++++++++++++++++++++++- 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py index 7c41a5f8..46c28a80 100644 --- a/docker/utils/decorators.py +++ b/docker/utils/decorators.py @@ -40,7 +40,7 @@ def minimum_version(version): def update_headers(f): def inner(self, *args, **kwargs): if 'HttpHeaders' in self._auth_configs: - if 'headers' not in kwargs: + if not kwargs.get('headers'): kwargs['headers'] = self._auth_configs['HttpHeaders'] else: kwargs['headers'].update(self._auth_configs['HttpHeaders']) diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py index 0f7a58c9..47ced433 100644 --- a/tests/unit/utils_test.py +++ b/tests/unit/utils_test.py @@ -20,9 +20,11 @@ from docker.utils import ( create_host_config, Ulimit, LogConfig, parse_bytes, parse_env_file, exclude_paths, convert_volume_binds, decode_json_header, tar, split_command, create_ipam_config, create_ipam_pool, parse_devices, + update_headers, ) -from docker.utils.utils import create_endpoint_config + from docker.utils.ports import build_port_bindings, split_port +from docker.utils.utils import create_endpoint_config from .. 
import base from ..helpers import make_tree @@ -34,6 +36,37 @@ TEST_CERT_DIR = os.path.join( ) +class DecoratorsTest(base.BaseTestCase): + def test_update_headers(self): + sample_headers = { + 'X-Docker-Locale': 'en-US', + } + + def f(self, headers=None): + return headers + + client = Client() + client._auth_configs = {} + + g = update_headers(f) + assert g(client, headers=None) is None + assert g(client, headers={}) == {} + assert g(client, headers={'Content-type': 'application/json'}) == { + 'Content-type': 'application/json', + } + + client._auth_configs = { + 'HttpHeaders': sample_headers + } + + assert g(client, headers=None) == sample_headers + assert g(client, headers={}) == sample_headers + assert g(client, headers={'Content-type': 'application/json'}) == { + 'Content-type': 'application/json', + 'X-Docker-Locale': 'en-US', + } + + class HostConfigTest(base.BaseTestCase): def test_create_host_config_no_options(self): config = create_host_config(version='1.19') From 9d48b4f60323db22c45437892e450bf8e545d3ef Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Wed, 3 Aug 2016 16:48:41 -0700 Subject: [PATCH 39/83] Test fixes and updated Makefile for 1.12 testing Signed-off-by: Joffrey F --- Makefile | 23 +++++++++++++++++------ docker/utils/__init__.py | 13 +++++++------ tests/integration/container_test.py | 2 +- 3 files changed, 25 insertions(+), 13 deletions(-) diff --git a/Makefile b/Makefile index c8c72130..a635edfa 100644 --- a/Makefile +++ b/Makefile @@ -32,16 +32,27 @@ integration-test-py3: build-py3 integration-dind: build build-py3 docker rm -vf dpy-dind || : - docker run -d --name dpy-dind --privileged dockerswarm/dind:1.10.3 docker daemon -H tcp://0.0.0.0:2375 - docker run --env="DOCKER_HOST=tcp://docker:2375" --link=dpy-dind:docker docker-py py.test tests/integration - docker run --env="DOCKER_HOST=tcp://docker:2375" --link=dpy-dind:docker docker-py3 py.test tests/integration + docker run -d --name dpy-dind --privileged dockerswarm/dind:1.12.0 docker 
daemon\ + -H tcp://0.0.0.0:2375 + docker run --env="DOCKER_HOST=tcp://docker:2375" --link=dpy-dind:docker docker-py\ + py.test tests/integration + docker run --env="DOCKER_HOST=tcp://docker:2375" --link=dpy-dind:docker docker-py3\ + py.test tests/integration docker rm -vf dpy-dind integration-dind-ssl: build-dind-certs build build-py3 docker run -d --name dpy-dind-certs dpy-dind-certs - docker run -d --env="DOCKER_HOST=tcp://localhost:2375" --env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs" --volumes-from dpy-dind-certs --name dpy-dind-ssl -v /tmp --privileged dockerswarm/dind:1.10.3 docker daemon --tlsverify --tlscacert=/certs/ca.pem --tlscert=/certs/server-cert.pem --tlskey=/certs/server-key.pem -H tcp://0.0.0.0:2375 - docker run --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375" --env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs" --link=dpy-dind-ssl:docker docker-py py.test tests/integration - docker run --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375" --env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs" --link=dpy-dind-ssl:docker docker-py3 py.test tests/integration + docker run -d --env="DOCKER_HOST=tcp://localhost:2375" --env="DOCKER_TLS_VERIFY=1"\ + --env="DOCKER_CERT_PATH=/certs" --volumes-from dpy-dind-certs --name dpy-dind-ssl\ + -v /tmp --privileged dockerswarm/dind:1.12.0 docker daemon --tlsverify\ + --tlscacert=/certs/ca.pem --tlscert=/certs/server-cert.pem\ + --tlskey=/certs/server-key.pem -H tcp://0.0.0.0:2375 + docker run --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375"\ + --env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs"\ + --link=dpy-dind-ssl:docker docker-py py.test tests/integration + docker run --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375"\ + --env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs"\ + --link=dpy-dind-ssl:docker docker-py3 py.test tests/integration docker rm -vf dpy-dind-ssl dpy-dind-certs flake8: build diff --git 
a/docker/utils/__init__.py b/docker/utils/__init__.py index ccc38191..41df0047 100644 --- a/docker/utils/__init__.py +++ b/docker/utils/__init__.py @@ -1,11 +1,12 @@ +# flake8: noqa from .utils import ( compare_version, convert_port_bindings, convert_volume_binds, mkbuildcontext, tar, exclude_paths, parse_repository_tag, parse_host, - kwargs_from_env, convert_filters, datetime_to_timestamp, create_host_config, - create_container_config, parse_bytes, ping_registry, parse_env_file, - version_lt, version_gte, decode_json_header, split_command, + kwargs_from_env, convert_filters, datetime_to_timestamp, + create_host_config, create_container_config, parse_bytes, ping_registry, + parse_env_file, version_lt, version_gte, decode_json_header, split_command, create_ipam_config, create_ipam_pool, parse_devices, normalize_links, -) # flake8: noqa +) -from .types import Ulimit, LogConfig # flake8: noqa -from .decorators import check_resource, minimum_version, update_headers #flake8: noqa +from .types import Ulimit, LogConfig +from .decorators import check_resource, minimum_version, update_headers diff --git a/tests/integration/container_test.py b/tests/integration/container_test.py index 61b33983..f347c12a 100644 --- a/tests/integration/container_test.py +++ b/tests/integration/container_test.py @@ -292,7 +292,7 @@ class CreateContainerTest(helpers.BaseTestCase): ) self.client.start(container) - assert expected_msg in str(excinfo.value) + assert six.b(expected_msg) in excinfo.value.explanation def test_valid_no_log_driver_specified(self): log_config = docker.utils.LogConfig( From 44868fa0faf26a597703bf49114c54fe1a064066 Mon Sep 17 00:00:00 2001 From: minzhang Date: Wed, 29 Jun 2016 23:51:58 -0700 Subject: [PATCH 40/83] Added support for docker swarm api version 1.24. 
3 API are added swarm_init() swarm_leave() swarm_join() Signed-off-by: Min Zhang Signed-off-by: Min Zhang --- docker/api/__init__.py | 1 + docker/api/swarm.py | 41 +++++++++++++++++++++++++++++++++++++++++ docker/client.py | 3 ++- docs/swarm.md | 35 +++++++++++++++++++++++++++++++++++ 4 files changed, 79 insertions(+), 1 deletion(-) create mode 100644 docker/api/swarm.py create mode 100644 docs/swarm.md diff --git a/docker/api/__init__.py b/docker/api/__init__.py index 9e744289..b0d60878 100644 --- a/docker/api/__init__.py +++ b/docker/api/__init__.py @@ -6,3 +6,4 @@ from .exec_api import ExecApiMixin from .image import ImageApiMixin from .volume import VolumeApiMixin from .network import NetworkApiMixin +from .swarm import SwarmApiMixin diff --git a/docker/api/swarm.py b/docker/api/swarm.py new file mode 100644 index 00000000..1b6f3429 --- /dev/null +++ b/docker/api/swarm.py @@ -0,0 +1,41 @@ +from .. import utils +import logging +log = logging.getLogger(__name__) + + +class SwarmApiMixin(object): + @utils.minimum_version('1.24') + def swarm(self): + url = self._url('/swarm') + return self._result(self._get(url), True) + + @utils.minimum_version('1.24') + def swarm_init(self, listen_addr, force_new_cluster=False, + swarm_opts=None): + url = self._url('/swarm/init') + if swarm_opts is not None and not isinstance(swarm_opts, dict): + raise TypeError('swarm_opts must be a dictionary') + data = { + 'ListenAddr': listen_addr, + 'ForceNewCluster': force_new_cluster, + 'Spec': swarm_opts + } + return self._result(self._post_json(url, data=data), True) + + @utils.minimum_version('1.24') + def swarm_join(self, remote_address, listen_address=None, + secret=None, ca_cert_hash=None, manager=False): + data ={ + "RemoteAddr": remote_address, + "ListenAddr": listen_address, + "Secret": secret, + "CACertHash": ca_cert_hash, + "Manager": manager + } + url = self._url('/swarm/join', ) + return self._result(self._post_json(url, data=data), True) + + @utils.minimum_version('1.24') + 
def swarm_leave(self): + url = self._url('/swarm/leave') + return self._result(self._post(url)) diff --git a/docker/client.py b/docker/client.py index c3e5874e..1b5420e9 100644 --- a/docker/client.py +++ b/docker/client.py @@ -48,7 +48,8 @@ class Client( api.ExecApiMixin, api.ImageApiMixin, api.VolumeApiMixin, - api.NetworkApiMixin): + api.NetworkApiMixin, + api.SwarmApiMixin): def __init__(self, base_url=None, version=None, timeout=constants.DEFAULT_TIMEOUT_SECONDS, tls=False, user_agent=constants.DEFAULT_USER_AGENT): diff --git a/docs/swarm.md b/docs/swarm.md new file mode 100644 index 00000000..e3a1cd12 --- /dev/null +++ b/docs/swarm.md @@ -0,0 +1,35 @@ +# Using swarm for API version 1.24 or higher + +Swarm initialization is done in two parts. Provide a listen_addr and `force_new_cluster` (OPTIONAL) to +the `Client().swarm_init()` method, and declare mappings in the +`swarm_opts` section. + +```python +swarm_id = cli.swarm_init(listen_addr="0.0.0.0:4500", +swarm_opts={ + "AcceptancePolicy": { + "Policies": [ + { + "Role": "MANAGER", + "Autoaccept": True + } + ] + } +}) +``` + +Join another swarm, by providing the remote_address, listen_address(optional), +secret(optional), ca_cert_hash(optional), manager(optional) +```python +cli.swarm_join( + remote_address="swarm-master:2377", + manager=True +) +``` + + +Leave swarm + +```python +cli.swarm_leave() +``` From 9fdc8d476dfad2dad20ded9a1a471a225dc398aa Mon Sep 17 00:00:00 2001 From: minzhang Date: Wed, 29 Jun 2016 23:51:58 -0700 Subject: [PATCH 41/83] Added support for docker swarm api version 1.24.
3 API are added swarm_init() swarm_leave() swarm_join() Signed-off-by: Min Zhang Signed-off-by: Min Zhang --- docker/api/swarm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/api/swarm.py b/docker/api/swarm.py index 1b6f3429..be3eae41 100644 --- a/docker/api/swarm.py +++ b/docker/api/swarm.py @@ -25,7 +25,7 @@ class SwarmApiMixin(object): @utils.minimum_version('1.24') def swarm_join(self, remote_address, listen_address=None, secret=None, ca_cert_hash=None, manager=False): - data ={ + data = { "RemoteAddr": remote_address, "ListenAddr": listen_address, "Secret": secret, From 07563cfe3f565b57b955455d2ce2b350ed34883b Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Mon, 1 Aug 2016 13:59:52 -0700 Subject: [PATCH 42/83] Update swarm methods to include newly added parameters Rename swarm methods to be more explicit Utility methods / types to create swarm spec objects Integration tests Signed-off-by: Joffrey F --- docker/api/swarm.py | 51 ++++++++++++++++++------------ docker/constants.py | 2 +- docker/utils/__init__.py | 5 ++- docker/utils/types.py | 49 +++++++++++++++++++++++++++++ tests/integration/swarm_test.py | 55 +++++++++++++++++++++++++++++++++ 5 files changed, 140 insertions(+), 22 deletions(-) create mode 100644 tests/integration/swarm_test.py diff --git a/docker/api/swarm.py b/docker/api/swarm.py index be3eae41..bc2179cc 100644 --- a/docker/api/swarm.py +++ b/docker/api/swarm.py @@ -4,38 +4,49 @@ log = logging.getLogger(__name__) class SwarmApiMixin(object): + + def create_swarm_spec(self, *args, **kwargs): + return utils.SwarmSpec(*args, **kwargs) + @utils.minimum_version('1.24') - def swarm(self): + def init_swarm(self, advertise_addr, listen_addr='0.0.0.0:2377', + force_new_cluster=False, swarm_spec=None): + url = self._url('/swarm/init') + if swarm_spec is not None and not isinstance(swarm_spec, dict): + raise TypeError('swarm_spec must be a dictionary') + data = { + 'AdvertiseAddr': advertise_addr, + 'ListenAddr': listen_addr, + 
'ForceNewCluster': force_new_cluster, + 'Spec': swarm_spec, + } + response = self._post_json(url, data=data) + self._raise_for_status(response) + return True + + @utils.minimum_version('1.24') + def inspect_swarm(self): url = self._url('/swarm') return self._result(self._get(url), True) @utils.minimum_version('1.24') - def swarm_init(self, listen_addr, force_new_cluster=False, - swarm_opts=None): - url = self._url('/swarm/init') - if swarm_opts is not None and not isinstance(swarm_opts, dict): - raise TypeError('swarm_opts must be a dictionary') - data = { - 'ListenAddr': listen_addr, - 'ForceNewCluster': force_new_cluster, - 'Spec': swarm_opts - } - return self._result(self._post_json(url, data=data), True) - - @utils.minimum_version('1.24') - def swarm_join(self, remote_address, listen_address=None, + def join_swarm(self, remote_addresses, listen_address=None, secret=None, ca_cert_hash=None, manager=False): data = { - "RemoteAddr": remote_address, + "RemoteAddrs": remote_addresses, "ListenAddr": listen_address, "Secret": secret, "CACertHash": ca_cert_hash, "Manager": manager } - url = self._url('/swarm/join', ) - return self._result(self._post_json(url, data=data), True) + url = self._url('/swarm/join') + response = self._post_json(url, data=data) + self._raise_for_status(response) + return True @utils.minimum_version('1.24') - def swarm_leave(self): + def leave_swarm(self, force=False): url = self._url('/swarm/leave') - return self._result(self._post(url)) + response = self._post(url, params={'force': force}) + self._raise_for_status(response) + return True diff --git a/docker/constants.py b/docker/constants.py index 904d50ea..cf5a39ac 100644 --- a/docker/constants.py +++ b/docker/constants.py @@ -1,7 +1,7 @@ import sys from .version import version -DEFAULT_DOCKER_API_VERSION = '1.22' +DEFAULT_DOCKER_API_VERSION = '1.24' DEFAULT_TIMEOUT_SECONDS = 60 STREAM_HEADER_SIZE_BYTES = 8 CONTAINER_LIMITS_KEYS = [ diff --git a/docker/utils/__init__.py 
b/docker/utils/__init__.py index 41df0047..c02adea1 100644 --- a/docker/utils/__init__.py +++ b/docker/utils/__init__.py @@ -8,5 +8,8 @@ from .utils import ( create_ipam_config, create_ipam_pool, parse_devices, normalize_links, ) -from .types import Ulimit, LogConfig +from .types import LogConfig, Ulimit +from .types import ( + SwarmAcceptancePolicy, SwarmExternalCA, SwarmSpec, +) from .decorators import check_resource, minimum_version, update_headers diff --git a/docker/utils/types.py b/docker/utils/types.py index ea9f06d5..b970114e 100644 --- a/docker/utils/types.py +++ b/docker/utils/types.py @@ -94,3 +94,52 @@ class Ulimit(DictType): @hard.setter def hard(self, value): self['Hard'] = value + + +class SwarmSpec(DictType): + def __init__(self, policies=None, task_history_retention_limit=None, + snapshot_interval=None, keep_old_snapshots=None, + log_entries_for_slow_followers=None, heartbeat_tick=None, + election_tick=None, dispatcher_heartbeat_period=None, + node_cert_expiry=None, external_ca=None): + if policies is not None: + self['AcceptancePolicy'] = {'Policies': policies} + if task_history_retention_limit is not None: + self['Orchestration'] = { + 'TaskHistoryRetentionLimit': task_history_retention_limit + } + if any(snapshot_interval, keep_old_snapshots, + log_entries_for_slow_followers, heartbeat_tick, election_tick): + self['Raft'] = { + 'SnapshotInterval': snapshot_interval, + 'KeepOldSnapshots': keep_old_snapshots, + 'LogEntriesForSlowFollowers': log_entries_for_slow_followers, + 'HeartbeatTick': heartbeat_tick, + 'ElectionTick': election_tick + } + + if dispatcher_heartbeat_period: + self['Dispatcher'] = { + 'HeartbeatPeriod': dispatcher_heartbeat_period + } + + if node_cert_expiry or external_ca: + self['CAConfig'] = { + 'NodeCertExpiry': node_cert_expiry, + 'ExternalCA': external_ca + } + + +class SwarmAcceptancePolicy(DictType): + def __init__(self, role, auto_accept=False, secret=None): + self['Role'] = role.upper() + self['Autoaccept'] = 
auto_accept + if secret is not None: + self['Secret'] = secret + + +class SwarmExternalCA(DictType): + def __init__(self, url, protocol=None, options=None): + self['URL'] = url + self['Protocol'] = protocol + self['Options'] = options diff --git a/tests/integration/swarm_test.py b/tests/integration/swarm_test.py new file mode 100644 index 00000000..734d4701 --- /dev/null +++ b/tests/integration/swarm_test.py @@ -0,0 +1,55 @@ +import docker +import pytest + +from ..base import requires_api_version +from .. import helpers + + +BUSYBOX = helpers.BUSYBOX + + +class SwarmTest(helpers.BaseTestCase): + def setUp(self): + super(SwarmTest, self).setUp() + try: + self.client.leave_swarm(force=True) + except docker.errors.APIError: + pass + + def tearDown(self): + super(SwarmTest, self).tearDown() + try: + self.client.leave_swarm(force=True) + except docker.errors.APIError: + pass + + @requires_api_version('1.24') + def test_init_swarm_simple(self): + assert self.client.init_swarm('eth0') + + @requires_api_version('1.24') + def test_init_swarm_force_new_cluster(self): + pytest.skip('Test stalls the engine on 1.12') + + assert self.client.init_swarm('eth0') + version_1 = self.client.inspect_swarm()['Version']['Index'] + assert self.client.init_swarm('eth0', force_new_cluster=True) + version_2 = self.client.inspect_swarm()['Version']['Index'] + assert version_2 != version_1 + + @requires_api_version('1.24') + def test_init_already_in_cluster(self): + assert self.client.init_swarm('eth0') + with pytest.raises(docker.errors.APIError): + self.client.init_swarm('eth0') + + @requires_api_version('1.24') + def test_leave_swarm(self): + assert self.client.init_swarm('eth0') + with pytest.raises(docker.errors.APIError) as exc_info: + self.client.leave_swarm() + exc_info.value.response.status_code == 500 + assert self.client.leave_swarm(force=True) + with pytest.raises(docker.errors.APIError) as exc_info: + self.client.inspect_swarm() + exc_info.value.response.status_code == 406 From 
1f055796a8992da041280401025792cc8fb22336 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Wed, 3 Aug 2016 18:00:29 -0700 Subject: [PATCH 43/83] Add new init_swarm test with custom spec Signed-off-by: Joffrey F --- docker/utils/types.py | 4 ++-- tests/integration/swarm_test.py | 14 +++++++++++++- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/docker/utils/types.py b/docker/utils/types.py index b970114e..725c8c81 100644 --- a/docker/utils/types.py +++ b/docker/utils/types.py @@ -108,8 +108,8 @@ class SwarmSpec(DictType): self['Orchestration'] = { 'TaskHistoryRetentionLimit': task_history_retention_limit } - if any(snapshot_interval, keep_old_snapshots, - log_entries_for_slow_followers, heartbeat_tick, election_tick): + if any([snapshot_interval, keep_old_snapshots, + log_entries_for_slow_followers, heartbeat_tick, election_tick]): self['Raft'] = { 'SnapshotInterval': snapshot_interval, 'KeepOldSnapshots': keep_old_snapshots, diff --git a/tests/integration/swarm_test.py b/tests/integration/swarm_test.py index 734d4701..969e05ef 100644 --- a/tests/integration/swarm_test.py +++ b/tests/integration/swarm_test.py @@ -29,7 +29,7 @@ class SwarmTest(helpers.BaseTestCase): @requires_api_version('1.24') def test_init_swarm_force_new_cluster(self): - pytest.skip('Test stalls the engine on 1.12') + pytest.skip('Test stalls the engine on 1.12.0') assert self.client.init_swarm('eth0') version_1 = self.client.inspect_swarm()['Version']['Index'] @@ -43,6 +43,18 @@ class SwarmTest(helpers.BaseTestCase): with pytest.raises(docker.errors.APIError): self.client.init_swarm('eth0') + @requires_api_version('1.24') + def test_init_swarm_custom_raft_spec(self): + spec = self.client.create_swarm_spec( + snapshot_interval=5000, log_entries_for_slow_followers=1200 + ) + assert self.client.init_swarm( + advertise_addr='eth0', swarm_spec=spec + ) + swarm_info = self.client.inspect_swarm() + assert swarm_info['Spec']['Raft']['SnapshotInterval'] == 5000 + assert 
swarm_info['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200 + @requires_api_version('1.24') def test_leave_swarm(self): assert self.client.init_swarm('eth0') From df31f9a8ce9c43f4e4b23d81d711fe76a2b7f696 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Wed, 3 Aug 2016 18:00:52 -0700 Subject: [PATCH 44/83] Update Swarm documentation Signed-off-by: Joffrey F --- docs/swarm.md | 89 ++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 64 insertions(+), 25 deletions(-) diff --git a/docs/swarm.md b/docs/swarm.md index e3a1cd12..44a855b1 100644 --- a/docs/swarm.md +++ b/docs/swarm.md @@ -1,35 +1,74 @@ -# Using swarm for API version 1.24 or higher +# Swarm management -Swarm initialization is done in two parts. Provide a listen_addr and `force_new_cluster` (OPTIONAL) to -the `Client().swarm_init()` method, and declare mappings in the -`swarm_opts` section. +Starting with Engine version 1.12 (API 1.24), it is possible to manage the +engine's associated Swarm cluster using the API. + +## Initializing a new Swarm + +You can initialize a new Swarm by calling `Client.init_swarm`. An advertising +address needs to be provided, usually simply by indicating which network +interface needs to be used. Advanced options are provided using the +`swarm_spec` parameter, which can easily be created using +`Client.create_swarm_spec`. 
```python -swarm_id = cli.swarm_init(listen_addr="0.0.0.0:4500", -swarm_opts={ - "AcceptancePolicy": { - "Policies": [ - { - "Role": "MANAGER", - "Autoaccept": True - } - ] - } -}) -``` - -Join another swarm, by providing the remote_address, listen_address(optional), -secret(optional), ca_cert_hash(optional, manager(optional) -```python -cli.swarm_join( - remote_address="swarm-master:2377", - manager=True +spec = client.create_swarm_spec( + snapshot_interval=5000, log_entries_for_slow_followers=1200 +) +client.init_swarm( + advertise_addr='eth0', listen_addr='0.0.0.0:5000', force_new_cluster=False, + swarm_spec=spec ) ``` +## Joining an existing Swarm -Leave swarm +If you're looking to have the engine your client is connected to joining an +existing Swarm, this ca be accomplished by using the `Client.join_swarm` +method. You will need to provide a list of at least one remote address +corresponding to other machines already part of the swarm. In most cases, +a `listen_address` for your node, as well as the `secret` token are required +to join too. ```python -cli.swarm_leave() +client.join_swarm( + remote_addresses=['192.168.14.221:2377'], secret='SWMTKN-1-redacted', + listen_address='0.0.0.0:5000', manager=True +) ``` + +## Leaving the Swarm + +To leave the swarm you are currently a member of, simply use +`Client.leave_swarm`. Note that if your engine is the Swarm's manager, +you will need to specify `force=True` to be able to leave. + +```python +client.leave_swarm(force=False) +``` + + +## Retrieving Swarm status + +You can retrieve information about your current Swarm status by calling +`Client.inspect_swarm`. This method takes no arguments. 
+ +```python +client.inspect_swarm() +``` + +## Swarm API documentation + +### Client.init_swarm + +#### Client.create_swarm_spec + +#### docker.utils.SwarmAcceptancePolicy + +#### docker.utils.SwarmExternalCA + +### Client.inspect_swarm + +### Client.join_swarm + +### CLient.leave_swarm \ No newline at end of file From 25db440c967e7be96b431f5f744e9250ab438a36 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Thu, 4 Aug 2016 15:11:13 -0700 Subject: [PATCH 45/83] Update arguments for several Swarm API methods Add Client.update_swarm method Add test for Client.update_swarm Signed-off-by: Joffrey F --- docker/api/swarm.py | 27 +++++++++++++++++++-------- docker/utils/__init__.py | 2 +- docker/utils/types.py | 12 +----------- tests/integration/swarm_test.py | 31 +++++++++++++++++++++++++++++++ 4 files changed, 52 insertions(+), 20 deletions(-) diff --git a/docker/api/swarm.py b/docker/api/swarm.py index bc2179cc..28f9336a 100644 --- a/docker/api/swarm.py +++ b/docker/api/swarm.py @@ -9,7 +9,7 @@ class SwarmApiMixin(object): return utils.SwarmSpec(*args, **kwargs) @utils.minimum_version('1.24') - def init_swarm(self, advertise_addr, listen_addr='0.0.0.0:2377', + def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377', force_new_cluster=False, swarm_spec=None): url = self._url('/swarm/init') if swarm_spec is not None and not isinstance(swarm_spec, dict): @@ -30,14 +30,13 @@ class SwarmApiMixin(object): return self._result(self._get(url), True) @utils.minimum_version('1.24') - def join_swarm(self, remote_addresses, listen_address=None, - secret=None, ca_cert_hash=None, manager=False): + def join_swarm(self, remote_addrs, join_token, listen_addr=None, + advertise_addr=None): data = { - "RemoteAddrs": remote_addresses, - "ListenAddr": listen_address, - "Secret": secret, - "CACertHash": ca_cert_hash, - "Manager": manager + "RemoteAddrs": remote_addrs, + "ListenAddr": listen_addr, + "JoinToken": join_token, + "AdvertiseAddr": advertise_addr, } url = 
self._url('/swarm/join') response = self._post_json(url, data=data) @@ -50,3 +49,15 @@ class SwarmApiMixin(object): response = self._post(url, params={'force': force}) self._raise_for_status(response) return True + + @utils.minimum_version('1.24') + def update_swarm(self, version, swarm_spec=None, rotate_worker_token=False, + rotate_manager_token=False): + url = self._url('/swarm/update') + response = self._post_json(url, data=swarm_spec, params={ + 'rotateWorkerToken': rotate_worker_token, + 'rotateManagerToken': rotate_manager_token, + 'version': version + }) + self._raise_for_status(response) + return True diff --git a/docker/utils/__init__.py b/docker/utils/__init__.py index c02adea1..35acc779 100644 --- a/docker/utils/__init__.py +++ b/docker/utils/__init__.py @@ -10,6 +10,6 @@ from .utils import ( from .types import LogConfig, Ulimit from .types import ( - SwarmAcceptancePolicy, SwarmExternalCA, SwarmSpec, + SwarmExternalCA, SwarmSpec, ) from .decorators import check_resource, minimum_version, update_headers diff --git a/docker/utils/types.py b/docker/utils/types.py index 725c8c81..92faaa8a 100644 --- a/docker/utils/types.py +++ b/docker/utils/types.py @@ -97,13 +97,11 @@ class Ulimit(DictType): class SwarmSpec(DictType): - def __init__(self, policies=None, task_history_retention_limit=None, + def __init__(self, task_history_retention_limit=None, snapshot_interval=None, keep_old_snapshots=None, log_entries_for_slow_followers=None, heartbeat_tick=None, election_tick=None, dispatcher_heartbeat_period=None, node_cert_expiry=None, external_ca=None): - if policies is not None: - self['AcceptancePolicy'] = {'Policies': policies} if task_history_retention_limit is not None: self['Orchestration'] = { 'TaskHistoryRetentionLimit': task_history_retention_limit @@ -130,14 +128,6 @@ class SwarmSpec(DictType): } -class SwarmAcceptancePolicy(DictType): - def __init__(self, role, auto_accept=False, secret=None): - self['Role'] = role.upper() - self['Autoaccept'] = 
auto_accept - if secret is not None: - self['Secret'] = secret - - class SwarmExternalCA(DictType): def __init__(self, url, protocol=None, options=None): self['URL'] = url diff --git a/tests/integration/swarm_test.py b/tests/integration/swarm_test.py index 969e05ef..226689ba 100644 --- a/tests/integration/swarm_test.py +++ b/tests/integration/swarm_test.py @@ -65,3 +65,34 @@ class SwarmTest(helpers.BaseTestCase): with pytest.raises(docker.errors.APIError) as exc_info: self.client.inspect_swarm() exc_info.value.response.status_code == 406 + + @requires_api_version('1.24') + def test_update_swarm(self): + assert self.client.init_swarm('eth0') + swarm_info_1 = self.client.inspect_swarm() + spec = self.client.create_swarm_spec( + snapshot_interval=5000, log_entries_for_slow_followers=1200, + node_cert_expiry=7776000000000000 + ) + assert self.client.update_swarm( + version=swarm_info_1['Version']['Index'], + swarm_spec=spec, rotate_worker_token=True + ) + swarm_info_2 = self.client.inspect_swarm() + + assert ( + swarm_info_1['Version']['Index'] != + swarm_info_2['Version']['Index'] + ) + assert swarm_info_2['Spec']['Raft']['SnapshotInterval'] == 5000 + assert ( + swarm_info_2['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200 + ) + assert ( + swarm_info_1['JoinTokens']['Manager'] == + swarm_info_2['JoinTokens']['Manager'] + ) + assert ( + swarm_info_1['JoinTokens']['Worker'] != + swarm_info_2['JoinTokens']['Worker'] + ) From fdfe582b764ab5a194f147b9d8efa04e7ae43f1c Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Thu, 4 Aug 2016 15:12:43 -0700 Subject: [PATCH 46/83] Update Swarm API docs Signed-off-by: Joffrey F --- docs/swarm.md | 140 ++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 131 insertions(+), 9 deletions(-) diff --git a/docs/swarm.md b/docs/swarm.md index 44a855b1..2c87702c 100644 --- a/docs/swarm.md +++ b/docs/swarm.md @@ -23,17 +23,17 @@ client.init_swarm( ## Joining an existing Swarm -If you're looking to have the engine your client is 
connected to joining an -existing Swarm, this ca be accomplished by using the `Client.join_swarm` +If you're looking to have the engine your client is connected to join an +existing Swarm, this can be accomplished by using the `Client.join_swarm` method. You will need to provide a list of at least one remote address -corresponding to other machines already part of the swarm. In most cases, -a `listen_address` for your node, as well as the `secret` token are required -to join too. +corresponding to other machines already part of the swarm as well as the +`join_token`. In most cases, a `listen_addr` and `advertise_addr` for your +node are also required. ```python client.join_swarm( - remote_addresses=['192.168.14.221:2377'], secret='SWMTKN-1-redacted', - listen_address='0.0.0.0:5000', manager=True + remote_addrs=['192.168.14.221:2377'], join_token='SWMTKN-1-redacted', + listen_addr='0.0.0.0:5000', advertise_addr='eth0:5000' ) ``` @@ -61,14 +61,136 @@ client.inspect_swarm() ### Client.init_swarm +Initialize a new Swarm using the current connected engine as the first node. + +**Params:** + +* advertise_addr (string): Externally reachable address advertised to other + nodes. This can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, like + `eth0:4567`. If the port number is omitted, the port number from the listen + address is used. If `advertise_addr` is not specified, it will be + automatically detected when possible. Default: None +* listen_addr (string): Listen address used for inter-manager communication, + as well as determining the networking interface used for the VXLAN Tunnel + Endpoint (VTEP). This can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, like + `eth0:4567`. If the port number is omitted, the default swarm listening port + is used. 
Default: '0.0.0.0:2377' +* force_new_cluster (bool): Force creating a new Swarm, even if already part of + one. Default: False +* swarm_spec (dict): Configuration settings of the new Swarm. Use + `Client.create_swarm_spec` to generate a valid configuration. Default: None + +**Returns:** `True` if the request went through. Raises an `APIError` if it + fails. + #### Client.create_swarm_spec -#### docker.utils.SwarmAcceptancePolicy +Create a `docker.utils.SwarmSpec` instance that can be used as the `swarm_spec` +argument in `Client.init_swarm`. + +**Params:** + +* task_history_retention_limit (int): Maximum number of tasks history stored. +* snapshot_interval (int): Number of logs entries between snapshot. +* keep_old_snapshots (int): Number of snapshots to keep beyond the current + snapshot. +* log_entries_for_slow_followers (int): Number of log entries to keep around + to sync up slow followers after a snapshot is created. +* heartbeat_tick (int): Amount of ticks (in seconds) between each heartbeat. +* election_tick (int): Amount of ticks (in seconds) needed without a leader to + trigger a new election. +* dispatcher_heartbeat_period (int): The delay for an agent to send a + heartbeat to the dispatcher. +* node_cert_expiry (int): Automatic expiry for nodes certificates. +* external_ca (dict): Configuration for forwarding signing requests to an + external certificate authority. Use `docker.utils.SwarmExternalCA`. + +**Returns:** `docker.utils.SwarmSpec` instance. #### docker.utils.SwarmExternalCA +Create a configuration dictionary for the `external_ca` argument in a +`SwarmSpec`. + +**Params:** + +* protocol (string): Protocol for communication with the external CA (currently + only “cfssl” is supported). +* url (string): URL where certificate signing requests should be sent. +* options (dict): An object with key/value pairs that are interpreted as + protocol-specific options for the external CA driver. 
+ ### Client.inspect_swarm +Retrieve information about the current Swarm. + +**Returns:** A dictionary containing information about the Swarm. See sample + below. + +```python +{u'CreatedAt': u'2016-08-04T21:26:18.779800579Z', + u'ID': u'8hk6e9wh4iq214qtbgvbp84a9', + u'JoinTokens': {u'Manager': u'SWMTKN-1-redacted-1', + u'Worker': u'SWMTKN-1-redacted-2'}, + u'Spec': {u'CAConfig': {u'NodeCertExpiry': 7776000000000000}, + u'Dispatcher': {u'HeartbeatPeriod': 5000000000}, + u'Name': u'default', + u'Orchestration': {u'TaskHistoryRetentionLimit': 10}, + u'Raft': {u'ElectionTick': 3, + u'HeartbeatTick': 1, + u'LogEntriesForSlowFollowers': 500, + u'SnapshotInterval': 10000}, + u'TaskDefaults': {}}, + u'UpdatedAt': u'2016-08-04T21:26:19.391623265Z', + u'Version': {u'Index': 11}} +``` + ### Client.join_swarm -### CLient.leave_swarm \ No newline at end of file +Join an existing Swarm. + +**Params:** + +* remote_addrs (list): Addresses of one or more manager nodes already + participating in the Swarm to join. +* join_token (string): Secret token for joining this Swarm. +* listen_addr (string): Listen address used for inter-manager communication + if the node gets promoted to manager, as well as determining the networking + interface used for the VXLAN Tunnel Endpoint (VTEP). Default: `None` +* advertise_addr (string): Externally reachable address advertised to other + nodes. This can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, like + `eth0:4567`. If the port number is omitted, the port number from the listen + address is used. If AdvertiseAddr is not specified, it will be automatically + detected when possible. Default: `None` + +**Returns:** `True` if the request went through. Raises an `APIError` if it + fails. + +### Client.leave_swarm + +Leave a Swarm. + +**Params:** + +* force (bool): Leave the Swarm even if this node is a manager. + Default: `False` + +**Returns:** `True` if the request went through. 
Raises an `APIError` if it + fails. + +### Client.update_swarm + +Update the Swarm's configuration + +**Params:** + +* version (int): The version number of the swarm object being updated. This + is required to avoid conflicting writes. +* swarm_spec (dict): Configuration settings to update. Use + `Client.create_swarm_spec` to generate a valid configuration. + Default: `None`. +* rotate_worker_token (bool): Rotate the worker join token. Default: `False`. +* rotate_manager_token (bool): Rotate the manager join token. Default: `False`. From 0f70b6a38b80133e6df29c9cb007bc2c709db8ec Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Thu, 4 Aug 2016 16:48:21 -0700 Subject: [PATCH 47/83] Add support for custom name in SwarmSpec Signed-off-by: Joffrey F --- docker/utils/types.py | 5 ++++- docs/swarm.md | 4 ++++ tests/integration/swarm_test.py | 18 ++++++++++++++++++ 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/docker/utils/types.py b/docker/utils/types.py index 92faaa8a..d778b90d 100644 --- a/docker/utils/types.py +++ b/docker/utils/types.py @@ -101,7 +101,7 @@ class SwarmSpec(DictType): snapshot_interval=None, keep_old_snapshots=None, log_entries_for_slow_followers=None, heartbeat_tick=None, election_tick=None, dispatcher_heartbeat_period=None, - node_cert_expiry=None, external_ca=None): + node_cert_expiry=None, external_ca=None, name=None): if task_history_retention_limit is not None: self['Orchestration'] = { 'TaskHistoryRetentionLimit': task_history_retention_limit @@ -127,6 +127,9 @@ class SwarmSpec(DictType): 'ExternalCA': external_ca } + if name is not None: + self['Name'] = name + class SwarmExternalCA(DictType): def __init__(self, url, protocol=None, options=None): diff --git a/docs/swarm.md b/docs/swarm.md index 2c87702c..a9a1d1f5 100644 --- a/docs/swarm.md +++ b/docs/swarm.md @@ -106,6 +106,7 @@ argument in `Client.init_swarm`. * node_cert_expiry (int): Automatic expiry for nodes certificates. 
* external_ca (dict): Configuration for forwarding signing requests to an external certificate authority. Use `docker.utils.SwarmExternalCA`. +* name (string): Swarm's name **Returns:** `docker.utils.SwarmSpec` instance. @@ -194,3 +195,6 @@ Update the Swarm's configuration Default: `None`. * rotate_worker_token (bool): Rotate the worker join token. Default: `False`. * rotate_manager_token (bool): Rotate the manager join token. Default: `False`. + +**Returns:** `True` if the request went through. Raises an `APIError` if it + fails. diff --git a/tests/integration/swarm_test.py b/tests/integration/swarm_test.py index 226689ba..b73f81c4 100644 --- a/tests/integration/swarm_test.py +++ b/tests/integration/swarm_test.py @@ -96,3 +96,21 @@ class SwarmTest(helpers.BaseTestCase): swarm_info_1['JoinTokens']['Worker'] != swarm_info_2['JoinTokens']['Worker'] ) + + @requires_api_version('1.24') + def test_update_swarm_name(self): + assert self.client.init_swarm('eth0') + swarm_info_1 = self.client.inspect_swarm() + spec = self.client.create_swarm_spec( + node_cert_expiry=7776000000000000, name='reimuhakurei' + ) + assert self.client.update_swarm( + version=swarm_info_1['Version']['Index'], swarm_spec=spec + ) + swarm_info_2 = self.client.inspect_swarm() + + assert ( + swarm_info_1['Version']['Index'] != + swarm_info_2['Version']['Index'] + ) + assert swarm_info_2['Spec']['Name'] == 'reimuhakurei' From e1774c4c5b8ae8d28dfeef90236be75a8f54e88f Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Thu, 4 Aug 2016 17:03:02 -0700 Subject: [PATCH 48/83] Reference swarm methods in api.md file. Signed-off-by: Joffrey F --- docs/api.md | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/docs/api.md b/docs/api.md index 9b3a7265..7748254a 100644 --- a/docs/api.md +++ b/docs/api.md @@ -606,6 +606,11 @@ Display system-wide information. Identical to the `docker info` command. 
'SwapLimit': 1} ``` +## init_swarm + +Initialize a new Swarm using the current connected engine as the first node. +See the [Swarm documentation](swarm.md#clientinit_swarm). + ## insert *DEPRECATED* @@ -641,6 +646,11 @@ Retrieve network info by id. **Returns** (dict): Network information dictionary +## inspect_swarm + +Retrieve information about the current Swarm. +See the [Swarm documentation](swarm.md#clientinspect_swarm). + ## inspect_volume Retrieve volume info by name. @@ -656,6 +666,11 @@ Retrieve volume info by name. {u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data', u'Driver': u'local', u'Name': u'foobar'} ``` +## join_swarm + +Join an existing Swarm. +See the [Swarm documentation](swarm.md#clientjoin_swarm). + ## kill Kill a container or send a signal to a container. @@ -665,6 +680,11 @@ Kill a container or send a signal to a container. * container (str): The container to kill * signal (str or int): The signal to send. Defaults to `SIGKILL` +## leave_swarm + +Leave the current Swarm. +See the [Swarm documentation](swarm.md#clientleave_swarm). + ## load_image Load an image that was previously saved using `Client.get_image` @@ -1054,6 +1074,11 @@ Update resource configs of one or more containers. **Returns** (dict): Dictionary containing a `Warnings` key. +## update_swarm + +Update the current Swarm. +See the [Swarm documentation](swarm.md#clientupdate_swarm). + ## version Nearly identical to the `docker version` command. 
From 93b4b4134e2c046433649c5e86d9c65ffd84f106 Mon Sep 17 00:00:00 2001 From: George Lester Date: Wed, 13 Jul 2016 21:36:38 -0700 Subject: [PATCH 49/83] Implemented dns_opt support (from api 1.21) Signed-off-by: George Lester --- docker/utils/utils.py | 8 +++++++- docs/api.md | 1 + tests/unit/utils_test.py | 13 +++++++++++++ 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/docker/utils/utils.py b/docker/utils/utils.py index 00a7af14..78457161 100644 --- a/docker/utils/utils.py +++ b/docker/utils/utils.py @@ -620,7 +620,7 @@ def create_host_config(binds=None, port_bindings=None, lxc_conf=None, device_write_bps=None, device_read_iops=None, device_write_iops=None, oom_kill_disable=False, shm_size=None, sysctls=None, version=None, tmpfs=None, - oom_score_adj=None): + oom_score_adj=None, dns_opt=None): host_config = {} @@ -719,6 +719,12 @@ def create_host_config(binds=None, port_bindings=None, lxc_conf=None, if dns is not None: host_config['Dns'] = dns + if dns_opt is not None: + if version_lt(version, '1.21'): + raise host_config_version_error('dns_opt', '1.21') + + host_config['DnsOptions'] = dns_opt + if security_opt is not None: if not isinstance(security_opt, list): raise host_config_type_error('security_opt', security_opt, 'list') diff --git a/docs/api.md b/docs/api.md index 9b3a7265..1810d5e1 100644 --- a/docs/api.md +++ b/docs/api.md @@ -239,6 +239,7 @@ where unit = b, k, m, or g) * environment (dict or list): A dictionary or a list of strings in the following format `["PASSWORD=xxx"]` or `{"PASSWORD": "xxx"}`. * dns (list): DNS name servers +* dns_opt (list): Additional options to be added to the container's `resolv.conf` file * volumes (str or list): * volumes_from (str or list): List of container names or Ids to get volumes from. 
Optionally a single string joining container id's with commas diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py index 47ced433..537c5cfa 100644 --- a/tests/unit/utils_test.py +++ b/tests/unit/utils_test.py @@ -141,6 +141,19 @@ class HostConfigTest(base.BaseTestCase): TypeError, lambda: create_host_config(version='1.22', oom_score_adj='100')) + def test_create_host_config_with_dns_opt(self): + + tested_opts = ['use-vc', 'no-tld-query'] + config = create_host_config(version='1.21', dns_opt=tested_opts) + dns_opts = config.get('DnsOptions') + + self.assertTrue('use-vc' in dns_opts) + self.assertTrue('no-tld-query' in dns_opts) + + self.assertRaises( + InvalidVersion, lambda: create_host_config(version='1.20', + dns_opt=tested_opts)) + def test_create_endpoint_config_with_aliases(self): config = create_endpoint_config(version='1.22', aliases=['foo', 'bar']) assert config == {'Aliases': ['foo', 'bar']} From a28a0d235593704f42db4462e5dc4ee7257c6ea3 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Tue, 9 Aug 2016 13:20:17 -0700 Subject: [PATCH 50/83] Exclude requests 2.11 from setup.py to work around unicode bug Signed-off-by: Joffrey F --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index ac58b1f9..85a44994 100644 --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ ROOT_DIR = os.path.dirname(__file__) SOURCE_DIR = os.path.join(ROOT_DIR) requirements = [ - 'requests >= 2.5.2', + 'requests >= 2.5.2, < 2.11', 'six >= 1.4.0', 'websocket-client >= 0.32.0', ] From 08b284ab399e9bf19296c020e158968ba3fb800b Mon Sep 17 00:00:00 2001 From: Tomas Tomecek Date: Wed, 27 Jul 2016 10:26:16 +0200 Subject: [PATCH 51/83] docker client consistency: don't quote ':/' E.g. 
docker client `/v1.21/images/localhost:5000/busybox/push?tag=` docker-py `/v1.21/images/localhost%3A5000%2Fbusybox/push` Signed-off-by: Tomas Tomecek --- docker/client.py | 4 +++- tests/unit/api_test.py | 10 ++++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/docker/client.py b/docker/client.py index c3e5874e..771412e2 100644 --- a/docker/client.py +++ b/docker/client.py @@ -14,6 +14,7 @@ import json import struct +from functools import partial import requests import requests.exceptions @@ -156,7 +157,8 @@ class Client( 'instead'.format(arg, type(arg)) ) - args = map(six.moves.urllib.parse.quote_plus, args) + quote_f = partial(six.moves.urllib.parse.quote_plus, safe="/:") + args = map(quote_f, args) if kwargs.get('versioned_api', True): return '{0}/v{1}{2}'.format( diff --git a/tests/unit/api_test.py b/tests/unit/api_test.py index 696c0739..712f57e0 100644 --- a/tests/unit/api_test.py +++ b/tests/unit/api_test.py @@ -159,9 +159,15 @@ class DockerApiTest(DockerClientTest): '{0}{1}'.format(url_prefix, 'hello/somename/world/someothername') ) - url = self.client._url('/hello/{0}/world', '/some?name') + url = self.client._url('/hello/{0}/world', 'some?name') self.assertEqual( - url, '{0}{1}'.format(url_prefix, 'hello/%2Fsome%3Fname/world') + url, '{0}{1}'.format(url_prefix, 'hello/some%3Fname/world') + ) + + url = self.client._url("/images/{0}/push", "localhost:5000/image") + self.assertEqual( + url, + '{0}{1}'.format(url_prefix, 'images/localhost:5000/image/push') ) def test_url_invalid_resource(self): From a75553b3ca1a8c1d94a49f328f96ef9a1b634c70 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Thu, 11 Aug 2016 17:16:41 -0700 Subject: [PATCH 52/83] Add `nodes` and `inspect_node` methods Signed-off-by: Joffrey F --- docker/api/swarm.py | 15 ++++++++++ docs/api.md | 9 ++++++ docs/swarm.md | 52 ++++++++++++++++++++++++++++++++- tests/integration/swarm_test.py | 29 ++++++++++++++++++ 4 files changed, 104 insertions(+), 1 deletion(-) diff --git 
a/docker/api/swarm.py b/docker/api/swarm.py index 28f9336a..d0993645 100644 --- a/docker/api/swarm.py +++ b/docker/api/swarm.py @@ -29,6 +29,12 @@ class SwarmApiMixin(object): url = self._url('/swarm') return self._result(self._get(url), True) + @utils.check_resource + @utils.minimum_version('1.24') + def inspect_node(self, node_id): + url = self._url('/nodes/{0}', node_id) + return self._result(self._get(url), True) + @utils.minimum_version('1.24') def join_swarm(self, remote_addrs, join_token, listen_addr=None, advertise_addr=None): @@ -50,6 +56,15 @@ class SwarmApiMixin(object): self._raise_for_status(response) return True + @utils.minimum_version('1.24') + def nodes(self, filters=None): + url = self._url('/nodes') + params = {} + if filters: + params['filters'] = utils.convert_filters(filters) + + return self._result(self._get(url, params=params), True) + @utils.minimum_version('1.24') def update_swarm(self, version, swarm_spec=None, rotate_worker_token=False, rotate_manager_token=False): diff --git a/docs/api.md b/docs/api.md index 7748254a..ddfaffeb 100644 --- a/docs/api.md +++ b/docs/api.md @@ -646,6 +646,11 @@ Retrieve network info by id. **Returns** (dict): Network information dictionary +## inspect_node + +Retrieve low-level information about a Swarm node. +See the [Swarm documentation](swarm.md#clientinspect_node). + ## inspect_swarm Retrieve information about the current Swarm. @@ -742,6 +747,10 @@ The above are combined to create a filters dict. **Returns** (dict): List of network objects. +## nodes + +List Swarm nodes. See the [Swarm documentation](swarm.md#clientnodes). + ## pause Pauses all processes within a container. diff --git a/docs/swarm.md b/docs/swarm.md index a9a1d1f5..0cd015a0 100644 --- a/docs/swarm.md +++ b/docs/swarm.md @@ -47,7 +47,6 @@ you will need to specify `force=True` to be able to leave. 
client.leave_swarm(force=False) ``` - ## Retrieving Swarm status You can retrieve information about your current Swarm status by calling @@ -57,6 +56,15 @@ You can retrieve information about your current Swarm status by calling client.inspect_swarm() ``` +## Listing Swarm nodes + +List all nodes that are part of the current Swarm using `Client.nodes`. +The `filters` argument allows to filter the results. + +```python +client.nodes(filters={'role': 'manager'}) +``` + ## Swarm API documentation ### Client.init_swarm @@ -123,6 +131,37 @@ Create a configuration dictionary for the `external_ca` argument in a * options (dict): An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver. +### Client.inspect_node + +Retrieve low-level information about a Swarm node + +**Params:** + +* node_id (string): ID of the node to be inspected. + +**Returns:** A dictionary containing data about this node. See sample below. + +```python +{u'CreatedAt': u'2016-08-11T23:28:39.695834296Z', + u'Description': {u'Engine': {u'EngineVersion': u'1.12.0', + u'Plugins': [{u'Name': u'bridge', u'Type': u'Network'}, + {u'Name': u'host', u'Type': u'Network'}, + {u'Name': u'null', u'Type': u'Network'}, + {u'Name': u'overlay', u'Type': u'Network'}, + {u'Name': u'local', u'Type': u'Volume'}]}, + u'Hostname': u'dockerserv-1.local.net', + u'Platform': {u'Architecture': u'x86_64', u'OS': u'linux'}, + u'Resources': {u'MemoryBytes': 8052109312, u'NanoCPUs': 4000000000}}, + u'ID': u'1kqami616p23dz4hd7km35w63', + u'ManagerStatus': {u'Addr': u'10.0.131.127:2377', + u'Leader': True, + u'Reachability': u'reachable'}, + u'Spec': {u'Availability': u'active', u'Role': u'manager'}, + u'Status': {u'State': u'ready'}, + u'UpdatedAt': u'2016-08-11T23:28:39.979829529Z', + u'Version': {u'Index': 9}} + ``` + ### Client.inspect_swarm Retrieve information about the current Swarm. @@ -182,6 +221,17 @@ Leave a Swarm. **Returns:** `True` if the request went through. 
Raises an `APIError` if it fails. +### Client.nodes + +List Swarm nodes + +**Params:** + +* filters (dict): Filters to process on the nodes list. Valid filters: + `id`, `name`, `membership` and `role`. Default: `None` + +**Returns:** A list of dictionaries containing data about each swarm node. + ### Client.update_swarm Update the Swarm's configuration diff --git a/tests/integration/swarm_test.py b/tests/integration/swarm_test.py index b73f81c4..128628e6 100644 --- a/tests/integration/swarm_test.py +++ b/tests/integration/swarm_test.py @@ -114,3 +114,32 @@ class SwarmTest(helpers.BaseTestCase): swarm_info_2['Version']['Index'] ) assert swarm_info_2['Spec']['Name'] == 'reimuhakurei' + + @requires_api_version('1.24') + def test_list_nodes(self): + assert self.client.init_swarm('eth0') + nodes_list = self.client.nodes() + assert len(nodes_list) == 1 + node = nodes_list[0] + assert 'ID' in node + assert 'Spec' in node + assert node['Spec']['Role'] == 'manager' + + filtered_list = self.client.nodes(filters={ + 'id': node['ID'] + }) + assert len(filtered_list) == 1 + filtered_list = self.client.nodes(filters={ + 'role': 'worker' + }) + assert len(filtered_list) == 0 + + @requires_api_version('1.24') + def test_inspect_node(self): + assert self.client.init_swarm('eth0') + nodes_list = self.client.nodes() + assert len(nodes_list) == 1 + node = nodes_list[0] + node_data = self.client.inspect_node(node['ID']) + assert node['ID'] == node_data['ID'] + assert node['Version'] == node_data['Version'] From 7d147c8ca18ad6c8dfd15f9f06f2892fc57372bb Mon Sep 17 00:00:00 2001 From: Josh Purvis Date: Mon, 15 Aug 2016 14:35:36 -0400 Subject: [PATCH 53/83] Move cpu_shares and cpuset_cpu to HostConfig when API >= 1.18 Signed-off-by: Josh Purvis --- docker/utils/utils.py | 26 +++++++++++++- docs/api.md | 1 - docs/hostconfig.md | 2 ++ tests/integration/container_test.py | 31 +++++++++++++++-- tests/unit/container_test.py | 54 +++++++++++++++++++++++++++++ 5 files changed, 110 insertions(+), 
4 deletions(-) diff --git a/docker/utils/utils.py b/docker/utils/utils.py index 00a7af14..65f5dd9a 100644 --- a/docker/utils/utils.py +++ b/docker/utils/utils.py @@ -620,7 +620,7 @@ def create_host_config(binds=None, port_bindings=None, lxc_conf=None, device_write_bps=None, device_read_iops=None, device_write_iops=None, oom_kill_disable=False, shm_size=None, sysctls=None, version=None, tmpfs=None, - oom_score_adj=None): + oom_score_adj=None, cpu_shares=None, cpuset_cpus=None): host_config = {} @@ -803,6 +803,21 @@ def create_host_config(binds=None, port_bindings=None, lxc_conf=None, host_config['CpuPeriod'] = cpu_period + if cpu_shares: + if version_lt(version, '1.18'): + raise host_config_version_error('cpu_shares', '1.18') + + if not isinstance(cpu_shares, int): + raise host_config_type_error('cpu_shares', cpu_shares, 'int') + + host_config['CpuShares'] = cpu_shares + + if cpuset_cpus: + if version_lt(version, '1.18'): + raise host_config_version_error('cpuset_cpus', '1.18') + + host_config['CpuSetCpus'] = cpuset_cpus + if blkio_weight: if not isinstance(blkio_weight, int): raise host_config_type_error('blkio_weight', blkio_weight, 'int') @@ -975,6 +990,14 @@ def create_container_config( 'labels were only introduced in API version 1.18' ) + if cpuset is not None or cpu_shares is not None: + if version_gte(version, '1.18'): + warnings.warn( + 'The cpuset_cpus and cpu_shares options have been moved to ' + 'host_config in API version 1.18, and will be removed', + DeprecationWarning + ) + if stop_signal is not None and compare_version('1.21', version) < 0: raise errors.InvalidVersion( 'stop_signal was only introduced in API version 1.21' @@ -1004,6 +1027,7 @@ def create_container_config( if mem_limit is not None: mem_limit = parse_bytes(mem_limit) + if memswap_limit is not None: memswap_limit = parse_bytes(memswap_limit) diff --git a/docs/api.md b/docs/api.md index 9b3a7265..960a673b 100644 --- a/docs/api.md +++ b/docs/api.md @@ -245,7 +245,6 @@ from. 
Optionally a single string joining container id's with commas * network_disabled (bool): Disable networking * name (str): A name for the container * entrypoint (str or list): An entrypoint -* cpu_shares (int): CPU shares (relative weight) * working_dir (str): Path to the working directory * domainname (str or list): Set custom DNS search domains * memswap_limit (int): diff --git a/docs/hostconfig.md b/docs/hostconfig.md index 01c4625f..229a28c0 100644 --- a/docs/hostconfig.md +++ b/docs/hostconfig.md @@ -109,6 +109,8 @@ for example: * cpu_group (int): The length of a CPU period in microseconds. * cpu_period (int): Microseconds of CPU time that the container can get in a CPU period. +* cpu_shares (int): CPU shares (relative weight) +* cpuset_cpus (str): CPUs in which to allow execution (0-3, 0,1) * blkio_weight: Block IO weight (relative weight), accepts a weight value between 10 and 1000. * blkio_weight_device: Block IO weight (relative device weight) in the form of: `[{"Path": "device_path", "Weight": weight}]` diff --git a/tests/integration/container_test.py b/tests/integration/container_test.py index f347c12a..2d5b6367 100644 --- a/tests/integration/container_test.py +++ b/tests/integration/container_test.py @@ -1101,11 +1101,38 @@ class ContainerUpdateTest(helpers.BaseTestCase): container = self.client.create_container( BUSYBOX, 'top', host_config=self.client.create_host_config( mem_limit=old_mem_limit - ), cpu_shares=102 + ) ) self.tmp_containers.append(container) self.client.start(container) self.client.update_container(container, mem_limit=new_mem_limit) inspect_data = self.client.inspect_container(container) self.assertEqual(inspect_data['HostConfig']['Memory'], new_mem_limit) - self.assertEqual(inspect_data['HostConfig']['CpuShares'], 102) + + +class ContainerCPUTest(helpers.BaseTestCase): + @requires_api_version('1.18') + def test_container_cpu_shares(self): + cpu_shares = 512 + container = self.client.create_container( + BUSYBOX, 'ls', 
host_config=self.client.create_host_config( + cpu_shares=cpu_shares + ) + ) + self.tmp_containers.append(container) + self.client.start(container) + inspect_data = self.client.inspect_container(container) + self.assertEqual(inspect_data['HostConfig']['CpuShares'], 512) + + @requires_api_version('1.18') + def test_container_cpuset(self): + cpuset_cpus = "0,1" + container = self.client.create_container( + BUSYBOX, 'ls', host_config=self.client.create_host_config( + cpuset_cpus=cpuset_cpus + ) + ) + self.tmp_containers.append(container) + self.client.start(container) + inspect_data = self.client.inspect_container(container) + self.assertEqual(inspect_data['HostConfig']['CpusetCpus'], cpuset_cpus) diff --git a/tests/unit/container_test.py b/tests/unit/container_test.py index 4c94c844..c480462f 100644 --- a/tests/unit/container_test.py +++ b/tests/unit/container_test.py @@ -286,6 +286,33 @@ class CreateContainerTest(DockerClientTest): self.assertEqual(args[1]['headers'], {'Content-Type': 'application/json'}) + @requires_api_version('1.18') + def test_create_container_with_host_config_cpu_shares(self): + self.client.create_container( + 'busybox', 'ls', host_config=self.client.create_host_config( + cpu_shares=512 + ) + ) + + args = fake_request.call_args + self.assertEqual(args[0][1], + url_prefix + 'containers/create') + + self.assertEqual(json.loads(args[1]['data']), + json.loads(''' + {"Tty": false, "Image": "busybox", + "Cmd": ["ls"], "AttachStdin": false, + "AttachStderr": true, + "AttachStdout": true, "OpenStdin": false, + "StdinOnce": false, + "NetworkDisabled": false, + "HostConfig": { + "CpuShares": 512, + "NetworkMode": "default" + }}''')) + self.assertEqual(args[1]['headers'], + {'Content-Type': 'application/json'}) + def test_create_container_with_cpuset(self): self.client.create_container('busybox', 'ls', cpuset='0,1') @@ -306,6 +333,33 @@ class CreateContainerTest(DockerClientTest): self.assertEqual(args[1]['headers'], {'Content-Type': 'application/json'}) + 
@requires_api_version('1.18') + def test_create_container_with_host_config_cpuset(self): + self.client.create_container( + 'busybox', 'ls', host_config=self.client.create_host_config( + cpuset_cpus='0,1' + ) + ) + + args = fake_request.call_args + self.assertEqual(args[0][1], + url_prefix + 'containers/create') + + self.assertEqual(json.loads(args[1]['data']), + json.loads(''' + {"Tty": false, "Image": "busybox", + "Cmd": ["ls"], "AttachStdin": false, + "AttachStderr": true, + "AttachStdout": true, "OpenStdin": false, + "StdinOnce": false, + "NetworkDisabled": false, + "HostConfig": { + "CpuSetCpus": "0,1", + "NetworkMode": "default" + }}''')) + self.assertEqual(args[1]['headers'], + {'Content-Type': 'application/json'}) + def test_create_container_with_cgroup_parent(self): self.client.create_container( 'busybox', 'ls', host_config=self.client.create_host_config( From 0416338bae879574f74537a6ca2eab7a1f87f8a9 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Tue, 16 Aug 2016 13:36:42 -0400 Subject: [PATCH 54/83] Remove references to "ExecutionDriver" Docker no longer has an `ExecutionDriver` as of Docker 1.11. The field in the `docker info` API will not be present in 1.13. 
Found this while working on docker/docker#25721 Signed-off-by: Brian Goff --- tests/helpers.py | 9 --------- tests/integration/container_test.py | 9 ++------- tests/integration/exec_test.py | 23 ----------------------- 3 files changed, 2 insertions(+), 39 deletions(-) diff --git a/tests/helpers.py b/tests/helpers.py index 94ea3887..40baef9c 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -45,15 +45,6 @@ def untar_file(tardata, filename): return result -def exec_driver_is_native(): - global EXEC_DRIVER - if not EXEC_DRIVER: - c = docker_client() - EXEC_DRIVER = c.info()['ExecutionDriver'] - c.close() - return EXEC_DRIVER.startswith('native') or EXEC_DRIVER == '' - - def docker_client(**kwargs): return docker.Client(**docker_client_kwargs(**kwargs)) diff --git a/tests/integration/container_test.py b/tests/integration/container_test.py index f347c12a..334c81db 100644 --- a/tests/integration/container_test.py +++ b/tests/integration/container_test.py @@ -159,9 +159,6 @@ class CreateContainerTest(helpers.BaseTestCase): self.assertCountEqual(info['HostConfig']['VolumesFrom'], vol_names) def create_container_readonly_fs(self): - if not helpers.exec_driver_is_native(): - pytest.skip('Exec driver not native') - ctnr = self.client.create_container( BUSYBOX, ['mkdir', '/shrine'], host_config=self.client.create_host_config( @@ -806,8 +803,7 @@ class KillTest(helpers.BaseTestCase): self.assertIn('State', container_info) state = container_info['State'] self.assertIn('ExitCode', state) - if helpers.exec_driver_is_native(): - self.assertNotEqual(state['ExitCode'], 0) + self.assertNotEqual(state['ExitCode'], 0) self.assertIn('Running', state) self.assertEqual(state['Running'], False) @@ -821,8 +817,7 @@ class KillTest(helpers.BaseTestCase): self.assertIn('State', container_info) state = container_info['State'] self.assertIn('ExitCode', state) - if helpers.exec_driver_is_native(): - self.assertNotEqual(state['ExitCode'], 0) + self.assertNotEqual(state['ExitCode'], 0) 
self.assertIn('Running', state) self.assertEqual(state['Running'], False) diff --git a/tests/integration/exec_test.py b/tests/integration/exec_test.py index 8bf2762a..f377e092 100644 --- a/tests/integration/exec_test.py +++ b/tests/integration/exec_test.py @@ -1,5 +1,3 @@ -import pytest - from docker.utils.socket import next_frame_size from docker.utils.socket import read_exactly @@ -10,9 +8,6 @@ BUSYBOX = helpers.BUSYBOX class ExecTest(helpers.BaseTestCase): def test_execute_command(self): - if not helpers.exec_driver_is_native(): - pytest.skip('Exec driver not native') - container = self.client.create_container(BUSYBOX, 'cat', detach=True, stdin_open=True) id = container['Id'] @@ -26,9 +21,6 @@ class ExecTest(helpers.BaseTestCase): self.assertEqual(exec_log, b'hello\n') def test_exec_command_string(self): - if not helpers.exec_driver_is_native(): - pytest.skip('Exec driver not native') - container = self.client.create_container(BUSYBOX, 'cat', detach=True, stdin_open=True) id = container['Id'] @@ -42,9 +34,6 @@ class ExecTest(helpers.BaseTestCase): self.assertEqual(exec_log, b'hello world\n') def test_exec_command_as_user(self): - if not helpers.exec_driver_is_native(): - pytest.skip('Exec driver not native') - container = self.client.create_container(BUSYBOX, 'cat', detach=True, stdin_open=True) id = container['Id'] @@ -58,9 +47,6 @@ class ExecTest(helpers.BaseTestCase): self.assertEqual(exec_log, b'default\n') def test_exec_command_as_root(self): - if not helpers.exec_driver_is_native(): - pytest.skip('Exec driver not native') - container = self.client.create_container(BUSYBOX, 'cat', detach=True, stdin_open=True) id = container['Id'] @@ -74,9 +60,6 @@ class ExecTest(helpers.BaseTestCase): self.assertEqual(exec_log, b'root\n') def test_exec_command_streaming(self): - if not helpers.exec_driver_is_native(): - pytest.skip('Exec driver not native') - container = self.client.create_container(BUSYBOX, 'cat', detach=True, stdin_open=True) id = container['Id'] @@ 
-92,9 +75,6 @@ class ExecTest(helpers.BaseTestCase): self.assertEqual(res, b'hello\nworld\n') def test_exec_start_socket(self): - if not helpers.exec_driver_is_native(): - pytest.skip('Exec driver not native') - container = self.client.create_container(BUSYBOX, 'cat', detach=True, stdin_open=True) container_id = container['Id'] @@ -116,9 +96,6 @@ class ExecTest(helpers.BaseTestCase): self.assertEqual(data.decode('utf-8'), line) def test_exec_inspect(self): - if not helpers.exec_driver_is_native(): - pytest.skip('Exec driver not native') - container = self.client.create_container(BUSYBOX, 'cat', detach=True, stdin_open=True) id = container['Id'] From 95d9306d2a1fd22dffb12a0548abf2d2f744ed9d Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Tue, 9 Aug 2016 13:20:17 -0700 Subject: [PATCH 55/83] Exclude requests 2.11 from setup.py to work around unicode bug Signed-off-by: Joffrey F --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index ac58b1f9..85a44994 100644 --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ ROOT_DIR = os.path.dirname(__file__) SOURCE_DIR = os.path.join(ROOT_DIR) requirements = [ - 'requests >= 2.5.2', + 'requests >= 2.5.2, < 2.11', 'six >= 1.4.0', 'websocket-client >= 0.32.0', ] From 3062ae4348ab916a9afd574cb70891b9131aff11 Mon Sep 17 00:00:00 2001 From: Tomas Tomecek Date: Wed, 27 Jul 2016 10:26:16 +0200 Subject: [PATCH 56/83] docker client consistency: don't quote ':/' E.g. 
docker client `/v1.21/images/localhost:5000/busybox/push?tag=` docker-py `/v1.21/images/localhost%3A5000%2Fbusybox/push` Signed-off-by: Tomas Tomecek --- docker/client.py | 4 +++- tests/unit/api_test.py | 10 ++++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/docker/client.py b/docker/client.py index 1b5420e9..9f75ce73 100644 --- a/docker/client.py +++ b/docker/client.py @@ -14,6 +14,7 @@ import json import struct +from functools import partial import requests import requests.exceptions @@ -157,7 +158,8 @@ class Client( 'instead'.format(arg, type(arg)) ) - args = map(six.moves.urllib.parse.quote_plus, args) + quote_f = partial(six.moves.urllib.parse.quote_plus, safe="/:") + args = map(quote_f, args) if kwargs.get('versioned_api', True): return '{0}/v{1}{2}'.format( diff --git a/tests/unit/api_test.py b/tests/unit/api_test.py index 696c0739..712f57e0 100644 --- a/tests/unit/api_test.py +++ b/tests/unit/api_test.py @@ -159,9 +159,15 @@ class DockerApiTest(DockerClientTest): '{0}{1}'.format(url_prefix, 'hello/somename/world/someothername') ) - url = self.client._url('/hello/{0}/world', '/some?name') + url = self.client._url('/hello/{0}/world', 'some?name') self.assertEqual( - url, '{0}{1}'.format(url_prefix, 'hello/%2Fsome%3Fname/world') + url, '{0}{1}'.format(url_prefix, 'hello/some%3Fname/world') + ) + + url = self.client._url("/images/{0}/push", "localhost:5000/image") + self.assertEqual( + url, + '{0}{1}'.format(url_prefix, 'images/localhost:5000/image/push') ) def test_url_invalid_resource(self): From 0f47db7fcc4ae2b22500afdd6b029c557d86f5b1 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Tue, 16 Aug 2016 13:36:42 -0400 Subject: [PATCH 57/83] Remove references to "ExecutionDriver" Docker no longer has an `ExecutionDriver` as of Docker 1.11. The field in the `docker info` API will not be present in 1.13. 
Found this while working on docker/docker#25721 Signed-off-by: Brian Goff --- tests/helpers.py | 9 --------- tests/integration/container_test.py | 9 ++------- tests/integration/exec_test.py | 23 ----------------------- 3 files changed, 2 insertions(+), 39 deletions(-) diff --git a/tests/helpers.py b/tests/helpers.py index 94ea3887..40baef9c 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -45,15 +45,6 @@ def untar_file(tardata, filename): return result -def exec_driver_is_native(): - global EXEC_DRIVER - if not EXEC_DRIVER: - c = docker_client() - EXEC_DRIVER = c.info()['ExecutionDriver'] - c.close() - return EXEC_DRIVER.startswith('native') or EXEC_DRIVER == '' - - def docker_client(**kwargs): return docker.Client(**docker_client_kwargs(**kwargs)) diff --git a/tests/integration/container_test.py b/tests/integration/container_test.py index f347c12a..334c81db 100644 --- a/tests/integration/container_test.py +++ b/tests/integration/container_test.py @@ -159,9 +159,6 @@ class CreateContainerTest(helpers.BaseTestCase): self.assertCountEqual(info['HostConfig']['VolumesFrom'], vol_names) def create_container_readonly_fs(self): - if not helpers.exec_driver_is_native(): - pytest.skip('Exec driver not native') - ctnr = self.client.create_container( BUSYBOX, ['mkdir', '/shrine'], host_config=self.client.create_host_config( @@ -806,8 +803,7 @@ class KillTest(helpers.BaseTestCase): self.assertIn('State', container_info) state = container_info['State'] self.assertIn('ExitCode', state) - if helpers.exec_driver_is_native(): - self.assertNotEqual(state['ExitCode'], 0) + self.assertNotEqual(state['ExitCode'], 0) self.assertIn('Running', state) self.assertEqual(state['Running'], False) @@ -821,8 +817,7 @@ class KillTest(helpers.BaseTestCase): self.assertIn('State', container_info) state = container_info['State'] self.assertIn('ExitCode', state) - if helpers.exec_driver_is_native(): - self.assertNotEqual(state['ExitCode'], 0) + self.assertNotEqual(state['ExitCode'], 0) 
self.assertIn('Running', state) self.assertEqual(state['Running'], False) diff --git a/tests/integration/exec_test.py b/tests/integration/exec_test.py index 8bf2762a..f377e092 100644 --- a/tests/integration/exec_test.py +++ b/tests/integration/exec_test.py @@ -1,5 +1,3 @@ -import pytest - from docker.utils.socket import next_frame_size from docker.utils.socket import read_exactly @@ -10,9 +8,6 @@ BUSYBOX = helpers.BUSYBOX class ExecTest(helpers.BaseTestCase): def test_execute_command(self): - if not helpers.exec_driver_is_native(): - pytest.skip('Exec driver not native') - container = self.client.create_container(BUSYBOX, 'cat', detach=True, stdin_open=True) id = container['Id'] @@ -26,9 +21,6 @@ class ExecTest(helpers.BaseTestCase): self.assertEqual(exec_log, b'hello\n') def test_exec_command_string(self): - if not helpers.exec_driver_is_native(): - pytest.skip('Exec driver not native') - container = self.client.create_container(BUSYBOX, 'cat', detach=True, stdin_open=True) id = container['Id'] @@ -42,9 +34,6 @@ class ExecTest(helpers.BaseTestCase): self.assertEqual(exec_log, b'hello world\n') def test_exec_command_as_user(self): - if not helpers.exec_driver_is_native(): - pytest.skip('Exec driver not native') - container = self.client.create_container(BUSYBOX, 'cat', detach=True, stdin_open=True) id = container['Id'] @@ -58,9 +47,6 @@ class ExecTest(helpers.BaseTestCase): self.assertEqual(exec_log, b'default\n') def test_exec_command_as_root(self): - if not helpers.exec_driver_is_native(): - pytest.skip('Exec driver not native') - container = self.client.create_container(BUSYBOX, 'cat', detach=True, stdin_open=True) id = container['Id'] @@ -74,9 +60,6 @@ class ExecTest(helpers.BaseTestCase): self.assertEqual(exec_log, b'root\n') def test_exec_command_streaming(self): - if not helpers.exec_driver_is_native(): - pytest.skip('Exec driver not native') - container = self.client.create_container(BUSYBOX, 'cat', detach=True, stdin_open=True) id = container['Id'] @@ 
-92,9 +75,6 @@ class ExecTest(helpers.BaseTestCase): self.assertEqual(res, b'hello\nworld\n') def test_exec_start_socket(self): - if not helpers.exec_driver_is_native(): - pytest.skip('Exec driver not native') - container = self.client.create_container(BUSYBOX, 'cat', detach=True, stdin_open=True) container_id = container['Id'] @@ -116,9 +96,6 @@ class ExecTest(helpers.BaseTestCase): self.assertEqual(data.decode('utf-8'), line) def test_exec_inspect(self): - if not helpers.exec_driver_is_native(): - pytest.skip('Exec driver not native') - container = self.client.create_container(BUSYBOX, 'cat', detach=True, stdin_open=True) id = container['Id'] From 172e95d52fe4f9bc6146828b34fbed567aec3945 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Fri, 24 Jun 2016 15:04:13 -0700 Subject: [PATCH 58/83] Swarm service API implementation Signed-off-by: Joffrey F --- docker/api/__init__.py | 6 +- docker/api/service.py | 232 +++++++++++++++++++++++++++++++++++++ docker/client.py | 5 +- docker/utils/decorators.py | 2 +- 4 files changed, 241 insertions(+), 4 deletions(-) create mode 100644 docker/api/service.py diff --git a/docker/api/__init__.py b/docker/api/__init__.py index b0d60878..3c74677d 100644 --- a/docker/api/__init__.py +++ b/docker/api/__init__.py @@ -4,6 +4,10 @@ from .container import ContainerApiMixin from .daemon import DaemonApiMixin from .exec_api import ExecApiMixin from .image import ImageApiMixin -from .volume import VolumeApiMixin from .network import NetworkApiMixin +from .service import ( + ServiceApiMixin, TaskTemplate, ContainerSpec, Mount, Resources, + RestartPolicy, UpdateConfig +) from .swarm import SwarmApiMixin +from .volume import VolumeApiMixin diff --git a/docker/api/service.py b/docker/api/service.py new file mode 100644 index 00000000..4be96242 --- /dev/null +++ b/docker/api/service.py @@ -0,0 +1,232 @@ +import six + +from .. import errors +from .. 
import utils + + +class ServiceApiMixin(object): + @utils.minimum_version('1.24') + def services(self, filters=None): + params = { + 'filters': utils.convert_filters(filters) if filters else None + } + url = self._url('/services') + return self._result(self._get(url, params=params), True) + + @utils.minimum_version('1.24') + def create_service( + self, task_config, name=None, labels=None, mode=None, + update_config=None, networks=None, endpoint_config=None + ): + url = self._url('/services/create') + data = { + 'Name': name, + 'Labels': labels, + 'TaskTemplate': task_config, + 'Mode': mode, + 'UpdateConfig': update_config, + 'Networks': networks, + 'Endpoint': endpoint_config + } + return self._result(self._post_json(url, data=data), True) + + @utils.minimum_version('1.24') + @utils.check_resource + def inspect_service(self, service): + url = self._url('/services/{0}', service) + return self._result(self._get(url), True) + + @utils.minimum_version('1.24') + @utils.check_resource + def remove_service(self, service): + url = self._url('/services/{0}', service) + resp = self._delete(url) + self._raise_for_status(resp) + + @utils.minimum_version('1.24') + @utils.check_resource + def update_service(self, service, task_template=None, name=None, + labels=None, mode=None, update_config=None, + networks=None, endpoint_config=None): + url = self._url('/services/{0}/update', service) + data = {} + if name is not None: + data['Name'] = name + if labels is not None: + data['Labels'] = labels + if mode is not None: + data['Mode'] = mode + if task_template is not None: + data['TaskTemplate'] = task_template + if update_config is not None: + data['UpdateConfig'] = update_config + if networks is not None: + data['Networks'] = networks + if endpoint_config is not None: + data['Endpoint'] = endpoint_config + + return self._result(self._post_json(url, data=data), True) + + +class TaskTemplate(dict): + def __init__(self, container_spec, resources=None, restart_policy=None, + 
placement=None, log_driver=None): + self['ContainerSpec'] = container_spec + if resources: + self['Resources'] = resources + if restart_policy: + self['RestartPolicy'] = restart_policy + if placement: + self['Placement'] = placement + if log_driver: + self['LogDriver'] = log_driver + + @property + def container_spec(self): + return self.get('ContainerSpec') + + @property + def resources(self): + return self.get('Resources') + + @property + def restart_policy(self): + return self.get('RestartPolicy') + + @property + def placement(self): + return self.get('Placement') + + +class ContainerSpec(dict): + def __init__(self, image, command=None, args=None, env=None, workdir=None, + user=None, labels=None, mounts=None, stop_grace_period=None): + self['Image'] = image + self['Command'] = command + self['Args'] = args + + if env is not None: + self['Env'] = env + if workdir is not None: + self['Dir'] = workdir + if user is not None: + self['User'] = user + if labels is not None: + self['Labels'] = labels + if mounts is not None: + for mount in mounts: + if isinstance(mount, six.string_types): + mounts.append(Mount.parse_mount_string(mount)) + mounts.remove(mount) + self['Mounts'] = mounts + if stop_grace_period is not None: + self['StopGracePeriod'] = stop_grace_period + + +class Mount(dict): + def __init__(self, target, source, type='volume', read_only=False, + propagation=None, no_copy=False, labels=None, + driver_config=None): + self['Target'] = target + self['Source'] = source + if type not in ('bind', 'volume'): + raise errors.DockerError( + 'Only acceptable mount types are `bind` and `volume`.' + ) + self['Type'] = type + + if type == 'bind': + if propagation is not None: + self['BindOptions'] = { + 'Propagation': propagation + } + if any(labels, driver_config, no_copy): + raise errors.DockerError( + 'Mount type is binding but volume options have been ' + 'provided.' 
+ ) + else: + volume_opts = {} + if no_copy: + volume_opts['NoCopy'] = True + if labels: + volume_opts['Labels'] = labels + if driver_config: + volume_opts['driver_config'] = driver_config + if volume_opts: + self['VolumeOptions'] = volume_opts + if propagation: + raise errors.DockerError( + 'Mount type is volume but `propagation` argument has been ' + 'provided.' + ) + + @classmethod + def parse_mount_string(cls, string): + parts = string.split(':') + if len(parts) > 3: + raise errors.DockerError( + 'Invalid mount format "{0}"'.format(string) + ) + if len(parts) == 1: + return cls(target=parts[0]) + else: + target = parts[1] + source = parts[0] + read_only = not (len(parts) == 3 or parts[2] == 'ro') + return cls(target, source, read_only=read_only) + + +class Resources(dict): + def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None, + mem_reservation=None): + limits = {} + reservation = {} + if cpu_limit is not None: + limits['NanoCPUs'] = cpu_limit + if mem_limit is not None: + limits['MemoryBytes'] = mem_limit + if cpu_reservation is not None: + reservation['NanoCPUs'] = cpu_reservation + if mem_reservation is not None: + reservation['MemoryBytes'] = mem_reservation + + self['Limits'] = limits + self['Reservation'] = reservation + + +class UpdateConfig(dict): + def __init__(self, parallelism=0, delay=None, failure_action='continue'): + self['Parallelism'] = parallelism + if delay is not None: + self['Delay'] = delay + if failure_action not in ('pause', 'continue'): + raise errors.DockerError( + 'failure_action must be either `pause` or `continue`.' 
+ ) + self['FailureAction'] = failure_action + + +class RestartConditionTypesEnum(object): + _values = ( + 'none', + 'on_failure', + 'any', + ) + NONE, ON_FAILURE, ANY = _values + + +class RestartPolicy(dict): + condition_types = RestartConditionTypesEnum + + def __init__(self, condition=RestartConditionTypesEnum.NONE, delay=0, + attempts=0, window=0): + if condition not in self.condition_types._values: + raise TypeError( + 'Invalid RestartPolicy condition {0}'.format(condition) + ) + + self['Condition'] = condition + self['Delay'] = delay + self['Attempts'] = attempts + self['Window'] = window diff --git a/docker/client.py b/docker/client.py index 9f75ce73..6c1e1890 100644 --- a/docker/client.py +++ b/docker/client.py @@ -48,9 +48,10 @@ class Client( api.DaemonApiMixin, api.ExecApiMixin, api.ImageApiMixin, - api.VolumeApiMixin, api.NetworkApiMixin, - api.SwarmApiMixin): + api.ServiceApiMixin, + api.SwarmApiMixin, + api.VolumeApiMixin): def __init__(self, base_url=None, version=None, timeout=constants.DEFAULT_TIMEOUT_SECONDS, tls=False, user_agent=constants.DEFAULT_USER_AGENT): diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py index 46c28a80..2fe880c4 100644 --- a/docker/utils/decorators.py +++ b/docker/utils/decorators.py @@ -13,7 +13,7 @@ def check_resource(f): elif kwargs.get('image'): resource_id = kwargs.pop('image') if isinstance(resource_id, dict): - resource_id = resource_id.get('Id') + resource_id = resource_id.get('Id', resource_id.get('ID')) if not resource_id: raise errors.NullResource( 'image or container param is undefined' From 02e99e4967bebb116a7d9d650647df912c608297 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Wed, 17 Aug 2016 18:42:52 -0700 Subject: [PATCH 59/83] Service API integration tests Signed-off-by: Joffrey F --- docker/api/service.py | 7 ++- tests/integration/service_test.py | 100 ++++++++++++++++++++++++++++++ 2 files changed, 105 insertions(+), 2 deletions(-) create mode 100644 tests/integration/service_test.py diff 
--git a/docker/api/service.py b/docker/api/service.py index 4be96242..db19ae53 100644 --- a/docker/api/service.py +++ b/docker/api/service.py @@ -42,10 +42,11 @@ class ServiceApiMixin(object): url = self._url('/services/{0}', service) resp = self._delete(url) self._raise_for_status(resp) + return True @utils.minimum_version('1.24') @utils.check_resource - def update_service(self, service, task_template=None, name=None, + def update_service(self, service, version, task_template=None, name=None, labels=None, mode=None, update_config=None, networks=None, endpoint_config=None): url = self._url('/services/{0}/update', service) @@ -65,7 +66,9 @@ class ServiceApiMixin(object): if endpoint_config is not None: data['Endpoint'] = endpoint_config - return self._result(self._post_json(url, data=data), True) + resp = self._post_json(url, data=data, params={'version': version}) + self._raise_for_status(resp) + return True class TaskTemplate(dict): diff --git a/tests/integration/service_test.py b/tests/integration/service_test.py new file mode 100644 index 00000000..00109868 --- /dev/null +++ b/tests/integration/service_test.py @@ -0,0 +1,100 @@ +import random + +import docker +# import pytest + +from ..base import requires_api_version +from .. 
import helpers + + +BUSYBOX = helpers.BUSYBOX + + +class ServiceTest(helpers.BaseTestCase): + def setUp(self): + super(ServiceTest, self).setUp() + try: + self.client.leave_swarm(force=True) + except docker.errors.APIError: + pass + self.client.init_swarm('eth0') + + def tearDown(self): + super(ServiceTest, self).tearDown() + for service in self.client.services(filters={'name': 'dockerpytest_'}): + try: + self.client.remove_service(service['ID']) + except docker.errors.APIError: + pass + try: + self.client.leave_swarm(force=True) + except docker.errors.APIError: + pass + + def get_service_name(self): + return 'dockerpytest_{0:x}'.format(random.getrandbits(64)) + + def create_simple_service(self, name=None): + if name: + name = 'dockerpytest_{0}'.format(name) + else: + name = self.get_service_name() + + container_spec = docker.api.ContainerSpec('busybox', ['echo', 'hello']) + task_tmpl = docker.api.TaskTemplate(container_spec) + return name, self.client.create_service(task_tmpl, name=name) + + @requires_api_version('1.24') + def test_list_services(self): + services = self.client.services() + assert isinstance(services, list) + + test_services = self.client.services(filters={'name': 'dockerpytest_'}) + assert len(test_services) == 0 + self.create_simple_service() + test_services = self.client.services(filters={'name': 'dockerpytest_'}) + assert len(test_services) == 1 + assert 'dockerpytest_' in test_services[0]['Spec']['Name'] + + def test_inspect_service_by_id(self): + svc_name, svc_id = self.create_simple_service() + svc_info = self.client.inspect_service(svc_id) + assert 'ID' in svc_info + assert svc_info['ID'] == svc_id['ID'] + + def test_inspect_service_by_name(self): + svc_name, svc_id = self.create_simple_service() + svc_info = self.client.inspect_service(svc_name) + assert 'ID' in svc_info + assert svc_info['ID'] == svc_id['ID'] + + def test_remove_service_by_id(self): + svc_name, svc_id = self.create_simple_service() + assert 
self.client.remove_service(svc_id) + test_services = self.client.services(filters={'name': 'dockerpytest_'}) + assert len(test_services) == 0 + + def test_rempve_service_by_name(self): + svc_name, svc_id = self.create_simple_service() + assert self.client.remove_service(svc_name) + test_services = self.client.services(filters={'name': 'dockerpytest_'}) + assert len(test_services) == 0 + + def test_create_service_simple(self): + name, svc_id = self.create_simple_service() + assert self.client.inspect_service(svc_id) + services = self.client.services(filters={'name': name}) + assert len(services) == 1 + assert services[0]['ID'] == svc_id['ID'] + + def test_update_service_name(self): + name, svc_id = self.create_simple_service() + svc_info = self.client.inspect_service(svc_id) + svc_version = svc_info['Version']['Index'] + new_name = self.get_service_name() + assert self.client.update_service( + svc_id, svc_version, name=new_name, + task_template=svc_info['Spec']['TaskTemplate'] + ) + svc_info = self.client.inspect_service(svc_id) + assert svc_info['Spec']['Name'] == new_name From 97094e4ea303b59ce05869132cf639305a6d947a Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Fri, 19 Aug 2016 16:46:21 -0700 Subject: [PATCH 60/83] New docker.types subpackage containing advanced config dictionary types Tests and docs updated to match docker.utils.types has been moved to docker.types Signed-off-by: Joffrey F --- docker/api/__init__.py | 5 +- docker/api/service.py | 187 +----------------- docker/types/__init__.py | 7 + docker/types/base.py | 7 + .../{utils/types.py => types/containers.py} | 50 +---- docker/types/services.py | 176 +++++++++++++++++ docker/types/swarm.py | 40 ++++ docker/utils/__init__.py | 6 +- docker/utils/utils.py | 2 +- docs/swarm.md | 8 +- setup.py | 3 +- tests/integration/service_test.py | 96 ++++++++- 12 files changed, 345 insertions(+), 242 deletions(-) create mode 100644 docker/types/__init__.py create mode 100644 docker/types/base.py rename 
docker/{utils/types.py => types/containers.py} (55%) create mode 100644 docker/types/services.py create mode 100644 docker/types/swarm.py diff --git a/docker/api/__init__.py b/docker/api/__init__.py index 3c74677d..bc7e93ce 100644 --- a/docker/api/__init__.py +++ b/docker/api/__init__.py @@ -5,9 +5,6 @@ from .daemon import DaemonApiMixin from .exec_api import ExecApiMixin from .image import ImageApiMixin from .network import NetworkApiMixin -from .service import ( - ServiceApiMixin, TaskTemplate, ContainerSpec, Mount, Resources, - RestartPolicy, UpdateConfig -) +from .service import ServiceApiMixin from .swarm import SwarmApiMixin from .volume import VolumeApiMixin diff --git a/docker/api/service.py b/docker/api/service.py index db19ae53..c62e4946 100644 --- a/docker/api/service.py +++ b/docker/api/service.py @@ -1,28 +1,17 @@ -import six - -from .. import errors from .. import utils class ServiceApiMixin(object): - @utils.minimum_version('1.24') - def services(self, filters=None): - params = { - 'filters': utils.convert_filters(filters) if filters else None - } - url = self._url('/services') - return self._result(self._get(url, params=params), True) - @utils.minimum_version('1.24') def create_service( - self, task_config, name=None, labels=None, mode=None, + self, task_template, name=None, labels=None, mode=None, update_config=None, networks=None, endpoint_config=None ): url = self._url('/services/create') data = { 'Name': name, 'Labels': labels, - 'TaskTemplate': task_config, + 'TaskTemplate': task_template, 'Mode': mode, 'UpdateConfig': update_config, 'Networks': networks, @@ -44,6 +33,14 @@ class ServiceApiMixin(object): self._raise_for_status(resp) return True + @utils.minimum_version('1.24') + def services(self, filters=None): + params = { + 'filters': utils.convert_filters(filters) if filters else None + } + url = self._url('/services') + return self._result(self._get(url, params=params), True) + @utils.minimum_version('1.24') @utils.check_resource def 
update_service(self, service, version, task_template=None, name=None, @@ -69,167 +66,3 @@ class ServiceApiMixin(object): resp = self._post_json(url, data=data, params={'version': version}) self._raise_for_status(resp) return True - - -class TaskTemplate(dict): - def __init__(self, container_spec, resources=None, restart_policy=None, - placement=None, log_driver=None): - self['ContainerSpec'] = container_spec - if resources: - self['Resources'] = resources - if restart_policy: - self['RestartPolicy'] = restart_policy - if placement: - self['Placement'] = placement - if log_driver: - self['LogDriver'] = log_driver - - @property - def container_spec(self): - return self.get('ContainerSpec') - - @property - def resources(self): - return self.get('Resources') - - @property - def restart_policy(self): - return self.get('RestartPolicy') - - @property - def placement(self): - return self.get('Placement') - - -class ContainerSpec(dict): - def __init__(self, image, command=None, args=None, env=None, workdir=None, - user=None, labels=None, mounts=None, stop_grace_period=None): - self['Image'] = image - self['Command'] = command - self['Args'] = args - - if env is not None: - self['Env'] = env - if workdir is not None: - self['Dir'] = workdir - if user is not None: - self['User'] = user - if labels is not None: - self['Labels'] = labels - if mounts is not None: - for mount in mounts: - if isinstance(mount, six.string_types): - mounts.append(Mount.parse_mount_string(mount)) - mounts.remove(mount) - self['Mounts'] = mounts - if stop_grace_period is not None: - self['StopGracePeriod'] = stop_grace_period - - -class Mount(dict): - def __init__(self, target, source, type='volume', read_only=False, - propagation=None, no_copy=False, labels=None, - driver_config=None): - self['Target'] = target - self['Source'] = source - if type not in ('bind', 'volume'): - raise errors.DockerError( - 'Only acceptable mount types are `bind` and `volume`.' 
- ) - self['Type'] = type - - if type == 'bind': - if propagation is not None: - self['BindOptions'] = { - 'Propagation': propagation - } - if any(labels, driver_config, no_copy): - raise errors.DockerError( - 'Mount type is binding but volume options have been ' - 'provided.' - ) - else: - volume_opts = {} - if no_copy: - volume_opts['NoCopy'] = True - if labels: - volume_opts['Labels'] = labels - if driver_config: - volume_opts['driver_config'] = driver_config - if volume_opts: - self['VolumeOptions'] = volume_opts - if propagation: - raise errors.DockerError( - 'Mount type is volume but `propagation` argument has been ' - 'provided.' - ) - - @classmethod - def parse_mount_string(cls, string): - parts = string.split(':') - if len(parts) > 3: - raise errors.DockerError( - 'Invalid mount format "{0}"'.format(string) - ) - if len(parts) == 1: - return cls(target=parts[0]) - else: - target = parts[1] - source = parts[0] - read_only = not (len(parts) == 3 or parts[2] == 'ro') - return cls(target, source, read_only=read_only) - - -class Resources(dict): - def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None, - mem_reservation=None): - limits = {} - reservation = {} - if cpu_limit is not None: - limits['NanoCPUs'] = cpu_limit - if mem_limit is not None: - limits['MemoryBytes'] = mem_limit - if cpu_reservation is not None: - reservation['NanoCPUs'] = cpu_reservation - if mem_reservation is not None: - reservation['MemoryBytes'] = mem_reservation - - self['Limits'] = limits - self['Reservation'] = reservation - - -class UpdateConfig(dict): - def __init__(self, parallelism=0, delay=None, failure_action='continue'): - self['Parallelism'] = parallelism - if delay is not None: - self['Delay'] = delay - if failure_action not in ('pause', 'continue'): - raise errors.DockerError( - 'failure_action must be either `pause` or `continue`.' 
- ) - self['FailureAction'] = failure_action - - -class RestartConditionTypesEnum(object): - _values = ( - 'none', - 'on_failure', - 'any', - ) - NONE, ON_FAILURE, ANY = _values - - -class RestartPolicy(dict): - condition_types = RestartConditionTypesEnum - - def __init__(self, condition=RestartConditionTypesEnum.NONE, delay=0, - attempts=0, window=0): - if condition not in self.condition_types._values: - raise TypeError( - 'Invalid RestartPolicy condition {0}'.format(condition) - ) - - self['Condition'] = condition - self['Delay'] = delay - self['Attempts'] = attempts - self['Window'] = window diff --git a/docker/types/__init__.py b/docker/types/__init__.py new file mode 100644 index 00000000..46f10d86 --- /dev/null +++ b/docker/types/__init__.py @@ -0,0 +1,7 @@ +# flake8: noqa +from .containers import LogConfig, Ulimit +from .services import ( + ContainerSpec, LogDriver, Mount, Resources, RestartPolicy, TaskTemplate, + UpdateConfig +) +from .swarm import SwarmSpec, SwarmExternalCA diff --git a/docker/types/base.py b/docker/types/base.py new file mode 100644 index 00000000..68910623 --- /dev/null +++ b/docker/types/base.py @@ -0,0 +1,7 @@ +import six + + +class DictType(dict): + def __init__(self, init): + for k, v in six.iteritems(init): + self[k] = v diff --git a/docker/utils/types.py b/docker/types/containers.py similarity index 55% rename from docker/utils/types.py rename to docker/types/containers.py index d778b90d..40a44caf 100644 --- a/docker/utils/types.py +++ b/docker/types/containers.py @@ -1,5 +1,7 @@ import six +from .base import DictType + class LogConfigTypesEnum(object): _values = ( @@ -13,12 +15,6 @@ class LogConfigTypesEnum(object): JSON, SYSLOG, JOURNALD, GELF, FLUENTD, NONE = _values -class DictType(dict): - def __init__(self, init): - for k, v in six.iteritems(init): - self[k] = v - - class LogConfig(DictType): types = LogConfigTypesEnum @@ -94,45 +90,3 @@ class Ulimit(DictType): @hard.setter def hard(self, value): self['Hard'] = value - - 
-class SwarmSpec(DictType): - def __init__(self, task_history_retention_limit=None, - snapshot_interval=None, keep_old_snapshots=None, - log_entries_for_slow_followers=None, heartbeat_tick=None, - election_tick=None, dispatcher_heartbeat_period=None, - node_cert_expiry=None, external_ca=None, name=None): - if task_history_retention_limit is not None: - self['Orchestration'] = { - 'TaskHistoryRetentionLimit': task_history_retention_limit - } - if any([snapshot_interval, keep_old_snapshots, - log_entries_for_slow_followers, heartbeat_tick, election_tick]): - self['Raft'] = { - 'SnapshotInterval': snapshot_interval, - 'KeepOldSnapshots': keep_old_snapshots, - 'LogEntriesForSlowFollowers': log_entries_for_slow_followers, - 'HeartbeatTick': heartbeat_tick, - 'ElectionTick': election_tick - } - - if dispatcher_heartbeat_period: - self['Dispatcher'] = { - 'HeartbeatPeriod': dispatcher_heartbeat_period - } - - if node_cert_expiry or external_ca: - self['CAConfig'] = { - 'NodeCertExpiry': node_cert_expiry, - 'ExternalCA': external_ca - } - - if name is not None: - self['Name'] = name - - -class SwarmExternalCA(DictType): - def __init__(self, url, protocol=None, options=None): - self['URL'] = url - self['Protocol'] = protocol - self['Options'] = options diff --git a/docker/types/services.py b/docker/types/services.py new file mode 100644 index 00000000..6a17e93f --- /dev/null +++ b/docker/types/services.py @@ -0,0 +1,176 @@ +import six + +from .. 
import errors + + +class TaskTemplate(dict): + def __init__(self, container_spec, resources=None, restart_policy=None, + placement=None, log_driver=None): + self['ContainerSpec'] = container_spec + if resources: + self['Resources'] = resources + if restart_policy: + self['RestartPolicy'] = restart_policy + if placement: + self['Placement'] = placement + if log_driver: + self['LogDriver'] = log_driver + + @property + def container_spec(self): + return self.get('ContainerSpec') + + @property + def resources(self): + return self.get('Resources') + + @property + def restart_policy(self): + return self.get('RestartPolicy') + + @property + def placement(self): + return self.get('Placement') + + +class ContainerSpec(dict): + def __init__(self, image, command=None, args=None, env=None, workdir=None, + user=None, labels=None, mounts=None, stop_grace_period=None): + self['Image'] = image + self['Command'] = command + self['Args'] = args + + if env is not None: + self['Env'] = env + if workdir is not None: + self['Dir'] = workdir + if user is not None: + self['User'] = user + if labels is not None: + self['Labels'] = labels + if mounts is not None: + for mount in mounts: + if isinstance(mount, six.string_types): + mounts.append(Mount.parse_mount_string(mount)) + mounts.remove(mount) + self['Mounts'] = mounts + if stop_grace_period is not None: + self['StopGracePeriod'] = stop_grace_period + + +class Mount(dict): + def __init__(self, target, source, type='volume', read_only=False, + propagation=None, no_copy=False, labels=None, + driver_config=None): + self['Target'] = target + self['Source'] = source + if type not in ('bind', 'volume'): + raise errors.DockerError( + 'Only acceptable mount types are `bind` and `volume`.' 
+ ) + self['Type'] = type + + if type == 'bind': + if propagation is not None: + self['BindOptions'] = { + 'Propagation': propagation + } + if any(labels, driver_config, no_copy): + raise errors.DockerError( + 'Mount type is binding but volume options have been ' + 'provided.' + ) + else: + volume_opts = {} + if no_copy: + volume_opts['NoCopy'] = True + if labels: + volume_opts['Labels'] = labels + if driver_config: + volume_opts['driver_config'] = driver_config + if volume_opts: + self['VolumeOptions'] = volume_opts + if propagation: + raise errors.DockerError( + 'Mount type is volume but `propagation` argument has been ' + 'provided.' + ) + + @classmethod + def parse_mount_string(cls, string): + parts = string.split(':') + if len(parts) > 3: + raise errors.DockerError( + 'Invalid mount format "{0}"'.format(string) + ) + if len(parts) == 1: + return cls(target=parts[0]) + else: + target = parts[1] + source = parts[0] + read_only = not (len(parts) == 3 or parts[2] == 'ro') + return cls(target, source, read_only=read_only) + + +class Resources(dict): + def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None, + mem_reservation=None): + limits = {} + reservation = {} + if cpu_limit is not None: + limits['NanoCPUs'] = cpu_limit + if mem_limit is not None: + limits['MemoryBytes'] = mem_limit + if cpu_reservation is not None: + reservation['NanoCPUs'] = cpu_reservation + if mem_reservation is not None: + reservation['MemoryBytes'] = mem_reservation + + if limits: + self['Limits'] = limits + if reservation: + self['Reservations'] = reservation + + +class UpdateConfig(dict): + def __init__(self, parallelism=0, delay=None, failure_action='continue'): + self['Parallelism'] = parallelism + if delay is not None: + self['Delay'] = delay + if failure_action not in ('pause', 'continue'): + raise errors.DockerError( + 'failure_action must be either `pause` or `continue`.' 
+ ) + self['FailureAction'] = failure_action + + +class RestartConditionTypesEnum(object): + _values = ( + 'none', + 'on_failure', + 'any', + ) + NONE, ON_FAILURE, ANY = _values + + +class RestartPolicy(dict): + condition_types = RestartConditionTypesEnum + + def __init__(self, condition=RestartConditionTypesEnum.NONE, delay=0, + max_attempts=0, window=0): + if condition not in self.condition_types._values: + raise TypeError( + 'Invalid RestartPolicy condition {0}'.format(condition) + ) + + self['Condition'] = condition + self['Delay'] = delay + self['MaxAttempts'] = max_attempts + self['Window'] = window + + +class LogDriver(dict): + def __init__(self, name, options=None): + self['Name'] = name + if options: + self['Options'] = options diff --git a/docker/types/swarm.py b/docker/types/swarm.py new file mode 100644 index 00000000..865fde62 --- /dev/null +++ b/docker/types/swarm.py @@ -0,0 +1,40 @@ +class SwarmSpec(dict): + def __init__(self, task_history_retention_limit=None, + snapshot_interval=None, keep_old_snapshots=None, + log_entries_for_slow_followers=None, heartbeat_tick=None, + election_tick=None, dispatcher_heartbeat_period=None, + node_cert_expiry=None, external_ca=None, name=None): + if task_history_retention_limit is not None: + self['Orchestration'] = { + 'TaskHistoryRetentionLimit': task_history_retention_limit + } + if any([snapshot_interval, keep_old_snapshots, + log_entries_for_slow_followers, heartbeat_tick, election_tick]): + self['Raft'] = { + 'SnapshotInterval': snapshot_interval, + 'KeepOldSnapshots': keep_old_snapshots, + 'LogEntriesForSlowFollowers': log_entries_for_slow_followers, + 'HeartbeatTick': heartbeat_tick, + 'ElectionTick': election_tick + } + + if dispatcher_heartbeat_period: + self['Dispatcher'] = { + 'HeartbeatPeriod': dispatcher_heartbeat_period + } + + if node_cert_expiry or external_ca: + self['CAConfig'] = { + 'NodeCertExpiry': node_cert_expiry, + 'ExternalCA': external_ca + } + + if name is not None: + self['Name'] = name 
+ + +class SwarmExternalCA(dict): + def __init__(self, url, protocol=None, options=None): + self['URL'] = url + self['Protocol'] = protocol + self['Options'] = options diff --git a/docker/utils/__init__.py b/docker/utils/__init__.py index 35acc779..4bb3876e 100644 --- a/docker/utils/__init__.py +++ b/docker/utils/__init__.py @@ -8,8 +8,6 @@ from .utils import ( create_ipam_config, create_ipam_pool, parse_devices, normalize_links, ) -from .types import LogConfig, Ulimit -from .types import ( - SwarmExternalCA, SwarmSpec, -) +from ..types import LogConfig, Ulimit +from ..types import SwarmExternalCA, SwarmSpec from .decorators import check_resource, minimum_version, update_headers diff --git a/docker/utils/utils.py b/docker/utils/utils.py index 00a7af14..bea436a3 100644 --- a/docker/utils/utils.py +++ b/docker/utils/utils.py @@ -31,7 +31,7 @@ import six from .. import constants from .. import errors from .. import tls -from .types import Ulimit, LogConfig +from ..types import Ulimit, LogConfig if six.PY2: from urllib import splitnport diff --git a/docs/swarm.md b/docs/swarm.md index 0cd015a0..3cc44f87 100644 --- a/docs/swarm.md +++ b/docs/swarm.md @@ -95,7 +95,7 @@ Initialize a new Swarm using the current connected engine as the first node. #### Client.create_swarm_spec -Create a `docker.utils.SwarmSpec` instance that can be used as the `swarm_spec` +Create a `docker.types.SwarmSpec` instance that can be used as the `swarm_spec` argument in `Client.init_swarm`. **Params:** @@ -113,12 +113,12 @@ argument in `Client.init_swarm`. heartbeat to the dispatcher. * node_cert_expiry (int): Automatic expiry for nodes certificates. * external_ca (dict): Configuration for forwarding signing requests to an - external certificate authority. Use `docker.utils.SwarmExternalCA`. + external certificate authority. Use `docker.types.SwarmExternalCA`. * name (string): Swarm's name -**Returns:** `docker.utils.SwarmSpec` instance. +**Returns:** `docker.types.SwarmSpec` instance. 
-#### docker.utils.SwarmExternalCA +#### docker.types.SwarmExternalCA Create a configuration dictionary for the `external_ca` argument in a `SwarmSpec`. diff --git a/setup.py b/setup.py index 85a44994..c809321e 100644 --- a/setup.py +++ b/setup.py @@ -36,7 +36,8 @@ setup( url='https://github.com/docker/docker-py/', packages=[ 'docker', 'docker.api', 'docker.auth', 'docker.transport', - 'docker.utils', 'docker.utils.ports', 'docker.ssladapter' + 'docker.utils', 'docker.utils.ports', 'docker.ssladapter', + 'docker.types', ], install_requires=requirements, tests_require=test_requirements, diff --git a/tests/integration/service_test.py b/tests/integration/service_test.py index 00109868..fda62b35 100644 --- a/tests/integration/service_test.py +++ b/tests/integration/service_test.py @@ -40,8 +40,10 @@ class ServiceTest(helpers.BaseTestCase): else: name = self.get_service_name() - container_spec = docker.api.ContainerSpec('busybox', ['echo', 'hello']) - task_tmpl = docker.api.TaskTemplate(container_spec) + container_spec = docker.types.ContainerSpec( + 'busybox', ['echo', 'hello'] + ) + task_tmpl = docker.types.TaskTemplate(container_spec) return name, self.client.create_service(task_tmpl, name=name) @requires_api_version('1.24') @@ -74,7 +76,7 @@ class ServiceTest(helpers.BaseTestCase): test_services = self.client.services(filters={'name': 'dockerpytest_'}) assert len(test_services) == 0 - def test_rempve_service_by_name(self): + def test_remove_service_by_name(self): svc_name, svc_id = self.create_simple_service() assert self.client.remove_service(svc_name) test_services = self.client.services(filters={'name': 'dockerpytest_'}) @@ -87,6 +89,94 @@ class ServiceTest(helpers.BaseTestCase): assert len(services) == 1 assert services[0]['ID'] == svc_id['ID'] + def test_create_service_custom_log_driver(self): + container_spec = docker.types.ContainerSpec( + 'busybox', ['echo', 'hello'] + ) + log_cfg = docker.types.LogDriver('none') + task_tmpl = docker.types.TaskTemplate( + 
container_spec, log_driver=log_cfg + ) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'TaskTemplate' in svc_info['Spec'] + res_template = svc_info['Spec']['TaskTemplate'] + assert 'LogDriver' in res_template + assert 'Name' in res_template['LogDriver'] + assert res_template['LogDriver']['Name'] == 'none' + + def test_create_service_with_volume_mount(self): + vol_name = self.get_service_name() + container_spec = docker.types.ContainerSpec( + 'busybox', ['ls'], + mounts=[ + docker.types.Mount(target='/test', source=vol_name) + ] + ) + self.tmp_volumes.append(vol_name) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate'] + cspec = svc_info['Spec']['TaskTemplate']['ContainerSpec'] + assert 'Mounts' in cspec + assert len(cspec['Mounts']) == 1 + mount = cspec['Mounts'][0] + assert mount['Target'] == '/test' + assert mount['Source'] == vol_name + assert mount['Type'] == 'volume' + + def test_create_service_with_resources_constraints(self): + container_spec = docker.types.ContainerSpec('busybox', ['true']) + resources = docker.types.Resources( + cpu_limit=4000000, mem_limit=3 * 1024 * 1024 * 1024, + cpu_reservation=3500000, mem_reservation=2 * 1024 * 1024 * 1024 + ) + task_tmpl = docker.types.TaskTemplate( + container_spec, resources=resources + ) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'TaskTemplate' in svc_info['Spec'] + res_template = svc_info['Spec']['TaskTemplate'] + assert 'Resources' in res_template + assert res_template['Resources']['Limits'] == resources['Limits'] + assert res_template['Resources']['Reservations'] == resources[ + 'Reservations' + ] + 
+ def test_create_service_with_update_config(self): + container_spec = docker.types.ContainerSpec('busybox', ['true']) + task_tmpl = docker.types.TaskTemplate(container_spec) + update_config = docker.types.UpdateConfig( + parallelism=10, delay=5, failure_action='pause' + ) + name = self.get_service_name() + svc_id = self.client.create_service( + task_tmpl, update_config=update_config, name=name + ) + svc_info = self.client.inspect_service(svc_id) + assert 'UpdateConfig' in svc_info['Spec'] + assert update_config == svc_info['Spec']['UpdateConfig'] + + def test_create_service_with_restart_policy(self): + container_spec = docker.types.ContainerSpec('busybox', ['true']) + policy = docker.types.RestartPolicy( + docker.types.RestartPolicy.condition_types.ANY, + delay=5, max_attempts=5 + ) + task_tmpl = docker.types.TaskTemplate( + container_spec, restart_policy=policy + ) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'RestartPolicy' in svc_info['Spec']['TaskTemplate'] + assert policy == svc_info['Spec']['TaskTemplate']['RestartPolicy'] + def test_update_service_name(self): name, svc_id = self.create_simple_service() svc_info = self.client.inspect_service(svc_id) From 8e97cb785758653116d5383094ef923f86b11ea9 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Fri, 19 Aug 2016 16:51:39 -0700 Subject: [PATCH 61/83] Services API documentation (WIP) Signed-off-by: Joffrey F --- docs/api.md | 25 +++++++++++++ docs/services.md | 93 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 118 insertions(+) create mode 100644 docs/services.md diff --git a/docs/api.md b/docs/api.md index ddfaffeb..12467eda 100644 --- a/docs/api.md +++ b/docs/api.md @@ -302,6 +302,11 @@ Create a network, similar to the `docker network create` command. 
**Returns** (dict): The created network reference object +## create_service + +Create a service, similar to the `docker service create` command. See the +[services documentation](services.md#Clientcreate_service) for details. + ## create_volume Create and register a named volume @@ -651,6 +656,11 @@ Retrieve network info by id. Retrieve low-level information about a Swarm node. See the [Swarm documentation](swarm.md#clientinspect_node). +## inspect_service + +Create a service, similar to the `docker service create` command. See the +[services documentation](services.md#clientinspect_service) for details. + ## inspect_swarm Retrieve information about the current Swarm. @@ -895,6 +905,11 @@ Remove a network. Similar to the `docker network rm` command. Failure to remove will raise a `docker.errors.APIError` exception. +## remove_service + +Remove a service, similar to the `docker service rm` command. See the +[services documentation](services.md#clientremove_service) for details. + ## remove_volume Remove a volume. Similar to the `docker volume rm` command. @@ -963,6 +978,11 @@ Identical to the `docker search` command. ... ``` +## services + +List services, similar to the `docker service ls` command. See the +[services documentation](services.md#clientservices) for details. + ## start Similar to the `docker start` command, but doesn't support attach options. Use @@ -1083,6 +1103,11 @@ Update resource configs of one or more containers. **Returns** (dict): Dictionary containing a `Warnings` key. +## update_service + +Update a service, similar to the `docker service update` command. See the +[services documentation](services.md#clientupdate_service) for details. + ## update_swarm Update the current Swarm. 
diff --git a/docs/services.md b/docs/services.md new file mode 100644 index 00000000..f9cd428e --- /dev/null +++ b/docs/services.md @@ -0,0 +1,93 @@ +# Swarm services + +Starting with Engine version 1.12 (API 1.24), it is possible to manage services +using the Docker Engine API. Note that the engine needs to be part of a +[Swarm cluster](swarm.md) before you can use the service-related methods. + +## Creating a service + +The `Client.create_service` method lets you create a new service inside the +cluster. The method takes several arguments, `task_template` being mandatory. +This dictionary of values is most easily produced by instantiating a +`TaskTemplate` object. + +```python +container_spec = docker.types.ContainerSpec( + image='busybox', command=['echo', 'hello'] +) +task_tmpl = docker.types.TaskTemplate(container_spec) +service_id = client.create_service(task_tmpl, name=name) +``` + +## Listing services + +List all existing services using the `Client.services` method. + +```python +client.services(filters={'name': 'mysql'}) +``` + +## Retrieving service configuration + +To retrieve detailed information and configuration for a specific service, you +may use the `Client.inspect_service` method using the service's ID or name. + +```python +client.inspect_service(service='my_service_name') +``` + +## Updating service configuration + +The `Client.update_service` method lets you update a service's configuration. +The mandatory `version` argument (used to prevent concurrent writes) can be +retrieved using `Client.inspect_service`. + +```python +container_spec = docker.types.ContainerSpec( + image='busybox', command=['echo', 'hello world'] +) +task_tmpl = docker.types.TaskTemplate(container_spec) + +svc_version = client.inspect_service(svc_id)['Version']['Index'] + +client.update_service( + svc_id, svc_version, name='new_name', task_template=task_tmpl +) +``` + +## Removing a service + +A service may be removed simply using the `Client.remove_service` method. 
+Either the service name or service ID can be used as argument. + +```python +client.remove_service('my_service_name') +``` + +## Service API documentation + +### Client.create_service + +### Client.inspect_service + +### Client.remove_service + +### Client.services + +### Client.update_service + +### Configuration objects (`docker.types`) + +#### ContainerSpec + +#### LogDriver + +#### Mount + +#### Resources + +#### RestartPolicy + +#### TaskTemplate + +#### UpdateConfig From 1e2c58de9efdffc23dea78c9853fa08adb9109f3 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Fri, 19 Aug 2016 17:02:33 -0700 Subject: [PATCH 62/83] Add new pages to mkdocs index Signed-off-by: Joffrey F --- mkdocs.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mkdocs.yml b/mkdocs.yml index 67b40893..6cfaa543 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -13,6 +13,8 @@ pages: - Host devices: host-devices.md - Host configuration: hostconfig.md - Network configuration: networks.md +- Swarm management: swarm.md +- Swarm services: services.md - Using tmpfs: tmpfs.md - Using with Docker Machine: machine.md - Change Log: change_log.md From f53cdc3a0704e501170b326048d8b90d90e6a4ed Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Mon, 22 Aug 2016 16:11:48 -0700 Subject: [PATCH 63/83] Rename LogDriver to DriverConfig for genericity The class can be used for both log driver and volume driver specs. Use a name that reflects this. 
Signed-off-by: Joffrey F --- docker/types/__init__.py | 2 +- docker/types/services.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/types/__init__.py b/docker/types/__init__.py index 46f10d86..3609581d 100644 --- a/docker/types/__init__.py +++ b/docker/types/__init__.py @@ -1,7 +1,7 @@ # flake8: noqa from .containers import LogConfig, Ulimit from .services import ( - ContainerSpec, LogDriver, Mount, Resources, RestartPolicy, TaskTemplate, + ContainerSpec, DriverConfig, Mount, Resources, RestartPolicy, TaskTemplate, UpdateConfig ) from .swarm import SwarmSpec, SwarmExternalCA diff --git a/docker/types/services.py b/docker/types/services.py index 6a17e93f..dcc84f9c 100644 --- a/docker/types/services.py +++ b/docker/types/services.py @@ -169,7 +169,7 @@ class RestartPolicy(dict): self['Window'] = window -class LogDriver(dict): +class DriverConfig(dict): def __init__(self, name, options=None): self['Name'] = name if options: From 7d5a1eeb7a46f17136aaf1041660d043a85051fc Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Mon, 22 Aug 2016 16:13:06 -0700 Subject: [PATCH 64/83] Add services documentation Signed-off-by: Joffrey F --- docs/services.md | 163 +++++++++++++++++++++++++++++- tests/integration/service_test.py | 2 +- 2 files changed, 163 insertions(+), 2 deletions(-) diff --git a/docs/services.md b/docs/services.md index f9cd428e..a6bb7d63 100644 --- a/docs/services.md +++ b/docs/services.md @@ -68,26 +68,187 @@ client.remove_service('my_service_name') ### Client.create_service +Create a service. + +**Params:** + +* task_template (dict): Specification of the task to start as part of the new + service. See the [TaskTemplate class](#TaskTemplate) for details. +* name (string): User-defined name for the service. Optional. +* labels (dict): A map of labels to associate with the service. Optional. +* mode (string): Scheduling mode for the service (`replicated` or `global`). + Defaults to `replicated`. 
+* update_config (dict): Specification for the update strategy of the service. + See the [UpdateConfig class](#UpdateConfig) for details. Default: `None`. +* networks (list): List of network names or IDs to attach the service to. + Default: `None`. +* endpoint_config (dict): Properties that can be configured to access and load + balance a service. Default: `None`. + +**Returns:** A dictionary containing an `ID` key for the newly created service. + ### Client.inspect_service +Return information on a service. + +**Params:** + +* service (string): A service identifier (either its name or service ID) + +**Returns:** A dictionary with information about the service. Raises an `APIError` otherwise. + ### Client.remove_service +Stop and remove a service. + +**Params:** + +* service (string): A service identifier (either its name or service ID) + +**Returns:** `True` if successful. Raises an `APIError` otherwise. + ### Client.services +List services. + +**Params:** + +* filters (dict): Filters to process on the services list. Valid filters: + `id` and `name`. Default: `None`. + +**Returns:** A list of dictionaries containing data about each service. + ### Client.update_service +Update a service. + +**Params:** + +* service (string): A service identifier (either its name or service ID). +* version (int): The version number of the service object being updated. This + is required to avoid conflicting writes. +* task_template (dict): Specification of the updated task to start as part of + the service. See the [TaskTemplate class](#TaskTemplate) for details. +* name (string): New name for the service. Optional. +* labels (dict): A map of labels to associate with the service. Optional. +* mode (string): Scheduling mode for the service (`replicated` or `global`). + Defaults to `replicated`. +* update_config (dict): Specification for the update strategy of the service. + See the [UpdateConfig class](#UpdateConfig) for details. Default: `None`. 
+* networks (list): List of network names or IDs to attach the service to. + Default: `None`. +* endpoint_config (dict): Properties that can be configured to access and load + balance a service. Default: `None`. + +**Returns:** `True` if successful. Raises an `APIError` otherwise. + ### Configuration objects (`docker.types`) #### ContainerSpec -#### LogDriver +A `ContainerSpec` object describes the behavior of containers that are part +of a task, and is used when declaring a `TaskTemplate`. + +**Params:** + +* image (string): The image name to use for the container. +* command (string or list): The command to be run in the image. +* args (list): Arguments to the command. +* env (dict): Environment variables. +* dir (string): The working directory for commands to run in. +* user (string): The user inside the container. +* labels (dict): A map of labels to associate with the service. +* mounts (list): A list of specifications for mounts to be added to containers + created as part of the service. See the [Mount class](#Mount) for details. +* stop_grace_period (int): Amount of time to wait for the container to + terminate before forcefully killing it. + +#### DriverConfig + +A `DriverConfig` object indicates which driver to use, as well as its +configuration. It can be used for the `log_driver` in a `ContainerSpec`, +and for the `driver_config` in a volume `Mount`. + +**Params:** + +* name (string): Name of the logging driver to use. +* options (dict): Driver-specific options. Default: `None`. #### Mount +A `Mount` object describes a mounted folder's configuration inside a +container. A list of `Mount`s would be used as part of a `ContainerSpec`. + +* target (string): Container path. +* source (string): Mount source (e.g. a volume name or a host path). +* type (string): The mount type (`bind` or `volume`). Default: `volume`. +* read_only (bool): Whether the mount should be read-only. 
+* propagation (string): A propagation mode with the value `[r]private`, + `[r]shared`, or `[r]slave`. Only valid for the `bind` type. +* no_copy (bool): False if the volume should be populated with the data from + the target. Default: `False`. Only valid for the `volume` type. +* labels (dict): User-defined name and labels for the volume. Only valid for + the `volume` type. +* driver_config (dict): Volume driver configuration. + See the [DriverConfig class](#DriverConfig) for details. Only valid for the + `volume` type. + #### Resources +A `Resources` object configures resource allocation for containers when +made part of a `ContainerSpec`. + +**Params:** + +* cpu_limit (int): CPU limit in units of 10^9 CPU shares. +* mem_limit (int): Memory limit in Bytes. +* cpu_reservation (int): CPU reservation in units of 10^9 CPU shares. +* mem_reservation (int): Memory reservation in Bytes. + #### RestartPolicy +A `RestartPolicy` object is used when creating a `ContainerSpec`. It dictates +whether a container should restart after stopping or failing. + +* condition (string): Condition for restart (`none`, `on-failure`, or `any`). + Default: `none`. +* delay (int): Delay between restart attempts. Default: 0 +* attempts (int): Maximum attempts to restart a given container before giving + up. Default value is 0, which is ignored. +* window (int): Time window used to evaluate the restart policy. Default value + is 0, which is unbounded. + + #### TaskTemplate +A `TaskTemplate` object can be used to describe the task specification to be +used when creating or updating a service. + +**Params:** + +* container_spec (dict): Container settings for containers started as part of + this task. See the [ContainerSpec class](#ContainerSpec) for details. +* log_driver (dict): Log configuration for containers created as part of the + service. See the [DriverConfig class](#DriverConfig) for details. 
+* resources (dict): Resource requirements which apply to each individual + container created as part of the service. See the + [Resources class](#Resources) for details. +* restart_policy (dict): Specification for the restart policy which applies + to containers created as part of this service. See the + [RestartPolicy class](#RestartPolicy) for details. +* placement (list): A list of constraints. + + #### UpdateConfig + +An `UpdateConfig` object can be used to specify the way container updates +should be performed by a service. + +**Params:** + +* parallelism (int): Maximum number of tasks to be updated in one iteration + (0 means unlimited parallelism). Default: 0. +* delay (int): Amount of time between updates. +* failure_action (string): Action to take if an updated task fails to run, or + stops running during the update. Acceptable values are `continue` and + `pause`. Default: `continue` diff --git a/tests/integration/service_test.py b/tests/integration/service_test.py index fda62b35..3113df18 100644 --- a/tests/integration/service_test.py +++ b/tests/integration/service_test.py @@ -93,7 +93,7 @@ class ServiceTest(helpers.BaseTestCase): container_spec = docker.types.ContainerSpec( 'busybox', ['echo', 'hello'] ) - log_cfg = docker.types.LogDriver('none') + log_cfg = docker.types.DriverConfig('none') task_tmpl = docker.types.TaskTemplate( container_spec, log_driver=log_cfg ) From 775b581c04dfa5f7d421ad74f969f25869fa8151 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Mon, 22 Aug 2016 19:12:27 -0700 Subject: [PATCH 65/83] Private images support in create_service / update_service Refactor auth header computation Add tasks methods and documentation. 
Signed-off-by: Joffrey F --- docker/api/image.py | 41 +++++-------------------------- docker/api/service.py | 41 +++++++++++++++++++++++++++++-- docker/auth/auth.py | 20 +++++++++++++++ docker/types/services.py | 5 ++++ docs/api.md | 21 ++++++++++++++++ tests/integration/service_test.py | 1 - 6 files changed, 91 insertions(+), 38 deletions(-) diff --git a/docker/api/image.py b/docker/api/image.py index 2bdbce83..4d6561e5 100644 --- a/docker/api/image.py +++ b/docker/api/image.py @@ -166,28 +166,10 @@ class ImageApiMixin(object): headers = {} if utils.compare_version('1.5', self._version) >= 0: - # If we don't have any auth data so far, try reloading the config - # file one more time in case anything showed up in there. if auth_config is None: - log.debug('Looking for auth config') - if not self._auth_configs: - log.debug( - "No auth config in memory - loading from filesystem" - ) - self._auth_configs = auth.load_config() - authcfg = auth.resolve_authconfig(self._auth_configs, registry) - # Do not fail here if no authentication exists for this - # specific registry as we can have a readonly pull. Just - # put the header if we can. - if authcfg: - log.debug('Found auth config') - # auth_config needs to be a dict in the format used by - # auth.py username , password, serveraddress, email - headers['X-Registry-Auth'] = auth.encode_header( - authcfg - ) - else: - log.debug('No auth config found') + header = auth.get_config_header(self, registry) + if header: + headers['X-Registry-Auth'] = header else: log.debug('Sending supplied auth config') headers['X-Registry-Auth'] = auth.encode_header(auth_config) @@ -222,21 +204,10 @@ class ImageApiMixin(object): headers = {} if utils.compare_version('1.5', self._version) >= 0: - # If we don't have any auth data so far, try reloading the config - # file one more time in case anything showed up in there. 
if auth_config is None: - log.debug('Looking for auth config') - if not self._auth_configs: - log.debug( - "No auth config in memory - loading from filesystem" - ) - self._auth_configs = auth.load_config() - authcfg = auth.resolve_authconfig(self._auth_configs, registry) - # Do not fail here if no authentication exists for this - # specific registry as we can have a readonly pull. Just - # put the header if we can. - if authcfg: - headers['X-Registry-Auth'] = auth.encode_header(authcfg) + header = auth.get_config_header(self, registry) + if header: + headers['X-Registry-Auth'] = header else: log.debug('Sending supplied auth config') headers['X-Registry-Auth'] = auth.encode_header(auth_config) diff --git a/docker/api/service.py b/docker/api/service.py index c62e4946..baebbadf 100644 --- a/docker/api/service.py +++ b/docker/api/service.py @@ -1,4 +1,6 @@ +from .. import errors from .. import utils +from ..auth import auth class ServiceApiMixin(object): @@ -8,6 +10,16 @@ class ServiceApiMixin(object): update_config=None, networks=None, endpoint_config=None ): url = self._url('/services/create') + headers = {} + image = task_template.get('ContainerSpec', {}).get('Image', None) + if image is None: + raise errors.DockerException( + 'Missing mandatory Image key in ContainerSpec' + ) + registry, repo_name = auth.resolve_repository_name(image) + auth_header = auth.get_config_header(self, registry) + if auth_header: + headers['X-Registry-Auth'] = auth_header data = { 'Name': name, 'Labels': labels, @@ -17,7 +29,9 @@ class ServiceApiMixin(object): 'Networks': networks, 'Endpoint': endpoint_config } - return self._result(self._post_json(url, data=data), True) + return self._result( + self._post_json(url, data=data, headers=headers), True + ) @utils.minimum_version('1.24') @utils.check_resource @@ -25,6 +39,12 @@ class ServiceApiMixin(object): url = self._url('/services/{0}', service) return self._result(self._get(url), True) + @utils.minimum_version('1.24') + 
@utils.check_resource + def inspect_task(self, task): + url = self._url('/tasks/{0}', task) + return self._result(self._get(url), True) + @utils.minimum_version('1.24') @utils.check_resource def remove_service(self, service): @@ -41,6 +61,14 @@ class ServiceApiMixin(object): url = self._url('/services') return self._result(self._get(url, params=params), True) + @utils.minimum_version('1.24') + def tasks(self, filters=None): + params = { + 'filters': utils.convert_filters(filters) if filters else None + } + url = self._url('/tasks') + return self._result(self._get(url, params=params), True) + @utils.minimum_version('1.24') @utils.check_resource def update_service(self, service, version, task_template=None, name=None, @@ -48,6 +76,7 @@ class ServiceApiMixin(object): networks=None, endpoint_config=None): url = self._url('/services/{0}/update', service) data = {} + headers = {} if name is not None: data['Name'] = name if labels is not None: @@ -55,6 +84,12 @@ class ServiceApiMixin(object): if mode is not None: data['Mode'] = mode if task_template is not None: + image = task_template.get('ContainerSpec', {}).get('Image', None) + if image is not None: + registry, repo_name = auth.resolve_repository_name(image) + auth_header = auth.get_config_header(self, registry) + if auth_header: + headers['X-Registry-Auth'] = auth_header data['TaskTemplate'] = task_template if update_config is not None: data['UpdateConfig'] = update_config @@ -63,6 +98,8 @@ class ServiceApiMixin(object): if endpoint_config is not None: data['Endpoint'] = endpoint_config - resp = self._post_json(url, data=data, params={'version': version}) + resp = self._post_json( + url, data=data, params={'version': version}, headers=headers + ) self._raise_for_status(resp) return True diff --git a/docker/auth/auth.py b/docker/auth/auth.py index b61a8d09..7195f56a 100644 --- a/docker/auth/auth.py +++ b/docker/auth/auth.py @@ -51,6 +51,26 @@ def resolve_index_name(index_name): return index_name +def 
get_config_header(client, registry): + log.debug('Looking for auth config') + if not client._auth_configs: + log.debug( + "No auth config in memory - loading from filesystem" + ) + client._auth_configs = load_config() + authcfg = resolve_authconfig(client._auth_configs, registry) + # Do not fail here if no authentication exists for this + # specific registry as we can have a readonly pull. Just + # put the header if we can. + if authcfg: + log.debug('Found auth config') + # auth_config needs to be a dict in the format used by + # auth.py username , password, serveraddress, email + return encode_header(authcfg) + log.debug('No auth config found') + return None + + def split_repo_name(repo_name): parts = repo_name.split('/', 1) if len(parts) == 1 or ( diff --git a/docker/types/services.py b/docker/types/services.py index dcc84f9c..2c1a830c 100644 --- a/docker/types/services.py +++ b/docker/types/services.py @@ -36,7 +36,12 @@ class TaskTemplate(dict): class ContainerSpec(dict): def __init__(self, image, command=None, args=None, env=None, workdir=None, user=None, labels=None, mounts=None, stop_grace_period=None): + from ..utils import split_command # FIXME: circular import + self['Image'] = image + + if isinstance(command, six.string_types): + command = split_command(command) self['Command'] = command self['Args'] = args diff --git a/docs/api.md b/docs/api.md index 12467eda..5857892f 100644 --- a/docs/api.md +++ b/docs/api.md @@ -666,6 +666,16 @@ Create a service, similar to the `docker service create` command. See the Retrieve information about the current Swarm. See the [Swarm documentation](swarm.md#clientinspect_swarm). +## inspect_task + +Retrieve information about a task. + +**Params**: + +* task (str): Task identifier + +**Returns** (dict): Task information dictionary + ## inspect_volume Retrieve volume info by name. @@ -1055,6 +1065,17 @@ Tag an image into a repository. Identical to the `docker tag` command. 
**Returns** (bool): True if successful +## tasks + +Retrieve a list of tasks. + +**Params**: + +* filters (dict): A map of filters to process on the tasks list. Valid filters: + `id`, `name`, `service`, `node`, `label` and `desired-state`. + +**Returns** (list): List of task dictionaries. + ## top Display the running processes of a container. diff --git a/tests/integration/service_test.py b/tests/integration/service_test.py index 3113df18..2b99316b 100644 --- a/tests/integration/service_test.py +++ b/tests/integration/service_test.py @@ -1,7 +1,6 @@ import random import docker -# import pytest from ..base import requires_api_version from .. import helpers From fc72ac66e99551c10375398ee7afa552fbe867f2 Mon Sep 17 00:00:00 2001 From: Kay Yan Date: Wed, 13 Jul 2016 14:55:33 +0800 Subject: [PATCH 66/83] support MemoryReservation and KernelMemory Signed-off-by: Kay Yan --- docker/utils/utils.py | 18 ++++++++++++++++-- tests/unit/utils_test.py | 14 ++++++++++++++ 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/docker/utils/utils.py b/docker/utils/utils.py index 082bd9b0..a5fbe0ba 100644 --- a/docker/utils/utils.py +++ b/docker/utils/utils.py @@ -613,8 +613,10 @@ def create_host_config(binds=None, port_bindings=None, lxc_conf=None, cap_drop=None, devices=None, extra_hosts=None, read_only=None, pid_mode=None, ipc_mode=None, security_opt=None, ulimits=None, log_config=None, - mem_limit=None, memswap_limit=None, mem_swappiness=None, - cgroup_parent=None, group_add=None, cpu_quota=None, + mem_limit=None, memswap_limit=None, + mem_reservation=None, kernel_memory=None, + mem_swappiness=None, cgroup_parent=None, + group_add=None, cpu_quota=None, cpu_period=None, blkio_weight=None, blkio_weight_device=None, device_read_bps=None, device_write_bps=None, device_read_iops=None, @@ -638,6 +640,18 @@ def create_host_config(binds=None, port_bindings=None, lxc_conf=None, if memswap_limit is not None: host_config['MemorySwap'] = parse_bytes(memswap_limit) + if 
mem_reservation: + if version_lt(version, '1.21'): + raise host_config_version_error('mem_reservation', '1.21') + + host_config['MemoryReservation'] = parse_bytes(mem_reservation) + + if kernel_memory: + if version_lt(version, '1.21'): + raise host_config_version_error('kernel_memory', '1.21') + + host_config['KernelMemory'] = parse_bytes(kernel_memory) + if mem_swappiness is not None: if version_lt(version, '1.20'): raise host_config_version_error('mem_swappiness', '1.20') diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py index b3773914..3476f041 100644 --- a/tests/unit/utils_test.py +++ b/tests/unit/utils_test.py @@ -171,6 +171,20 @@ class HostConfigTest(base.BaseTestCase): with pytest.raises(InvalidVersion): create_endpoint_config(version='1.21', aliases=['foo', 'bar']) + def test_create_host_config_with_mem_reservation(self): + config = create_host_config(version='1.21', mem_reservation=67108864) + self.assertEqual(config.get('MemoryReservation'), 67108864) + self.assertRaises( + InvalidVersion, lambda: create_host_config( + version='1.20', mem_reservation=67108864)) + + def test_create_host_config_with_kernel_memory(self): + config = create_host_config(version='1.21', kernel_memory=67108864) + self.assertEqual(config.get('KernelMemory'), 67108864) + self.assertRaises( + InvalidVersion, lambda: create_host_config( + version='1.20', kernel_memory=67108864)) + class UlimitTest(base.BaseTestCase): def test_create_host_config_dict_ulimit(self): From 902c7a76ccf23e2e210e982cc832a2770cfc99f4 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Tue, 23 Aug 2016 17:05:08 -0700 Subject: [PATCH 67/83] Docs and tests for pids_limit. Signed-off-by: Joffrey F --- docs/hostconfig.md | 8 +++++--- tests/unit/utils_test.py | 9 +++++++++ 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/docs/hostconfig.md b/docs/hostconfig.md index 6645bd1f..008d5cf2 100644 --- a/docs/hostconfig.md +++ b/docs/hostconfig.md @@ -111,11 +111,12 @@ for example: CPU period. 
* cpu_shares (int): CPU shares (relative weight) * cpuset_cpus (str): CPUs in which to allow execution (0-3, 0,1) -* blkio_weight: Block IO weight (relative weight), accepts a weight value between 10 and 1000. +* blkio_weight: Block IO weight (relative weight), accepts a weight value + between 10 and 1000. * blkio_weight_device: Block IO weight (relative device weight) in the form of: `[{"Path": "device_path", "Weight": weight}]` -* device_read_bps: Limit read rate (bytes per second) from a device in the form of: - `[{"Path": "device_path", "Rate": rate}]` +* device_read_bps: Limit read rate (bytes per second) from a device in the + form of: `[{"Path": "device_path", "Rate": rate}]` * device_write_bps: Limit write rate (bytes per second) from a device. * device_read_iops: Limit read rate (IO per second) from a device. * device_write_iops: Limit write rate (IO per second) from a device. @@ -128,6 +129,7 @@ for example: * sysctls (dict): Kernel parameters to set in the container. * userns_mode (str): Sets the user namespace mode for the container when user namespace remapping option is enabled. Supported values are: `host` +* pids_limit (int): Tune a container’s pids limit. Set -1 for unlimited. 
**Returns** (dict) HostConfig dictionary diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py index 3476f041..2a2759d0 100644 --- a/tests/unit/utils_test.py +++ b/tests/unit/utils_test.py @@ -185,6 +185,15 @@ class HostConfigTest(base.BaseTestCase): InvalidVersion, lambda: create_host_config( version='1.20', kernel_memory=67108864)) + def test_create_host_config_with_pids_limit(self): + config = create_host_config(version='1.23', pids_limit=1024) + self.assertEqual(config.get('PidsLimit'), 1024) + + with pytest.raises(InvalidVersion): + create_host_config(version='1.22', pids_limit=1024) + with pytest.raises(TypeError): + create_host_config(version='1.22', pids_limit='1024') + class UlimitTest(base.BaseTestCase): def test_create_host_config_dict_ulimit(self): From 5bedd32a6942e89eacd4f63298551404856be5fc Mon Sep 17 00:00:00 2001 From: fermayo Date: Thu, 25 Aug 2016 13:34:54 +0200 Subject: [PATCH 68/83] Fix creating containers with env vars with unicode characters Signed-off-by: Fernando Mayo --- docker/utils/utils.py | 2 +- tests/unit/container_test.py | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/docker/utils/utils.py b/docker/utils/utils.py index a5fbe0ba..8385a760 100644 --- a/docker/utils/utils.py +++ b/docker/utils/utils.py @@ -993,7 +993,7 @@ def format_environment(environment): def format_env(key, value): if value is None: return key - return '{key}={value}'.format(key=key, value=value) + return u'{key}={value}'.format(key=key, value=value) return [format_env(*var) for var in six.iteritems(environment)] diff --git a/tests/unit/container_test.py b/tests/unit/container_test.py index c480462f..3cea42fb 100644 --- a/tests/unit/container_test.py +++ b/tests/unit/container_test.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + import datetime import json import signal @@ -1155,6 +1157,24 @@ class CreateContainerTest(DockerClientTest): args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS ) + def 
test_create_container_with_unicode_envvars(self): + envvars_dict = { + 'foo': u'☃', + } + + expected = [ + u'foo=☃' + ] + + self.client.create_container( + 'busybox', 'true', + environment=envvars_dict, + ) + + args = fake_request.call_args + self.assertEqual(args[0][1], url_prefix + 'containers/create') + self.assertEqual(json.loads(args[1]['data'])['Env'], expected) + class ContainerTest(DockerClientTest): def test_list_containers(self): From 764d7b38c484f8dd45eafb47d0add602de5d3ada Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Tue, 23 Aug 2016 16:51:52 -0700 Subject: [PATCH 69/83] Support version parameter in `Client.from_env` Signed-off-by: Joffrey F --- docker/client.py | 3 ++- tests/unit/client_test.py | 8 ++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/docker/client.py b/docker/client.py index 75867536..dc28ac46 100644 --- a/docker/client.py +++ b/docker/client.py @@ -114,7 +114,8 @@ class Client( @classmethod def from_env(cls, **kwargs): - return cls(**kwargs_from_env(**kwargs)) + version = kwargs.pop('version', None) + return cls(version=version, **kwargs_from_env(**kwargs)) def _retrieve_server_version(self): try: diff --git a/tests/unit/client_test.py b/tests/unit/client_test.py index b21f1d6a..6ceb8cbb 100644 --- a/tests/unit/client_test.py +++ b/tests/unit/client_test.py @@ -25,6 +25,14 @@ class ClientTest(base.BaseTestCase): client = Client.from_env() self.assertEqual(client.base_url, "https://192.168.59.103:2376") + def test_from_env_with_version(self): + os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376', + DOCKER_CERT_PATH=TEST_CERT_DIR, + DOCKER_TLS_VERIFY='1') + client = Client.from_env(version='2.32') + self.assertEqual(client.base_url, "https://192.168.59.103:2376") + self.assertEqual(client._version, '2.32') + class DisableSocketTest(base.BaseTestCase): class DummySocket(object): From a665dfb3750058aaaa074799d5262876cb821884 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Wed, 31 Aug 2016 18:26:16 -0700 Subject: 
[PATCH 70/83] Add support for labels and enable_ipv6 in create_network Tests + docs Signed-off-by: Joffrey F --- docker/api/network.py | 19 ++++++++++++++++++- docs/api.md | 15 +++++++++------ tests/integration/network_test.py | 27 ++++++++++++++++++++++++++- 3 files changed, 53 insertions(+), 8 deletions(-) diff --git a/docker/api/network.py b/docker/api/network.py index 34cd8987..c4f48c20 100644 --- a/docker/api/network.py +++ b/docker/api/network.py @@ -22,7 +22,8 @@ class NetworkApiMixin(object): @minimum_version('1.21') def create_network(self, name, driver=None, options=None, ipam=None, - check_duplicate=None, internal=False): + check_duplicate=None, internal=False, labels=None, + enable_ipv6=False): if options is not None and not isinstance(options, dict): raise TypeError('options must be a dictionary') @@ -34,6 +35,22 @@ class NetworkApiMixin(object): 'CheckDuplicate': check_duplicate } + if labels is not None: + if version_lt(self._version, '1.23'): + raise InvalidVersion( + 'network labels were introduced in API 1.23' + ) + if not isinstance(labels, dict): + raise TypeError('labels must be a dictionary') + data["Labels"] = labels + + if enable_ipv6: + if version_lt(self._version, '1.23'): + raise InvalidVersion( + 'enable_ipv6 was introduced in API 1.23' + ) + data['EnableIPv6'] = True + if internal: if version_lt(self._version, '1.22'): raise InvalidVersion('Internal networks are not ' diff --git a/docs/api.md b/docs/api.md index 895d7d45..6af330af 100644 --- a/docs/api.md +++ b/docs/api.md @@ -283,22 +283,25 @@ The utility can be used as follows: ```python >>> import docker.utils >>> my_envs = docker.utils.parse_env_file('/path/to/file') ->>> docker.utils.create_container_config('1.18', '_mongodb', 'foobar', environment=my_envs) +>>> client.create_container('myimage', 'command', environment=my_envs) ``` -You can now use this with 'environment' for `create_container`. 
- - ## create_network -Create a network, similar to the `docker network create` command. +Create a network, similar to the `docker network create` command. See the +[networks documentation](networks.md) for details. **Params**: * name (str): Name of the network * driver (str): Name of the driver used to create the network - * options (dict): Driver options as a key-value dictionary +* ipam (dict): Optional custom IP scheme for the network +* check_duplicate (bool): Request daemon to check for networks with same name. + Default: `True`. +* internal (bool): Restrict external access to the network. Default `False`. +* labels (dict): Map of labels to set on the network. Default `None`. +* enable_ipv6 (bool): Enable IPv6 on the network. Default `False`. **Returns** (dict): The created network reference object diff --git a/tests/integration/network_test.py b/tests/integration/network_test.py index 27e1b14d..70dff060 100644 --- a/tests/integration/network_test.py +++ b/tests/integration/network_test.py @@ -300,7 +300,8 @@ class TestNetworks(helpers.BaseTestCase): net_name, net_id = self.create_network() with self.assertRaises(docker.errors.APIError): self.client.create_network(net_name, check_duplicate=True) - self.client.create_network(net_name, check_duplicate=False) + net_id = self.client.create_network(net_name, check_duplicate=False) + self.tmp_networks.append(net_id['Id']) @requires_api_version('1.22') def test_connect_with_links(self): @@ -387,3 +388,27 @@ class TestNetworks(helpers.BaseTestCase): _, net_id = self.create_network(internal=True) net = self.client.inspect_network(net_id) assert net['Internal'] is True + + @requires_api_version('1.23') + def test_create_network_with_labels(self): + _, net_id = self.create_network(labels={ + 'com.docker.py.test': 'label' + }) + + net = self.client.inspect_network(net_id) + assert 'Labels' in net + assert len(net['Labels']) == 1 + assert net['Labels'] == { + 'com.docker.py.test': 'label' + } + + 
@requires_api_version('1.23') + def test_create_network_with_labels_wrong_type(self): + with pytest.raises(TypeError): + self.create_network(labels=['com.docker.py.test=label', ]) + + @requires_api_version('1.23') + def test_create_network_ipv6_enabled(self): + _, net_id = self.create_network(enable_ipv6=True) + net = self.client.inspect_network(net_id) + assert net['EnableIPv6'] is True From 6552076856bed2925b1611326630b341f27f41b2 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Wed, 31 Aug 2016 18:41:17 -0700 Subject: [PATCH 71/83] Add support for force disconnect Signed-off-by: Joffrey F --- docker/api/network.py | 11 +++++++++-- docs/api.md | 2 ++ tests/integration/network_test.py | 30 +++++++++++++++++++++++++++++- tests/unit/network_test.py | 2 +- 4 files changed, 41 insertions(+), 4 deletions(-) diff --git a/docker/api/network.py b/docker/api/network.py index c4f48c20..0ee0dab6 100644 --- a/docker/api/network.py +++ b/docker/api/network.py @@ -93,8 +93,15 @@ class NetworkApiMixin(object): @check_resource @minimum_version('1.21') - def disconnect_container_from_network(self, container, net_id): - data = {"container": container} + def disconnect_container_from_network(self, container, net_id, + force=False): + data = {"Container": container} + if force: + if version_lt(self._version, '1.22'): + raise InvalidVersion( + 'Forced disconnect was introduced in API 1.22' + ) + data['Force'] = force url = self._url("/networks/{0}/disconnect", net_id) res = self._post_json(url, data=data) self._raise_for_status(res) diff --git a/docs/api.md b/docs/api.md index 6af330af..1699344a 100644 --- a/docs/api.md +++ b/docs/api.md @@ -355,6 +355,8 @@ Inspect changes on a container's filesystem. * container (str): container-id/name to be disconnected from a network * net_id (str): network id +* force (bool): Force the container to disconnect from a network. 
+ Default: `False` ## events diff --git a/tests/integration/network_test.py b/tests/integration/network_test.py index 70dff060..6726db4b 100644 --- a/tests/integration/network_test.py +++ b/tests/integration/network_test.py @@ -115,7 +115,8 @@ class TestNetworks(helpers.BaseTestCase): network_data = self.client.inspect_network(net_id) self.assertEqual( list(network_data['Containers'].keys()), - [container['Id']]) + [container['Id']] + ) with pytest.raises(docker.errors.APIError): self.client.connect_container_to_network(container, net_id) @@ -127,6 +128,33 @@ class TestNetworks(helpers.BaseTestCase): with pytest.raises(docker.errors.APIError): self.client.disconnect_container_from_network(container, net_id) + @requires_api_version('1.22') + def test_connect_and_force_disconnect_container(self): + net_name, net_id = self.create_network() + + container = self.client.create_container('busybox', 'top') + self.tmp_containers.append(container) + self.client.start(container) + + network_data = self.client.inspect_network(net_id) + self.assertFalse(network_data.get('Containers')) + + self.client.connect_container_to_network(container, net_id) + network_data = self.client.inspect_network(net_id) + self.assertEqual( + list(network_data['Containers'].keys()), + [container['Id']] + ) + + self.client.disconnect_container_from_network(container, net_id, True) + network_data = self.client.inspect_network(net_id) + self.assertFalse(network_data.get('Containers')) + + with pytest.raises(docker.errors.APIError): + self.client.disconnect_container_from_network( + container, net_id, force=True + ) + @requires_api_version('1.22') def test_connect_with_aliases(self): net_name, net_id = self.create_network() diff --git a/tests/unit/network_test.py b/tests/unit/network_test.py index 5bba9db2..2521688d 100644 --- a/tests/unit/network_test.py +++ b/tests/unit/network_test.py @@ -184,4 +184,4 @@ class NetworkTest(DockerClientTest): self.assertEqual( json.loads(post.call_args[1]['data']), - 
{'container': container_id}) + {'Container': container_id}) From 9799c2d69b593c5f5dce8ff44a477207dc118e46 Mon Sep 17 00:00:00 2001 From: Joel Martin Date: Fri, 2 Sep 2016 15:29:00 -0500 Subject: [PATCH 72/83] Fix Mount bind type sanity check any() expects a single collection argument, not a list of arguments. Signed-off-by: Joel Martin --- docker/types/services.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/types/services.py b/docker/types/services.py index 2c1a830c..8488d6e2 100644 --- a/docker/types/services.py +++ b/docker/types/services.py @@ -80,7 +80,7 @@ class Mount(dict): self['BindOptions'] = { 'Propagation': propagation } - if any(labels, driver_config, no_copy): + if any([labels, driver_config, no_copy]): raise errors.DockerError( 'Mount type is binding but volume options have been ' 'provided.' From 3769c089e8dbac13ea6a65373f87d3b8ee539c5f Mon Sep 17 00:00:00 2001 From: Ben Firshman Date: Mon, 5 Sep 2016 17:48:09 +0200 Subject: [PATCH 73/83] Fix licenses * Complete main LICENSE * Remove unnecessary licenses from individual files Signed-off-by: Ben Firshman --- LICENSE | 13 +------------ docker/__init__.py | 14 -------------- docker/auth/auth.py | 14 -------------- docker/client.py | 14 -------------- docker/errors.py | 13 ------------- docker/transport/unixconn.py | 13 ------------- docker/utils/utils.py | 14 -------------- tests/unit/api_test.py | 14 -------------- tests/unit/fake_api.py | 14 -------------- 9 files changed, 1 insertion(+), 122 deletions(-) diff --git a/LICENSE b/LICENSE index d6456956..75191a4d 100644 --- a/LICENSE +++ b/LICENSE @@ -176,18 +176,7 @@ END OF TERMS AND CONDITIONS - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) 
The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] + Copyright 2016 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/docker/__init__.py b/docker/__init__.py index 84d0734f..ad53805e 100644 --- a/docker/__init__.py +++ b/docker/__init__.py @@ -1,17 +1,3 @@ -# Copyright 2013 dotCloud inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - from .version import version, version_info __version__ = version diff --git a/docker/auth/auth.py b/docker/auth/auth.py index 7195f56a..50f86f63 100644 --- a/docker/auth/auth.py +++ b/docker/auth/auth.py @@ -1,17 +1,3 @@ -# Copyright 2013 dotCloud inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - import base64 import json import logging diff --git a/docker/client.py b/docker/client.py index ef718a72..b811d36c 100644 --- a/docker/client.py +++ b/docker/client.py @@ -1,17 +1,3 @@ -# Copyright 2013 dotCloud inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - import json import struct from functools import partial diff --git a/docker/errors.py b/docker/errors.py index e85910cd..97be802d 100644 --- a/docker/errors.py +++ b/docker/errors.py @@ -1,16 +1,3 @@ -# Copyright 2014 dotCloud inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - import requests diff --git a/docker/transport/unixconn.py b/docker/transport/unixconn.py index f4d83ef3..e09b6bfa 100644 --- a/docker/transport/unixconn.py +++ b/docker/transport/unixconn.py @@ -1,16 +1,3 @@ -# Copyright 2013 dotCloud inc. 
- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. import six import requests.adapters import socket diff --git a/docker/utils/utils.py b/docker/utils/utils.py index c108a835..d46f8fcd 100644 --- a/docker/utils/utils.py +++ b/docker/utils/utils.py @@ -1,17 +1,3 @@ -# Copyright 2013 dotCloud inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - import base64 import io import os diff --git a/tests/unit/api_test.py b/tests/unit/api_test.py index 5850afa2..389b5f53 100644 --- a/tests/unit/api_test.py +++ b/tests/unit/api_test.py @@ -1,17 +1,3 @@ -# Copyright 2013 dotCloud inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - import datetime import json import os diff --git a/tests/unit/fake_api.py b/tests/unit/fake_api.py index 835d73f2..54d5566b 100644 --- a/tests/unit/fake_api.py +++ b/tests/unit/fake_api.py @@ -1,17 +1,3 @@ -# Copyright 2013 dotCloud inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - from . import fake_stat from docker import constants From 291470146f7264148997bda2a57124352fd04769 Mon Sep 17 00:00:00 2001 From: Ben Firshman Date: Mon, 5 Sep 2016 19:21:09 +0200 Subject: [PATCH 74/83] Add make docs command for building docs Signed-off-by: Ben Firshman --- Dockerfile-docs | 9 +++++++++ Makefile | 6 ++++++ docs-requirements.txt | 2 +- 3 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 Dockerfile-docs diff --git a/Dockerfile-docs b/Dockerfile-docs new file mode 100644 index 00000000..1103ffd1 --- /dev/null +++ b/Dockerfile-docs @@ -0,0 +1,9 @@ +FROM python:2.7 + +RUN mkdir /home/docker-py +WORKDIR /home/docker-py + +COPY docs-requirements.txt /home/docker-py/docs-requirements.txt +RUN pip install -r docs-requirements.txt + +COPY . 
/home/docker-py diff --git a/Makefile b/Makefile index a635edfa..3a8f5e8c 100644 --- a/Makefile +++ b/Makefile @@ -13,6 +13,9 @@ build: build-py3: docker build -t docker-py3 -f Dockerfile-py3 . +build-docs: + docker build -t docker-py-docs -f Dockerfile-docs . + build-dind-certs: docker build -t dpy-dind-certs -f tests/Dockerfile-dind-certs . @@ -57,3 +60,6 @@ integration-dind-ssl: build-dind-certs build build-py3 flake8: build docker run docker-py flake8 docker tests + +docs: build-docs + docker run -v `pwd`/docs:/home/docker-py/docs/ -p 8000:8000 docker-py-docs mkdocs serve -a 0.0.0.0:8000 diff --git a/docs-requirements.txt b/docs-requirements.txt index abc8d72d..aede1cba 100644 --- a/docs-requirements.txt +++ b/docs-requirements.txt @@ -1 +1 @@ -mkdocs==0.9 +mkdocs==0.15.3 From fbe1686e629804fb47dabea1eda5c6d664f0a6b7 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Fri, 2 Sep 2016 19:59:47 -0700 Subject: [PATCH 75/83] Add credentials store support Signed-off-by: Joffrey F --- docker/auth/auth.py | 37 +++++++++++++++++++++++++++++++++++++ requirements.txt | 3 ++- setup.py | 1 + 3 files changed, 40 insertions(+), 1 deletion(-) diff --git a/docker/auth/auth.py b/docker/auth/auth.py index 50f86f63..fdaa7b5f 100644 --- a/docker/auth/auth.py +++ b/docker/auth/auth.py @@ -3,6 +3,7 @@ import json import logging import os +import dockerpycreds import six from .. import errors @@ -11,6 +12,7 @@ INDEX_NAME = 'docker.io' INDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME) DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json') LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg' +TOKEN_USERNAME = '' log = logging.getLogger(__name__) @@ -74,6 +76,13 @@ def resolve_authconfig(authconfig, registry=None): with full URLs are stripped down to hostnames before checking for a match. Returns None if no match was found. 
""" + if 'credsStore' in authconfig: + log.debug( + 'Using credentials store "{0}"'.format(authconfig['credsStore']) + ) + return _resolve_authconfig_credstore( + authconfig, registry, authconfig['credsStore'] + ) # Default to the public index server registry = resolve_index_name(registry) if registry else INDEX_NAME log.debug("Looking for auth entry for {0}".format(repr(registry))) @@ -91,6 +100,34 @@ def resolve_authconfig(authconfig, registry=None): return None +def _resolve_authconfig_credstore(authconfig, registry, credstore_name): + if not registry or registry == INDEX_NAME: + # The ecosystem is a little schizophrenic with index.docker.io VS + # docker.io - in that case, it seems the full URL is necessary. + registry = 'https://index.docker.io/v1/' + log.debug("Looking for auth entry for {0}".format(repr(registry))) + if registry not in authconfig: + log.debug("No entry found") + store = dockerpycreds.Store(credstore_name) + try: + data = store.get(registry) + res = { + 'ServerAddress': registry, + } + if data['Username'] == TOKEN_USERNAME: + res['IdentityToken'] = data['Secret'] + else: + res.update({ + 'Username': data['Username'], + 'Password': data['Secret'], + }) + return res + except dockerpycreds.StoreError as e: + log.error('Credentials store error: {0}'.format(repr(e))) + + return None + + def convert_to_hostname(url): return url.replace('http://', '').replace('https://', '').split('/', 1)[0] diff --git a/requirements.txt b/requirements.txt index a79b7bf8..078163af 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,4 +2,5 @@ requests==2.5.3 six>=1.4.0 websocket-client==0.32.0 backports.ssl_match_hostname>=3.5 ; python_version < '3.5' -ipaddress==1.0.16 ; python_version < '3.3' \ No newline at end of file +ipaddress==1.0.16 ; python_version < '3.3' +docker-pycreds==0.1.0 \ No newline at end of file diff --git a/setup.py b/setup.py index c809321e..3877e966 100644 --- a/setup.py +++ b/setup.py @@ -12,6 +12,7 @@ requirements = [ 'requests >= 
2.5.2, < 2.11', 'six >= 1.4.0', 'websocket-client >= 0.32.0', + 'docker-pycreds >= 0.1.0' ] if sys.platform == 'win32': From 219a8699f9c92de9cb4634d482ce700bb1ec76fc Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Tue, 6 Sep 2016 14:58:41 -0700 Subject: [PATCH 76/83] Better credentials store error handling in resolve_authconfig Signed-off-by: Joffrey F --- docker/auth/auth.py | 11 ++++++----- requirements.txt | 2 +- setup.py | 2 +- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/docker/auth/auth.py b/docker/auth/auth.py index fdaa7b5f..ea15def8 100644 --- a/docker/auth/auth.py +++ b/docker/auth/auth.py @@ -106,8 +106,6 @@ def _resolve_authconfig_credstore(authconfig, registry, credstore_name): # docker.io - in that case, it seems the full URL is necessary. registry = 'https://index.docker.io/v1/' log.debug("Looking for auth entry for {0}".format(repr(registry))) - if registry not in authconfig: - log.debug("No entry found") store = dockerpycreds.Store(credstore_name) try: data = store.get(registry) @@ -122,10 +120,13 @@ def _resolve_authconfig_credstore(authconfig, registry, credstore_name): 'Password': data['Secret'], }) return res + except dockerpycreds.CredentialsNotFound as e: + log.debug('No entry found') + return None except dockerpycreds.StoreError as e: - log.error('Credentials store error: {0}'.format(repr(e))) - - return None + raise errors.DockerException( + 'Credentials store error: {0}'.format(repr(e)) + ) def convert_to_hostname(url): diff --git a/requirements.txt b/requirements.txt index 078163af..4c0d5c20 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,4 +3,4 @@ six>=1.4.0 websocket-client==0.32.0 backports.ssl_match_hostname>=3.5 ; python_version < '3.5' ipaddress==1.0.16 ; python_version < '3.3' -docker-pycreds==0.1.0 \ No newline at end of file +docker-pycreds==0.2.0 \ No newline at end of file diff --git a/setup.py b/setup.py index 3877e966..1afd873b 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ requirements = [ 
'requests >= 2.5.2, < 2.11', 'six >= 1.4.0', 'websocket-client >= 0.32.0', - 'docker-pycreds >= 0.1.0' + 'docker-pycreds >= 0.2.0' ] if sys.platform == 'win32': From 65fb5be4cd30bb98f968986789bad00072b6001a Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Wed, 15 Jun 2016 15:54:09 -0700 Subject: [PATCH 77/83] Add support for changes param in import_image* methods Reduce code duplication in import_image* methods Signed-off-by: Joffrey F --- docker/api/image.py | 170 +++++++++++++++++++++++++------------------- 1 file changed, 96 insertions(+), 74 deletions(-) diff --git a/docker/api/image.py b/docker/api/image.py index 4d6561e5..7f25f9d9 100644 --- a/docker/api/image.py +++ b/docker/api/image.py @@ -1,4 +1,5 @@ import logging +import os import six import warnings @@ -42,87 +43,79 @@ class ImageApiMixin(object): return [x['Id'] for x in res] return res - def import_image(self, src=None, repository=None, tag=None, image=None): - if src: - if isinstance(src, six.string_types): - try: - result = self.import_image_from_file( - src, repository=repository, tag=tag) - except IOError: - result = self.import_image_from_url( - src, repository=repository, tag=tag) - else: - result = self.import_image_from_data( - src, repository=repository, tag=tag) - elif image: - result = self.import_image_from_image( - image, repository=repository, tag=tag) - else: - raise Exception("Must specify a src or image") + def import_image(self, src=None, repository=None, tag=None, image=None, + changes=None, stream_src=False): + if not (src or image): + raise errors.DockerException( + 'Must specify src or image to import from' + ) + u = self._url('/images/create') - return result + params = _import_image_params( + repository, tag, image, + src=(src if isinstance(src, six.string_types) else None), + changes=changes + ) + headers = {'Content-Type': 'application/tar'} - def import_image_from_data(self, data, repository=None, tag=None): - u = self._url("/images/create") - params = { - 'fromSrc': '-', - 
'repo': repository, - 'tag': tag - } - headers = { - 'Content-Type': 'application/tar', - } - return self._result( - self._post(u, data=data, params=params, headers=headers)) - - def import_image_from_file(self, filename, repository=None, tag=None): - u = self._url("/images/create") - params = { - 'fromSrc': '-', - 'repo': repository, - 'tag': tag - } - headers = { - 'Content-Type': 'application/tar', - } - with open(filename, 'rb') as f: + if image or params.get('fromSrc') != '-': # from image or URL return self._result( - self._post(u, data=f, params=params, headers=headers, - timeout=None)) + self._post(u, data=None, params=params) + ) + elif isinstance(src, six.string_types): # from file path + with open(src, 'rb') as f: + return self._result( + self._post( + u, data=f, params=params, headers=headers, timeout=None + ) + ) + else: # from raw data + if stream_src: + headers['Transfer-Encoding'] = 'chunked' + return self._result( + self._post(u, data=src, params=params, headers=headers) + ) - def import_image_from_stream(self, stream, repository=None, tag=None): - u = self._url("/images/create") - params = { - 'fromSrc': '-', - 'repo': repository, - 'tag': tag - } - headers = { - 'Content-Type': 'application/tar', - 'Transfer-Encoding': 'chunked', - } + def import_image_from_data(self, data, repository=None, tag=None, + changes=None): + u = self._url('/images/create') + params = _import_image_params( + repository, tag, src='-', changes=changes + ) + headers = {'Content-Type': 'application/tar'} return self._result( - self._post(u, data=stream, params=params, headers=headers)) + self._post( + u, data=data, params=params, headers=headers, timeout=None + ) + ) + return self.import_image( + src=data, repository=repository, tag=tag, changes=changes + ) - def import_image_from_url(self, url, repository=None, tag=None): - u = self._url("/images/create") - params = { - 'fromSrc': url, - 'repo': repository, - 'tag': tag - } - return self._result( - self._post(u, data=None, 
params=params)) + def import_image_from_file(self, filename, repository=None, tag=None, + changes=None): + return self.import_image( + src=filename, repository=repository, tag=tag, changes=changes + ) - def import_image_from_image(self, image, repository=None, tag=None): - u = self._url("/images/create") - params = { - 'fromImage': image, - 'repo': repository, - 'tag': tag - } - return self._result( - self._post(u, data=None, params=params)) + def import_image_from_stream(self, stream, repository=None, tag=None, + changes=None): + return self.import_image( + src=stream, stream_src=True, repository=repository, tag=tag, + changes=changes + ) + + def import_image_from_url(self, url, repository=None, tag=None, + changes=None): + return self.import_image( + src=url, repository=repository, tag=tag, changes=changes + ) + + def import_image_from_image(self, image, repository=None, tag=None, + changes=None): + return self.import_image( + image=image, repository=repository, tag=tag, changes=changes + ) @utils.check_resource def insert(self, image, url, path): @@ -246,3 +239,32 @@ class ImageApiMixin(object): res = self._post(url, params=params) self._raise_for_status(res) return res.status_code == 201 + + +def is_file(src): + try: + return ( + isinstance(src, six.string_types) and + os.path.isfile(src) + ) + except TypeError: # a data string will make isfile() raise a TypeError + return False + + +def _import_image_params(repo, tag, image=None, src=None, + changes=None): + params = { + 'repo': repo, + 'tag': tag, + } + if image: + params['fromImage'] = image + elif src and not is_file(src): + params['fromSrc'] = src + else: + params['fromSrc'] = '-' + + if changes: + params['changes'] = changes + + return params From 75497e07528a3d84a6ddd343aa66837ebb304e2a Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Wed, 15 Jun 2016 16:39:24 -0700 Subject: [PATCH 78/83] Add test for import_image with changes param Signed-off-by: Joffrey F --- Makefile | 5 ++-- 
tests/integration/image_test.py | 42 +++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 3a8f5e8c..5083b79c 100644 --- a/Makefile +++ b/Makefile @@ -3,9 +3,8 @@ all: test clean: - rm -rf tests/__pycache__ - rm -rf tests/*/__pycache__ - docker rm -vf dpy-dind + -docker rm -vf dpy-dind + find -name "__pycache__" | xargs rm -rf build: docker build -t docker-py . diff --git a/tests/integration/image_test.py b/tests/integration/image_test.py index 9f383665..a61b58ae 100644 --- a/tests/integration/image_test.py +++ b/tests/integration/image_test.py @@ -208,6 +208,48 @@ class ImportImageTest(helpers.BaseTestCase): img_id = result['status'] self.tmp_imgs.append(img_id) + def test_import_image_from_data_with_changes(self): + with self.dummy_tar_stream(n_bytes=500) as f: + content = f.read() + + statuses = self.client.import_image_from_data( + content, repository='test/import-from-bytes', + changes=['USER foobar', 'CMD ["echo"]'] + ) + + result_text = statuses.splitlines()[-1] + result = json.loads(result_text) + + assert 'error' not in result + + img_id = result['status'] + self.tmp_imgs.append(img_id) + + img_data = self.client.inspect_image(img_id) + assert img_data is not None + assert img_data['Config']['Cmd'] == ['echo'] + assert img_data['Config']['User'] == 'foobar' + + def test_import_image_with_changes(self): + with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename: + statuses = self.client.import_image( + src=tar_filename, repository='test/import-from-file', + changes=['USER foobar', 'CMD ["echo"]'] + ) + + result_text = statuses.splitlines()[-1] + result = json.loads(result_text) + + assert 'error' not in result + + img_id = result['status'] + self.tmp_imgs.append(img_id) + + img_data = self.client.inspect_image(img_id) + assert img_data is not None + assert img_data['Config']['Cmd'] == ['echo'] + assert img_data['Config']['User'] == 'foobar' + @contextlib.contextmanager def 
temporary_http_file_server(self, stream): '''Serve data from an IO stream over HTTP.''' From 0cdf7376253499923746f160106a757611988341 Mon Sep 17 00:00:00 2001 From: Ben Firshman Date: Wed, 7 Sep 2016 10:47:06 +0200 Subject: [PATCH 79/83] Fix unit test which doesn't do anything It also overrode the fake API inspect endpoint with a broken response. Signed-off-by: Ben Firshman --- tests/unit/container_test.py | 16 ++++++++++------ tests/unit/fake_api.py | 31 ------------------------------- 2 files changed, 10 insertions(+), 37 deletions(-) diff --git a/tests/unit/container_test.py b/tests/unit/container_test.py index 3cea42fb..8871b854 100644 --- a/tests/unit/container_test.py +++ b/tests/unit/container_test.py @@ -751,14 +751,18 @@ class CreateContainerTest(DockerClientTest): ) def test_create_container_with_mac_address(self): - mac_address_expected = "02:42:ac:11:00:0a" + expected = "02:42:ac:11:00:0a" - container = self.client.create_container( - 'busybox', ['sleep', '60'], mac_address=mac_address_expected) + self.client.create_container( + 'busybox', + ['sleep', '60'], + mac_address=expected + ) - res = self.client.inspect_container(container['Id']) - self.assertEqual(mac_address_expected, - res['NetworkSettings']['MacAddress']) + args = fake_request.call_args + self.assertEqual(args[0][1], url_prefix + 'containers/create') + data = json.loads(args[1]['data']) + assert data['MacAddress'] == expected def test_create_container_with_links(self): link_path = 'path' diff --git a/tests/unit/fake_api.py b/tests/unit/fake_api.py index 54d5566b..1e9d318d 100644 --- a/tests/unit/fake_api.py +++ b/tests/unit/fake_api.py @@ -169,35 +169,6 @@ def get_fake_inspect_image(): return status_code, response -def get_fake_port(): - status_code = 200 - response = { - 'HostConfig': { - 'Binds': None, - 'ContainerIDFile': '', - 'Links': None, - 'LxcConf': None, - 'PortBindings': { - '1111': None, - '1111/tcp': [{'HostIp': '127.0.0.1', 'HostPort': '4567'}], - '2222': None - }, - 
'Privileged': False, - 'PublishAllPorts': False - }, - 'NetworkSettings': { - 'Bridge': 'docker0', - 'PortMapping': None, - 'Ports': { - '1111': None, - '1111/tcp': [{'HostIp': '127.0.0.1', 'HostPort': '4567'}], - '2222': None}, - 'MacAddress': '02:42:ac:11:00:0a' - } - } - return status_code, response - - def get_fake_insert_image(): status_code = 200 response = {'StatusCode': 0} @@ -495,8 +466,6 @@ fake_responses = { post_fake_pause_container, '{1}/{0}/containers/3cc2351ab11b/unpause'.format(CURRENT_VERSION, prefix): post_fake_unpause_container, - '{1}/{0}/containers/3cc2351ab11b/json'.format(CURRENT_VERSION, prefix): - get_fake_port, '{1}/{0}/containers/3cc2351ab11b/restart'.format(CURRENT_VERSION, prefix): post_fake_restart_container, '{1}/{0}/containers/3cc2351ab11b'.format(CURRENT_VERSION, prefix): From 0430d00f2f3a5f3328cd1646d8ff6542b5ba6ff6 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Wed, 7 Sep 2016 17:49:07 -0700 Subject: [PATCH 80/83] Handle bufsize < 0 in makefile() as a substitute for default Signed-off-by: Joffrey F --- docker/transport/npipesocket.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/transport/npipesocket.py b/docker/transport/npipesocket.py index 35418ef1..9010cebe 100644 --- a/docker/transport/npipesocket.py +++ b/docker/transport/npipesocket.py @@ -94,7 +94,7 @@ class NpipeSocket(object): if mode.strip('b') != 'r': raise NotImplementedError() rawio = NpipeFileIOBase(self) - if bufsize is None: + if bufsize is None or bufsize < 0: bufsize = io.DEFAULT_BUFFER_SIZE return io.BufferedReader(rawio, buffer_size=bufsize) From 06489235964470c9fdb12ec3a30e82aaa9586a28 Mon Sep 17 00:00:00 2001 From: Ben Firshman Date: Thu, 8 Sep 2016 09:10:27 +0100 Subject: [PATCH 81/83] Add .PHONY for each makefile instruction Makes it much easier to keep this maintained properly. 
See also: http://clarkgrubb.com/makefile-style-guide#phony-targets Replaces #1164 Signed-off-by: Ben Firshman --- Makefile | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 5083b79c..1fb21d44 100644 --- a/Makefile +++ b/Makefile @@ -1,37 +1,47 @@ -.PHONY: all build test integration-test unit-test build-py3 unit-test-py3 integration-test-py3 - +.PHONY: all all: test +.PHONY: clean clean: -docker rm -vf dpy-dind find -name "__pycache__" | xargs rm -rf +.PHONY: build build: docker build -t docker-py . +.PHONY: build-py3 build-py3: docker build -t docker-py3 -f Dockerfile-py3 . +.PHONY: build-docs build-docs: docker build -t docker-py-docs -f Dockerfile-docs . +.PHONY: build-dind-certs build-dind-certs: docker build -t dpy-dind-certs -f tests/Dockerfile-dind-certs . +.PHONY: test test: flake8 unit-test unit-test-py3 integration-dind integration-dind-ssl +.PHONY: unit-test unit-test: build docker run docker-py py.test tests/unit +.PHONY: unit-test-py3 unit-test-py3: build-py3 docker run docker-py3 py.test tests/unit +.PHONY: integration-test integration-test: build docker run -v /var/run/docker.sock:/var/run/docker.sock docker-py py.test tests/integration +.PHONY: integration-test-py3 integration-test-py3: build-py3 docker run -v /var/run/docker.sock:/var/run/docker.sock docker-py3 py.test tests/integration +.PHONY: integration-dind integration-dind: build build-py3 docker rm -vf dpy-dind || : docker run -d --name dpy-dind --privileged dockerswarm/dind:1.12.0 docker daemon\ @@ -42,6 +52,7 @@ integration-dind: build build-py3 py.test tests/integration docker rm -vf dpy-dind +.PHONY: integration-dind-ssl integration-dind-ssl: build-dind-certs build build-py3 docker run -d --name dpy-dind-certs dpy-dind-certs docker run -d --env="DOCKER_HOST=tcp://localhost:2375" --env="DOCKER_TLS_VERIFY=1"\ @@ -57,8 +68,10 @@ integration-dind-ssl: build-dind-certs build build-py3 --link=dpy-dind-ssl:docker docker-py3 py.test 
tests/integration docker rm -vf dpy-dind-ssl dpy-dind-certs +.PHONY: flake8 flake8: build docker run docker-py flake8 docker tests +.PHONY: docs docs: build-docs docker run -v `pwd`/docs:/home/docker-py/docs/ -p 8000:8000 docker-py-docs mkdocs serve -a 0.0.0.0:8000 From e6601e2e55fefa4216bcbf5d1f6c8dbbe7053f54 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Wed, 8 Jun 2016 17:27:25 -0700 Subject: [PATCH 82/83] Remove default adapters when connecting through a unix socket. Signed-off-by: Joffrey F --- docker/client.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docker/client.py b/docker/client.py index b811d36c..6e8b2789 100644 --- a/docker/client.py +++ b/docker/client.py @@ -60,6 +60,7 @@ class Client( if base_url.startswith('http+unix://'): self._custom_adapter = UnixAdapter(base_url, timeout) self.mount('http+docker://', self._custom_adapter) + self._unmount('http://', 'https://') self.base_url = 'http+docker://localunixsocket' elif base_url.startswith('npipe://'): if not constants.IS_WINDOWS_PLATFORM: @@ -368,6 +369,10 @@ class Client( [x for x in self._multiplexed_buffer_helper(res)] ) + def _unmount(self, *args): + for proto in args: + self.adapters.pop(proto) + def get_adapter(self, url): try: return super(Client, self).get_adapter(url) From 3eb93f666289a1e4b5099a83e1c18950e8dd9d78 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Wed, 7 Sep 2016 17:42:45 -0700 Subject: [PATCH 83/83] Bump version Signed-off-by: Joffrey F --- docker/version.py | 2 +- docs/change_log.md | 43 +++++++++++++++++++++++++++++++++++++++++++ setup.cfg | 4 +++- 3 files changed, 47 insertions(+), 2 deletions(-) diff --git a/docker/version.py b/docker/version.py index dea7b7cb..40accf13 100644 --- a/docker/version.py +++ b/docker/version.py @@ -1,2 +1,2 @@ -version = "1.10.0-dev" +version = "1.10.0" version_info = tuple([int(d) for d in version.split("-")[0].split(".")]) diff --git a/docs/change_log.md b/docs/change_log.md index 089c0034..d37e48fd 100644 --- a/docs/change_log.md 
+++ b/docs/change_log.md @@ -1,6 +1,49 @@ Change Log ========== +1.10.0 +------ + +[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.10.0+is%3Aclosed) + +### Features + +* Added swarm mode and service management methods. See the documentation for + details. +* Added support for IPv6 Docker host addresses in the `Client` constructor. +* Added (read-only) support for the Docker credentials store. +* Added support for custom `auth_config` in `Client.push`. +* Added support for `labels` in `Client.create_volume`. +* Added support for `labels` and `enable_ipv6` in `Client.create_network`. +* Added support for `force` param in + `Client.disconnect_container_from_network`. +* Added support for `pids_limit`, `sysctls`, `userns_mode`, `cpuset_cpus`, + `cpu_shares`, `mem_reservation` and `kernel_memory` parameters in + `Client.create_host_config`. +* Added support for `link_local_ips` in `create_endpoint_config`. +* Added support for a `changes` parameter in `Client.import_image`. +* Added support for a `version` parameter in `Client.from_env`. + +### Bugfixes + +* Fixed a bug where `Client.build` would crash if the `config.json` file + contained a `HttpHeaders` entry. +* Fixed a bug where passing `decode=True` in some streaming methods would + crash when the daemon's response had an unexpected format. +* Fixed a bug where `environment` values with unicode characters weren't + handled properly in `create_container`. +* Fixed a bug where using the `npipe` protocol would sometimes break with + `ValueError: buffer size must be strictly positive`. + +### Miscellaneous + +* Fixed an issue where URL-quoting in docker-py was inconsistent with the + quoting done by the Docker CLI client. +* The client now sends TCP upgrade headers to hint potential proxies about + connection hijacking. +* The client now defaults to using the `npipe` protocol on Windows. 
+ + 1.9.0 ----- diff --git a/setup.cfg b/setup.cfg index ccc93cfc..ad388d24 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,3 +1,5 @@ [bdist_wheel] - universal = 1 + +[metadata] +description_file = README.md