From a1393ee8ac4a55e6215079baacc9ec7777a9c965 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Wed, 23 Sep 2015 14:05:03 -0700 Subject: [PATCH 01/11] Don't break when volume binds contain unicode characters Also includes a few unit tests for utils.convert_volume_binds Signed-off-by: Joffrey F --- docker/utils/utils.py | 19 +++++++-- tests/utils_test.py | 91 ++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 104 insertions(+), 6 deletions(-) diff --git a/docker/utils/utils.py b/docker/utils/utils.py index 36edf8de..1fce1377 100644 --- a/docker/utils/utils.py +++ b/docker/utils/utils.py @@ -242,6 +242,9 @@ def convert_volume_binds(binds): result = [] for k, v in binds.items(): + if isinstance(k, six.binary_type): + k = k.decode('utf-8') + if isinstance(v, dict): if 'ro' in v and 'mode' in v: raise ValueError( @@ -249,6 +252,10 @@ def convert_volume_binds(binds): .format(repr(v)) ) + bind = v['bind'] + if isinstance(bind, six.binary_type): + bind = bind.decode('utf-8') + if 'ro' in v: mode = 'ro' if v['ro'] else 'rw' elif 'mode' in v: @@ -256,11 +263,15 @@ def convert_volume_binds(binds): else: mode = 'rw' - result.append('{0}:{1}:{2}'.format( - k, v['bind'], mode - )) + result.append( + six.text_type('{0}:{1}:{2}').format(k, bind, mode) + ) else: - result.append('{0}:{1}:rw'.format(k, v)) + if isinstance(v, six.binary_type): + v = v.decode('utf-8') + result.append( + six.text_type('{0}:{1}:rw').format(k, v) + ) return result diff --git a/tests/utils_test.py b/tests/utils_test.py index 45929f73..8ac1dcb9 100644 --- a/tests/utils_test.py +++ b/tests/utils_test.py @@ -1,15 +1,20 @@ +# -*- coding: utf-8 -*- + import os import os.path import shutil import tempfile +import pytest +import six + from docker.client import Client from docker.constants import DEFAULT_DOCKER_API_VERSION from docker.errors import DockerException from docker.utils import ( parse_repository_tag, parse_host, convert_filters, kwargs_from_env, create_host_config, Ulimit, LogConfig, 
parse_bytes, parse_env_file, - exclude_paths, + exclude_paths, convert_volume_binds, ) from docker.utils.ports import build_port_bindings, split_port from docker.auth import resolve_repository_name, resolve_authconfig @@ -17,7 +22,6 @@ from docker.auth import resolve_repository_name, resolve_authconfig from . import base from .helpers import make_tree -import pytest TEST_CERT_DIR = os.path.join( os.path.dirname(__file__), @@ -192,6 +196,89 @@ class UtilsTest(base.BaseTestCase): local_tempfile.close() return local_tempfile.name + def test_convert_volume_binds_empty(self): + self.assertEqual(convert_volume_binds({}), []) + self.assertEqual(convert_volume_binds([]), []) + + def test_convert_volume_binds_list(self): + data = ['/a:/a:ro', '/b:/c:z'] + self.assertEqual(convert_volume_binds(data), data) + + def test_convert_volume_binds_complete(self): + data = { + '/mnt/vol1': { + 'bind': '/data', + 'mode': 'ro' + } + } + self.assertEqual(convert_volume_binds(data), ['/mnt/vol1:/data:ro']) + + def test_convert_volume_binds_compact(self): + data = { + '/mnt/vol1': '/data' + } + self.assertEqual(convert_volume_binds(data), ['/mnt/vol1:/data:rw']) + + def test_convert_volume_binds_no_mode(self): + data = { + '/mnt/vol1': { + 'bind': '/data' + } + } + self.assertEqual(convert_volume_binds(data), ['/mnt/vol1:/data:rw']) + + def test_convert_volume_binds_unicode_bytes_input(self): + if six.PY2: + expected = [unicode('/mnt/지연:/unicode/박:rw', 'utf-8')] + + data = { + '/mnt/지연': { + 'bind': '/unicode/박', + 'mode': 'rw' + } + } + self.assertEqual( + convert_volume_binds(data), expected + ) + else: + expected = ['/mnt/지연:/unicode/박:rw'] + + data = { + bytes('/mnt/지연', 'utf-8'): { + 'bind': bytes('/unicode/박', 'utf-8'), + 'mode': 'rw' + } + } + self.assertEqual( + convert_volume_binds(data), expected + ) + + def test_convert_volume_binds_unicode_unicode_input(self): + if six.PY2: + expected = [unicode('/mnt/지연:/unicode/박:rw', 'utf-8')] + + data = { + unicode('/mnt/지연', 'utf-8'): { + 
'bind': unicode('/unicode/박', 'utf-8'), + 'mode': 'rw' + } + } + self.assertEqual( + convert_volume_binds(data), expected + ) + else: + expected = ['/mnt/지연:/unicode/박:rw'] + + data = { + '/mnt/지연': { + 'bind': '/unicode/박', + 'mode': 'rw' + } + } + self.assertEqual( + convert_volume_binds(data), expected + ) + def test_parse_repository_tag(self): self.assertEqual(parse_repository_tag("root"), ("root", None)) From 53589e5f0a4ddfc3cec8f48b5f3d807e2deb0ace Mon Sep 17 00:00:00 2001 From: Aanand Prasad Date: Wed, 23 Sep 2015 17:00:37 +0200 Subject: [PATCH 02/11] Implement methods for managing networks Signed-off-by: Aanand Prasad --- docker/api/__init__.py | 1 + docker/api/network.py | 49 ++++++++++++++ docker/client.py | 3 +- tests/integration_test.py | 104 +++++++++++++++++++++++++++++ tests/test.py | 134 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 290 insertions(+), 1 deletion(-) create mode 100644 docker/api/network.py diff --git a/docker/api/__init__.py b/docker/api/__init__.py index 79796349..9e744289 100644 --- a/docker/api/__init__.py +++ b/docker/api/__init__.py @@ -5,3 +5,4 @@ from .daemon import DaemonApiMixin from .exec_api import ExecApiMixin from .image import ImageApiMixin from .volume import VolumeApiMixin +from .network import NetworkApiMixin diff --git a/docker/api/network.py b/docker/api/network.py new file mode 100644 index 00000000..f6ad4a7b --- /dev/null +++ b/docker/api/network.py @@ -0,0 +1,49 @@ +import json + +from ..utils import check_resource + + +class NetworkApiMixin(object): + def networks(self, names=None, ids=None): + filters = {} + if names: + filters['name'] = names + if ids: + filters['id'] = ids + + params = {'filters': json.dumps(filters)} + + url = self._url("/networks") + res = self._get(url, params=params) + return self._result(res, json=True) + + def create_network(self, name, driver=None): + data = { + 'name': name, + 'driver': driver, + } + url = self._url("/networks/create") + res = self._post_json(url, 
data=data) + return self._result(res, json=True) + + def remove_network(self, net_id): + url = self._url("/networks/{0}", net_id) + res = self._delete(url) + self._raise_for_status(res) + + def inspect_network(self, net_id): + url = self._url("/networks/{0}", net_id) + res = self._get(url) + return self._result(res, json=True) + + @check_resource + def connect_container_to_network(self, container, net_id): + data = {"container": container} + url = self._url("/networks/{0}/connect", net_id) + self._post_json(url, data=data) + + @check_resource + def disconnect_container_from_network(self, container, net_id): + data = {"container": container} + url = self._url("/networks/{0}/disconnect", net_id) + self._post_json(url, data=data) diff --git a/docker/client.py b/docker/client.py index 9decd610..79efc9f7 100644 --- a/docker/client.py +++ b/docker/client.py @@ -39,7 +39,8 @@ class Client( api.DaemonApiMixin, api.ExecApiMixin, api.ImageApiMixin, - api.VolumeApiMixin): + api.VolumeApiMixin, + api.NetworkApiMixin): def __init__(self, base_url=None, version=None, timeout=constants.DEFAULT_TIMEOUT_SECONDS, tls=False): super(Client, self).__init__() diff --git a/tests/integration_test.py b/tests/integration_test.py index a715ef6e..f4e20894 100644 --- a/tests/integration_test.py +++ b/tests/integration_test.py @@ -21,6 +21,7 @@ import random import shutil import signal import socket +import sys import tarfile import tempfile import threading @@ -95,6 +96,7 @@ class BaseTestCase(unittest.TestCase): self.tmp_containers = [] self.tmp_folders = [] self.tmp_volumes = [] + self.tmp_networks = [] def tearDown(self): for img in self.tmp_imgs: @@ -108,6 +110,11 @@ class BaseTestCase(unittest.TestCase): self.client.remove_container(container) except docker.errors.APIError: pass + for network in self.tmp_networks: + try: + self.client.remove_network(network) + except docker.errors.APIError: + pass for folder in self.tmp_folders: shutil.rmtree(folder) @@ -1590,6 +1597,103 @@ class 
TestBuildWithDockerignore(Cleanup, BaseTestCase): ['not-ignored'], ) + +####################### +# NETWORK TESTS # +####################### + + +@requires_api_version('1.21') +class TestNetworks(BaseTestCase): + def create_network(self, *args, **kwargs): + net_name = 'dockerpy{}'.format(random.randrange(sys.maxint))[:14] + net_id = self.client.create_network(net_name, *args, **kwargs)['id'] + self.tmp_networks.append(net_id) + return (net_name, net_id) + + def test_list_networks(self): + networks = self.client.networks() + initial_size = len(networks) + + net_name, net_id = self.create_network() + + networks = self.client.networks() + self.assertEqual(len(networks), initial_size + 1) + self.assertTrue(net_id in [n['id'] for n in networks]) + + networks_by_name = self.client.networks(names=[net_name]) + self.assertEqual([n['id'] for n in networks_by_name], [net_id]) + + networks_by_partial_id = self.client.networks(ids=[net_id[:8]]) + self.assertEqual([n['id'] for n in networks_by_partial_id], [net_id]) + + def test_inspect_network(self): + net_name, net_id = self.create_network() + + net = self.client.inspect_network(net_id) + self.assertEqual(net, { + u'name': net_name, + u'id': net_id, + u'driver': 'bridge', + u'containers': {}, + }) + + def test_create_network_with_host_driver_fails(self): + net_name = 'dockerpy{}'.format(random.randrange(sys.maxint))[:14] + + with pytest.raises(APIError): + self.client.create_network(net_name, driver='host') + + def test_remove_network(self): + initial_size = len(self.client.networks()) + + net_name, net_id = self.create_network() + self.assertEqual(len(self.client.networks()), initial_size + 1) + + self.client.remove_network(net_id) + self.assertEqual(len(self.client.networks()), initial_size) + + def test_connect_and_disconnect_container(self): + net_name, net_id = self.create_network() + + container = self.client.create_container('busybox', 'top') + self.tmp_containers.append(container) + self.client.start(container) + + 
network_data = self.client.inspect_network(net_id) + self.assertFalse(network_data.get('containers')) + + self.client.connect_container_to_network(container, net_id) + network_data = self.client.inspect_network(net_id) + self.assertEqual( + list(network_data['containers'].keys()), + [container['Id']]) + + self.client.disconnect_container_from_network(container, net_id) + network_data = self.client.inspect_network(net_id) + self.assertFalse(network_data.get('containers')) + + def test_connect_on_container_create(self): + net_name, net_id = self.create_network() + + container = self.client.create_container( + image='busybox', + command='top', + host_config=self.client.create_host_config(network_mode=net_name), + ) + self.tmp_containers.append(container) + self.client.start(container) + + network_data = self.client.inspect_network(net_id) + self.assertEqual( + list(network_data['containers'].keys()), + [container['Id']]) + + self.client.disconnect_container_from_network(container, net_id) + network_data = self.client.inspect_network(net_id) + self.assertFalse(network_data.get('containers')) + + ####################### # PY SPECIFIC TESTS # ####################### diff --git a/tests/test.py b/tests/test.py index 719ac9ef..42c925f2 100644 --- a/tests/test.py +++ b/tests/test.py @@ -369,6 +369,41 @@ class DockerClientTest(Cleanup, base.BaseTestCase): timeout=DEFAULT_TIMEOUT_SECONDS ) + def test_list_networks(self): + networks = [ + { + "name": "none", + "id": "8e4e55c6863ef424", + "type": "null", + "endpoints": [] + }, + { + "name": "host", + "id": "062b6d9ea7913fde", + "type": "host", + "endpoints": [] + }, + ] + + get = mock.Mock(return_value=response( + status_code=200, content=json.dumps(networks).encode('utf-8'))) + + with mock.patch('docker.Client.get', get): + self.assertEqual(self.client.networks(), networks) + + self.assertEqual(get.call_args[0][0], url_prefix + 'networks') + + filters = json.loads(get.call_args[1]['params']['filters']) + 
self.assertFalse(filters) + + self.client.networks(names=['foo']) + filters = json.loads(get.call_args[1]['params']['filters']) + self.assertEqual(filters, {'name': ['foo']}) + + self.client.networks(ids=['123']) + filters = json.loads(get.call_args[1]['params']['filters']) + self.assertEqual(filters, {'id': ['123']}) + ##################### # CONTAINER TESTS # ##################### @@ -2229,6 +2264,105 @@ class DockerClientTest(Cleanup, base.BaseTestCase): self.assertEqual(args[0][0], 'DELETE') self.assertEqual(args[0][1], '{0}volumes/{1}'.format(url_prefix, name)) + ##################### + # NETWORK TESTS # + ##################### + + def test_create_network(self): + network_data = { + "id": 'abc12345', + "warning": "", + } + + network_response = response(status_code=200, content=network_data) + post = mock.Mock(return_value=network_response) + + with mock.patch('docker.Client.post', post): + result = self.client.create_network('foo') + self.assertEqual(result, network_data) + + self.assertEqual( + post.call_args[0][0], + url_prefix + 'networks/create') + + self.assertEqual( + json.loads(post.call_args[1]['data']), + {"name": "foo"}) + + self.client.create_network('foo', 'bridge') + + self.assertEqual( + json.loads(post.call_args[1]['data']), + {"name": "foo", "driver": "bridge"}) + + def test_remove_network(self): + network_id = 'abc12345' + delete = mock.Mock(return_value=response(status_code=200)) + + with mock.patch('docker.Client.delete', delete): + self.client.remove_network(network_id) + + args = delete.call_args + self.assertEqual(args[0][0], + url_prefix + 'networks/{0}'.format(network_id)) + + def test_inspect_network(self): + network_id = 'abc12345' + network_name = 'foo' + network_data = { + six.u('name'): network_name, + six.u('id'): network_id, + six.u('driver'): 'bridge', + six.u('containers'): {}, + } + + network_response = response(status_code=200, content=network_data) + get = mock.Mock(return_value=network_response) + + with 
mock.patch('docker.Client.get', get): + result = self.client.inspect_network(network_id) + self.assertEqual(result, network_data) + + args = get.call_args + self.assertEqual(args[0][0], + url_prefix + 'networks/{0}'.format(network_id)) + + def test_connect_container_to_network(self): + network_id = 'abc12345' + container_id = 'def45678' + + post = mock.Mock(return_value=response(status_code=201)) + + with mock.patch('docker.Client.post', post): + self.client.connect_container_to_network( + {'Id': container_id}, network_id) + + self.assertEqual( + post.call_args[0][0], + url_prefix + 'networks/{0}/connect'.format(network_id)) + + self.assertEqual( + json.loads(post.call_args[1]['data']), + {'container': container_id}) + + def test_disconnect_container_from_network(self): + network_id = 'abc12345' + container_id = 'def45678' + + post = mock.Mock(return_value=response(status_code=201)) + + with mock.patch('docker.Client.post', post): + self.client.disconnect_container_from_network( + {'Id': container_id}, network_id) + + self.assertEqual( + post.call_args[0][0], + url_prefix + 'networks/{0}/disconnect'.format(network_id)) + + self.assertEqual( + json.loads(post.call_args[1]['data']), + {'container': container_id}) + ####################### # PY SPECIFIC TESTS # ####################### From d141976303de48d511f5e5da95edfb8e536db84f Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Thu, 8 Oct 2015 08:02:03 -0700 Subject: [PATCH 03/11] Add minimum_version decorators on NetworkApi methods Signed-off-by: Joffrey F --- docker/api/network.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/docker/api/network.py b/docker/api/network.py index f6ad4a7b..2dea6795 100644 --- a/docker/api/network.py +++ b/docker/api/network.py @@ -1,9 +1,10 @@ import json -from ..utils import check_resource +from ..utils import check_resource, minimum_version class NetworkApiMixin(object): + @minimum_version('1.21') def networks(self, names=None, ids=None): filters = {} if 
names: @@ -17,6 +18,7 @@ class NetworkApiMixin(object): res = self._get(url, params=params) return self._result(res, json=True) + @minimum_version('1.21') def create_network(self, name, driver=None): data = { 'name': name, @@ -26,23 +28,27 @@ class NetworkApiMixin(object): res = self._post_json(url, data=data) return self._result(res, json=True) + @minimum_version('1.21') def remove_network(self, net_id): url = self._url("/networks/{0}", net_id) res = self._delete(url) self._raise_for_status(res) + @minimum_version('1.21') def inspect_network(self, net_id): url = self._url("/networks/{0}", net_id) res = self._get(url) return self._result(res, json=True) @check_resource + @minimum_version('1.21') def connect_container_to_network(self, container, net_id): data = {"container": container} url = self._url("/networks/{0}/connect", net_id) self._post_json(url, data=data) @check_resource + @minimum_version('1.21') def disconnect_container_from_network(self, container, net_id): data = {"container": container} url = self._url("/networks/{0}/disconnect", net_id) From 55a0783f4bb1cba4e6063cee9d34714c9a940556 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Thu, 8 Oct 2015 12:07:08 -0700 Subject: [PATCH 04/11] Fix tests Skips were masking some errors / compatibility issues Signed-off-by: Joffrey F --- tests/integration_test.py | 5 ++--- tests/test.py | 17 +++++++++++------ 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/tests/integration_test.py b/tests/integration_test.py index f4e20894..da380c1e 100644 --- a/tests/integration_test.py +++ b/tests/integration_test.py @@ -21,7 +21,6 @@ import random import shutil import signal import socket -import sys import tarfile import tempfile import threading @@ -1606,7 +1605,7 @@ class TestBuildWithDockerignore(Cleanup, BaseTestCase): @requires_api_version('1.21') class TestNetworks(BaseTestCase): def create_network(self, *args, **kwargs): - net_name = 'dockerpy{}'.format(random.randrange(sys.maxint))[:14] + net_name = 
'dockerpy{}'.format(random.getrandbits(24))[:14] net_id = self.client.create_network(net_name, *args, **kwargs)['id'] self.tmp_networks.append(net_id) return (net_name, net_id) @@ -1639,7 +1638,7 @@ class TestNetworks(BaseTestCase): }) def test_create_network_with_host_driver_fails(self): - net_name = 'dockerpy{}'.format(random.randrange(sys.maxint))[:14] + net_name = 'dockerpy{}'.format(random.getrandbits(24))[:14] with pytest.raises(APIError): self.client.create_network(net_name, driver='host') diff --git a/tests/test.py b/tests/test.py index 42c925f2..dbb755f1 100644 --- a/tests/test.py +++ b/tests/test.py @@ -369,6 +369,7 @@ class DockerClientTest(Cleanup, base.BaseTestCase): timeout=DEFAULT_TIMEOUT_SECONDS ) + @base.requires_api_version('1.21') def test_list_networks(self): networks = [ { @@ -2208,9 +2209,7 @@ class DockerClientTest(Cleanup, base.BaseTestCase): self.assertEqual(args[0][0], 'POST') self.assertEqual(args[0][1], url_prefix + 'volumes') - self.assertEqual(args[1]['data'], { - 'Name': name, 'Driver': None, 'DriverOpts': None - }) + self.assertEqual(json.loads(args[1]['data']), {'Name': name}) @base.requires_api_version('1.21') def test_create_volume_with_driver(self): @@ -2221,8 +2220,9 @@ class DockerClientTest(Cleanup, base.BaseTestCase): self.assertEqual(args[0][0], 'POST') self.assertEqual(args[0][1], url_prefix + 'volumes') - self.assertIn('Driver', args[1]['data']) - self.assertEqual(args[1]['data']['Driver'], driver_name) + data = json.loads(args[1]['data']) + self.assertIn('Driver', data) + self.assertEqual(data['Driver'], driver_name) @base.requires_api_version('1.21') def test_create_volume_invalid_opts_type(self): @@ -2258,7 +2258,7 @@ class DockerClientTest(Cleanup, base.BaseTestCase): def test_remove_volume(self): name = 'perfectcherryblossom' result = self.client.remove_volume(name) - self.assertIsNone(result) + self.assertTrue(result) args = fake_request.call_args self.assertEqual(args[0][0], 'DELETE') @@ -2268,6 +2268,7 @@ class 
DockerClientTest(Cleanup, base.BaseTestCase): # NETWORK TESTS # ##################### + @base.requires_api_version('1.21') def test_create_network(self): network_data = { "id": 'abc12345', @@ -2295,6 +2296,7 @@ class DockerClientTest(Cleanup, base.BaseTestCase): json.loads(post.call_args[1]['data']), {"name": "foo", "driver": "bridge"}) + @base.requires_api_version('1.21') def test_remove_network(self): network_id = 'abc12345' delete = mock.Mock(return_value=response(status_code=200)) @@ -2306,6 +2308,7 @@ class DockerClientTest(Cleanup, base.BaseTestCase): self.assertEqual(args[0][0], url_prefix + 'networks/{0}'.format(network_id)) + @base.requires_api_version('1.21') def test_inspect_network(self): network_id = 'abc12345' network_name = 'foo' @@ -2327,6 +2330,7 @@ class DockerClientTest(Cleanup, base.BaseTestCase): self.assertEqual(args[0][0], url_prefix + 'networks/{0}'.format(network_id)) + @base.requires_api_version('1.21') def test_connect_container_to_network(self): network_id = 'abc12345' container_id = 'def45678' @@ -2345,6 +2349,7 @@ class DockerClientTest(Cleanup, base.BaseTestCase): json.loads(post.call_args[1]['data']), {'container': container_id}) + @base.requires_api_version('1.21') def test_disconnect_container_from_network(self): network_id = 'abc12345' container_id = 'def45678' From 6a66a2248d997e2effa98b4cc82546bdd31048c7 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Thu, 8 Oct 2015 12:22:22 -0700 Subject: [PATCH 05/11] Docs: Remove float from valid cpu_shares types Fixes #775 Signed-off-by: Joffrey F --- docs/api.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/api.md b/docs/api.md index 690fe495..103b235a 100644 --- a/docs/api.md +++ b/docs/api.md @@ -214,7 +214,7 @@ from. 
Optionally a single string joining container id's with commas * network_disabled (bool): Disable networking * name (str): A name for the container * entrypoint (str or list): An entrypoint -* cpu_shares (int or float): CPU shares (relative weight) +* cpu_shares (int): CPU shares (relative weight) * working_dir (str): Path to the working directory * domainname (str or list): Set custom DNS search domains * memswap_limit (int): From 147df4d21feed69ff19a7da99577ba8e7b110b3b Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Wed, 16 Sep 2015 14:09:50 -0700 Subject: [PATCH 06/11] put/get archive implementation Signed-off-by: Joffrey F --- docker/api/container.py | 30 ++++++++++++++ docker/client.py | 3 ++ docker/utils/__init__.py | 2 +- docker/utils/utils.py | 8 ++++ docs/api.md | 37 +++++++++++++++++ tests/helpers.py | 21 ++++++++++ tests/integration_test.py | 85 +++++++++++++++++++++++++++++++++++++++ tests/utils_test.py | 14 ++++++- 8 files changed, 198 insertions(+), 2 deletions(-) diff --git a/docker/api/container.py b/docker/api/container.py index 94889e97..142bd0f6 100644 --- a/docker/api/container.py +++ b/docker/api/container.py @@ -75,6 +75,12 @@ class ContainerApiMixin(object): @utils.check_resource def copy(self, container, resource): + if utils.version_gte(self._version, '1.20'): + warnings.warn( + 'Client.copy() is deprecated for API version >= 1.20, ' + 'please use get_archive() instead', + DeprecationWarning + ) res = self._post_json( self._url("/containers/{0}/copy".format(container)), data={"Resource": resource}, @@ -145,6 +151,21 @@ class ContainerApiMixin(object): self._raise_for_status(res) return res.raw + @utils.check_resource + @utils.minimum_version('1.20') + def get_archive(self, container, path): + params = { + 'path': path + } + url = self._url('/containers/{0}/archive', container) + res = self._get(url, params=params, stream=True) + self._raise_for_status(res) + encoded_stat = res.headers.get('x-docker-container-path-stat') + return ( + res.raw, 
+ utils.decode_json_header(encoded_stat) if encoded_stat else None + ) + @utils.check_resource def inspect_container(self, container): return self._result( @@ -214,6 +235,15 @@ class ContainerApiMixin(object): return h_ports + @utils.check_resource + @utils.minimum_version('1.20') + def put_archive(self, container, path, data): + params = {'path': path} + url = self._url('/containers/{0}/archive', container) + res = self._put(url, params=params, data=data) + self._raise_for_status(res) + return res.status_code == 200 + @utils.check_resource def remove_container(self, container, v=False, link=False, force=False): params = {'v': v, 'link': link, 'force': force} diff --git a/docker/client.py b/docker/client.py index 79efc9f7..d2194726 100644 --- a/docker/client.py +++ b/docker/client.py @@ -109,6 +109,9 @@ class Client( def _get(self, url, **kwargs): return self.get(url, **self._set_request_timeout(kwargs)) + def _put(self, url, **kwargs): + return self.put(url, **self._set_request_timeout(kwargs)) + def _delete(self, url, **kwargs): return self.delete(url, **self._set_request_timeout(kwargs)) diff --git a/docker/utils/__init__.py b/docker/utils/__init__.py index fd0ef5c0..92e03e9b 100644 --- a/docker/utils/__init__.py +++ b/docker/utils/__init__.py @@ -3,7 +3,7 @@ from .utils import ( mkbuildcontext, tar, exclude_paths, parse_repository_tag, parse_host, kwargs_from_env, convert_filters, create_host_config, create_container_config, parse_bytes, ping_registry, parse_env_file, - version_lt, version_gte + version_lt, version_gte, decode_json_header ) # flake8: noqa from .types import Ulimit, LogConfig # flake8: noqa diff --git a/docker/utils/utils.py b/docker/utils/utils.py index 1fce1377..89837b78 100644 --- a/docker/utils/utils.py +++ b/docker/utils/utils.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import base64 import io import os import os.path @@ -66,6 +67,13 @@ def mkbuildcontext(dockerfile): return f +def decode_json_header(header): + data = base64.b64decode(header) + if six.PY3: + data = data.decode('utf-8') + return json.loads(data) + + def tar(path, exclude=None, dockerfile=None): f = tempfile.NamedTemporaryFile() t = tarfile.open(mode='w', fileobj=f) diff --git a/docs/api.md b/docs/api.md index 103b235a..6cd69fe6 100644 --- a/docs/api.md +++ b/docs/api.md @@ -165,6 +165,8 @@ non-running ones ## copy Identical to the `docker cp` command. Get files/folders from the container. +**Deprecated for API version >= 1.20** – Consider using +[`get_archive`](#get_archive) **instead.** **Params**: @@ -376,6 +378,27 @@ Export the contents of a filesystem as a tar archive to STDOUT. **Returns** (str): The filesystem tar archive as a str +## get_archive + +Retrieve a file or folder from a container in the form of a tar archive. + +**Params**: + +* container (str): The container where the file is located +* path (str): Path to the file or folder to retrieve + +**Returns** (tuple): First element is a raw tar data stream. Second element is +a dict containing `stat` information on the specified `path`. + +```python +>>> import docker +>>> c = docker.Client() +>>> ctnr = c.create_container('busybox', 'true') +>>> strm, stat = c.get_archive(ctnr, '/bin/sh') +>>> print(stat) +{u'linkTarget': u'', u'mode': 493, u'mtime': u'2015-09-16T12:34:23-07:00', u'name': u'sh', u'size': 962860} +``` + ## get_image Get an image from the docker daemon. Similar to the `docker save` command. @@ -712,6 +735,20 @@ command. yourname/app/tags/latest}"}\\n'] ``` +## put_archive + +Insert a file or folder in an existing container using a tar archive as source. + +**Params**: + +* container (str): The container where the file(s) will be extracted +* path (str): Path inside the container where the file(s) will be extracted. + Must exist. 
+* data (bytes): tar data to be extracted + +**Returns** (bool): True if the call succeeds. `docker.errors.APIError` will +be raised if an error occurs. + ## remove_container Remove a container. Similar to the `docker rm` command. diff --git a/tests/helpers.py b/tests/helpers.py index 95692db7..392be3b8 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -1,5 +1,6 @@ import os import os.path +import tarfile import tempfile @@ -14,3 +15,23 @@ def make_tree(dirs, files): f.write("content") return base + + +def simple_tar(path): + f = tempfile.NamedTemporaryFile() + t = tarfile.open(mode='w', fileobj=f) + + abs_path = os.path.abspath(path) + t.add(abs_path, arcname=os.path.basename(path), recursive=False) + + t.close() + f.seek(0) + return f + + +def untar_file(tardata, filename): + with tarfile.open(mode='r', fileobj=tardata) as t: + f = t.extractfile(filename) + result = f.read() + f.close() + return result diff --git a/tests/integration_test.py b/tests/integration_test.py index da380c1e..8a927084 100644 --- a/tests/integration_test.py +++ b/tests/integration_test.py @@ -37,6 +37,7 @@ import docker from docker.errors import APIError, NotFound from docker.utils import kwargs_from_env +from . 
import helpers from .base import requires_api_version from .test import Cleanup @@ -427,6 +428,90 @@ class CreateContainerWithLogConfigTest(BaseTestCase): self.assertEqual(container_log_config['Config'], {}) +@requires_api_version('1.20') +class GetArchiveTest(BaseTestCase): + def test_get_file_archive_from_container(self): + data = 'The Maid and the Pocket Watch of Blood' + ctnr = self.client.create_container( + BUSYBOX, 'sh -c "echo {0} > /vol1/data.txt"'.format(data), + volumes=['/vol1'] + ) + self.tmp_containers.append(ctnr) + self.client.start(ctnr) + self.client.wait(ctnr) + with tempfile.NamedTemporaryFile() as destination: + strm, stat = self.client.get_archive(ctnr, '/vol1/data.txt') + for d in strm: + destination.write(d) + destination.seek(0) + retrieved_data = helpers.untar_file(destination, 'data.txt') + if six.PY3: + retrieved_data = retrieved_data.decode('utf-8') + self.assertEqual(data, retrieved_data.strip()) + + def test_get_file_stat_from_container(self): + data = 'The Maid and the Pocket Watch of Blood' + ctnr = self.client.create_container( + BUSYBOX, 'sh -c "echo -n {0} > /vol1/data.txt"'.format(data), + volumes=['/vol1'] + ) + self.tmp_containers.append(ctnr) + self.client.start(ctnr) + self.client.wait(ctnr) + strm, stat = self.client.get_archive(ctnr, '/vol1/data.txt') + self.assertIn('name', stat) + self.assertEqual(stat['name'], 'data.txt') + self.assertIn('size', stat) + self.assertEqual(stat['size'], len(data)) + + +@requires_api_version('1.20') +class PutArchiveTest(BaseTestCase): + def test_copy_file_to_container(self): + data = b'Deaf To All But The Song' + with tempfile.NamedTemporaryFile() as test_file: + test_file.write(data) + test_file.seek(0) + ctnr = self.client.create_container( + BUSYBOX, + 'cat {0}'.format( + os.path.join('/vol1', os.path.basename(test_file.name)) + ), + volumes=['/vol1'] + ) + self.tmp_containers.append(ctnr) + with helpers.simple_tar(test_file.name) as test_tar: + self.client.put_archive(ctnr, '/vol1', 
test_tar) + self.client.start(ctnr) + self.client.wait(ctnr) + logs = self.client.logs(ctnr) + if six.PY3: + logs = logs.decode('utf-8') + data = data.decode('utf-8') + self.assertEqual(logs.strip(), data) + + def test_copy_directory_to_container(self): + files = ['a.py', 'b.py', 'foo/b.py'] + dirs = ['foo', 'bar'] + base = helpers.make_tree(dirs, files) + ctnr = self.client.create_container( + BUSYBOX, 'ls -p /vol1', volumes=['/vol1'] + ) + self.tmp_containers.append(ctnr) + with docker.utils.tar(base) as test_tar: + self.client.put_archive(ctnr, '/vol1', test_tar) + self.client.start(ctnr) + self.client.wait(ctnr) + logs = self.client.logs(ctnr) + if six.PY3: + logs = logs.decode('utf-8') + results = logs.strip().split() + self.assertIn('a.py', results) + self.assertIn('b.py', results) + self.assertIn('foo/', results) + self.assertIn('bar/', results) + + class TestCreateContainerReadOnlyFs(BaseTestCase): def runTest(self): if not exec_driver_is_native(): diff --git a/tests/utils_test.py b/tests/utils_test.py index 8ac1dcb9..b1adde26 100644 --- a/tests/utils_test.py +++ b/tests/utils_test.py @@ -1,5 +1,7 @@ # -*- coding: utf-8 -*- +import base64 +import json import os import os.path import shutil @@ -14,7 +16,7 @@ from docker.errors import DockerException from docker.utils import ( parse_repository_tag, parse_host, convert_filters, kwargs_from_env, create_host_config, Ulimit, LogConfig, parse_bytes, parse_env_file, - exclude_paths, convert_volume_binds, + exclude_paths, convert_volume_binds, decode_json_header ) from docker.utils.ports import build_port_bindings, split_port from docker.auth import resolve_repository_name, resolve_authconfig @@ -370,6 +372,16 @@ class UtilsTest(base.BaseTestCase): for filters, expected in tests: self.assertEqual(convert_filters(filters), expected) + def test_decode_json_header(self): + obj = {'a': 'b', 'c': 1} + data = None + if six.PY3: + data = base64.b64encode(bytes(json.dumps(obj), 'utf-8')) + else: + data = 
base64.b64encode(json.dumps(obj)) + decoded_data = decode_json_header(data) + self.assertEqual(obj, decoded_data) + def test_resolve_repository_name(self): # docker hub library image self.assertEqual( From a3a345e2f6ce201e588dff666c4931e0723ff059 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Fri, 9 Oct 2015 12:01:16 -0700 Subject: [PATCH 07/11] Use url-safe base64 when encoding auth header + regression test Signed-off-by: Joffrey F --- docker/auth/auth.py | 2 +- tests/utils_test.py | 17 ++++++++++++++--- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/docker/auth/auth.py b/docker/auth/auth.py index 366bc67e..1ee9f812 100644 --- a/docker/auth/auth.py +++ b/docker/auth/auth.py @@ -102,7 +102,7 @@ def decode_auth(auth): def encode_header(auth): auth_json = json.dumps(auth).encode('ascii') - return base64.b64encode(auth_json) + return base64.urlsafe_b64encode(auth_json) def parse_auth(entries): diff --git a/tests/utils_test.py b/tests/utils_test.py index b1adde26..04183f9f 100644 --- a/tests/utils_test.py +++ b/tests/utils_test.py @@ -19,7 +19,9 @@ from docker.utils import ( exclude_paths, convert_volume_binds, decode_json_header ) from docker.utils.ports import build_port_bindings, split_port -from docker.auth import resolve_repository_name, resolve_authconfig +from docker.auth import ( + resolve_repository_name, resolve_authconfig, encode_header +) from . 
import base from .helpers import make_tree @@ -376,12 +378,21 @@ class UtilsTest(base.BaseTestCase): obj = {'a': 'b', 'c': 1} data = None if six.PY3: - data = base64.b64encode(bytes(json.dumps(obj), 'utf-8')) + data = base64.urlsafe_b64encode(bytes(json.dumps(obj), 'utf-8')) else: - data = base64.b64encode(json.dumps(obj)) + data = base64.urlsafe_b64encode(json.dumps(obj)) decoded_data = decode_json_header(data) self.assertEqual(obj, decoded_data) + def test_803_urlsafe_encode(self): + auth_data = { + 'username': 'root', + 'password': 'GR?XGR?XGR?XGR?X' + } + encoded = encode_header(auth_data) + assert b'/' not in encoded + assert b'_' in encoded + def test_resolve_repository_name(self): # docker hub library image self.assertEqual( From 9697923bca04bfbcbd5c122144b15016ecdd9f65 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Thu, 8 Oct 2015 14:58:54 -0700 Subject: [PATCH 08/11] Update changelog and bump version to 1.5.0 Signed-off-by: Joffrey F --- docker/version.py | 2 +- docs/change_log.md | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/docker/version.py b/docker/version.py index 38ce7ffa..4ebafdde 100644 --- a/docker/version.py +++ b/docker/version.py @@ -1,2 +1,2 @@ -version = "1.5.0-dev" +version = "1.5.0" version_info = tuple([int(d) for d in version.split("-")[0].split(".")]) diff --git a/docs/change_log.md b/docs/change_log.md index da3a9b0c..f2b272ae 100644 --- a/docs/change_log.md +++ b/docs/change_log.md @@ -1,6 +1,43 @@ Change Log ========== +1.5.0 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.5.0+is%3Aclosed) + +### Features + +* Added support for the networking API introduced in Docker 1.9.0 + (`Client.networks`, `Client.create_network`, `Client.remove_network`, + `Client.inspect_network`, `Client.connect_container_to_network`, + `Client.disconnect_container_from_network`). 
+* Added support for the volumes API introduced in Docker 1.9.0 + (`Client.volumes`, `Client.create_volume`, `Client.inspect_volume`, + `Client.remove_volume`). +* Added support for the `group_add` parameter in `create_host_config`. +* Added support for the CPU CFS (`cpu_quota` and `cpu_period`) parameters + in `create_host_config`. +* Added support for the archive API endpoint (`Client.get_archive`, + `Client.put_archive`). +* Added support for `ps_args` parameter in `Client.top`. + + +### Bugfixes + +* Fixed a bug where specifying volume binds with unicode characters would + fail. +* Fixed a bug where providing an explicit protocol in `Client.port` would fail + to yield the expected result. +* Fixed a bug where the priority protocol returned by `Client.port` would be UDP + instead of the expected TCP. + +### Miscellaneous + +* Broke up Client code into several files to facilitate maintenance and + contribution. +* Added contributing guidelines to the repository. + 1.4.0 ----- From 0932ac230ba5ca8ef13b77be89e2882b9386c24d Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Thu, 8 Oct 2015 17:28:43 -0700 Subject: [PATCH 09/11] Add devices param to the hostconfig documentation Signed-off-by: Joffrey F --- docs/hostconfig.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/hostconfig.md b/docs/hostconfig.md index 39b7a237..9bd42c9f 100644 --- a/docs/hostconfig.md +++ b/docs/hostconfig.md @@ -101,6 +101,14 @@ for example: allowed to consume. * group_add (list): List of additional group names and/or IDs that the container process will run as.
+* devices (list): A list of devices to add to the container specified as dicts + in the form: + ``` + { "PathOnHost": "/dev/deviceName", + "PathInContainer": "/dev/deviceName", + "CgroupPermissions": "mrw" + } + ``` **Returns** (dict) HostConfig dictionary From b1f25317deae569d332cf5faaccda8710932e6ef Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Thu, 8 Oct 2015 17:17:45 -0700 Subject: [PATCH 10/11] Use DOCKER_CONFIG environment variable to look up auth config When provided, default paths are ignored. Signed-off-by: Joffrey F --- docker/auth/auth.py | 106 ++++++++++++++++++++++---------------------- tests/test.py | 33 ++++++++++++-- 2 files changed, 83 insertions(+), 56 deletions(-) diff --git a/docker/auth/auth.py b/docker/auth/auth.py index 1ee9f812..b02c3ed3 100644 --- a/docker/auth/auth.py +++ b/docker/auth/auth.py @@ -13,7 +13,6 @@ # limitations under the License. import base64 -import fileinput import json import logging import os @@ -132,78 +131,79 @@ def parse_auth(entries): return conf +def find_config_file(config_path=None): + environment_path = os.path.join( + os.environ.get('DOCKER_CONFIG'), + os.path.basename(DOCKER_CONFIG_FILENAME) + ) if os.environ.get('DOCKER_CONFIG') else None + + paths = [ + config_path, # 1 + environment_path, # 2 + os.path.join(os.path.expanduser('~'), DOCKER_CONFIG_FILENAME), # 3 + os.path.join( + os.path.expanduser('~'), LEGACY_DOCKER_CONFIG_FILENAME + ) # 4 + ] + + for path in paths: + if path and os.path.exists(path): + return path + return None + + def load_config(config_path=None): """ Loads authentication data from a Docker configuration file in the given root directory or if config_path is passed use given path. + Lookup priority: + explicit config_path parameter > DOCKER_CONFIG environment variable > + ~/.docker/config.json > ~/.dockercfg """ - conf = {} - data = None - # Prefer ~/.docker/config.json. 
- config_file = config_path or os.path.join(os.path.expanduser('~'), - DOCKER_CONFIG_FILENAME) + config_file = find_config_file(config_path) - log.debug("Trying {0}".format(config_file)) - - if os.path.exists(config_file): - try: - with open(config_file) as f: - for section, data in six.iteritems(json.load(f)): - if section != 'auths': - continue - log.debug("Found 'auths' section") - return parse_auth(data) - log.debug("Couldn't find 'auths' section") - except (IOError, KeyError, ValueError) as e: - # Likely missing new Docker config file or it's in an - # unknown format, continue to attempt to read old location - # and format. - log.debug(e) - pass - else: + if not config_file: log.debug("File doesn't exist") - - config_file = config_path or os.path.join(os.path.expanduser('~'), - LEGACY_DOCKER_CONFIG_FILENAME) - - log.debug("Trying {0}".format(config_file)) - - if not os.path.exists(config_file): - log.debug("File doesn't exist - returning empty config") return {} - log.debug("Attempting to parse as JSON") try: with open(config_file) as f: - return parse_auth(json.load(f)) - except Exception as e: + data = json.load(f) + if data.get('auths'): + log.debug("Found 'auths' section") + return parse_auth(data) + else: + log.debug("Couldn't find 'auths' section") + f.seek(0) + return parse_auth(json.load(f)) + except (IOError, KeyError, ValueError) as e: + # Likely missing new Docker config file or it's in an + # unknown format, continue to attempt to read old location + # and format. 
log.debug(e) - pass - # If that fails, we assume the configuration file contains a single - # authentication token for the public registry in the following format: - # - # auth = AUTH_TOKEN - # email = email@domain.com log.debug("Attempting to parse legacy auth file format") try: data = [] - for line in fileinput.input(config_file): - data.append(line.strip().split(' = ')[1]) - if len(data) < 2: - # Not enough data - raise errors.InvalidConfigFile( - 'Invalid or empty configuration file!') + with open(config_file) as f: + for line in f.readlines(): + data.append(line.strip().split(' = ')[1]) + if len(data) < 2: + # Not enough data + raise errors.InvalidConfigFile( + 'Invalid or empty configuration file!' + ) username, password = decode_auth(data[0]) - conf[INDEX_NAME] = { - 'username': username, - 'password': password, - 'email': data[1], - 'serveraddress': INDEX_URL, + return { + INDEX_NAME: { + 'username': username, + 'password': password, + 'email': data[1], + 'serveraddress': INDEX_URL, + } } - return conf except Exception as e: log.debug(e) pass diff --git a/tests/test.py b/tests/test.py index dbb755f1..20ec88cf 100644 --- a/tests/test.py +++ b/tests/test.py @@ -2387,7 +2387,7 @@ class DockerClientTest(Cleanup, base.BaseTestCase): f.write('auth = {0}\n'.format(auth_)) f.write('email = sakuya@scarlet.net') cfg = docker.auth.load_config(dockercfg_path) - self.assertTrue(docker.auth.INDEX_NAME in cfg) + assert docker.auth.INDEX_NAME in cfg self.assertNotEqual(cfg[docker.auth.INDEX_NAME], None) cfg = cfg[docker.auth.INDEX_NAME] self.assertEqual(cfg['username'], 'sakuya') @@ -2412,10 +2412,10 @@ class DockerClientTest(Cleanup, base.BaseTestCase): } with open(dockercfg_path, 'w') as f: - f.write(json.dumps(config)) + json.dump(config, f) cfg = docker.auth.load_config(dockercfg_path) - self.assertTrue(registry in cfg) + assert registry in cfg self.assertNotEqual(cfg[registry], None) cfg = cfg[registry] self.assertEqual(cfg['username'], 'sakuya') @@ -2423,6 +2423,33 
@@ class DockerClientTest(Cleanup, base.BaseTestCase): self.assertEqual(cfg['email'], 'sakuya@scarlet.net') self.assertEqual(cfg.get('auth'), None) + def test_load_config_custom_config_env(self): + folder = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, folder) + + dockercfg_path = os.path.join(folder, 'config.json') + registry = 'https://your.private.registry.io' + auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii') + config = { + registry: { + 'auth': '{0}'.format(auth_), + 'email': 'sakuya@scarlet.net' + } + } + + with open(dockercfg_path, 'w') as f: + json.dump(config, f) + + with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}): + cfg = docker.auth.load_config(None) + assert registry in cfg + self.assertNotEqual(cfg[registry], None) + cfg = cfg[registry] + self.assertEqual(cfg['username'], 'sakuya') + self.assertEqual(cfg['password'], 'izayoi') + self.assertEqual(cfg['email'], 'sakuya@scarlet.net') + self.assertEqual(cfg.get('auth'), None) + def test_tar_with_excludes(self): dirs = [ 'foo', From 7f3692ceeda92ca3690394822cbd3c99378c0d7e Mon Sep 17 00:00:00 2001 From: Stephen Moore Date: Sun, 20 Sep 2015 15:48:07 +1000 Subject: [PATCH 11/11] Fix attach method over SSL connections Signed-off-by: Stephen Moore --- Makefile | 20 +++++++--- docker/client.py | 20 ++++++++-- pytest.ini | 2 +- tests/Dockerfile-dind-certs | 20 ++++++++++ tests/integration_test.py | 73 ++++++++++++++++++++++++++++++++++++- 5 files changed, 123 insertions(+), 12 deletions(-) create mode 100644 tests/Dockerfile-dind-certs diff --git a/Makefile b/Makefile index 30421e50..c051bac1 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,10 @@ build: build-py3: docker build -t docker-py3 -f Dockerfile-py3 . -test: flake8 unit-test unit-test-py3 integration-dind +build-dind-certs: + docker build -t dpy-dind-certs -f tests/Dockerfile-dind-certs . 
+ +test: flake8 unit-test unit-test-py3 integration-dind integration-dind-ssl unit-test: build docker run docker-py py.test tests/test.py tests/utils_test.py @@ -26,10 +29,17 @@ integration-test-py3: build-py3 docker run -v /var/run/docker.sock:/var/run/docker.sock docker-py3 py.test tests/integration_test.py integration-dind: build build-py3 - docker run -d --name dpy-dind --privileged dockerswarm/dind:1.8.1 docker -d -H tcp://0.0.0.0:2375 - docker run --env="DOCKER_HOST=tcp://docker:2375" --link=dpy-dind:docker docker-py py.test tests/integration_test.py - docker run --env="DOCKER_HOST=tcp://docker:2375" --link=dpy-dind:docker docker-py3 py.test tests/integration_test.py + docker run -d --name dpy-dind --env="DOCKER_HOST=tcp://localhost:2375" --privileged dockerswarm/dind:1.8.1 docker -d -H tcp://0.0.0.0:2375 + docker run --volumes-from dpy-dind --env="DOCKER_HOST=tcp://docker:2375" --link=dpy-dind:docker docker-py py.test tests/integration_test.py + docker run --volumes-from dpy-dind --env="DOCKER_HOST=tcp://docker:2375" --link=dpy-dind:docker docker-py3 py.test tests/integration_test.py docker rm -vf dpy-dind +integration-dind-ssl: build-dind-certs build build-py3 + docker run -d --name dpy-dind-certs dpy-dind-certs + docker run -d --env="DOCKER_HOST=tcp://localhost:2375" --env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs" --volumes-from dpy-dind-certs --name dpy-dind-ssl -v /tmp --privileged dockerswarm/dind:1.8.1 docker daemon --tlsverify --tlscacert=/certs/ca.pem --tlscert=/certs/server-cert.pem --tlskey=/certs/server-key.pem -H tcp://0.0.0.0:2375 + docker run --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375" --env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs" --link=dpy-dind-ssl:docker docker-py py.test tests/integration_test.py + docker run --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375" --env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs" --link=dpy-dind-ssl:docker docker-py3 py.test 
tests/integration_test.py + docker rm -vf dpy-dind-ssl dpy-dind-certs + flake8: build - docker run docker-py flake8 docker tests \ No newline at end of file + docker run docker-py flake8 docker tests diff --git a/docker/client.py b/docker/client.py index d2194726..d339f3b3 100644 --- a/docker/client.py +++ b/docker/client.py @@ -188,6 +188,8 @@ class Client( self._raise_for_status(response) if six.PY3: sock = response.raw._fp.fp.raw + if self.base_url.startswith("https://"): + sock = sock._sock else: sock = response.raw._fp.fp._sock try: @@ -244,10 +246,7 @@ class Client( # Disable timeout on the underlying socket to prevent # Read timed out(s) for long running processes socket = self._get_raw_response_socket(response) - if six.PY3: - socket._sock.settimeout(None) - else: - socket.settimeout(None) + self._disable_socket_timeout(socket) while True: header = response.raw.read(constants.STREAM_HEADER_SIZE_BYTES) @@ -276,6 +275,19 @@ class Client( for out in response.iter_content(chunk_size=1, decode_unicode=True): yield out + def _disable_socket_timeout(self, socket): + """ Depending on the combination of python version and whether we're + connecting over http or https, we might need to access _sock, which + may or may not exist; or we may need to just settimeout on socket + itself, which also may or may not have settimeout on it. + + To avoid missing the correct one, we try both. 
+ """ + if hasattr(socket, "settimeout"): + socket.settimeout(None) + if hasattr(socket, "_sock") and hasattr(socket._sock, "settimeout"): + socket._sock.settimeout(None) + def _get_result(self, container, stream, res): cont = self.inspect_container(container) return self._get_result_tty(stream, res, cont['Config']['Tty']) diff --git a/pytest.ini b/pytest.ini index 21b47a6a..f9c7990e 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,2 +1,2 @@ [pytest] -addopts = --tb=short -rxs +addopts = --tb=short -rxs -s diff --git a/tests/Dockerfile-dind-certs b/tests/Dockerfile-dind-certs new file mode 100644 index 00000000..9e8c042b --- /dev/null +++ b/tests/Dockerfile-dind-certs @@ -0,0 +1,20 @@ +FROM python:2.7 +RUN mkdir /tmp/certs +VOLUME /certs + +WORKDIR /tmp/certs +RUN openssl genrsa -aes256 -passout pass:foobar -out ca-key.pem 4096 +RUN echo "[req]\nprompt=no\ndistinguished_name = req_distinguished_name\n[req_distinguished_name]\ncountryName=AU" > /tmp/config +RUN openssl req -new -x509 -passin pass:foobar -config /tmp/config -days 365 -key ca-key.pem -sha256 -out ca.pem +RUN openssl genrsa -out server-key.pem -passout pass:foobar 4096 +RUN openssl req -subj "/CN=docker" -sha256 -new -key server-key.pem -out server.csr +RUN echo subjectAltName = DNS:docker,DNS:localhost > extfile.cnf +RUN openssl x509 -req -days 365 -passin pass:foobar -sha256 -in server.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out server-cert.pem -extfile extfile.cnf +RUN openssl genrsa -out key.pem 4096 +RUN openssl req -passin pass:foobar -subj '/CN=client' -new -key key.pem -out client.csr +RUN echo extendedKeyUsage = clientAuth > extfile.cnf +RUN openssl x509 -req -passin pass:foobar -days 365 -sha256 -in client.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out cert.pem -extfile extfile.cnf +RUN chmod -v 0400 ca-key.pem key.pem server-key.pem +RUN chmod -v 0444 ca.pem server-cert.pem cert.pem + +CMD cp -R /tmp/certs/* /certs && while true; do sleep 1; done diff --git 
a/tests/integration_test.py b/tests/integration_test.py index 8a927084..c264a405 100644 --- a/tests/integration_test.py +++ b/tests/integration_test.py @@ -14,6 +14,7 @@ import base64 import contextlib +import errno import json import io import os @@ -21,6 +22,7 @@ import random import shutil import signal import socket +import struct import tarfile import tempfile import threading @@ -76,7 +78,17 @@ def setup_module(): try: c.inspect_image(BUSYBOX) except NotFound: - c.pull(BUSYBOX) + os.write(2, "\npulling busybox\n".encode('utf-8')) + for data in c.pull('busybox', stream=True): + data = json.loads(data.decode('utf-8')) + os.write(2, ("%c[2K\r" % 27).encode('utf-8')) + status = data.get("status") + progress = data.get("progress") + detail = "{0} - {1}".format(status, progress).encode('utf-8') + os.write(2, detail) + os.write(2, "\npulled busybox\n".encode('utf-8')) + + # Double make sure we now have busybox c.inspect_image(BUSYBOX) c.close() @@ -887,7 +899,6 @@ class TestContainerTop(BaseTestCase): self.client.start(container) res = self.client.top(container['Id']) - print(res) self.assertEqual( res['Titles'], ['UID', 'PID', 'PPID', 'C', 'STIME', 'TTY', 'TIME', 'CMD'] @@ -1213,6 +1224,64 @@ class TestRunContainerStreaming(BaseTestCase): self.assertTrue(sock.fileno() > -1) +class TestRunContainerReadingSocket(BaseTestCase): + def runTest(self): + line = 'hi there and stuff and things, words!' 
+ command = "echo '{0}'".format(line) + container = self.client.create_container(BUSYBOX, command, + detach=True, tty=False) + ident = container['Id'] + self.tmp_containers.append(ident) + + opts = {"stdout": 1, "stream": 1, "logs": 1} + pty_stdout = self.client.attach_socket(ident, opts) + self.client.start(ident) + + recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK) + + def read(n=4096): + """Code stolen from dockerpty to read the socket""" + try: + if hasattr(pty_stdout, 'recv'): + return pty_stdout.recv(n) + return os.read(pty_stdout.fileno(), n) + except EnvironmentError as e: + if e.errno not in recoverable_errors: + raise + + def next_packet_size(): + """Code stolen from dockerpty to get the next packet size""" + data = six.binary_type() + while len(data) < 8: + next_data = read(8 - len(data)) + if not next_data: + return 0 + data = data + next_data + + if data is None: + return 0 + + if len(data) == 8: + _, actual = struct.unpack('>BxxxL', data) + return actual + + next_size = next_packet_size() + self.assertEqual(next_size, len(line)+1) + + data = six.binary_type() + while len(data) < next_size: + next_data = read(next_size - len(data)) + if not next_data: + assert False, "Failed trying to read in the dataz" + data += next_data + self.assertEqual(data.decode('utf-8'), "{0}\n".format(line)) + pty_stdout.close() + + # Prevent segfault at the end of the test run + if hasattr(pty_stdout, "_response"): + del pty_stdout._response + + class TestPauseUnpauseContainer(BaseTestCase): def runTest(self): container = self.client.create_container(BUSYBOX, ['sleep', '9999'])