Merge pull request #2637 from docker/4.3.0-release

4.3.0 release
This commit is contained in:
Anca Iordache 2020-08-10 18:35:10 +02:00 committed by GitHub
commit 30089ec681
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
22 changed files with 277 additions and 36 deletions

4 Jenkinsfile vendored
View File

@@ -31,7 +31,7 @@ def buildImages = { ->
}
def getDockerVersions = { ->
def dockerVersions = ["19.03.5"]
def dockerVersions = ["19.03.12"]
wrappedNode(label: "amd64 && ubuntu-1804 && overlay2") {
def result = sh(script: """docker run --rm \\
--entrypoint=python \\
@@ -66,7 +66,7 @@ def runTests = { Map settings ->
throw new Exception("Need test image object, e.g.: `runTests(testImage: img)`")
}
if (!dockerVersion) {
throw new Exception("Need Docker version to test, e.g.: `runTests(dockerVersion: '1.12.3')`")
throw new Exception("Need Docker version to test, e.g.: `runTests(dockerVersion: '19.03.12')`")
}
if (!pythonVersion) {
throw new Exception("Need Python version being tested, e.g.: `runTests(pythonVersion: 'py2.7')`")

View File

@@ -41,8 +41,8 @@ integration-test: build
integration-test-py3: build-py3
docker run -t --rm -v /var/run/docker.sock:/var/run/docker.sock docker-sdk-python3 py.test -v tests/integration/${file}
TEST_API_VERSION ?= 1.35
TEST_ENGINE_VERSION ?= 19.03.5
TEST_API_VERSION ?= 1.39
TEST_ENGINE_VERSION ?= 19.03.12
.PHONY: setup-network
setup-network:

View File

@@ -480,6 +480,9 @@ class ContainerApiMixin(object):
For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
to have read-write access to the host's ``/dev/sda`` via a
node named ``/dev/xvda`` inside the container.
device_requests (:py:class:`list`): Expose host resources such as
GPUs to the container, as a list of
:py:class:`docker.types.DeviceRequest` instances.
dns (:py:class:`list`): Set custom DNS servers.
dns_opt (:py:class:`list`): Additional options to be added to the
container's ``resolv.conf`` file
@@ -636,6 +639,8 @@ class ContainerApiMixin(object):
network, using the IPv6 protocol. Defaults to ``None``.
link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
addresses.
driver_opt (dict): A dictionary of options to provide to the
network driver. Defaults to ``None``.
Returns:
(dict) An endpoint config.
@@ -694,7 +699,8 @@ class ContainerApiMixin(object):
return self._stream_raw_result(res, chunk_size, False)
@utils.check_resource('container')
def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE,
encode_stream=False):
"""
Retrieve a file or folder from a container in the form of a tar
archive.
@@ -705,6 +711,8 @@ class ContainerApiMixin(object):
chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB
encode_stream (bool): Determines if data should be encoded
(gzip-compressed) during transmission. Default: False
Returns:
(tuple): First element is a raw tar data stream. Second element is
@@ -729,8 +737,13 @@ class ContainerApiMixin(object):
params = {
'path': path
}
headers = {
"Accept-Encoding": "gzip, deflate"
} if encode_stream else {
"Accept-Encoding": "identity"
}
url = self._url('/containers/{0}/archive', container)
res = self._get(url, params=params, stream=True)
res = self._get(url, params=params, stream=True, headers=headers)
self._raise_for_status(res)
encoded_stat = res.headers.get('x-docker-container-path-stat')
return (
@@ -1120,7 +1133,7 @@ class ContainerApiMixin(object):
else:
if decode:
raise errors.InvalidArgument(
"decode is only available in conjuction with stream=True"
"decode is only available in conjunction with stream=True"
)
return self._result(self._get(url, params={'stream': False}),
json=True)
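
For reference, a minimal usage sketch of the new encode_stream flag on the low-level client; the container name and paths below are placeholders, not part of this change:

    import docker

    client = docker.APIClient(base_url='unix://var/run/docker.sock')
    # encode_stream=True asks the daemon to gzip-compress the tar stream
    # in transit by sending "Accept-Encoding: gzip, deflate" instead of
    # "identity"; the chunks yielded here are still the plain tar data.
    stream, stat = client.get_archive('my-container', '/etc/hostname',
                                      encode_stream=True)
    with open('hostname.tar', 'wb') as f:
        for chunk in stream:
            f.write(chunk)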

View File

@@ -216,7 +216,7 @@ class NetworkApiMixin(object):
def connect_container_to_network(self, container, net_id,
ipv4_address=None, ipv6_address=None,
aliases=None, links=None,
link_local_ips=None):
link_local_ips=None, driver_opt=None):
"""
Connect a container to a network.
@@ -240,7 +240,8 @@ class NetworkApiMixin(object):
"Container": container,
"EndpointConfig": self.create_endpoint_config(
aliases=aliases, links=links, ipv4_address=ipv4_address,
ipv6_address=ipv6_address, link_local_ips=link_local_ips
ipv6_address=ipv6_address, link_local_ips=link_local_ips,
driver_opt=driver_opt
),
}
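
A minimal sketch of the low-level call with the new driver_opt parameter; the network name, image, and option key below are illustrative only:

    import docker

    client = docker.APIClient(version='1.40')
    net = client.create_network('example-net', driver='bridge')
    ctr = client.create_container('busybox', 'top')
    client.start(ctr)
    # driver_opt becomes DriverOpts in the endpoint config (API >= 1.32).
    client.connect_container_to_network(
        ctr, net['Id'], driver_opt={'com.example.setting': 'on'}
    )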

View File

@@ -1,7 +1,7 @@
import sys
from .version import version
DEFAULT_DOCKER_API_VERSION = '1.35'
DEFAULT_DOCKER_API_VERSION = '1.39'
MINIMUM_DOCKER_API_VERSION = '1.21'
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8

View File

@@ -225,7 +225,8 @@ class Container(Model):
"""
return self.client.api.export(self.id, chunk_size)
def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE,
encode_stream=False):
"""
Retrieve a file or folder from the container in the form of a tar
archive.
@@ -235,6 +236,8 @@ class Container(Model):
chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB
encode_stream (bool): Determines if data should be encoded
(gzip-compressed) during transmission. Default: False
Returns:
(tuple): First element is a raw tar data stream. Second element is
@@ -255,7 +258,8 @@ class Container(Model):
... f.write(chunk)
>>> f.close()
"""
return self.client.api.get_archive(self.id, path, chunk_size)
return self.client.api.get_archive(self.id, path,
chunk_size, encode_stream)
def kill(self, signal=None):
"""
@@ -579,6 +583,9 @@ class ContainerCollection(Collection):
For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
to have read-write access to the host's ``/dev/sda`` via a
node named ``/dev/xvda`` inside the container.
device_requests (:py:class:`list`): Expose host resources such as
GPUs to the container, as a list of
:py:class:`docker.types.DeviceRequest` instances.
dns (:py:class:`list`): Set custom DNS servers.
dns_opt (:py:class:`list`): Additional options to be added to the
container's ``resolv.conf`` file.
@@ -998,6 +1005,7 @@ RUN_HOST_CONFIG_KWARGS = [
'device_write_bps',
'device_write_iops',
'devices',
'device_requests',
'dns_opt',
'dns_search',
'dns',
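
Since device_requests is now accepted by the high-level run()/create() helpers, here is a hedged sketch of requesting GPUs through docker.from_env(); it assumes an NVIDIA-enabled daemon and uses a placeholder CUDA image:

    import docker
    from docker.types import DeviceRequest

    client = docker.from_env()
    # count=-1 requests all available GPUs; requires API version 1.40+.
    logs = client.containers.run(
        'nvidia/cuda:10.2-base', 'nvidia-smi',
        device_requests=[DeviceRequest(count=-1, capabilities=[['gpu']])],
        remove=True,
    )
    print(logs.decode())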

View File

@@ -46,6 +46,8 @@ class Network(Model):
network, using the IPv6 protocol. Defaults to ``None``.
link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
addresses.
driver_opt (dict): A dictionary of options to provide to the
network driver. Defaults to ``None``.
Raises:
:py:class:`docker.errors.APIError`
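
Network.connect forwards extra keyword arguments to connect_container_to_network, so the new driver_opt can also be passed from the model API; a hedged sketch with illustrative names:

    import docker

    client = docker.from_env()
    container = client.containers.run('busybox', 'top', detach=True)
    network = client.networks.create('example-net', driver='bridge')
    # The driver_opt dict ends up as DriverOpts on this endpoint.
    network.connect(container, driver_opt={'com.example.setting': 'on'})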

View File

@@ -32,7 +32,7 @@ class TLSConfig(object):
# https://docs.docker.com/engine/articles/https/
# This diverges from the Docker CLI in that users can specify 'tls'
# here, but also disable any public/default CA pool verification by
# leaving tls_verify=False
# leaving verify=False
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
@@ -62,7 +62,7 @@ class TLSConfig(object):
# https://github.com/docker/docker-py/issues/963
self.ssl_version = ssl.PROTOCOL_TLSv1
# "tls" and "tls_verify" must have both or neither cert/key files In
# "client_cert" must have both or neither cert/key files. In
# either case, Alert the user when both are expected, but any are
# missing.
@@ -71,7 +71,7 @@ class TLSConfig(object):
tls_cert, tls_key = client_cert
except ValueError:
raise errors.TLSParameterError(
'client_config must be a tuple of'
'client_cert must be a tuple of'
' (client certificate, key file)'
)
@@ -79,7 +79,7 @@ class TLSConfig(object):
not os.path.isfile(tls_key)):
raise errors.TLSParameterError(
'Path to a certificate and key files must be provided'
' through the client_config param'
' through the client_cert param'
)
self.cert = (tls_cert, tls_key)
@@ -88,7 +88,7 @@ class TLSConfig(object):
self.ca_cert = ca_cert
if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
raise errors.TLSParameterError(
'Invalid CA certificate provided for `tls_ca_cert`.'
'Invalid CA certificate provided for `ca_cert`.'
)
def configure_client(self, client):
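
Since the corrected messages refer to the client_cert and ca_cert parameters, a brief hedged sketch of a TLS-enabled client; the paths and host are placeholders:

    import docker

    tls_config = docker.tls.TLSConfig(
        client_cert=('/certs/cert.pem', '/certs/key.pem'),  # both files must exist
        ca_cert='/certs/ca.pem',                            # checked when verify is on
        verify=True,
    )
    client = docker.DockerClient(base_url='tcp://dockerhost:2376', tls=tls_config)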

View File

@@ -1,5 +1,7 @@
# flake8: noqa
from .containers import ContainerConfig, HostConfig, LogConfig, Ulimit
from .containers import (
ContainerConfig, HostConfig, LogConfig, Ulimit, DeviceRequest
)
from .daemon import CancellableStream
from .healthcheck import Healthcheck
from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig

View File

@@ -154,6 +154,104 @@ class Ulimit(DictType):
self['Hard'] = value
class DeviceRequest(DictType):
"""
Create a device request to be used with
:py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
Args:
driver (str): Which driver to use for this device. Optional.
count (int): Number of devices to request. Optional.
Set to -1 to request all available devices.
device_ids (list): List of strings for device IDs. Optional.
Set either ``count`` or ``device_ids``.
capabilities (list): List of lists of strings to request
capabilities. Optional. The global list acts like an OR,
and the sub-lists are AND. The driver will try to satisfy
one of the sub-lists.
Available capabilities for the ``nvidia`` driver can be found
`here <https://github.com/NVIDIA/nvidia-container-runtime>`_.
options (dict): Driver-specific options. Optional.
"""
def __init__(self, **kwargs):
driver = kwargs.get('driver', kwargs.get('Driver'))
count = kwargs.get('count', kwargs.get('Count'))
device_ids = kwargs.get('device_ids', kwargs.get('DeviceIDs'))
capabilities = kwargs.get('capabilities', kwargs.get('Capabilities'))
options = kwargs.get('options', kwargs.get('Options'))
if driver is None:
driver = ''
elif not isinstance(driver, six.string_types):
raise ValueError('DeviceRequest.driver must be a string')
if count is None:
count = 0
elif not isinstance(count, int):
raise ValueError('DeviceRequest.count must be an integer')
if device_ids is None:
device_ids = []
elif not isinstance(device_ids, list):
raise ValueError('DeviceRequest.device_ids must be a list')
if capabilities is None:
capabilities = []
elif not isinstance(capabilities, list):
raise ValueError('DeviceRequest.capabilities must be a list')
if options is None:
options = {}
elif not isinstance(options, dict):
raise ValueError('DeviceRequest.options must be a dict')
super(DeviceRequest, self).__init__({
'Driver': driver,
'Count': count,
'DeviceIDs': device_ids,
'Capabilities': capabilities,
'Options': options
})
@property
def driver(self):
return self['Driver']
@driver.setter
def driver(self, value):
self['Driver'] = value
@property
def count(self):
return self['Count']
@count.setter
def count(self, value):
self['Count'] = value
@property
def device_ids(self):
return self['DeviceIDs']
@device_ids.setter
def device_ids(self, value):
self['DeviceIDs'] = value
@property
def capabilities(self):
return self['Capabilities']
@capabilities.setter
def capabilities(self, value):
self['Capabilities'] = value
@property
def options(self):
return self['Options']
@options.setter
def options(self, value):
self['Options'] = value
class HostConfig(dict):
def __init__(self, version, binds=None, port_bindings=None,
lxc_conf=None, publish_all_ports=False, links=None,
@@ -176,7 +274,7 @@ class HostConfig(dict):
volume_driver=None, cpu_count=None, cpu_percent=None,
nano_cpus=None, cpuset_mems=None, runtime=None, mounts=None,
cpu_rt_period=None, cpu_rt_runtime=None,
device_cgroup_rules=None):
device_cgroup_rules=None, device_requests=None):
if mem_limit is not None:
self['Memory'] = parse_bytes(mem_limit)
@@ -536,6 +634,19 @@ class HostConfig(dict):
)
self['DeviceCgroupRules'] = device_cgroup_rules
if device_requests is not None:
if version_lt(version, '1.40'):
raise host_config_version_error('device_requests', '1.40')
if not isinstance(device_requests, list):
raise host_config_type_error(
'device_requests', device_requests, 'list'
)
self['DeviceRequests'] = []
for req in device_requests:
if not isinstance(req, DeviceRequest):
req = DeviceRequest(**req)
self['DeviceRequests'].append(req)
def host_config_type_error(param, param_value, expected):
error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
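
A hedged sketch of how device_requests flows through create_host_config on the low-level client; plain dicts are coerced into DeviceRequest instances as in the loop above (the driver, capability, and device-ID values are illustrative):

    import docker
    from docker.types import DeviceRequest

    client = docker.APIClient(version='1.40')   # DeviceRequests need API >= 1.40
    host_config = client.create_host_config(
        device_requests=[
            DeviceRequest(driver='nvidia', count=-1,
                          capabilities=[['gpu', 'utility']]),
            {'device_ids': ['GPU-0000']},       # dicts are converted too
        ]
    )
    container = client.create_container('busybox', 'true', host_config=host_config)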

View File

@@ -4,7 +4,7 @@ from ..utils import normalize_links, version_lt
class EndpointConfig(dict):
def __init__(self, version, aliases=None, links=None, ipv4_address=None,
ipv6_address=None, link_local_ips=None):
ipv6_address=None, link_local_ips=None, driver_opt=None):
if version_lt(version, '1.22'):
raise errors.InvalidVersion(
'Endpoint config is not supported for API version < 1.22'
@@ -33,6 +33,15 @@ class EndpointConfig(dict):
if ipam_config:
self['IPAMConfig'] = ipam_config
if driver_opt:
if version_lt(version, '1.32'):
raise errors.InvalidVersion(
'DriverOpts is not supported for API version < 1.32'
)
if not isinstance(driver_opt, dict):
raise TypeError('driver_opt must be a dictionary')
self['DriverOpts'] = driver_opt
class NetworkingConfig(dict):
def __init__(self, endpoints_config=None):
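
A hedged sketch of supplying DriverOpts at container-creation time via an endpoint config, along the lines of the integration test added further down; the option key is illustrative:

    import docker

    client = docker.APIClient(version='1.40')
    networking_config = client.create_networking_config({
        'bridge': client.create_endpoint_config(
            driver_opt={'com.example.setting': 'on'}   # requires API >= 1.32
        )
    })
    container = client.create_container(
        'busybox', 'top',
        networking_config=networking_config,
        host_config=client.create_host_config(network_mode='bridge'),
    )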

View File

@@ -1,2 +1,2 @@
version = "4.2.2"
version = "4.3.0"
version_info = tuple([int(d) for d in version.split("-")[0].split(".")])

View File

@@ -1,6 +1,21 @@
Change log
==========
4.3.0
-----
[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/64?closed=1)
### Features
- Add `DeviceRequest` type to expose host resources such as GPUs
- Add support for `DriverOpts` in EndpointConfig
- Disable compression by default when using container.get_archive method
### Miscellaneous
- Update default API version to v1.39
- Update test engine version to 19.03.12
4.2.2
-----
@@ -36,7 +51,6 @@ Change log
- Add support for docker contexts through `docker.ContextAPI`
4.1.0
-----
@@ -130,7 +144,7 @@ Change log
### Bugfixes
* Fix base_url to keep TCP protocol on utils.py by letting the responsability of changing the
* Fix base_url to keep TCP protocol on utils.py by letting the responsibility of changing the
protocol to `parse_host` afterwards, letting `base_url` with the original value.
* XFAIL test_attach_stream_and_cancel on TLS
@@ -1234,7 +1248,7 @@ like the others
(`Client.volumes`, `Client.create_volume`, `Client.inspect_volume`,
`Client.remove_volume`).
* Added support for the `group_add` parameter in `create_host_config`.
* Added support for the CPU CFS (`cpu_quota` and `cpu_period`) parameteres
* Added support for the CPU CFS (`cpu_quota` and `cpu_period`) parameters
in `create_host_config`.
* Added support for the archive API endpoint (`Client.get_archive`,
`Client.put_archive`).

View File

@@ -11,8 +11,7 @@ paramiko==2.4.2
pycparser==2.17
pyOpenSSL==18.0.0
pyparsing==2.2.0
pypiwin32==219; sys_platform == 'win32' and python_version < '3.6'
pypiwin32==223; sys_platform == 'win32' and python_version >= '3.6'
pywin32==227; sys_platform == 'win32'
requests==2.20.0
six==1.10.0
urllib3==1.24.3

View File

@@ -24,10 +24,7 @@ extras_require = {
':python_version < "3.3"': 'ipaddress >= 1.0.16',
# win32 APIs if on Windows (required for npipe support)
# Python 3.6 is only compatible with v220 ; Python < 3.5 is not supported
# on v220 ; ALL versions are broken for v222 (as of 2018-01-26)
':sys_platform == "win32" and python_version < "3.6"': 'pypiwin32==219',
':sys_platform == "win32" and python_version >= "3.6"': 'pypiwin32==223',
':sys_platform == "win32"': 'pywin32==227',
# If using docker-py over TLS, highly recommend this option is
# pip-installed or pinned.

View File

@@ -4,7 +4,7 @@ FROM python:${PYTHON_VERSION}
ARG APT_MIRROR
RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \
&& sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list
&& sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list
RUN apt-get update && apt-get -y install \
gnupg2 \

View File

@@ -279,7 +279,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
expected_msgs = [
"logger: no log driver named 'asdf' is registered",
"looking up logging plugin asdf: plugin \"asdf\" not found",
"error looking up logging plugin asdf: plugin \"asdf\" not found",
]
with pytest.raises(docker.errors.APIError) as excinfo:
# raises an internal server error 500

View File

@@ -275,6 +275,27 @@ class TestNetworks(BaseAPIIntegrationTest):
assert 'LinkLocalIPs' in net_cfg['IPAMConfig']
assert net_cfg['IPAMConfig']['LinkLocalIPs'] == ['169.254.8.8']
@requires_api_version('1.32')
def test_create_with_driveropt(self):
container = self.client.create_container(
TEST_IMG, 'top',
networking_config=self.client.create_networking_config(
{
'bridge': self.client.create_endpoint_config(
driver_opt={'com.docker-py.setting': 'on'}
)
}
),
host_config=self.client.create_host_config(network_mode='bridge')
)
self.tmp_containers.append(container)
self.client.start(container)
container_data = self.client.inspect_container(container)
net_cfg = container_data['NetworkSettings']['Networks']['bridge']
assert 'DriverOpts' in net_cfg
assert 'com.docker-py.setting' in net_cfg['DriverOpts']
assert net_cfg['DriverOpts']['com.docker-py.setting'] == 'on'
@requires_api_version('1.22')
def test_create_with_links(self):
net_name, net_id = self.create_network()

View File

@@ -5,6 +5,7 @@ import json
import signal
import docker
from docker.api import APIClient
import pytest
import six
@@ -12,7 +13,7 @@ from . import fake_api
from ..helpers import requires_api_version
from .api_test import (
BaseAPIClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
fake_inspect_container
fake_inspect_container, url_base
)
try:
@@ -767,6 +768,67 @@ class CreateContainerTest(BaseAPIClientTest):
assert args[1]['headers'] == {'Content-Type': 'application/json'}
assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_device_requests(self):
client = APIClient(version='1.40')
fake_api.fake_responses.setdefault(
'{0}/v1.40/containers/create'.format(fake_api.prefix),
fake_api.post_fake_create_container,
)
client.create_container(
'busybox', 'true', host_config=client.create_host_config(
device_requests=[
{
'device_ids': [
'0',
'GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a'
]
},
{
'driver': 'nvidia',
'Count': -1,
'capabilities': [
['gpu', 'utility']
],
'options': {
'key': 'value'
}
}
]
)
)
args = fake_request.call_args
assert args[0][1] == url_base + 'v1.40/' + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = client.create_host_config()
expected_payload['HostConfig']['DeviceRequests'] = [
{
'Driver': '',
'Count': 0,
'DeviceIDs': [
'0',
'GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a'
],
'Capabilities': [],
'Options': {}
},
{
'Driver': 'nvidia',
'Count': -1,
'DeviceIDs': [],
'Capabilities': [
['gpu', 'utility']
],
'Options': {
'key': 'value'
}
}
]
assert json.loads(args[1]['data']) == expected_payload
assert args[1]['headers']['Content-Type'] == 'application/json'
assert set(args[1]['headers']) <= {'Content-Type', 'User-Agent'}
assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_labels_dict(self):
labels_dict = {
six.text_type('foo'): six.text_type('1'),

View File

@@ -136,7 +136,8 @@ class NetworkTest(BaseAPIClientTest):
container={'Id': container_id},
net_id=network_id,
aliases=['foo', 'bar'],
links=[('baz', 'quux')]
links=[('baz', 'quux')],
driver_opt={'com.docker-py.setting': 'yes'},
)
assert post.call_args[0][0] == (
@@ -148,6 +149,7 @@ class NetworkTest(BaseAPIClientTest):
'EndpointConfig': {
'Aliases': ['foo', 'bar'],
'Links': ['baz:quux'],
'DriverOpts': {'com.docker-py.setting': 'yes'},
},
}

View File

@@ -450,7 +450,7 @@ class ContainerTest(unittest.TestCase):
container = client.containers.get(FAKE_CONTAINER_ID)
container.get_archive('foo')
client.api.get_archive.assert_called_with(
FAKE_CONTAINER_ID, 'foo', DEFAULT_DATA_CHUNK_SIZE
FAKE_CONTAINER_ID, 'foo', DEFAULT_DATA_CHUNK_SIZE, False
)
def test_image(self):

View File

@@ -335,7 +335,7 @@ class ExcludePathsTest(unittest.TestCase):
# Dockerignore reference stipulates that absolute paths are
# equivalent to relative paths, hence /../foo should be
# equivalent to ../foo. It also stipulates that paths are run
# through Go's filepath.Clean, which explicitely "replace
# through Go's filepath.Clean, which explicitly "replace
# "/.." by "/" at the beginning of a path".
assert exclude_paths(
base,