mirror of https://github.com/docker/docker-py.git
commit e045331e32
Dockerfile-docs (new file)
@@ -0,0 +1,9 @@
FROM python:2.7

RUN mkdir /home/docker-py
WORKDIR /home/docker-py

COPY docs-requirements.txt /home/docker-py/docs-requirements.txt
RUN pip install -r docs-requirements.txt

COPY . /home/docker-py
LICENSE
@@ -176,18 +176,7 @@

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]
Copyright 2016 Docker, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
Makefile
@@ -1,48 +1,77 @@
.PHONY: all build test integration-test unit-test build-py3 unit-test-py3 integration-test-py3
.PHONY: all
all: test

.PHONY: clean
clean:
	rm -rf tests/__pycache__
	rm -rf tests/*/__pycache__
	docker rm -vf dpy-dind
	-docker rm -vf dpy-dind
	find -name "__pycache__" | xargs rm -rf

.PHONY: build
build:
	docker build -t docker-py .

.PHONY: build-py3
build-py3:
	docker build -t docker-py3 -f Dockerfile-py3 .

.PHONY: build-docs
build-docs:
	docker build -t docker-py-docs -f Dockerfile-docs .

.PHONY: build-dind-certs
build-dind-certs:
	docker build -t dpy-dind-certs -f tests/Dockerfile-dind-certs .

.PHONY: test
test: flake8 unit-test unit-test-py3 integration-dind integration-dind-ssl

.PHONY: unit-test
unit-test: build
	docker run docker-py py.test tests/unit

.PHONY: unit-test-py3
unit-test-py3: build-py3
	docker run docker-py3 py.test tests/unit

.PHONY: integration-test
integration-test: build
	docker run -v /var/run/docker.sock:/var/run/docker.sock docker-py py.test tests/integration

.PHONY: integration-test-py3
integration-test-py3: build-py3
	docker run -v /var/run/docker.sock:/var/run/docker.sock docker-py3 py.test tests/integration

.PHONY: integration-dind
integration-dind: build build-py3
	docker rm -vf dpy-dind || :
	docker run -d --name dpy-dind --privileged dockerswarm/dind:1.10.3 docker daemon -H tcp://0.0.0.0:2375
	docker run --env="DOCKER_HOST=tcp://docker:2375" --link=dpy-dind:docker docker-py py.test tests/integration
	docker run --env="DOCKER_HOST=tcp://docker:2375" --link=dpy-dind:docker docker-py3 py.test tests/integration
	docker run -d --name dpy-dind --privileged dockerswarm/dind:1.12.0 docker daemon\
		-H tcp://0.0.0.0:2375
	docker run --env="DOCKER_HOST=tcp://docker:2375" --link=dpy-dind:docker docker-py\
		py.test tests/integration
	docker run --env="DOCKER_HOST=tcp://docker:2375" --link=dpy-dind:docker docker-py3\
		py.test tests/integration
	docker rm -vf dpy-dind

.PHONY: integration-dind-ssl
integration-dind-ssl: build-dind-certs build build-py3
	docker run -d --name dpy-dind-certs dpy-dind-certs
	docker run -d --env="DOCKER_HOST=tcp://localhost:2375" --env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs" --volumes-from dpy-dind-certs --name dpy-dind-ssl -v /tmp --privileged dockerswarm/dind:1.10.3 docker daemon --tlsverify --tlscacert=/certs/ca.pem --tlscert=/certs/server-cert.pem --tlskey=/certs/server-key.pem -H tcp://0.0.0.0:2375
	docker run --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375" --env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs" --link=dpy-dind-ssl:docker docker-py py.test tests/integration
	docker run --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375" --env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs" --link=dpy-dind-ssl:docker docker-py3 py.test tests/integration
	docker run -d --env="DOCKER_HOST=tcp://localhost:2375" --env="DOCKER_TLS_VERIFY=1"\
		--env="DOCKER_CERT_PATH=/certs" --volumes-from dpy-dind-certs --name dpy-dind-ssl\
		-v /tmp --privileged dockerswarm/dind:1.12.0 docker daemon --tlsverify\
		--tlscacert=/certs/ca.pem --tlscert=/certs/server-cert.pem\
		--tlskey=/certs/server-key.pem -H tcp://0.0.0.0:2375
	docker run --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375"\
		--env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs"\
		--link=dpy-dind-ssl:docker docker-py py.test tests/integration
	docker run --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375"\
		--env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs"\
		--link=dpy-dind-ssl:docker docker-py3 py.test tests/integration
	docker rm -vf dpy-dind-ssl dpy-dind-certs

.PHONY: flake8
flake8: build
	docker run docker-py flake8 docker tests

.PHONY: docs
docs: build-docs
	docker run -v `pwd`/docs:/home/docker-py/docs/ -p 8000:8000 docker-py-docs mkdocs serve -a 0.0.0.0:8000
docker/__init__.py
@@ -1,17 +1,3 @@
# Copyright 2013 dotCloud inc.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .version import version, version_info

__version__ = version
docker/api/__init__.py
@@ -4,5 +4,7 @@ from .container import ContainerApiMixin
from .daemon import DaemonApiMixin
from .exec_api import ExecApiMixin
from .image import ImageApiMixin
from .volume import VolumeApiMixin
from .network import NetworkApiMixin
from .service import ServiceApiMixin
from .swarm import SwarmApiMixin
from .volume import VolumeApiMixin
docker/api/build.py
@@ -18,7 +18,8 @@ class BuildApiMixin(object):
              custom_context=False, encoding=None, pull=False,
              forcerm=False, dockerfile=None, container_limits=None,
              decode=False, buildargs=None, gzip=False):
        remote = context = headers = None
        remote = context = None
        headers = {}
        container_limits = container_limits or {}
        if path is None and fileobj is None:
            raise TypeError("Either path or fileobj needs to be provided.")

@@ -134,8 +135,7 @@ class BuildApiMixin(object):
                    ', '.join(repr(k) for k in self._auth_configs.keys())
                )
            )
        if headers is None:
            headers = {}

        if utils.compare_version('1.19', self._version) >= 0:
            headers['X-Registry-Config'] = auth.encode_header(
                self._auth_configs
docker/api/container.py
@@ -15,12 +15,18 @@ class ContainerApiMixin(object):
            'logs': logs and 1 or 0,
            'stdout': stdout and 1 or 0,
            'stderr': stderr and 1 or 0,
            'stream': stream and 1 or 0,
            'stream': stream and 1 or 0
        }
        u = self._url("/containers/{0}/attach", container)
        response = self._post(u, params=params, stream=stream)

        return self._get_result(container, stream, response)
        headers = {
            'Connection': 'Upgrade',
            'Upgrade': 'tcp'
        }

        u = self._url("/containers/{0}/attach", container)
        response = self._post(u, headers=headers, params=params, stream=stream)

        return self._read_from_socket(response, stream)

    @utils.check_resource
    def attach_socket(self, container, params=None, ws=False):

@@ -34,9 +40,18 @@ class ContainerApiMixin(object):
        if ws:
            return self._attach_websocket(container, params)

        headers = {
            'Connection': 'Upgrade',
            'Upgrade': 'tcp'
        }

        u = self._url("/containers/{0}/attach", container)
        return self._get_raw_response_socket(self.post(
            u, None, params=self._attach_params(params), stream=True))
        return self._get_raw_response_socket(
            self.post(
                u, None, params=self._attach_params(params), stream=True,
                headers=headers
            )
        )

    @utils.check_resource
    def commit(self, container, repository=None, tag=None, message=None,
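Not part of the diff — a minimal sketch of the reworked attach(), assuming a reachable local daemon; the throwaway busybox container is illustrative:

    from docker import Client

    cli = Client(base_url='unix://var/run/docker.sock')
    container = cli.create_container('busybox', 'echo hello')
    cli.start(container)
    # With stream=True the call now returns the frame generator built by
    # _read_from_socket(); with stream=False the frames are joined into bytes.
    for chunk in cli.attach(container, stream=True, logs=True):
        print(chunk)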
docker/api/exec_api.py
@@ -56,8 +56,6 @@ class ExecApiMixin(object):
    def exec_start(self, exec_id, detach=False, tty=False, stream=False,
                   socket=False):
        # we want opened socket if socket == True
        if socket:
            stream = True
        if isinstance(exec_id, dict):
            exec_id = exec_id.get('Id')

@@ -66,10 +64,18 @@ class ExecApiMixin(object):
            'Detach': detach
        }

        headers = {} if detach else {
            'Connection': 'Upgrade',
            'Upgrade': 'tcp'
        }

        res = self._post_json(
            self._url('/exec/{0}/start', exec_id), data=data, stream=stream
            self._url('/exec/{0}/start', exec_id),
            headers=headers,
            data=data,
            stream=True
        )

        if socket:
            return self._get_raw_response_socket(res)
        return self._get_result_tty(stream, res, tty)
        return self._read_from_socket(res, stream)
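Not part of the diff — a sketch of exec_start() after the change; the running container name 'app' is hypothetical:

    from docker import Client

    cli = Client(base_url='unix://var/run/docker.sock')
    exec_id = cli.exec_create('app', cmd='ls -l /')
    output = cli.exec_start(exec_id)  # frames joined into one bytestring
    # socket=True hands back the raw upgraded socket instead of reading it:
    sock = cli.exec_start(cli.exec_create('app', cmd='cat', stdin=True),
                          socket=True)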
docker/api/image.py
@@ -1,4 +1,5 @@
import logging
import os
import six
import warnings

@@ -42,87 +43,79 @@ class ImageApiMixin(object):
            return [x['Id'] for x in res]
        return res

    def import_image(self, src=None, repository=None, tag=None, image=None):
        if src:
            if isinstance(src, six.string_types):
                try:
                    result = self.import_image_from_file(
                        src, repository=repository, tag=tag)
                except IOError:
                    result = self.import_image_from_url(
                        src, repository=repository, tag=tag)
            else:
                result = self.import_image_from_data(
                    src, repository=repository, tag=tag)
        elif image:
            result = self.import_image_from_image(
                image, repository=repository, tag=tag)
        else:
            raise Exception("Must specify a src or image")
    def import_image(self, src=None, repository=None, tag=None, image=None,
                     changes=None, stream_src=False):
        if not (src or image):
            raise errors.DockerException(
                'Must specify src or image to import from'
            )
        u = self._url('/images/create')

        return result
        params = _import_image_params(
            repository, tag, image,
            src=(src if isinstance(src, six.string_types) else None),
            changes=changes
        )
        headers = {'Content-Type': 'application/tar'}

    def import_image_from_data(self, data, repository=None, tag=None):
        u = self._url("/images/create")
        params = {
            'fromSrc': '-',
            'repo': repository,
            'tag': tag
        }
        headers = {
            'Content-Type': 'application/tar',
        }
        return self._result(
            self._post(u, data=data, params=params, headers=headers))

    def import_image_from_file(self, filename, repository=None, tag=None):
        u = self._url("/images/create")
        params = {
            'fromSrc': '-',
            'repo': repository,
            'tag': tag
        }
        headers = {
            'Content-Type': 'application/tar',
        }
        with open(filename, 'rb') as f:
        if image or params.get('fromSrc') != '-':  # from image or URL
            return self._result(
                self._post(u, data=f, params=params, headers=headers,
                           timeout=None))
                self._post(u, data=None, params=params)
            )
        elif isinstance(src, six.string_types):  # from file path
            with open(src, 'rb') as f:
                return self._result(
                    self._post(
                        u, data=f, params=params, headers=headers, timeout=None
                    )
                )
        else:  # from raw data
            if stream_src:
                headers['Transfer-Encoding'] = 'chunked'
            return self._result(
                self._post(u, data=src, params=params, headers=headers)
            )

    def import_image_from_stream(self, stream, repository=None, tag=None):
        u = self._url("/images/create")
        params = {
            'fromSrc': '-',
            'repo': repository,
            'tag': tag
        }
        headers = {
            'Content-Type': 'application/tar',
            'Transfer-Encoding': 'chunked',
        }
    def import_image_from_data(self, data, repository=None, tag=None,
                               changes=None):
        u = self._url('/images/create')
        params = _import_image_params(
            repository, tag, src='-', changes=changes
        )
        headers = {'Content-Type': 'application/tar'}
        return self._result(
            self._post(u, data=stream, params=params, headers=headers))
            self._post(
                u, data=data, params=params, headers=headers, timeout=None
            )
        )
        return self.import_image(
            src=data, repository=repository, tag=tag, changes=changes
        )

    def import_image_from_url(self, url, repository=None, tag=None):
        u = self._url("/images/create")
        params = {
            'fromSrc': url,
            'repo': repository,
            'tag': tag
        }
        return self._result(
            self._post(u, data=None, params=params))
    def import_image_from_file(self, filename, repository=None, tag=None,
                               changes=None):
        return self.import_image(
            src=filename, repository=repository, tag=tag, changes=changes
        )

    def import_image_from_image(self, image, repository=None, tag=None):
        u = self._url("/images/create")
        params = {
            'fromImage': image,
            'repo': repository,
            'tag': tag
        }
        return self._result(
            self._post(u, data=None, params=params))
    def import_image_from_stream(self, stream, repository=None, tag=None,
                                 changes=None):
        return self.import_image(
            src=stream, stream_src=True, repository=repository, tag=tag,
            changes=changes
        )

    def import_image_from_url(self, url, repository=None, tag=None,
                              changes=None):
        return self.import_image(
            src=url, repository=repository, tag=tag, changes=changes
        )

    def import_image_from_image(self, image, repository=None, tag=None,
                                changes=None):
        return self.import_image(
            image=image, repository=repository, tag=tag, changes=changes
        )

    @utils.check_resource
    def insert(self, image, url, path):

@@ -166,28 +159,10 @@ class ImageApiMixin(object):
        headers = {}

        if utils.compare_version('1.5', self._version) >= 0:
            # If we don't have any auth data so far, try reloading the config
            # file one more time in case anything showed up in there.
            if auth_config is None:
                log.debug('Looking for auth config')
                if not self._auth_configs:
                    log.debug(
                        "No auth config in memory - loading from filesystem"
                    )
                    self._auth_configs = auth.load_config()
                authcfg = auth.resolve_authconfig(self._auth_configs, registry)
                # Do not fail here if no authentication exists for this
                # specific registry as we can have a readonly pull. Just
                # put the header if we can.
                if authcfg:
                    log.debug('Found auth config')
                    # auth_config needs to be a dict in the format used by
                    # auth.py username , password, serveraddress, email
                    headers['X-Registry-Auth'] = auth.encode_header(
                        authcfg
                    )
                else:
                    log.debug('No auth config found')
                header = auth.get_config_header(self, registry)
                if header:
                    headers['X-Registry-Auth'] = header
            else:
                log.debug('Sending supplied auth config')
                headers['X-Registry-Auth'] = auth.encode_header(auth_config)

@@ -205,7 +180,7 @@ class ImageApiMixin(object):
        return self._result(response)

    def push(self, repository, tag=None, stream=False,
             insecure_registry=False, decode=False):
             insecure_registry=False, auth_config=None, decode=False):
        if insecure_registry:
            warnings.warn(
                INSECURE_REGISTRY_DEPRECATION_WARNING.format('push()'),

@@ -222,17 +197,13 @@ class ImageApiMixin(object):
        headers = {}

        if utils.compare_version('1.5', self._version) >= 0:
            # If we don't have any auth data so far, try reloading the config
            # file one more time in case anything showed up in there.
            if not self._auth_configs:
                self._auth_configs = auth.load_config()
            authcfg = auth.resolve_authconfig(self._auth_configs, registry)

            # Do not fail here if no authentication exists for this specific
            # registry as we can have a readonly pull. Just put the header if
            # we can.
            if authcfg:
                headers['X-Registry-Auth'] = auth.encode_header(authcfg)
            if auth_config is None:
                header = auth.get_config_header(self, registry)
                if header:
                    headers['X-Registry-Auth'] = header
            else:
                log.debug('Sending supplied auth config')
                headers['X-Registry-Auth'] = auth.encode_header(auth_config)

        response = self._post_json(
            u, None, headers=headers, stream=stream, params=params

@@ -268,3 +239,32 @@ class ImageApiMixin(object):
        res = self._post(url, params=params)
        self._raise_for_status(res)
        return res.status_code == 201


def is_file(src):
    try:
        return (
            isinstance(src, six.string_types) and
            os.path.isfile(src)
        )
    except TypeError:  # a data string will make isfile() raise a TypeError
        return False


def _import_image_params(repo, tag, image=None, src=None,
                         changes=None):
    params = {
        'repo': repo,
        'tag': tag,
    }
    if image:
        params['fromImage'] = image
    elif src and not is_file(src):
        params['fromSrc'] = src
    else:
        params['fromSrc'] = '-'

    if changes:
        params['changes'] = changes

    return params
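Not part of the diff — how the consolidated import helpers are meant to be called; the tarball paths and repository names are hypothetical:

    from docker import Client

    cli = Client(base_url='unix://var/run/docker.sock')
    cli.import_image_from_url('http://example.com/rootfs.tar', repository='t/url')
    cli.import_image_from_file('/tmp/rootfs.tar', repository='t/file')
    with open('/tmp/rootfs.tar', 'rb') as f:
        cli.import_image_from_stream(f, repository='t/stream')
    # the new `changes` parameter applies Dockerfile instructions on import:
    cli.import_image(src='/tmp/rootfs.tar', repository='t/changed',
                     changes=['CMD ["/bin/sh"]'])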
docker/api/network.py
@@ -22,7 +22,8 @@ class NetworkApiMixin(object):

    @minimum_version('1.21')
    def create_network(self, name, driver=None, options=None, ipam=None,
                       check_duplicate=None, internal=False):
                       check_duplicate=None, internal=False, labels=None,
                       enable_ipv6=False):
        if options is not None and not isinstance(options, dict):
            raise TypeError('options must be a dictionary')

@@ -34,6 +35,22 @@ class NetworkApiMixin(object):
            'CheckDuplicate': check_duplicate
        }

        if labels is not None:
            if version_lt(self._version, '1.23'):
                raise InvalidVersion(
                    'network labels were introduced in API 1.23'
                )
            if not isinstance(labels, dict):
                raise TypeError('labels must be a dictionary')
            data["Labels"] = labels

        if enable_ipv6:
            if version_lt(self._version, '1.23'):
                raise InvalidVersion(
                    'enable_ipv6 was introduced in API 1.23'
                )
            data['EnableIPv6'] = True

        if internal:
            if version_lt(self._version, '1.22'):
                raise InvalidVersion('Internal networks are not '

@@ -60,12 +77,13 @@ class NetworkApiMixin(object):
    @minimum_version('1.21')
    def connect_container_to_network(self, container, net_id,
                                     ipv4_address=None, ipv6_address=None,
                                     aliases=None, links=None):
                                     aliases=None, links=None,
                                     link_local_ips=None):
        data = {
            "Container": container,
            "EndpointConfig": self.create_endpoint_config(
                aliases=aliases, links=links, ipv4_address=ipv4_address,
                ipv6_address=ipv6_address
                ipv6_address=ipv6_address, link_local_ips=link_local_ips
            ),
        }

@@ -75,8 +93,15 @@ class NetworkApiMixin(object):

    @check_resource
    @minimum_version('1.21')
    def disconnect_container_from_network(self, container, net_id):
        data = {"container": container}
    def disconnect_container_from_network(self, container, net_id,
                                          force=False):
        data = {"Container": container}
        if force:
            if version_lt(self._version, '1.22'):
                raise InvalidVersion(
                    'Forced disconnect was introduced in API 1.22'
                )
            data['Force'] = force
        url = self._url("/networks/{0}/disconnect", net_id)
        res = self._post_json(url, data=data)
        self._raise_for_status(res)
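Not part of the diff — a sketch of the new network options; labels/enable_ipv6 need API >= 1.23 and forced disconnect needs >= 1.22, and the addresses are illustrative:

    from docker import Client

    cli = Client(base_url='unix://var/run/docker.sock')
    net = cli.create_network('backend', driver='bridge',
                             labels={'env': 'test'}, enable_ipv6=False)
    container = cli.create_container('busybox', 'top')
    cli.connect_container_to_network(container, net['Id'],
                                     link_local_ips=['169.254.8.8'])
    cli.disconnect_container_from_network(container, net['Id'], force=True)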
docker/api/service.py (new file)
@@ -0,0 +1,105 @@
from .. import errors
from .. import utils
from ..auth import auth


class ServiceApiMixin(object):
    @utils.minimum_version('1.24')
    def create_service(
            self, task_template, name=None, labels=None, mode=None,
            update_config=None, networks=None, endpoint_config=None
    ):
        url = self._url('/services/create')
        headers = {}
        image = task_template.get('ContainerSpec', {}).get('Image', None)
        if image is None:
            raise errors.DockerException(
                'Missing mandatory Image key in ContainerSpec'
            )
        registry, repo_name = auth.resolve_repository_name(image)
        auth_header = auth.get_config_header(self, registry)
        if auth_header:
            headers['X-Registry-Auth'] = auth_header
        data = {
            'Name': name,
            'Labels': labels,
            'TaskTemplate': task_template,
            'Mode': mode,
            'UpdateConfig': update_config,
            'Networks': networks,
            'Endpoint': endpoint_config
        }
        return self._result(
            self._post_json(url, data=data, headers=headers), True
        )

    @utils.minimum_version('1.24')
    @utils.check_resource
    def inspect_service(self, service):
        url = self._url('/services/{0}', service)
        return self._result(self._get(url), True)

    @utils.minimum_version('1.24')
    @utils.check_resource
    def inspect_task(self, task):
        url = self._url('/tasks/{0}', task)
        return self._result(self._get(url), True)

    @utils.minimum_version('1.24')
    @utils.check_resource
    def remove_service(self, service):
        url = self._url('/services/{0}', service)
        resp = self._delete(url)
        self._raise_for_status(resp)
        return True

    @utils.minimum_version('1.24')
    def services(self, filters=None):
        params = {
            'filters': utils.convert_filters(filters) if filters else None
        }
        url = self._url('/services')
        return self._result(self._get(url, params=params), True)

    @utils.minimum_version('1.24')
    def tasks(self, filters=None):
        params = {
            'filters': utils.convert_filters(filters) if filters else None
        }
        url = self._url('/tasks')
        return self._result(self._get(url, params=params), True)

    @utils.minimum_version('1.24')
    @utils.check_resource
    def update_service(self, service, version, task_template=None, name=None,
                       labels=None, mode=None, update_config=None,
                       networks=None, endpoint_config=None):
        url = self._url('/services/{0}/update', service)
        data = {}
        headers = {}
        if name is not None:
            data['Name'] = name
        if labels is not None:
            data['Labels'] = labels
        if mode is not None:
            data['Mode'] = mode
        if task_template is not None:
            image = task_template.get('ContainerSpec', {}).get('Image', None)
            if image is not None:
                registry, repo_name = auth.resolve_repository_name(image)
                auth_header = auth.get_config_header(self, registry)
                if auth_header:
                    headers['X-Registry-Auth'] = auth_header
            data['TaskTemplate'] = task_template
        if update_config is not None:
            data['UpdateConfig'] = update_config
        if networks is not None:
            data['Networks'] = networks
        if endpoint_config is not None:
            data['Endpoint'] = endpoint_config

        resp = self._post_json(
            url, data=data, params={'version': version}, headers=headers
        )
        self._raise_for_status(resp)
        return True
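Not part of the diff — a usage sketch of the new service API against a swarm-mode manager (API >= 1.24); the service name and image are illustrative:

    from docker import Client
    from docker.types import ContainerSpec, TaskTemplate, UpdateConfig

    cli = Client(base_url='unix://var/run/docker.sock')
    task = TaskTemplate(container_spec=ContainerSpec(image='nginx:alpine'))
    svc = cli.create_service(task, name='web',
                             update_config=UpdateConfig(parallelism=1, delay=10))
    version = cli.inspect_service(svc['ID'])['Version']['Index']
    cli.update_service(svc['ID'], version, name='web2')
    cli.remove_service(svc['ID'])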
docker/api/swarm.py (new file)
@@ -0,0 +1,78 @@
from .. import utils
import logging
log = logging.getLogger(__name__)


class SwarmApiMixin(object):

    def create_swarm_spec(self, *args, **kwargs):
        return utils.SwarmSpec(*args, **kwargs)

    @utils.minimum_version('1.24')
    def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
                   force_new_cluster=False, swarm_spec=None):
        url = self._url('/swarm/init')
        if swarm_spec is not None and not isinstance(swarm_spec, dict):
            raise TypeError('swarm_spec must be a dictionary')
        data = {
            'AdvertiseAddr': advertise_addr,
            'ListenAddr': listen_addr,
            'ForceNewCluster': force_new_cluster,
            'Spec': swarm_spec,
        }
        response = self._post_json(url, data=data)
        self._raise_for_status(response)
        return True

    @utils.minimum_version('1.24')
    def inspect_swarm(self):
        url = self._url('/swarm')
        return self._result(self._get(url), True)

    @utils.check_resource
    @utils.minimum_version('1.24')
    def inspect_node(self, node_id):
        url = self._url('/nodes/{0}', node_id)
        return self._result(self._get(url), True)

    @utils.minimum_version('1.24')
    def join_swarm(self, remote_addrs, join_token, listen_addr=None,
                   advertise_addr=None):
        data = {
            "RemoteAddrs": remote_addrs,
            "ListenAddr": listen_addr,
            "JoinToken": join_token,
            "AdvertiseAddr": advertise_addr,
        }
        url = self._url('/swarm/join')
        response = self._post_json(url, data=data)
        self._raise_for_status(response)
        return True

    @utils.minimum_version('1.24')
    def leave_swarm(self, force=False):
        url = self._url('/swarm/leave')
        response = self._post(url, params={'force': force})
        self._raise_for_status(response)
        return True

    @utils.minimum_version('1.24')
    def nodes(self, filters=None):
        url = self._url('/nodes')
        params = {}
        if filters:
            params['filters'] = utils.convert_filters(filters)

        return self._result(self._get(url, params=params), True)

    @utils.minimum_version('1.24')
    def update_swarm(self, version, swarm_spec=None, rotate_worker_token=False,
                     rotate_manager_token=False):
        url = self._url('/swarm/update')
        response = self._post_json(url, data=swarm_spec, params={
            'rotateWorkerToken': rotate_worker_token,
            'rotateManagerToken': rotate_manager_token,
            'version': version
        })
        self._raise_for_status(response)
        return True
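Not part of the diff — a two-node sketch of the swarm lifecycle; the host addresses are hypothetical:

    from docker import Client

    manager = Client(base_url='tcp://manager:2375')
    spec = manager.create_swarm_spec(snapshot_interval=5000,
                                     log_entries_for_slow_followers=1200)
    manager.init_swarm(advertise_addr='eth0', listen_addr='0.0.0.0:2377',
                       swarm_spec=spec)
    tokens = manager.inspect_swarm()['JoinTokens']

    worker = Client(base_url='tcp://worker:2375')
    worker.join_swarm(remote_addrs=['manager:2377'],
                      join_token=tokens['Worker'])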
docker/api/volume.py
@@ -1,3 +1,4 @@
from .. import errors
from .. import utils


@@ -11,7 +12,7 @@ class VolumeApiMixin(object):
        return self._result(self._get(url, params=params), True)

    @utils.minimum_version('1.21')
    def create_volume(self, name, driver=None, driver_opts=None):
    def create_volume(self, name, driver=None, driver_opts=None, labels=None):
        url = self._url('/volumes/create')
        if driver_opts is not None and not isinstance(driver_opts, dict):
            raise TypeError('driver_opts must be a dictionary')

@@ -21,6 +22,16 @@ class VolumeApiMixin(object):
            'Driver': driver,
            'DriverOpts': driver_opts,
        }

        if labels is not None:
            if utils.compare_version('1.23', self._version) < 0:
                raise errors.InvalidVersion(
                    'volume labels were introduced in API 1.23'
                )
            if not isinstance(labels, dict):
                raise TypeError('labels must be a dictionary')
            data["Labels"] = labels

        return self._result(self._post_json(url, data=data), True)

    @utils.minimum_version('1.21')
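Not part of the diff — creating a labelled volume with the extended signature (labels need API >= 1.23); the names are illustrative:

    from docker import Client

    cli = Client(base_url='unix://var/run/docker.sock')
    vol = cli.create_volume(name='data', driver='local', driver_opts={},
                            labels={'owner': 'ci'})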
docker/auth/auth.py
@@ -1,22 +1,9 @@
# Copyright 2013 dotCloud inc.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import base64
import json
import logging
import os

import dockerpycreds
import six

from .. import errors

@@ -25,6 +12,7 @@ INDEX_NAME = 'docker.io'
INDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)
DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
TOKEN_USERNAME = '<token>'

log = logging.getLogger(__name__)

@@ -51,6 +39,26 @@ def resolve_index_name(index_name):
    return index_name


def get_config_header(client, registry):
    log.debug('Looking for auth config')
    if not client._auth_configs:
        log.debug(
            "No auth config in memory - loading from filesystem"
        )
        client._auth_configs = load_config()
    authcfg = resolve_authconfig(client._auth_configs, registry)
    # Do not fail here if no authentication exists for this
    # specific registry as we can have a readonly pull. Just
    # put the header if we can.
    if authcfg:
        log.debug('Found auth config')
        # auth_config needs to be a dict in the format used by
        # auth.py username , password, serveraddress, email
        return encode_header(authcfg)
    log.debug('No auth config found')
    return None


def split_repo_name(repo_name):
    parts = repo_name.split('/', 1)
    if len(parts) == 1 or (

@@ -68,6 +76,13 @@ def resolve_authconfig(authconfig, registry=None):
    with full URLs are stripped down to hostnames before checking for a match.
    Returns None if no match was found.
    """
    if 'credsStore' in authconfig:
        log.debug(
            'Using credentials store "{0}"'.format(authconfig['credsStore'])
        )
        return _resolve_authconfig_credstore(
            authconfig, registry, authconfig['credsStore']
        )
    # Default to the public index server
    registry = resolve_index_name(registry) if registry else INDEX_NAME
    log.debug("Looking for auth entry for {0}".format(repr(registry)))

@@ -85,6 +100,35 @@ def resolve_authconfig(authconfig, registry=None):
    return None


def _resolve_authconfig_credstore(authconfig, registry, credstore_name):
    if not registry or registry == INDEX_NAME:
        # The ecosystem is a little schizophrenic with index.docker.io VS
        # docker.io - in that case, it seems the full URL is necessary.
        registry = 'https://index.docker.io/v1/'
    log.debug("Looking for auth entry for {0}".format(repr(registry)))
    store = dockerpycreds.Store(credstore_name)
    try:
        data = store.get(registry)
        res = {
            'ServerAddress': registry,
        }
        if data['Username'] == TOKEN_USERNAME:
            res['IdentityToken'] = data['Secret']
        else:
            res.update({
                'Username': data['Username'],
                'Password': data['Secret'],
            })
        return res
    except dockerpycreds.CredentialsNotFound as e:
        log.debug('No entry found')
        return None
    except dockerpycreds.StoreError as e:
        raise errors.DockerException(
            'Credentials store error: {0}'.format(repr(e))
        )


def convert_to_hostname(url):
    return url.replace('http://', '').replace('https://', '').split('/', 1)[0]

@@ -160,18 +204,24 @@ def find_config_file(config_path=None):
        os.path.basename(DOCKER_CONFIG_FILENAME)
    ) if os.environ.get('DOCKER_CONFIG') else None

    paths = [
    paths = filter(None, [
        config_path,  # 1
        environment_path,  # 2
        os.path.join(os.path.expanduser('~'), DOCKER_CONFIG_FILENAME),  # 3
        os.path.join(
            os.path.expanduser('~'), LEGACY_DOCKER_CONFIG_FILENAME
        )  # 4
    ]
    ])

    log.debug("Trying paths: {0}".format(repr(paths)))

    for path in paths:
        if path and os.path.exists(path):
        if os.path.exists(path):
            log.debug("Found file at path: {0}".format(path))
            return path

    log.debug("No config file found")

    return None


@@ -186,7 +236,6 @@ def load_config(config_path=None):
    config_file = find_config_file(config_path)

    if not config_file:
        log.debug("File doesn't exist")
        return {}

    try:
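Not part of the diff — the config shape that routes resolve_authconfig() through a credentials store; this requires the named docker-credential-* helper on PATH, and the store name is illustrative:

    from docker.auth import auth

    config = {'credsStore': 'osxkeychain'}
    creds = auth.resolve_authconfig(config, registry='quay.io')
    # -> {'ServerAddress': ..., 'Username': ..., 'Password': ...}, or
    #    {'ServerAddress': ..., 'IdentityToken': ...} for token logins,
    #    or None if the store has no entry for the registry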
docker/client.py
@@ -1,19 +1,6 @@
# Copyright 2013 dotCloud inc.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import struct
from functools import partial

import requests
import requests.exceptions

@@ -29,6 +16,7 @@ from .ssladapter import ssladapter
from .tls import TLSConfig
from .transport import UnixAdapter
from .utils import utils, check_resource, update_headers, kwargs_from_env
from .utils.socket import frames_iter
try:
    from .transport import NpipeAdapter
except ImportError:

@@ -46,8 +34,10 @@ class Client(
        api.DaemonApiMixin,
        api.ExecApiMixin,
        api.ImageApiMixin,
        api.VolumeApiMixin,
        api.NetworkApiMixin):
        api.NetworkApiMixin,
        api.ServiceApiMixin,
        api.SwarmApiMixin,
        api.VolumeApiMixin):
    def __init__(self, base_url=None, version=None,
                 timeout=constants.DEFAULT_TIMEOUT_SECONDS, tls=False,
                 user_agent=constants.DEFAULT_USER_AGENT):

@@ -70,6 +60,7 @@ class Client(
        if base_url.startswith('http+unix://'):
            self._custom_adapter = UnixAdapter(base_url, timeout)
            self.mount('http+docker://', self._custom_adapter)
            self._unmount('http://', 'https://')
            self.base_url = 'http+docker://localunixsocket'
        elif base_url.startswith('npipe://'):
            if not constants.IS_WINDOWS_PLATFORM:

@@ -110,7 +101,8 @@ class Client(

    @classmethod
    def from_env(cls, **kwargs):
        return cls(**kwargs_from_env(**kwargs))
        version = kwargs.pop('version', None)
        return cls(version=version, **kwargs_from_env(**kwargs))

    def _retrieve_server_version(self):
        try:

@@ -155,7 +147,8 @@ class Client(
                    'instead'.format(arg, type(arg))
                )

        args = map(six.moves.urllib.parse.quote_plus, args)
        quote_f = partial(six.moves.urllib.parse.quote_plus, safe="/:")
        args = map(quote_f, args)

        if kwargs.get('versioned_api', True):
            return '{0}/v{1}{2}'.format(

@@ -250,12 +243,20 @@ class Client(
                if decode:
                    if six.PY3:
                        data = data.decode('utf-8')
                    data = json.loads(data)
                    yield data
                    # remove the trailing newline
                    data = data.strip()
                    # split the data at any newlines
                    data_list = data.split("\r\n")
                    # load and yield each line separately
                    for data in data_list:
                        data = json.loads(data)
                        yield data
                else:
                    yield data
        else:
            # Response isn't chunked, meaning we probably
            # encountered an error immediately
            yield self._result(response)
            yield self._result(response, json=decode)

    def _multiplexed_buffer_helper(self, response):
        """A generator of multiplexed data blocks read from a buffered

@@ -307,6 +308,14 @@ class Client(
        for out in response.iter_content(chunk_size=1, decode_unicode=True):
            yield out

    def _read_from_socket(self, response, stream):
        socket = self._get_raw_response_socket(response)

        if stream:
            return frames_iter(socket)
        else:
            return six.binary_type().join(frames_iter(socket))

    def _disable_socket_timeout(self, socket):
        """ Depending on the combination of python version and whether we're
        connecting over http or https, we might need to access _sock, which

@@ -360,6 +369,10 @@ class Client(
            [x for x in self._multiplexed_buffer_helper(res)]
        )

    def _unmount(self, *args):
        for proto in args:
            self.adapters.pop(proto)

    def get_adapter(self, url):
        try:
            return super(Client, self).get_adapter(url)
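Not part of the diff — from_env() now forwards an explicit API version instead of dropping it:

    import docker

    # DOCKER_HOST, DOCKER_TLS_VERIFY and DOCKER_CERT_PATH come from the
    # environment; version is passed through to the Client constructor.
    cli = docker.Client.from_env(version='1.24')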
docker/constants.py
@@ -1,7 +1,7 @@
import sys
from .version import version

DEFAULT_DOCKER_API_VERSION = '1.22'
DEFAULT_DOCKER_API_VERSION = '1.24'
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
CONTAINER_LIMITS_KEYS = [
docker/errors.py
@@ -1,16 +1,3 @@
# Copyright 2014 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import requests
docker/transport/npipesocket.py
@@ -94,7 +94,7 @@ class NpipeSocket(object):
        if mode.strip('b') != 'r':
            raise NotImplementedError()
        rawio = NpipeFileIOBase(self)
        if bufsize is None:
        if bufsize is None or bufsize < 0:
            bufsize = io.DEFAULT_BUFFER_SIZE
        return io.BufferedReader(rawio, buffer_size=bufsize)
docker/transport/unixconn.py
@@ -1,16 +1,3 @@
# Copyright 2013 dotCloud inc.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import requests.adapters
import socket
docker/types/__init__.py (new file)
@@ -0,0 +1,7 @@
# flake8: noqa
from .containers import LogConfig, Ulimit
from .services import (
    ContainerSpec, DriverConfig, Mount, Resources, RestartPolicy, TaskTemplate,
    UpdateConfig
)
from .swarm import SwarmSpec, SwarmExternalCA
docker/types/base.py (new file)
@@ -0,0 +1,7 @@
import six


class DictType(dict):
    def __init__(self, init):
        for k, v in six.iteritems(init):
            self[k] = v
docker/types/containers.py
@@ -1,5 +1,7 @@
import six

from .base import DictType


class LogConfigTypesEnum(object):
    _values = (

@@ -13,12 +15,6 @@ class LogConfigTypesEnum(object):
    JSON, SYSLOG, JOURNALD, GELF, FLUENTD, NONE = _values


class DictType(dict):
    def __init__(self, init):
        for k, v in six.iteritems(init):
            self[k] = v


class LogConfig(DictType):
    types = LogConfigTypesEnum
docker/types/services.py (new file)
@@ -0,0 +1,181 @@
import six

from .. import errors


class TaskTemplate(dict):
    def __init__(self, container_spec, resources=None, restart_policy=None,
                 placement=None, log_driver=None):
        self['ContainerSpec'] = container_spec
        if resources:
            self['Resources'] = resources
        if restart_policy:
            self['RestartPolicy'] = restart_policy
        if placement:
            self['Placement'] = placement
        if log_driver:
            self['LogDriver'] = log_driver

    @property
    def container_spec(self):
        return self.get('ContainerSpec')

    @property
    def resources(self):
        return self.get('Resources')

    @property
    def restart_policy(self):
        return self.get('RestartPolicy')

    @property
    def placement(self):
        return self.get('Placement')


class ContainerSpec(dict):
    def __init__(self, image, command=None, args=None, env=None, workdir=None,
                 user=None, labels=None, mounts=None, stop_grace_period=None):
        from ..utils import split_command  # FIXME: circular import

        self['Image'] = image

        if isinstance(command, six.string_types):
            command = split_command(command)
        self['Command'] = command
        self['Args'] = args

        if env is not None:
            self['Env'] = env
        if workdir is not None:
            self['Dir'] = workdir
        if user is not None:
            self['User'] = user
        if labels is not None:
            self['Labels'] = labels
        if mounts is not None:
            for mount in mounts:
                if isinstance(mount, six.string_types):
                    mounts.append(Mount.parse_mount_string(mount))
                    mounts.remove(mount)
            self['Mounts'] = mounts
        if stop_grace_period is not None:
            self['StopGracePeriod'] = stop_grace_period


class Mount(dict):
    def __init__(self, target, source, type='volume', read_only=False,
                 propagation=None, no_copy=False, labels=None,
                 driver_config=None):
        self['Target'] = target
        self['Source'] = source
        if type not in ('bind', 'volume'):
            raise errors.DockerError(
                'Only acceptable mount types are `bind` and `volume`.'
            )
        self['Type'] = type

        if type == 'bind':
            if propagation is not None:
                self['BindOptions'] = {
                    'Propagation': propagation
                }
            if any([labels, driver_config, no_copy]):
                raise errors.DockerError(
                    'Mount type is binding but volume options have been '
                    'provided.'
                )
        else:
            volume_opts = {}
            if no_copy:
                volume_opts['NoCopy'] = True
            if labels:
                volume_opts['Labels'] = labels
            if driver_config:
                volume_opts['driver_config'] = driver_config
            if volume_opts:
                self['VolumeOptions'] = volume_opts
            if propagation:
                raise errors.DockerError(
                    'Mount type is volume but `propagation` argument has been '
                    'provided.'
                )

    @classmethod
    def parse_mount_string(cls, string):
        parts = string.split(':')
        if len(parts) > 3:
            raise errors.DockerError(
                'Invalid mount format "{0}"'.format(string)
            )
        if len(parts) == 1:
            return cls(target=parts[0])
        else:
            target = parts[1]
            source = parts[0]
            read_only = not (len(parts) == 2 or parts[2] == 'rw')
            return cls(target, source, read_only=read_only)


class Resources(dict):
    def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None,
                 mem_reservation=None):
        limits = {}
        reservation = {}
        if cpu_limit is not None:
            limits['NanoCPUs'] = cpu_limit
        if mem_limit is not None:
            limits['MemoryBytes'] = mem_limit
        if cpu_reservation is not None:
            reservation['NanoCPUs'] = cpu_reservation
        if mem_reservation is not None:
            reservation['MemoryBytes'] = mem_reservation

        if limits:
            self['Limits'] = limits
        if reservation:
            self['Reservations'] = reservation


class UpdateConfig(dict):
    def __init__(self, parallelism=0, delay=None, failure_action='continue'):
        self['Parallelism'] = parallelism
        if delay is not None:
            self['Delay'] = delay
        if failure_action not in ('pause', 'continue'):
            raise errors.DockerError(
                'failure_action must be either `pause` or `continue`.'
            )
        self['FailureAction'] = failure_action


class RestartConditionTypesEnum(object):
    _values = (
        'none',
        'on_failure',
        'any',
    )
    NONE, ON_FAILURE, ANY = _values


class RestartPolicy(dict):
    condition_types = RestartConditionTypesEnum

    def __init__(self, condition=RestartConditionTypesEnum.NONE, delay=0,
                 max_attempts=0, window=0):
        if condition not in self.condition_types._values:
            raise TypeError(
                'Invalid RestartPolicy condition {0}'.format(condition)
            )

        self['Condition'] = condition
        self['Delay'] = delay
        self['MaxAttempts'] = max_attempts
        self['Window'] = window


class DriverConfig(dict):
    def __init__(self, name, options=None):
        self['Name'] = name
        if options:
            self['Options'] = options
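Not part of the diff — composing the new service types, including the string form accepted by Mount.parse_mount_string(); paths and images are illustrative:

    from docker.types import (
        ContainerSpec, Mount, Resources, RestartPolicy, TaskTemplate
    )

    mount = Mount.parse_mount_string('/host/data:/data:ro')
    task = TaskTemplate(
        container_spec=ContainerSpec(image='redis:3', mounts=[mount]),
        resources=Resources(mem_limit=64 * 1024 * 1024),
        restart_policy=RestartPolicy(condition='on_failure', max_attempts=3),
    )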
docker/types/swarm.py (new file)
@@ -0,0 +1,40 @@
class SwarmSpec(dict):
    def __init__(self, task_history_retention_limit=None,
                 snapshot_interval=None, keep_old_snapshots=None,
                 log_entries_for_slow_followers=None, heartbeat_tick=None,
                 election_tick=None, dispatcher_heartbeat_period=None,
                 node_cert_expiry=None, external_ca=None, name=None):
        if task_history_retention_limit is not None:
            self['Orchestration'] = {
                'TaskHistoryRetentionLimit': task_history_retention_limit
            }
        if any([snapshot_interval, keep_old_snapshots,
                log_entries_for_slow_followers, heartbeat_tick, election_tick]):
            self['Raft'] = {
                'SnapshotInterval': snapshot_interval,
                'KeepOldSnapshots': keep_old_snapshots,
                'LogEntriesForSlowFollowers': log_entries_for_slow_followers,
                'HeartbeatTick': heartbeat_tick,
                'ElectionTick': election_tick
            }

        if dispatcher_heartbeat_period:
            self['Dispatcher'] = {
                'HeartbeatPeriod': dispatcher_heartbeat_period
            }

        if node_cert_expiry or external_ca:
            self['CAConfig'] = {
                'NodeCertExpiry': node_cert_expiry,
                'ExternalCA': external_ca
            }

        if name is not None:
            self['Name'] = name


class SwarmExternalCA(dict):
    def __init__(self, url, protocol=None, options=None):
        self['URL'] = url
        self['Protocol'] = protocol
        self['Options'] = options
docker/utils/__init__.py
@@ -1,11 +1,13 @@
# flake8: noqa
from .utils import (
    compare_version, convert_port_bindings, convert_volume_binds,
    mkbuildcontext, tar, exclude_paths, parse_repository_tag, parse_host,
    kwargs_from_env, convert_filters, datetime_to_timestamp, create_host_config,
    create_container_config, parse_bytes, ping_registry, parse_env_file,
    version_lt, version_gte, decode_json_header, split_command,
    kwargs_from_env, convert_filters, datetime_to_timestamp,
    create_host_config, create_container_config, parse_bytes, ping_registry,
    parse_env_file, version_lt, version_gte, decode_json_header, split_command,
    create_ipam_config, create_ipam_pool, parse_devices, normalize_links,
)  # flake8: noqa
)

from .types import Ulimit, LogConfig  # flake8: noqa
from .decorators import check_resource, minimum_version, update_headers  # flake8: noqa
from ..types import LogConfig, Ulimit
from ..types import SwarmExternalCA, SwarmSpec
from .decorators import check_resource, minimum_version, update_headers
docker/utils/decorators.py
@@ -13,7 +13,7 @@ def check_resource(f):
        elif kwargs.get('image'):
            resource_id = kwargs.pop('image')
        if isinstance(resource_id, dict):
            resource_id = resource_id.get('Id')
            resource_id = resource_id.get('Id', resource_id.get('ID'))
        if not resource_id:
            raise errors.NullResource(
                'image or container param is undefined'

@@ -40,7 +40,7 @@ def minimum_version(version):
def update_headers(f):
    def inner(self, *args, **kwargs):
        if 'HttpHeaders' in self._auth_configs:
            if 'headers' not in kwargs:
            if not kwargs.get('headers'):
                kwargs['headers'] = self._auth_configs['HttpHeaders']
            else:
                kwargs['headers'].update(self._auth_configs['HttpHeaders'])
docker/utils/socket.py (new file)
@@ -0,0 +1,68 @@
import errno
import os
import select
import struct

import six


class SocketError(Exception):
    pass


def read(socket, n=4096):
    """
    Reads at most n bytes from socket
    """
    recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)

    # wait for data to become available
    select.select([socket], [], [])

    try:
        if hasattr(socket, 'recv'):
            return socket.recv(n)
        return os.read(socket.fileno(), n)
    except EnvironmentError as e:
        if e.errno not in recoverable_errors:
            raise


def read_exactly(socket, n):
    """
    Reads exactly n bytes from socket
    Raises SocketError if there isn't enough data
    """
    data = six.binary_type()
    while len(data) < n:
        next_data = read(socket, n - len(data))
        if not next_data:
            raise SocketError("Unexpected EOF")
        data += next_data
    return data


def next_frame_size(socket):
    """
    Returns the size of the next frame of data waiting to be read from socket,
    according to the protocol defined here:

    https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container
    """
    try:
        data = read_exactly(socket, 8)
    except SocketError:
        return 0

    _, actual = struct.unpack('>BxxxL', data)
    return actual


def frames_iter(socket):
    """
    Returns a generator of frames read from socket
    """
    n = next_frame_size(socket)
    while n > 0:
        yield read(socket, n)
        n = next_frame_size(socket)
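Not part of the diff — a worked example of the 8-byte frame header that next_frame_size() decodes (pure stdlib, runnable as-is):

    import struct

    # one byte for the stream id (0=stdin, 1=stdout, 2=stderr), three
    # padding bytes, then a big-endian uint32 payload length: '>BxxxL'
    header = struct.pack('>BxxxL', 1, 5)   # a stdout frame carrying 5 bytes
    stream_id, size = struct.unpack('>BxxxL', header)
    assert (stream_id, size) == (1, 5)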
docker/utils/utils.py
@@ -1,17 +1,3 @@
# Copyright 2013 dotCloud inc.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import base64
import io
import os

@@ -22,8 +8,8 @@ import tarfile
import tempfile
import warnings
from distutils.version import StrictVersion
from fnmatch import fnmatch
from datetime import datetime
from fnmatch import fnmatch

import requests
import six

@@ -31,11 +17,17 @@ import six
from .. import constants
from .. import errors
from .. import tls
from .types import Ulimit, LogConfig
from ..types import Ulimit, LogConfig

if six.PY2:
    from urllib import splitnport
else:
    from urllib.parse import splitnport

DEFAULT_HTTP_HOST = "127.0.0.1"
DEFAULT_UNIX_SOCKET = "http+unix://var/run/docker.sock"
DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'

BYTE_UNITS = {
    'b': 1,
    'k': 1024,

@@ -385,12 +377,11 @@ def parse_repository_tag(repo_name):
# Protocol translation: tcp -> http, unix -> http+unix
def parse_host(addr, is_win32=False, tls=False):
    proto = "http+unix"
    host = DEFAULT_HTTP_HOST
    port = None
    path = ''

    if not addr and is_win32:
        addr = '{0}:{1}'.format(DEFAULT_HTTP_HOST, 2375)
        addr = DEFAULT_NPIPE

    if not addr or addr.strip() == 'unix://':
        return DEFAULT_UNIX_SOCKET

@@ -425,32 +416,27 @@ def parse_host(addr, is_win32=False, tls=False):
        )
    proto = "https" if tls else "http"

    if proto != "http+unix" and ":" in addr:
        host_parts = addr.split(':')
        if len(host_parts) != 2:
            raise errors.DockerException(
                "Invalid bind address format: {0}".format(addr)
            )
        if host_parts[0]:
            host = host_parts[0]
    if proto in ("http", "https"):
        address_parts = addr.split('/', 1)
        host = address_parts[0]
        if len(address_parts) == 2:
            path = '/' + address_parts[1]
        host, port = splitnport(host)

        port = host_parts[1]
        if '/' in port:
            port, path = port.split('/', 1)
            path = '/{0}'.format(path)
        try:
            port = int(port)
        except Exception:
        if port is None:
            raise errors.DockerException(
                "Invalid port: {0}".format(addr)
            )

    elif proto in ("http", "https") and ':' not in addr:
        raise errors.DockerException(
            "Bind address needs a port: {0}".format(addr))
        if not host:
            host = DEFAULT_HTTP_HOST
    else:
        host = addr

    if proto in ("http", "https") and port == -1:
        raise errors.DockerException(
            "Bind address needs a port: {0}".format(addr))

    if proto == "http+unix" or proto == 'npipe':
        return "{0}://{1}".format(proto, host)
    return "{0}://{1}:{2}{3}".format(proto, host, port, path)
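Not part of the diff — expected parse_host() results under the splitnport-based rewrite, assuming the scheme-stripping logic earlier in the function (not shown in this hunk):

    from docker.utils import parse_host

    assert parse_host('tcp://127.0.0.1:2375') == 'http://127.0.0.1:2375'
    assert parse_host('tcp://example.com:2376/prefix', tls=True) == \
        'https://example.com:2376/prefix'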

@@ -613,14 +599,17 @@ def create_host_config(binds=None, port_bindings=None, lxc_conf=None,
                       cap_drop=None, devices=None, extra_hosts=None,
                       read_only=None, pid_mode=None, ipc_mode=None,
                       security_opt=None, ulimits=None, log_config=None,
                       mem_limit=None, memswap_limit=None, mem_swappiness=None,
                       cgroup_parent=None, group_add=None, cpu_quota=None,
                       mem_limit=None, memswap_limit=None,
                       mem_reservation=None, kernel_memory=None,
                       mem_swappiness=None, cgroup_parent=None,
                       group_add=None, cpu_quota=None,
                       cpu_period=None, blkio_weight=None,
                       blkio_weight_device=None, device_read_bps=None,
                       device_write_bps=None, device_read_iops=None,
                       device_write_iops=None, oom_kill_disable=False,
                       shm_size=None, version=None, tmpfs=None,
                       oom_score_adj=None):
                       shm_size=None, sysctls=None, version=None, tmpfs=None,
                       oom_score_adj=None, dns_opt=None, cpu_shares=None,
                       cpuset_cpus=None, userns_mode=None, pids_limit=None):

    host_config = {}

@@ -637,6 +626,18 @@ def create_host_config(binds=None, port_bindings=None, lxc_conf=None,
    if memswap_limit is not None:
        host_config['MemorySwap'] = parse_bytes(memswap_limit)

    if mem_reservation:
        if version_lt(version, '1.21'):
            raise host_config_version_error('mem_reservation', '1.21')

        host_config['MemoryReservation'] = parse_bytes(mem_reservation)

    if kernel_memory:
        if version_lt(version, '1.21'):
            raise host_config_version_error('kernel_memory', '1.21')

        host_config['KernelMemory'] = parse_bytes(kernel_memory)

    if mem_swappiness is not None:
        if version_lt(version, '1.20'):
            raise host_config_version_error('mem_swappiness', '1.20')

@@ -719,12 +720,25 @@ def create_host_config(binds=None, port_bindings=None, lxc_conf=None,
    if dns is not None:
        host_config['Dns'] = dns

    if dns_opt is not None:
        if version_lt(version, '1.21'):
            raise host_config_version_error('dns_opt', '1.21')

        host_config['DnsOptions'] = dns_opt

    if security_opt is not None:
        if not isinstance(security_opt, list):
            raise host_config_type_error('security_opt', security_opt, 'list')

        host_config['SecurityOpt'] = security_opt

    if sysctls:
        if not isinstance(sysctls, dict):
            raise host_config_type_error('sysctls', sysctls, 'dict')
        host_config['Sysctls'] = {}
        for k, v in six.iteritems(sysctls):
            host_config['Sysctls'][k] = six.text_type(v)

    if volumes_from is not None:
        if isinstance(volumes_from, six.string_types):
            volumes_from = volumes_from.split(',')

@@ -796,6 +810,21 @@ def create_host_config(binds=None, port_bindings=None, lxc_conf=None,

        host_config['CpuPeriod'] = cpu_period

    if cpu_shares:
        if version_lt(version, '1.18'):
            raise host_config_version_error('cpu_shares', '1.18')

        if not isinstance(cpu_shares, int):
            raise host_config_type_error('cpu_shares', cpu_shares, 'int')

        host_config['CpuShares'] = cpu_shares

    if cpuset_cpus:
        if version_lt(version, '1.18'):
            raise host_config_version_error('cpuset_cpus', '1.18')

        host_config['CpuSetCpus'] = cpuset_cpus

    if blkio_weight:
        if not isinstance(blkio_weight, int):
            raise host_config_type_error('blkio_weight', blkio_weight, 'int')

@@ -853,6 +882,21 @@ def create_host_config(binds=None, port_bindings=None, lxc_conf=None,
            raise host_config_version_error('tmpfs', '1.22')
        host_config["Tmpfs"] = convert_tmpfs_mounts(tmpfs)

    if userns_mode:
        if version_lt(version, '1.23'):
            raise host_config_version_error('userns_mode', '1.23')

        if userns_mode != "host":
            raise host_config_value_error("userns_mode", userns_mode)
        host_config['UsernsMode'] = userns_mode

    if pids_limit:
        if not isinstance(pids_limit, int):
            raise host_config_type_error('pids_limit', pids_limit, 'int')
        if version_lt(version, '1.23'):
            raise host_config_version_error('pids_limit', '1.23')
        host_config["PidsLimit"] = pids_limit

    return host_config

@@ -873,7 +917,8 @@ def create_networking_config(endpoints_config=None):

def create_endpoint_config(version, aliases=None, links=None,
                           ipv4_address=None, ipv6_address=None):
                           ipv4_address=None, ipv6_address=None,
                           link_local_ips=None):
    if version_lt(version, '1.22'):
        raise errors.InvalidVersion(
            'Endpoint config is not supported for API version < 1.22'

@@ -893,6 +938,13 @@ def create_endpoint_config(version, aliases=None, links=None,
    if ipv6_address:
        ipam_config['IPv6Address'] = ipv6_address

    if link_local_ips is not None:
        if version_lt(version, '1.24'):
            raise errors.InvalidVersion(
                'link_local_ips is not supported for API version < 1.24'
            )
        ipam_config['LinkLocalIPs'] = link_local_ips

    if ipam_config:
        endpoint_config['IPAMConfig'] = ipam_config

@@ -934,7 +986,7 @@ def format_environment(environment):
    def format_env(key, value):
        if value is None:
            return key
        return '{key}={value}'.format(key=key, value=value)
        return u'{key}={value}'.format(key=key, value=value)
    return [format_env(*var) for var in six.iteritems(environment)]

@@ -960,6 +1012,14 @@ def create_container_config(
            'labels were only introduced in API version 1.18'
        )

    if cpuset is not None or cpu_shares is not None:
        if version_gte(version, '1.18'):
            warnings.warn(
                'The cpuset_cpus and cpu_shares options have been moved to '
                'host_config in API version 1.18, and will be removed',
                DeprecationWarning
            )

    if stop_signal is not None and compare_version('1.21', version) < 0:
        raise errors.InvalidVersion(
            'stop_signal was only introduced in API version 1.21'

@@ -989,6 +1049,7 @@ def create_container_config(

    if mem_limit is not None:
        mem_limit = parse_bytes(mem_limit)

    if memswap_limit is not None:
        memswap_limit = parse_bytes(memswap_limit)

@@ -1,2 +1,2 @@
version = "1.9.0"
version = "1.10.0"
version_info = tuple([int(d) for d in version.split("-")[0].split(".")])

@@ -1 +1 @@
mkdocs==0.9
mkdocs==0.15.3

docs/api.md

@@ -66,6 +66,7 @@ correct value (e.g `gzip`).
* pull (bool): Downloads any updates to the FROM image in Dockerfiles
* forcerm (bool): Always remove intermediate containers, even after unsuccessful builds
* dockerfile (str): path within the build context to the Dockerfile
* buildargs (dict): A dictionary of build arguments
* container_limits (dict): A dictionary of limits applied to each container
  created by the build process. Valid keys:
  - memory (int): set memory limit for build

@@ -180,6 +181,16 @@ Connect a container to a network.

* container (str): container-id/name to be connected to the network
* net_id (str): network id
* aliases (list): A list of aliases for this endpoint. Names in that list can
  be used within the network to reach the container. Defaults to `None`.
* links (list): A list of links for this endpoint. Containers declared in this
  list will be [linked](https://docs.docker.com/engine/userguide/networking/work-with-networks/#linking-containers-in-user-defined-networks)
  to this container. Defaults to `None`.
* ipv4_address (str): The IP address of this container on the network,
  using the IPv4 protocol. Defaults to `None`.
* ipv6_address (str): The IP address of this container on the network,
  using the IPv6 protocol. Defaults to `None`.
* link_local_ips (list): A list of link-local (IPv4/IPv6) addresses.
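
A minimal sketch of a call using the new `link_local_ips` parameter; the
`client` instance, container name, and network id are assumed:

```python
client.connect_container_to_network(
    container='my-container', net_id='my-network-id',
    aliases=['web'], link_local_ips=['169.254.8.8']
)
```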

## copy
Identical to the `docker cp` command. Get files/folders from the container.

@@ -228,13 +239,13 @@ where unit = b, k, m, or g)
* environment (dict or list): A dictionary or a list of strings in the
  following format `["PASSWORD=xxx"]` or `{"PASSWORD": "xxx"}`.
* dns (list): DNS name servers
* dns_opt (list): Additional options to be added to the container's `resolv.conf` file
* volumes (str or list):
* volumes_from (str or list): List of container names or Ids to get volumes
  from. Optionally a single string joining container IDs with commas
* network_disabled (bool): Disable networking
* name (str): A name for the container
* entrypoint (str or list): An entrypoint
* cpu_shares (int): CPU shares (relative weight)
* working_dir (str): Path to the working directory
* domainname (str or list): Set custom DNS search domains
* memswap_limit (int):

@@ -272,25 +283,33 @@ The utility can be used as follows:

```python
>>> import docker.utils
>>> my_envs = docker.utils.parse_env_file('/path/to/file')
>>> docker.utils.create_container_config('1.18', '_mongodb', 'foobar', environment=my_envs)
>>> client.create_container('myimage', 'command', environment=my_envs)
```

You can now use this with 'environment' for `create_container`.

## create_network

Create a network, similar to the `docker network create` command.
Create a network, similar to the `docker network create` command. See the
[networks documentation](networks.md) for details.

**Params**:

* name (str): Name of the network
* driver (str): Name of the driver used to create the network
* options (dict): Driver options as a key-value dictionary
* ipam (dict): Optional custom IP scheme for the network
* check_duplicate (bool): Request daemon to check for networks with same name.
  Default: `True`.
* internal (bool): Restrict external access to the network. Default `False`.
* labels (dict): Map of labels to set on the network. Default `None`.
* enable_ipv6 (bool): Enable IPv6 on the network. Default `False`.
**Returns** (dict): The created network reference object
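
As a hedged sketch, creating a network with the newly documented options might
look like the following; the returned dictionary typically carries an `Id`
key, though the exact payload can vary by daemon version:

```python
net = client.create_network(
    'my-network', driver='bridge',
    labels={'environment': 'test'}, enable_ipv6=False
)
print(net['Id'])
```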

## create_service

Create a service, similar to the `docker service create` command. See the
[services documentation](services.md#Clientcreate_service) for details.

## create_volume

Create and register a named volume.

@@ -300,6 +319,7 @@ Create and register a named volume
* name (str): Name of the volume
* driver (str): Name of the driver used to create the volume
* driver_opts (dict): Driver options as a key-value dictionary
* labels (dict): Labels to set on the volume

**Returns** (dict): The created volume reference object

@@ -307,10 +327,16 @@ Create and register a named volume
>>> from docker import Client
>>> cli = Client()
>>> volume = cli.create_volume(
    name='foobar', driver='local', driver_opts={'foo': 'bar', 'baz': 'false'}
    name='foobar', driver='local', driver_opts={'foo': 'bar', 'baz': 'false'},
    labels={"key": "value"}
)
>>> print(volume)
{u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data', u'Driver': u'local', u'Name': u'foobar'}
{
    u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
    u'Driver': u'local',
    u'Name': u'foobar',
    u'Labels': {u'key': u'value'}
}
```

## diff

@@ -329,6 +355,8 @@ Inspect changes on a container's filesystem.

* container (str): container-id/name to be disconnected from a network
* net_id (str): network id
* force (bool): Force the container to disconnect from a network.
  Default: `False`

## events

@@ -588,6 +616,11 @@ Display system-wide information. Identical to the `docker info` command.
 'SwapLimit': 1}
```

## init_swarm

Initialize a new Swarm using the current connected engine as the first node.
See the [Swarm documentation](swarm.md#clientinit_swarm).

## insert
*DEPRECATED*

@@ -623,6 +656,31 @@ Retrieve network info by id.

**Returns** (dict): Network information dictionary

## inspect_node

Retrieve low-level information about a Swarm node.
See the [Swarm documentation](swarm.md#clientinspect_node).

## inspect_service

Retrieve information about a service, similar to the `docker service inspect`
command. See the [services documentation](services.md#clientinspect_service)
for details.

## inspect_swarm

Retrieve information about the current Swarm.
See the [Swarm documentation](swarm.md#clientinspect_swarm).

## inspect_task

Retrieve information about a task.

**Params**:

* task (str): Task identifier

**Returns** (dict): Task information dictionary

## inspect_volume

Retrieve volume info by name.

@@ -638,6 +696,11 @@ Retrieve volume info by name.
{u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data', u'Driver': u'local', u'Name': u'foobar'}
```

## join_swarm

Join an existing Swarm.
See the [Swarm documentation](swarm.md#clientjoin_swarm).

## kill

Kill a container or send a signal to a container.

@@ -647,6 +710,11 @@ Kill a container or send a signal to a container.

* container (str): The container to kill
* signal (str or int): The signal to send. Defaults to `SIGKILL`

## leave_swarm

Leave the current Swarm.
See the [Swarm documentation](swarm.md#clientleave_swarm).

## load_image

Load an image that was previously saved using `Client.get_image`.

@@ -704,6 +772,10 @@ The above are combined to create a filters dict.

**Returns** (dict): List of network objects.

## nodes

List Swarm nodes. See the [Swarm documentation](swarm.md#clientnodes).

## pause

Pauses all processes within a container.

@@ -783,6 +855,8 @@ command.
* tag (str): An optional tag to push
* stream (bool): Stream the output as a blocking generator
* insecure_registry (bool): Use `http://` to connect to the registry
* auth_config (dict): Override the credentials that Client.login has set for this request.
  `auth_config` should contain the `username` and `password` keys to be valid
  (see the example below).
**Returns** (generator or str): The output of the upload
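
For illustration, a hedged sketch of a push that overrides stored credentials;
the registry, image name, and credential values are placeholders:

```python
response = client.push(
    'myregistry.example.com/myimage', tag='latest', stream=True,
    auth_config={'username': 'alice', 'password': 's3cr3t'}
)
for line in response:
    print(line)
```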

@@ -846,6 +920,11 @@ Remove a network. Similar to the `docker network rm` command.

Failure to remove will raise a `docker.errors.APIError` exception.

## remove_service

Remove a service, similar to the `docker service rm` command. See the
[services documentation](services.md#clientremove_service) for details.

## remove_volume

Remove a volume. Similar to the `docker volume rm` command.

@@ -914,6 +993,11 @@ Identical to the `docker search` command.
...
```

## services

List services, similar to the `docker service ls` command. See the
[services documentation](services.md#clientservices) for details.

## start

Similar to the `docker start` command, but doesn't support attach options. Use

@@ -986,6 +1070,17 @@ Tag an image into a repository. Identical to the `docker tag` command.

**Returns** (bool): True if successful

## tasks

Retrieve a list of tasks.

**Params**:

* filters (dict): A map of filters to process on the tasks list. Valid filters:
  `id`, `name`, `service`, `node`, `label` and `desired-state`.
**Returns** (list): List of task dictionaries.
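
A short, hedged sketch of listing tasks; the `Status`/`State` keys follow the
Engine API's task objects and may differ across daemon versions:

```python
tasks = client.tasks(filters={'service': 'my-service'})
for task in tasks:
    print(task['ID'], task['Status']['State'])
```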

## top
Display the running processes of a container.

@@ -1034,6 +1129,16 @@ Update resource configs of one or more containers.

**Returns** (dict): Dictionary containing a `Warnings` key.

## update_service

Update a service, similar to the `docker service update` command. See the
[services documentation](services.md#clientupdate_service) for details.

## update_swarm

Update the current Swarm.
See the [Swarm documentation](swarm.md#clientupdate_swarm).

## version

Nearly identical to the `docker version` command.

@@ -1,6 +1,49 @@
Change Log
==========

1.10.0
------

[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.10.0+is%3Aclosed)

### Features

* Added swarm mode and service management methods. See the documentation for
  details.
* Added support for IPv6 Docker host addresses in the `Client` constructor.
* Added (read-only) support for the Docker credentials store.
* Added support for custom `auth_config` in `Client.push`.
* Added support for `labels` in `Client.create_volume`.
* Added support for `labels` and `enable_ipv6` in `Client.create_network`.
* Added support for `force` param in
  `Client.disconnect_container_from_network`.
* Added support for `pids_limit`, `sysctls`, `userns_mode`, `cpuset_cpus`,
  `cpu_shares`, `mem_reservation` and `kernel_memory` parameters in
  `Client.create_host_config`.
* Added support for `link_local_ips` in `create_endpoint_config`.
* Added support for a `changes` parameter in `Client.import_image`.
* Added support for a `version` parameter in `Client.from_env`.

### Bugfixes

* Fixed a bug where `Client.build` would crash if the `config.json` file
  contained a `HttpHeaders` entry.
* Fixed a bug where passing `decode=True` in some streaming methods would
  crash when the daemon's response had an unexpected format.
* Fixed a bug where `environment` values with unicode characters weren't
  handled properly in `create_container`.
* Fixed a bug where using the `npipe` protocol would sometimes break with
  `ValueError: buffer size must be strictly positive`.

### Miscellaneous

* Fixed an issue where URL-quoting in docker-py was inconsistent with the
  quoting done by the Docker CLI client.
* The client now sends TCP upgrade headers to hint potential proxies about
  connection hijacking.
* The client now defaults to using the `npipe` protocol on Windows.

1.9.0
-----

@@ -109,11 +109,14 @@ for example:
* cpu_group (int): The length of a CPU period in microseconds.
* cpu_period (int): Microseconds of CPU time that the container can get in a
  CPU period.
* blkio_weight: Block IO weight (relative weight), accepts a weight value between 10 and 1000.
* cpu_shares (int): CPU shares (relative weight)
* cpuset_cpus (str): CPUs in which to allow execution (0-3, 0,1)
* blkio_weight: Block IO weight (relative weight), accepts a weight value
  between 10 and 1000.
* blkio_weight_device: Block IO weight (relative device weight) in the form of:
  `[{"Path": "device_path", "Weight": weight}]`
* device_read_bps: Limit read rate (bytes per second) from a device in the form of:
  `[{"Path": "device_path", "Rate": rate}]`
* device_read_bps: Limit read rate (bytes per second) from a device in the
  form of: `[{"Path": "device_path", "Rate": rate}]`
* device_write_bps: Limit write rate (bytes per second) from a device.
* device_read_iops: Limit read rate (IO per second) from a device.
* device_write_iops: Limit write rate (IO per second) from a device.

@@ -121,8 +124,12 @@ for example:
  container process will run as.
* devices (list): Host device bindings. See [host devices](host-devices.md)
  for more information.
* tmpfs: Temporary filesystems to mouunt. See [Using tmpfs](tmpfs.md) for more
* tmpfs: Temporary filesystems to mount. See [Using tmpfs](tmpfs.md) for more
  information.
* sysctls (dict): Kernel parameters to set in the container (illustrated in
  the sketch after this list).
* userns_mode (str): Sets the user namespace mode for the container when user
  namespace remapping option is enabled. Supported values are: `host`
* pids_limit (int): Tune a container’s pids limit. Set -1 for unlimited.
**Returns** (dict) HostConfig dictionary
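
As a hedged illustration, several of the newly documented parameters combined
in one call; the values are placeholders, and each option assumes a daemon
recent enough to support it:

```python
host_config = client.create_host_config(
    mem_reservation='512m',
    sysctls={'net.core.somaxconn': '1024'},
    userns_mode='host',
    pids_limit=100,
)
container = client.create_container('busybox', 'top', host_config=host_config)
```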

@@ -107,6 +107,7 @@ Create an endpoint config dictionary to be used with
  using the IPv4 protocol. Defaults to `None`.
* ipv6_address (str): The IP address of this container on the network,
  using the IPv6 protocol. Defaults to `None`.
* link_local_ips (list): A list of link-local (IPv4/IPv6) addresses.
**Returns** An endpoint config dictionary.
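
A minimal sketch of building an endpoint config with the new parameter and
wiring it into a networking config; the network name is a placeholder:

```python
endpoint_config = client.create_endpoint_config(
    aliases=['web'], link_local_ips=['169.254.8.8']
)
networking_config = client.create_networking_config({
    'my-network': endpoint_config
})
```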

@@ -0,0 +1,254 @@
# Swarm services

Starting with Engine version 1.12 (API 1.24), it is possible to manage services
using the Docker Engine API. Note that the engine needs to be part of a
[Swarm cluster](swarm.md) before you can use the service-related methods.

## Creating a service

The `Client.create_service` method lets you create a new service inside the
cluster. The method takes several arguments, `task_template` being mandatory.
This dictionary of values is most easily produced by instantiating a
`TaskTemplate` object.

```python
container_spec = docker.types.ContainerSpec(
    image='busybox', command=['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
service_id = client.create_service(task_tmpl, name='my_service_name')
```

## Listing services

List all existing services using the `Client.services` method.

```python
client.services(filters={'name': 'mysql'})
```

## Retrieving service configuration

To retrieve detailed information and configuration for a specific service, you
may use the `Client.inspect_service` method, passing the service's ID or name.

```python
client.inspect_service(service='my_service_name')
```

## Updating service configuration

The `Client.update_service` method lets you update a service's configuration.
The mandatory `version` argument (used to prevent concurrent writes) can be
retrieved using `Client.inspect_service`.

```python
container_spec = docker.types.ContainerSpec(
    image='busybox', command=['echo', 'hello world']
)
task_tmpl = docker.types.TaskTemplate(container_spec)

svc_version = client.inspect_service(svc_id)['Version']['Index']

client.update_service(
    svc_id, svc_version, name='new_name', task_template=task_tmpl
)
```

## Removing a service

A service may be removed simply by using the `Client.remove_service` method.
Either the service name or service ID can be used as the argument.

```python
client.remove_service('my_service_name')
```

## Service API documentation

### Client.create_service

Create a service.

**Params:**

* task_template (dict): Specification of the task to start as part of the new
  service. See the [TaskTemplate class](#TaskTemplate) for details.
* name (string): User-defined name for the service. Optional.
* labels (dict): A map of labels to associate with the service. Optional.
* mode (string): Scheduling mode for the service (`replicated` or `global`).
  Defaults to `replicated`.
* update_config (dict): Specification for the update strategy of the service.
  See the [UpdateConfig class](#UpdateConfig) for details. Default: `None`.
* networks (list): List of network names or IDs to attach the service to.
  Default: `None`.
* endpoint_config (dict): Properties that can be configured to access and load
  balance a service. Default: `None`.

**Returns:** A dictionary containing an `ID` key for the newly created service.

### Client.inspect_service

Return information on a service.

**Params:**

* service (string): A service identifier (either its name or service ID)

**Returns:** A dictionary with information about the service. Raises an
`APIError` if the service can't be found.

### Client.remove_service

Stop and remove a service.

**Params:**

* service (string): A service identifier (either its name or service ID)

**Returns:** `True` if successful. Raises an `APIError` otherwise.

### Client.services

List services.

**Params:**

* filters (dict): Filters to process on the services list. Valid filters:
  `id` and `name`. Default: `None`.

**Returns:** A list of dictionaries containing data about each service.

### Client.update_service

Update a service.

**Params:**

* service (string): A service identifier (either its name or service ID).
* version (int): The version number of the service object being updated. This
  is required to avoid conflicting writes.
* task_template (dict): Specification of the updated task to start as part of
  the service. See the [TaskTemplate class](#TaskTemplate) for details.
* name (string): New name for the service. Optional.
* labels (dict): A map of labels to associate with the service. Optional.
* mode (string): Scheduling mode for the service (`replicated` or `global`).
  Defaults to `replicated`.
* update_config (dict): Specification for the update strategy of the service.
  See the [UpdateConfig class](#UpdateConfig) for details. Default: `None`.
* networks (list): List of network names or IDs to attach the service to.
  Default: `None`.
* endpoint_config (dict): Properties that can be configured to access and load
  balance a service. Default: `None`.

**Returns:** `True` if successful. Raises an `APIError` otherwise.

### Configuration objects (`docker.types`)

#### ContainerSpec

A `ContainerSpec` object describes the behavior of containers that are part
of a task, and is used when declaring a `TaskTemplate`.

**Params:**

* image (string): The image name to use for the container.
* command (string or list): The command to be run in the image.
* args (list): Arguments to the command.
* env (dict): Environment variables.
* dir (string): The working directory for commands to run in.
* user (string): The user inside the container.
* labels (dict): A map of labels to associate with the service.
* mounts (list): A list of specifications for mounts to be added to containers
  created as part of the service. See the [Mount class](#Mount) for details.
* stop_grace_period (int): Amount of time to wait for the container to
  terminate before forcefully killing it.

#### DriverConfig

A `DriverConfig` object indicates which driver to use, as well as its
configuration. It can be used for the `log_driver` in a `ContainerSpec`,
and for the `driver_config` in a volume `Mount`.

**Params:**

* name (string): Name of the logging driver to use.
* options (dict): Driver-specific options. Default: `None`.

#### Mount

A `Mount` object describes a mounted folder's configuration inside a
container. A list of `Mount`s would be used as part of a `ContainerSpec`.

* target (string): Container path.
* source (string): Mount source (e.g. a volume name or a host path).
* type (string): The mount type (`bind` or `volume`). Default: `volume`.
* read_only (bool): Whether the mount should be read-only.
* propagation (string): A propagation mode with the value `[r]private`,
  `[r]shared`, or `[r]slave`. Only valid for the `bind` type.
* no_copy (bool): Set to `True` to disable copying data from the container's
  target path into the volume. Default: `False`. Only valid for the `volume`
  type.
* labels (dict): User-defined name and labels for the volume. Only valid for
  the `volume` type.
* driver_config (dict): Volume driver configuration.
  See the [DriverConfig class](#DriverConfig) for details. Only valid for the
`volume` type.
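
A hedged sketch of a volume mount attached to a container spec; keyword names
follow the parameter lists above, and the volume and image names are
placeholders:

```python
mount = docker.types.Mount(
    target='/var/lib/data', source='my-volume', type='volume',
    labels={'purpose': 'data'},
    driver_config=docker.types.DriverConfig(name='local')
)
container_spec = docker.types.ContainerSpec(
    image='busybox', command=['sleep', '300'], mounts=[mount]
)
```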

#### Resources

A `Resources` object configures resource allocation for containers when
made part of a `ContainerSpec`.

**Params:**

* cpu_limit (int): CPU limit in units of 10^9 CPU shares.
* mem_limit (int): Memory limit in Bytes.
* cpu_reservation (int): CPU reservation in units of 10^9 CPU shares.
* mem_reservation (int): Memory reservation in Bytes.

#### RestartPolicy

A `RestartPolicy` object is used when creating a `ContainerSpec`. It dictates
whether a container should restart after stopping or failing.

* condition (string): Condition for restart (`none`, `on-failure`, or `any`).
  Default: `none`.
* delay (int): Delay between restart attempts. Default: 0
* attempts (int): Maximum attempts to restart a given container before giving
  up. Default value is 0, which is ignored.
* window (int): Time window used to evaluate the restart policy. Default value
  is 0, which is unbounded.

#### TaskTemplate

A `TaskTemplate` object can be used to describe the task specification to be
used when creating or updating a service.

**Params:**

* container_spec (dict): Container settings for containers started as part of
  this task. See the [ContainerSpec class](#ContainerSpec) for details.
* log_driver (dict): Log configuration for containers created as part of the
  service. See the [DriverConfig class](#DriverConfig) for details.
* resources (dict): Resource requirements which apply to each individual
  container created as part of the service. See the
  [Resources class](#Resources) for details.
* restart_policy (dict): Specification for the restart policy which applies
  to containers created as part of this service. See the
  [RestartPolicy class](#RestartPolicy) for details.
* placement (list): A list of constraints.
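
Putting these together, a hedged sketch of a task template with explicit
resources and restart policy; keyword names are taken from the parameter
lists above, and `container_spec` and `client` are assumed from the earlier
examples:

```python
task_tmpl = docker.types.TaskTemplate(
    container_spec,
    resources=docker.types.Resources(mem_limit=64 * 1024 * 1024),
    restart_policy=docker.types.RestartPolicy(
        condition='on-failure', delay=5, attempts=3
    )
)
service_id = client.create_service(task_tmpl, name='my_service_name')
```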

#### UpdateConfig

An `UpdateConfig` object can be used to specify the way container updates
should be performed by a service.

**Params:**

* parallelism (int): Maximum number of tasks to be updated in one iteration
  (0 means unlimited parallelism). Default: 0.
* delay (int): Amount of time between updates.
* failure_action (string): Action to take if an updated task fails to run, or
  stops running during the update. Acceptable values are `continue` and
`pause`. Default: `continue`
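
For illustration, a hedged sketch of a rolling-update strategy passed to
`Client.update_service`; `svc_id`, `svc_version`, and `task_tmpl` are assumed
from the update example earlier in this document:

```python
update_config = docker.types.UpdateConfig(
    parallelism=2, delay=10, failure_action='pause'
)
client.update_service(
    svc_id, svc_version, task_template=task_tmpl,
    update_config=update_config
)
```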

@@ -0,0 +1,250 @@
# Swarm management

Starting with Engine version 1.12 (API 1.24), it is possible to manage the
engine's associated Swarm cluster using the API.

## Initializing a new Swarm

You can initialize a new Swarm by calling `Client.init_swarm`. An advertising
address needs to be provided, usually by simply indicating which network
interface to use. Advanced options are provided using the
`swarm_spec` parameter, which can easily be created using
`Client.create_swarm_spec`.

```python
spec = client.create_swarm_spec(
    snapshot_interval=5000, log_entries_for_slow_followers=1200
)
client.init_swarm(
    advertise_addr='eth0', listen_addr='0.0.0.0:5000', force_new_cluster=False,
    swarm_spec=spec
)
```

## Joining an existing Swarm

If you're looking to have the engine your client is connected to join an
existing Swarm, this can be accomplished by using the `Client.join_swarm`
method. You will need to provide a list of at least one remote address
corresponding to other machines already part of the Swarm, as well as the
`join_token`. In most cases, a `listen_addr` and `advertise_addr` for your
node are also required.

```python
client.join_swarm(
    remote_addrs=['192.168.14.221:2377'], join_token='SWMTKN-1-redacted',
    listen_addr='0.0.0.0:5000', advertise_addr='eth0:5000'
)
```

## Leaving the Swarm

To leave the Swarm you are currently a member of, simply use
`Client.leave_swarm`. Note that if your engine is the Swarm's manager,
you will need to specify `force=True` to be able to leave.

```python
client.leave_swarm(force=False)
```

## Retrieving Swarm status

You can retrieve information about your current Swarm status by calling
`Client.inspect_swarm`. This method takes no arguments.

```python
client.inspect_swarm()
```

## Listing Swarm nodes

List all nodes that are part of the current Swarm using `Client.nodes`.
The `filters` argument lets you filter the results.

```python
client.nodes(filters={'role': 'manager'})
```

## Swarm API documentation

### Client.init_swarm

Initialize a new Swarm using the current connected engine as the first node.

**Params:**

* advertise_addr (string): Externally reachable address advertised to other
  nodes. This can either be an address/port combination in the form
  `192.168.1.1:4567`, or an interface followed by a port number, like
  `eth0:4567`. If the port number is omitted, the port number from the listen
  address is used. If `advertise_addr` is not specified, it will be
  automatically detected when possible. Default: None
* listen_addr (string): Listen address used for inter-manager communication,
  as well as determining the networking interface used for the VXLAN Tunnel
  Endpoint (VTEP). This can either be an address/port combination in the form
  `192.168.1.1:4567`, or an interface followed by a port number, like
  `eth0:4567`. If the port number is omitted, the default swarm listening port
  is used. Default: '0.0.0.0:2377'
* force_new_cluster (bool): Force creating a new Swarm, even if already part of
  one. Default: False
* swarm_spec (dict): Configuration settings of the new Swarm. Use
  `Client.create_swarm_spec` to generate a valid configuration. Default: None

**Returns:** `True` if the request went through. Raises an `APIError` if it
fails.

#### Client.create_swarm_spec

Create a `docker.types.SwarmSpec` instance that can be used as the `swarm_spec`
argument in `Client.init_swarm`.

**Params:**

* task_history_retention_limit (int): Maximum number of task history entries
  stored.
* snapshot_interval (int): Number of log entries between snapshots.
* keep_old_snapshots (int): Number of snapshots to keep beyond the current
  snapshot.
* log_entries_for_slow_followers (int): Number of log entries to keep around
  to sync up slow followers after a snapshot is created.
* heartbeat_tick (int): Number of ticks (in seconds) between each heartbeat.
* election_tick (int): Number of ticks (in seconds) needed without a leader to
  trigger a new election.
* dispatcher_heartbeat_period (int): The delay for an agent to send a
  heartbeat to the dispatcher.
* node_cert_expiry (int): Automatic expiry for node certificates.
* external_ca (dict): Configuration for forwarding signing requests to an
  external certificate authority. Use `docker.types.SwarmExternalCA`.
* name (string): Swarm's name.

**Returns:** `docker.types.SwarmSpec` instance.

#### docker.types.SwarmExternalCA

Create a configuration dictionary for the `external_ca` argument in a
`SwarmSpec`.

**Params:**

* protocol (string): Protocol for communication with the external CA
  (currently only `cfssl` is supported).
* url (string): URL where certificate signing requests should be sent.
* options (dict): An object with key/value pairs that are interpreted as
protocol-specific options for the external CA driver.
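
A hedged sketch of wiring an external CA into a Swarm spec; the URL is a
placeholder and the keyword names follow the parameter lists above:

```python
external_ca = docker.types.SwarmExternalCA(
    url='https://ca.example.com/sign', protocol='cfssl'
)
spec = client.create_swarm_spec(
    node_cert_expiry=7776000000000000, external_ca=external_ca
)
```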

### Client.inspect_node

Retrieve low-level information about a Swarm node.

**Params:**

* node_id (string): ID of the node to be inspected.

**Returns:** A dictionary containing data about this node. See sample below.

```python
{u'CreatedAt': u'2016-08-11T23:28:39.695834296Z',
 u'Description': {u'Engine': {u'EngineVersion': u'1.12.0',
   u'Plugins': [{u'Name': u'bridge', u'Type': u'Network'},
    {u'Name': u'host', u'Type': u'Network'},
    {u'Name': u'null', u'Type': u'Network'},
    {u'Name': u'overlay', u'Type': u'Network'},
    {u'Name': u'local', u'Type': u'Volume'}]},
  u'Hostname': u'dockerserv-1.local.net',
  u'Platform': {u'Architecture': u'x86_64', u'OS': u'linux'},
  u'Resources': {u'MemoryBytes': 8052109312, u'NanoCPUs': 4000000000}},
 u'ID': u'1kqami616p23dz4hd7km35w63',
 u'ManagerStatus': {u'Addr': u'10.0.131.127:2377',
  u'Leader': True,
  u'Reachability': u'reachable'},
 u'Spec': {u'Availability': u'active', u'Role': u'manager'},
 u'Status': {u'State': u'ready'},
 u'UpdatedAt': u'2016-08-11T23:28:39.979829529Z',
 u'Version': {u'Index': 9}}
```

### Client.inspect_swarm

Retrieve information about the current Swarm.

**Returns:** A dictionary containing information about the Swarm. See sample
below.

```python
{u'CreatedAt': u'2016-08-04T21:26:18.779800579Z',
 u'ID': u'8hk6e9wh4iq214qtbgvbp84a9',
 u'JoinTokens': {u'Manager': u'SWMTKN-1-redacted-1',
  u'Worker': u'SWMTKN-1-redacted-2'},
 u'Spec': {u'CAConfig': {u'NodeCertExpiry': 7776000000000000},
  u'Dispatcher': {u'HeartbeatPeriod': 5000000000},
  u'Name': u'default',
  u'Orchestration': {u'TaskHistoryRetentionLimit': 10},
  u'Raft': {u'ElectionTick': 3,
   u'HeartbeatTick': 1,
   u'LogEntriesForSlowFollowers': 500,
   u'SnapshotInterval': 10000},
  u'TaskDefaults': {}},
 u'UpdatedAt': u'2016-08-04T21:26:19.391623265Z',
 u'Version': {u'Index': 11}}
```

### Client.join_swarm

Join an existing Swarm.

**Params:**

* remote_addrs (list): Addresses of one or more manager nodes already
  participating in the Swarm to join.
* join_token (string): Secret token for joining this Swarm.
* listen_addr (string): Listen address used for inter-manager communication
  if the node gets promoted to manager, as well as determining the networking
  interface used for the VXLAN Tunnel Endpoint (VTEP). Default: `None`
* advertise_addr (string): Externally reachable address advertised to other
  nodes. This can either be an address/port combination in the form
  `192.168.1.1:4567`, or an interface followed by a port number, like
  `eth0:4567`. If the port number is omitted, the port number from the listen
  address is used. If `advertise_addr` is not specified, it will be
  automatically detected when possible. Default: `None`

**Returns:** `True` if the request went through. Raises an `APIError` if it
fails.

### Client.leave_swarm

Leave a Swarm.

**Params:**

* force (bool): Leave the Swarm even if this node is a manager.
  Default: `False`

**Returns:** `True` if the request went through. Raises an `APIError` if it
fails.

### Client.nodes

List Swarm nodes.

**Params:**

* filters (dict): Filters to process on the nodes list. Valid filters:
  `id`, `name`, `membership` and `role`. Default: `None`

**Returns:** A list of dictionaries containing data about each swarm node.

### Client.update_swarm

Update the Swarm's configuration.

**Params:**

* version (int): The version number of the swarm object being updated. This
  is required to avoid conflicting writes.
* swarm_spec (dict): Configuration settings to update. Use
  `Client.create_swarm_spec` to generate a valid configuration.
  Default: `None`.
* rotate_worker_token (bool): Rotate the worker join token. Default: `False`.
* rotate_manager_token (bool): Rotate the manager join token. Default: `False`.

**Returns:** `True` if the request went through. Raises an `APIError` if it
fails.
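
A short, hedged sketch of rotating the worker join token; the required
`version` comes from `Client.inspect_swarm`, as shown in the sample above:

```python
swarm_version = client.inspect_swarm()['Version']['Index']
client.update_swarm(version=swarm_version, rotate_worker_token=True)
```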

@@ -13,6 +13,8 @@ pages:
- Host devices: host-devices.md
- Host configuration: hostconfig.md
- Network configuration: networks.md
- Swarm management: swarm.md
- Swarm services: services.md
- Using tmpfs: tmpfs.md
- Using with Docker Machine: machine.md
- Change Log: change_log.md

@@ -2,4 +2,5 @@ requests==2.5.3
six>=1.4.0
websocket-client==0.32.0
backports.ssl_match_hostname>=3.5 ; python_version < '3.5'
ipaddress==1.0.16 ; python_version < '3.3'
ipaddress==1.0.16 ; python_version < '3.3'
docker-pycreds==0.2.0

@@ -1,3 +1,5 @@
[bdist_wheel]
universal = 1

[metadata]
description_file = README.md

setup.py

@@ -9,9 +9,10 @@ ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)

requirements = [
    'requests >= 2.5.2',
    'requests >= 2.5.2, < 2.11',
    'six >= 1.4.0',
    'websocket-client >= 0.32.0',
    'docker-pycreds >= 0.2.0'
]

if sys.platform == 'win32':

@@ -36,7 +37,8 @@ setup(
    url='https://github.com/docker/docker-py/',
    packages=[
        'docker', 'docker.api', 'docker.auth', 'docker.transport',
        'docker.utils', 'docker.utils.ports', 'docker.ssladapter'
        'docker.utils', 'docker.utils.ports', 'docker.ssladapter',
        'docker.types',
    ],
    install_requires=requirements,
    tests_require=test_requirements,

@@ -1,9 +1,6 @@
import errno
import os
import os.path
import select
import shutil
import struct
import tarfile
import tempfile
import unittest

@@ -48,15 +45,6 @@ def untar_file(tardata, filename):
    return result


def exec_driver_is_native():
    global EXEC_DRIVER
    if not EXEC_DRIVER:
        c = docker_client()
        EXEC_DRIVER = c.info()['ExecutionDriver']
        c.close()
    return EXEC_DRIVER.startswith('native')


def docker_client(**kwargs):
    return docker.Client(**docker_client_kwargs(**kwargs))

@@ -67,49 +55,6 @@ def docker_client_kwargs(**kwargs):
    return client_kwargs


def read_socket(socket, n=4096):
    """ Code stolen from dockerpty to read the socket """
    recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)

    # wait for data to become available
    select.select([socket], [], [])

    try:
        if hasattr(socket, 'recv'):
            return socket.recv(n)
        return os.read(socket.fileno(), n)
    except EnvironmentError as e:
        if e.errno not in recoverable_errors:
            raise


def next_packet_size(socket):
    """ Code stolen from dockerpty to get the next packet size """
    data = six.binary_type()
    while len(data) < 8:
        next_data = read_socket(socket, 8 - len(data))
        if not next_data:
            return 0
        data = data + next_data

    if data is None:
        return 0

    if len(data) == 8:
        _, actual = struct.unpack('>BxxxL', data)
        return actual


def read_data(socket, packet_size):
    data = six.binary_type()
    while len(data) < packet_size:
        next_data = read_socket(socket, packet_size - len(data))
        if not next_data:
            assert False, "Failed trying to read in the dataz"
        data += next_data
    return data


class BaseTestCase(unittest.TestCase):
    tmp_imgs = []
    tmp_containers = []

@@ -3,6 +3,8 @@ import signal
import tempfile

import docker
from docker.utils.socket import next_frame_size
from docker.utils.socket import read_exactly
import pytest
import six

@@ -157,9 +159,6 @@ class CreateContainerTest(helpers.BaseTestCase):
        self.assertCountEqual(info['HostConfig']['VolumesFrom'], vol_names)

    def create_container_readonly_fs(self):
        if not helpers.exec_driver_is_native():
            pytest.skip('Exec driver not native')

        ctnr = self.client.create_container(
            BUSYBOX, ['mkdir', '/shrine'],
            host_config=self.client.create_host_config(

@@ -290,7 +289,7 @@ class CreateContainerTest(helpers.BaseTestCase):
        )
        self.client.start(container)

        assert expected_msg in str(excinfo.value)
        assert six.b(expected_msg) in excinfo.value.explanation

    def test_valid_no_log_driver_specified(self):
        log_config = docker.utils.LogConfig(

@@ -804,8 +803,7 @@ class KillTest(helpers.BaseTestCase):
        self.assertIn('State', container_info)
        state = container_info['State']
        self.assertIn('ExitCode', state)
        if helpers.exec_driver_is_native():
            self.assertNotEqual(state['ExitCode'], 0)
        self.assertNotEqual(state['ExitCode'], 0)
        self.assertIn('Running', state)
        self.assertEqual(state['Running'], False)

@@ -819,8 +817,7 @@ class KillTest(helpers.BaseTestCase):
        self.assertIn('State', container_info)
        state = container_info['State']
        self.assertIn('ExitCode', state)
        if helpers.exec_driver_is_native():
            self.assertNotEqual(state['ExitCode'], 0)
        self.assertNotEqual(state['ExitCode'], 0)
        self.assertIn('Running', state)
        self.assertEqual(state['Running'], False)

@@ -1025,9 +1022,9 @@ class AttachContainerTest(helpers.BaseTestCase):

        self.client.start(ident)

        next_size = helpers.next_packet_size(pty_stdout)
        next_size = next_frame_size(pty_stdout)
        self.assertEqual(next_size, len(line))
        data = helpers.read_data(pty_stdout, next_size)
        data = read_exactly(pty_stdout, next_size)
        self.assertEqual(data.decode('utf-8'), line)

@@ -1099,11 +1096,38 @@ class ContainerUpdateTest(helpers.BaseTestCase):
        container = self.client.create_container(
            BUSYBOX, 'top', host_config=self.client.create_host_config(
                mem_limit=old_mem_limit
            ), cpu_shares=102
            )
        )
        self.tmp_containers.append(container)
        self.client.start(container)
        self.client.update_container(container, mem_limit=new_mem_limit)
        inspect_data = self.client.inspect_container(container)
        self.assertEqual(inspect_data['HostConfig']['Memory'], new_mem_limit)
        self.assertEqual(inspect_data['HostConfig']['CpuShares'], 102)


class ContainerCPUTest(helpers.BaseTestCase):
    @requires_api_version('1.18')
    def test_container_cpu_shares(self):
        cpu_shares = 512
        container = self.client.create_container(
            BUSYBOX, 'ls', host_config=self.client.create_host_config(
                cpu_shares=cpu_shares
            )
        )
        self.tmp_containers.append(container)
        self.client.start(container)
        inspect_data = self.client.inspect_container(container)
        self.assertEqual(inspect_data['HostConfig']['CpuShares'], 512)

    @requires_api_version('1.18')
    def test_container_cpuset(self):
        cpuset_cpus = "0,1"
        container = self.client.create_container(
            BUSYBOX, 'ls', host_config=self.client.create_host_config(
                cpuset_cpus=cpuset_cpus
            )
        )
        self.tmp_containers.append(container)
        self.client.start(container)
        inspect_data = self.client.inspect_container(container)
        self.assertEqual(inspect_data['HostConfig']['CpusetCpus'], cpuset_cpus)

@@ -1,4 +1,5 @@
import pytest
from docker.utils.socket import next_frame_size
from docker.utils.socket import read_exactly

from .. import helpers

@@ -7,9 +8,6 @@ BUSYBOX = helpers.BUSYBOX

class ExecTest(helpers.BaseTestCase):
    def test_execute_command(self):
        if not helpers.exec_driver_is_native():
            pytest.skip('Exec driver not native')

        container = self.client.create_container(BUSYBOX, 'cat',
                                                 detach=True, stdin_open=True)
        id = container['Id']

@@ -23,9 +21,6 @@ class ExecTest(helpers.BaseTestCase):
        self.assertEqual(exec_log, b'hello\n')

    def test_exec_command_string(self):
        if not helpers.exec_driver_is_native():
            pytest.skip('Exec driver not native')

        container = self.client.create_container(BUSYBOX, 'cat',
                                                 detach=True, stdin_open=True)
        id = container['Id']

@@ -39,9 +34,6 @@ class ExecTest(helpers.BaseTestCase):
        self.assertEqual(exec_log, b'hello world\n')

    def test_exec_command_as_user(self):
        if not helpers.exec_driver_is_native():
            pytest.skip('Exec driver not native')

        container = self.client.create_container(BUSYBOX, 'cat',
                                                 detach=True, stdin_open=True)
        id = container['Id']

@@ -55,9 +47,6 @@ class ExecTest(helpers.BaseTestCase):
        self.assertEqual(exec_log, b'default\n')

    def test_exec_command_as_root(self):
        if not helpers.exec_driver_is_native():
            pytest.skip('Exec driver not native')

        container = self.client.create_container(BUSYBOX, 'cat',
                                                 detach=True, stdin_open=True)
        id = container['Id']

@@ -71,9 +60,6 @@ class ExecTest(helpers.BaseTestCase):
        self.assertEqual(exec_log, b'root\n')

    def test_exec_command_streaming(self):
        if not helpers.exec_driver_is_native():
            pytest.skip('Exec driver not native')

        container = self.client.create_container(BUSYBOX, 'cat',
                                                 detach=True, stdin_open=True)
        id = container['Id']

@@ -89,9 +75,6 @@ class ExecTest(helpers.BaseTestCase):
        self.assertEqual(res, b'hello\nworld\n')

    def test_exec_start_socket(self):
        if not helpers.exec_driver_is_native():
            pytest.skip('Exec driver not native')

        container = self.client.create_container(BUSYBOX, 'cat',
                                                 detach=True, stdin_open=True)
        container_id = container['Id']

@@ -107,15 +90,12 @@ class ExecTest(helpers.BaseTestCase):
        socket = self.client.exec_start(exec_id, socket=True)
        self.addCleanup(socket.close)

        next_size = helpers.next_packet_size(socket)
        next_size = next_frame_size(socket)
        self.assertEqual(next_size, len(line))
        data = helpers.read_data(socket, next_size)
        data = read_exactly(socket, next_size)
        self.assertEqual(data.decode('utf-8'), line)

    def test_exec_inspect(self):
        if not helpers.exec_driver_is_native():
            pytest.skip('Exec driver not native')

        container = self.client.create_container(BUSYBOX, 'cat',
                                                 detach=True, stdin_open=True)
        id = container['Id']

@@ -208,6 +208,48 @@ class ImportImageTest(helpers.BaseTestCase):
        img_id = result['status']
        self.tmp_imgs.append(img_id)

    def test_import_image_from_data_with_changes(self):
        with self.dummy_tar_stream(n_bytes=500) as f:
            content = f.read()

        statuses = self.client.import_image_from_data(
            content, repository='test/import-from-bytes',
            changes=['USER foobar', 'CMD ["echo"]']
        )

        result_text = statuses.splitlines()[-1]
        result = json.loads(result_text)

        assert 'error' not in result

        img_id = result['status']
        self.tmp_imgs.append(img_id)

        img_data = self.client.inspect_image(img_id)
        assert img_data is not None
        assert img_data['Config']['Cmd'] == ['echo']
        assert img_data['Config']['User'] == 'foobar'

    def test_import_image_with_changes(self):
        with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename:
            statuses = self.client.import_image(
                src=tar_filename, repository='test/import-from-file',
                changes=['USER foobar', 'CMD ["echo"]']
            )

        result_text = statuses.splitlines()[-1]
        result = json.loads(result_text)

        assert 'error' not in result

        img_id = result['status']
        self.tmp_imgs.append(img_id)

        img_data = self.client.inspect_image(img_id)
        assert img_data is not None
        assert img_data['Config']['Cmd'] == ['echo']
        assert img_data['Config']['User'] == 'foobar'

    @contextlib.contextmanager
    def temporary_http_file_server(self, stream):
        '''Serve data from an IO stream over HTTP.'''
@ -115,7 +115,8 @@ class TestNetworks(helpers.BaseTestCase):
|
|||
network_data = self.client.inspect_network(net_id)
|
||||
self.assertEqual(
|
||||
list(network_data['Containers'].keys()),
|
||||
[container['Id']])
|
||||
[container['Id']]
|
||||
)
|
||||
|
||||
with pytest.raises(docker.errors.APIError):
|
||||
self.client.connect_container_to_network(container, net_id)
|
||||
|
|
@ -127,6 +128,33 @@ class TestNetworks(helpers.BaseTestCase):
|
|||
with pytest.raises(docker.errors.APIError):
|
||||
self.client.disconnect_container_from_network(container, net_id)
|
||||
|
||||
@requires_api_version('1.22')
|
||||
def test_connect_and_force_disconnect_container(self):
|
||||
net_name, net_id = self.create_network()
|
||||
|
||||
container = self.client.create_container('busybox', 'top')
|
||||
self.tmp_containers.append(container)
|
||||
self.client.start(container)
|
||||
|
||||
network_data = self.client.inspect_network(net_id)
|
||||
self.assertFalse(network_data.get('Containers'))
|
||||
|
||||
self.client.connect_container_to_network(container, net_id)
|
||||
network_data = self.client.inspect_network(net_id)
|
||||
self.assertEqual(
|
||||
list(network_data['Containers'].keys()),
|
||||
[container['Id']]
|
||||
)
|
||||
|
||||
self.client.disconnect_container_from_network(container, net_id, True)
|
||||
network_data = self.client.inspect_network(net_id)
|
||||
self.assertFalse(network_data.get('Containers'))
|
||||
|
||||
with pytest.raises(docker.errors.APIError):
|
||||
self.client.disconnect_container_from_network(
|
||||
container, net_id, force=True
|
||||
)
|
||||
|
||||
@requires_api_version('1.22')
|
||||
def test_connect_with_aliases(self):
|
||||
net_name, net_id = self.create_network()
|
||||
|
|
@@ -249,6 +277,27 @@ class TestNetworks(helpers.BaseTestCase):
            '2001:389::f00d'
        )

    @requires_api_version('1.24')
    def test_create_with_linklocal_ips(self):
        container = self.client.create_container(
            'busybox', 'top',
            networking_config=self.client.create_networking_config(
                {
                    'bridge': self.client.create_endpoint_config(
                        link_local_ips=['169.254.8.8']
                    )
                }
            ),
            host_config=self.client.create_host_config(network_mode='bridge')
        )
        self.tmp_containers.append(container)
        self.client.start(container)
        container_data = self.client.inspect_container(container)
        net_cfg = container_data['NetworkSettings']['Networks']['bridge']
        assert 'IPAMConfig' in net_cfg
        assert 'LinkLocalIPs' in net_cfg['IPAMConfig']
        assert net_cfg['IPAMConfig']['LinkLocalIPs'] == ['169.254.8.8']

    @requires_api_version('1.22')
    def test_create_with_links(self):
        net_name, net_id = self.create_network()
@@ -279,7 +328,8 @@ class TestNetworks(helpers.BaseTestCase):
        net_name, net_id = self.create_network()
        with self.assertRaises(docker.errors.APIError):
            self.client.create_network(net_name, check_duplicate=True)
        self.client.create_network(net_name, check_duplicate=False)
        net_id = self.client.create_network(net_name, check_duplicate=False)
        self.tmp_networks.append(net_id['Id'])

    @requires_api_version('1.22')
    def test_connect_with_links(self):
@@ -366,3 +416,27 @@ class TestNetworks(helpers.BaseTestCase):
        _, net_id = self.create_network(internal=True)
        net = self.client.inspect_network(net_id)
        assert net['Internal'] is True

    @requires_api_version('1.23')
    def test_create_network_with_labels(self):
        _, net_id = self.create_network(labels={
            'com.docker.py.test': 'label'
        })

        net = self.client.inspect_network(net_id)
        assert 'Labels' in net
        assert len(net['Labels']) == 1
        assert net['Labels'] == {
            'com.docker.py.test': 'label'
        }

    @requires_api_version('1.23')
    def test_create_network_with_labels_wrong_type(self):
        with pytest.raises(TypeError):
            self.create_network(labels=['com.docker.py.test=label', ])

    @requires_api_version('1.23')
    def test_create_network_ipv6_enabled(self):
        _, net_id = self.create_network(enable_ipv6=True)
        net = self.client.inspect_network(net_id)
        assert net['EnableIPv6'] is True
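
The tests above double as usage documentation for the networking API. A minimal sketch of the labelled-network and connect/force-disconnect flow they exercise (names are placeholders):

    import docker

    client = docker.Client()

    # Create a labelled network, then attach and forcibly detach a container.
    net = client.create_network(
        'example-net', labels={'com.example.purpose': 'demo'}
    )
    container = client.create_container('busybox', 'top')
    client.start(container)

    client.connect_container_to_network(container, net['Id'])
    client.disconnect_container_from_network(container, net['Id'], force=True)
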
@@ -0,0 +1,189 @@
import random

import docker

from ..base import requires_api_version
from .. import helpers


BUSYBOX = helpers.BUSYBOX


class ServiceTest(helpers.BaseTestCase):
    def setUp(self):
        super(ServiceTest, self).setUp()
        try:
            self.client.leave_swarm(force=True)
        except docker.errors.APIError:
            pass
        self.client.init_swarm('eth0')

    def tearDown(self):
        super(ServiceTest, self).tearDown()
        for service in self.client.services(filters={'name': 'dockerpytest_'}):
            try:
                self.client.remove_service(service['ID'])
            except docker.errors.APIError:
                pass
        try:
            self.client.leave_swarm(force=True)
        except docker.errors.APIError:
            pass

    def get_service_name(self):
        return 'dockerpytest_{0:x}'.format(random.getrandbits(64))

    def create_simple_service(self, name=None):
        if name:
            name = 'dockerpytest_{0}'.format(name)
        else:
            name = self.get_service_name()

        container_spec = docker.types.ContainerSpec(
            'busybox', ['echo', 'hello']
        )
        task_tmpl = docker.types.TaskTemplate(container_spec)
        return name, self.client.create_service(task_tmpl, name=name)

    @requires_api_version('1.24')
    def test_list_services(self):
        services = self.client.services()
        assert isinstance(services, list)

        test_services = self.client.services(filters={'name': 'dockerpytest_'})
        assert len(test_services) == 0
        self.create_simple_service()
        test_services = self.client.services(filters={'name': 'dockerpytest_'})
        assert len(test_services) == 1
        assert 'dockerpytest_' in test_services[0]['Spec']['Name']

    def test_inspect_service_by_id(self):
        svc_name, svc_id = self.create_simple_service()
        svc_info = self.client.inspect_service(svc_id)
        assert 'ID' in svc_info
        assert svc_info['ID'] == svc_id['ID']

    def test_inspect_service_by_name(self):
        svc_name, svc_id = self.create_simple_service()
        svc_info = self.client.inspect_service(svc_name)
        assert 'ID' in svc_info
        assert svc_info['ID'] == svc_id['ID']

    def test_remove_service_by_id(self):
        svc_name, svc_id = self.create_simple_service()
        assert self.client.remove_service(svc_id)
        test_services = self.client.services(filters={'name': 'dockerpytest_'})
        assert len(test_services) == 0

    def test_remove_service_by_name(self):
        svc_name, svc_id = self.create_simple_service()
        assert self.client.remove_service(svc_name)
        test_services = self.client.services(filters={'name': 'dockerpytest_'})
        assert len(test_services) == 0

    def test_create_service_simple(self):
        name, svc_id = self.create_simple_service()
        assert self.client.inspect_service(svc_id)
        services = self.client.services(filters={'name': name})
        assert len(services) == 1
        assert services[0]['ID'] == svc_id['ID']

    def test_create_service_custom_log_driver(self):
        container_spec = docker.types.ContainerSpec(
            'busybox', ['echo', 'hello']
        )
        log_cfg = docker.types.DriverConfig('none')
        task_tmpl = docker.types.TaskTemplate(
            container_spec, log_driver=log_cfg
        )
        name = self.get_service_name()
        svc_id = self.client.create_service(task_tmpl, name=name)
        svc_info = self.client.inspect_service(svc_id)
        assert 'TaskTemplate' in svc_info['Spec']
        res_template = svc_info['Spec']['TaskTemplate']
        assert 'LogDriver' in res_template
        assert 'Name' in res_template['LogDriver']
        assert res_template['LogDriver']['Name'] == 'none'

    def test_create_service_with_volume_mount(self):
        vol_name = self.get_service_name()
        container_spec = docker.types.ContainerSpec(
            'busybox', ['ls'],
            mounts=[
                docker.types.Mount(target='/test', source=vol_name)
            ]
        )
        self.tmp_volumes.append(vol_name)
        task_tmpl = docker.types.TaskTemplate(container_spec)
        name = self.get_service_name()
        svc_id = self.client.create_service(task_tmpl, name=name)
        svc_info = self.client.inspect_service(svc_id)
        assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
        cspec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
        assert 'Mounts' in cspec
        assert len(cspec['Mounts']) == 1
        mount = cspec['Mounts'][0]
        assert mount['Target'] == '/test'
        assert mount['Source'] == vol_name
        assert mount['Type'] == 'volume'

    def test_create_service_with_resources_constraints(self):
        container_spec = docker.types.ContainerSpec('busybox', ['true'])
        resources = docker.types.Resources(
            cpu_limit=4000000, mem_limit=3 * 1024 * 1024 * 1024,
            cpu_reservation=3500000, mem_reservation=2 * 1024 * 1024 * 1024
        )
        task_tmpl = docker.types.TaskTemplate(
            container_spec, resources=resources
        )
        name = self.get_service_name()
        svc_id = self.client.create_service(task_tmpl, name=name)
        svc_info = self.client.inspect_service(svc_id)
        assert 'TaskTemplate' in svc_info['Spec']
        res_template = svc_info['Spec']['TaskTemplate']
        assert 'Resources' in res_template
        assert res_template['Resources']['Limits'] == resources['Limits']
        assert res_template['Resources']['Reservations'] == resources[
            'Reservations'
        ]

    def test_create_service_with_update_config(self):
        container_spec = docker.types.ContainerSpec('busybox', ['true'])
        task_tmpl = docker.types.TaskTemplate(container_spec)
        update_config = docker.types.UpdateConfig(
            parallelism=10, delay=5, failure_action='pause'
        )
        name = self.get_service_name()
        svc_id = self.client.create_service(
            task_tmpl, update_config=update_config, name=name
        )
        svc_info = self.client.inspect_service(svc_id)
        assert 'UpdateConfig' in svc_info['Spec']
        assert update_config == svc_info['Spec']['UpdateConfig']

    def test_create_service_with_restart_policy(self):
        container_spec = docker.types.ContainerSpec('busybox', ['true'])
        policy = docker.types.RestartPolicy(
            docker.types.RestartPolicy.condition_types.ANY,
            delay=5, max_attempts=5
        )
        task_tmpl = docker.types.TaskTemplate(
            container_spec, restart_policy=policy
        )
        name = self.get_service_name()
        svc_id = self.client.create_service(task_tmpl, name=name)
        svc_info = self.client.inspect_service(svc_id)
        assert 'RestartPolicy' in svc_info['Spec']['TaskTemplate']
        assert policy == svc_info['Spec']['TaskTemplate']['RestartPolicy']

    def test_update_service_name(self):
        name, svc_id = self.create_simple_service()
        svc_info = self.client.inspect_service(svc_id)
        svc_version = svc_info['Version']['Index']
        new_name = self.get_service_name()
        assert self.client.update_service(
            svc_id, svc_version, name=new_name,
            task_template=svc_info['Spec']['TaskTemplate']
        )
        svc_info = self.client.inspect_service(svc_id)
        assert svc_info['Spec']['Name'] == new_name
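
Taken together, the fixture shows the minimal lifecycle of a swarm service through this client. A condensed sketch of the same calls outside the test class (the service name is a placeholder):

    import docker

    client = docker.Client()
    client.init_swarm('eth0')  # the node must be a swarm manager first

    # Build the spec objects the tests use, then create, rename and remove
    # the service.
    container_spec = docker.types.ContainerSpec('busybox', ['echo', 'hello'])
    task_tmpl = docker.types.TaskTemplate(container_spec)
    svc_id = client.create_service(task_tmpl, name='example-svc')

    svc_info = client.inspect_service(svc_id)
    client.update_service(
        svc_id, svc_info['Version']['Index'], name='example-svc-renamed',
        task_template=svc_info['Spec']['TaskTemplate']
    )
    client.remove_service('example-svc-renamed')
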
@@ -0,0 +1,145 @@
import docker
import pytest

from ..base import requires_api_version
from .. import helpers


BUSYBOX = helpers.BUSYBOX


class SwarmTest(helpers.BaseTestCase):
    def setUp(self):
        super(SwarmTest, self).setUp()
        try:
            self.client.leave_swarm(force=True)
        except docker.errors.APIError:
            pass

    def tearDown(self):
        super(SwarmTest, self).tearDown()
        try:
            self.client.leave_swarm(force=True)
        except docker.errors.APIError:
            pass

    @requires_api_version('1.24')
    def test_init_swarm_simple(self):
        assert self.client.init_swarm('eth0')

    @requires_api_version('1.24')
    def test_init_swarm_force_new_cluster(self):
        pytest.skip('Test stalls the engine on 1.12.0')

        assert self.client.init_swarm('eth0')
        version_1 = self.client.inspect_swarm()['Version']['Index']
        assert self.client.init_swarm('eth0', force_new_cluster=True)
        version_2 = self.client.inspect_swarm()['Version']['Index']
        assert version_2 != version_1

    @requires_api_version('1.24')
    def test_init_already_in_cluster(self):
        assert self.client.init_swarm('eth0')
        with pytest.raises(docker.errors.APIError):
            self.client.init_swarm('eth0')

    @requires_api_version('1.24')
    def test_init_swarm_custom_raft_spec(self):
        spec = self.client.create_swarm_spec(
            snapshot_interval=5000, log_entries_for_slow_followers=1200
        )
        assert self.client.init_swarm(
            advertise_addr='eth0', swarm_spec=spec
        )
        swarm_info = self.client.inspect_swarm()
        assert swarm_info['Spec']['Raft']['SnapshotInterval'] == 5000
        assert swarm_info['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200

    @requires_api_version('1.24')
    def test_leave_swarm(self):
        assert self.client.init_swarm('eth0')
        with pytest.raises(docker.errors.APIError) as exc_info:
            self.client.leave_swarm()
        assert exc_info.value.response.status_code == 500
        assert self.client.leave_swarm(force=True)
        with pytest.raises(docker.errors.APIError) as exc_info:
            self.client.inspect_swarm()
        assert exc_info.value.response.status_code == 406

    @requires_api_version('1.24')
    def test_update_swarm(self):
        assert self.client.init_swarm('eth0')
        swarm_info_1 = self.client.inspect_swarm()
        spec = self.client.create_swarm_spec(
            snapshot_interval=5000, log_entries_for_slow_followers=1200,
            node_cert_expiry=7776000000000000
        )
        assert self.client.update_swarm(
            version=swarm_info_1['Version']['Index'],
            swarm_spec=spec, rotate_worker_token=True
        )
        swarm_info_2 = self.client.inspect_swarm()

        assert (
            swarm_info_1['Version']['Index'] !=
            swarm_info_2['Version']['Index']
        )
        assert swarm_info_2['Spec']['Raft']['SnapshotInterval'] == 5000
        assert (
            swarm_info_2['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200
        )
        assert (
            swarm_info_1['JoinTokens']['Manager'] ==
            swarm_info_2['JoinTokens']['Manager']
        )
        assert (
            swarm_info_1['JoinTokens']['Worker'] !=
            swarm_info_2['JoinTokens']['Worker']
        )

    @requires_api_version('1.24')
    def test_update_swarm_name(self):
        assert self.client.init_swarm('eth0')
        swarm_info_1 = self.client.inspect_swarm()
        spec = self.client.create_swarm_spec(
            node_cert_expiry=7776000000000000, name='reimuhakurei'
        )
        assert self.client.update_swarm(
            version=swarm_info_1['Version']['Index'], swarm_spec=spec
        )
        swarm_info_2 = self.client.inspect_swarm()

        assert (
            swarm_info_1['Version']['Index'] !=
            swarm_info_2['Version']['Index']
        )
        assert swarm_info_2['Spec']['Name'] == 'reimuhakurei'

    @requires_api_version('1.24')
    def test_list_nodes(self):
        assert self.client.init_swarm('eth0')
        nodes_list = self.client.nodes()
        assert len(nodes_list) == 1
        node = nodes_list[0]
        assert 'ID' in node
        assert 'Spec' in node
        assert node['Spec']['Role'] == 'manager'

        filtered_list = self.client.nodes(filters={
            'id': node['ID']
        })
        assert len(filtered_list) == 1
        filtered_list = self.client.nodes(filters={
            'role': 'worker'
        })
        assert len(filtered_list) == 0

    @requires_api_version('1.24')
    def test_inspect_node(self):
        assert self.client.init_swarm('eth0')
        nodes_list = self.client.nodes()
        assert len(nodes_list) == 1
        node = nodes_list[0]
        node_data = self.client.inspect_node(node['ID'])
        assert node['ID'] == node_data['ID']
        assert node['Version'] == node_data['Version']
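
A condensed sketch of the swarm management flow these tests cover, assuming a single local engine:

    import docker

    client = docker.Client()

    # Initialise a single-node swarm with custom raft settings.
    spec = client.create_swarm_spec(
        snapshot_interval=5000, log_entries_for_slow_followers=1200
    )
    client.init_swarm(advertise_addr='eth0', swarm_spec=spec)

    # Pass the current spec version when updating; optionally rotate the
    # worker join token at the same time.
    version = client.inspect_swarm()['Version']['Index']
    client.update_swarm(
        version=version, swarm_spec=spec, rotate_worker_token=True
    )

    # Per the leave test above, a lone manager can only leave with force.
    client.leave_swarm(force=True)
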
@@ -1,17 +1,3 @@
# Copyright 2013 dotCloud inc.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
import json
import os
@@ -22,9 +8,11 @@ import sys
import tempfile
import threading
import time
import io

import docker
import requests
from requests.packages import urllib3
import six

from .. import base
@@ -42,7 +30,7 @@ DEFAULT_TIMEOUT_SECONDS = docker.constants.DEFAULT_TIMEOUT_SECONDS
def response(status_code=200, content='', headers=None, reason=None, elapsed=0,
             request=None):
             request=None, raw=None):
    res = requests.Response()
    res.status_code = status_code
    if not isinstance(content, six.binary_type):
@@ -52,6 +40,7 @@ def response(status_code=200, content='', headers=None, reason=None, elapsed=0,
    res.reason = reason
    res.elapsed = datetime.timedelta(elapsed)
    res.request = request
    res.raw = raw
    return res
@@ -93,6 +82,10 @@ def fake_put(self, url, *args, **kwargs):
def fake_delete(self, url, *args, **kwargs):
    return fake_request('DELETE', url, *args, **kwargs)


def fake_read_from_socket(self, response, stream):
    return six.binary_type()

url_base = 'http+docker://localunixsocket/'
url_prefix = '{0}v{1}/'.format(
    url_base,
@@ -103,7 +96,8 @@ class DockerClientTest(base.Cleanup, base.BaseTestCase):
    def setUp(self):
        self.patcher = mock.patch.multiple(
            'docker.Client', get=fake_get, post=fake_post, put=fake_put,
            delete=fake_delete
            delete=fake_delete,
            _read_from_socket=fake_read_from_socket
        )
        self.patcher.start()
        self.client = docker.Client()
@@ -154,9 +148,15 @@ class DockerApiTest(DockerClientTest):
            '{0}{1}'.format(url_prefix, 'hello/somename/world/someothername')
        )

        url = self.client._url('/hello/{0}/world', '/some?name')
        url = self.client._url('/hello/{0}/world', 'some?name')
        self.assertEqual(
            url, '{0}{1}'.format(url_prefix, 'hello/%2Fsome%3Fname/world')
            url, '{0}{1}'.format(url_prefix, 'hello/some%3Fname/world')
        )

        url = self.client._url("/images/{0}/push", "localhost:5000/image")
        self.assertEqual(
            url,
            '{0}{1}'.format(url_prefix, 'images/localhost:5000/image/push')
        )

    def test_url_invalid_resource(self):
@@ -317,6 +317,43 @@ class DockerApiTest(DockerClientTest):
            TypeError, self.client.create_host_config, security_opt='wrong'
        )

    def test_stream_helper_decoding(self):
        status_code, content = fake_api.fake_responses[url_prefix + 'events']()
        content_str = json.dumps(content)
        if six.PY3:
            content_str = content_str.encode('utf-8')
        body = io.BytesIO(content_str)

        # mock a stream interface
        raw_resp = urllib3.HTTPResponse(body=body)
        setattr(raw_resp._fp, 'chunked', True)
        setattr(raw_resp._fp, 'chunk_left', len(body.getvalue()) - 1)

        # pass `decode=False` to the helper
        raw_resp._fp.seek(0)
        resp = response(status_code=status_code, content=content, raw=raw_resp)
        result = next(self.client._stream_helper(resp))
        self.assertEqual(result, content_str)

        # pass `decode=True` to the helper
        raw_resp._fp.seek(0)
        resp = response(status_code=status_code, content=content, raw=raw_resp)
        result = next(self.client._stream_helper(resp, decode=True))
        self.assertEqual(result, content)

        # non-chunked response, pass `decode=False` to the helper
        setattr(raw_resp._fp, 'chunked', False)
        raw_resp._fp.seek(0)
        resp = response(status_code=status_code, content=content, raw=raw_resp)
        result = next(self.client._stream_helper(resp))
        self.assertEqual(result, content_str.decode('utf-8'))

        # non-chunked response, pass `decode=True` to the helper
        raw_resp._fp.seek(0)
        resp = response(status_code=status_code, content=content, raw=raw_resp)
        result = next(self.client._stream_helper(resp, decode=True))
        self.assertEqual(result, content)


class StreamTest(base.Cleanup, base.BaseTestCase):
    def setUp(self):
@@ -2,8 +2,9 @@ import gzip
import io

import docker
from docker import auth

from .api_test import DockerClientTest
from .api_test import DockerClientTest, fake_request, url_prefix


class BuildTest(DockerClientTest):
@@ -83,8 +84,25 @@ class BuildTest(DockerClientTest):
            }
        }

        expected_params = {'t': None, 'q': False, 'dockerfile': None,
                           'rm': False, 'nocache': False, 'pull': False,
                           'forcerm': False,
                           'remote': 'https://github.com/docker-library/mongo'}
        expected_headers = {
            'X-Registry-Config': auth.encode_header(self.client._auth_configs)}

        self.client.build(path='https://github.com/docker-library/mongo')

        fake_request.assert_called_with(
            'POST',
            url_prefix + 'build',
            stream=True,
            data=None,
            headers=expected_headers,
            params=expected_params,
            timeout=None
        )

    def test_build_container_with_named_dockerfile(self):
        self.client.build('.', dockerfile='nameddockerfile')
@@ -103,3 +121,44 @@ class BuildTest(DockerClientTest):
                'foo': 'bar'
            })
        )

    def test_set_auth_headers_with_empty_dict_and_auth_configs(self):
        self.client._auth_configs = {
            'https://example.com': {
                'user': 'example',
                'password': 'example',
                'email': 'example@example.com'
            }
        }

        headers = {}
        expected_headers = {
            'X-Registry-Config': auth.encode_header(self.client._auth_configs)}
        self.client._set_auth_headers(headers)
        self.assertEqual(headers, expected_headers)

    def test_set_auth_headers_with_dict_and_auth_configs(self):
        self.client._auth_configs = {
            'https://example.com': {
                'user': 'example',
                'password': 'example',
                'email': 'example@example.com'
            }
        }

        headers = {'foo': 'bar'}
        expected_headers = {
            'foo': 'bar',
            'X-Registry-Config': auth.encode_header(self.client._auth_configs)}

        self.client._set_auth_headers(headers)
        self.assertEqual(headers, expected_headers)

    def test_set_auth_headers_with_dict_and_no_auth_configs(self):
        headers = {'foo': 'bar'}
        expected_headers = {
            'foo': 'bar'
        }

        self.client._set_auth_headers(headers)
        self.assertEqual(headers, expected_headers)
@@ -25,6 +25,14 @@ class ClientTest(base.BaseTestCase):
        client = Client.from_env()
        self.assertEqual(client.base_url, "https://192.168.59.103:2376")

    def test_from_env_with_version(self):
        os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
                          DOCKER_CERT_PATH=TEST_CERT_DIR,
                          DOCKER_TLS_VERIFY='1')
        client = Client.from_env(version='2.32')
        self.assertEqual(client.base_url, "https://192.168.59.103:2376")
        self.assertEqual(client._version, '2.32')


class DisableSocketTest(base.BaseTestCase):
    class DummySocket(object):
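
Client.from_env reads DOCKER_HOST, DOCKER_TLS_VERIFY and DOCKER_CERT_PATH from the environment, as the test above exercises. A minimal sketch of typical use (the pinned version is a placeholder):

    import docker

    # Configuration comes entirely from the environment, e.g. the variables
    # exported by `docker-machine env` (host, TLS verification, cert path).
    client = docker.Client.from_env()

    # An explicit API version can be pinned instead of the library default.
    pinned = docker.Client.from_env(version='1.24')
    print(client.base_url, pinned._version)
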
@@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-

import datetime
import json
import signal
@@ -286,6 +288,33 @@ class CreateContainerTest(DockerClientTest):
        self.assertEqual(args[1]['headers'],
                         {'Content-Type': 'application/json'})

    @requires_api_version('1.18')
    def test_create_container_with_host_config_cpu_shares(self):
        self.client.create_container(
            'busybox', 'ls', host_config=self.client.create_host_config(
                cpu_shares=512
            )
        )

        args = fake_request.call_args
        self.assertEqual(args[0][1],
                         url_prefix + 'containers/create')

        self.assertEqual(json.loads(args[1]['data']),
                         json.loads('''
            {"Tty": false, "Image": "busybox",
             "Cmd": ["ls"], "AttachStdin": false,
             "AttachStderr": true,
             "AttachStdout": true, "OpenStdin": false,
             "StdinOnce": false,
             "NetworkDisabled": false,
             "HostConfig": {
                "CpuShares": 512,
                "NetworkMode": "default"
             }}'''))
        self.assertEqual(args[1]['headers'],
                         {'Content-Type': 'application/json'})

    def test_create_container_with_cpuset(self):
        self.client.create_container('busybox', 'ls',
                                     cpuset='0,1')
@@ -306,6 +335,33 @@ class CreateContainerTest(DockerClientTest):
        self.assertEqual(args[1]['headers'],
                         {'Content-Type': 'application/json'})

    @requires_api_version('1.18')
    def test_create_container_with_host_config_cpuset(self):
        self.client.create_container(
            'busybox', 'ls', host_config=self.client.create_host_config(
                cpuset_cpus='0,1'
            )
        )

        args = fake_request.call_args
        self.assertEqual(args[0][1],
                         url_prefix + 'containers/create')

        self.assertEqual(json.loads(args[1]['data']),
                         json.loads('''
            {"Tty": false, "Image": "busybox",
             "Cmd": ["ls"], "AttachStdin": false,
             "AttachStderr": true,
             "AttachStdout": true, "OpenStdin": false,
             "StdinOnce": false,
             "NetworkDisabled": false,
             "HostConfig": {
                "CpuSetCpus": "0,1",
                "NetworkMode": "default"
             }}'''))
        self.assertEqual(args[1]['headers'],
                         {'Content-Type': 'application/json'})

    def test_create_container_with_cgroup_parent(self):
        self.client.create_container(
            'busybox', 'ls', host_config=self.client.create_host_config(
@@ -695,14 +751,18 @@ class CreateContainerTest(DockerClientTest):
        )

    def test_create_container_with_mac_address(self):
        mac_address_expected = "02:42:ac:11:00:0a"
        expected = "02:42:ac:11:00:0a"

        container = self.client.create_container(
            'busybox', ['sleep', '60'], mac_address=mac_address_expected)
        self.client.create_container(
            'busybox',
            ['sleep', '60'],
            mac_address=expected
        )

        res = self.client.inspect_container(container['Id'])
        self.assertEqual(mac_address_expected,
                         res['NetworkSettings']['MacAddress'])
        args = fake_request.call_args
        self.assertEqual(args[0][1], url_prefix + 'containers/create')
        data = json.loads(args[1]['data'])
        assert data['MacAddress'] == expected

    def test_create_container_with_links(self):
        link_path = 'path'
@@ -1074,6 +1134,51 @@ class CreateContainerTest(DockerClientTest):
            DEFAULT_TIMEOUT_SECONDS
        )

    @requires_api_version('1.24')
    def test_create_container_with_sysctl(self):
        self.client.create_container(
            'busybox', 'true',
            host_config=self.client.create_host_config(
                sysctls={
                    'net.core.somaxconn': 1024,
                    'net.ipv4.tcp_syncookies': '0',
                }
            )
        )

        args = fake_request.call_args
        self.assertEqual(args[0][1], url_prefix + 'containers/create')
        expected_payload = self.base_create_payload()
        expected_payload['HostConfig'] = self.client.create_host_config()
        expected_payload['HostConfig']['Sysctls'] = {
            'net.core.somaxconn': '1024', 'net.ipv4.tcp_syncookies': '0',
        }
        self.assertEqual(json.loads(args[1]['data']), expected_payload)
        self.assertEqual(
            args[1]['headers'], {'Content-Type': 'application/json'}
        )
        self.assertEqual(
            args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
        )

    def test_create_container_with_unicode_envvars(self):
        envvars_dict = {
            'foo': u'☃',
        }

        expected = [
            u'foo=☃'
        ]

        self.client.create_container(
            'busybox', 'true',
            environment=envvars_dict,
        )

        args = fake_request.call_args
        self.assertEqual(args[0][1], url_prefix + 'containers/create')
        self.assertEqual(json.loads(args[1]['data'])['Env'], expected)


class ContainerTest(DockerClientTest):
    def test_list_containers(self):
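
As the sysctl test shows, per-container kernel parameters travel in the host config, and integer values are serialized as strings on the wire. A minimal sketch of the call a user would make (image and values are placeholders):

    import docker

    client = docker.Client()

    # Sysctls ride in HostConfig; the API expects string values, and the
    # client converts integers such as 1024 below on the way out.
    container = client.create_container(
        'busybox', 'true',
        host_config=client.create_host_config(
            sysctls={
                'net.core.somaxconn': 1024,
                'net.ipv4.tcp_syncookies': '0',
            }
        )
    )
    client.start(container)
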
@@ -51,8 +51,36 @@ class ExecTest(DockerClientTest):
            }
        )

        self.assertEqual(args[1]['headers'],
                         {'Content-Type': 'application/json'})
        self.assertEqual(
            args[1]['headers'], {
                'Content-Type': 'application/json',
                'Connection': 'Upgrade',
                'Upgrade': 'tcp'
            }
        )

    def test_exec_start_detached(self):
        self.client.exec_start(fake_api.FAKE_EXEC_ID, detach=True)

        args = fake_request.call_args
        self.assertEqual(
            args[0][1], url_prefix + 'exec/{0}/start'.format(
                fake_api.FAKE_EXEC_ID
            )
        )

        self.assertEqual(
            json.loads(args[1]['data']), {
                'Tty': False,
                'Detach': True
            }
        )

        self.assertEqual(
            args[1]['headers'], {
                'Content-Type': 'application/json'
            }
        )

    def test_exec_inspect(self):
        self.client.exec_inspect(fake_api.FAKE_EXEC_ID)
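
The Upgrade headers above reflect that an attached exec_start hijacks the HTTP connection into a raw TCP stream, while a detached start is a plain JSON POST. A minimal sketch of the client-side calls (container name and command are placeholders):

    import docker

    client = docker.Client()

    # Create the exec instance, then start it detached; the daemon replies
    # immediately instead of hijacking the connection for output streaming.
    exec_id = client.exec_create('my-container', ['echo', 'hello'])
    client.exec_start(exec_id, detach=True)
    print(client.exec_inspect(exec_id))
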
@@ -1,17 +1,3 @@
# Copyright 2013 dotCloud inc.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from . import fake_stat
from docker import constants
@@ -183,35 +169,6 @@ def get_fake_inspect_image():
    return status_code, response


def get_fake_port():
    status_code = 200
    response = {
        'HostConfig': {
            'Binds': None,
            'ContainerIDFile': '',
            'Links': None,
            'LxcConf': None,
            'PortBindings': {
                '1111': None,
                '1111/tcp': [{'HostIp': '127.0.0.1', 'HostPort': '4567'}],
                '2222': None
            },
            'Privileged': False,
            'PublishAllPorts': False
        },
        'NetworkSettings': {
            'Bridge': 'docker0',
            'PortMapping': None,
            'Ports': {
                '1111': None,
                '1111/tcp': [{'HostIp': '127.0.0.1', 'HostPort': '4567'}],
                '2222': None},
            'MacAddress': '02:42:ac:11:00:0a'
        }
    }
    return status_code, response


def get_fake_insert_image():
    status_code = 200
    response = {'StatusCode': 0}
@@ -433,7 +390,10 @@ def get_fake_volume():
    response = {
        'Name': 'perfectcherryblossom',
        'Driver': 'local',
        'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom'
        'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom',
        'Labels': {
            'com.example.some-label': 'some-value'
        }
    }
    return status_code, response
@@ -506,8 +466,6 @@ fake_responses = {
    post_fake_pause_container,
    '{1}/{0}/containers/3cc2351ab11b/unpause'.format(CURRENT_VERSION, prefix):
    post_fake_unpause_container,
    '{1}/{0}/containers/3cc2351ab11b/json'.format(CURRENT_VERSION, prefix):
    get_fake_port,
    '{1}/{0}/containers/3cc2351ab11b/restart'.format(CURRENT_VERSION, prefix):
    post_fake_restart_container,
    '{1}/{0}/containers/3cc2351ab11b'.format(CURRENT_VERSION, prefix):
@@ -2,6 +2,7 @@ import docker
import pytest

from . import fake_api
from docker import auth
from .api_test import (
    DockerClientTest, fake_request, DEFAULT_TIMEOUT_SECONDS, url_prefix,
    fake_resolve_authconfig
|
@ -262,6 +263,31 @@ class ImageTest(DockerClientTest):
|
|||
timeout=DEFAULT_TIMEOUT_SECONDS
|
||||
)
|
||||
|
||||
def test_push_image_with_auth(self):
|
||||
auth_config = {
|
||||
'username': "test_user",
|
||||
'password': "test_password",
|
||||
'serveraddress': "test_server",
|
||||
}
|
||||
encoded_auth = auth.encode_header(auth_config)
|
||||
self.client.push(
|
||||
fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME,
|
||||
auth_config=auth_config
|
||||
)
|
||||
|
||||
fake_request.assert_called_with(
|
||||
'POST',
|
||||
url_prefix + 'images/test_image/push',
|
||||
params={
|
||||
'tag': fake_api.FAKE_TAG_NAME,
|
||||
},
|
||||
data='{}',
|
||||
headers={'Content-Type': 'application/json',
|
||||
'X-Registry-Auth': encoded_auth},
|
||||
stream=False,
|
||||
timeout=DEFAULT_TIMEOUT_SECONDS
|
||||
)
|
||||
|
||||
def test_push_image_stream(self):
|
||||
with mock.patch('docker.auth.auth.resolve_authconfig',
|
||||
fake_resolve_authconfig):
|
||||
|
|
|
|||
|
|
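
The new auth_config parameter lets a caller bypass the credentials stored by `docker login` when pushing. A minimal sketch, with placeholder registry credentials:

    import docker

    client = docker.Client()

    # Explicit credentials override whatever is stored on disk; they are
    # sent base64-encoded in the X-Registry-Auth header.
    for line in client.push(
        'registry.example.com/app', tag='latest', stream=True,
        auth_config={
            'username': 'example-user',
            'password': 'example-password',
            'serveraddress': 'registry.example.com',
        }
    ):
        print(line)
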
@@ -184,4 +184,4 @@ class NetworkTest(DockerClientTest):
        self.assertEqual(
            json.loads(post.call_args[1]['data']),
            {'container': container_id})
            {'Container': container_id})
@@ -20,9 +20,11 @@ from docker.utils import (
    create_host_config, Ulimit, LogConfig, parse_bytes, parse_env_file,
    exclude_paths, convert_volume_binds, decode_json_header, tar,
    split_command, create_ipam_config, create_ipam_pool, parse_devices,
    update_headers,
)
from docker.utils.utils import create_endpoint_config

from docker.utils.ports import build_port_bindings, split_port
from docker.utils.utils import create_endpoint_config

from .. import base
from ..helpers import make_tree
|
@ -34,6 +36,37 @@ TEST_CERT_DIR = os.path.join(
|
|||
)
|
||||
|
||||
|
||||
class DecoratorsTest(base.BaseTestCase):
|
||||
def test_update_headers(self):
|
||||
sample_headers = {
|
||||
'X-Docker-Locale': 'en-US',
|
||||
}
|
||||
|
||||
def f(self, headers=None):
|
||||
return headers
|
||||
|
||||
client = Client()
|
||||
client._auth_configs = {}
|
||||
|
||||
g = update_headers(f)
|
||||
assert g(client, headers=None) is None
|
||||
assert g(client, headers={}) == {}
|
||||
assert g(client, headers={'Content-type': 'application/json'}) == {
|
||||
'Content-type': 'application/json',
|
||||
}
|
||||
|
||||
client._auth_configs = {
|
||||
'HttpHeaders': sample_headers
|
||||
}
|
||||
|
||||
assert g(client, headers=None) == sample_headers
|
||||
assert g(client, headers={}) == sample_headers
|
||||
assert g(client, headers={'Content-type': 'application/json'}) == {
|
||||
'Content-type': 'application/json',
|
||||
'X-Docker-Locale': 'en-US',
|
||||
}
|
||||
|
||||
|
||||
class HostConfigTest(base.BaseTestCase):
|
||||
def test_create_host_config_no_options(self):
|
||||
config = create_host_config(version='1.19')
|
||||
|
|
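
The assertions in DecoratorsTest pin down the decorator's contract: headers stored under 'HttpHeaders' in the client's auth config are merged into the wrapped call's headers kwarg, with explicitly passed keys winning. A sketch consistent with those assertions (not the library's literal source):

    import functools

    def update_headers(f):
        @functools.wraps(f)
        def inner(self, *args, **kwargs):
            # Merge configured HttpHeaders into the call's headers kwarg;
            # keys supplied by the caller take precedence.
            if 'HttpHeaders' in self._auth_configs:
                if kwargs.get('headers') is None:
                    kwargs['headers'] = self._auth_configs['HttpHeaders']
                else:
                    merged = dict(self._auth_configs['HttpHeaders'])
                    merged.update(kwargs['headers'])
                    kwargs['headers'] = merged
            return f(self, *args, **kwargs)
        return inner
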
@@ -98,6 +131,16 @@ class HostConfigTest(base.BaseTestCase):
            InvalidVersion, lambda: create_host_config(version='1.18.3',
                                                       oom_kill_disable=True))

    def test_create_host_config_with_userns_mode(self):
        config = create_host_config(version='1.23', userns_mode='host')
        self.assertEqual(config.get('UsernsMode'), 'host')
        self.assertRaises(
            InvalidVersion, lambda: create_host_config(version='1.22',
                                                       userns_mode='host'))
        self.assertRaises(
            ValueError, lambda: create_host_config(version='1.23',
                                                   userns_mode='host12'))

    def test_create_host_config_with_oom_score_adj(self):
        config = create_host_config(version='1.22', oom_score_adj=100)
        self.assertEqual(config.get('OomScoreAdj'), 100)
@@ -108,6 +151,19 @@ class HostConfigTest(base.BaseTestCase):
            TypeError, lambda: create_host_config(version='1.22',
                                                  oom_score_adj='100'))

    def test_create_host_config_with_dns_opt(self):

        tested_opts = ['use-vc', 'no-tld-query']
        config = create_host_config(version='1.21', dns_opt=tested_opts)
        dns_opts = config.get('DnsOptions')

        self.assertTrue('use-vc' in dns_opts)
        self.assertTrue('no-tld-query' in dns_opts)

        self.assertRaises(
            InvalidVersion, lambda: create_host_config(version='1.20',
                                                       dns_opt=tested_opts))

    def test_create_endpoint_config_with_aliases(self):
        config = create_endpoint_config(version='1.22', aliases=['foo', 'bar'])
        assert config == {'Aliases': ['foo', 'bar']}
@@ -115,6 +171,29 @@ class HostConfigTest(base.BaseTestCase):
        with pytest.raises(InvalidVersion):
            create_endpoint_config(version='1.21', aliases=['foo', 'bar'])

    def test_create_host_config_with_mem_reservation(self):
        config = create_host_config(version='1.21', mem_reservation=67108864)
        self.assertEqual(config.get('MemoryReservation'), 67108864)
        self.assertRaises(
            InvalidVersion, lambda: create_host_config(
                version='1.20', mem_reservation=67108864))

    def test_create_host_config_with_kernel_memory(self):
        config = create_host_config(version='1.21', kernel_memory=67108864)
        self.assertEqual(config.get('KernelMemory'), 67108864)
        self.assertRaises(
            InvalidVersion, lambda: create_host_config(
                version='1.20', kernel_memory=67108864))

    def test_create_host_config_with_pids_limit(self):
        config = create_host_config(version='1.23', pids_limit=1024)
        self.assertEqual(config.get('PidsLimit'), 1024)

        with pytest.raises(InvalidVersion):
            create_host_config(version='1.22', pids_limit=1024)
        with pytest.raises(TypeError):
            create_host_config(version='1.22', pids_limit='1024')


class UlimitTest(base.BaseTestCase):
    def test_create_host_config_dict_ulimit(self):
@@ -404,10 +483,18 @@ class ParseHostTest(base.BaseTestCase):
            'https://kokia.jp:2375': 'https://kokia.jp:2375',
            'unix:///var/run/docker.sock': 'http+unix:///var/run/docker.sock',
            'unix://': 'http+unix://var/run/docker.sock',
            '12.234.45.127:2375/docker/engine': (
                'http://12.234.45.127:2375/docker/engine'
            ),
            'somehost.net:80/service/swarm': (
                'http://somehost.net:80/service/swarm'
            ),
            'npipe:////./pipe/docker_engine': 'npipe:////./pipe/docker_engine',
            '[fd12::82d1]:2375': 'http://[fd12::82d1]:2375',
            'https://[fd12:5672::12aa]:1090': 'https://[fd12:5672::12aa]:1090',
            '[fd12::82d1]:2375/docker/engine': (
                'http://[fd12::82d1]:2375/docker/engine'
            ),
        }

        for host in invalid_hosts:
@@ -415,15 +502,15 @@ class ParseHostTest(base.BaseTestCase):
                parse_host(host, None)

        for host, expected in valid_hosts.items():
            self.assertEqual(parse_host(host, None), expected, msg=host)
            assert parse_host(host, None) == expected

    def test_parse_host_empty_value(self):
        unix_socket = 'http+unix://var/run/docker.sock'
        tcp_port = 'http://127.0.0.1:2375'
        npipe = 'npipe:////./pipe/docker_engine'

        for val in [None, '']:
            assert parse_host(val, is_win32=False) == unix_socket
            assert parse_host(val, is_win32=True) == tcp_port
            assert parse_host(val, is_win32=True) == npipe

    def test_parse_host_tls(self):
        host_value = 'myhost.docker.net:3348'
@@ -602,7 +689,6 @@ class UtilsTest(base.BaseTestCase):


class SplitCommandTest(base.BaseTestCase):

    def test_split_command_with_unicode(self):
        self.assertEqual(split_command(u'echo μμ'), ['echo', 'μμ'])
@@ -43,6 +43,22 @@ class VolumeTest(DockerClientTest):
        self.assertEqual(args[0][1], url_prefix + 'volumes/create')
        self.assertEqual(json.loads(args[1]['data']), {'Name': name})

    @base.requires_api_version('1.23')
    def test_create_volume_with_labels(self):
        name = 'perfectcherryblossom'
        result = self.client.create_volume(name, labels={
            'com.example.some-label': 'some-value'})
        self.assertEqual(
            result["Labels"],
            {'com.example.some-label': 'some-value'}
        )

    @base.requires_api_version('1.23')
    def test_create_volume_with_invalid_labels(self):
        name = 'perfectcherryblossom'
        with pytest.raises(TypeError):
            self.client.create_volume(name, labels=1)

    @base.requires_api_version('1.21')
    def test_create_volume_with_driver(self):
        name = 'perfectcherryblossom'
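
Volume labels follow the same pattern as network and container labels: a plain string-to-string dict, rejected with TypeError otherwise. A minimal sketch (volume name and label are placeholders):

    import docker

    client = docker.Client()

    # Labels must be a dict of strings; anything else raises TypeError.
    volume = client.create_volume(
        'example-data', labels={'com.example.some-label': 'some-value'}
    )
    print(volume['Labels'])
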