mirror of https://github.com/docker/docker-py.git

Commit 1c1d7eee5a: Merge branch 'master' into exec_create_user
Dockerfile
@@ -1,5 +1,5 @@
 FROM python:2.7
-MAINTAINER Joffrey F <joffrey@dotcloud.com>
+MAINTAINER Joffrey F <joffrey@docker.com>
 ADD . /home/docker-py
 WORKDIR /home/docker-py
 RUN pip install -r test-requirements.txt
Dockerfile-py3 (new file, 6 lines)
FROM python:3.4
MAINTAINER Joffrey F <joffrey@docker.com>
ADD . /home/docker-py
WORKDIR /home/docker-py
RUN pip install -r test-requirements.txt
RUN pip install .
Makefile (13 lines changed)
@@ -1,4 +1,4 @@
-.PHONY: all build test integration-test unit-test
+.PHONY: all build test integration-test unit-test build-py3 unit-test-py3 integration-test-py3
 
 HOST_TMPDIR=test -n "$(TMPDIR)" && echo $(TMPDIR) || echo /tmp
 
@@ -7,10 +7,19 @@ all: test
 build:
 	docker build -t docker-py .
 
-test: unit-test integration-test
+build-py3:
+	docker build -t docker-py3 -f Dockerfile-py3 .
+
+test: unit-test integration-test unit-test-py3 integration-test-py3
 
 unit-test: build
 	docker run docker-py python tests/test.py
 
+unit-test-py3: build-py3
+	docker run docker-py3 python tests/test.py
+
 integration-test: build
 	docker run -e NOT_ON_HOST=true -v `$(HOST_TMPDIR)`:/tmp -v /var/run/docker.sock:/var/run/docker.sock docker-py python tests/integration_test.py
+
+integration-test-py3: build-py3
+	docker run -e NOT_ON_HOST=true -v `$(HOST_TMPDIR)`:/tmp -v /var/run/docker.sock:/var/run/docker.sock docker-py3 python tests/integration_test.py
docker/auth/__init__.py
@@ -1,4 +1,5 @@
 from .auth import (
+    INDEX_NAME,
     INDEX_URL,
     encode_header,
     load_config,
docker/auth/auth.py
@@ -16,38 +16,34 @@ import base64
 import fileinput
 import json
 import os
+import warnings
 
 import six
 
-from ..utils import utils
+from .. import constants
 from .. import errors
 
-INDEX_URL = 'https://index.docker.io/v1/'
+INDEX_NAME = 'index.docker.io'
+INDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)
 DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
 LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
 
 
-def expand_registry_url(hostname, insecure=False):
-    if hostname.startswith('http:') or hostname.startswith('https:'):
-        return hostname
-    if utils.ping_registry('https://' + hostname):
-        return 'https://' + hostname
-    elif insecure:
-        return 'http://' + hostname
-    else:
-        raise errors.DockerException(
-            "HTTPS endpoint unresponsive and insecure mode isn't enabled."
-        )
-
-
 def resolve_repository_name(repo_name, insecure=False):
+    if insecure:
+        warnings.warn(
+            constants.INSECURE_REGISTRY_DEPRECATION_WARNING.format(
+                'resolve_repository_name()'
+            ), DeprecationWarning
+        )
+
     if '://' in repo_name:
         raise errors.InvalidRepository(
             'Repository name cannot contain a scheme ({0})'.format(repo_name))
     parts = repo_name.split('/', 1)
     if '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost':
         # This is a docker index repo (ex: foo/bar or ubuntu)
-        return INDEX_URL, repo_name
+        return INDEX_NAME, repo_name
     if len(parts) < 2:
         raise errors.InvalidRepository(
             'Invalid repository name ({0})'.format(repo_name))

@@ -57,7 +53,7 @@ def resolve_repository_name(repo_name, insecure=False):
             'Invalid repository name, try "{0}" instead'.format(parts[1])
         )
 
-    return expand_registry_url(parts[0], insecure), parts[1]
+    return parts[0], parts[1]
 
 
 def resolve_authconfig(authconfig, registry=None):

@@ -68,7 +64,7 @@ def resolve_authconfig(authconfig, registry=None):
     Returns None if no match was found.
     """
     # Default to the public index server
-    registry = convert_to_hostname(registry) if registry else INDEX_URL
+    registry = convert_to_hostname(registry) if registry else INDEX_NAME
 
     if registry in authconfig:
         return authconfig[registry]

@@ -102,12 +98,6 @@ def encode_header(auth):
     return base64.b64encode(auth_json)
 
 
-def encode_full_header(auth):
-    """ Returns the given auth block encoded for the X-Registry-Config header.
-    """
-    return encode_header({'configs': auth})
-
-
 def parse_auth(entries):
     """
     Parses authentication entries

@@ -185,7 +175,7 @@ def load_config(config_path=None):
                 'Invalid or empty configuration file!')
 
         username, password = decode_auth(data[0])
-        conf[INDEX_URL] = {
+        conf[INDEX_NAME] = {
             'username': username,
             'password': password,
             'email': data[1],
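For a sense of what the switch from INDEX_URL to INDEX_NAME means in practice, here is a small illustrative sketch (not part of the diff; the private registry hostname is a placeholder):

```python
from docker.auth import auth

# Official-index repositories now resolve to the bare index name
# rather than the full 'https://index.docker.io/v1/' URL.
print(auth.resolve_repository_name('ubuntu'))    # ('index.docker.io', 'ubuntu')
print(auth.resolve_repository_name('foo/bar'))   # ('index.docker.io', 'foo/bar')

# A first component containing a dot, a colon, or 'localhost' is treated
# as a private registry host and returned as-is.
print(auth.resolve_repository_name('registry.example.com:5000/foo/bar'))
# ('registry.example.com:5000', 'foo/bar')
```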
docker/client.py (356 lines changed)
@@ -12,237 +12,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import json
 import os
 import re
 import shlex
-import struct
 import warnings
 from datetime import datetime
 
-import requests
-import requests.exceptions
 import six
-import websocket
 
+from . import clientbase
 from . import constants
 from . import errors
 from .auth import auth
-from .unixconn import unixconn
-from .ssladapter import ssladapter
 from .utils import utils, check_resource
-from .tls import TLSConfig
+from .constants import INSECURE_REGISTRY_DEPRECATION_WARNING
 
 
-class Client(requests.Session):
+class Client(clientbase.ClientBase):
 [about 200 removed lines elided here: the HTTP/session plumbing that used to live in
  Client (__init__, _retrieve_server_version, _set_request_timeout, _post/_get/_delete,
  _url, _raise_for_status, _result, _post_json, _attach_params, _attach_websocket,
  _create_websocket_connection, _get_raw_response_socket, _stream_helper,
  _multiplexed_buffer_helper, _multiplexed_response_stream_helper, api_version)
  moves, essentially unchanged, into the new docker/clientbase.py shown further down]
     @check_resource
     def attach(self, container, stdout=True, stderr=True,
                stream=False, logs=False):
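The refactor above does not change the public constructor; a quick sketch of the unchanged entry point (the socket path shown is the common default):

```python
import docker

# Client now inherits the HTTP plumbing from ClientBase, but is built the
# same way as before; 'auto' still asks the daemon for its API version.
client = docker.Client(base_url='unix://var/run/docker.sock', version='auto')
print(client.api_version)
```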
@@ -255,28 +41,7 @@ class Client(requests.Session):
         u = self._url("/containers/{0}/attach".format(container))
         response = self._post(u, params=params, stream=stream)
 
-        # Stream multi-plexing was only introduced in API v1.6. Anything before
-        # that needs old-style streaming.
-        if utils.compare_version('1.6', self._version) < 0:
-            def stream_result():
-                self._raise_for_status(response)
-                for line in response.iter_lines(chunk_size=1,
-                                                decode_unicode=True):
-                    # filter out keep-alive new lines
-                    if line:
-                        yield line
-
-            return stream_result() if stream else \
-                self._result(response, binary=True)
-
-        sep = bytes() if six.PY3 else str()
-
-        if stream:
-            return self._multiplexed_response_stream_helper(response)
-        else:
-            return sep.join(
-                [x for x in self._multiplexed_buffer_helper(response)]
-            )
+        return self._get_result(container, stream, response)
 
     @check_resource
     def attach_socket(self, container, params=None, ws=False):
@@ -317,7 +82,7 @@ class Client(requests.Session):
         elif fileobj is not None:
             context = utils.mkbuildcontext(fileobj)
         elif path.startswith(('http://', 'https://',
-                              'git://', 'github.com/')):
+                              'git://', 'github.com/', 'git@')):
             remote = path
         elif not os.path.isdir(path):
             raise TypeError("You must specify a directory to build in path")
@@ -375,9 +140,14 @@ class Client(requests.Session):
         if self._auth_configs:
             if headers is None:
                 headers = {}
-            headers['X-Registry-Config'] = auth.encode_full_header(
-                self._auth_configs
-            )
+            if utils.compare_version('1.19', self._version) >= 0:
+                headers['X-Registry-Config'] = auth.encode_header(
+                    self._auth_configs
+                )
+            else:
+                headers['X-Registry-Config'] = auth.encode_header({
+                    'configs': self._auth_configs
+                })
 
         response = self._post(
             u,
@@ -450,11 +220,11 @@ class Client(requests.Session):
 
     def create_container(self, image, command=None, hostname=None, user=None,
                          detach=False, stdin_open=False, tty=False,
-                         mem_limit=0, ports=None, environment=None, dns=None,
-                         volumes=None, volumes_from=None,
+                         mem_limit=None, ports=None, environment=None,
+                         dns=None, volumes=None, volumes_from=None,
                          network_disabled=False, name=None, entrypoint=None,
                          cpu_shares=None, working_dir=None, domainname=None,
-                         memswap_limit=0, cpuset=None, host_config=None,
+                         memswap_limit=None, cpuset=None, host_config=None,
                          mac_address=None, labels=None, volume_driver=None):
 
         if isinstance(volumes, six.string_types):
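Because mem_limit and memswap_limit now default to None (and are rejected at this level for API 1.19+, per the create_container_config hunk further down), the supported pattern is to pass them through the host config. A minimal sketch; the image and command are placeholders:

```python
import docker
from docker.utils import create_host_config

client = docker.Client(base_url='unix://var/run/docker.sock', version='1.19')

# Memory limits travel in the host config rather than as top-level
# create_container keyword arguments.
host_config = create_host_config(mem_limit='128m', memswap_limit='256m')
container = client.create_container(
    image='busybox', command='sleep 300', host_config=host_config
)
client.start(container)
```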
@@ -503,23 +273,12 @@
             'filters': filters
         }
 
-        return self._stream_helper(self.get(self._url('/events'),
-                                            params=params, stream=True),
-                                   decode=decode)
+        return self._stream_helper(
+            self.get(self._url('/events'), params=params, stream=True),
+            decode=decode
+        )
 
     @check_resource
-    def execute(self, container, cmd, detach=False, stdout=True, stderr=True,
-                stream=False, tty=False):
-        warnings.warn(
-            'Client.execute is being deprecated. Please use exec_create & '
-            'exec_start instead', DeprecationWarning
-        )
-        create_res = self.exec_create(
-            container, cmd, stdout, stderr, tty
-        )
-
-        return self.exec_start(create_res, detach, tty, stream)
-
     def exec_create(self, container, cmd, stdout=True, stderr=True, tty=False,
                     privileged=False, user=''):
         if utils.compare_version('1.15', self._version) < 0:
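The branch being merged here adds the user argument to exec_create; a short usage sketch (the container name and command are placeholders):

```python
# Create an exec instance that runs as a specific user, then start it.
resp = client.exec_create(container='my_container', cmd='whoami', user='nobody')
output = client.exec_start(resp['Id'])
print(output)  # expected to contain 'nobody' if the exec ran as that user
```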
@@ -582,17 +341,7 @@ class Client(requests.Session):
 
         res = self._post_json(self._url('/exec/{0}/start'.format(exec_id)),
                               data=data, stream=stream)
-        self._raise_for_status(res)
-        if stream:
-            return self._multiplexed_response_stream_helper(res)
-        elif six.PY3:
-            return bytes().join(
-                [x for x in self._multiplexed_buffer_helper(res)]
-            )
-        else:
-            return str().join(
-                [x for x in self._multiplexed_buffer_helper(res)]
-            )
+        return self._get_result_tty(stream, res, tty)
 
     @check_resource
     def export(self, container):
@@ -740,7 +489,9 @@ class Client(requests.Session):
     @check_resource
     def inspect_image(self, image):
         return self._result(
-            self._get(self._url("/images/{0}/json".format(image))),
+            self._get(
+                self._url("/images/{0}/json".format(image.replace('/', '%2F')))
+            ),
             True
         )
 
@@ -760,6 +511,12 @@ class Client(requests.Session):
 
     def login(self, username, password=None, email=None, registry=None,
               reauth=False, insecure_registry=False, dockercfg_path=None):
+        if insecure_registry:
+            warnings.warn(
+                INSECURE_REGISTRY_DEPRECATION_WARNING.format('login()'),
+                DeprecationWarning
+            )
+
         # If we don't have any auth data so far, try reloading the config file
         # one more time in case anything showed up in there.
         # If dockercfg_path is passed check to see if the config file exists,
@@ -805,16 +562,7 @@ class Client(requests.Session):
                 params['tail'] = tail
             url = self._url("/containers/{0}/logs".format(container))
             res = self._get(url, params=params, stream=stream)
-            if stream:
-                return self._multiplexed_response_stream_helper(res)
-            elif six.PY3:
-                return bytes().join(
-                    [x for x in self._multiplexed_buffer_helper(res)]
-                )
-            else:
-                return str().join(
-                    [x for x in self._multiplexed_buffer_helper(res)]
-                )
+            return self._get_result(container, stream, res)
         return self.attach(
             container,
             stdout=stdout,
@@ -854,11 +602,15 @@ class Client(requests.Session):
 
     def pull(self, repository, tag=None, stream=False,
              insecure_registry=False, auth_config=None):
+        if insecure_registry:
+            warnings.warn(
+                INSECURE_REGISTRY_DEPRECATION_WARNING.format('pull()'),
+                DeprecationWarning
+            )
+
         if not tag:
             repository, tag = utils.parse_repository_tag(repository)
-        registry, repo_name = auth.resolve_repository_name(
-            repository, insecure=insecure_registry
-        )
+        registry, repo_name = auth.resolve_repository_name(repository)
         if repo_name.count(":") == 1:
             repository, tag = repository.rsplit(":", 1)
 
@@ -901,11 +653,15 @@ class Client(requests.Session):
 
     def push(self, repository, tag=None, stream=False,
              insecure_registry=False):
+        if insecure_registry:
+            warnings.warn(
+                INSECURE_REGISTRY_DEPRECATION_WARNING.format('push()'),
+                DeprecationWarning
+            )
+
         if not tag:
             repository, tag = utils.parse_repository_tag(repository)
-        registry, repo_name = auth.resolve_repository_name(
-            repository, insecure=insecure_registry
-        )
+        registry, repo_name = auth.resolve_repository_name(repository)
         u = self._url("/images/{0}/push".format(repository))
         params = {
             'tag': tag
@@ -981,7 +737,7 @@ class Client(requests.Session):
 
     @check_resource
     def start(self, container, binds=None, port_bindings=None, lxc_conf=None,
-              publish_all_ports=False, links=None, privileged=False,
+              publish_all_ports=None, links=None, privileged=None,
              dns=None, dns_search=None, volumes_from=None, network_mode=None,
              restart_policy=None, cap_add=None, cap_drop=None, devices=None,
              extra_hosts=None, read_only=None, pid_mode=None, ipc_mode=None,
@@ -1023,7 +779,7 @@ class Client(requests.Session):
                 'ulimits is only supported for API version >= 1.18'
             )
 
-        start_config = utils.create_host_config(
+        start_config_kwargs = dict(
             binds=binds, port_bindings=port_bindings, lxc_conf=lxc_conf,
             publish_all_ports=publish_all_ports, links=links, dns=dns,
             privileged=privileged, dns_search=dns_search, cap_add=cap_add,
@@ -1032,16 +788,18 @@ class Client(requests.Session):
             extra_hosts=extra_hosts, read_only=read_only, pid_mode=pid_mode,
             ipc_mode=ipc_mode, security_opt=security_opt, ulimits=ulimits
         )
+        start_config = None
+
+        if any(v is not None for v in start_config_kwargs.values()):
+            if utils.compare_version('1.15', self._version) > 0:
+                warnings.warn(
+                    'Passing host config parameters in start() is deprecated. '
+                    'Please use host_config in create_container instead!',
+                    DeprecationWarning
+                )
+            start_config = utils.create_host_config(**start_config_kwargs)
 
         url = self._url("/containers/{0}/start".format(container))
-        if not start_config:
-            start_config = None
-        elif utils.compare_version('1.15', self._version) > 0:
-            warnings.warn(
-                'Passing host config parameters in start() is deprecated. '
-                'Please use host_config in create_container instead!',
-                DeprecationWarning
-            )
         res = self._post_json(url, data=start_config)
         self._raise_for_status(res)
 
docker/clientbase.py (new file, 277 lines added)
import json
import struct

import requests
import requests.exceptions
import six
import websocket


from . import constants
from . import errors
from .auth import auth
from .unixconn import unixconn
from .ssladapter import ssladapter
from .utils import utils, check_resource
from .tls import TLSConfig


class ClientBase(requests.Session):
    def __init__(self, base_url=None, version=None,
                 timeout=constants.DEFAULT_TIMEOUT_SECONDS, tls=False):
        super(ClientBase, self).__init__()

        if tls and not base_url.startswith('https://'):
            raise errors.TLSParameterError(
                'If using TLS, the base_url argument must begin with '
                '"https://".')

        self.base_url = base_url
        self.timeout = timeout

        self._auth_configs = auth.load_config()

        base_url = utils.parse_host(base_url)
        if base_url.startswith('http+unix://'):
            self._custom_adapter = unixconn.UnixAdapter(base_url, timeout)
            self.mount('http+docker://', self._custom_adapter)
            self.base_url = 'http+docker://localunixsocket'
        else:
            # Use SSLAdapter for the ability to specify SSL version
            if isinstance(tls, TLSConfig):
                tls.configure_client(self)
            elif tls:
                self._custom_adapter = ssladapter.SSLAdapter()
                self.mount('https://', self._custom_adapter)
            self.base_url = base_url

        # version detection needs to be after unix adapter mounting
        if version is None:
            self._version = constants.DEFAULT_DOCKER_API_VERSION
        elif isinstance(version, six.string_types):
            if version.lower() == 'auto':
                self._version = self._retrieve_server_version()
            else:
                self._version = version
        else:
            raise errors.DockerException(
                'Version parameter must be a string or None. Found {0}'.format(
                    type(version).__name__
                )
            )

    def _retrieve_server_version(self):
        try:
            return self.version(api_version=False)["ApiVersion"]
        except KeyError:
            raise errors.DockerException(
                'Invalid response from docker daemon: key "ApiVersion"'
                ' is missing.'
            )
        except Exception as e:
            raise errors.DockerException(
                'Error while fetching server API version: {0}'.format(e)
            )

    def _set_request_timeout(self, kwargs):
        """Prepare the kwargs for an HTTP request by inserting the timeout
        parameter, if not already present."""
        kwargs.setdefault('timeout', self.timeout)
        return kwargs

    def _post(self, url, **kwargs):
        return self.post(url, **self._set_request_timeout(kwargs))

    def _get(self, url, **kwargs):
        return self.get(url, **self._set_request_timeout(kwargs))

    def _delete(self, url, **kwargs):
        return self.delete(url, **self._set_request_timeout(kwargs))

    def _url(self, path, versioned_api=True):
        if versioned_api:
            return '{0}/v{1}{2}'.format(self.base_url, self._version, path)
        else:
            return '{0}{1}'.format(self.base_url, path)

    def _raise_for_status(self, response, explanation=None):
        """Raises stored :class:`APIError`, if one occurred."""
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 404:
                raise errors.NotFound(e, response, explanation=explanation)
            raise errors.APIError(e, response, explanation=explanation)

    def _result(self, response, json=False, binary=False):
        assert not (json and binary)
        self._raise_for_status(response)

        if json:
            return response.json()
        if binary:
            return response.content
        return response.text

    def _post_json(self, url, data, **kwargs):
        # Go <1.1 can't unserialize null to a string
        # so we do this disgusting thing here.
        data2 = {}
        if data is not None:
            for k, v in six.iteritems(data):
                if v is not None:
                    data2[k] = v

        if 'headers' not in kwargs:
            kwargs['headers'] = {}
        kwargs['headers']['Content-Type'] = 'application/json'
        return self._post(url, data=json.dumps(data2), **kwargs)

    def _attach_params(self, override=None):
        return override or {
            'stdout': 1,
            'stderr': 1,
            'stream': 1
        }

    @check_resource
    def _attach_websocket(self, container, params=None):
        url = self._url("/containers/{0}/attach/ws".format(container))
        req = requests.Request("POST", url, params=self._attach_params(params))
        full_url = req.prepare().url
        full_url = full_url.replace("http://", "ws://", 1)
        full_url = full_url.replace("https://", "wss://", 1)
        return self._create_websocket_connection(full_url)

    def _create_websocket_connection(self, url):
        return websocket.create_connection(url)

    def _get_raw_response_socket(self, response):
        self._raise_for_status(response)
        if six.PY3:
            sock = response.raw._fp.fp.raw
        else:
            sock = response.raw._fp.fp._sock
        try:
            # Keep a reference to the response to stop it being garbage
            # collected. If the response is garbage collected, it will
            # close TLS sockets.
            sock._response = response
        except AttributeError:
            # UNIX sockets can't have attributes set on them, but that's
            # fine because we won't be doing TLS over them
            pass

        return sock

    def _stream_helper(self, response, decode=False):
        """Generator for data coming from a chunked-encoded HTTP response."""
        if response.raw._fp.chunked:
            reader = response.raw
            while not reader.closed:
                # this read call will block until we get a chunk
                data = reader.read(1)
                if not data:
                    break
                if reader._fp.chunk_left:
                    data += reader.read(reader._fp.chunk_left)
                if decode:
                    if six.PY3:
                        data = data.decode('utf-8')
                    data = json.loads(data)
                yield data
        else:
            # Response isn't chunked, meaning we probably
            # encountered an error immediately
            yield self._result(response)

    def _multiplexed_buffer_helper(self, response):
        """A generator of multiplexed data blocks read from a buffered
        response."""
        buf = self._result(response, binary=True)
        walker = 0
        while True:
            if len(buf[walker:]) < 8:
                break
            _, length = struct.unpack_from('>BxxxL', buf[walker:])
            start = walker + constants.STREAM_HEADER_SIZE_BYTES
            end = start + length
            walker = end
            yield buf[start:end]

    def _multiplexed_response_stream_helper(self, response):
        """A generator of multiplexed data blocks coming from a response
        stream."""

        # Disable timeout on the underlying socket to prevent
        # Read timed out(s) for long running processes
        socket = self._get_raw_response_socket(response)
        if six.PY3:
            socket._sock.settimeout(None)
        else:
            socket.settimeout(None)

        while True:
            header = response.raw.read(constants.STREAM_HEADER_SIZE_BYTES)
            if not header:
                break
            _, length = struct.unpack('>BxxxL', header)
            if not length:
                continue
            data = response.raw.read(length)
            if not data:
                break
            yield data

    def _stream_raw_result_old(self, response):
        ''' Stream raw output for API versions below 1.6 '''
        self._raise_for_status(response)
        for line in response.iter_lines(chunk_size=1,
                                        decode_unicode=True):
            # filter out keep-alive new lines
            if line:
                yield line

    def _stream_raw_result(self, response):
        ''' Stream result for TTY-enabled container above API 1.6 '''
        self._raise_for_status(response)
        for out in response.iter_content(chunk_size=1, decode_unicode=True):
            yield out

    def _get_result(self, container, stream, res):
        cont = self.inspect_container(container)
        return self._get_result_tty(stream, res, cont['Config']['Tty'])

    def _get_result_tty(self, stream, res, is_tty):
        # Stream multi-plexing was only introduced in API v1.6. Anything
        # before that needs old-style streaming.
        if utils.compare_version('1.6', self._version) < 0:
            return self._stream_raw_result_old(res)

        # We should also use raw streaming (without keep-alives)
        # if we're dealing with a tty-enabled container.
        if is_tty:
            return self._stream_raw_result(res) if stream else \
                self._result(res, binary=True)

        self._raise_for_status(res)
        sep = six.binary_type()
        if stream:
            return self._multiplexed_response_stream_helper(res)
        else:
            return sep.join(
                [x for x in self._multiplexed_buffer_helper(res)]
            )

    def get_adapter(self, url):
        try:
            return super(ClientBase, self).get_adapter(url)
        except requests.exceptions.InvalidSchema as e:
            if self._custom_adapter:
                return self._custom_adapter
            else:
                raise e

    @property
    def api_version(self):
        return self._version
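As background for _multiplexed_buffer_helper and _multiplexed_response_stream_helper above: each block in Docker's multiplexed attach/logs stream is prefixed by an 8-byte header (STREAM_HEADER_SIZE_BYTES), one stream-type byte, three padding bytes, and a big-endian 32-bit payload length, which is exactly what '>BxxxL' unpacks. A self-contained sketch with an invented frame:

```python
import struct

# Type 1 (stdout), three padding bytes, big-endian length 6, then the payload.
frame = b'\x01\x00\x00\x00' + struct.pack('>L', 6) + b'hello\n'

stream_type, length = struct.unpack_from('>BxxxL', frame)
payload = frame[8:8 + length]
print(stream_type, length, payload)  # 1 6 b'hello\n'
```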
docker/constants.py
@@ -4,3 +4,7 @@ STREAM_HEADER_SIZE_BYTES = 8
 CONTAINER_LIMITS_KEYS = [
     'memory', 'memswap', 'cpushares', 'cpusetcpus'
 ]
+
+INSECURE_REGISTRY_DEPRECATION_WARNING = \
+    'The `insecure_registry` argument to {} ' \
+    'is deprecated and non-functional. Please remove it.'
docker/errors.py
@@ -53,6 +53,10 @@ class DockerException(Exception):
     pass
 
 
+class NotFound(APIError):
+    pass
+
+
 class InvalidVersion(DockerException):
     pass
 
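With 404s now surfacing as their own exception class, callers can distinguish "missing" from other API failures; an illustrative sketch (the container name is a placeholder):

```python
import docker
from docker import errors

client = docker.Client()
try:
    client.inspect_container('no_such_container')
except errors.NotFound:
    print('container does not exist')
except errors.APIError as exc:
    print('some other API failure:', exc)
```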
docker/utils/__init__.py
@@ -2,7 +2,7 @@ from .utils import (
     compare_version, convert_port_bindings, convert_volume_binds,
     mkbuildcontext, tar, parse_repository_tag, parse_host,
     kwargs_from_env, convert_filters, create_host_config,
-    create_container_config, parse_bytes, ping_registry
+    create_container_config, parse_bytes, ping_registry, parse_env_file
 ) # flake8: noqa
 
 from .types import Ulimit, LogConfig # flake8: noqa
docker/utils/types.py
@@ -5,9 +5,10 @@ class LogConfigTypesEnum(object):
     _values = (
         'json-file',
         'syslog',
+        'journald',
         'none'
     )
-    JSON, SYSLOG, NONE = _values
+    JSON, SYSLOG, JOURNALD, NONE = _values
 
 
 class DictType(dict):
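With 'journald' added to the enum, a log configuration such as the following should validate; a small sketch assuming the LogConfig type/config keyword form used elsewhere in docker-py:

```python
from docker.utils import LogConfig, create_host_config

# Route container logs to journald instead of the default json-file driver.
log_config = LogConfig(type=LogConfig.types.JOURNALD, config={})
host_config = create_host_config(log_config=log_config)
```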
docker/utils/utils.py
@@ -19,6 +19,7 @@ import json
 import shlex
 import tarfile
 import tempfile
+import warnings
 from distutils.version import StrictVersion
 from fnmatch import fnmatch
 from datetime import datetime
@@ -120,6 +121,11 @@ def compare_version(v1, v2):
 
 
 def ping_registry(url):
+    warnings.warn(
+        'The `ping_registry` method is deprecated and will be removed.',
+        DeprecationWarning
+    )
+
     return ping(url + '/v2/', [401]) or ping(url + '/v1/_ping')
 
 
@@ -333,9 +339,9 @@ def convert_filters(filters):
     return json.dumps(result)
 
 
-def datetime_to_timestamp(dt=datetime.now()):
-    """Convert a datetime in local timezone to a unix timestamp"""
-    delta = dt - datetime.fromtimestamp(0)
+def datetime_to_timestamp(dt):
+    """Convert a UTC datetime to a Unix timestamp"""
+    delta = dt - datetime.utcfromtimestamp(0)
     return delta.seconds + delta.days * 24 * 3600
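A quick usage sketch of the UTC-based conversion (the printed value depends on when it runs):

```python
from datetime import datetime, timedelta

from docker.utils.utils import datetime_to_timestamp

five_minutes_ago = datetime.utcnow() - timedelta(minutes=5)
# Seconds since the Unix epoch, measured against the UTC epoch rather than
# the local-timezone epoch the old default-argument version used.
print(datetime_to_timestamp(five_minutes_ago))
```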
@@ -383,10 +389,21 @@ def create_host_config(
     dns=None, dns_search=None, volumes_from=None, network_mode=None,
     restart_policy=None, cap_add=None, cap_drop=None, devices=None,
     extra_hosts=None, read_only=None, pid_mode=None, ipc_mode=None,
-    security_opt=None, ulimits=None, log_config=None
+    security_opt=None, ulimits=None, log_config=None, mem_limit=None,
+    memswap_limit=None
 ):
     host_config = {}
 
+    if mem_limit is not None:
+        if isinstance(mem_limit, six.string_types):
+            mem_limit = parse_bytes(mem_limit)
+        host_config['Memory'] = mem_limit
+
+    if memswap_limit is not None:
+        if isinstance(memswap_limit, six.string_types):
+            memswap_limit = parse_bytes(memswap_limit)
+        host_config['MemorySwap'] = memswap_limit
+
     if pid_mode not in (None, 'host'):
         raise errors.DockerException(
             'Invalid value for pid param: {0}'.format(pid_mode)
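A short sketch of what the two new keyword arguments accept: either integer byte counts or unit strings, which are normalized through parse_bytes into the 'Memory' and 'MemorySwap' keys of the returned dict:

```python
from docker.utils import create_host_config

# Unit strings and raw byte counts are both accepted.
print(create_host_config(mem_limit='128m', memswap_limit='256m'))
print(create_host_config(mem_limit=134217728))
```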
@@ -411,6 +428,8 @@ def create_host_config(
 
     if network_mode:
         host_config['NetworkMode'] = network_mode
+    elif network_mode is None:
+        host_config['NetworkMode'] = 'default'
 
     if restart_policy:
         host_config['RestartPolicy'] = restart_policy
@@ -501,16 +520,42 @@ def create_host_config(
     return host_config
 
 
+def parse_env_file(env_file):
+    """
+    Reads a line-separated environment file.
+    The format of each line should be "key=value".
+    """
+    environment = {}
+
+    with open(env_file, 'r') as f:
+        for line in f:
+
+            if line[0] == '#':
+                continue
+
+            parse_line = line.strip().split('=')
+            if len(parse_line) == 2:
+                k, v = parse_line
+                environment[k] = v
+            else:
+                raise errors.DockerException(
+                    'Invalid line in environment file {0}:\n{1}'.format(
+                        env_file, line))
+
+    return environment
+
+
 def create_container_config(
     version, image, command, hostname=None, user=None, detach=False,
-    stdin_open=False, tty=False, mem_limit=0, ports=None, environment=None,
+    stdin_open=False, tty=False, mem_limit=None, ports=None, environment=None,
     dns=None, volumes=None, volumes_from=None, network_disabled=False,
     entrypoint=None, cpu_shares=None, working_dir=None, domainname=None,
-    memswap_limit=0, cpuset=None, host_config=None, mac_address=None,
+    memswap_limit=None, cpuset=None, host_config=None, mac_address=None,
     labels=None, volume_driver=None
 ):
     if isinstance(command, six.string_types):
         command = shlex.split(str(command))
 
     if isinstance(environment, dict):
         environment = [
             six.text_type('{0}={1}').format(k, v)
@@ -522,10 +567,24 @@ def create_container_config(
             'labels were only introduced in API version 1.18'
         )
 
-    if volume_driver is not None and compare_version('1.19', version) < 0:
-        raise errors.InvalidVersion(
-            'Volume drivers were only introduced in API version 1.19'
-        )
+    if compare_version('1.19', version) < 0:
+        if volume_driver is not None:
+            raise errors.InvalidVersion(
+                'Volume drivers were only introduced in API version 1.19'
+            )
+        mem_limit = mem_limit if mem_limit is not None else 0
+        memswap_limit = memswap_limit if memswap_limit is not None else 0
+    else:
+        if mem_limit is not None:
+            raise errors.InvalidVersion(
+                'mem_limit has been moved to host_config in API version 1.19'
+            )
+
+        if memswap_limit is not None:
+            raise errors.InvalidVersion(
+                'memswap_limit has been moved to host_config in API '
+                'version 1.19'
+            )
 
     if isinstance(labels, list):
         labels = dict((lbl, six.text_type('')) for lbl in labels)
docker/version.py
@@ -1,2 +1,2 @@
-version = "1.3.0-dev"
+version = "1.4.0-dev"
 version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
docs/api.md (61 lines changed)
@@ -30,7 +30,7 @@ the entire backlog.
 * container (str): The container to attach to
 * stdout (bool): Get STDOUT
 * stderr (bool): Get STDERR
-* stream (bool): Return an interator
+* stream (bool): Return an iterator
 * logs (bool): Get all previous output
 
 **Returns** (generator or str): The logs or output for the image
@@ -70,7 +70,7 @@ correct value (e.g `gzip`).
     - memory (int): set memory limit for build
     - memswap (int): Total memory (memory + swap), -1 to disable swap
     - cpushares (int): CPU shares (relative weight)
-    - cpusetcpus (str): CPUs in which to allow exection, e.g., `"0-3"`, `"0,1"`
+    - cpusetcpus (str): CPUs in which to allow execution, e.g., `"0-3"`, `"0,1"`
 * decode (bool): If set to `True`, the returned stream will be decoded into
   dicts on the fly. Default `False`.
 
@@ -123,7 +123,7 @@ Identical to the `docker commit` command.
 * tag (str): The tag to push
 * message (str): A commit message
 * author (str): The name of the author
-* conf (dict): The configuraton for the container. See the [Docker remote api](
+* conf (dict): The configuration for the container. See the [Docker remote api](
 https://docs.docker.com/reference/api/docker_remote_api/) for full details.
 
 ## containers
@@ -184,7 +184,7 @@ information on how to create port bindings and volume mappings.
 
 The `mem_limit` variable accepts float values (which represent the memory limit
 of the created container in bytes) or a string with a units identification char
-('100000b', 1000k', 128m', '1g'). If a string is specified without a units
+('100000b', '1000k', '128m', '1g'). If a string is specified without a units
 character, bytes are assumed as an intended unit.
 
 `volumes_from` and `dns` arguments raise [TypeError](
@@ -234,6 +234,27 @@ from. Optionally a single string joining container id's with commas
  'Warnings': None}
 ```
 
+### parse_env_file
+
+A utility for parsing an environment file.
+
+The expected format of the file is as follows:
+
+```
+USERNAME=jdoe
+PASSWORD=secret
+```
+
+The utility can be used as follows:
+
+```python
+>> import docker.utils
+>> my_envs = docker.utils.parse_env_file('/path/to/file')
+>> docker.utils.create_container_config('1.18', '_mongodb', 'foobar', environment=my_envs)
+```
+
+You can now use this with 'environment' for `create_container`.
+
 ## diff
 
 Inspect changes on a container's filesystem
@@ -251,8 +272,8 @@ function return a blocking generator you can iterate over to retrieve events as
 
 **Params**:
 
-* since (datetime or int): get events from this point
-* until (datetime or int): get events until this point
+* since (UTC datetime or int): get events from this point
+* until (UTC datetime or int): get events until this point
 * filters (dict): filter the events by event time, container or image
 * decode (bool): If set to true, stream will be decoded into dicts on the
   fly. False by default.
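A usage sketch matching the clarified since/until contract (naive datetimes are interpreted as UTC):

```python
from datetime import datetime, timedelta

import docker

client = docker.Client()
since = datetime.utcnow() - timedelta(minutes=10)

# Datetimes are converted with datetime_to_timestamp(), i.e. treated as UTC.
for event in client.events(since=since, until=datetime.utcnow(), decode=True):
    print(event)
```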
@@ -398,7 +419,7 @@ src will be treated as a URL instead to fetch the image from. You can also pass
 an open file handle as 'src', in which case the data will be read from that
 file.
 
-If `src` is unset but `image` is set, the `image` paramater will be taken as
+If `src` is unset but `image` is set, the `image` parameter will be taken as
 the name of an existing image to import from.
 
 **Params**:
@@ -512,7 +533,16 @@ Kill a container or send a signal to a container
 **Params**:
 
 * container (str): The container to kill
-* signal (str or int): The singal to send. Defaults to `SIGKILL`
+* signal (str or int): The signal to send. Defaults to `SIGKILL`
+
+## load_image
+
+Load an image that was previously saved using `Client.get_image`
+(or `docker save`). Similar to `docker load`.
+
+**Params**:
+
+* data (binary): Image data to be loaded
 
 ## login
|
||||||
Similar to the `docker start` command, but doesn't support attach options. Use
|
Similar to the `docker start` command, but doesn't support attach options. Use
|
||||||
`.logs()` to recover `stdout`/`stderr`.
|
`.logs()` to recover `stdout`/`stderr`.
|
||||||
|
|
||||||
|
**Params**:
|
||||||
|
|
||||||
|
* container (str): The container to start
|
||||||
|
|
||||||
**Deprecation warning:** For API version > 1.15, it is highly recommended to
|
**Deprecation warning:** For API version > 1.15, it is highly recommended to
|
||||||
provide host config options in the
|
provide host config options in the
|
||||||
[`host_config` parameter of `create_container`](#create_container)
|
[`host_config` parameter of `create_container`](#create_container)
|
||||||
|
|
@@ -740,7 +774,7 @@ This will stream statistics for a specific container.
 
 **Params**:
 
-* container (str): The container to start
+* container (str): The container to stream statistics for
 * decode (bool): If set to true, stream will be decoded into dicts on the
   fly. False by default.
 
|
||||||
|
|
||||||
## wait
|
## wait
|
||||||
Identical to the `docker wait` command. Block until a container stops, then
|
Identical to the `docker wait` command. Block until a container stops, then
|
||||||
print its exit code. Returns the value `-1` if no `StatusCode` is returned by
|
return its exit code. Returns the value `-1` if the API responds without a
|
||||||
the API.
|
`StatusCode` attribute.
|
||||||
|
|
||||||
If `container` a dict, the `Id` key is used.
|
If `container` is a dict, the `Id` key is used.
|
||||||
|
|
||||||
|
If the timeout value is exceeded, a `requests.exceptions.ReadTimeout`
|
||||||
|
exception will be raised.
|
||||||
|
|
||||||
**Params**:
|
**Params**:
|
||||||
|
|
||||||
|
|
|
||||||
|
|
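Sketch of the documented behaviour (editor's illustration; the container name and timeout value are assumptions):

```python
import docker
import requests

client = docker.Client(timeout=120)
try:
    exit_code = client.wait('my_container')
except requests.exceptions.ReadTimeout:
    exit_code = None    # the container outlived the client timeout
print(exit_code)        # -1 if the API response carried no StatusCode
```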
@ -1,6 +1,77 @@
|
||||||
Change Log
|
Change Log
|
||||||
==========
|
==========
|
||||||
|
|
||||||
|
1.3.1
|
||||||
|
-----
|
||||||
|
|
||||||
|
[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.3.1+is%3Aclosed)
|
||||||
|
|
||||||
|
### Bugfixes
|
||||||
|
|
||||||
|
* Fixed a bug where empty chunks in streams were misinterpreted as EOF.
|
||||||
|
* `datetime` arguments passed to `Client.events` parameters `since` and
|
||||||
|
`until` are now always considered to be UTC.
|
||||||
|
* Fixed a bug with Docker 1.7.x where the wrong auth headers were being passed
|
||||||
|
in `Client.build`, failing builds that depended on private images.
|
||||||
|
* `Client.exec_create` can now retrieve the `Id` key from a dictionary for its
|
||||||
|
container param.
|
||||||
|
|
||||||
|
### Miscellaneous
|
||||||
|
|
||||||
|
* 404 API status now raises `docker.errors.NotFound`. This exception inherits
|
||||||
|
from `APIError`, which was used previously.
|
||||||
|
* Docs fixes
|
||||||
|
* Test fixes
|
||||||
|
|
||||||
|
1.3.0
|
||||||
|
-----
|
||||||
|
|
||||||
|
[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.3.0+is%3Aclosed)
|
||||||
|
|
||||||
|
### Deprecation warning
|
||||||
|
|
||||||
|
* As announced in the 1.2.0 release, `Client.execute` has been removed in favor of
|
||||||
|
`Client.exec_create` and `Client.exec_start`.
|
||||||
|
|
||||||
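For readers migrating off `Client.execute`, a hedged sketch of the replacement pair named above (container name and command are placeholders):

```python
import docker

client = docker.Client()
res = client.exec_create(container='my_container', cmd=['echo', 'hello'])
output = client.exec_start(res['Id'])
print(output)
```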
|
### Features
|
||||||
|
|
||||||
|
* `extra_hosts` parameter in host config can now also be provided as a list.
|
||||||
|
* Added support for `memory_limit` and `memswap_limit` in host config to
|
||||||
|
comply with recent deprecations.
|
||||||
|
* Added support for `volume_driver` in `Client.create_container`
|
||||||
|
* Added support for advanced modes in volume binds (using the `mode` key)
|
||||||
|
* Added support for `decode` in `Client.build` (decodes JSON stream on the fly)
|
||||||
|
* docker-py will now look for login configuration under the new config path,
|
||||||
|
and fall back to the old `~/.dockercfg` path if not present.
|
||||||
|
|
||||||
|
### Bugfixes
|
||||||
|
|
||||||
|
* Configuration file lookup now also works on platforms that don't define a
|
||||||
|
`$HOME` environment variable.
|
||||||
|
* Fixed an issue where pinging a v2 private registry wasn't working properly,
|
||||||
|
preventing users from pushing and pulling.
|
||||||
|
* `pull` parameter in `Client.build` now defaults to `False`. Fixes a bug where
|
||||||
|
the default options would try to force a pull of non-remote images.
|
||||||
|
* Fixed a bug where getting logs from tty-enabled containers wasn't working
|
||||||
|
properly with more recent versions of Docker.
|
||||||
|
* `Client.push` and `Client.pull` will now raise exceptions if the HTTP
|
||||||
|
status indicates an error.
|
||||||
|
* Fixed a bug with adapter lookup when using the Unix socket adapter
|
||||||
|
(this affected some weird edge cases, see issue #647 for details)
|
||||||
|
* Fixed a bug where providing `timeout=None` to `Client.stop` would result
|
||||||
|
in an exception despite the use case being valid.
|
||||||
|
* Added `git@` to the list of valid prefixes for remote build paths.
|
||||||
|
|
||||||
|
### Dependencies
|
||||||
|
|
||||||
|
* The websocket-client dependency has been updated to a more recent version.
|
||||||
|
This new version also supports Python 3.x, making `attach_socket` available
|
||||||
|
on those versions as well.
|
||||||
|
|
||||||
|
### Documentation
|
||||||
|
|
||||||
|
* Various fixes
|
||||||
|
|
||||||
1.2.3
|
1.2.3
|
||||||
-----
|
-----
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -91,6 +91,8 @@ for example:
|
||||||
* ulimits (list): A list of dicts or `docker.utils.Ulimit` objects. A list
|
* ulimits (list): A list of dicts or `docker.utils.Ulimit` objects. A list
|
||||||
of ulimits to be set in the container.
|
of ulimits to be set in the container.
|
||||||
* log_config (`docker.utils.LogConfig` or dict): Logging configuration to container
|
* log_config (`docker.utils.LogConfig` or dict): Logging configuration to container
|
||||||
|
* mem_limit (str or num): Maximum amount of memory the container is allowed to consume (e.g. `'1g'`).
|
||||||
|
* memswap_limit (str or num): Maximum amount of memory + swap a container is allowed to consume.
|
||||||
|
|
||||||
**Returns** (dict) HostConfig dictionary
|
**Returns** (dict) HostConfig dictionary
|
||||||
|
|
||||||
|
|
|
||||||
|
|
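A tiny sketch of the two new host-config keys documented above (the values are examples only):

```python
from docker.utils import create_host_config

hc = create_host_config(mem_limit='1g', memswap_limit='2g')
# hc is a plain dict, roughly {'Memory': 1073741824, 'MemorySwap': 2147483648}
```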
@ -1,3 +1,3 @@
|
||||||
requests==2.5.3
|
requests==2.5.3
|
||||||
six>=1.3.0
|
six>=1.4.0
|
||||||
websocket-client==0.32.0
|
websocket-client==0.32.0
|
||||||
|
|
|
||||||
6
setup.py
|
|
@ -8,12 +8,10 @@ SOURCE_DIR = os.path.join(ROOT_DIR)
|
||||||
|
|
||||||
requirements = [
|
requirements = [
|
||||||
'requests >= 2.5.2',
|
'requests >= 2.5.2',
|
||||||
'six >= 1.3.0',
|
'six >= 1.4.0',
|
||||||
|
'websocket-client >= 0.32.0',
|
||||||
]
|
]
|
||||||
|
|
||||||
if sys.version_info[0] < 3:
|
|
||||||
requirements.append('websocket-client >= 0.32.0')
|
|
||||||
|
|
||||||
exec(open('docker/version.py').read())
|
exec(open('docker/version.py').read())
|
||||||
|
|
||||||
with open('./test-requirements.txt') as test_reqs_txt:
|
with open('./test-requirements.txt') as test_reqs_txt:
|
||||||
|
|
|
||||||
|
|
@ -129,11 +129,11 @@ def post_fake_create_container():
|
||||||
return status_code, response
|
return status_code, response
|
||||||
|
|
||||||
|
|
||||||
def get_fake_inspect_container():
|
def get_fake_inspect_container(tty=False):
|
||||||
status_code = 200
|
status_code = 200
|
||||||
response = {
|
response = {
|
||||||
'Id': FAKE_CONTAINER_ID,
|
'Id': FAKE_CONTAINER_ID,
|
||||||
'Config': {'Privileged': True},
|
'Config': {'Privileged': True, 'Tty': tty},
|
||||||
'ID': FAKE_CONTAINER_ID,
|
'ID': FAKE_CONTAINER_ID,
|
||||||
'Image': 'busybox:latest',
|
'Image': 'busybox:latest',
|
||||||
"State": {
|
"State": {
|
||||||
|
|
|
||||||
|
|
@ -181,7 +181,9 @@ class TestCreateContainerWithBinds(BaseTestCase):
|
||||||
container = self.client.create_container(
|
container = self.client.create_container(
|
||||||
'busybox',
|
'busybox',
|
||||||
['ls', mount_dest], volumes={mount_dest: {}},
|
['ls', mount_dest], volumes={mount_dest: {}},
|
||||||
host_config=create_host_config(binds=binds)
|
host_config=create_host_config(
|
||||||
|
binds=binds, network_mode='none'
|
||||||
|
)
|
||||||
)
|
)
|
||||||
container_id = container['Id']
|
container_id = container['Id']
|
||||||
self.client.start(container_id)
|
self.client.start(container_id)
|
||||||
|
|
@ -221,7 +223,9 @@ class TestCreateContainerWithRoBinds(BaseTestCase):
|
||||||
container = self.client.create_container(
|
container = self.client.create_container(
|
||||||
'busybox',
|
'busybox',
|
||||||
['ls', mount_dest], volumes={mount_dest: {}},
|
['ls', mount_dest], volumes={mount_dest: {}},
|
||||||
host_config=create_host_config(binds=binds)
|
host_config=create_host_config(
|
||||||
|
binds=binds, network_mode='none'
|
||||||
|
)
|
||||||
)
|
)
|
||||||
container_id = container['Id']
|
container_id = container['Id']
|
||||||
self.client.start(container_id)
|
self.client.start(container_id)
|
||||||
|
|
@ -242,6 +246,7 @@ class TestCreateContainerWithRoBinds(BaseTestCase):
|
||||||
self.assertFalse(inspect_data['VolumesRW'][mount_dest])
|
self.assertFalse(inspect_data['VolumesRW'][mount_dest])
|
||||||
|
|
||||||
|
|
||||||
|
@unittest.skipIf(NOT_ON_HOST, 'Tests running inside a container; no syslog')
|
||||||
class TestCreateContainerWithLogConfig(BaseTestCase):
|
class TestCreateContainerWithLogConfig(BaseTestCase):
|
||||||
def runTest(self):
|
def runTest(self):
|
||||||
config = docker.utils.LogConfig(
|
config = docker.utils.LogConfig(
|
||||||
|
|
@ -272,7 +277,9 @@ class TestCreateContainerReadOnlyFs(BaseTestCase):
|
||||||
def runTest(self):
|
def runTest(self):
|
||||||
ctnr = self.client.create_container(
|
ctnr = self.client.create_container(
|
||||||
'busybox', ['mkdir', '/shrine'],
|
'busybox', ['mkdir', '/shrine'],
|
||||||
host_config=create_host_config(read_only=True)
|
host_config=create_host_config(
|
||||||
|
read_only=True, network_mode='none'
|
||||||
|
)
|
||||||
)
|
)
|
||||||
self.assertIn('Id', ctnr)
|
self.assertIn('Id', ctnr)
|
||||||
self.tmp_containers.append(ctnr['Id'])
|
self.tmp_containers.append(ctnr['Id'])
|
||||||
|
|
@ -346,7 +353,9 @@ class TestStartContainerWithDictInsteadOfId(BaseTestCase):
|
||||||
class TestCreateContainerPrivileged(BaseTestCase):
|
class TestCreateContainerPrivileged(BaseTestCase):
|
||||||
def runTest(self):
|
def runTest(self):
|
||||||
res = self.client.create_container(
|
res = self.client.create_container(
|
||||||
'busybox', 'true', host_config=create_host_config(privileged=True)
|
'busybox', 'true', host_config=create_host_config(
|
||||||
|
privileged=True, network_mode='none'
|
||||||
|
)
|
||||||
)
|
)
|
||||||
self.assertIn('Id', res)
|
self.assertIn('Id', res)
|
||||||
self.tmp_containers.append(res['Id'])
|
self.tmp_containers.append(res['Id'])
|
||||||
|
|
@ -590,7 +599,9 @@ class TestPort(BaseTestCase):
|
||||||
|
|
||||||
container = self.client.create_container(
|
container = self.client.create_container(
|
||||||
'busybox', ['sleep', '60'], ports=list(port_bindings.keys()),
|
'busybox', ['sleep', '60'], ports=list(port_bindings.keys()),
|
||||||
host_config=create_host_config(port_bindings=port_bindings)
|
host_config=create_host_config(
|
||||||
|
port_bindings=port_bindings, network_mode='bridge'
|
||||||
|
)
|
||||||
)
|
)
|
||||||
id = container['Id']
|
id = container['Id']
|
||||||
|
|
||||||
|
|
@ -716,7 +727,9 @@ class TestCreateContainerWithVolumesFrom(BaseTestCase):
|
||||||
)
|
)
|
||||||
res2 = self.client.create_container(
|
res2 = self.client.create_container(
|
||||||
'busybox', 'cat', detach=True, stdin_open=True,
|
'busybox', 'cat', detach=True, stdin_open=True,
|
||||||
host_config=create_host_config(volumes_from=vol_names)
|
host_config=create_host_config(
|
||||||
|
volumes_from=vol_names, network_mode='none'
|
||||||
|
)
|
||||||
)
|
)
|
||||||
container3_id = res2['Id']
|
container3_id = res2['Id']
|
||||||
self.tmp_containers.append(container3_id)
|
self.tmp_containers.append(container3_id)
|
||||||
|
|
@ -759,7 +772,8 @@ class TestCreateContainerWithLinks(BaseTestCase):
|
||||||
|
|
||||||
res2 = self.client.create_container(
|
res2 = self.client.create_container(
|
||||||
'busybox', 'env', host_config=create_host_config(
|
'busybox', 'env', host_config=create_host_config(
|
||||||
links={link_path1: link_alias1, link_path2: link_alias2}
|
links={link_path1: link_alias1, link_path2: link_alias2},
|
||||||
|
network_mode='none'
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
container3_id = res2['Id']
|
container3_id = res2['Id']
|
||||||
|
|
@ -780,7 +794,8 @@ class TestRestartingContainer(BaseTestCase):
|
||||||
def runTest(self):
|
def runTest(self):
|
||||||
container = self.client.create_container(
|
container = self.client.create_container(
|
||||||
'busybox', ['sleep', '2'], host_config=create_host_config(
|
'busybox', ['sleep', '2'], host_config=create_host_config(
|
||||||
restart_policy={"Name": "always", "MaximumRetryCount": 0}
|
restart_policy={"Name": "always", "MaximumRetryCount": 0},
|
||||||
|
network_mode='none'
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
id = container['Id']
|
id = container['Id']
|
||||||
|
|
@ -906,8 +921,8 @@ class TestRunContainerStreaming(BaseTestCase):
|
||||||
id = container['Id']
|
id = container['Id']
|
||||||
self.client.start(id)
|
self.client.start(id)
|
||||||
self.tmp_containers.append(id)
|
self.tmp_containers.append(id)
|
||||||
socket = self.client.attach_socket(container, ws=False)
|
sock = self.client.attach_socket(container, ws=False)
|
||||||
self.assertTrue(socket.fileno() > -1)
|
self.assertTrue(sock.fileno() > -1)
|
||||||
|
|
||||||
|
|
||||||
class TestPauseUnpauseContainer(BaseTestCase):
|
class TestPauseUnpauseContainer(BaseTestCase):
|
||||||
|
|
@ -943,7 +958,7 @@ class TestCreateContainerWithHostPidMode(BaseTestCase):
|
||||||
def runTest(self):
|
def runTest(self):
|
||||||
ctnr = self.client.create_container(
|
ctnr = self.client.create_container(
|
||||||
'busybox', 'true', host_config=create_host_config(
|
'busybox', 'true', host_config=create_host_config(
|
||||||
pid_mode='host'
|
pid_mode='host', network_mode='none'
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
self.assertIn('Id', ctnr)
|
self.assertIn('Id', ctnr)
|
||||||
|
|
@ -978,7 +993,7 @@ class TestRemoveLink(BaseTestCase):
|
||||||
|
|
||||||
container2 = self.client.create_container(
|
container2 = self.client.create_container(
|
||||||
'busybox', 'cat', host_config=create_host_config(
|
'busybox', 'cat', host_config=create_host_config(
|
||||||
links={link_path: link_alias}
|
links={link_path: link_alias}, network_mode='none'
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
container2_id = container2['Id']
|
container2_id = container2['Id']
|
||||||
|
|
@ -1390,8 +1405,8 @@ class TestLoadConfig(BaseTestCase):
|
||||||
f.write('email = sakuya@scarlet.net')
|
f.write('email = sakuya@scarlet.net')
|
||||||
f.close()
|
f.close()
|
||||||
cfg = docker.auth.load_config(cfg_path)
|
cfg = docker.auth.load_config(cfg_path)
|
||||||
self.assertNotEqual(cfg[docker.auth.INDEX_URL], None)
|
self.assertNotEqual(cfg[docker.auth.INDEX_NAME], None)
|
||||||
cfg = cfg[docker.auth.INDEX_URL]
|
cfg = cfg[docker.auth.INDEX_NAME]
|
||||||
self.assertEqual(cfg['username'], 'sakuya')
|
self.assertEqual(cfg['username'], 'sakuya')
|
||||||
self.assertEqual(cfg['password'], 'izayoi')
|
self.assertEqual(cfg['password'], 'izayoi')
|
||||||
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
|
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
|
||||||
|
|
@ -1420,7 +1435,7 @@ class TestLoadJSONConfig(BaseTestCase):
|
||||||
|
|
||||||
class TestAutoDetectVersion(unittest.TestCase):
|
class TestAutoDetectVersion(unittest.TestCase):
|
||||||
def test_client_init(self):
|
def test_client_init(self):
|
||||||
client = docker.Client(version='auto')
|
client = docker.Client(base_url=DEFAULT_BASE_URL, version='auto')
|
||||||
client_version = client._version
|
client_version = client._version
|
||||||
api_version = client.version(api_version=False)['ApiVersion']
|
api_version = client.version(api_version=False)['ApiVersion']
|
||||||
self.assertEqual(client_version, api_version)
|
self.assertEqual(client_version, api_version)
|
||||||
|
|
@ -1429,7 +1444,7 @@ class TestAutoDetectVersion(unittest.TestCase):
|
||||||
client.close()
|
client.close()
|
||||||
|
|
||||||
def test_auto_client(self):
|
def test_auto_client(self):
|
||||||
client = docker.AutoVersionClient()
|
client = docker.AutoVersionClient(base_url=DEFAULT_BASE_URL)
|
||||||
client_version = client._version
|
client_version = client._version
|
||||||
api_version = client.version(api_version=False)['ApiVersion']
|
api_version = client.version(api_version=False)['ApiVersion']
|
||||||
self.assertEqual(client_version, api_version)
|
self.assertEqual(client_version, api_version)
|
||||||
|
|
@ -1437,7 +1452,7 @@ class TestAutoDetectVersion(unittest.TestCase):
|
||||||
self.assertEqual(client_version, api_version_2)
|
self.assertEqual(client_version, api_version_2)
|
||||||
client.close()
|
client.close()
|
||||||
with self.assertRaises(docker.errors.DockerException):
|
with self.assertRaises(docker.errors.DockerException):
|
||||||
docker.AutoVersionClient(version='1.11')
|
docker.AutoVersionClient(base_url=DEFAULT_BASE_URL, version='1.11')
|
||||||
|
|
||||||
|
|
||||||
class TestConnectionTimeout(unittest.TestCase):
|
class TestConnectionTimeout(unittest.TestCase):
|
||||||
|
|
@ -1501,12 +1516,17 @@ class TestRegressions(BaseTestCase):
|
||||||
result = self.client.containers(all=True, trunc=True)
|
result = self.client.containers(all=True, trunc=True)
|
||||||
self.assertEqual(len(result[0]['Id']), 12)
|
self.assertEqual(len(result[0]['Id']), 12)
|
||||||
|
|
||||||
|
def test_647(self):
|
||||||
|
with self.assertRaises(docker.errors.APIError):
|
||||||
|
self.client.inspect_image('gensokyo.jp//kirisame')
|
||||||
|
|
||||||
def test_649(self):
|
def test_649(self):
|
||||||
self.client.timeout = None
|
self.client.timeout = None
|
||||||
ctnr = self.client.create_container('busybox', ['sleep', '2'])
|
ctnr = self.client.create_container('busybox', ['sleep', '2'])
|
||||||
self.client.start(ctnr)
|
self.client.start(ctnr)
|
||||||
self.client.stop(ctnr)
|
self.client.stop(ctnr)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
c = docker.Client(base_url=DEFAULT_BASE_URL)
|
c = docker.Client(base_url=DEFAULT_BASE_URL)
|
||||||
c.pull('busybox')
|
c.pull('busybox')
|
||||||
|
|
|
||||||
163
tests/test.py
|
|
@ -69,6 +69,14 @@ def fake_resolve_authconfig(authconfig, registry=None):
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def fake_inspect_container(self, container, tty=False):
|
||||||
|
return fake_api.get_fake_inspect_container(tty=tty)[1]
|
||||||
|
|
||||||
|
|
||||||
|
def fake_inspect_container_tty(self, container):
|
||||||
|
return fake_inspect_container(self, container, tty=True)
|
||||||
|
|
||||||
|
|
||||||
def fake_resp(url, data=None, **kwargs):
|
def fake_resp(url, data=None, **kwargs):
|
||||||
status_code, content = fake_api.fake_responses[url]()
|
status_code, content = fake_api.fake_responses[url]()
|
||||||
return response(status_code=status_code, content=content)
|
return response(status_code=status_code, content=content)
|
||||||
|
|
@ -124,11 +132,10 @@ class DockerClientTest(Cleanup, base.BaseTestCase):
|
||||||
if not cmd:
|
if not cmd:
|
||||||
cmd = ['true']
|
cmd = ['true']
|
||||||
return {"Tty": False, "Image": img, "Cmd": cmd,
|
return {"Tty": False, "Image": img, "Cmd": cmd,
|
||||||
"AttachStdin": False, "Memory": 0,
|
"AttachStdin": False,
|
||||||
"AttachStderr": True, "AttachStdout": True,
|
"AttachStderr": True, "AttachStdout": True,
|
||||||
"StdinOnce": False,
|
"StdinOnce": False,
|
||||||
"OpenStdin": False, "NetworkDisabled": False,
|
"OpenStdin": False, "NetworkDisabled": False,
|
||||||
"MemorySwap": 0
|
|
||||||
}
|
}
|
||||||
|
|
||||||
def test_ctor(self):
|
def test_ctor(self):
|
||||||
|
|
@ -214,7 +221,7 @@ class DockerClientTest(Cleanup, base.BaseTestCase):
|
||||||
|
|
||||||
def test_events_with_since_until(self):
|
def test_events_with_since_until(self):
|
||||||
ts = 1356048000
|
ts = 1356048000
|
||||||
now = datetime.datetime.fromtimestamp(ts)
|
now = datetime.datetime.utcfromtimestamp(ts)
|
||||||
since = now - datetime.timedelta(seconds=10)
|
since = now - datetime.timedelta(seconds=10)
|
||||||
until = now + datetime.timedelta(seconds=10)
|
until = now + datetime.timedelta(seconds=10)
|
||||||
try:
|
try:
|
||||||
|
|
@ -337,11 +344,10 @@ class DockerClientTest(Cleanup, base.BaseTestCase):
|
||||||
self.assertEqual(json.loads(args[1]['data']),
|
self.assertEqual(json.loads(args[1]['data']),
|
||||||
json.loads('''
|
json.loads('''
|
||||||
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
|
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
|
||||||
"AttachStdin": false, "Memory": 0,
|
"AttachStdin": false,
|
||||||
"AttachStderr": true, "AttachStdout": true,
|
"AttachStderr": true, "AttachStdout": true,
|
||||||
"StdinOnce": false,
|
"StdinOnce": false,
|
||||||
"OpenStdin": false, "NetworkDisabled": false,
|
"OpenStdin": false, "NetworkDisabled": false}'''))
|
||||||
"MemorySwap": 0}'''))
|
|
||||||
self.assertEqual(args[1]['headers'],
|
self.assertEqual(args[1]['headers'],
|
||||||
{'Content-Type': 'application/json'})
|
{'Content-Type': 'application/json'})
|
||||||
|
|
||||||
|
|
@ -361,12 +367,11 @@ class DockerClientTest(Cleanup, base.BaseTestCase):
|
||||||
json.loads('''
|
json.loads('''
|
||||||
{"Tty": false, "Image": "busybox",
|
{"Tty": false, "Image": "busybox",
|
||||||
"Cmd": ["ls", "/mnt"], "AttachStdin": false,
|
"Cmd": ["ls", "/mnt"], "AttachStdin": false,
|
||||||
"Volumes": {"/mnt": {}}, "Memory": 0,
|
"Volumes": {"/mnt": {}},
|
||||||
"AttachStderr": true,
|
"AttachStderr": true,
|
||||||
"AttachStdout": true, "OpenStdin": false,
|
"AttachStdout": true, "OpenStdin": false,
|
||||||
"StdinOnce": false,
|
"StdinOnce": false,
|
||||||
"NetworkDisabled": false,
|
"NetworkDisabled": false}'''))
|
||||||
"MemorySwap": 0}'''))
|
|
||||||
self.assertEqual(args[1]['headers'],
|
self.assertEqual(args[1]['headers'],
|
||||||
{'Content-Type': 'application/json'})
|
{'Content-Type': 'application/json'})
|
||||||
|
|
||||||
|
|
@ -386,12 +391,11 @@ class DockerClientTest(Cleanup, base.BaseTestCase):
|
||||||
json.loads('''
|
json.loads('''
|
||||||
{"Tty": false, "Image": "busybox",
|
{"Tty": false, "Image": "busybox",
|
||||||
"Cmd": ["ls", "/mnt"], "AttachStdin": false,
|
"Cmd": ["ls", "/mnt"], "AttachStdin": false,
|
||||||
"Volumes": {"/mnt": {}}, "Memory": 0,
|
"Volumes": {"/mnt": {}},
|
||||||
"AttachStderr": true,
|
"AttachStderr": true,
|
||||||
"AttachStdout": true, "OpenStdin": false,
|
"AttachStdout": true, "OpenStdin": false,
|
||||||
"StdinOnce": false,
|
"StdinOnce": false,
|
||||||
"NetworkDisabled": false,
|
"NetworkDisabled": false}'''))
|
||||||
"MemorySwap": 0}'''))
|
|
||||||
self.assertEqual(args[1]['headers'],
|
self.assertEqual(args[1]['headers'],
|
||||||
{'Content-Type': 'application/json'})
|
{'Content-Type': 'application/json'})
|
||||||
|
|
||||||
|
|
@ -409,7 +413,7 @@ class DockerClientTest(Cleanup, base.BaseTestCase):
|
||||||
json.loads('''
|
json.loads('''
|
||||||
{"Tty": false, "Image": "busybox",
|
{"Tty": false, "Image": "busybox",
|
||||||
"Cmd": ["ls"], "AttachStdin": false,
|
"Cmd": ["ls"], "AttachStdin": false,
|
||||||
"Memory": 0, "ExposedPorts": {
|
"ExposedPorts": {
|
||||||
"1111/tcp": {},
|
"1111/tcp": {},
|
||||||
"2222/udp": {},
|
"2222/udp": {},
|
||||||
"3333/tcp": {}
|
"3333/tcp": {}
|
||||||
|
|
@ -417,8 +421,7 @@ class DockerClientTest(Cleanup, base.BaseTestCase):
|
||||||
"AttachStderr": true,
|
"AttachStderr": true,
|
||||||
"AttachStdout": true, "OpenStdin": false,
|
"AttachStdout": true, "OpenStdin": false,
|
||||||
"StdinOnce": false,
|
"StdinOnce": false,
|
||||||
"NetworkDisabled": false,
|
"NetworkDisabled": false}'''))
|
||||||
"MemorySwap": 0}'''))
|
|
||||||
self.assertEqual(args[1]['headers'],
|
self.assertEqual(args[1]['headers'],
|
||||||
{'Content-Type': 'application/json'})
|
{'Content-Type': 'application/json'})
|
||||||
|
|
||||||
|
|
@ -436,13 +439,11 @@ class DockerClientTest(Cleanup, base.BaseTestCase):
|
||||||
json.loads('''
|
json.loads('''
|
||||||
{"Tty": false, "Image": "busybox",
|
{"Tty": false, "Image": "busybox",
|
||||||
"Cmd": ["hello"], "AttachStdin": false,
|
"Cmd": ["hello"], "AttachStdin": false,
|
||||||
"Memory": 0,
|
|
||||||
"AttachStderr": true,
|
"AttachStderr": true,
|
||||||
"AttachStdout": true, "OpenStdin": false,
|
"AttachStdout": true, "OpenStdin": false,
|
||||||
"StdinOnce": false,
|
"StdinOnce": false,
|
||||||
"NetworkDisabled": false,
|
"NetworkDisabled": false,
|
||||||
"Entrypoint": "cowsay",
|
"Entrypoint": "cowsay"}'''))
|
||||||
"MemorySwap": 0}'''))
|
|
||||||
self.assertEqual(args[1]['headers'],
|
self.assertEqual(args[1]['headers'],
|
||||||
{'Content-Type': 'application/json'})
|
{'Content-Type': 'application/json'})
|
||||||
|
|
||||||
|
|
@ -460,13 +461,11 @@ class DockerClientTest(Cleanup, base.BaseTestCase):
|
||||||
json.loads('''
|
json.loads('''
|
||||||
{"Tty": false, "Image": "busybox",
|
{"Tty": false, "Image": "busybox",
|
||||||
"Cmd": ["ls"], "AttachStdin": false,
|
"Cmd": ["ls"], "AttachStdin": false,
|
||||||
"Memory": 0,
|
|
||||||
"AttachStderr": true,
|
"AttachStderr": true,
|
||||||
"AttachStdout": true, "OpenStdin": false,
|
"AttachStdout": true, "OpenStdin": false,
|
||||||
"StdinOnce": false,
|
"StdinOnce": false,
|
||||||
"NetworkDisabled": false,
|
"NetworkDisabled": false,
|
||||||
"CpuShares": 5,
|
"CpuShares": 5}'''))
|
||||||
"MemorySwap": 0}'''))
|
|
||||||
self.assertEqual(args[1]['headers'],
|
self.assertEqual(args[1]['headers'],
|
||||||
{'Content-Type': 'application/json'})
|
{'Content-Type': 'application/json'})
|
||||||
|
|
||||||
|
|
@ -484,14 +483,12 @@ class DockerClientTest(Cleanup, base.BaseTestCase):
|
||||||
json.loads('''
|
json.loads('''
|
||||||
{"Tty": false, "Image": "busybox",
|
{"Tty": false, "Image": "busybox",
|
||||||
"Cmd": ["ls"], "AttachStdin": false,
|
"Cmd": ["ls"], "AttachStdin": false,
|
||||||
"Memory": 0,
|
|
||||||
"AttachStderr": true,
|
"AttachStderr": true,
|
||||||
"AttachStdout": true, "OpenStdin": false,
|
"AttachStdout": true, "OpenStdin": false,
|
||||||
"StdinOnce": false,
|
"StdinOnce": false,
|
||||||
"NetworkDisabled": false,
|
"NetworkDisabled": false,
|
||||||
"Cpuset": "0,1",
|
"Cpuset": "0,1",
|
||||||
"CpusetCpus": "0,1",
|
"CpusetCpus": "0,1"}'''))
|
||||||
"MemorySwap": 0}'''))
|
|
||||||
self.assertEqual(args[1]['headers'],
|
self.assertEqual(args[1]['headers'],
|
||||||
{'Content-Type': 'application/json'})
|
{'Content-Type': 'application/json'})
|
||||||
|
|
||||||
|
|
@ -509,13 +506,11 @@ class DockerClientTest(Cleanup, base.BaseTestCase):
|
||||||
json.loads('''
|
json.loads('''
|
||||||
{"Tty": false, "Image": "busybox",
|
{"Tty": false, "Image": "busybox",
|
||||||
"Cmd": ["ls"], "AttachStdin": false,
|
"Cmd": ["ls"], "AttachStdin": false,
|
||||||
"Memory": 0,
|
|
||||||
"AttachStderr": true,
|
"AttachStderr": true,
|
||||||
"AttachStdout": true, "OpenStdin": false,
|
"AttachStdout": true, "OpenStdin": false,
|
||||||
"StdinOnce": false,
|
"StdinOnce": false,
|
||||||
"NetworkDisabled": false,
|
"NetworkDisabled": false,
|
||||||
"WorkingDir": "/root",
|
"WorkingDir": "/root"}'''))
|
||||||
"MemorySwap": 0}'''))
|
|
||||||
self.assertEqual(args[1]['headers'],
|
self.assertEqual(args[1]['headers'],
|
||||||
{'Content-Type': 'application/json'})
|
{'Content-Type': 'application/json'})
|
||||||
|
|
||||||
|
|
@ -531,11 +526,10 @@ class DockerClientTest(Cleanup, base.BaseTestCase):
|
||||||
self.assertEqual(json.loads(args[1]['data']),
|
self.assertEqual(json.loads(args[1]['data']),
|
||||||
json.loads('''
|
json.loads('''
|
||||||
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
|
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
|
||||||
"AttachStdin": true, "Memory": 0,
|
"AttachStdin": true,
|
||||||
"AttachStderr": true, "AttachStdout": true,
|
"AttachStderr": true, "AttachStdout": true,
|
||||||
"StdinOnce": true,
|
"StdinOnce": true,
|
||||||
"OpenStdin": true, "NetworkDisabled": false,
|
"OpenStdin": true, "NetworkDisabled": false}'''))
|
||||||
"MemorySwap": 0}'''))
|
|
||||||
self.assertEqual(args[1]['headers'],
|
self.assertEqual(args[1]['headers'],
|
||||||
{'Content-Type': 'application/json'})
|
{'Content-Type': 'application/json'})
|
||||||
|
|
||||||
|
|
@ -581,78 +575,95 @@ class DockerClientTest(Cleanup, base.BaseTestCase):
|
||||||
self.assertEqual(json.loads(args[1]['data']),
|
self.assertEqual(json.loads(args[1]['data']),
|
||||||
json.loads('''
|
json.loads('''
|
||||||
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
|
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
|
||||||
"AttachStdin": false, "Memory": 0,
|
"AttachStdin": false,
|
||||||
"AttachStderr": true, "AttachStdout": true,
|
"AttachStderr": true, "AttachStdout": true,
|
||||||
"StdinOnce": false,
|
"StdinOnce": false,
|
||||||
"OpenStdin": false, "NetworkDisabled": false,
|
"OpenStdin": false, "NetworkDisabled": false}'''))
|
||||||
"MemorySwap": 0}'''))
|
|
||||||
self.assertEqual(args[1]['headers'],
|
self.assertEqual(args[1]['headers'],
|
||||||
{'Content-Type': 'application/json'})
|
{'Content-Type': 'application/json'})
|
||||||
self.assertEqual(args[1]['params'], {'name': 'marisa-kirisame'})
|
self.assertEqual(args[1]['params'], {'name': 'marisa-kirisame'})
|
||||||
|
|
||||||
def test_create_container_with_mem_limit_as_int(self):
|
def test_create_container_with_mem_limit_as_int(self):
|
||||||
try:
|
try:
|
||||||
self.client.create_container('busybox', 'true',
|
self.client.create_container(
|
||||||
mem_limit=128.0)
|
'busybox', 'true', host_config=create_host_config(
|
||||||
|
mem_limit=128.0
|
||||||
|
)
|
||||||
|
)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self.fail('Command should not raise exception: {0}'.format(e))
|
self.fail('Command should not raise exception: {0}'.format(e))
|
||||||
|
|
||||||
args = fake_request.call_args
|
args = fake_request.call_args
|
||||||
data = json.loads(args[1]['data'])
|
data = json.loads(args[1]['data'])
|
||||||
self.assertEqual(data['Memory'], 128.0)
|
self.assertEqual(data['HostConfig']['Memory'], 128.0)
|
||||||
|
|
||||||
def test_create_container_with_mem_limit_as_string(self):
|
def test_create_container_with_mem_limit_as_string(self):
|
||||||
try:
|
try:
|
||||||
self.client.create_container('busybox', 'true',
|
self.client.create_container(
|
||||||
mem_limit='128')
|
'busybox', 'true', host_config=create_host_config(
|
||||||
|
mem_limit='128'
|
||||||
|
)
|
||||||
|
)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self.fail('Command should not raise exception: {0}'.format(e))
|
self.fail('Command should not raise exception: {0}'.format(e))
|
||||||
|
|
||||||
args = fake_request.call_args
|
args = fake_request.call_args
|
||||||
data = json.loads(args[1]['data'])
|
data = json.loads(args[1]['data'])
|
||||||
self.assertEqual(data['Memory'], 128.0)
|
self.assertEqual(data['HostConfig']['Memory'], 128.0)
|
||||||
|
|
||||||
def test_create_container_with_mem_limit_as_string_with_k_unit(self):
|
def test_create_container_with_mem_limit_as_string_with_k_unit(self):
|
||||||
try:
|
try:
|
||||||
self.client.create_container('busybox', 'true',
|
self.client.create_container(
|
||||||
mem_limit='128k')
|
'busybox', 'true', host_config=create_host_config(
|
||||||
|
mem_limit='128k'
|
||||||
|
)
|
||||||
|
)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self.fail('Command should not raise exception: {0}'.format(e))
|
self.fail('Command should not raise exception: {0}'.format(e))
|
||||||
|
|
||||||
args = fake_request.call_args
|
args = fake_request.call_args
|
||||||
data = json.loads(args[1]['data'])
|
data = json.loads(args[1]['data'])
|
||||||
self.assertEqual(data['Memory'], 128.0 * 1024)
|
self.assertEqual(data['HostConfig']['Memory'], 128.0 * 1024)
|
||||||
|
|
||||||
def test_create_container_with_mem_limit_as_string_with_m_unit(self):
|
def test_create_container_with_mem_limit_as_string_with_m_unit(self):
|
||||||
try:
|
try:
|
||||||
self.client.create_container('busybox', 'true',
|
self.client.create_container(
|
||||||
mem_limit='128m')
|
'busybox', 'true', host_config=create_host_config(
|
||||||
|
mem_limit='128m'
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self.fail('Command should not raise exception: {0}'.format(e))
|
self.fail('Command should not raise exception: {0}'.format(e))
|
||||||
|
|
||||||
args = fake_request.call_args
|
args = fake_request.call_args
|
||||||
data = json.loads(args[1]['data'])
|
data = json.loads(args[1]['data'])
|
||||||
self.assertEqual(data['Memory'], 128.0 * 1024 * 1024)
|
self.assertEqual(data['HostConfig']['Memory'], 128.0 * 1024 * 1024)
|
||||||
|
|
||||||
def test_create_container_with_mem_limit_as_string_with_g_unit(self):
|
def test_create_container_with_mem_limit_as_string_with_g_unit(self):
|
||||||
try:
|
try:
|
||||||
self.client.create_container('busybox', 'true',
|
self.client.create_container(
|
||||||
mem_limit='128g')
|
'busybox', 'true', host_config=create_host_config(
|
||||||
|
mem_limit='128g'
|
||||||
|
)
|
||||||
|
)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self.fail('Command should not raise exception: {0}'.format(e))
|
self.fail('Command should not raise exception: {0}'.format(e))
|
||||||
|
|
||||||
args = fake_request.call_args
|
args = fake_request.call_args
|
||||||
data = json.loads(args[1]['data'])
|
data = json.loads(args[1]['data'])
|
||||||
self.assertEqual(data['Memory'], 128.0 * 1024 * 1024 * 1024)
|
self.assertEqual(
|
||||||
|
data['HostConfig']['Memory'], 128.0 * 1024 * 1024 * 1024
|
||||||
|
)
|
||||||
|
|
||||||
def test_create_container_with_mem_limit_as_string_with_wrong_value(self):
|
def test_create_container_with_mem_limit_as_string_with_wrong_value(self):
|
||||||
self.assertRaises(docker.errors.DockerException,
|
self.assertRaises(
|
||||||
self.client.create_container,
|
docker.errors.DockerException, create_host_config, mem_limit='128p'
|
||||||
'busybox', 'true', mem_limit='128p')
|
)
|
||||||
|
|
||||||
self.assertRaises(docker.errors.DockerException,
|
self.assertRaises(
|
||||||
self.client.create_container,
|
docker.errors.DockerException, create_host_config, mem_limit='1f28'
|
||||||
'busybox', 'true', mem_limit='1f28')
|
)
|
||||||
|
|
||||||
def test_start_container(self):
|
def test_start_container(self):
|
||||||
try:
|
try:
|
||||||
|
|
@ -1543,7 +1554,9 @@ class DockerClientTest(Cleanup, base.BaseTestCase):
|
||||||
|
|
||||||
def test_logs(self):
|
def test_logs(self):
|
||||||
try:
|
try:
|
||||||
logs = self.client.logs(fake_api.FAKE_CONTAINER_ID)
|
with mock.patch('docker.Client.inspect_container',
|
||||||
|
fake_inspect_container):
|
||||||
|
logs = self.client.logs(fake_api.FAKE_CONTAINER_ID)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self.fail('Command should not raise exception: {0}'.format(e))
|
self.fail('Command should not raise exception: {0}'.format(e))
|
||||||
|
|
||||||
|
|
@ -1562,7 +1575,9 @@ class DockerClientTest(Cleanup, base.BaseTestCase):
|
||||||
|
|
||||||
def test_logs_with_dict_instead_of_id(self):
|
def test_logs_with_dict_instead_of_id(self):
|
||||||
try:
|
try:
|
||||||
logs = self.client.logs({'Id': fake_api.FAKE_CONTAINER_ID})
|
with mock.patch('docker.Client.inspect_container',
|
||||||
|
fake_inspect_container):
|
||||||
|
logs = self.client.logs({'Id': fake_api.FAKE_CONTAINER_ID})
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self.fail('Command should not raise exception: {0}'.format(e))
|
self.fail('Command should not raise exception: {0}'.format(e))
|
||||||
|
|
||||||
|
|
@ -1581,7 +1596,9 @@ class DockerClientTest(Cleanup, base.BaseTestCase):
|
||||||
|
|
||||||
def test_log_streaming(self):
|
def test_log_streaming(self):
|
||||||
try:
|
try:
|
||||||
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True)
|
with mock.patch('docker.Client.inspect_container',
|
||||||
|
fake_inspect_container):
|
||||||
|
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self.fail('Command should not raise exception: {0}'.format(e))
|
self.fail('Command should not raise exception: {0}'.format(e))
|
||||||
|
|
||||||
|
|
@ -1595,7 +1612,10 @@ class DockerClientTest(Cleanup, base.BaseTestCase):
|
||||||
|
|
||||||
def test_log_tail(self):
|
def test_log_tail(self):
|
||||||
try:
|
try:
|
||||||
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False, tail=10)
|
with mock.patch('docker.Client.inspect_container',
|
||||||
|
fake_inspect_container):
|
||||||
|
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
|
||||||
|
tail=10)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self.fail('Command should not raise exception: {0}'.format(e))
|
self.fail('Command should not raise exception: {0}'.format(e))
|
||||||
|
|
||||||
|
|
@ -1607,6 +1627,27 @@ class DockerClientTest(Cleanup, base.BaseTestCase):
|
||||||
stream=False
|
stream=False
|
||||||
)
|
)
|
||||||
|
|
||||||
|
def test_log_tty(self):
|
||||||
|
try:
|
||||||
|
m = mock.Mock()
|
||||||
|
with mock.patch('docker.Client.inspect_container',
|
||||||
|
fake_inspect_container_tty):
|
||||||
|
with mock.patch('docker.Client._stream_raw_result',
|
||||||
|
m):
|
||||||
|
self.client.logs(fake_api.FAKE_CONTAINER_ID,
|
||||||
|
stream=True)
|
||||||
|
except Exception as e:
|
||||||
|
self.fail('Command should not raise exception: {0}'.format(e))
|
||||||
|
|
||||||
|
self.assertTrue(m.called)
|
||||||
|
fake_request.assert_called_with(
|
||||||
|
url_prefix + 'containers/3cc2351ab11b/logs',
|
||||||
|
params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1,
|
||||||
|
'tail': 'all'},
|
||||||
|
timeout=DEFAULT_TIMEOUT_SECONDS,
|
||||||
|
stream=True
|
||||||
|
)
|
||||||
|
|
||||||
def test_diff(self):
|
def test_diff(self):
|
||||||
try:
|
try:
|
||||||
self.client.diff(fake_api.FAKE_CONTAINER_ID)
|
self.client.diff(fake_api.FAKE_CONTAINER_ID)
|
||||||
|
|
@ -2383,9 +2424,9 @@ class DockerClientTest(Cleanup, base.BaseTestCase):
|
||||||
f.write('auth = {0}\n'.format(auth_))
|
f.write('auth = {0}\n'.format(auth_))
|
||||||
f.write('email = sakuya@scarlet.net')
|
f.write('email = sakuya@scarlet.net')
|
||||||
cfg = docker.auth.load_config(dockercfg_path)
|
cfg = docker.auth.load_config(dockercfg_path)
|
||||||
self.assertTrue(docker.auth.INDEX_URL in cfg)
|
self.assertTrue(docker.auth.INDEX_NAME in cfg)
|
||||||
self.assertNotEqual(cfg[docker.auth.INDEX_URL], None)
|
self.assertNotEqual(cfg[docker.auth.INDEX_NAME], None)
|
||||||
cfg = cfg[docker.auth.INDEX_URL]
|
cfg = cfg[docker.auth.INDEX_NAME]
|
||||||
self.assertEqual(cfg['username'], 'sakuya')
|
self.assertEqual(cfg['username'], 'sakuya')
|
||||||
self.assertEqual(cfg['password'], 'izayoi')
|
self.assertEqual(cfg['password'], 'izayoi')
|
||||||
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
|
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
|
||||||
|
|
|
||||||
|
|
@ -1,15 +1,16 @@
|
||||||
import os
|
import os
|
||||||
import os.path
|
import os.path
|
||||||
import unittest
|
import unittest
|
||||||
|
import tempfile
|
||||||
|
|
||||||
from docker.client import Client
|
from docker.client import Client
|
||||||
from docker.errors import DockerException
|
from docker.errors import DockerException
|
||||||
from docker.utils import (
|
from docker.utils import (
|
||||||
parse_repository_tag, parse_host, convert_filters, kwargs_from_env,
|
parse_repository_tag, parse_host, convert_filters, kwargs_from_env,
|
||||||
create_host_config, Ulimit, LogConfig, parse_bytes
|
create_host_config, Ulimit, LogConfig, parse_bytes, parse_env_file
|
||||||
)
|
)
|
||||||
from docker.utils.ports import build_port_bindings, split_port
|
from docker.utils.ports import build_port_bindings, split_port
|
||||||
from docker.auth import resolve_authconfig
|
from docker.auth import resolve_repository_name, resolve_authconfig
|
||||||
|
|
||||||
import base
|
import base
|
||||||
|
|
||||||
|
|
@ -17,6 +18,17 @@ import base
|
||||||
class UtilsTest(base.BaseTestCase):
|
class UtilsTest(base.BaseTestCase):
|
||||||
longMessage = True
|
longMessage = True
|
||||||
|
|
||||||
|
def generate_tempfile(self, file_content=None):
|
||||||
|
"""
|
||||||
|
Generates a temporary file for tests with the content
|
||||||
|
of 'file_content' and returns the filename.
|
||||||
|
Don't forget to unlink the file with os.unlink() after.
|
||||||
|
"""
|
||||||
|
local_tempfile = tempfile.NamedTemporaryFile(delete=False)
|
||||||
|
local_tempfile.write(file_content.encode('UTF-8'))
|
||||||
|
local_tempfile.close()
|
||||||
|
return local_tempfile.name
|
||||||
|
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
self.os_environ = os.environ.copy()
|
self.os_environ = os.environ.copy()
|
||||||
|
|
||||||
|
|
@ -95,6 +107,28 @@ class UtilsTest(base.BaseTestCase):
|
||||||
except TypeError as e:
|
except TypeError as e:
|
||||||
self.fail(e)
|
self.fail(e)
|
||||||
|
|
||||||
|
def test_parse_env_file_proper(self):
|
||||||
|
env_file = self.generate_tempfile(
|
||||||
|
file_content='USER=jdoe\nPASS=secret')
|
||||||
|
get_parse_env_file = parse_env_file(env_file)
|
||||||
|
self.assertEqual(get_parse_env_file,
|
||||||
|
{'USER': 'jdoe', 'PASS': 'secret'})
|
||||||
|
os.unlink(env_file)
|
||||||
|
|
||||||
|
def test_parse_env_file_commented_line(self):
|
||||||
|
env_file = self.generate_tempfile(
|
||||||
|
file_content='USER=jdoe\n#PASS=secret')
|
||||||
|
get_parse_env_file = parse_env_file(env_file)
|
||||||
|
self.assertEqual(get_parse_env_file, {'USER': 'jdoe'})
|
||||||
|
os.unlink(env_file)
|
||||||
|
|
||||||
|
def test_parse_env_file_invalid_line(self):
|
||||||
|
env_file = self.generate_tempfile(
|
||||||
|
file_content='USER jdoe')
|
||||||
|
self.assertRaises(
|
||||||
|
DockerException, parse_env_file, env_file)
|
||||||
|
os.unlink(env_file)
|
||||||
|
|
||||||
def test_convert_filters(self):
|
def test_convert_filters(self):
|
||||||
tests = [
|
tests = [
|
||||||
({'dangling': True}, '{"dangling": ["true"]}'),
|
({'dangling': True}, '{"dangling": ["true"]}'),
|
||||||
|
|
@ -107,7 +141,7 @@ class UtilsTest(base.BaseTestCase):
|
||||||
self.assertEqual(convert_filters(filters), expected)
|
self.assertEqual(convert_filters(filters), expected)
|
||||||
|
|
||||||
def test_create_empty_host_config(self):
|
def test_create_empty_host_config(self):
|
||||||
empty_config = create_host_config()
|
empty_config = create_host_config(network_mode='')
|
||||||
self.assertEqual(empty_config, {})
|
self.assertEqual(empty_config, {})
|
||||||
|
|
||||||
def test_create_host_config_dict_ulimit(self):
|
def test_create_host_config_dict_ulimit(self):
|
||||||
|
|
@ -167,6 +201,61 @@ class UtilsTest(base.BaseTestCase):
|
||||||
type=LogConfig.types.JSON, config='helloworld'
|
type=LogConfig.types.JSON, config='helloworld'
|
||||||
))
|
))
|
||||||
|
|
||||||
|
def test_resolve_repository_name(self):
|
||||||
|
# docker hub library image
|
||||||
|
self.assertEqual(
|
||||||
|
resolve_repository_name('image'),
|
||||||
|
('index.docker.io', 'image'),
|
||||||
|
)
|
||||||
|
|
||||||
|
# docker hub image
|
||||||
|
self.assertEqual(
|
||||||
|
resolve_repository_name('username/image'),
|
||||||
|
('index.docker.io', 'username/image'),
|
||||||
|
)
|
||||||
|
|
||||||
|
# private registry
|
||||||
|
self.assertEqual(
|
||||||
|
resolve_repository_name('my.registry.net/image'),
|
||||||
|
('my.registry.net', 'image'),
|
||||||
|
)
|
||||||
|
|
||||||
|
# private registry with port
|
||||||
|
self.assertEqual(
|
||||||
|
resolve_repository_name('my.registry.net:5000/image'),
|
||||||
|
('my.registry.net:5000', 'image'),
|
||||||
|
)
|
||||||
|
|
||||||
|
# private registry with username
|
||||||
|
self.assertEqual(
|
||||||
|
resolve_repository_name('my.registry.net/username/image'),
|
||||||
|
('my.registry.net', 'username/image'),
|
||||||
|
)
|
||||||
|
|
||||||
|
# no dots but port
|
||||||
|
self.assertEqual(
|
||||||
|
resolve_repository_name('hostname:5000/image'),
|
||||||
|
('hostname:5000', 'image'),
|
||||||
|
)
|
||||||
|
|
||||||
|
# no dots but port and username
|
||||||
|
self.assertEqual(
|
||||||
|
resolve_repository_name('hostname:5000/username/image'),
|
||||||
|
('hostname:5000', 'username/image'),
|
||||||
|
)
|
||||||
|
|
||||||
|
# localhost
|
||||||
|
self.assertEqual(
|
||||||
|
resolve_repository_name('localhost/image'),
|
||||||
|
('localhost', 'image'),
|
||||||
|
)
|
||||||
|
|
||||||
|
# localhost with username
|
||||||
|
self.assertEqual(
|
||||||
|
resolve_repository_name('localhost/username/image'),
|
||||||
|
('localhost', 'username/image'),
|
||||||
|
)
|
||||||
|
|
||||||
def test_resolve_authconfig(self):
|
def test_resolve_authconfig(self):
|
||||||
auth_config = {
|
auth_config = {
|
||||||
'https://index.docker.io/v1/': {'auth': 'indexuser'},
|
'https://index.docker.io/v1/': {'auth': 'indexuser'},
|
||||||
|
|
@ -231,6 +320,40 @@ class UtilsTest(base.BaseTestCase):
|
||||||
resolve_authconfig(auth_config, 'does.not.exist') is None
|
resolve_authconfig(auth_config, 'does.not.exist') is None
|
||||||
)
|
)
|
||||||
|
|
||||||
|
def test_resolve_registry_and_auth(self):
|
||||||
|
auth_config = {
|
||||||
|
'https://index.docker.io/v1/': {'auth': 'indexuser'},
|
||||||
|
'my.registry.net': {'auth': 'privateuser'},
|
||||||
|
}
|
||||||
|
|
||||||
|
# library image
|
||||||
|
image = 'image'
|
||||||
|
self.assertEqual(
|
||||||
|
resolve_authconfig(auth_config, resolve_repository_name(image)[0]),
|
||||||
|
{'auth': 'indexuser'},
|
||||||
|
)
|
||||||
|
|
||||||
|
# docker hub image
|
||||||
|
image = 'username/image'
|
||||||
|
self.assertEqual(
|
||||||
|
resolve_authconfig(auth_config, resolve_repository_name(image)[0]),
|
||||||
|
{'auth': 'indexuser'},
|
||||||
|
)
|
||||||
|
|
||||||
|
# private registry
|
||||||
|
image = 'my.registry.net/image'
|
||||||
|
self.assertEqual(
|
||||||
|
resolve_authconfig(auth_config, resolve_repository_name(image)[0]),
|
||||||
|
{'auth': 'privateuser'},
|
||||||
|
)
|
||||||
|
|
||||||
|
# unauthenticated registry
|
||||||
|
image = 'other.registry.net/image'
|
||||||
|
self.assertEqual(
|
||||||
|
resolve_authconfig(auth_config, resolve_repository_name(image)[0]),
|
||||||
|
None,
|
||||||
|
)
|
||||||
|
|
||||||
def test_split_port_with_host_ip(self):
|
def test_split_port_with_host_ip(self):
|
||||||
internal_port, external_port = split_port("127.0.0.1:1000:2000")
|
internal_port, external_port = split_port("127.0.0.1:1000:2000")
|
||||||
self.assertEqual(internal_port, ["2000"])
|
self.assertEqual(internal_port, ["2000"])
|
||||||
|
|
|
||||||